From a756939ffc44282ab695af45e652dcd79b2047cc Mon Sep 17 00:00:00 2001
From: Yeting Kuo
Date: Fri, 28 Jun 2024 00:22:55 -0700
Subject: [PATCH] [Asan] Provide TTI hook to describe memory reference
 information of target intrinsics.

Previously, ASan treated target intrinsics as black boxes, so it could not
instrument accurate checks for them. This patch provides a TTI hook that lets
targets describe the memory-reference information of their intrinsics to ASan.

Note:
1. This patch renames InterestingMemoryOperand to MemoryRefInfo.
2. This patch does not support RVV indexed/segment loads/stores.

Co-authored-by: Hank Chang
---
 llvm/include/llvm/Analysis/MemoryRefInfo.h    |   53 +
 .../llvm/Analysis/TargetTransformInfo.h       |   14 +
 .../llvm/Analysis/TargetTransformInfoImpl.h   |    5 +
 .../Instrumentation/AddressSanitizerCommon.h  |   32 +-
 llvm/lib/Analysis/TargetTransformInfo.cpp     |    5 +
 .../AMDGPU/AMDGPUAsanInstrumentation.cpp      |    2 +-
 .../Target/AMDGPU/AMDGPUAsanInstrumentation.h |    3 +-
 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp   |    4 +-
 .../Target/RISCV/RISCVTargetTransformInfo.cpp |   93 +
 .../Target/RISCV/RISCVTargetTransformInfo.h   |    6 +
 .../Instrumentation/AddressSanitizer.cpp      |   37 +-
 .../Instrumentation/HWAddressSanitizer.cpp    |   32 +-
 .../RISCV/asan-rvv-intrinsics.ll              | 2304 +++++++++++++++++
 13 files changed, 2526 insertions(+), 64 deletions(-)
 create mode 100644 llvm/include/llvm/Analysis/MemoryRefInfo.h
 create mode 100644 llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll

diff --git a/llvm/include/llvm/Analysis/MemoryRefInfo.h b/llvm/include/llvm/Analysis/MemoryRefInfo.h
new file mode 100644
index 0000000000000..094141e22aa67
--- /dev/null
+++ b/llvm/include/llvm/Analysis/MemoryRefInfo.h
@@ -0,0 +1,53 @@
+//===--------- Definition of the MemoryRefInfo class -*- C++ -*------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MemoryRefInfo class, which describes the memory
+// reference made by a memory access instruction or intrinsic.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_ANALYSIS_MEMORYREFINFO_H
+#define LLVM_ANALYSIS_MEMORYREFINFO_H
+
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/TypeSize.h"
+
+namespace llvm {
+class MemoryRefInfo {
+public:
+  Use *PtrUse = nullptr;
+  bool IsWrite;
+  Type *OpType;
+  TypeSize TypeStoreSize = TypeSize::getFixed(0);
+  MaybeAlign Alignment;
+  // The mask Value, if we're looking at a masked load/store.
+  Value *MaybeMask;
+  // The EVL Value, if we're looking at a vp intrinsic.
+  Value *MaybeEVL;
+  // The Stride Value, if we're looking at a strided load/store.
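+  // (For example, riscv.vlse/riscv.vsse access one element every Stride
+  // bytes rather than a contiguous block.)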
+  Value *MaybeStride;
+
+  MemoryRefInfo() = default;
+  MemoryRefInfo(Instruction *I, unsigned OperandNo, bool IsWrite,
+                class Type *OpType, MaybeAlign Alignment,
+                Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr,
+                Value *MaybeStride = nullptr)
+      : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
+        MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
+    const DataLayout &DL = I->getDataLayout();
+    TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
+    PtrUse = &I->getOperandUse(OperandNo);
+  }
+
+  Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
+  Value *getPtr() { return PtrUse->get(); }
+  operator bool() { return PtrUse != nullptr; }
+};
+
+} // namespace llvm
+#endif
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 9048481b49189..bae4968092a3b 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -23,6 +23,8 @@
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/Analysis/MemoryRefInfo.h"
 #include "llvm/IR/FMF.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
@@ -998,6 +1000,10 @@ class TargetTransformInfo {
   MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                bool IsZeroCmp) const;
 
+  // Add MemoryRefInfo of Intrinsic \p II into array \p Interesting.
+  bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+                        IntrinsicInst *II) const;
+
   /// Should the Select Optimization pass be enabled and ran.
   bool enableSelectOptimize() const;
@@ -2083,6 +2089,8 @@ class TargetTransformInfo::Concept {
   virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
   virtual MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                        bool IsZeroCmp) const = 0;
+  virtual bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+                                IntrinsicInst *II) const = 0;
   virtual bool enableSelectOptimize() = 0;
   virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) = 0;
   virtual bool enableInterleavedAccessVectorization() = 0;
@@ -2711,6 +2719,12 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
                                                bool IsZeroCmp) const override {
     return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
   }
+
+  bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+                        IntrinsicInst *II) const override {
+    return Impl.getMemoryRefInfo(Interesting, II);
+  }
+
   bool enableSelectOptimize() override {
     return Impl.enableSelectOptimize();
   }
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index a8d6dd18266bb..93f2f9a35f04d 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -434,6 +434,11 @@ class TargetTransformInfoImplBase {
     return {};
   }
 
+  bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+                        IntrinsicInst *II) const {
+    return false;
+  }
+
   bool enableSelectOptimize() const { return true; }
 
   bool shouldTreatInstructionLikeSelect(const Instruction *I) {
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
index 9fe2716220e83..f7bd36c2def03 100644
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -14,6 +14,7 @@
 #define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
 
 #include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/MemoryRefInfo.h" #include "llvm/Analysis/PostDominators.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instruction.h" @@ -22,37 +23,6 @@ namespace llvm { -class InterestingMemoryOperand { -public: - Use *PtrUse; - bool IsWrite; - Type *OpType; - TypeSize TypeStoreSize = TypeSize::getFixed(0); - MaybeAlign Alignment; - // The mask Value, if we're looking at a masked load/store. - Value *MaybeMask; - // The EVL Value, if we're looking at a vp intrinsic. - Value *MaybeEVL; - // The Stride Value, if we're looking at a strided load/store. - Value *MaybeStride; - - InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite, - class Type *OpType, MaybeAlign Alignment, - Value *MaybeMask = nullptr, - Value *MaybeEVL = nullptr, - Value *MaybeStride = nullptr) - : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment), - MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) { - const DataLayout &DL = I->getDataLayout(); - TypeStoreSize = DL.getTypeStoreSizeInBits(OpType); - PtrUse = &I->getOperandUse(OperandNo); - } - - Instruction *getInsn() { return cast(PtrUse->getUser()); } - - Value *getPtr() { return PtrUse->get(); } -}; - // Get AddressSanitizer parameters. void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp index 1ca9a16b18112..113f995cb4693 100644 --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -660,6 +660,11 @@ TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp); } +bool TargetTransformInfo::getMemoryRefInfo( + SmallVectorImpl &Interesting, IntrinsicInst *II) const { + return TTIImpl->getMemoryRefInfo(Interesting, II); +} + bool TargetTransformInfo::enableSelectOptimize() const { return TTIImpl->enableSelectOptimize(); } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp index 6554863e08c91..5e69c64b89a06 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp @@ -219,7 +219,7 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, void getInterestingMemoryOperands( Module &M, Instruction *I, - SmallVectorImpl &Interesting) { + SmallVectorImpl &Interesting) { const DataLayout &DL = M.getDataLayout(); if (LoadInst *LI = dyn_cast(I)) { Interesting.emplace_back(I, LI->getPointerOperandIndex(), false, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h index b2b8ec19b49ec..b51500c5ac885 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h @@ -52,8 +52,7 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, /// that needs to be instrumented void getInterestingMemoryOperands( Module &M, Instruction *I, - SmallVectorImpl &Interesting); - + SmallVectorImpl &Interesting); } // end namespace AMDGPU } // end namespace llvm diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp index 3159b497a1ecb..1c4a86f7c29f8 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp @@ -1261,9 +1261,9 @@ bool AMDGPUSwLowerLDS::run() { } if (AsanInstrumentLDS) { - 
-    SmallVector<InterestingMemoryOperand> OperandsToInstrument;
+    SmallVector<MemoryRefInfo> OperandsToInstrument;
     for (Instruction *Inst : AsanInfo.Instructions) {
-      SmallVector<InterestingMemoryOperand> InterestingOperands;
+      SmallVector<MemoryRefInfo> InterestingOperands;
       getInterestingMemoryOperands(M, Inst, InterestingOperands);
       for (auto &Operand : InterestingOperands) {
         OperandsToInstrument.push_back(Operand);
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 79e3b9ee09744..44e21658300be 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -15,6 +15,7 @@
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/IR/PatternMatch.h"
 #include <cmath>
 #include <optional>
@@ -37,6 +38,83 @@ static cl::opt<unsigned> SLPMaxVF(
         "exclusively by SLP vectorizer."),
     cl::Hidden);
 
+bool RISCVTTIImpl::getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+                                    IntrinsicInst *II) const {
+  const DataLayout &DL = getDataLayout();
+  Intrinsic::ID IID = II->getIntrinsicID();
+  LLVMContext &C = II->getContext();
+  bool HasMask = false;
+
+  switch (IID) {
+  case Intrinsic::riscv_vle_mask:
+  case Intrinsic::riscv_vse_mask:
+    HasMask = true;
+    [[fallthrough]];
+  case Intrinsic::riscv_vle:
+  case Intrinsic::riscv_vse: {
+    // Intrinsic interface:
+    // riscv_vle(merge, ptr, vl)
+    // riscv_vle_mask(merge, ptr, mask, vl, policy)
+    // riscv_vse(val, ptr, vl)
+    // riscv_vse_mask(val, ptr, mask, vl, policy)
+    bool IsWrite = II->getType()->isVoidTy();
+    Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
+    const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
+    unsigned VLIndex = RVVIInfo->VLOperand;
+    unsigned PtrOperandNo = VLIndex - 1 - HasMask;
+    MaybeAlign Alignment =
+        II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
+    Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
+    Value *Mask = ConstantInt::getTrue(MaskType);
+    if (HasMask)
+      Mask = II->getArgOperand(VLIndex - 1);
+    Value *EVL = II->getArgOperand(VLIndex);
+    Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask,
+                             EVL);
+    return true;
+  }
+  case Intrinsic::riscv_vlse_mask:
+  case Intrinsic::riscv_vsse_mask:
+    HasMask = true;
+    [[fallthrough]];
+  case Intrinsic::riscv_vlse:
+  case Intrinsic::riscv_vsse: {
+    // Intrinsic interface:
+    // riscv_vlse(merge, ptr, stride, vl)
+    // riscv_vlse_mask(merge, ptr, stride, mask, vl, policy)
+    // riscv_vsse(val, ptr, stride, vl)
+    // riscv_vsse_mask(val, ptr, stride, mask, vl, policy)
+    bool IsWrite = II->getType()->isVoidTy();
+    Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
+    const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
+    unsigned VLIndex = RVVIInfo->VLOperand;
+    unsigned PtrOperandNo = VLIndex - 2 - HasMask;
+    MaybeAlign Alignment =
+        II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
+
+    Value *Stride = II->getArgOperand(PtrOperandNo + 1);
+    // Use the pointer alignment as the element alignment if the stride is a
+    // multiple of the pointer alignment. Otherwise, the element alignment
+    // should be the greatest common divisor of the pointer alignment and the
+    // stride. For simplicity, just treat the elements as unaligned.
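+    // E.g. a 4-byte-aligned pointer with a stride of 6 produces element
+    // addresses that alternate between 4-byte and 2-byte alignment, so
+    // Align(1) is the conservative choice.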
+    unsigned PointerAlign = Alignment.valueOrOne().value();
+    if (!isa<ConstantInt>(Stride) ||
+        cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
+      Alignment = Align(1);
+
+    Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
+    Value *Mask = ConstantInt::getTrue(MaskType);
+    if (HasMask)
+      Mask = II->getArgOperand(VLIndex - 1);
+    Value *EVL = II->getArgOperand(VLIndex);
+    Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask,
+                             EVL, Stride);
+    return true;
+  }
+  }
+  return false;
+}
+
 InstructionCost
 RISCVTTIImpl::getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
                                       TTI::TargetCostKind CostKind) {
@@ -2796,3 +2874,18 @@ RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
   }
   return Options;
 }
+
+bool RISCVTTIImpl::areInlineCompatible(const Function *Caller,
+                                       const Function *Callee) const {
+  const TargetMachine &TM = getTLI()->getTargetMachine();
+
+  const FeatureBitset &CallerBits =
+      TM.getSubtargetImpl(*Caller)->getFeatureBits();
+  const FeatureBitset &CalleeBits =
+      TM.getSubtargetImpl(*Callee)->getFeatureBits();
+
+  // Inline a callee if its target-features are a subset of the caller's
+  // target-features.
+  return (CallerBits & CalleeBits) == CalleeBits;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 5389e9bc4a8fe..66cd29a874075 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -68,6 +68,12 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
       : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
         TLI(ST->getTargetLowering()) {}
 
+  bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+                        IntrinsicInst *II) const;
+
+  bool areInlineCompatible(const Function *Caller,
+                           const Function *Callee) const;
+
   /// Return the cost of materializing an immediate for a value operand of
   /// a store instruction.
   InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index bbe7040121649..75a8a9fa40b54 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -29,6 +29,7 @@
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/StackSafetyAnalysis.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/BinaryFormat/MachO.h"
 #include "llvm/Demangle/Demangle.h"
@@ -759,12 +760,13 @@ struct AddressSanitizer {
   bool isInterestingAlloca(const AllocaInst &AI);
   bool ignoreAccess(Instruction *Inst, Value *Ptr);
 
-  void getInterestingMemoryOperands(
-      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+  void getMemoryRefInfos(Instruction *I,
+                         SmallVectorImpl<MemoryRefInfo> &Interesting,
+                         const TargetTransformInfo *TTI);
 
-  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
-                     InterestingMemoryOperand &O, bool UseCalls,
-                     const DataLayout &DL, RuntimeCallInserter &RTCI);
+  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, MemoryRefInfo &O,
+                     bool UseCalls, const DataLayout &DL,
+                     RuntimeCallInserter &RTCI);
   void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                 RuntimeCallInserter &RTCI);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
@@ -800,7 +802,8 @@ struct AddressSanitizer {
   void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool suppressInstrumentationSiteForDebug(int &Instrumented);
-  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
+  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
+                          const TargetTransformInfo *TTI);
   bool maybeInsertAsanInitAtFunctionEntry(Function &F);
   bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
   void markEscapedLocalAllocas(Function &F);
@@ -1277,7 +1280,8 @@ PreservedAnalyses AddressSanitizerPass::run(Module &M,
         Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
         Options.UseAfterScope, Options.UseAfterReturn);
     const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
-    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
+    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
+    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
   }
   Modified |= ModuleSanitizer.instrumentModule();
   if (!Modified)
@@ -1414,8 +1418,9 @@ bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
   return false;
 }
 
-void AddressSanitizer::getInterestingMemoryOperands(
-    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+void AddressSanitizer::getMemoryRefInfos(
+    Instruction *I, SmallVectorImpl<MemoryRefInfo> &Interesting,
+    const TargetTransformInfo *TTI) {
   // Do not instrument the load fetching the dynamic shadow address.
   if (LocalDynamicShadow == I)
     return;
@@ -1533,6 +1538,9 @@ void AddressSanitizer::getMemoryRefInfos(
     break;
   }
   default:
+    if (auto *II = dyn_cast<IntrinsicInst>(I))
+      if (TTI->getMemoryRefInfo(Interesting, II))
+        return;
     for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
       if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
           ignoreAccess(I, CI->getArgOperand(ArgNo)))
@@ -1695,7 +1703,7 @@ void AddressSanitizer::instrumentMaskedLoadOrStore(
 }
 
 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
-                                     InterestingMemoryOperand &O, bool UseCalls,
+                                     MemoryRefInfo &O, bool UseCalls,
                                      const DataLayout &DL,
                                      RuntimeCallInserter &RTCI) {
   Value *Addr = O.getPtr();
@@ -2945,7 +2953,8 @@ bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
 }
 
 bool AddressSanitizer::instrumentFunction(Function &F,
-                                          const TargetLibraryInfo *TLI) {
+                                          const TargetLibraryInfo *TLI,
+                                          const TargetTransformInfo *TTI) {
   if (F.empty())
     return false;
   if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
     return false;
@@ -2989,7 +2998,7 @@ bool AddressSanitizer::instrumentFunction(Function &F,
   // We want to instrument every address only once per basic block (unless
   // there are calls between uses).
   SmallPtrSet<Value *, 16> TempsToInstrument;
-  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
+  SmallVector<MemoryRefInfo, 16> OperandsToInstrument;
   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
   SmallVector<Instruction *, 8> NoReturnCalls;
   SmallVector<BasicBlock *, 16> AllBlocks;
@@ -3005,8 +3014,8 @@ bool AddressSanitizer::instrumentFunction(Function &F,
       // Skip instructions inserted by another instrumentation.
       if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
         continue;
-      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
-      getInterestingMemoryOperands(&Inst, InterestingOperands);
+      SmallVector<MemoryRefInfo, 1> InterestingOperands;
+      getMemoryRefInfos(&Inst, InterestingOperands, TTI);
 
       if (!InterestingOperands.empty()) {
         for (auto &Operand : InterestingOperands) {
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index a3423912d4f31..ac639c16ea21f 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -350,16 +350,16 @@ class HWAddressSanitizer {
                               LoopInfo *LI);
   bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
   void instrumentMemIntrinsic(MemIntrinsic *MI);
-  bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
-                           LoopInfo *LI, const DataLayout &DL);
+  bool instrumentMemAccess(MemoryRefInfo &O, DomTreeUpdater &DTU, LoopInfo *LI,
+                           const DataLayout &DL);
   bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
   bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
                     Value *Ptr);
 
-  void getInterestingMemoryOperands(
-      OptimizationRemarkEmitter &ORE, Instruction *I,
-      const TargetLibraryInfo &TLI,
-      SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+  void getMemoryRefInfos(OptimizationRemarkEmitter &ORE, Instruction *I,
+                         const TargetLibraryInfo &TLI,
+                         SmallVectorImpl<MemoryRefInfo> &Interesting);
 
   void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
   Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
@@ -891,10 +891,9 @@ bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
   return Ignored;
 }
 
-void HWAddressSanitizer::getInterestingMemoryOperands(
+void HWAddressSanitizer::getMemoryRefInfos(
     OptimizationRemarkEmitter &ORE, Instruction *I,
-    const TargetLibraryInfo &TLI,
-    SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+    const TargetLibraryInfo &TLI, SmallVectorImpl<MemoryRefInfo> &Interesting) {
   // Skip memory accesses inserted by another instrumentation.
   if (I->hasMetadata(LLVMContext::MD_nosanitize))
     return;
@@ -1165,9 +1164,14 @@ void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
   MI->eraseFromParent();
 }
 
-bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
-                                             DomTreeUpdater &DTU, LoopInfo *LI,
-                                             const DataLayout &DL) {
+bool HWAddressSanitizer::instrumentMemAccess(MemoryRefInfo &O,
+                                             DomTreeUpdater &DTU,
+                                             LoopInfo *LI,
+                                             const DataLayout &DL) {
   Value *Addr = O.getPtr();
 
   LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
@@ -1640,7 +1644,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
 
   LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
 
-  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
+  SmallVector<MemoryRefInfo, 16> OperandsToInstrument;
   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
   SmallVector<Instruction *, 8> LandingPadVec;
   const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
@@ -1654,7 +1658,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
       if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
         LandingPadVec.push_back(&Inst);
 
-      getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
+      getMemoryRefInfos(ORE, &Inst, TLI, OperandsToInstrument);
 
       if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
         if (!ignoreMemIntrinsic(ORE, MI))
diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
new file mode 100644
index 0000000000000..7571679659533
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
@@ -0,0 +1,2304 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=riscv64 -mattr=+v -passes=asan \
+; RUN: -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64)
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* align 4 %0, i64 %1) sanitize_address {
+; CHECK-LABEL: @intrinsic_vle_v_nxv1i32_nxv1i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[TMP1:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP12:%.*]]
+; CHECK: 4:
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[TMP5]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP4]] ], [ [[IV_NEXT:%.*]], [[TMP11:%.*]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 1 x i1> splat (i1 true), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP11]]
+; CHECK: 8:
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP0:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; CHECK-NEXT: call void @__asan_load4(i64 [[TMP10]])
+; CHECK-NEXT: br label [[TMP11]]
+; CHECK: 11:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP6]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP12]]
+; CHECK: 12:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64
[[TMP1]]) +; CHECK-NEXT: ret [[A]] +; +entry: + %a = call @llvm.riscv.vle.nxv1i32( + undef, + * %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i32( + , + *, + , + i64, + i64) +define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, * align 4 %1, %2, i64 %3) sanitize_address { +; CHECK-LABEL: @intrinsic_vle_mask_v_nxv1i32_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP14:%.*]] +; CHECK: 6: +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP7]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP6]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ] +; CHECK-NEXT: [[TMP9:%.*]] = extractelement [[TMP2:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP13]] +; CHECK: 10: +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr , ptr [[TMP1:%.*]], i64 0, i64 [[IV]] +; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64 +; CHECK-NEXT: call void @__asan_load4(i64 [[TMP12]]) +; CHECK-NEXT: br label [[TMP13]] +; CHECK: 13: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP8]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]], i64 1) +; CHECK-NEXT: ret [[A]] +; +entry: + %a = call @llvm.riscv.vle.mask.nxv1i32( + %0, + * %1, + %2, + i64 %3, i64 1) + ret %a +} + +declare void @llvm.riscv.vse.nxv1i32( + , + *, + i64) +define void @intrinsic_vse_v_nxv1i32_nxv1i32( %0, * align 4 %1, i64 %2) sanitize_address { +; CHECK-LABEL: @intrinsic_vse_v_nxv1i32_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP13:%.*]] +; CHECK: 5: +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP6]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP12:%.*]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP12]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr , ptr [[TMP1:%.*]], i64 0, i64 [[IV]] +; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64 +; CHECK-NEXT: call void @__asan_store4(i64 [[TMP11]]) +; CHECK-NEXT: br label [[TMP12]] +; CHECK: 12: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP13]] +; CHECK: 13: +; CHECK-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]]) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.riscv.vse.nxv1i32( + %0, + * %1, + i64 %2) + ret void +} + +declare void @llvm.riscv.vse.mask.nxv1i32( + , + *, + , + i64) +define void 
@intrinsic_vse_mask_v_nxv1i32_nxv1i32( %0, * align 4 %1, %2, i64 %3) sanitize_address { +; CHECK-LABEL: @intrinsic_vse_mask_v_nxv1i32_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP14:%.*]] +; CHECK: 6: +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP7]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP6]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ] +; CHECK-NEXT: [[TMP9:%.*]] = extractelement [[TMP2:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP13]] +; CHECK: 10: +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr , ptr [[TMP1:%.*]], i64 0, i64 [[IV]] +; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64 +; CHECK-NEXT: call void @__asan_store4(i64 [[TMP12]]) +; CHECK-NEXT: br label [[TMP13]] +; CHECK: 13: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP8]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]]) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.riscv.vse.mask.nxv1i32( + %0, + * %1, + %2, + i64 %3) + ret void +} + + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) + +define @test_vlseg2_nxv1i32(ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlseg2_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP24:%.*]] = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP25:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP24]], i32 1) +; CHECK-NEXT: ret [[TMP25]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlseg2_mask_nxv1i32(ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlseg2_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP24:%.*]] = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.i64(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP25:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP24]], i32 1) +; CHECK-NEXT: ret [[TMP25]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) + +define @test_vlseg3_nxv1i32(ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlseg3_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP36:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP37:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP36]], i32 1) +; CHECK-NEXT: ret [[TMP37]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlseg3_mask_nxv1i32(ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlseg3_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP36:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.i64(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP37:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP36]], i32 1) +; CHECK-NEXT: ret [[TMP37]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) + +define @test_vlseg4_nxv1i32(ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlseg4_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP48:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP49:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP48]], i32 1) +; CHECK-NEXT: ret [[TMP49]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlseg4_mask_nxv1i32(ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlseg4_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP48:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.i64(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP49:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP48]], i32 1) +; CHECK-NEXT: ret [[TMP49]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) + +define @test_vlseg5_nxv1i32(ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlseg5_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP60:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP61:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP60]], i32 1) +; CHECK-NEXT: ret [[TMP61]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlseg5_mask_nxv1i32(ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlseg5_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP60:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.i64(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP61:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP60]], i32 1) +; CHECK-NEXT: ret [[TMP61]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) + +define @test_vlseg6_nxv1i32(ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlseg6_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP72:%.*]] = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) undef, ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP73:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP72]], i32 1) +; CHECK-NEXT: ret [[TMP73]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlseg6_mask_nxv1i32(ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlseg6_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP72:%.*]] = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.i64(target("riscv.vector.tuple", , 6) undef, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP73:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP72]], i32 1) +; CHECK-NEXT: ret [[TMP73]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) + +define @test_vlseg7_nxv1i32(ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlseg7_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP84:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP85:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP84]], i32 1) +; CHECK-NEXT: ret [[TMP85]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlseg7_mask_nxv1i32(ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlseg7_mask_nxv1i32( +; CHECK-NEXT: 
entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP84:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.i64(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP85:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP84]], i32 1) +; CHECK-NEXT: ret [[TMP85]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) + +define @test_vlseg8_nxv1i32(ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlseg8_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP96:%.*]] = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) undef, ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP97:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) [[TMP96]], i32 1) +; CHECK-NEXT: ret [[TMP97]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlseg8_mask_nxv1i32(ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlseg8_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP96:%.*]] = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.i64(target("riscv.vector.tuple", , 8) undef, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP97:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) [[TMP96]], i32 1) +; CHECK-NEXT: ret [[TMP97]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) +declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) + +define void @test_vsseg2_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vsseg2_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr 
@__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: ret void +; +entry: + tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, i64 5) + ret void +} + +define void @test_vsseg2_mask_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vsseg2_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VAL:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: ret void +; +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i64 %vl, i64 5) + ret void +} + + +declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) +declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) + +define void @test_vsseg3_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vsseg3_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: ret void +; +entry: + tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, i64 5) + ret void +} + +define void @test_vsseg3_mask_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vsseg3_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VAL:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: ret void +; +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %mask, i64 %vl, i64 5) + ret void +} + + +declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) +declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) + +define void @test_vsseg4_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vsseg4_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: ret void +; +entry: + tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, i64 5) + ret void +} + +define void 
@test_vsseg4_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsseg4_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, i64)
+declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vsseg5_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsseg5_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsseg5_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, i64)
+declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vsseg6_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsseg6_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsseg6_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, i64)
+declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vsseg7_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsseg7_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsseg7_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, i64)
+declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vsseg8_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsseg8_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsseg8_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+; Test stride load
+declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* align 4 %0, i64 %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vlse_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP14:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP6]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <vscale x 1 x i1> splat (i1 true), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP13]]
+; CHECK:       9:
+; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[IV]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], i64 [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64
+; CHECK-NEXT:    call void @__asan_loadN(i64 [[TMP12]], i64 4)
+; CHECK-NEXT:    br label [[TMP13]]
+; CHECK:       13:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 [[TMP1]], i64 [[TMP2]])
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
+; CHECK-LABEL: @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP16:%.*]]
+; CHECK:       7:
+; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP8]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP15]]
+; CHECK:       11:
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i64 [[IV]], [[TMP2:%.*]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], i64 [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    call void @__asan_loadN(i64 [[TMP14]], i64 4)
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP16]]
+; CHECK:       16:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]], i64 1)
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+; Test stride store
+declare void @llvm.riscv.vsse.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vsse_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP15:%.*]]
+; CHECK:       6:
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP7]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP6]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <vscale x 1 x i1> splat (i1 true), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP14]]
+; CHECK:       10:
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[IV]], [[TMP2:%.*]]
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], i64 [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT:    call void @__asan_storeN(i64 [[TMP13]], i64 4)
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP8]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
+; CHECK-NEXT:    call void @llvm.riscv.vsse.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]], i64 [[TMP3]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vsse.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
+; CHECK-LABEL: @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP16:%.*]]
+; CHECK:       7:
+; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP8]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP15]]
+; CHECK:       11:
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i64 [[IV]], [[TMP2:%.*]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], i64 [[TMP12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    call void @__asan_storeN(i64 [[TMP14]], i64 4)
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP16]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @llvm.riscv.vsse.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vsse.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+
+declare target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i64, i64, i64)
+declare target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>,
2), ptr, i64, , i64, i64, i64) + +define @test_vlsseg2_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlsseg2_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP24:%.*]] = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP25:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP24]], i32 1) +; CHECK-NEXT: ret [[TMP25]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vlsseg2_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlsseg2_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP24:%.*]] = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP25:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP24]], i32 1) +; CHECK-NEXT: ret [[TMP25]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) + +define @test_vlsseg3_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlsseg3_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP36:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP37:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP36]], i32 1) +; CHECK-NEXT: ret [[TMP37]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vlsseg3_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlsseg3_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr 
@__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP36:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP37:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP36]], i32 1) +; CHECK-NEXT: ret [[TMP37]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) + +define @test_vlsseg4_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlsseg4_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP48:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP49:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP48]], i32 1) +; CHECK-NEXT: ret [[TMP49]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vlsseg4_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlsseg4_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP48:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP49:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP48]], i32 1) +; CHECK-NEXT: ret [[TMP49]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) + +define @test_vlsseg5_nxv1i32(ptr %base, i64 
%offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlsseg5_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP60:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP61:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP60]], i32 1) +; CHECK-NEXT: ret [[TMP61]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vlsseg5_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlsseg5_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP60:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP61:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP60]], i32 1) +; CHECK-NEXT: ret [[TMP61]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) + +define @test_vlsseg6_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlsseg6_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP72:%.*]] = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP73:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP72]], i32 1) +; CHECK-NEXT: ret [[TMP73]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vlsseg6_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlsseg6_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP72:%.*]] = 
tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP73:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP72]], i32 1) +; CHECK-NEXT: ret [[TMP73]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) + +define @test_vlsseg7_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlsseg7_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP84:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP85:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP84]], i32 1) +; CHECK-NEXT: ret [[TMP85]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vlsseg7_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlsseg7_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP84:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP85:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP84]], i32 1) +; CHECK-NEXT: ret [[TMP85]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) + +define @test_vlsseg8_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vlsseg8_nxv1i32( +; 
CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP96:%.*]] = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP97:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) [[TMP96]], i32 1) +; CHECK-NEXT: ret [[TMP97]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vlsseg8_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vlsseg8_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP96:%.*]] = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP97:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) [[TMP96]], i32 1) +; CHECK-NEXT: ret [[TMP97]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, i64 %offset, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) +declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) + +define void @test_vssseg2_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vssseg2_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: ret void +; +entry: + tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5) + ret void +} + +define void @test_vssseg2_mask_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vssseg2_mask_nxv1i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: ret void +; +entry: + tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, %mask, i64 %vl, i64 5) + ret void +} 
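+
+; Note: as for the other segment intrinsics in this file, the CHECK lines
+; for vssseg only expect the load of @__asan_shadow_memory_dynamic_address;
+; no per-element checks are emitted, since segment loads/stores are not yet
+; reported through the TTI hook (see the patch description).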
+
+
+declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vssseg3_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vssseg3_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vssseg3_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vssseg4_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vssseg4_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vssseg4_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vssseg5_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vssseg5_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vssseg5_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vssseg6_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vssseg6_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vssseg6_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vssseg7_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vssseg7_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vssseg7_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, <vscale x 1 x i1>, i64, i64)
+
+define void @test_vssseg8_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vssseg8_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vssseg8_mask_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+; Test stride value is a multiple of pointer alignment.
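+; With a constant stride of 4 equal to the 4-byte element store size, the
+; per-element check below can use the fixed-size @__asan_load4 callback
+; instead of the generic @__asan_loadN used in the tests above.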
+define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32_align(<vscale x 1 x i32>* align 4 %0, i64 %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vlse_v_nxv1i32_nxv1i32_align(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP14:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP6]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <vscale x 1 x i1> splat (i1 true), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP13]]
+; CHECK:       9:
+; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[IV]], 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], i64 [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64
+; CHECK-NEXT:    call void @__asan_load4(i64 [[TMP12]])
+; CHECK-NEXT:    br label [[TMP13]]
+; CHECK:       13:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 4, i64 [[TMP2]])
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32>* %0,
+    i64 4,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32>* %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
+; CHECK-LABEL: @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1)
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
+; CHECK-NEXT:    ret <vscale x 1 x float> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+    <vscale x 1 x float> undef,
+    <vscale x 1 x float>* %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32>* %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
+; CHECK-LABEL: @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1)
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], i64 [[TMP3:%.*]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
+; CHECK-LABEL: @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], i64 [[TMP3:%.*]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i16> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
+; CHECK-LABEL: @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    call void
@llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]]) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) + +define @test_vloxseg2_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vloxseg2_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP25:%.*]] = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP26:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP25]], i32 1) +; CHECK-NEXT: ret [[TMP26]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vloxseg2_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vloxseg2_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP25:%.*]] = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP26:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP25]], i32 1) +; CHECK-NEXT: ret [[TMP26]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) + +define @test_vloxseg3_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vloxseg3_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP37:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP38:%.*]] = call 
@llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP37]], i32 1) +; CHECK-NEXT: ret [[TMP38]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vloxseg3_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vloxseg3_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP37:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP38:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP37]], i32 1) +; CHECK-NEXT: ret [[TMP38]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) + +define @test_vloxseg4_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vloxseg4_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP49:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP50:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP49]], i32 1) +; CHECK-NEXT: ret [[TMP50]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vloxseg4_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vloxseg4_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP49:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP50:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP49]], i32 
1) +; CHECK-NEXT: ret [[TMP50]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) + +define @test_vloxseg5_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vloxseg5_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP61:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP62:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP61]], i32 1) +; CHECK-NEXT: ret [[TMP62]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vloxseg5_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vloxseg5_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP61:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP62:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP61]], i32 1) +; CHECK-NEXT: ret [[TMP62]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) + +define @test_vloxseg6_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vloxseg6_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP73:%.*]] = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) undef, ptr 
[[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP74:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP73]], i32 1) +; CHECK-NEXT: ret [[TMP74]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vloxseg6_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vloxseg6_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP73:%.*]] = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP74:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP73]], i32 1) +; CHECK-NEXT: ret [[TMP74]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) + +define @test_vloxseg7_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vloxseg7_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP85:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP86:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP85]], i32 1) +; CHECK-NEXT: ret [[TMP86]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vloxseg7_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vloxseg7_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP85:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP86:%.*]] = call 
@llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP85]], i32 1) +; CHECK-NEXT: ret [[TMP86]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) + +define @test_vloxseg8_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vloxseg8_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP97:%.*]] = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP98:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) [[TMP97]], i32 1) +; CHECK-NEXT: ret [[TMP98]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + +define @test_vloxseg8_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vloxseg8_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP97:%.*]] = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP98:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) [[TMP97]], i32 1) +; CHECK-NEXT: ret [[TMP98]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) + +define @test_vluxseg2_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vluxseg2_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP25:%.*]] = tail call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP26:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP25]], i32 1) +; CHECK-NEXT: ret [[TMP26]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + +define @test_vluxseg2_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vluxseg2_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP25:%.*]] = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP26:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) [[TMP25]], i32 1) +; CHECK-NEXT: ret [[TMP26]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) + +define @test_vluxseg3_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vluxseg3_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP37:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP38:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP37]], i32 1) +; CHECK-NEXT: ret [[TMP38]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + +define @test_vluxseg3_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vluxseg3_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP37:%.*]] = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) undef, ptr [[BASE:%.*]], 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP38:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) [[TMP37]], i32 1) +; CHECK-NEXT: ret [[TMP38]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) + +define @test_vluxseg4_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vluxseg4_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP49:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP50:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP49]], i32 1) +; CHECK-NEXT: ret [[TMP50]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + +define @test_vluxseg4_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vluxseg4_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP49:%.*]] = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP50:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) [[TMP49]], i32 1) +; CHECK-NEXT: ret [[TMP50]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) + +define @test_vluxseg5_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vluxseg5_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr 
@__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP61:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP62:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP61]], i32 1) +; CHECK-NEXT: ret [[TMP62]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + +define @test_vluxseg5_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vluxseg5_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP61:%.*]] = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP62:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) [[TMP61]], i32 1) +; CHECK-NEXT: ret [[TMP62]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) + +define @test_vluxseg6_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vluxseg6_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP73:%.*]] = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP74:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP73]], i32 1) +; CHECK-NEXT: ret [[TMP74]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + +define @test_vluxseg6_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vluxseg6_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP73:%.*]] = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP74:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) [[TMP73]], i32 1) +; CHECK-NEXT: ret [[TMP74]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) + +define @test_vluxseg7_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; CHECK-LABEL: @test_vluxseg7_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP85:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]], i64 5) +; CHECK-NEXT: [[TMP86:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP85]], i32 1) +; CHECK-NEXT: ret [[TMP86]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, i64 %vl, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + +define @test_vluxseg7_mask_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl, %mask) sanitize_address { +; CHECK-LABEL: @test_vluxseg7_mask_nxv1i32_nxv1i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 +; CHECK-NEXT: [[TMP85:%.*]] = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) undef, ptr [[BASE:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5) +; CHECK-NEXT: [[TMP86:%.*]] = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) [[TMP85]], i32 1) +; CHECK-NEXT: ret [[TMP86]] +; +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1.nxv1i32(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, %mask, i64 %vl, i64 1, i64 5) + %1 = call @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + + +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) +declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) + +define @test_vluxseg8_nxv1i32_nxv1i16(ptr %base, %index, i64 %vl) sanitize_address { +; 
CHECK-LABEL: @test_vluxseg8_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[TMP97:%.*]] = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    [[TMP98:%.*]] = call <vscale x 1 x i32> @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP97]], i32 1)
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[TMP98]]
+;
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  %1 = call <vscale x 1 x i32> @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %0, i32 1)
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vluxseg8_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    [[TMP97:%.*]] = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1, i64 5)
+; CHECK-NEXT:    [[TMP98:%.*]] = call <vscale x 1 x i32> @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP97]], i32 1)
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[TMP98]]
+;
+entry:
+  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) undef, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1, i64 5)
+  %1 = call <vscale x 1 x i32> @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %0, i32 1)
+  ret <vscale x 1 x i32> %1
+}
+
+
+declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsoxseg2_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg2_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg2_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsoxseg3_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg3_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsoxseg3_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg3_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsoxseg4_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg4_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsoxseg4_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg4_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsoxseg5_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg5_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsoxseg5_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg5_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsoxseg6_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg6_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsoxseg6_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg6_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsoxseg7_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg7_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsoxseg7_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg7_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsoxseg8_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg8_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsoxseg8_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsoxseg8_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsuxseg2_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg2_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg2_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsuxseg3_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg3_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsuxseg3_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg3_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsuxseg4_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg4_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsuxseg4_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg4_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsuxseg5_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg5_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsuxseg5_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg5_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsuxseg6_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg6_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsuxseg6_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg6_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsuxseg7_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg7_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsuxseg7_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg7_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
+
+
+declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, i64, i64)
+declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
+
+define void @test_vsuxseg8_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg8_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
+  ret void
+}
+
+define void @test_vsuxseg8_mask_nxv1i32_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
+; CHECK-LABEL: @test_vsuxseg8_mask_nxv1i32_nxv1i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
+; CHECK-NEXT:    tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VAL:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 5)
+; CHECK-NEXT:    ret void
+;
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
+  ret void
+}
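+
+; NOTE: Per the patch description, getMemoryRefInfo does not yet describe RVV
+; indexed/segment loads and stores, so every vloxseg/vluxseg/vsoxseg/vsuxseg
+; test above is expected to receive no shadow checks: the only instrumentation
+; visible in the CHECK lines is the cached load of
+; @__asan_shadow_memory_dynamic_address ahead of the original intrinsic call.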