diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
index 8b380751c2f9d..bca0feb45aab2 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
@@ -14,6 +14,7 @@
 #ifndef MLIR_DIALECT_LLVMIR_LLVMTYPES_H_
 #define MLIR_DIALECT_LLVMIR_LLVMTYPES_H_
+#include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/Types.h"
 #include "mlir/Interfaces/DataLayoutInterfaces.h"
 #include "mlir/Interfaces/MemorySlotInterfaces.h"
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
index 2ecbf8f50c50c..3386003cb61fb 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
@@ -11,6 +11,7 @@
 include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
 include "mlir/IR/AttrTypeBase.td"
+include "mlir/IR/BuiltinTypes.td"
 include "mlir/Interfaces/DataLayoutInterfaces.td"
 include "mlir/Interfaces/MemorySlotInterfaces.td"
@@ -259,7 +260,8 @@ def LLVMStructType : LLVMType<"LLVMStruct", "struct", [
 def LLVMPointerType : LLVMType<"LLVMPointer", "ptr", [
     DeclareTypeInterfaceMethods<DataLayoutTypeInterface, [
-        "getPreferredAlignment"]>]> {
+        "getPreferredAlignment"]>,
+    PointerLike]> {
   let summary = "LLVM pointer type";
   let description = [{
     The `!llvm.ptr` type is an LLVM pointer type. This type typically represents
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td
index 857e68cec8c76..c73b6ab3a46b5 100644
--- a/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td
@@ -12,6 +12,7 @@
 include "mlir/Interfaces/DataLayoutInterfaces.td"
 include "mlir/IR/AttrTypeBase.td"
 include "mlir/IR/BuiltinTypeInterfaces.td"
+include "mlir/IR/BuiltinTypes.td"
 include "mlir/IR/OpBase.td"
 //===----------------------------------------------------------------------===//
@@ -39,7 +40,8 @@ def Ptr_PtrType : Ptr_Type<"Ptr", "ptr", [
   MemRefElementTypeInterface,
   DeclareTypeInterfaceMethods<DataLayoutTypeInterface, [
-    "getPreferredAlignment"]>
+    "getPreferredAlignment"]>,
+  PointerLike
 ]> {
   let summary = "pointer type";
   let description = [{
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h b/mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h
index 4fe1b5a1aa423..564d02218eb8e 100644
--- a/mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h
@@ -14,6 +14,7 @@
 #define MLIR_DIALECT_PTR_IR_PTRTYPES_H
 #include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
+#include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/Types.h"
 #include "mlir/Interfaces/DataLayoutInterfaces.h"
diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h
index df1e02732617d..5b8d5463ab428 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.h
+++ b/mlir/include/mlir/IR/BuiltinTypes.h
@@ -43,6 +43,10 @@ template <typename ConcreteType>
 class ValueSemantics
     : public TypeTrait::TraitBase<ConcreteType, ValueSemantics> {};
+/// Type trait indicating that the type is a bare pointer-like type.
+template <typename ConcreteType>
+class PointerLike : public TypeTrait::TraitBase<ConcreteType, PointerLike> {};
+
 //===----------------------------------------------------------------------===//
 // TensorType
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
index af474b3e3ec47..a1aa8c2cd4a2c 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.td
+++ b/mlir/include/mlir/IR/BuiltinTypes.td
@@ -40,6 +40,11 @@ def ValueSemantics : NativeTypeTrait<"ValueSemantics"> {
   let cppNamespace = "::mlir";
 }
+/// Type trait indicating that the type is a pointer-like type.
+def PointerLike : NativeTypeTrait<"PointerLike"> {
+  let cppNamespace = "::mlir";
+}
+
 //===----------------------------------------------------------------------===//
 // ComplexType
 //===----------------------------------------------------------------------===//
@@ -1249,7 +1254,12 @@ def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [
 // VectorType
 //===----------------------------------------------------------------------===//
-def Builtin_VectorTypeElementType : AnyTypeOf<[AnyInteger, Index, AnyFloat]> {
+// Note: VectorType supports pointer-like types as element types. Examples of
+// pointer-like types are !llvm.ptr and !ptr.ptr. This makes the MLIR vector
+// type symmetric with the LLVM vector type, which is desirable because the
+// MLIR vector type is used in the LLVM dialect.
+def Builtin_VectorTypeElementType
+    : AnyTypeOf<[AnyInteger, Index, AnyFloat, AnyPointerLike]> {
   let cppFunctionName = "isValidVectorTypeElementType";
 }
diff --git a/mlir/include/mlir/IR/CommonTypeConstraints.td b/mlir/include/mlir/IR/CommonTypeConstraints.td
index 601517717978e..d6d1a1a38fef9 100644
--- a/mlir/include/mlir/IR/CommonTypeConstraints.td
+++ b/mlir/include/mlir/IR/CommonTypeConstraints.td
@@ -301,6 +301,9 @@ def Index : Type<CPred<"::llvm::isa<::mlir::IndexType>($_self)">, "index",
                  "::mlir::IndexType">,
             BuildableType<"$_builder.getIndexType()">;
+def AnyPointerLike : Type<CPred<"$_self.hasTrait<::mlir::PointerLike>()">,
+                          "pointer-like", "::mlir::Type">;
+
 // Any signless integer type or index type.
 def AnySignlessIntegerOrIndex : Type<Or<[AnySignlessInteger.predicate, Index.predicate]>,
     "signless integer or index">;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
index 1053a25515d5c..51dcb071f9c18 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
@@ -140,9 +140,13 @@ static bool isSupportedTypeForConversion(Type type) {
   if (isa<LLVM::LLVMFixedVectorType, LLVM::LLVMScalableVectorType>(type))
     return false;
-  // Scalable types are not supported.
-  if (auto vectorType = dyn_cast<VectorType>(type))
+  if (auto vectorType = dyn_cast<VectorType>(type)) {
+    // Vectors of pointers cannot be cast.
+    if (isa<LLVM::LLVMPointerType>(vectorType.getElementType()))
+      return false;
+    // Scalable types are not supported.
     return !vectorType.isScalable();
+  }
   return true;
 }
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index 8f39ede721c92..403756765268e 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -690,7 +690,7 @@ LLVMFixedVectorType::getChecked(function_ref<InFlightDiagnostic()> emitError,
 }
 bool LLVMFixedVectorType::isValidElementType(Type type) {
-  return llvm::isa<LLVMPointerType, LLVMPPCFP128Type>(type);
+  return llvm::isa<LLVMPPCFP128Type>(type);
 }
 LogicalResult
@@ -890,7 +890,7 @@ bool mlir::LLVM::isCompatibleVectorType(Type type) {
     if (auto intType = llvm::dyn_cast<IntegerType>(elementType))
       return intType.isSignless();
     return llvm::isa<BFloat16Type, Float16Type, Float32Type, Float64Type,
-                     Float80Type, Float128Type>(elementType);
+                     Float80Type, Float128Type, LLVMPointerType>(elementType);
   }
   return false;
 }
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
index 44b4a25a051f1..3df14528bac39 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
@@ -2002,8 +2002,8 @@ func.func @gather(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1
 }
 // CHECK-LABEL: func @gather
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32
-// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> vector<3x!llvm.ptr>, f32
+// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
 // CHECK: return %[[G]] : vector<3xf32>
 // -----
@@ -2015,8 +2015,8 @@ func.func @gather_scalable(%arg0: memref<?xf32>, %arg1: vector<[3]xi32>, %arg2:
 }
 // CHECK-LABEL: func @gather_scalable
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec<? x 3 x ptr>, f32
-// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<? x 3 x ptr>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32>
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> vector<[3]x!llvm.ptr>, f32
+// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<[3]x!llvm.ptr>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32>
 // CHECK: return %[[G]] : vector<[3]xf32>
 // -----
@@ -2028,8 +2028,8 @@ func.func @gather_global_memory(%arg0: memref<?xf32, 1>, %arg1: vector<3xi32>, %
 }
 // CHECK-LABEL: func @gather_global_memory
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>, vector<3xi32>) -> !llvm.vec<3 x ptr<1>>, f32
-// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr<1>>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>, vector<3xi32>) -> vector<3x!llvm.ptr<1>>, f32
+// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<3x!llvm.ptr<1>>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
 // CHECK: return %[[G]] : vector<3xf32>
 // -----
@@ -2041,8 +2041,8 @@ func.func @gather_global_memory_scalable(%arg0: memref<?xf32, 1>, %arg1: vector<
 }
 // CHECK-LABEL: func @gather_global_memory_scalable
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>,
vector<[3]xi32>) -> !llvm.vec>, f32 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<1>, vector<[3]xi32>) -> vector<[3]x!llvm.ptr<1>>, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<[3]x!llvm.ptr<1>>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> // CHECK: return %[[G]] : vector<[3]xf32> // ----- @@ -2055,8 +2055,8 @@ func.func @gather_index(%arg0: memref, %arg1: vector<3xindex>, %arg2: v } // CHECK-LABEL: func @gather_index -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> !llvm.vec<3 x ptr>, i64 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xi64>) -> vector<3xi64> +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> vector<3x!llvm.ptr>, i64 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xi64>) -> vector<3xi64> // CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[G]] : vector<3xi64> to vector<3xindex> // ----- @@ -2068,13 +2068,12 @@ func.func @gather_index_scalable(%arg0: memref, %arg1: vector<[3]xindex } // CHECK-LABEL: func @gather_index_scalable -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> !llvm.vec, i64 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xi64>) -> vector<[3]xi64> +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> vector<[3]x!llvm.ptr>, i64 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (vector<[3]x!llvm.ptr>, vector<[3]xi1>, vector<[3]xi64>) -> vector<[3]xi64> // CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[G]] : vector<[3]xi64> to vector<[3]xindex> // ----- - func.func @gather_1d_from_2d(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) -> vector<4xf32> { %0 = arith.constant 3 : index %1 = vector.gather %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x4xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32> into vector<4xf32> @@ -2083,8 +2082,8 @@ func.func @gather_1d_from_2d(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2 // CHECK-LABEL: func @gather_1d_from_2d // CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> !llvm.vec<4 x ptr>, f32 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<4 x ptr>, vector<4xi1>, vector<4xf32>) -> vector<4xf32> +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> vector<4x!llvm.ptr>, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<4x!llvm.ptr>, vector<4xi1>, vector<4xf32>) -> vector<4xf32> // CHECK: return %[[G]] : vector<4xf32> // ----- @@ -2097,8 +2096,8 @@ func.func @gather_1d_from_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4]x // CHECK-LABEL: func @gather_1d_from_2d_scalable // CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 
-// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> !llvm.vec, f32 -// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec, vector<[4]xi1>, vector<[4]xf32>) -> vector<[4]xf32> +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> vector<[4]x!llvm.ptr>, f32 +// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<[4]x!llvm.ptr>, vector<[4]xi1>, vector<[4]xf32>) -> vector<[4]xf32> // CHECK: return %[[G]] : vector<[4]xf32> // ----- @@ -2114,8 +2113,8 @@ func.func @scatter(%arg0: memref, %arg1: vector<3xi32>, %arg2: vector<3xi } // CHECK-LABEL: func @scatter -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> !llvm.vec<3 x ptr>, f32 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<3xf32>, vector<3xi1> into !llvm.vec<3 x ptr> +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi32>) -> vector<3x!llvm.ptr>, f32 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<3xf32>, vector<3xi1> into vector<3x!llvm.ptr> // ----- @@ -2126,8 +2125,8 @@ func.func @scatter_scalable(%arg0: memref, %arg1: vector<[3]xi32>, %arg2: } // CHECK-LABEL: func @scatter_scalable -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> !llvm.vec, f32 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[3]xf32>, vector<[3]xi1> into !llvm.vec +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi32>) -> vector<[3]x!llvm.ptr>, f32 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[3]xf32>, vector<[3]xi1> into vector<[3]x!llvm.ptr> // ----- @@ -2138,8 +2137,8 @@ func.func @scatter_index(%arg0: memref, %arg1: vector<3xindex>, %arg2: } // CHECK-LABEL: func @scatter_index -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> !llvm.vec<3 x ptr>, i64 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<3xi64>, vector<3xi1> into !llvm.vec<3 x ptr> +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<3xi64>) -> vector<3x!llvm.ptr>, i64 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<3xi64>, vector<3xi1> into vector<3x!llvm.ptr> // ----- @@ -2150,8 +2149,8 @@ func.func @scatter_index_scalable(%arg0: memref, %arg1: vector<[3]xinde } // CHECK-LABEL: func @scatter_index_scalable -// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> !llvm.vec, i64 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<[3]xi64>, vector<[3]xi1> into !llvm.vec +// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, vector<[3]xi64>) -> vector<[3]x!llvm.ptr>, i64 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<[3]xi64>, vector<[3]xi1> into vector<[3]x!llvm.ptr> // ----- @@ -2163,8 +2162,8 @@ func.func @scatter_1d_into_2d(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg // CHECK-LABEL: func @scatter_1d_into_2d // CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> !llvm.vec<4 x ptr>, f32 
-// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<4xf32>, vector<4xi1> into !llvm.vec<4 x ptr> +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<4xi32>) -> vector<4x!llvm.ptr>, f32 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<4xf32>, vector<4xi1> into vector<4x!llvm.ptr> // ----- @@ -2176,8 +2175,8 @@ func.func @scatter_1d_into_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4] // CHECK-LABEL: func @scatter_1d_into_2d_scalable // CHECK: %[[B:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32 -// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> !llvm.vec, f32 -// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[4]xf32>, vector<[4]xi1> into !llvm.vec +// CHECK: %[[P:.*]] = llvm.getelementptr %[[B]][%{{.*}}] : (!llvm.ptr, vector<[4]xi32>) -> vector<[4]x!llvm.ptr>, f32 +// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : vector<[4]xf32>, vector<[4]xi1> into vector<[4]x!llvm.ptr> // ----- diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir index ae73dfef0c74f..64e51f5554628 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -1669,8 +1669,8 @@ func.func @gather_with_mask(%arg0: memref, %arg1: vector<2x3xi32>, %arg2: } // CHECK-LABEL: func @gather_with_mask -// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> -// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> +// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> +// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32> // ----- @@ -1685,8 +1685,8 @@ func.func @gather_with_mask_scalable(%arg0: memref, %arg1: vector<2x[3]xi } // CHECK-LABEL: func @gather_with_mask_scalable -// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> -// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> +// CHECK: %[[G0:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<[3]x!llvm.ptr>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> +// CHECK: %[[G1:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<[3]x!llvm.ptr>, vector<[3]xi1>, vector<[3]xf32>) -> vector<[3]xf32> // ----- diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir index 6d3d3937b651c..330846a7b1649 100644 --- a/mlir/test/Dialect/LLVMIR/invalid.mlir +++ b/mlir/test/Dialect/LLVMIR/invalid.mlir @@ -1328,16 +1328,16 @@ func.func @invalid_bitcast_i64_to_ptr() { // ----- -func.func @invalid_bitcast_vec_to_ptr(%arg : !llvm.vec<4 x ptr>) { +func.func @invalid_bitcast_vec_to_ptr(%arg : vector<4x!llvm.ptr>) { // expected-error@+1 {{cannot cast vector 
of pointers to pointer}} - %0 = llvm.bitcast %arg : !llvm.vec<4 x ptr> to !llvm.ptr + %0 = llvm.bitcast %arg : vector<4x!llvm.ptr> to !llvm.ptr } // ----- func.func @invalid_bitcast_ptr_to_vec(%arg : !llvm.ptr) { // expected-error@+1 {{cannot cast pointer to vector of pointers}} - %0 = llvm.bitcast %arg : !llvm.ptr to !llvm.vec<4 x ptr> + %0 = llvm.bitcast %arg : !llvm.ptr to vector<4x!llvm.ptr> } // ----- @@ -1349,9 +1349,9 @@ func.func @invalid_bitcast_addr_cast(%arg : !llvm.ptr<1>) { // ----- -func.func @invalid_bitcast_addr_cast_vec(%arg : !llvm.vec<4 x ptr<1>>) { +func.func @invalid_bitcast_addr_cast_vec(%arg : vector<4x!llvm.ptr<1>>) { // expected-error@+1 {{cannot cast pointers of different address spaces, use 'llvm.addrspacecast' instead}} - %0 = llvm.bitcast %arg : !llvm.vec<4 x ptr<1>> to !llvm.vec<4 x ptr> + %0 = llvm.bitcast %arg : vector<4x!llvm.ptr<1>> to vector<4x!llvm.ptr> } // ----- diff --git a/mlir/test/Dialect/LLVMIR/mem2reg.mlir b/mlir/test/Dialect/LLVMIR/mem2reg.mlir index e66b498ed4fa0..3c13eacde4856 100644 --- a/mlir/test/Dialect/LLVMIR/mem2reg.mlir +++ b/mlir/test/Dialect/LLVMIR/mem2reg.mlir @@ -1011,7 +1011,7 @@ llvm.func @load_first_vector_elem() -> i16 { llvm.func @load_first_llvm_vector_elem() -> i16 { %0 = llvm.mlir.constant(1 : i32) : i32 // CHECK: llvm.alloca - %1 = llvm.alloca %0 x !llvm.vec<4 x ptr> : (i32) -> !llvm.ptr + %1 = llvm.alloca %0 x vector<4x!llvm.ptr> : (i32) -> !llvm.ptr %2 = llvm.load %1 : !llvm.ptr -> i16 llvm.return %2 : i16 } diff --git a/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir b/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir index 373931c747fc3..82c3e5bf2e2db 100644 --- a/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir +++ b/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir @@ -68,10 +68,10 @@ llvm.func @opaque_ptr_masked_load(%arg0: !llvm.ptr, %arg1: vector<7xi1>) -> vect } // CHECK-LABEL: @opaque_ptr_gather -llvm.func @opaque_ptr_gather(%M: !llvm.vec<7 x ptr>, %mask: vector<7xi1>) -> vector<7xf32> { +llvm.func @opaque_ptr_gather(%M: vector<7x!llvm.ptr>, %mask: vector<7xi1>) -> vector<7xf32> { // CHECK: = llvm.intr.masked.gather - // CHECK: (!llvm.vec<7 x ptr>, vector<7xi1>) -> vector<7xf32> + // CHECK: (vector<7x!llvm.ptr>, vector<7xi1>) -> vector<7xf32> %a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} : - (!llvm.vec<7 x ptr>, vector<7xi1>) -> vector<7xf32> + (vector<7x!llvm.ptr>, vector<7xi1>) -> vector<7xf32> llvm.return %a : vector<7xf32> } diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir index d0aa65d14a176..b3df1155b9de8 100644 --- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir +++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir @@ -6,10 +6,10 @@ llvm.func @baz() // CHECK-LABEL: func @ops -// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: f32, %[[PTR1:.*]]: !llvm.ptr, %[[PTR2:.*]]: !llvm.ptr, %[[BOOL:.*]]: i1, %[[VPTR1:.*]]: !llvm.vec<2 x ptr>) +// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: f32, %[[PTR1:.*]]: !llvm.ptr, %[[PTR2:.*]]: !llvm.ptr, %[[BOOL:.*]]: i1, %[[VPTR1:.*]]: vector<2x!llvm.ptr>) func.func @ops(%arg0: i32, %arg1: f32, %arg2: !llvm.ptr, %arg3: !llvm.ptr, - %arg4: i1, %arg5 : !llvm.vec<2x!llvm.ptr>) { + %arg4: i1, %arg5 : vector<2x!llvm.ptr>) { // Integer arithmetic binary operations. 
// // CHECK: {{.*}} = llvm.add %[[I32]], %[[I32]] : i32 @@ -23,7 +23,7 @@ func.func @ops(%arg0: i32, %arg1: f32, // CHECK: {{.*}} = llvm.add %[[SCALAR_PRED0]], %[[SCALAR_PRED0]] : i1 // CHECK: %[[SCALAR_PRED1:.+]] = llvm.icmp "ne" %[[PTR1]], %[[PTR1]] : !llvm.ptr // CHECK: {{.*}} = llvm.add %[[SCALAR_PRED1]], %[[SCALAR_PRED1]] : i1 -// CHECK: %[[VEC_PRED:.+]] = llvm.icmp "ne" %[[VPTR1]], %[[VPTR1]] : !llvm.vec<2 x ptr> +// CHECK: %[[VEC_PRED:.+]] = llvm.icmp "ne" %[[VPTR1]], %[[VPTR1]] : vector<2x!llvm.ptr> // CHECK: {{.*}} = llvm.add %[[VEC_PRED]], %[[VEC_PRED]] : vector<2xi1> %0 = llvm.add %arg0, %arg0 : i32 %1 = llvm.sub %arg0, %arg0 : i32 @@ -36,7 +36,7 @@ func.func @ops(%arg0: i32, %arg1: f32, %typecheck_7 = llvm.add %7, %7 : i1 %ptrcmp = llvm.icmp "ne" %arg2, %arg2 : !llvm.ptr %typecheck_ptrcmp = llvm.add %ptrcmp, %ptrcmp : i1 - %vptrcmp = llvm.icmp "ne" %arg5, %arg5 : !llvm.vec<2 x ptr> + %vptrcmp = llvm.icmp "ne" %arg5, %arg5 : vector<2x!llvm.ptr> %typecheck_vptrcmp = llvm.add %vptrcmp, %vptrcmp : vector<2 x i1> // Integer overflow flags @@ -362,15 +362,15 @@ func.func @casts_overflow(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>, } // CHECK-LABEL: @vect -func.func @vect(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32, %arg3: !llvm.vec<2 x ptr>) { +func.func @vect(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32, %arg3: vector<2x!llvm.ptr>) { // CHECK: = llvm.extractelement {{.*}} : vector<4xf32> %0 = llvm.extractelement %arg0[%arg1 : i32] : vector<4xf32> // CHECK: = llvm.insertelement {{.*}} : vector<4xf32> %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : vector<4xf32> // CHECK: = llvm.shufflevector {{.*}} [0, 0, 0, 0, 7] : vector<4xf32> %2 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 0, 7] : vector<4xf32> -// CHECK: = llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : !llvm.vec<2 x ptr> - %3 = llvm.shufflevector %arg3, %arg3 [1, 0] : !llvm.vec<2 x ptr> +// CHECK: = llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : vector<2x!llvm.ptr> + %3 = llvm.shufflevector %arg3, %arg3 [1, 0] : vector<2x!llvm.ptr> // CHECK: = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : vector<4xf32> %4 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : vector<4xf32> return diff --git a/mlir/test/Dialect/LLVMIR/types.mlir b/mlir/test/Dialect/LLVMIR/types.mlir index fd771b6152557..184205bb0b1e7 100644 --- a/mlir/test/Dialect/LLVMIR/types.mlir +++ b/mlir/test/Dialect/LLVMIR/types.mlir @@ -76,8 +76,8 @@ func.func @vec() { "some.op"() : () -> !llvm.vec // CHECK: !llvm.vec "some.op"() : () -> !llvm.vec - // CHECK: !llvm.vec<4 x ptr> - "some.op"() : () -> !llvm.vec<4 x ptr> + // CHECK: vector<4x!llvm.ptr> + "some.op"() : () -> vector<4x!llvm.ptr> return } diff --git a/mlir/test/IR/test-verifiers-type.mlir b/mlir/test/IR/test-verifiers-type.mlir index 96d0005eb7a19..7888e4d3fadcc 100644 --- a/mlir/test/IR/test-verifiers-type.mlir +++ b/mlir/test/IR/test-verifiers-type.mlir @@ -7,3 +7,18 @@ // expected-error @below{{failed to verify 'param': 16-bit signless integer or 32-bit signless integer}} "test.type_producer"() : () -> !test.type_verification + +// ----- + +// CHECK: "test.type_producer"() : () -> vector> +"test.type_producer"() : () -> vector> + +// ----- + +// CHECK: "test.type_producer"() : () -> vector> +"test.type_producer"() : () -> vector> + +// ----- + +// expected-error @below{{failed to verify 'elementType': integer or index or floating-point or pointer-like}} +"test.type_producer"() : () -> vector> diff --git a/mlir/test/Target/LLVMIR/Import/constant.ll b/mlir/test/Target/LLVMIR/Import/constant.ll index 
3c5f5825d47ee..103d0ff001969 100644 --- a/mlir/test/Target/LLVMIR/Import/constant.ll +++ b/mlir/test/Target/LLVMIR/Import/constant.ll @@ -199,12 +199,12 @@ define i32 @function_address_after_def() { @nested_agg = global %nested_agg_type { %simple_agg_type{i32 1, i8 2, i16 3, i32 4}, ptr null } ; CHECK-DAG: %[[NULL:.+]] = llvm.mlir.zero : !llvm.ptr -; CHECK-DAG: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.vec<2 x ptr> +; CHECK-DAG: %[[ROOT:.+]] = llvm.mlir.undef : vector<2x!llvm.ptr> ; CHECK-DAG: %[[P0:.+]] = llvm.mlir.constant(0 : i32) : i32 -; CHECK-DAG: %[[CHAIN0:.+]] = llvm.insertelement %[[NULL]], %[[ROOT]][%[[P0]] : i32] : !llvm.vec<2 x ptr> +; CHECK-DAG: %[[CHAIN0:.+]] = llvm.insertelement %[[NULL]], %[[ROOT]][%[[P0]] : i32] : vector<2x!llvm.ptr> ; CHECK-DAG: %[[P1:.+]] = llvm.mlir.constant(1 : i32) : i32 -; CHECK-DAG: %[[CHAIN1:.+]] = llvm.insertelement %[[NULL]], %[[CHAIN0]][%[[P1]] : i32] : !llvm.vec<2 x ptr> -; CHECK-DAG: llvm.return %[[CHAIN1]] : !llvm.vec<2 x ptr> +; CHECK-DAG: %[[CHAIN1:.+]] = llvm.insertelement %[[NULL]], %[[CHAIN0]][%[[P1]] : i32] : vector<2x!llvm.ptr> +; CHECK-DAG: llvm.return %[[CHAIN1]] : vector<2x!llvm.ptr> @vector_agg = global <2 x ptr> ; // ----- diff --git a/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll b/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll index 6bf7572e7e791..858f714b1f640 100644 --- a/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll +++ b/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll @@ -2,7 +2,7 @@ ; CHECK: llvm.func @shufflevector_crash define void @shufflevector_crash(<2 x ptr> %arg0) { - ; CHECK: llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : !llvm.vec<2 x ptr> + ; CHECK: llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : vector<2x!llvm.ptr> %1 = shufflevector <2 x ptr> %arg0, <2 x ptr> undef, <2 x i32> ret void } diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll index 02fea85b028b3..ecc9fdc91d62e 100644 --- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll +++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll @@ -497,12 +497,12 @@ define void @masked_load_store_intrinsics(ptr %vec, <7 x i1> %mask) { define void @masked_gather_scatter_intrinsics(<7 x ptr> %vec, <7 x i1> %mask) { ; CHECK: %[[UNDEF:.+]] = llvm.mlir.undef ; CHECK: %[[VAL1:.+]] = llvm.intr.masked.gather %[[VEC]], %[[MASK]], %[[UNDEF]] {alignment = 1 : i32} - ; CHECK-SAME: (!llvm.vec<7 x ptr>, vector<7xi1>, vector<7xf32>) -> vector<7xf32> + ; CHECK-SAME: (vector<7x!llvm.ptr>, vector<7xi1>, vector<7xf32>) -> vector<7xf32> %1 = call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %vec, i32 1, <7 x i1> %mask, <7 x float> undef) ; CHECK: %[[VAL2:.+]] = llvm.intr.masked.gather %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32} %2 = call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %vec, i32 4, <7 x i1> %mask, <7 x float> %1) ; CHECK: llvm.intr.masked.scatter %[[VAL2]], %[[VEC]], %[[MASK]] {alignment = 8 : i32} - ; CHECK-SAME: vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr> + ; CHECK-SAME: vector<7xf32>, vector<7xi1> into vector<7x!llvm.ptr> call void @llvm.masked.scatter.v7f32.v7p0(<7 x float> %2, <7 x ptr> %vec, i32 8, <7 x i1> %mask) ret void } @@ -988,9 +988,9 @@ define void @vector_predication_intrinsics(<8 x i32> %0, <8 x i32> %1, <8 x floa %56 = call <8 x i64> @llvm.vp.fptoui.v8i64.v8f64(<8 x double> %5, <8 x i1> %11, i32 %12) ; CHECK: "llvm.intr.vp.fptosi"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xf64>, vector<8xi1>, i32) -> vector<8xi64> %57 = 
call <8 x i64> @llvm.vp.fptosi.v8i64.v8f64(<8 x double> %5, <8 x i1> %11, i32 %12) - ; CHECK: "llvm.intr.vp.ptrtoint"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.vec<8 x ptr>, vector<8xi1>, i32) -> vector<8xi64> + ; CHECK: "llvm.intr.vp.ptrtoint"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8x!llvm.ptr>, vector<8xi1>, i32) -> vector<8xi64> %58 = call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0(<8 x ptr> %6, <8 x i1> %11, i32 %12) - ; CHECK: "llvm.intr.vp.inttoptr"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi64>, vector<8xi1>, i32) -> !llvm.vec<8 x ptr> + ; CHECK: "llvm.intr.vp.inttoptr"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi64>, vector<8xi1>, i32) -> vector<8x!llvm.ptr> %59 = call <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64(<8 x i64> %4, <8 x i1> %11, i32 %12) ; CHECK: "llvm.intr.vp.fmuladd"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xf32>, vector<8xf32>, vector<8xf32>, vector<8xi1>, i32) -> vector<8xf32> %60 = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %2, <8 x float> %3, <8 x float> %3, <8 x i1> %11, i32 %12) @@ -1021,7 +1021,7 @@ define ptr @ptrmask(ptr %0, i64 %1) { ; CHECK-LABEL: llvm.func @vector_ptrmask define <8 x ptr> @vector_ptrmask(<8 x ptr> %0, <8 x i64> %1) { - ; CHECK: %{{.*}} = llvm.intr.ptrmask %{{.*}} : (!llvm.vec<8 x ptr>, vector<8xi64>) -> !llvm.vec<8 x ptr> + ; CHECK: %{{.*}} = llvm.intr.ptrmask %{{.*}} : (vector<8x!llvm.ptr>, vector<8xi64>) -> vector<8x!llvm.ptr> %3 = call <8 x ptr> @llvm.ptrmask.v8p0.v8i64(<8 x ptr> %0, <8 x i64> %1) ret <8 x ptr> %3 } diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir index d33f99d44eb88..e6bfb8f31a8cd 100644 --- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir +++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir @@ -521,16 +521,16 @@ llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr, %mask: vector<7xi1>) { } // CHECK-LABEL: @masked_gather_scatter_intrinsics -llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr>, %mask: vector<7xi1>) { +llvm.func @masked_gather_scatter_intrinsics(%M: vector<7 x !llvm.ptr>, %mask: vector<7xi1>) { // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> poison) %a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} : - (!llvm.vec<7 x ptr>, vector<7xi1>) -> vector<7xf32> + (vector<7 x !llvm.ptr>, vector<7xi1>) -> vector<7xf32> // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}}) %b = llvm.intr.masked.gather %M, %mask, %a { alignment = 1: i32} : - (!llvm.vec<7 x ptr>, vector<7xi1>, vector<7xf32>) -> vector<7xf32> + (vector<7 x !llvm.ptr>, vector<7xi1>, vector<7xf32>) -> vector<7xf32> // CHECK: call void @llvm.masked.scatter.v7f32.v7p0(<7 x float> %{{.*}}, <7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}) llvm.intr.masked.scatter %b, %M, %mask { alignment = 1: i32} : - vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr> + vector<7xf32>, vector<7xi1> into vector<7 x !llvm.ptr> llvm.return } @@ -849,7 +849,7 @@ llvm.func @stack_restore(%arg0: !llvm.ptr, %arg1: !llvm.ptr<1>) { llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>, %C: vector<8xf32>, %D: vector<8xf32>, %E: vector<8xi64>, %F: vector<8xf64>, - %G: !llvm.vec<8 x !llvm.ptr>, + %G: vector<8 x !llvm.ptr>, %i: i32, %f: f32, %iptr : !llvm.ptr, %fptr : !llvm.ptr, @@ -1018,10 +1018,10 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>, // CHECK: call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0 
"llvm.intr.vp.ptrtoint" (%G, %mask, %evl) : - (!llvm.vec<8 x !llvm.ptr>, vector<8xi1>, i32) -> vector<8xi64> + (vector<8 x !llvm.ptr>, vector<8xi1>, i32) -> vector<8xi64> // CHECK: call <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64 "llvm.intr.vp.inttoptr" (%E, %mask, %evl) : - (vector<8xi64>, vector<8xi1>, i32) -> !llvm.vec<8 x !llvm.ptr> + (vector<8xi64>, vector<8xi1>, i32) -> vector<8 x !llvm.ptr> llvm.return } @@ -1105,10 +1105,10 @@ llvm.func @ptrmask(%p: !llvm.ptr, %mask: i64) -> !llvm.ptr { } // CHECK-LABEL: @vector_ptrmask -llvm.func @vector_ptrmask(%p: !llvm.vec<8 x ptr>, %mask: vector<8 x i64>) -> !llvm.vec<8 x ptr> { +llvm.func @vector_ptrmask(%p: vector<8 x !llvm.ptr>, %mask: vector<8 x i64>) -> vector<8 x !llvm.ptr> { // CHECK: call <8 x ptr> @llvm.ptrmask.v8p0.v8i64 - %0 = llvm.intr.ptrmask %p, %mask : (!llvm.vec<8 x ptr>, vector<8 x i64>) -> !llvm.vec<8 x ptr> - llvm.return %0 : !llvm.vec<8 x ptr> + %0 = llvm.intr.ptrmask %p, %mask : (vector<8 x !llvm.ptr>, vector<8 x i64>) -> vector<8 x !llvm.ptr> + llvm.return %0 : vector<8 x !llvm.ptr> } // CHECK-LABEL: @experimental_constrained_sitofp diff --git a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir index 83566d6649932..4500175cd22b2 100644 --- a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir +++ b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir @@ -277,25 +277,25 @@ llvm.func @masked_gather_intr_wrong_type(%ptrs : vector<7xf32>, %mask : vector<7 // ----- -llvm.func @masked_gather_intr_wrong_type_scalable(%ptrs : !llvm.vec<7 x ptr>, %mask : vector<[7]xi1>) -> vector<[7]xf32> { - // expected-error @below{{expected operand #1 type to be '!llvm.vec'}} - %0 = llvm.intr.masked.gather %ptrs, %mask { alignment = 1: i32} : (!llvm.vec<7 x ptr>, vector<[7]xi1>) -> vector<[7]xf32> +llvm.func @masked_gather_intr_wrong_type_scalable(%ptrs : vector<7x!llvm.ptr>, %mask : vector<[7]xi1>) -> vector<[7]xf32> { + // expected-error @below{{expected operand #1 type to be 'vector<[7]x!llvm.ptr>'}} + %0 = llvm.intr.masked.gather %ptrs, %mask { alignment = 1: i32} : (vector<7x!llvm.ptr>, vector<[7]xi1>) -> vector<[7]xf32> llvm.return %0 : vector<[7]xf32> } // ----- -llvm.func @masked_scatter_intr_wrong_type(%vec : f32, %ptrs : !llvm.vec<7xptr>, %mask : vector<7xi1>) { +llvm.func @masked_scatter_intr_wrong_type(%vec : f32, %ptrs : vector<7x!llvm.ptr>, %mask : vector<7xi1>) { // expected-error @below{{op operand #0 must be LLVM dialect-compatible vector type, but got 'f32'}} - llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : f32, vector<7xi1> into !llvm.vec<7xptr> + llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : f32, vector<7xi1> into vector<7x!llvm.ptr> llvm.return } // ----- -llvm.func @masked_scatter_intr_wrong_type_scalable(%vec : vector<[7]xf32>, %ptrs : !llvm.vec<7xptr>, %mask : vector<[7]xi1>) { - // expected-error @below{{expected operand #2 type to be '!llvm.vec'}} - llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : vector<[7]xf32>, vector<[7]xi1> into !llvm.vec<7xptr> +llvm.func @masked_scatter_intr_wrong_type_scalable(%vec : vector<[7]xf32>, %ptrs : vector<7x!llvm.ptr>, %mask : vector<[7]xi1>) { + // expected-error @below{{expected operand #2 type to be 'vector<[7]x!llvm.ptr>'}} + llvm.intr.masked.scatter %vec, %ptrs, %mask { alignment = 1: i32} : vector<[7]xf32>, vector<[7]xi1> into vector<7x!llvm.ptr> llvm.return } diff --git a/mlir/test/Target/LLVMIR/llvmir-types.mlir b/mlir/test/Target/LLVMIR/llvmir-types.mlir index 6e54bb022c077..33e1c7e6382ae 100644 
--- a/mlir/test/Target/LLVMIR/llvmir-types.mlir +++ b/mlir/test/Target/LLVMIR/llvmir-types.mlir @@ -85,7 +85,7 @@ llvm.func @return_vs_4_i32() -> !llvm.vec // CHECK: declare @return_vs_8_half() llvm.func @return_vs_8_half() -> !llvm.vec // CHECK: declare <4 x ptr> @return_v_4_pi8() -llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr> +llvm.func @return_v_4_pi8() -> vector<4x!llvm.ptr> // // Arrays. diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir index 2476b1b15faaa..3a6402f1b66a0 100644 --- a/mlir/test/Target/LLVMIR/llvmir.mlir +++ b/mlir/test/Target/LLVMIR/llvmir.mlir @@ -2493,11 +2493,11 @@ llvm.mlir.global linkonce @partially_zeroinit_aggregate() : !llvm.struct<(i32, i llvm.func @zeroinit_complex_local_aggregate() { // CHECK: %[[#VAR:]] = alloca [1000 x { i32, [3 x { double, <4 x ptr>, [2 x ptr] }], [6 x ptr] }], i64 1, align 32 %0 = llvm.mlir.constant(1 : i64) : i64 - %1 = llvm.alloca %0 x !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, !llvm.vec<4 x ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>> : (i64) -> !llvm.ptr + %1 = llvm.alloca %0 x !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, vector<4 x !llvm.ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>> : (i64) -> !llvm.ptr // CHECK: store [1000 x { i32, [3 x { double, <4 x ptr>, [2 x ptr] }], [6 x ptr] }] zeroinitializer, ptr %[[#VAR]], align 32 - %2 = llvm.mlir.zero : !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, !llvm.vec<4 x ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>> - llvm.store %2, %1 : !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, !llvm.vec<4 x ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>>, !llvm.ptr + %2 = llvm.mlir.zero : !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, vector<4 x !llvm.ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>> + llvm.store %2, %1 : !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, vector<4 x !llvm.ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>>, !llvm.ptr llvm.return } diff --git a/mlir/test/Target/LLVMIR/opaque-ptr.mlir b/mlir/test/Target/LLVMIR/opaque-ptr.mlir index c21f9b0542deb..895e998edebae 100644 --- a/mlir/test/Target/LLVMIR/opaque-ptr.mlir +++ b/mlir/test/Target/LLVMIR/opaque-ptr.mlir @@ -66,9 +66,9 @@ llvm.func @opaque_ptr_masked_load(%arg0: !llvm.ptr, %arg1: vector<7xi1>) -> vect } // CHECK-LABEL: @opaque_ptr_gather -llvm.func @opaque_ptr_gather(%M: !llvm.vec<7 x ptr>, %mask: vector<7xi1>) -> vector<7xf32> { +llvm.func @opaque_ptr_gather(%M: vector<7 x !llvm.ptr>, %mask: vector<7xi1>) -> vector<7xf32> { // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> {{.*}}, i32 %a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} : - (!llvm.vec<7 x ptr>, vector<7xi1>) -> vector<7xf32> + (vector<7 x !llvm.ptr>, vector<7xi1>) -> vector<7xf32> llvm.return %a : vector<7xf32> }
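
Illustration (editor's sketch, not part of the patch): with PointerLike attached to !llvm.ptr and !ptr.ptr, the builtin vector type now admits pointer element types, so lowered gathers use vector<3x!llvm.ptr> where !llvm.vec<3 x ptr> was required before. The function below is hypothetical; its getelementptr and masked.gather forms mirror the CHECK lines updated in this patch.

    llvm.func @gather_example(%base: !llvm.ptr, %offsets: vector<3xi32>,
                              %mask: vector<3xi1>, %passthru: vector<3xf32>) -> vector<3xf32> {
      // A getelementptr over a vector of offsets yields a builtin vector of pointers.
      %ptrs = llvm.getelementptr %base[%offsets] : (!llvm.ptr, vector<3xi32>) -> vector<3x!llvm.ptr>, f32
      // The masked gather consumes the vector<3x!llvm.ptr> operand directly.
      %v = llvm.intr.masked.gather %ptrs, %mask, %passthru {alignment = 4 : i32}
          : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
      llvm.return %v : vector<3xf32>
    }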