diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 4d6c5965c4fcc..603e86ca3d766 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -3865,22 +3865,14 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
           llvm::zip(packedType.getShape().take_back(mixedTiles.size()),
                     mixedTiles),
           [](std::tuple<int64_t, OpFoldResult> it) {
-            std::optional<int64_t> constTileSize =
-                getConstantIntValue(std::get<1>(it));
             int64_t shape = std::get<0>(it);
-            if (!constTileSize) {
-              // If specified tile size is dynamic, output shape should
-              // be dynamic too.
-              return ShapedType::isDynamic(shape);
+            if (Attribute attr =
+                    llvm::dyn_cast_if_present<Attribute>(std::get<1>(it))) {
+              IntegerAttr intAttr = dyn_cast_or_null<IntegerAttr>(attr);
+              int64_t staticTileSize = intAttr.getValue().getSExtValue();
+              return shape == staticTileSize;
             }
-            if (ShapedType::isDynamic(shape)) {
-              // For the shape being dynamic when tile size is
-              // specified, return true. In canonical form a constant
-              // tile size should lead to constant shape of the tiled
-              // dimension, but not needed for verification.
-              return true;
-            }
-            return shape == constTileSize.value();
+            return ShapedType::isDynamic(shape);
           })) {
     return op->emitError("mismatch in inner tile sizes specified and shaped of "
                          "tiled dimension in the packed type");
diff --git a/mlir/test/Dialect/Linalg/transform-lower-pack.mlir b/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
index 48bf1c151de8f..7aadf19069563 100644
--- a/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
+++ b/mlir/test/Dialect/Linalg/transform-lower-pack.mlir
@@ -586,7 +586,7 @@ module attributes {transform.with_named_sequence} {
 
 // Check that we can lower unpack "as unpad" with dynamic dims.
 // CHECK-LABEL: func.func @unpack_as_pad_dynamic(
-// CHECK-SAME: %[[ARG0:.*]]: tensor<1x1x1x1x?x?x?x?xf32>, %[[ARG1:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[ARG0:.*]]: tensor<1x1x1x1x136x64x16x16xf32>, %[[ARG1:.*]]: tensor<?x?x?x?xf32>
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -602,10 +602,10 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME: [1, 1, 1, 1, %[[DIM0]], %[[DIM1]], %[[DIM2]], %[[DIM3]]]
 // strides multiplers.
 // CHECK-SAME: [1, 1, 1, 1, 1, 1, 1, 1]
-// CHECK-SAME: : tensor<1x1x1x1x?x?x?x?xf32> to tensor<?x?x?x?xf32>
-func.func @unpack_as_pad_dynamic(%arg0: tensor<1x1x1x1x?x?x?x?xf32>, %arg1: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
+// CHECK-SAME: : tensor<1x1x1x1x136x64x16x16xf32> to tensor<?x?x?x?xf32>
+func.func @unpack_as_pad_dynamic(%arg0: tensor<1x1x1x1x136x64x16x16xf32>, %arg1: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
   %pack = tensor.unpack %arg0 inner_dims_pos = [0, 1, 2, 3] inner_tiles = [136, 64, 16, 16] into %arg1
-    : tensor<1x1x1x1x?x?x?x?xf32> -> tensor<?x?x?x?xf32>
+    : tensor<1x1x1x1x136x64x16x16xf32> -> tensor<?x?x?x?xf32>
   return %pack : tensor<?x?x?x?xf32>
 }
 
diff --git a/mlir/test/Dialect/Tensor/fold-empty-op.mlir b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
index 5beb8c250aa10..65ceb4ff3e3df 100644
--- a/mlir/test/Dialect/Tensor/fold-empty-op.mlir
+++ b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
@@ -77,20 +77,20 @@ func.func @pack_empty(%arg0: tensor<8x8x32x32xf32>) -> tensor<8x8x32x32xf32> {
 // CHECK-NOT: tensor.pack
 // CHECK: return %[[T]] : tensor<8x8x32x32xf32>
 
-func.func @pack_empty_dynamic(%arg0: tensor<?x?x?x?xf32>, %dim0: index, %dim1: index) -> tensor<?x?x?x?xf32> {
+func.func @pack_empty_dynamic(%arg0: tensor<?x?x32x32xf32>, %dim0: index, %dim1: index) -> tensor<?x?x32x32xf32> {
   %empty_unpacked = tensor.empty(%dim0, %dim1) : tensor<?x?xf32>
   %packed = tensor.pack %empty_unpacked
     inner_dims_pos = [0, 1] inner_tiles = [32, 32]
-    into %arg0 : tensor<?x?xf32> -> tensor<?x?x?x?xf32>
-  return %packed : tensor<?x?x?x?xf32>
+    into %arg0 : tensor<?x?xf32> -> tensor<?x?x32x32xf32>
+  return %packed : tensor<?x?x32x32xf32>
 }
 
 // CHECK-LABEL: func.func @pack_empty_dynamic(
-// CHECK-SAME: %[[T:.+]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME: %[[T:.+]]: tensor<?x?x32x32xf32>,
 // CHECK-SAME: %[[DIM0:[a-zA-Z0-9_]+]]: index,
 // CHECK-SAME: %[[DIM1:[a-zA-Z0-9_]+]]: index
 // CHECK-NOT: tensor.pack
-// CHECK: return %[[T]] : tensor<?x?x?x?xf32>
+// CHECK: return %[[T]] : tensor<?x?x32x32xf32>
 
 func.func @unpack_empty(%arg0: tensor<256x256xf32>) -> tensor<256x256xf32> {
   %empty_packed = tensor.empty() : tensor<8x8x32x32xf32>
@@ -105,20 +105,18 @@ func.func @unpack_empty(%arg0: tensor<256x256xf32>) -> tensor<256x256xf32> {
 // CHECK-NOT: tensor.unpack
 // CHECK: return %[[T]] : tensor<256x256xf32>
 
-func.func @unpack_empty_dynamic(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index, %dim2: index, %dim3: index) -> tensor<?x?xf32> {
-  %empty_packed = tensor.empty(%dim0, %dim1, %dim2, %dim3) : tensor<?x?x?x?xf32>
+func.func @unpack_empty_dynamic(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index) -> tensor<?x?xf32> {
+  %empty_packed = tensor.empty(%dim0, %dim1) : tensor<?x?x32x32xf32>
   %unpacked = tensor.unpack %empty_packed
     inner_dims_pos = [0, 1] inner_tiles = [32, 32]
-    into %arg0 : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
+    into %arg0 : tensor<?x?x32x32xf32> -> tensor<?x?xf32>
   return %unpacked : tensor<?x?xf32>
 }
 
 // CHECK-LABEL: func.func @unpack_empty_dynamic(
 // CHECK-SAME: %[[T:.+]]: tensor<?x?xf32>,
 // CHECK-SAME: %[[DIM0:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME: %[[DIM1:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME: %[[DIM2:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME: %[[DIM3:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME: %[[DIM1:[a-zA-Z0-9_]+]]: index
 // CHECK-NOT: tensor.unpack
 // CHECK: return %[[T]] : tensor<?x?xf32>
 
diff --git a/mlir/test/Dialect/Tensor/invalid.mlir b/mlir/test/Dialect/Tensor/invalid.mlir
index 921d7f9f1fefc..be470ce2af9b3 100644
--- a/mlir/test/Dialect/Tensor/invalid.mlir
+++ b/mlir/test/Dialect/Tensor/invalid.mlir
@@ -755,9 +755,47 @@ func.func @pack_mismatch_inner_tile_size_and_output_shape(
 
 // -----
 
+func.func @pack_dynamic_inner_tile_size_and_static_output_shape(
+  %input : tensor<?x?xf32>, %output : tensor<?x?x8x8xf32>) -> tensor<?x?x8x8xf32> {
+  %c8 = arith.constant 8 : index
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8] into %output : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
+  return %0 : tensor<?x?x8x8xf32>
+}
+
+// -----
+
+func.func @pack_static_inner_tile_size_and_dynamic_output_shape(
+  %input : tensor<?x?xf32>, %output : tensor<?x?x8x?xf32>) -> tensor<?x?x8x?xf32> {
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %output : tensor<?x?xf32> -> tensor<?x?x8x?xf32>
+  return %0 : tensor<?x?x8x?xf32>
+}
+
+// -----
+
 func.func @unpack_mismatch_inner_tile_size_and_output_shape(
   %input : tensor<?x?x8x8xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
   // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
   %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x8x8xf32> -> tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
+
+// -----
+
+func.func @unpack_dynamic_inner_tile_size_and_static_output_shape(
+  %input : tensor<?x?x8x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %c8 = arith.constant 8 : index
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [%c8, 4] into %output : tensor<?x?x8x4xf32> -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// -----
+
+func.func @unpack_static_inner_tile_size_and_dynamic_output_shape(
+  %input : tensor<?x?x?x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x?x4xf32> -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}