
Commit 8fff238

[mlir][NFC] update mlir/Dialect create APIs (23/n) (llvm#149930)
See llvm#147168 for more info.
1 parent fc0a978 commit 8fff238

Showing 15 changed files with 209 additions and 202 deletions.
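Every change in this series is the same mechanical rewrite: a call to the OpBuilder::create<OpTy>(...) member template is replaced by the op's static OpTy::create(builder, ...) method, which takes the builder as its first argument and leaves the remaining arguments unchanged. A minimal before/after sketch of the pattern, modeled on an op that appears in the hunks below (the variable name is illustrative, not a line from this commit):

    // Old style: build the op through the builder's member template.
    - Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    // New style: call the op's static create(), passing the builder first.
    + Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);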

mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp

Lines changed: 5 additions & 5 deletions
@@ -74,12 +74,12 @@ struct CreatorOpShardingInterface
       if (!oldType.isDynamicDim(i) && shardType.isDynamicDim(i)) {
         if (!newSharding) {
           newSharding =
-              builder.create<ShardingOp>(op->getLoc(), resultShardings[0]);
+              ShardingOp::create(builder, op->getLoc(), resultShardings[0]);
           device =
-              builder.create<mesh::ProcessMultiIndexOp>(op->getLoc(), mesh)
+              mesh::ProcessMultiIndexOp::create(builder, op->getLoc(), mesh)
                   .getResults();
-          shapeForDevice = builder.create<mesh::ShardShapeOp>(
-              op->getLoc(), oldType.getShape(), spmdizedOperands,
+          shapeForDevice = mesh::ShardShapeOp::create(
+              builder, op->getLoc(), oldType.getShape(), spmdizedOperands,
               newSharding->getResult(0), device);
         }
         newOperands.emplace_back(shapeForDevice.getResult()[i]);
@@ -88,7 +88,7 @@ struct CreatorOpShardingInterface
         newOperands.emplace_back(spmdizedOperands[++currOldOprndNum]);
       }
     }
-    newOp = builder.create<OpTy>(op->getLoc(), shardType, newOperands);
+    newOp = OpTy::create(builder, op->getLoc(), shardType, newOperands);
     spmdizationMap.map(op->getResult(0), newOp->getResult(0));
   } else {
     // `clone` will populate the mapping of old to new results.

mlir/lib/Dialect/Tensor/IR/TensorOps.cpp

Lines changed: 71 additions & 67 deletions
Large diffs are not rendered by default.

mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp

Lines changed: 15 additions & 15 deletions
@@ -207,13 +207,13 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     if (isZeroInteger(newLength)) {
       hasZeroLen = true;
     } else if (!hasZeroLen) {
-      Value check = b.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq,
+      Value check = arith::CmpIOp::create(
+          b, loc, arith::CmpIPredicate::eq,
           getValueOrCreateConstantIndexOp(b, loc, newLength),
           getValueOrCreateConstantIndexOp(b, loc, zero));
       dynHasZeroLenCond =
           dynHasZeroLenCond
-              ? b.create<arith::OrIOp>(loc, check, dynHasZeroLenCond)
+              ? arith::OrIOp::create(b, loc, check, dynHasZeroLenCond)
               : check;
     }

@@ -237,18 +237,18 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   auto castResult = [&](Value val) -> Value {
     if (resultType == val.getType())
       return val;
-    return b.create<tensor::CastOp>(loc, resultType, val);
+    return tensor::CastOp::create(b, loc, resultType, val);
   };

   // In cases where the original data source is unused: Emit a GenerateOp and
   // do not generate a SliceOp. (The result shape of the SliceOp would
   // have a dimension of size 0, the semantics of which is unclear.)
   auto createGenerateOp = [&]() {
     // Create GenerateOp.
-    auto generateOp = b.create<tensor::GenerateOp>(
-        loc, resultType, dynDims,
+    auto generateOp = tensor::GenerateOp::create(
+        b, loc, resultType, dynDims,
         [&](OpBuilder &builder, Location gLoc, ValueRange indices) {
-          builder.create<tensor::YieldOp>(gLoc, padValue);
+          tensor::YieldOp::create(builder, gLoc, padValue);
         });
     return generateOp;
   };
@@ -257,10 +257,10 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   // the result shape of the new SliceOp has a zero dimension.
   auto createPadOfExtractSlice = [&]() {
     // Create pad(extract_slice(x)).
-    auto newSliceOp = b.create<tensor::ExtractSliceOp>(
-        loc, padOp.getSource(), newOffsets, newLengths, newStrides);
-    auto newPadOp = b.create<PadOp>(
-        loc, Type(), newSliceOp, newLows, newHighs,
+    auto newSliceOp = tensor::ExtractSliceOp::create(
+        b, loc, padOp.getSource(), newOffsets, newLengths, newStrides);
+    auto newPadOp = PadOp::create(
+        b, loc, Type(), newSliceOp, newLows, newHighs,
         /*nofold=*/padOp.getNofold(),
         getPrunedAttributeList(padOp, PadOp::getAttributeNames()));

@@ -287,17 +287,17 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   Operation *thenOp;
   Operation *elseOp;
   Operation *sliceOp;
-  auto result = b.create<scf::IfOp>(
-      loc, dynHasZeroLenCond,
+  auto result = scf::IfOp::create(
+      b, loc, dynHasZeroLenCond,
       /*thenBuilder=*/
       [&](OpBuilder &b, Location loc) {
         thenOp = createGenerateOp();
-        b.create<scf::YieldOp>(loc, castResult(thenOp->getResult(0)));
+        scf::YieldOp::create(b, loc, castResult(thenOp->getResult(0)));
       },
       /*elseBuilder=*/
       [&](OpBuilder &b, Location loc) {
         std::tie(elseOp, sliceOp) = createPadOfExtractSlice();
-        b.create<scf::YieldOp>(loc, castResult(elseOp->getResult(0)));
+        scf::YieldOp::create(b, loc, castResult(elseOp->getResult(0)));
       });
   return TilingResult{
       {elseOp}, SmallVector<Value>(result->getResults()), {sliceOp}};

mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp

Lines changed: 2 additions & 2 deletions
@@ -165,7 +165,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
   converter.addTargetMaterialization([](OpBuilder &builder, Type resultType,
                                         ValueRange inputs,
@@ -177,7 +177,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
 }

mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp

Lines changed: 41 additions & 40 deletions
@@ -222,8 +222,8 @@ struct CollapseShapeOpInterface
           MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                           collapseShapeOp.getSrcType().getElementType(),
                           AffineMap(), bufferType.getMemorySpace());
-      buffer = rewriter.create<bufferization::ToBufferOp>(
-          op->getLoc(), memrefType, *tensorAlloc);
+      buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                 memrefType, *tensorAlloc);
     }

     // Result type is inferred by the builder.
@@ -349,8 +349,8 @@ struct ExpandShapeOpInterface
     if (failed(buffer))
       return failure();

-    auto memrefExpandShape = rewriter.create<memref::ExpandShapeOp>(
-        op->getLoc(), tensorResultType.getShape(), *buffer,
+    auto memrefExpandShape = memref::ExpandShapeOp::create(
+        rewriter, op->getLoc(), tensorResultType.getShape(), *buffer,
         expandShapeOp.getReassociationIndices(),
         expandShapeOp.getMixedOutputShape());
     replaceOpWithBufferizedValues(rewriter, op,
@@ -398,8 +398,8 @@ struct ExtractSliceOpInterface
         extractSliceOp.getResult(), options, state);
     if (failed(resultMemrefType))
       return failure();
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
+    Value subView = memref::SubViewOp::create(
+        rewriter, loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
         mixedOffsets, mixedSizes, mixedStrides);

     replaceOpWithBufferizedValues(rewriter, op, subView);
@@ -469,7 +469,7 @@ static void createStores(RewriterBase &rewriter, Location loc, int dim,
   if (dim == static_cast<int>(shape.size()) - 1) {
     for (int i = 0; i < shape.back(); ++i) {
       indices.back() = constants[i];
-      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
+      memref::StoreOp::create(rewriter, loc, *elementIt, buffer, indices);
       ++elementIt;
     }
     return;
@@ -507,8 +507,8 @@ struct FromElementsOpInterface
         bufferization::getBufferType(*tensorAlloc, options, state);
     if (failed(memrefType))
       return failure();
-    Value buffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), *memrefType, *tensorAlloc);
+    Value buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                     *memrefType, *tensorAlloc);

     // Case: tensor<0xelem_type>.
     if (fromElementsOp.getElements().empty()) {
@@ -518,8 +518,8 @@ struct FromElementsOpInterface

     // Case: tensor<elem_type>.
     if (shape.empty()) {
-      rewriter.create<memref::StoreOp>(
-          loc, fromElementsOp.getElements().front(), buffer);
+      memref::StoreOp::create(rewriter, loc,
+                              fromElementsOp.getElements().front(), buffer);
       replaceOpWithBufferizedValues(rewriter, op, buffer);
       return success();
     }
@@ -529,7 +529,7 @@ struct FromElementsOpInterface
     SmallVector<Value, 2> constants;
     constants.reserve(maxDim);
     for (int i = 0; i < maxDim; ++i)
-      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
+      constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));

     // Traverse all `elements` and create `memref.store` ops.
     auto elementIt = fromElementsOp.getElements().begin();
@@ -576,15 +576,15 @@ static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
   // Create linalg::MapOp.
   OpBuilder::InsertionGuard g(rewriter);
   auto linalgOp =
-      rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                     /*init=*/tensorDestination);
+      linalg::MapOp::create(rewriter, loc, tensorType, /*inputs=*/ValueRange(),
+                            /*init=*/tensorDestination);
   Block &linalgBody = linalgOp.getMapper().emplaceBlock();

   // Create linalg::IndexOps.
   rewriter.setInsertionPointToStart(&linalgBody);
   SmallVector<Value> indices;
   for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
-    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));
+    indices.push_back(linalg::IndexOp::create(rewriter, loc, dim));

   // Move over body.
   rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
@@ -644,8 +644,8 @@ struct InsertOpInterface
         getBuffer(rewriter, insertOp.getDest(), options, state);
     if (failed(destMemref))
       return failure();
-    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
-                                     *destMemref, insertOp.getIndices());
+    memref::StoreOp::create(rewriter, insertOp.getLoc(), insertOp.getScalar(),
+                            *destMemref, insertOp.getIndices());
     replaceOpWithBufferizedValues(rewriter, op, *destMemref);
     return success();
   }
@@ -713,9 +713,9 @@ struct InsertSliceOpInterface
         memref::SubViewOp::inferRankReducedResultType(
             insertSliceOp.getSourceType().getShape(), dstMemrefType,
             mixedOffsets, mixedSizes, mixedStrides);
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
-        mixedStrides);
+    Value subView =
+        memref::SubViewOp::create(rewriter, loc, subviewMemRefType, *dstMemref,
+                                  mixedOffsets, mixedSizes, mixedStrides);

     // Copy tensor. If this tensor.insert_slice has a matching
     // tensor.extract_slice, the copy operation will eventually fold away.
@@ -796,14 +796,14 @@ struct PadOpInterface
     for (int64_t i = 0; i < resultType.getRank(); ++i) {
       if (!resultType.isDynamicDim(i))
         continue;
-      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
+      Value srcDim = tensor::DimOp::create(rewriter, loc, padOp.getSource(), i);
       Value lowPad = toValue(mixedLowPad[i]);
       Value highPad = toValue(mixedHighPad[i]);
       AffineExpr s0, s1, s2;
       bindSymbols(op->getContext(), s0, s1, s2);
       AffineExpr sumExpr = s0 + s1 + s2;
-      Value sum = rewriter.create<affine::AffineApplyOp>(
-          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
+      Value sum = affine::AffineApplyOp::create(
+          rewriter, loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
       dynamicSizes.push_back(sum);
     }

@@ -995,9 +995,9 @@ struct ParallelInsertSliceOpInterface
             parallelInsertSliceOp.getMixedOffsets(),
             parallelInsertSliceOp.getMixedSizes(),
             parallelInsertSliceOp.getMixedStrides());
-    Value subview = rewriter.create<memref::SubViewOp>(
-        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
-        parallelInsertSliceOp.getMixedOffsets(),
+    Value subview = memref::SubViewOp::create(
+        rewriter, parallelInsertSliceOp.getLoc(), subviewMemRefType,
+        *destBuffer, parallelInsertSliceOp.getMixedOffsets(),
         parallelInsertSliceOp.getMixedSizes(),
         parallelInsertSliceOp.getMixedStrides());

@@ -1065,14 +1065,14 @@ struct SplatOpInterface
     if (options.defaultMemorySpaceFn(tensorType) != Attribute())
       return op->emitError("memory space not implemented yet");

-    auto linalgOp =
-        rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                       /*init=*/*tensorAlloc);
+    auto linalgOp = linalg::MapOp::create(rewriter, loc, tensorType,
+                                          /*inputs=*/ValueRange(),
+                                          /*init=*/*tensorAlloc);
     Block &linalgBody = linalgOp.getMapper().emplaceBlock();

     // Create linalg::IndexOps.
     rewriter.setInsertionPointToStart(&linalgBody);
-    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
+    linalg::YieldOp::create(rewriter, loc, splatOp.getInput());
     rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);

     return success();
@@ -1126,8 +1126,8 @@ struct ConcatOpInterface
     MemRefType memrefType =
         MemRefType::get(concatOp.getResultType().getShape(),
                         concatOp.getResultType().getElementType(), layout);
-    Value dstBuffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), memrefType, *tensorAlloc);
+    Value dstBuffer = bufferization::ToBufferOp::create(
+        rewriter, op->getLoc(), memrefType, *tensorAlloc);

     // Extract the dimension for the concat op
     uint64_t concatDim = concatOp.getDim();
@@ -1142,7 +1142,7 @@ struct ConcatOpInterface
     for (const auto &[dimIdx, dimSize] :
          llvm::enumerate(tensorType.getShape())) {
       if (dimSize == ShapedType::kDynamic) {
-        auto dimOp = rewriter.create<memref::DimOp>(loc, dstBuffer, dimIdx);
+        auto dimOp = memref::DimOp::create(rewriter, loc, dstBuffer, dimIdx);
         sizes.push_back(dimOp.getResult());
         if (dimIdx == concatDim)
           dynamicConcatDim = true;
@@ -1157,7 +1157,7 @@ struct ConcatOpInterface
     if (dynamicConcatDim) {
       // One or more operands have dynamic size, so we must accumulate the
       // offset with arith ops.
-      dynamicOffset = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+      dynamicOffset = arith::ConstantIndexOp::create(rewriter, loc, 0);
    }

     for (auto operand : concatOp.getInputs()) {
@@ -1174,8 +1174,9 @@ struct ConcatOpInterface

       if (dynamicConcatDim) {
         offsets[concatDim] = dynamicOffset.value();
-        dynamicSize = rewriter.create<memref::DimOp>(loc, *srcBuffer, concatDim)
-                          .getResult();
+        dynamicSize =
+            memref::DimOp::create(rewriter, loc, *srcBuffer, concatDim)
+                .getResult();
         sizes[concatDim] = dynamicSize.value();
       } else {
         sizes[concatDim] = rewriter.getIndexAttr(operandConcatDimSize);
@@ -1188,16 +1189,16 @@ struct ConcatOpInterface
           memref::SubViewOp::inferRankReducedResultType(
               operandTensorType.getShape(), dstMemrefType, offsets, sizes,
               strides);
-      Value subview = rewriter.create<memref::SubViewOp>(
-          loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);
+      Value subview = memref::SubViewOp::create(
+          rewriter, loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);

       // Copy the source buffer into the destination subview.
       if (failed(options.createMemCpy(rewriter, loc, *srcBuffer, subview)))
         return failure();

       if (dynamicConcatDim) {
-        dynamicOffset = rewriter.create<arith::AddIOp>(
-            loc, dynamicOffset.value(), dynamicSize.value());
+        dynamicOffset = arith::AddIOp::create(
+            rewriter, loc, dynamicOffset.value(), dynamicSize.value());
       } else {
         concatDimOffset += operandConcatDimSize;
       }

mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp

Lines changed: 3 additions & 2 deletions
@@ -42,8 +42,9 @@ struct FoldEmptyTensorWithReshapeOp : public OpRewritePattern<ReshapeOp> {

     // Create new tensor.empty op.
     // TODO: Do not drop tensor type encoding.
-    Value emptyTensor = rewriter.create<EmptyOp>(
-        loc, resultShapes[0], reshapeOp.getResultType().getElementType());
+    Value emptyTensor =
+        EmptyOp::create(rewriter, loc, resultShapes[0],
+                        reshapeOp.getResultType().getElementType());
     if (emptyTensor.getType() != reshapeOp.getResultType()) {
       rewriter.replaceOpWithNewOp<tensor::CastOp>(
           reshapeOp, reshapeOp.getResultType(), emptyTensor);
