Commit 30f65cc

[mlir][NFC] update mlir/Dialect create APIs (17/n)
See llvm#147168 for more info.
1 parent 9deb7f6

39 files changed: +1028 −973 lines
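
Every hunk in this commit is the same mechanical rewrite: the member-style builder call rewriter.create<OpTy>(loc, ...) becomes the static OpTy::create(rewriter, loc, ...), with the builder hoisted into the first argument and everything else unchanged. A minimal before/after sketch of the pattern, mirroring the TileUsingForOp hunk below (the makeScaledSize helper is hypothetical, for illustration only; llvm#147168 describes the actual API change):

// Before: ops are built through the OpBuilder member template.
Value makeScaledSize(OpBuilder &b, Location loc, int64_t size) {
  auto val = b.create<arith::ConstantIndexOp>(loc, size);
  Value vscale = b.create<vector::VectorScaleOp>(loc, b.getIndexType());
  return b.create<arith::MulIOp>(loc, val, vscale).getResult();
}

// After: each op's static create takes the builder as its first argument.
Value makeScaledSize(OpBuilder &b, Location loc, int64_t size) {
  auto val = arith::ConstantIndexOp::create(b, loc, size);
  Value vscale = vector::VectorScaleOp::create(b, loc, b.getIndexType());
  return arith::MulIOp::create(b, loc, val, vscale).getResult();
}

Both forms build the op at the builder's current insertion point; the change is NFC, which is why every diff below touches only call syntax, argument order, and the resulting clang-format line breaks.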

mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

Lines changed: 153 additions & 147 deletions
Large diffs are not rendered by default.

mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp

Lines changed: 16 additions & 14 deletions
@@ -672,9 +672,10 @@ static Operation *replaceForAllWithNewSignature(
   newOuts.push_back(outputs[resultNumber]);

   // Create new scf.forall op
-  auto newforallOp = rewriter.create<scf::ForallOp>(
-      loc, forallOp.getMixedLowerBound(), forallOp.getMixedUpperBound(),
-      forallOp.getMixedStep(), newOuts, forallOp.getMapping());
+  auto newforallOp = scf::ForallOp::create(
+      rewriter, loc, forallOp.getMixedLowerBound(),
+      forallOp.getMixedUpperBound(), forallOp.getMixedStep(), newOuts,
+      forallOp.getMapping());
   rewriter.eraseBlock(newforallOp.getBody());
   newforallOp.getRegion().takeBody(forallOp.getRegion());

@@ -699,8 +700,8 @@ static Operation *replaceForAllWithNewSignature(
   Value src = tileAndFuseResult.tiledValues[0];
   Value dst = newforallOp.getRegionIterArgs().back();
   SmallVector<OpFoldResult> strides(offsets.size(), rewriter.getIndexAttr(1));
-  rewriter.create<tensor::ParallelInsertSliceOp>(firstYieldOp->getLoc(), src,
-                                                 dst, offsets, sizes, strides);
+  tensor::ParallelInsertSliceOp::create(rewriter, firstYieldOp->getLoc(), src,
+                                        dst, offsets, sizes, strides);

   for (auto result : llvm::enumerate(forallOp.getResults())) {
     rewriter.replaceAllUsesWith(result.value(),
@@ -3410,12 +3411,12 @@ transform::TileUsingForOp::apply(transform::TransformRewriter &rewriter,
   for (auto [ofrIdx, ofr] : llvm::enumerate(getMixedSizes())) {
     if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr)) {
       if (scalableSizes[ofrIdx]) {
-        auto val = b.create<arith::ConstantIndexOp>(
-            getLoc(), cast<IntegerAttr>(attr).getInt());
+        auto val = arith::ConstantIndexOp::create(
+            b, getLoc(), cast<IntegerAttr>(attr).getInt());
         Value vscale =
-            b.create<vector::VectorScaleOp>(getLoc(), b.getIndexType());
+            vector::VectorScaleOp::create(b, getLoc(), b.getIndexType());
         sizes.push_back(
-            b.create<arith::MulIOp>(getLoc(), val, vscale).getResult());
+            arith::MulIOp::create(b, getLoc(), val, vscale).getResult());
       } else {
         sizes.push_back(attr);
       }
@@ -3626,9 +3627,10 @@ static scf::ForallOp normalizeForallLoopOp(RewriterBase &rewriter,
   SmallVector<OpFoldResult> normalizedSteps(normalizedUbs.size(),
                                             rewriter.getIndexAttr(1));

-  auto normalizedForallOp = rewriter.create<scf::ForallOp>(
-      loc, normalizedLbs, normalizedUbs, normalizedSteps, loop.getOutputs(),
-      loop.getMapping(), [](OpBuilder &, Location, ValueRange) {});
+  auto normalizedForallOp = scf::ForallOp::create(
+      rewriter, loc, normalizedLbs, normalizedUbs, normalizedSteps,
+      loop.getOutputs(), loop.getMapping(),
+      [](OpBuilder &, Location, ValueRange) {});

   auto normalizedLoopIvs = normalizedForallOp.getInductionVars();
   OpBuilder::InsertionGuard g(rewriter);
@@ -4131,8 +4133,8 @@ DiagnosedSilenceableFailure doit(RewriterBase &rewriter, OpTy target,
         target->template getParentOfType<scf::InParallelOp>());
   }

-  Value extracted = rewriter.create<tensor::ExtractSliceOp>(
-      target.getLoc(), target.getDest(), target.getMixedOffsets(),
+  Value extracted = tensor::ExtractSliceOp::create(
+      rewriter, target.getLoc(), target.getDest(), target.getMixedOffsets(),
       target.getMixedSizes(), target.getMixedStrides());
   Value copied = rewriter
                      .create<linalg::CopyOp>(target.getLoc(),

mlir/lib/Dialect/Linalg/Transforms/BufferizableOpInterfaceImpl.cpp

Lines changed: 3 additions & 3 deletions
@@ -184,9 +184,9 @@ struct SoftmaxOpInterface
         getBuffer(rewriter, softmaxOp.getOutput(), options, state);
     if (failed(outputBuffer))
       return failure();
-    rewriter.create<linalg::SoftmaxOp>(softmaxOp.getLoc(),
-                                       /*result=*/TypeRange(), *inputBuffer,
-                                       *outputBuffer, softmaxOp.getDimension());
+    linalg::SoftmaxOp::create(rewriter, softmaxOp.getLoc(),
+                              /*result=*/TypeRange(), *inputBuffer,
+                              *outputBuffer, softmaxOp.getDimension());
     replaceOpWithBufferizedValues(rewriter, op, *outputBuffer);
     return success();
   }

mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp

Lines changed: 86 additions & 85 deletions
Large diffs are not rendered by default.

mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp

Lines changed: 40 additions & 39 deletions
@@ -37,8 +37,8 @@ static Value createInserts(RewriterBase &rewriter, Location loc, int dim,
   if (dim == static_cast<int>(shape.size()) - 1) {
     for (int i = 0; i < shape.back(); ++i) {
       indices.back() = constants[i];
-      destination = rewriter.create<tensor::InsertOp>(loc, *elementIt,
-                                                      destination, indices);
+      destination = tensor::InsertOp::create(rewriter, loc, *elementIt,
+                                             destination, indices);
       ++elementIt;
     }
     return destination;
@@ -65,27 +65,27 @@ static void createMemcpy(OpBuilder &b, Location loc, Value tensorSource,
       MaterializeInDestination: {
     // Note: This is the preferred way of memcpy'ing because no layout map
    // and/or memory space must be specified for the source.
-    auto materializeOp = b.create<bufferization::MaterializeInDestinationOp>(
-        loc, tensorSource, memrefDest);
+    auto materializeOp = bufferization::MaterializeInDestinationOp::create(
+        b, loc, tensorSource, memrefDest);
     materializeOp.setWritable(true);
   } break;
   case linalg::BufferizeToAllocationOptions::MemcpyOp::MemrefCopy: {
     // TODO: Support custom memory space on source.
     // We do not know the layout map of the source yet, so use a fully dynamic
     // layout for best compatibility.
-    Value toBuffer = b.create<bufferization::ToBufferOp>(
-        loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
+    Value toBuffer = bufferization::ToBufferOp::create(
+        b, loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
         tensorSource, /*readOnly=*/true);
-    b.create<memref::CopyOp>(loc, toBuffer, memrefDest);
+    memref::CopyOp::create(b, loc, toBuffer, memrefDest);
   } break;
   case linalg::BufferizeToAllocationOptions::MemcpyOp::LinalgCopy: {
     // TODO: Support custom memory space on source.
     // We do not know the layout map of the source yet, so use a fully dynamic
     // layout for best compatibility.
-    Value toBuffer = b.create<bufferization::ToBufferOp>(
-        loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
+    Value toBuffer = bufferization::ToBufferOp::create(
+        b, loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType),
         tensorSource, /*readOnly=*/true);
-    b.create<linalg::CopyOp>(loc, toBuffer, memrefDest);
+    linalg::CopyOp::create(b, loc, toBuffer, memrefDest);
   } break;
   };
 }
@@ -120,15 +120,15 @@ static Operation *movePaddingToFillOrGenericOp(RewriterBase &rewriter,
             ->materializeConstant(rewriter, constYieldedValue,
                                   yieldedValue.getType(), yieldedValue.getLoc())
             ->getResult(0);
-    auto fillOp = rewriter.create<linalg::FillOp>(loc, ValueRange(fillValue),
-                                                  ValueRange(dest));
+    auto fillOp = linalg::FillOp::create(rewriter, loc, ValueRange(fillValue),
+                                         ValueRange(dest));
     return fillOp;
   }

   if (invariantYieldedValue) {
     // Padding with an invariant value.
-    auto fillOp = rewriter.create<linalg::FillOp>(loc, ValueRange(yieldedValue),
-                                                  ValueRange(dest));
+    auto fillOp = linalg::FillOp::create(
+        rewriter, loc, ValueRange(yieldedValue), ValueRange(dest));
     return fillOp;
   }

@@ -137,16 +137,16 @@ static Operation *movePaddingToFillOrGenericOp(RewriterBase &rewriter,
                                                utils::IteratorType::parallel);
   SmallVector<AffineMap> indexingMaps(
       1, rewriter.getMultiDimIdentityMap(resultType.getRank()));
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, resultType, /*inputs=*/ValueRange(),
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, resultType, /*inputs=*/ValueRange(),
       /*outputs=*/ValueRange{dest}, /*indexingMaps=*/
       indexingMaps, iteratorTypes);
   Block *body = rewriter.createBlock(&genericOp->getRegion(0), {},
                                      resultType.getElementType(), loc);
   rewriter.setInsertionPointToStart(body);
   SmallVector<Value> bbArgReplacements;
   for (int64_t i = 0; i < resultType.getRank(); ++i)
-    bbArgReplacements.push_back(rewriter.create<linalg::IndexOp>(loc, i));
+    bbArgReplacements.push_back(linalg::IndexOp::create(rewriter, loc, i));
   rewriter.mergeBlocks(padOp.getBody(), body, bbArgReplacements);

   // Update terminator.
@@ -179,8 +179,8 @@ static SmallVector<Value> reifyOrComputeDynamicSizes(OpBuilder &b,
   for (int64_t i = 0; i < tensorType.getRank(); ++i) {
     if (tensorType.isDynamicDim(i))
       dynSizes.push_back(
-          b.create<DimOp>(value.getLoc(), value,
-                          b.create<arith::ConstantIndexOp>(value.getLoc(), i)));
+          DimOp::create(b, value.getLoc(), value,
+                        arith::ConstantIndexOp::create(b, value.getLoc(), i)));
   }
   return dynSizes;
 }
@@ -201,15 +201,15 @@ createAllocationForTensor(RewriterBase &rewriter, Location loc, Value value,
   Value alloc;
   if (options.allocOp ==
       linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloc) {
-    alloc = rewriter.create<memref::AllocOp>(loc, memrefType, dynamicSizes);
+    alloc = memref::AllocOp::create(rewriter, loc, memrefType, dynamicSizes);
     if (options.emitDealloc) {
       // Place deallocation at the end of the block.
       rewriter.setInsertionPoint(rewriter.getInsertionBlock()->getTerminator());
-      rewriter.create<memref::DeallocOp>(loc, alloc);
+      memref::DeallocOp::create(rewriter, loc, alloc);
     }
   } else if (options.allocOp ==
              linalg::BufferizeToAllocationOptions::AllocOp::MemrefAlloca) {
-    alloc = rewriter.create<memref::AllocaOp>(loc, memrefType, dynamicSizes);
+    alloc = memref::AllocaOp::create(rewriter, loc, memrefType, dynamicSizes);
     // No dealloc is needed.
   }

@@ -243,14 +243,14 @@ Value linalg::bufferizeToAllocation(
       getMixedSizes(rewriter, loc, padOp.getSource());
   SmallVector<OpFoldResult> strides(padOp.getResultType().getRank(),
                                     rewriter.getIndexAttr(1));
-  Value subview = rewriter.create<memref::SubViewOp>(
-      loc, alloc, /*offsets=*/padOp.getMixedLowPad(), sizes, strides);
+  Value subview = memref::SubViewOp::create(
+      rewriter, loc, alloc, /*offsets=*/padOp.getMixedLowPad(), sizes, strides);
   createMemcpy(rewriter, loc, padOp.getSource(), subview, options);

   // Create bufferization.to_tensor with "restrict" and "writable". The returned
   // tensor is a new buffer allocation, so it does not alias with any buffer.
-  Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
-      loc, padOp.getResult().getType(), alloc, /*restrict=*/true,
+  Value toTensorOp = bufferization::ToTensorOp::create(
+      rewriter, loc, padOp.getResult().getType(), alloc, /*restrict=*/true,
       /*writable=*/true);
   rewriter.replaceOp(padOp, toTensorOp);
   return alloc;
@@ -338,8 +338,9 @@ Value linalg::bufferizeToAllocation(

   // Create bufferization.to_tensor with "restrict" and "writable". The returned
   // tensor is a new buffer allocation, so it does not alias with any buffer.
-  Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
-      loc, allocTensorOp.getResult().getType(), alloc, /*restrict=*/true,
+  Value toTensorOp = bufferization::ToTensorOp::create(
+      rewriter, loc, allocTensorOp.getResult().getType(), alloc,
+      /*restrict=*/true,
       /*writable=*/true);
   rewriter.replaceOp(allocTensorOp, toTensorOp);
   return alloc;
@@ -354,7 +355,7 @@ FailureOr<Operation *> mlir::linalg::rewriteInDestinationPassingStyle(
   auto shape = tensorType.getShape();

   // Create tensor.empty.
-  auto emptyOp = rewriter.create<EmptyOp>(loc, tensorType, ValueRange());
+  auto emptyOp = EmptyOp::create(rewriter, loc, tensorType, ValueRange());

   // Case: tensor<elem_type>.
   if (shape.empty()) {
@@ -369,7 +370,7 @@ FailureOr<Operation *> mlir::linalg::rewriteInDestinationPassingStyle(
   SmallVector<Value, 2> constants;
   constants.reserve(maxDim);
   for (int i = 0; i < maxDim; ++i)
-    constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
+    constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));

   // Traverse all elements and create tensor.insert ops.
   auto elementIt = fromElementsOp.getElements().begin();
@@ -394,24 +395,24 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
   RankedTensorType tensorType = cast<RankedTensorType>(generateOp.getType());

   // Create tensor.empty.
-  auto emptyOp =
-      rewriter.create<EmptyOp>(loc, tensorType, generateOp.getDynamicExtents());
+  auto emptyOp = EmptyOp::create(rewriter, loc, tensorType,
+                                 generateOp.getDynamicExtents());

   // Create linalg.generic.
   SmallVector<utils::IteratorType> iteratorTypes(tensorType.getRank(),
                                                  utils::IteratorType::parallel);
   SmallVector<AffineMap> indexingMaps(
       1, rewriter.getMultiDimIdentityMap(tensorType.getRank()));
-  auto genericOp = rewriter.create<linalg::GenericOp>(
-      loc, tensorType, /*inputs=*/ValueRange(),
+  auto genericOp = linalg::GenericOp::create(
+      rewriter, loc, tensorType, /*inputs=*/ValueRange(),
       /*outputs=*/ValueRange{emptyOp.getResult()}, /*indexingMaps=*/
       indexingMaps, iteratorTypes);
   Block *body = rewriter.createBlock(&genericOp->getRegion(0), {},
                                      tensorType.getElementType(), loc);
   rewriter.setInsertionPointToStart(body);
   SmallVector<Value> bbArgReplacements;
   for (int64_t i = 0; i < tensorType.getRank(); ++i)
-    bbArgReplacements.push_back(rewriter.create<linalg::IndexOp>(loc, i));
+    bbArgReplacements.push_back(linalg::IndexOp::create(rewriter, loc, i));
   rewriter.mergeBlocks(&generateOp.getBody().front(), body, bbArgReplacements);

   // Update terminator.
@@ -450,13 +451,13 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
       llvm::all_of(padOp.getMixedHighPad(), isZeroInteger)) {
     using bufferization::AllocTensorOp;
     Value allocated =
-        rewriter.create<AllocTensorOp>(loc, resultType, dynamicSizes);
+        AllocTensorOp::create(rewriter, loc, resultType, dynamicSizes);
     auto copyOp = rewriter.replaceOpWithNewOp<linalg::CopyOp>(
         padOp, padOp.getSource(), allocated);
     return copyOp.getOperation();
   }

-  Value empty = rewriter.create<EmptyOp>(loc, resultType, dynamicSizes);
+  Value empty = EmptyOp::create(rewriter, loc, resultType, dynamicSizes);
   // Create linalg.fill or linalg.generic.
   Operation *fillOp = movePaddingToFillOrGenericOp(rewriter, loc, padOp, empty);
   rewriter.setInsertionPointAfter(fillOp);
@@ -567,8 +568,8 @@ Value linalg::bufferizeToAllocation(
     createMemcpy(rewriter, op->getLoc(), operand->get(), alloc, options);
   }
   rewriter.modifyOpInPlace(op, [&]() {
-    auto toTensorOp = rewriter.create<ToTensorOp>(
-        op->getLoc(), operand->get().getType(), alloc);
+    auto toTensorOp = ToTensorOp::create(rewriter, op->getLoc(),
+                                         operand->get().getType(), alloc);
     operand->set(toTensorOp);
     if (options.bufferizeDestinationOnly) {
       rewriter.modifyOpInPlace(toTensorOp, [&]() {
