
Commit b51ab7b

makslevental authored and github-actions[bot] committed
Automerge: [mlir][NFC] update mlir/Dialect create APIs (18/n) (#149925)
See llvm/llvm-project#147168 for more info.
2 parents 7473cde + a636b7b commit b51ab7b

17 files changed: +257 −253 lines
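
The mechanical pattern applied throughout is replacing `builder.create<OpTy>(loc, ...)` with the op's static `OpTy::create(builder, loc, ...)` entry point; the location and the remaining build arguments are unchanged. A minimal sketch of the before/after shape, using a hypothetical helper rather than code from this commit:

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper, shown only to illustrate the API migration.
static Value buildZeroIndex(OpBuilder &builder, Location loc) {
  // Old style: the builder call is templated on the op type.
  //   return builder.create<arith::ConstantIndexOp>(loc, 0);
  // New style: the op's static create takes the builder as its first
  // argument, followed by the same location and build arguments as before.
  return arith::ConstantIndexOp::create(builder, loc, 0);
}

The rationale for routing construction through the op's own static create is discussed in the linked issue (llvm/llvm-project#147168).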

mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp

Lines changed: 4 additions & 4 deletions
@@ -85,11 +85,11 @@ Value memref::AllocaOp::getDefaultValue(const MemorySlot &slot,
   // TODO: support more types.
   return TypeSwitch<Type, Value>(slot.elemType)
       .Case([&](MemRefType t) {
-        return builder.create<memref::AllocaOp>(getLoc(), t);
+        return memref::AllocaOp::create(builder, getLoc(), t);
       })
       .Default([&](Type t) {
-        return builder.create<arith::ConstantOp>(getLoc(), t,
-                                                 builder.getZeroAttr(t));
+        return arith::ConstantOp::create(builder, getLoc(), t,
+                                         builder.getZeroAttr(t));
       });
 }

@@ -135,7 +135,7 @@ DenseMap<Attribute, MemorySlot> memref::AllocaOp::destructure(
   for (Attribute usedIndex : usedIndices) {
     Type elemType = memrefType.getTypeAtIndex(usedIndex);
     MemRefType elemPtr = MemRefType::get({}, elemType);
-    auto subAlloca = builder.create<memref::AllocaOp>(getLoc(), elemPtr);
+    auto subAlloca = memref::AllocaOp::create(builder, getLoc(), elemPtr);
     newAllocators.push_back(subAlloca);
     slotMap.try_emplace<MemorySlot>(usedIndex,
                                     {subAlloca.getResult(), elemType});

mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp

Lines changed: 20 additions & 18 deletions
@@ -213,9 +213,9 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
     assert(dynamicSizes.size() == newMemRefType.getNumDynamicDims());

     // Create and insert the alloc op for the new memref.
-    auto newAlloc = rewriter.create<AllocLikeOp>(
-        alloc.getLoc(), newMemRefType, dynamicSizes, alloc.getSymbolOperands(),
-        alloc.getAlignmentAttr());
+    auto newAlloc = AllocLikeOp::create(rewriter, alloc.getLoc(), newMemRefType,
+                                        dynamicSizes, alloc.getSymbolOperands(),
+                                        alloc.getAlignmentAttr());
     // Insert a cast so we have the same type as the old alloc.
     rewriter.replaceOpWithNewOp<CastOp>(alloc, alloc.getType(), newAlloc);
     return success();
@@ -797,7 +797,7 @@ void DimOp::getAsmResultNames(function_ref<void(Value, StringRef)> setNameFn) {
 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                   int64_t index) {
   auto loc = result.location;
-  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
+  Value indexValue = arith::ConstantIndexOp::create(builder, loc, index);
   build(builder, result, source, indexValue);
 }

@@ -1044,9 +1044,9 @@ struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
     rewriter.setInsertionPointAfter(reshape);
     Location loc = dim.getLoc();
     Value load =
-        rewriter.create<LoadOp>(loc, reshape.getShape(), dim.getIndex());
+        LoadOp::create(rewriter, loc, reshape.getShape(), dim.getIndex());
     if (load.getType() != dim.getType())
-      load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load);
+      load = arith::IndexCastOp::create(rewriter, loc, dim.getType(), load);
     rewriter.replaceOp(dim, load);
     return success();
   }
@@ -1319,8 +1319,9 @@ static bool replaceConstantUsesOf(OpBuilder &rewriter, Location loc,
   assert(isa<Attribute>(maybeConstant) &&
          "The constified value should be either unchanged (i.e., == result) "
          "or a constant");
-  Value constantVal = rewriter.create<arith::ConstantIndexOp>(
-      loc, llvm::cast<IntegerAttr>(cast<Attribute>(maybeConstant)).getInt());
+  Value constantVal = arith::ConstantIndexOp::create(
+      rewriter, loc,
+      llvm::cast<IntegerAttr>(cast<Attribute>(maybeConstant)).getInt());
   for (Operation *op : llvm::make_early_inc_range(result.getUsers())) {
     // modifyOpInPlace: lambda cannot capture structured bindings in C++17
     // yet.
@@ -2548,8 +2549,9 @@ struct CollapseShapeOpMemRefCastFolder
       rewriter.modifyOpInPlace(
           op, [&]() { op.getSrcMutable().assign(cast.getSource()); });
     } else {
-      Value newOp = rewriter.create<CollapseShapeOp>(
-          op->getLoc(), cast.getSource(), op.getReassociationIndices());
+      Value newOp =
+          CollapseShapeOp::create(rewriter, op->getLoc(), cast.getSource(),
+                                  op.getReassociationIndices());
       rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
     }
     return success();
@@ -3006,15 +3008,15 @@ SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
     Value offset =
         op.isDynamicOffset(idx)
            ? op.getDynamicOffset(idx)
-           : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
+           : arith::ConstantIndexOp::create(b, loc, op.getStaticOffset(idx));
     Value size =
         op.isDynamicSize(idx)
            ? op.getDynamicSize(idx)
-           : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
+           : arith::ConstantIndexOp::create(b, loc, op.getStaticSize(idx));
     Value stride =
         op.isDynamicStride(idx)
            ? op.getDynamicStride(idx)
-           : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
+           : arith::ConstantIndexOp::create(b, loc, op.getStaticStride(idx));
     res.emplace_back(Range{offset, size, stride});
   }
   return res;
@@ -3173,8 +3175,8 @@ class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
     if (!resultType)
       return failure();

-    Value newSubView = rewriter.create<SubViewOp>(
-        subViewOp.getLoc(), resultType, castOp.getSource(),
+    Value newSubView = SubViewOp::create(
+        rewriter, subViewOp.getLoc(), resultType, castOp.getSource(),
         subViewOp.getOffsets(), subViewOp.getSizes(), subViewOp.getStrides(),
         subViewOp.getStaticOffsets(), subViewOp.getStaticSizes(),
         subViewOp.getStaticStrides());
@@ -3495,9 +3497,9 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
       return failure();

     // Create new ViewOp.
-    auto newViewOp = rewriter.create<ViewOp>(
-        viewOp.getLoc(), newMemRefType, viewOp.getOperand(0),
-        viewOp.getByteShift(), newOperands);
+    auto newViewOp = ViewOp::create(rewriter, viewOp.getLoc(), newMemRefType,
+                                    viewOp.getOperand(0), viewOp.getByteShift(),
+                                    newOperands);
     // Insert a cast so we have the same type as the old memref type.
     rewriter.replaceOpWithNewOp<CastOp>(viewOp, viewOp.getType(), newViewOp);
     return success();

mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp

Lines changed: 4 additions & 3 deletions
@@ -155,9 +155,10 @@ transform::MemRefAllocaToGlobalOp::apply(transform::TransformRewriter &rewriter,
     Type resultType = alloca.getResult().getType();
     OpBuilder builder(rewriter.getContext());
     // TODO: Add a better builder for this.
-    globalOp = builder.create<memref::GlobalOp>(
-        loc, StringAttr::get(ctx, "alloca"), StringAttr::get(ctx, "private"),
-        TypeAttr::get(resultType), Attribute{}, UnitAttr{}, IntegerAttr{});
+    globalOp = memref::GlobalOp::create(
+        builder, loc, StringAttr::get(ctx, "alloca"),
+        StringAttr::get(ctx, "private"), TypeAttr::get(resultType),
+        Attribute{}, UnitAttr{}, IntegerAttr{});
     symbolTable.insert(globalOp);
   }

mlir/lib/Dialect/MemRef/Transforms/AllocationOpInterfaceImpl.cpp

Lines changed: 6 additions & 5 deletions
@@ -22,11 +22,11 @@ struct DefaultAllocationInterface
           DefaultAllocationInterface, memref::AllocOp> {
   static std::optional<Operation *> buildDealloc(OpBuilder &builder,
                                                  Value alloc) {
-    return builder.create<memref::DeallocOp>(alloc.getLoc(), alloc)
+    return memref::DeallocOp::create(builder, alloc.getLoc(), alloc)
         .getOperation();
   }
   static std::optional<Value> buildClone(OpBuilder &builder, Value alloc) {
-    return builder.create<bufferization::CloneOp>(alloc.getLoc(), alloc)
+    return bufferization::CloneOp::create(builder, alloc.getLoc(), alloc)
         .getResult();
   }
   static ::mlir::HoistingKind getHoistingKind() {
@@ -35,8 +35,9 @@ struct DefaultAllocationInterface
   static ::std::optional<::mlir::Operation *>
   buildPromotedAlloc(OpBuilder &builder, Value alloc) {
     Operation *definingOp = alloc.getDefiningOp();
-    return builder.create<memref::AllocaOp>(
-        definingOp->getLoc(), cast<MemRefType>(definingOp->getResultTypes()[0]),
+    return memref::AllocaOp::create(
+        builder, definingOp->getLoc(),
+        cast<MemRefType>(definingOp->getResultTypes()[0]),
         definingOp->getOperands(), definingOp->getAttrs());
   }
 };
@@ -52,7 +53,7 @@ struct DefaultReallocationInterface
           DefaultAllocationInterface, memref::ReallocOp> {
   static std::optional<Operation *> buildDealloc(OpBuilder &builder,
                                                  Value realloc) {
-    return builder.create<memref::DeallocOp>(realloc.getLoc(), realloc)
+    return memref::DeallocOp::create(builder, realloc.getLoc(), realloc)
         .getOperation();
   }
 };

mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp

Lines changed: 2 additions & 2 deletions
@@ -124,8 +124,8 @@ struct ComposeSubViewOpPattern : public OpRewritePattern<memref::SubViewOp> {
       }

       AffineMap map = AffineMap::get(0, affineApplyOperands.size(), expr);
-      Value result = rewriter.create<affine::AffineApplyOp>(
-          op.getLoc(), map, affineApplyOperands);
+      Value result = affine::AffineApplyOp::create(rewriter, op.getLoc(), map,
+                                                   affineApplyOperands);
       offsets.push_back(result);
     }
   }

mlir/lib/Dialect/MemRef/Transforms/EmulateNarrowType.cpp

Lines changed: 30 additions & 32 deletions
@@ -99,7 +99,7 @@ static Value getOffsetForBitwidth(Location loc, OpFoldResult srcIdx,
       affine::makeComposedFoldedAffineApply(builder, loc, offsetExpr, {srcIdx});
   Value bitOffset = getValueOrCreateConstantIndexOp(builder, loc, offsetVal);
   IntegerType dstType = builder.getIntegerType(targetBits);
-  return builder.create<arith::IndexCastOp>(loc, dstType, bitOffset);
+  return arith::IndexCastOp::create(builder, loc, dstType, bitOffset);
 }

 /// When writing a subbyte size, masked bitwise operations are used to only
@@ -112,14 +112,14 @@ static Value getSubByteWriteMask(Location loc, OpFoldResult linearizedIndices,
   auto dstIntegerType = builder.getIntegerType(dstBits);
   auto maskRightAlignedAttr =
       builder.getIntegerAttr(dstIntegerType, (1 << srcBits) - 1);
-  Value maskRightAligned = builder.create<arith::ConstantOp>(
-      loc, dstIntegerType, maskRightAlignedAttr);
+  Value maskRightAligned = arith::ConstantOp::create(
+      builder, loc, dstIntegerType, maskRightAlignedAttr);
   Value writeMaskInverse =
-      builder.create<arith::ShLIOp>(loc, maskRightAligned, bitwidthOffset);
+      arith::ShLIOp::create(builder, loc, maskRightAligned, bitwidthOffset);
   auto flipValAttr = builder.getIntegerAttr(dstIntegerType, -1);
   Value flipVal =
-      builder.create<arith::ConstantOp>(loc, dstIntegerType, flipValAttr);
-  return builder.create<arith::XOrIOp>(loc, writeMaskInverse, flipVal);
+      arith::ConstantOp::create(builder, loc, dstIntegerType, flipValAttr);
+  return arith::XOrIOp::create(builder, loc, writeMaskInverse, flipVal);
 }

 /// Returns the scaled linearized index based on the `srcBits` and `dstBits`
@@ -141,7 +141,7 @@ getLinearizedSrcIndices(OpBuilder &builder, Location loc, int64_t srcBits,
                         const SmallVector<OpFoldResult> &indices,
                         Value memref) {
   auto stridedMetadata =
-      builder.create<memref::ExtractStridedMetadataOp>(loc, memref);
+      memref::ExtractStridedMetadataOp::create(builder, loc, memref);
   OpFoldResult linearizedIndices;
   std::tie(std::ignore, linearizedIndices) =
       memref::getLinearizedMemRefOffsetAndSize(
@@ -298,24 +298,24 @@ struct ConvertMemRefLoad final : OpConversionPattern<memref::LoadOp> {
     // Special case 0-rank memref loads.
     Value bitsLoad;
     if (convertedType.getRank() == 0) {
-      bitsLoad = rewriter.create<memref::LoadOp>(loc, adaptor.getMemref(),
-                                                 ValueRange{});
+      bitsLoad = memref::LoadOp::create(rewriter, loc, adaptor.getMemref(),
+                                        ValueRange{});
     } else {
       // Linearize the indices of the original load instruction. Do not account
       // for the scaling yet. This will be accounted for later.
       OpFoldResult linearizedIndices = getLinearizedSrcIndices(
           rewriter, loc, srcBits, adaptor.getIndices(), op.getMemRef());

-      Value newLoad = rewriter.create<memref::LoadOp>(
-          loc, adaptor.getMemref(),
+      Value newLoad = memref::LoadOp::create(
+          rewriter, loc, adaptor.getMemref(),
           getIndicesForLoadOrStore(rewriter, loc, linearizedIndices, srcBits,
                                    dstBits));

       // Get the offset and shift the bits to the rightmost.
       // Note, currently only the big-endian is supported.
       Value bitwidthOffset = getOffsetForBitwidth(loc, linearizedIndices,
                                                   srcBits, dstBits, rewriter);
-      bitsLoad = rewriter.create<arith::ShRSIOp>(loc, newLoad, bitwidthOffset);
+      bitsLoad = arith::ShRSIOp::create(rewriter, loc, newLoad, bitwidthOffset);
     }

     // Get the corresponding bits. If the arith computation bitwidth equals
@@ -331,17 +331,17 @@ struct ConvertMemRefLoad final : OpConversionPattern<memref::LoadOp> {
                            : IntegerType::get(rewriter.getContext(),
                                               resultTy.getIntOrFloatBitWidth());
     if (conversionTy == convertedElementType) {
-      auto mask = rewriter.create<arith::ConstantOp>(
-          loc, convertedElementType,
+      auto mask = arith::ConstantOp::create(
+          rewriter, loc, convertedElementType,
           rewriter.getIntegerAttr(convertedElementType, (1 << srcBits) - 1));

-      result = rewriter.create<arith::AndIOp>(loc, bitsLoad, mask);
+      result = arith::AndIOp::create(rewriter, loc, bitsLoad, mask);
     } else {
-      result = rewriter.create<arith::TruncIOp>(loc, conversionTy, bitsLoad);
+      result = arith::TruncIOp::create(rewriter, loc, conversionTy, bitsLoad);
     }

     if (conversionTy != resultTy) {
-      result = rewriter.create<arith::BitcastOp>(loc, resultTy, result);
+      result = arith::BitcastOp::create(rewriter, loc, resultTy, result);
     }

     rewriter.replaceOp(op, result);
@@ -428,20 +428,20 @@ struct ConvertMemrefStore final : OpConversionPattern<memref::StoreOp> {
     // Pad the input value with 0s on the left.
     Value input = adaptor.getValue();
     if (!input.getType().isInteger()) {
-      input = rewriter.create<arith::BitcastOp>(
-          loc,
+      input = arith::BitcastOp::create(
+          rewriter, loc,
           IntegerType::get(rewriter.getContext(),
                            input.getType().getIntOrFloatBitWidth()),
           input);
     }
     Value extendedInput =
-        rewriter.create<arith::ExtUIOp>(loc, dstIntegerType, input);
+        arith::ExtUIOp::create(rewriter, loc, dstIntegerType, input);

     // Special case 0-rank memref stores. No need for masking.
     if (convertedType.getRank() == 0) {
-      rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::assign,
-                                           extendedInput, adaptor.getMemref(),
-                                           ValueRange{});
+      memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::assign,
+                                  extendedInput, adaptor.getMemref(),
+                                  ValueRange{});
       rewriter.eraseOp(op);
       return success();
     }
@@ -456,16 +456,14 @@ struct ConvertMemrefStore final : OpConversionPattern<memref::StoreOp> {
                                           dstBits, bitwidthOffset, rewriter);
     // Align the value to write with the destination bits
     Value alignedVal =
-        rewriter.create<arith::ShLIOp>(loc, extendedInput, bitwidthOffset);
+        arith::ShLIOp::create(rewriter, loc, extendedInput, bitwidthOffset);

     // Clear destination bits
-    rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::andi,
-                                         writeMask, adaptor.getMemref(),
-                                         storeIndices);
+    memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::andi,
+                                writeMask, adaptor.getMemref(), storeIndices);
     // Write srcs bits to destination
-    rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::ori,
-                                         alignedVal, adaptor.getMemref(),
-                                         storeIndices);
+    memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::ori,
+                                alignedVal, adaptor.getMemref(), storeIndices);
     rewriter.eraseOp(op);
     return success();
   }
@@ -525,8 +523,8 @@ struct ConvertMemRefSubview final : OpConversionPattern<memref::SubViewOp> {
     }

     // Transform the offsets, sizes and strides according to the emulation.
-    auto stridedMetadata = rewriter.create<memref::ExtractStridedMetadataOp>(
-        loc, subViewOp.getViewSource());
+    auto stridedMetadata = memref::ExtractStridedMetadataOp::create(
+        rewriter, loc, subViewOp.getViewSource());

     OpFoldResult linearizedIndices;
     auto strides = stridedMetadata.getConstifiedMixedStrides();

mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp

Lines changed: 9 additions & 8 deletions
@@ -48,15 +48,15 @@ struct MemRefReshapeOpConverter : public OpRewritePattern<memref::ReshapeOp> {
       Value size;
       // Load dynamic sizes from the shape input, use constants for static dims.
       if (op.getType().isDynamicDim(i)) {
-        Value index = rewriter.create<arith::ConstantIndexOp>(loc, i);
-        size = rewriter.create<memref::LoadOp>(loc, op.getShape(), index);
+        Value index = arith::ConstantIndexOp::create(rewriter, loc, i);
+        size = memref::LoadOp::create(rewriter, loc, op.getShape(), index);
         if (!isa<IndexType>(size.getType()))
-          size = rewriter.create<arith::IndexCastOp>(
-              loc, rewriter.getIndexType(), size);
+          size = arith::IndexCastOp::create(rewriter, loc,
+                                            rewriter.getIndexType(), size);
         sizes[i] = size;
       } else {
         auto sizeAttr = rewriter.getIndexAttr(op.getType().getDimSize(i));
-        size = rewriter.create<arith::ConstantOp>(loc, sizeAttr);
+        size = arith::ConstantOp::create(rewriter, loc, sizeAttr);
         sizes[i] = sizeAttr;
       }
       if (stride)
@@ -66,10 +66,11 @@ struct MemRefReshapeOpConverter : public OpRewritePattern<memref::ReshapeOp> {

       if (i > 0) {
         if (stride) {
-          stride = rewriter.create<arith::MulIOp>(loc, stride, size);
+          stride = arith::MulIOp::create(rewriter, loc, stride, size);
         } else if (op.getType().isDynamicDim(i)) {
-          stride = rewriter.create<arith::MulIOp>(
-              loc, rewriter.create<arith::ConstantIndexOp>(loc, staticStride),
+          stride = arith::MulIOp::create(
+              rewriter, loc,
+              arith::ConstantIndexOp::create(rewriter, loc, staticStride),
               size);
         } else {
           staticStride *= op.getType().getDimSize(i);
