Skip to content

Commit 74298fd

Browse files
committed
[mlir][NFC] update mlir create APIs (34/n)
See #147168 for more info.
1 parent 2571924 commit 74298fd

File tree

16 files changed

+82
-128
lines changed

16 files changed

+82
-128
lines changed

mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -402,7 +402,7 @@ class CastConversion : public OpConversionPattern<ArithOp> {
402402
Value actualOp = adaptValueType(adaptor.getIn(), rewriter, castSrcType);
403403

404404
// Actual cast (may change bitwidth)
405-
auto cast = rewriter.template create<emitc::CastOp>(op.getLoc(),
405+
auto cast = emitc::CastOp::create(rewriter, op.getLoc(),
406406
castDestType, actualOp);
407407

408408
// Cast to the expected output type
@@ -507,7 +507,7 @@ class IntegerOpConversion final : public OpConversionPattern<ArithOp> {
507507
Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
508508
Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
509509

510-
Value arithmeticResult = rewriter.template create<EmitCOp>(
510+
Value arithmeticResult = EmitCOp::create(rewriter,
511511
op.getLoc(), arithmeticType, lhs, rhs);
512512

513513
Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -547,7 +547,7 @@ class BitwiseOpConversion : public OpConversionPattern<ArithOp> {
547547
Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
548548
Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
549549

550-
Value arithmeticResult = rewriter.template create<EmitCOp>(
550+
Value arithmeticResult = EmitCOp::create(rewriter,
551551
op.getLoc(), arithmeticType, lhs, rhs);
552552

553553
Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -748,7 +748,7 @@ class ItoFCastOpConversion : public OpConversionPattern<CastOp> {
748748
}
749749
Value fpCastOperand = adaptor.getIn();
750750
if (actualOperandType != operandType) {
751-
fpCastOperand = rewriter.template create<emitc::CastOp>(
751+
fpCastOperand = emitc::CastOp::create(rewriter,
752752
castOp.getLoc(), actualOperandType, fpCastOperand);
753753
}
754754
rewriter.replaceOpWithNewOp<emitc::CastOp>(castOp, dstType, fpCastOperand);

mlir/lib/Conversion/BufferizationToMemRef/BufferizationToMemRef.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,7 @@ struct CloneOpConversion : public OpConversionPattern<bufferization::CloneOp> {
6868

6969
scf::YieldOp::create(rewriter, loc, acc);
7070
};
71-
auto size = rewriter
72-
.create<scf::ForOp>(loc, zero, rank, one, ValueRange(one),
71+
auto size = scf::ForOp::create(rewriter, loc, zero, rank, one, ValueRange(one),
7372
loopBody)
7473
.getResult(0);
7574

mlir/lib/Conversion/ControlFlowToSCF/ControlFlowToSCF.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -144,8 +144,7 @@ ControlFlowToSCFTransformation::createUnreachableTerminator(Location loc,
144144
return emitError(loc, "Cannot create unreachable terminator for '")
145145
<< parentOp->getName() << "'";
146146

147-
return builder
148-
.create<func::ReturnOp>(
147+
return func::ReturnOp::create(builder,
149148
loc, llvm::map_to_vector(funcOp.getResultTypes(),
150149
[&](Type type) {
151150
return getUndefValue(loc, builder, type);

mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -559,8 +559,7 @@ static Value createGroupReduceOpImpl(OpBuilder &builder, Location loc,
559559
builder, loc, builder.getI32Type(),
560560
builder.getIntegerAttr(builder.getI32Type(), *clusterSize));
561561

562-
return builder
563-
.create<NonUniformOp>(loc, type, scope, groupOp, arg, clusterSizeValue)
562+
return NonUniformOp::create(builder, loc, type, scope, groupOp, arg, clusterSizeValue)
564563
.getResult();
565564
}
566565

mlir/lib/Conversion/LLVMCommon/Pattern.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -273,8 +273,7 @@ LogicalResult ConvertToLLVMPattern::copyUnrankedDescriptors(
273273
// Allocate memory, copy, and free the source if necessary.
274274
Value memory =
275275
toDynamic
276-
? builder
277-
.create<LLVM::CallOp>(loc, mallocFunc.value(), allocationSize)
276+
? LLVM::CallOp::create(builder, loc, mallocFunc.value(), allocationSize)
278277
.getResult()
279278
: LLVM::AllocaOp::create(builder, loc, getPtrType(),
280279
IntegerType::get(getContext(), 8),

mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ static Op getOrDefineGlobal(ModuleOp &moduleOp, const Location loc,
3535
if (!(ret = moduleOp.lookupSymbol<Op>(name))) {
3636
ConversionPatternRewriter::InsertionGuard guard(rewriter);
3737
rewriter.setInsertionPointToStart(moduleOp.getBody());
38-
ret = rewriter.template create<Op>(loc, std::forward<Args>(args)...);
38+
ret = Op::create(rewriter, loc, std::forward<Args>(args)...);
3939
}
4040
return ret;
4141
}

mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -575,8 +575,7 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
575575
Value sizePtr = LLVM::GEPOp::create(rewriter, loc, indexPtrTy,
576576
getTypeConverter()->getIndexType(),
577577
offsetPtr, idxPlusOne);
578-
return rewriter
579-
.create<LLVM::LoadOp>(loc, getTypeConverter()->getIndexType(), sizePtr)
578+
return LLVM::LoadOp::create(rewriter, loc, getTypeConverter()->getIndexType(), sizePtr)
580579
.getResult();
581580
}
582581

mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1493,20 +1493,20 @@ class ShiftPattern : public SPIRVToLLVMConversion<SPIRVOp> {
14931493
Value extended;
14941494
if (op2TypeWidth < dstTypeWidth) {
14951495
if (isUnsignedIntegerOrVector(op2Type)) {
1496-
extended = rewriter.template create<LLVM::ZExtOp>(
1497-
loc, dstType, adaptor.getOperand2());
1496+
extended =
1497+
LLVM::ZExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
14981498
} else {
1499-
extended = rewriter.template create<LLVM::SExtOp>(
1500-
loc, dstType, adaptor.getOperand2());
1499+
extended =
1500+
LLVM::SExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
15011501
}
15021502
} else if (op2TypeWidth == dstTypeWidth) {
15031503
extended = adaptor.getOperand2();
15041504
} else {
15051505
return failure();
15061506
}
15071507

1508-
Value result = rewriter.template create<LLVMOp>(
1509-
loc, dstType, adaptor.getOperand1(), extended);
1508+
Value result =
1509+
LLVMOp::create(rewriter, loc, dstType, adaptor.getOperand1(), extended);
15101510
rewriter.replaceOp(op, result);
15111511
return success();
15121512
}

mlir/lib/Conversion/ShardToMPI/ShardToMPI.cpp

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -177,8 +177,7 @@ struct ConvertShardingOp : public OpConversionPattern<ShardingOp> {
177177
auto type = RankedTensorType::get({nSplits, 2}, i64);
178178
Value resHaloSizes =
179179
haloSizes.empty()
180-
? rewriter
181-
.create<tensor::EmptyOp>(loc, std::array<int64_t, 2>{0, 0},
180+
? tensor::EmptyOp::create(rewriter, loc, std::array<int64_t, 2>{0, 0},
182181
i64)
183182
.getResult()
184183
: tensor::FromElementsOp::create(rewriter, loc, type, haloSizes)
@@ -307,8 +306,7 @@ class ConvertProcessLinearIndexOp
307306
Value commWorld =
308307
mpi::CommWorldOp::create(rewriter, loc, mpi::CommType::get(ctx));
309308
auto rank =
310-
rewriter
311-
.create<mpi::CommRankOp>(
309+
mpi::CommRankOp::create(rewriter,
312310
loc,
313311
TypeRange{mpi::RetvalType::get(ctx), rewriter.getI32Type()},
314312
commWorld)
@@ -704,8 +702,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
704702
for (auto &sz : haloSizes) {
705703
if (auto value = dyn_cast<Value>(sz))
706704
sz =
707-
rewriter
708-
.create<arith::IndexCastOp>(loc, rewriter.getIndexType(), value)
705+
arith::IndexCastOp::create(rewriter, loc, rewriter.getIndexType(), value)
709706
.getResult();
710707
}
711708

@@ -758,8 +755,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
758755
assert(currHaloDim >= 0 && (size_t)currHaloDim < haloSizes.size() / 2);
759756
// Get the linearized ids of the neighbors (down and up) for the
760757
// given split
761-
auto tmp = rewriter
762-
.create<NeighborsLinearIndicesOp>(loc, grid, myMultiIndex,
758+
auto tmp = NeighborsLinearIndicesOp::create(rewriter, loc, grid, myMultiIndex,
763759
splitAxes)
764760
.getResults();
765761
// MPI operates on i32...

mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp

Lines changed: 19 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -569,8 +569,7 @@ static Value createLinalgBodyCalculationForElementwiseOp(
569569
// to UIToFP.
570570
if (srcTy.isUnsignedInteger() && isa<FloatType>(dstTy)) {
571571
auto unrealizedCast =
572-
rewriter
573-
.create<UnrealizedConversionCastOp>(
572+
UnrealizedConversionCastOp::create(rewriter,
574573
loc, rewriter.getIntegerType(srcTy.getIntOrFloatBitWidth()),
575574
args[0])
576575
.getResult(0);
@@ -868,8 +867,7 @@ static Value broadcastDynamicDimension(PatternRewriter &rewriter, Location loc,
868867

869868
// Emit 'linalg.generic' op
870869
auto resultTensor =
871-
opBuilder
872-
.create<linalg::GenericOp>(
870+
linalg::GenericOp::create(opBuilder,
873871
loc, outputTensor.getType(), operand, outputTensor, affineMaps,
874872
getNParallelLoopsAttrs(rank),
875873
[&](OpBuilder &opBuilder, Location loc, ValueRange blockArgs) {
@@ -1156,8 +1154,7 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
11561154

11571155
// First fill the output buffer with the init value.
11581156
auto emptyTensor =
1159-
rewriter
1160-
.create<tensor::EmptyOp>(loc, reduceShape, resultTy.getElementType(),
1157+
tensor::EmptyOp::create(rewriter, loc, reduceShape, resultTy.getElementType(),
11611158
dynDims)
11621159
.getResult();
11631160

@@ -1167,8 +1164,7 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
11671164
op, "No initial value found for reduction operation");
11681165

11691166
auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
1170-
auto filledTensor = rewriter
1171-
.create<linalg::FillOp>(loc, ValueRange{fillValue},
1167+
auto filledTensor = linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
11721168
ValueRange{emptyTensor})
11731169
.result();
11741170
outputs.push_back(filledTensor);
@@ -1186,13 +1182,11 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
11861182
auto trueAttr = rewriter.getBoolAttr(true);
11871183
auto trueValue = arith::ConstantOp::create(rewriter, loc, trueAttr);
11881184
auto emptyBoolTensor =
1189-
rewriter
1190-
.create<tensor::EmptyOp>(loc, reduceShape, trueValue.getType(),
1185+
tensor::EmptyOp::create(rewriter, loc, reduceShape, trueValue.getType(),
11911186
dynDims)
11921187
.getResult();
11931188
auto allResultsNaNTensor =
1194-
rewriter
1195-
.create<linalg::FillOp>(loc, ValueRange{trueValue},
1189+
linalg::FillOp::create(rewriter, loc, ValueRange{trueValue},
11961190
ValueRange{emptyBoolTensor})
11971191
.result();
11981192
// Note that because the linalg::ReduceOp has two variadic arguments
@@ -1261,21 +1255,18 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
12611255
APFloat::getNaN(cast<FloatType>(elementTy).getFloatSemantics(), false));
12621256
auto nanValue = arith::ConstantOp::create(rewriter, loc, nanValueAttr);
12631257
auto emptyNanTensor =
1264-
rewriter
1265-
.create<tensor::EmptyOp>(loc, reduceShape,
1258+
tensor::EmptyOp::create(rewriter, loc, reduceShape,
12661259
resultTy.getElementType(), dynDims)
12671260
.getResult();
12681261
auto nanFilledTensor =
1269-
rewriter
1270-
.create<linalg::FillOp>(loc, ValueRange{nanValue},
1262+
linalg::FillOp::create(rewriter, loc, ValueRange{nanValue},
12711263
ValueRange{emptyNanTensor})
12721264
.result();
12731265

12741266
// Create an empty tensor, non need to fill this since it will be
12751267
// overwritten by the select.
12761268
auto finalEmptyTensor =
1277-
rewriter
1278-
.create<tensor::EmptyOp>(loc, reduceShape,
1269+
tensor::EmptyOp::create(rewriter, loc, reduceShape,
12791270
resultTy.getElementType(), dynDims)
12801271
.getResult();
12811272

@@ -1503,8 +1494,7 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
15031494
Value shift = shiftConstant ? shiftConstant : blockArgs[shiftArg];
15041495

15051496
if (valueTy.isUnsignedInteger()) {
1506-
value = nestedBuilder
1507-
.create<UnrealizedConversionCastOp>(
1497+
value = UnrealizedConversionCastOp::create(nestedBuilder,
15081498
nestedLoc,
15091499
nestedBuilder.getIntegerType(
15101500
valueTy.getIntOrFloatBitWidth()),
@@ -1557,8 +1547,7 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
15571547
}
15581548

15591549
if (outIntType.isUnsignedInteger()) {
1560-
value = nestedBuilder
1561-
.create<UnrealizedConversionCastOp>(nestedLoc,
1550+
value = UnrealizedConversionCastOp::create(nestedBuilder, nestedLoc,
15621551
outIntType, value)
15631552
.getResult(0);
15641553
}
@@ -2095,8 +2084,7 @@ class ReverseConverter : public OpRewritePattern<tosa::ReverseOp> {
20952084
Value axisDimSize = tensor::DimOp::create(rewriter, loc, input, axis);
20962085

20972086
// First fill the output buffer with the init value.
2098-
auto emptyTensor = rewriter
2099-
.create<tensor::EmptyOp>(loc, inputTy.getShape(),
2087+
auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, inputTy.getShape(),
21002088
inputTy.getElementType(),
21012089
ArrayRef<Value>({dynDims}))
21022090
.getResult();
@@ -2241,21 +2229,18 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
22412229
}
22422230

22432231
// First fill the output buffer for the index.
2244-
auto emptyTensorIdx = rewriter
2245-
.create<tensor::EmptyOp>(loc, resultTy.getShape(),
2232+
auto emptyTensorIdx = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
22462233
outElementTy, dynDims)
22472234
.getResult();
22482235
auto fillValueIdx = arith::ConstantOp::create(
22492236
rewriter, loc, rewriter.getIntegerAttr(outElementTy, 0));
22502237
auto filledTensorIdx =
2251-
rewriter
2252-
.create<linalg::FillOp>(loc, ValueRange{fillValueIdx},
2238+
linalg::FillOp::create(rewriter, loc, ValueRange{fillValueIdx},
22532239
ValueRange{emptyTensorIdx})
22542240
.result();
22552241

22562242
// Second fill the output buffer for the running max.
2257-
auto emptyTensorMax = rewriter
2258-
.create<tensor::EmptyOp>(loc, resultTy.getShape(),
2243+
auto emptyTensorMax = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
22592244
inElementTy, dynDims)
22602245
.getResult();
22612246
auto fillValueMaxAttr =
@@ -2268,8 +2253,7 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
22682253
auto fillValueMax =
22692254
arith::ConstantOp::create(rewriter, loc, fillValueMaxAttr);
22702255
auto filledTensorMax =
2271-
rewriter
2272-
.create<linalg::FillOp>(loc, ValueRange{fillValueMax},
2256+
linalg::FillOp::create(rewriter, loc, ValueRange{fillValueMax},
22732257
ValueRange{emptyTensorMax})
22742258
.result();
22752259

@@ -2371,8 +2355,7 @@ class GatherConverter : public OpConversionPattern<tosa::GatherOp> {
23712355

23722356
auto loc = op.getLoc();
23732357
auto emptyTensor =
2374-
rewriter
2375-
.create<tensor::EmptyOp>(loc, resultTy.getShape(), resultElementTy,
2358+
tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(), resultElementTy,
23762359
dynamicDims)
23772360
.getResult();
23782361

@@ -2448,8 +2431,7 @@ class TableConverter : public OpRewritePattern<tosa::TableOp> {
24482431
}
24492432
}
24502433

2451-
auto emptyTensor = rewriter
2452-
.create<tensor::EmptyOp>(loc, resultTy.getShape(),
2434+
auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
24532435
resultElementTy, dynDims)
24542436
.getResult();
24552437

@@ -2585,8 +2567,7 @@ struct RFFT2dConverter final : public OpRewritePattern<RFFT2dOp> {
25852567
tensor::EmptyOp::create(rewriter, loc, type, dynamicSizes);
25862568
auto fillValueAttr = rewriter.getZeroAttr(type.getElementType());
25872569
auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
2588-
auto filledTensor = rewriter
2589-
.create<linalg::FillOp>(loc, ValueRange{fillValue},
2570+
auto filledTensor = linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
25902571
ValueRange{emptyTensor})
25912572
.result();
25922573
return filledTensor;

0 commit comments

Comments (0)