[mlir][NFC] update mlir/Dialect create APIs (25/n) #149932

Merged: 1 commit, Jul 21, 2025
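This PR is one step in a mechanical NFC series migrating op construction from the builder's member-function template API to the static `create` method on each op class. Every hunk below has the same shape: the builder moves from receiver to first argument, and the op class moves from template argument to the class whose `create` is called. A minimal sketch of the before/after, based on the `tosa.pad` call in the first file (names like `padTy` and `paddings` are illustrative stand-ins for whatever the call site already has):

```cpp
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

static Value buildPad(PatternRewriter &rewriter, Location loc, Type padTy,
                      Value input, Value paddings, Value padConst) {
  // Old form: op class as a template argument on the builder member.
  //   auto padOp = rewriter.create<tosa::PadOp>(loc, padTy, input,
  //                                             paddings, padConst);
  // New form: static create on the op class, builder passed explicitly.
  auto padOp = tosa::PadOp::create(rewriter, loc, padTy, input, paddings,
                                   padConst);
  return padOp.getResult();
}
```

The argument lists are otherwise untouched, which is what makes the series NFC: each rewrite only reshuffles the builder and the op class.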
12 changes: 6 additions & 6 deletions mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -845,9 +845,9 @@ struct PadSliceOptimization : public OpRewritePattern<tosa::SliceOp> {
getTosaConstShape(rewriter, sliceOp.getLoc(), newPadPaddings);
auto newPadTy =
RankedTensorType::get(newPadShape, inputTy.getElementType());
-auto newPadOp = rewriter.create<tosa::PadOp>(
-    padOp.getLoc(), newPadTy, padOp.getInput1(), newPaddingsOp,
-    padOp.getPadConst());
+auto newPadOp = tosa::PadOp::create(rewriter, padOp.getLoc(), newPadTy,
+    padOp.getInput1(), newPaddingsOp,
+    padOp.getPadConst());

// Update SliceOp and point to new PadOp
auto newStartOp =
@@ -897,9 +897,9 @@ struct SliceDynamicSizeCanonicalization
}

auto size_op = getTosaConstShape(rewriter, sliceOp.getLoc(), sliceSizes);
-auto newSliceOp = rewriter.create<tosa::SliceOp>(
-    sliceOp.getLoc(), sliceOp.getType(), sliceOp.getInput1(),
-    sliceOp.getStart(), size_op);
+auto newSliceOp =
+    tosa::SliceOp::create(rewriter, sliceOp.getLoc(), sliceOp.getType(),
+        sliceOp.getInput1(), sliceOp.getStart(), size_op);

rewriter.replaceOp(sliceOp, newSliceOp.getResult());
return success();
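For orientation, calls like these live inside `OpRewritePattern` subclasses. A stripped-down pattern showing where the new static `create` calls sit (illustrative only: it rebuilds a `tosa.slice` as-is, so a real pattern must change something or the rewrite driver will loop; the `getSize()` accessor is assumed alongside the `getInput1()`/`getStart()` accessors used above):

```cpp
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

struct ExampleSliceRewrite : public OpRewritePattern<tosa::SliceOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(tosa::SliceOp sliceOp,
                                PatternRewriter &rewriter) const override {
    // New-style construction: SliceOp::create(rewriter, ...).
    auto newSliceOp =
        tosa::SliceOp::create(rewriter, sliceOp.getLoc(), sliceOp.getType(),
                              sliceOp.getInput1(), sliceOp.getStart(),
                              sliceOp.getSize());
    rewriter.replaceOp(sliceOp, newSliceOp.getResult());
    return success();
  }
};
```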
16 changes: 8 additions & 8 deletions mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -180,12 +180,12 @@ Operation *TosaDialect::materializeConstant(OpBuilder &builder, Attribute value,
// Tosa dialect constants only support ElementsAttr unlike standard dialect
// constant which supports all attributes.
if (llvm::isa<shapeType>(type) && llvm::isa<DenseIntElementsAttr>(value)) {
-return builder.create<tosa::ConstShapeOp>(
-    loc, type, llvm::cast<DenseIntElementsAttr>(value));
+return tosa::ConstShapeOp::create(builder, loc, type,
+    llvm::cast<DenseIntElementsAttr>(value));
}
if (llvm::isa<ElementsAttr>(value))
-return builder.create<tosa::ConstOp>(loc, type,
-    llvm::cast<ElementsAttr>(value));
+return tosa::ConstOp::create(builder, loc, type,
+    llvm::cast<ElementsAttr>(value));
return nullptr;
}

@@ -323,7 +323,7 @@ Value mlir::tosa::createPadConstTensor(OpBuilder &builder, Location loc,
builder.getFloatAttr(srcElemType, val))
: DenseElementsAttr::get(padConstEType,
builder.getIntegerAttr(srcElemType, val))};
-return builder.create<tosa::ConstOp>(loc, padConstType, padConstAttr);
+return tosa::ConstOp::create(builder, loc, padConstType, padConstAttr);
}

//===----------------------------------------------------------------------===//
@@ -2415,7 +2415,7 @@ LogicalResult TransposeOp::reifyResultShapes(
int32_t dimInInput = transposePerms[dim];
if (inputType.isDynamicDim(dimInInput))
returnedDims[dim] =
-    builder.create<tensor::DimOp>(getLoc(), input, dimInInput)
+    tensor::DimOp::create(builder, getLoc(), input, dimInInput)
.getResult();
else
returnedDims[dim] =
@@ -3947,12 +3947,12 @@ std::optional<Value> mlir::tosa::createZeroPointTensor(OpBuilder &builder,
if (llvm::isa<FloatType>(srcElemType)) {
auto zpAttr = DenseElementsAttr::get(
zpType, builder.getFloatAttr(srcElemType, static_cast<double>(zp)));
-return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);
+return tosa::ConstOp::create(builder, loc, zpType, zpAttr);
}
if (llvm::isa<IntegerType>(srcElemType)) {
auto zpAttr =
DenseElementsAttr::get(zpType, builder.getIntegerAttr(srcElemType, zp));
-return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);
+return tosa::ConstOp::create(builder, loc, zpType, zpAttr);
}
llvm::errs() << "zero point is not allowed for unsupported data types\n";
return std::nullopt;
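The zero-point helper above reduces to a small recipe: build a splat `DenseElementsAttr` over the constant's tensor type, then materialize it with `tosa::ConstOp::create`. A self-contained sketch of the float branch (the `zpType` construction is not visible in this hunk; a rank-1 single-element tensor is assumed here):

```cpp
#include "mlir/Dialect/Tosa/IR/TosaOps.h"

using namespace mlir;

// Builds a single-element splat constant holding the zero point, mirroring
// the FloatType branch of createZeroPointTensor above.
static Value buildFloatZeroPoint(OpBuilder &builder, Location loc,
                                 FloatType elemType, double zp) {
  auto zpType = RankedTensorType::get({1}, elemType); // assumed shape
  auto zpAttr =
      DenseElementsAttr::get(zpType, builder.getFloatAttr(elemType, zp));
  return tosa::ConstOp::create(builder, loc, zpType, zpAttr);
}
```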
22 changes: 11 additions & 11 deletions mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -90,12 +90,12 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {

if (inputETy != resultETy) {
inputType = inputType.clone(resultETy);
-input = rewriter.create<tosa::CastOp>(op.getLoc(), inputType, input);
+input = tosa::CastOp::create(rewriter, op.getLoc(), inputType, input);
}

if (weightETy != resultETy) {
weightType = weightType.clone(resultETy);
-weight = rewriter.create<tosa::CastOp>(op.getLoc(), weightType, weight);
+weight = tosa::CastOp::create(rewriter, op.getLoc(), weightType, weight);
}

if (iZp != 0 || wZp != 0) {
@@ -109,9 +109,9 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
auto zpTy = RankedTensorType::get(shape, ety);
auto zpAttr =
DenseElementsAttr::get(zpTy, rewriter.getIntegerAttr(ety, zp));
-auto zpVal = rewriter.create<tosa::ConstOp>(op.getLoc(), zpTy, zpAttr);
-return rewriter.create<tosa::SubOp>(op.getLoc(), val.getType(), val,
-    zpVal);
+auto zpVal = tosa::ConstOp::create(rewriter, op.getLoc(), zpTy, zpAttr);
+return tosa::SubOp::create(rewriter, op.getLoc(), val.getType(), val,
+    zpVal);
};

input = applyZp(input, iZp);
@@ -138,10 +138,10 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
auto padTy = RankedTensorType::get({1}, inputETy);
auto padAttr = DenseElementsAttr::get(padTy, zeroAttr);
Value padVal =
-    rewriter.create<tosa::ConstOp>(op->getLoc(), padTy, padAttr);
+    tosa::ConstOp::create(rewriter, op->getLoc(), padTy, padAttr);
inputType = RankedTensorType::get(newShape, inputETy);
-input = rewriter.create<tosa::PadOp>(op->getLoc(), inputType, input,
-    padSizeVal, padVal);
+input = tosa::PadOp::create(rewriter, op->getLoc(), inputType, input,
+    padSizeVal, padVal);
}

// Perform an elementwise mul over the reshaped input and weight.
@@ -161,7 +161,7 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
auto shiftZeroAttr = DenseElementsAttr::get(
shiftType, rewriter.getIntegerAttr(shiftElementType, 0));
Value constZero =
-    rewriter.create<tosa::ConstOp>(op.getLoc(), shiftType, shiftZeroAttr);
+    tosa::ConstOp::create(rewriter, op.getLoc(), shiftType, shiftZeroAttr);
Value mulValue = rewriter
.create<tosa::MulOp>(op.getLoc(), mulShapeType, input,
weight, constZero)
@@ -174,8 +174,8 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
dyn_cast<RankedTensorType>(input.getType()).getElementType());
auto outputShapeValue =
getTosaConstShape(rewriter, op->getLoc(), outputShape);
-Value outputValue = rewriter.create<tosa::ReshapeOp>(
-    op.getLoc(), outputShapeType, mulValue, outputShapeValue);
+Value outputValue = tosa::ReshapeOp::create(
+    rewriter, op.getLoc(), outputShapeType, mulValue, outputShapeValue);

Value bias = op.getBias();
if (EqualizeRanks(rewriter, op.getLoc(), outputValue, bias).failed()) {
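One chained call in this file, the `rewriter.create<tosa::MulOp>(...).getResult()` visible as unchanged context above, keeps the member form in this diff. Under the same convention it would read as follows (a sketch, not part of this PR; operand names mirror the surrounding pattern):

```cpp
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Static-create spelling of the chained mul left in member form above.
static Value buildMul(PatternRewriter &rewriter, Location loc,
                      ShapedType mulShapeType, Value input, Value weight,
                      Value constZero) {
  return tosa::MulOp::create(rewriter, loc, mulShapeType, input, weight,
                             constZero)
      .getResult();
}
```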
22 changes: 12 additions & 10 deletions mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -62,14 +62,16 @@ class TransposeConvNonStridedConverter
convPad[2] = kernelWidth - 1 + pad[2];
convPad[3] = kernelWidth - 1 + pad[3];

-auto reverse1 = rewriter.create<tosa::ReverseOp>(
-    loc, weightTy, weight, /* axis = */ rewriter.getI32IntegerAttr(1));
-auto reverse2 = rewriter.create<tosa::ReverseOp>(
-    loc, weightTy, reverse1, /* axis = */ rewriter.getI32IntegerAttr(2));
-
-Value conv2d = rewriter.create<tosa::Conv2DOp>(
-    loc, resultTy, input, reverse2, bias, op.getInputZp(), op.getWeightZp(),
-    rewriter.getDenseI64ArrayAttr(convPad),
+auto reverse1 =
+    tosa::ReverseOp::create(rewriter, loc, weightTy, weight,
+        /* axis = */ rewriter.getI32IntegerAttr(1));
+auto reverse2 =
+    tosa::ReverseOp::create(rewriter, loc, weightTy, reverse1,
+        /* axis = */ rewriter.getI32IntegerAttr(2));
+
+Value conv2d = tosa::Conv2DOp::create(
+    rewriter, loc, resultTy, input, reverse2, bias, op.getInputZp(),
+    op.getWeightZp(), rewriter.getDenseI64ArrayAttr(convPad),
rewriter.getDenseI64ArrayAttr(stride),
rewriter.getDenseI64ArrayAttr({1, 1}),
/* acc_type = */ op.getAccType());
@@ -216,8 +218,8 @@ class TransposeConvStridedConverter
inputPaddingVal, inputPadConst);

// We use a zero bias as we need to broadcast the bias.
-auto zeroBias = rewriter.create<tosa::ConstOp>(
-    loc,
+auto zeroBias = tosa::ConstOp::create(
+    rewriter, loc,
RankedTensorType::get({outputChannels * stride[0] * stride[1]},
biasETy),
DenseElementsAttr::get(
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -112,7 +112,7 @@ class TypeModificationState {
OpBuilder builder{value.getContext()};
builder.setInsertionPointAfter(value.getDefiningOp());
castValue =
-    builder.create<tensor::CastOp>(value.getLoc(), oldType, value);
+    tensor::CastOp::create(builder, value.getLoc(), oldType, value);
}

use->set(castValue);
8 changes: 4 additions & 4 deletions mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
@@ -419,8 +419,8 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
return std::nullopt;
}
ImplicitLocOpBuilder builder(reshapeOp.getLoc(), rewriter);
-auto foldedReshape = rewriter.create<ReshapeOp>(
-    reshapeOp.getLoc(),
+auto foldedReshape = ReshapeOp::create(
+    rewriter, reshapeOp.getLoc(),
RankedTensorType::get(applyTOSAPermutation(shape, hoistedPerms),
reshapeOutputType.getElementType()),
reshapeOp.getInput1(),
@@ -439,8 +439,8 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
if (!maybeNewDenseAttr.has_value())
return std::nullopt;
auto newDenseAttr = maybeNewDenseAttr.value();
-auto newConstOp = rewriter.create<ConstOp>(
-    constOp.getLoc(), newDenseAttr.getType(), newDenseAttr);
+auto newConstOp = ConstOp::create(rewriter, constOp.getLoc(),
+    newDenseAttr.getType(), newDenseAttr);
return newConstOp->getResult(0);
}

4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
@@ -37,7 +37,7 @@ void mlir::tosa::populateTosaTypeConversion(TypeConverter &converter) {
if (inputs.size() != 1)
return Value();

-return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+return UnrealizedConversionCastOp::create(builder, loc, resultType, inputs)
.getResult(0);
});
converter.addTargetMaterialization([&](OpBuilder &builder, Type resultType,
@@ -46,7 +46,7 @@
if (inputs.size() != 1)
return Value();

-return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+return UnrealizedConversionCastOp::create(builder, loc, resultType, inputs)
.getResult(0);
});
}
18 changes: 9 additions & 9 deletions mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
@@ -33,18 +33,18 @@ mlir::tosa::condenseValues(const SmallVector<Value> &values) {

Value mlir::tosa::clampFloatHelper(Location loc, Value arg, Value min,
Value max, OpBuilder &rewriter) {
-Value minValue = rewriter.create<arith::MinimumFOp>(loc, arg, max);
-return rewriter.create<arith::MaximumFOp>(loc, minValue, min);
+Value minValue = arith::MinimumFOp::create(rewriter, loc, arg, max);
+return arith::MaximumFOp::create(rewriter, loc, minValue, min);
}

Value mlir::tosa::clampIntHelper(Location loc, Value arg, Value min, Value max,
OpBuilder &rewriter, bool isUnsigned) {
if (isUnsigned) {
-auto minOrArg = rewriter.create<arith::MaxUIOp>(loc, min, arg);
-return rewriter.create<arith::MinUIOp>(loc, max, minOrArg);
+auto minOrArg = arith::MaxUIOp::create(rewriter, loc, min, arg);
+return arith::MinUIOp::create(rewriter, loc, max, minOrArg);
}
-auto minOrArg = rewriter.create<arith::MaxSIOp>(loc, min, arg);
-return rewriter.create<arith::MinSIOp>(loc, max, minOrArg);
+auto minOrArg = arith::MaxSIOp::create(rewriter, loc, min, arg);
+return arith::MinSIOp::create(rewriter, loc, max, minOrArg);
}

bool mlir::tosa::validIntegerRange(IntegerType ty, int64_t value) {
@@ -144,8 +144,8 @@ LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder,
ArrayRef<int64_t>(reshapeOutputShape), reshapeInputType.getElementType());
auto reshapeOutputShapeValue = getTosaConstShape(builder, reshapeOutputShape);

-auto reshapeLower = builder.create<tosa::ReshapeOp>(
-    reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue);
+auto reshapeLower = tosa::ReshapeOp::create(
+    builder, reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue);

if (input1Rank > input2Rank) {
input1 = higherTensorValue;
@@ -162,7 +162,7 @@ Value mlir::tosa::getTosaConstShape(ImplicitLocOpBuilder &builder,
llvm::ArrayRef<int64_t> shape) {
auto attr = builder.getIndexTensorAttr(convertFromMlirShape(shape));
auto type = mlir::tosa::shapeType::get(builder.getContext(), shape.size());
-mlir::Operation *mlir_op = builder.create<tosa::ConstShapeOp>(type, attr);
+mlir::Operation *mlir_op = tosa::ConstShapeOp::create(builder, type, attr);
return mlir_op->getResult(0);
}

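This file shows both builder flavors side by side: with a plain `OpBuilder` (or `PatternRewriter`) the location is the argument right after the builder, while the `ImplicitLocOpBuilder` overloads drop it because the builder already carries a location. A sketch of the two spellings (helper names are illustrative; the second helper assumes static extents, whereas the upstream `getTosaConstShape` first runs `convertFromMlirShape`):

```cpp
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"

using namespace mlir;

// Explicit location, as in clampFloatHelper above.
static Value clampLikeFloat(OpBuilder &b, Location loc, Value arg, Value min,
                            Value max) {
  Value minValue = arith::MinimumFOp::create(b, loc, arg, max);
  return arith::MaximumFOp::create(b, loc, minValue, min);
}

// Implicit location, as in getTosaConstShape above: no loc argument.
static Value constShapeLike(ImplicitLocOpBuilder &b, ArrayRef<int64_t> shape) {
  auto attr = b.getIndexTensorAttr(shape);
  auto type = tosa::shapeType::get(b.getContext(), shape.size());
  return tosa::ConstShapeOp::create(b, type, attr)->getResult(0);
}
```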
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/UB/IR/UBOps.cpp
@@ -52,7 +52,7 @@ void UBDialect::initialize() {
Operation *UBDialect::materializeConstant(OpBuilder &builder, Attribute value,
Type type, Location loc) {
if (auto attr = dyn_cast<PoisonAttr>(value))
-return builder.create<PoisonOp>(loc, type, attr);
+return PoisonOp::create(builder, loc, type, attr);

return nullptr;
}
16 changes: 8 additions & 8 deletions mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
@@ -60,11 +60,11 @@ SmallVector<Value> x86vector::MaskCompressOp::getIntrinsicOperands(
if (adaptor.getSrc()) {
src = adaptor.getSrc();
} else if (adaptor.getConstantSrc()) {
-src = rewriter.create<LLVM::ConstantOp>(loc, opType,
-    adaptor.getConstantSrcAttr());
+src = LLVM::ConstantOp::create(rewriter, loc, opType,
+    adaptor.getConstantSrcAttr());
} else {
auto zeroAttr = rewriter.getZeroAttr(opType);
-src = rewriter.create<LLVM::ConstantOp>(loc, opType, zeroAttr);
+src = LLVM::ConstantOp::create(rewriter, loc, opType, zeroAttr);
}

return SmallVector<Value>{adaptor.getA(), src, adaptor.getK()};
@@ -77,7 +77,7 @@ x86vector::DotOp::getIntrinsicOperands(ArrayRef<Value> operands,
SmallVector<Value> intrinsicOperands(operands);
// Dot product of all elements, broadcasted to all elements.
Value scale =
-    rewriter.create<LLVM::ConstantOp>(getLoc(), rewriter.getI8Type(), 0xff);
+    LLVM::ConstantOp::create(rewriter, getLoc(), rewriter.getI8Type(), 0xff);
intrinsicOperands.push_back(scale);

return intrinsicOperands;
@@ -90,14 +90,14 @@ SmallVector<Value> x86vector::DotInt8Op::getIntrinsicOperands(
Adaptor adaptor(operands, *this);
intrinsicOprnds.push_back(adaptor.getW());
// Bitcast `a` and `b` to i32
-Value bitcast_a = rewriter.create<LLVM::BitcastOp>(
-    getLoc(),
+Value bitcast_a = LLVM::BitcastOp::create(
+    rewriter, getLoc(),
VectorType::get((getA().getType().getShape()[0] / 4),
rewriter.getIntegerType(32)),
adaptor.getA());
intrinsicOprnds.push_back(bitcast_a);
-Value bitcast_b = rewriter.create<LLVM::BitcastOp>(
-    getLoc(),
+Value bitcast_b = LLVM::BitcastOp::create(
+    rewriter, getLoc(),
VectorType::get((getB().getType().getShape()[0] / 4),
rewriter.getIntegerType(32)),
adaptor.getB());
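The `DotInt8Op` lowering above reinterprets the `i8` operand vectors as `i32` vectors a quarter as long, e.g. a `vector<16xi8>` operand becomes `vector<4xi32>`, before handing them to the intrinsic. The width arithmetic in isolation (a sketch; assumes a 1-D vector whose length is divisible by 4):

```cpp
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

// Bitcasts a 1-D integer vector value to i32 elements, shrinking the
// element count by 4x as DotInt8Op::getIntrinsicOperands does for i8 inputs.
static Value bitcastToI32Vector(OpBuilder &rewriter, Location loc, Value v) {
  auto vecTy = cast<VectorType>(v.getType());
  auto i32VecTy = VectorType::get({vecTy.getNumElements() / 4},
                                  rewriter.getI32Type());
  return LLVM::BitcastOp::create(rewriter, loc, i32VecTy, v);
}
```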