[mlir][NFC] update mlir/Dialect create APIs (25/n) #149932
Merged: makslevental merged 1 commit into llvm:main from makslevental:makslevental/update-create-25n on Jul 21, 2025
Conversation
✅ With the latest revision this PR passed the C/C++ code formatter.
See llvm#147168 for more info.
makslevental force-pushed the branch from 8d9f398 to 1ddb80e
@llvm/pr-subscribers-mlir-tosa @llvm/pr-subscribers-mlir-ub

Author: Maksim Levental (makslevental)

Changes

See #147168 for more info.

Patch is 50.55 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/149932.diff

16 Files Affected:
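Before the per-file hunks, a note on the pattern: the change is mechanical. Every builder-member call `rewriter.create<OpTy>(loc, args...)` becomes the static form `OpTy::create(rewriter, loc, args...)`, with the builder passed as the first argument. A minimal sketch using `tosa::ConstOp`, mirroring the hunks below (the `buildZeroTensor` helper is illustrative, not part of the patch):

```c++
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

// Illustrative helper showing the before/after of the mechanical rewrite.
static Value buildZeroTensor(OpBuilder &rewriter, Location loc,
                             RankedTensorType type) {
  auto zeroAttr = DenseElementsAttr::get(
      type, rewriter.getZeroAttr(type.getElementType()));
  // Before: builder-member template form.
  //   return rewriter.create<tosa::ConstOp>(loc, type, zeroAttr);
  // After: static create, with the builder as the first argument.
  return tosa::ConstOp::create(rewriter, loc, type, zeroAttr);
}
```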
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 5758d8d5ef506..606626dfe4d2c 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -845,9 +845,9 @@ struct PadSliceOptimization : public OpRewritePattern<tosa::SliceOp> {
getTosaConstShape(rewriter, sliceOp.getLoc(), newPadPaddings);
auto newPadTy =
RankedTensorType::get(newPadShape, inputTy.getElementType());
- auto newPadOp = rewriter.create<tosa::PadOp>(
- padOp.getLoc(), newPadTy, padOp.getInput1(), newPaddingsOp,
- padOp.getPadConst());
+ auto newPadOp = tosa::PadOp::create(rewriter, padOp.getLoc(), newPadTy,
+ padOp.getInput1(), newPaddingsOp,
+ padOp.getPadConst());
// Update SliceOp and point to new PadOp
auto newStartOp =
@@ -897,9 +897,9 @@ struct SliceDynamicSizeCanonicalization
}
auto size_op = getTosaConstShape(rewriter, sliceOp.getLoc(), sliceSizes);
- auto newSliceOp = rewriter.create<tosa::SliceOp>(
- sliceOp.getLoc(), sliceOp.getType(), sliceOp.getInput1(),
- sliceOp.getStart(), size_op);
+ auto newSliceOp =
+ tosa::SliceOp::create(rewriter, sliceOp.getLoc(), sliceOp.getType(),
+ sliceOp.getInput1(), sliceOp.getStart(), size_op);
rewriter.replaceOp(sliceOp, newSliceOp.getResult());
return success();
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index f0ff430bae882..5b4a2c9d85ea1 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -180,12 +180,12 @@ Operation *TosaDialect::materializeConstant(OpBuilder &builder, Attribute value,
// Tosa dialect constants only support ElementsAttr unlike standard dialect
// constant which supports all attributes.
if (llvm::isa<shapeType>(type) && llvm::isa<DenseIntElementsAttr>(value)) {
- return builder.create<tosa::ConstShapeOp>(
- loc, type, llvm::cast<DenseIntElementsAttr>(value));
+ return tosa::ConstShapeOp::create(builder, loc, type,
+ llvm::cast<DenseIntElementsAttr>(value));
}
if (llvm::isa<ElementsAttr>(value))
- return builder.create<tosa::ConstOp>(loc, type,
- llvm::cast<ElementsAttr>(value));
+ return tosa::ConstOp::create(builder, loc, type,
+ llvm::cast<ElementsAttr>(value));
return nullptr;
}
@@ -323,7 +323,7 @@ Value mlir::tosa::createPadConstTensor(OpBuilder &builder, Location loc,
builder.getFloatAttr(srcElemType, val))
: DenseElementsAttr::get(padConstEType,
builder.getIntegerAttr(srcElemType, val))};
- return builder.create<tosa::ConstOp>(loc, padConstType, padConstAttr);
+ return tosa::ConstOp::create(builder, loc, padConstType, padConstAttr);
}
//===----------------------------------------------------------------------===//
@@ -2415,7 +2415,7 @@ LogicalResult TransposeOp::reifyResultShapes(
int32_t dimInInput = transposePerms[dim];
if (inputType.isDynamicDim(dimInInput))
returnedDims[dim] =
- builder.create<tensor::DimOp>(getLoc(), input, dimInInput)
+ tensor::DimOp::create(builder, getLoc(), input, dimInInput)
.getResult();
else
returnedDims[dim] =
@@ -3947,12 +3947,12 @@ std::optional<Value> mlir::tosa::createZeroPointTensor(OpBuilder &builder,
if (llvm::isa<FloatType>(srcElemType)) {
auto zpAttr = DenseElementsAttr::get(
zpType, builder.getFloatAttr(srcElemType, static_cast<double>(zp)));
- return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);
+ return tosa::ConstOp::create(builder, loc, zpType, zpAttr);
}
if (llvm::isa<IntegerType>(srcElemType)) {
auto zpAttr =
DenseElementsAttr::get(zpType, builder.getIntegerAttr(srcElemType, zp));
- return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);
+ return tosa::ConstOp::create(builder, loc, zpType, zpAttr);
}
llvm::errs() << "zero point is not allowed for unsupported data types\n";
return std::nullopt;
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
index f6caa2a985a4d..9474299a39582 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -90,12 +90,12 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
if (inputETy != resultETy) {
inputType = inputType.clone(resultETy);
- input = rewriter.create<tosa::CastOp>(op.getLoc(), inputType, input);
+ input = tosa::CastOp::create(rewriter, op.getLoc(), inputType, input);
}
if (weightETy != resultETy) {
weightType = weightType.clone(resultETy);
- weight = rewriter.create<tosa::CastOp>(op.getLoc(), weightType, weight);
+ weight = tosa::CastOp::create(rewriter, op.getLoc(), weightType, weight);
}
if (iZp != 0 || wZp != 0) {
@@ -109,9 +109,9 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
auto zpTy = RankedTensorType::get(shape, ety);
auto zpAttr =
DenseElementsAttr::get(zpTy, rewriter.getIntegerAttr(ety, zp));
- auto zpVal = rewriter.create<tosa::ConstOp>(op.getLoc(), zpTy, zpAttr);
- return rewriter.create<tosa::SubOp>(op.getLoc(), val.getType(), val,
- zpVal);
+ auto zpVal = tosa::ConstOp::create(rewriter, op.getLoc(), zpTy, zpAttr);
+ return tosa::SubOp::create(rewriter, op.getLoc(), val.getType(), val,
+ zpVal);
};
input = applyZp(input, iZp);
@@ -138,10 +138,10 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
auto padTy = RankedTensorType::get({1}, inputETy);
auto padAttr = DenseElementsAttr::get(padTy, zeroAttr);
Value padVal =
- rewriter.create<tosa::ConstOp>(op->getLoc(), padTy, padAttr);
+ tosa::ConstOp::create(rewriter, op->getLoc(), padTy, padAttr);
inputType = RankedTensorType::get(newShape, inputETy);
- input = rewriter.create<tosa::PadOp>(op->getLoc(), inputType, input,
- padSizeVal, padVal);
+ input = tosa::PadOp::create(rewriter, op->getLoc(), inputType, input,
+ padSizeVal, padVal);
}
// Perform an elementwise mul over the reshaped input and weight.
@@ -161,7 +161,7 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
auto shiftZeroAttr = DenseElementsAttr::get(
shiftType, rewriter.getIntegerAttr(shiftElementType, 0));
Value constZero =
- rewriter.create<tosa::ConstOp>(op.getLoc(), shiftType, shiftZeroAttr);
+ tosa::ConstOp::create(rewriter, op.getLoc(), shiftType, shiftZeroAttr);
Value mulValue = rewriter
.create<tosa::MulOp>(op.getLoc(), mulShapeType, input,
weight, constZero)
@@ -174,8 +174,8 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {
dyn_cast<RankedTensorType>(input.getType()).getElementType());
auto outputShapeValue =
getTosaConstShape(rewriter, op->getLoc(), outputShape);
- Value outputValue = rewriter.create<tosa::ReshapeOp>(
- op.getLoc(), outputShapeType, mulValue, outputShapeValue);
+ Value outputValue = tosa::ReshapeOp::create(
+ rewriter, op.getLoc(), outputShapeType, mulValue, outputShapeValue);
Value bias = op.getBias();
if (EqualizeRanks(rewriter, op.getLoc(), outputValue, bias).failed()) {
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index df6d52615478e..dc5c51b0abad5 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -62,14 +62,16 @@ class TransposeConvNonStridedConverter
convPad[2] = kernelWidth - 1 + pad[2];
convPad[3] = kernelWidth - 1 + pad[3];
- auto reverse1 = rewriter.create<tosa::ReverseOp>(
- loc, weightTy, weight, /* axis = */ rewriter.getI32IntegerAttr(1));
- auto reverse2 = rewriter.create<tosa::ReverseOp>(
- loc, weightTy, reverse1, /* axis = */ rewriter.getI32IntegerAttr(2));
-
- Value conv2d = rewriter.create<tosa::Conv2DOp>(
- loc, resultTy, input, reverse2, bias, op.getInputZp(), op.getWeightZp(),
- rewriter.getDenseI64ArrayAttr(convPad),
+ auto reverse1 =
+ tosa::ReverseOp::create(rewriter, loc, weightTy, weight,
+ /* axis = */ rewriter.getI32IntegerAttr(1));
+ auto reverse2 =
+ tosa::ReverseOp::create(rewriter, loc, weightTy, reverse1,
+ /* axis = */ rewriter.getI32IntegerAttr(2));
+
+ Value conv2d = tosa::Conv2DOp::create(
+ rewriter, loc, resultTy, input, reverse2, bias, op.getInputZp(),
+ op.getWeightZp(), rewriter.getDenseI64ArrayAttr(convPad),
rewriter.getDenseI64ArrayAttr(stride),
rewriter.getDenseI64ArrayAttr({1, 1}),
/* acc_type = */ op.getAccType());
@@ -216,8 +218,8 @@ class TransposeConvStridedConverter
inputPaddingVal, inputPadConst);
// We use a zero bias as we need to broadcast the bias.
- auto zeroBias = rewriter.create<tosa::ConstOp>(
- loc,
+ auto zeroBias = tosa::ConstOp::create(
+ rewriter, loc,
RankedTensorType::get({outputChannels * stride[0] * stride[1]},
biasETy),
DenseElementsAttr::get(
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
index a9e98c8908e15..4d347c02ee16d 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -112,7 +112,7 @@ class TypeModificationState {
OpBuilder builder{value.getContext()};
builder.setInsertionPointAfter(value.getDefiningOp());
castValue =
- builder.create<tensor::CastOp>(value.getLoc(), oldType, value);
+ tensor::CastOp::create(builder, value.getLoc(), oldType, value);
}
use->set(castValue);
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
index db7a3c671dedc..5590927c3f774 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp
@@ -419,8 +419,8 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
return std::nullopt;
}
ImplicitLocOpBuilder builder(reshapeOp.getLoc(), rewriter);
- auto foldedReshape = rewriter.create<ReshapeOp>(
- reshapeOp.getLoc(),
+ auto foldedReshape = ReshapeOp::create(
+ rewriter, reshapeOp.getLoc(),
RankedTensorType::get(applyTOSAPermutation(shape, hoistedPerms),
reshapeOutputType.getElementType()),
reshapeOp.getInput1(),
@@ -439,8 +439,8 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
if (!maybeNewDenseAttr.has_value())
return std::nullopt;
auto newDenseAttr = maybeNewDenseAttr.value();
- auto newConstOp = rewriter.create<ConstOp>(
- constOp.getLoc(), newDenseAttr.getType(), newDenseAttr);
+ auto newConstOp = ConstOp::create(rewriter, constOp.getLoc(),
+ newDenseAttr.getType(), newDenseAttr);
return newConstOp->getResult(0);
}
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
index 3b697a2ee3e47..677d8e9904a67 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaTypeConverters.cpp
@@ -37,7 +37,7 @@ void mlir::tosa::populateTosaTypeConversion(TypeConverter &converter) {
if (inputs.size() != 1)
return Value();
- return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+ return UnrealizedConversionCastOp::create(builder, loc, resultType, inputs)
.getResult(0);
});
converter.addTargetMaterialization([&](OpBuilder &builder, Type resultType,
@@ -46,7 +46,7 @@ void mlir::tosa::populateTosaTypeConversion(TypeConverter &converter) {
if (inputs.size() != 1)
return Value();
- return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+ return UnrealizedConversionCastOp::create(builder, loc, resultType, inputs)
.getResult(0);
});
}
diff --git a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
index 9844abcc34cb1..69eda03e03ab3 100644
--- a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
+++ b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp
@@ -33,18 +33,18 @@ mlir::tosa::condenseValues(const SmallVector<Value> &values) {
Value mlir::tosa::clampFloatHelper(Location loc, Value arg, Value min,
Value max, OpBuilder &rewriter) {
- Value minValue = rewriter.create<arith::MinimumFOp>(loc, arg, max);
- return rewriter.create<arith::MaximumFOp>(loc, minValue, min);
+ Value minValue = arith::MinimumFOp::create(rewriter, loc, arg, max);
+ return arith::MaximumFOp::create(rewriter, loc, minValue, min);
}
Value mlir::tosa::clampIntHelper(Location loc, Value arg, Value min, Value max,
OpBuilder &rewriter, bool isUnsigned) {
if (isUnsigned) {
- auto minOrArg = rewriter.create<arith::MaxUIOp>(loc, min, arg);
- return rewriter.create<arith::MinUIOp>(loc, max, minOrArg);
+ auto minOrArg = arith::MaxUIOp::create(rewriter, loc, min, arg);
+ return arith::MinUIOp::create(rewriter, loc, max, minOrArg);
}
- auto minOrArg = rewriter.create<arith::MaxSIOp>(loc, min, arg);
- return rewriter.create<arith::MinSIOp>(loc, max, minOrArg);
+ auto minOrArg = arith::MaxSIOp::create(rewriter, loc, min, arg);
+ return arith::MinSIOp::create(rewriter, loc, max, minOrArg);
}
bool mlir::tosa::validIntegerRange(IntegerType ty, int64_t value) {
@@ -144,8 +144,8 @@ LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder,
ArrayRef<int64_t>(reshapeOutputShape), reshapeInputType.getElementType());
auto reshapeOutputShapeValue = getTosaConstShape(builder, reshapeOutputShape);
- auto reshapeLower = builder.create<tosa::ReshapeOp>(
- reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue);
+ auto reshapeLower = tosa::ReshapeOp::create(
+ builder, reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue);
if (input1Rank > input2Rank) {
input1 = higherTensorValue;
@@ -162,7 +162,7 @@ Value mlir::tosa::getTosaConstShape(ImplicitLocOpBuilder &builder,
llvm::ArrayRef<int64_t> shape) {
auto attr = builder.getIndexTensorAttr(convertFromMlirShape(shape));
auto type = mlir::tosa::shapeType::get(builder.getContext(), shape.size());
- mlir::Operation *mlir_op = builder.create<tosa::ConstShapeOp>(type, attr);
+ mlir::Operation *mlir_op = tosa::ConstShapeOp::create(builder, type, attr);
return mlir_op->getResult(0);
}
diff --git a/mlir/lib/Dialect/UB/IR/UBOps.cpp b/mlir/lib/Dialect/UB/IR/UBOps.cpp
index 5b2cfe7bf4264..ee523f9522953 100644
--- a/mlir/lib/Dialect/UB/IR/UBOps.cpp
+++ b/mlir/lib/Dialect/UB/IR/UBOps.cpp
@@ -52,7 +52,7 @@ void UBDialect::initialize() {
Operation *UBDialect::materializeConstant(OpBuilder &builder, Attribute value,
Type type, Location loc) {
if (auto attr = dyn_cast<PoisonAttr>(value))
- return builder.create<PoisonOp>(loc, type, attr);
+ return PoisonOp::create(builder, loc, type, attr);
return nullptr;
}
diff --git a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
index 7de32f7cbfb8b..0fa353abc4972 100644
--- a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
+++ b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp
@@ -60,11 +60,11 @@ SmallVector<Value> x86vector::MaskCompressOp::getIntrinsicOperands(
if (adaptor.getSrc()) {
src = adaptor.getSrc();
} else if (adaptor.getConstantSrc()) {
- src = rewriter.create<LLVM::ConstantOp>(loc, opType,
- adaptor.getConstantSrcAttr());
+ src = LLVM::ConstantOp::create(rewriter, loc, opType,
+ adaptor.getConstantSrcAttr());
} else {
auto zeroAttr = rewriter.getZeroAttr(opType);
- src = rewriter.create<LLVM::ConstantOp>(loc, opType, zeroAttr);
+ src = LLVM::ConstantOp::create(rewriter, loc, opType, zeroAttr);
}
return SmallVector<Value>{adaptor.getA(), src, adaptor.getK()};
@@ -77,7 +77,7 @@ x86vector::DotOp::getIntrinsicOperands(ArrayRef<Value> operands,
SmallVector<Value> intrinsicOperands(operands);
// Dot product of all elements, broadcasted to all elements.
Value scale =
- rewriter.create<LLVM::ConstantOp>(getLoc(), rewriter.getI8Type(), 0xff);
+ LLVM::ConstantOp::create(rewriter, getLoc(), rewriter.getI8Type(), 0xff);
intrinsicOperands.push_back(scale);
return intrinsicOperands;
@@ -90,14 +90,14 @@ SmallVector<Value> x86vector::DotInt8Op::getIntrinsicOperands(
Adaptor adaptor(operands, *this);
intrinsicOprnds.push_back(adaptor.getW());
// Bitcast `a` and `b` to i32
- Value bitcast_a = rewriter.create<LLVM::BitcastOp>(
- getLoc(),
+ Value bitcast_a = LLVM::BitcastOp::create(
+ rewriter, getLoc(),
VectorType::get((getA().getType().getShape()[0] / 4),
rewriter.getIntegerType(32)),
adaptor.getA());
intrinsicOprnds.push_back(bitcast_a);
- Value bitcast_b = rewriter.create<LLVM::BitcastOp>(
- getLoc(),
+ Value bitcast_b = LLVM::BitcastOp::create(
+ rewriter, getLoc(),
VectorType::get((getB().getType().getShape()[0] / 4),
rewriter.getIntegerType(32)),
adaptor.getB());
diff --git a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
index 87f7867fe1b7c..385ec5e824051 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
@@ -37,8 +37,8 @@ Value mlir::x86vector::avx2::inline_asm::mm256BlendPsAsm(
"=x,x,x"; // Careful: constraint parser is very brittle: no ws!
SmallVector<Value> asmVals{v1, v2};
auto asmStr = llvm::formatv(asmTp, llvm::format_hex(mask, /*width=*/2)).str();
- auto asmOp = b.create<LLVM::InlineAsmOp>(
- v1.getType(), /*operands=*/asmVals, /*asm_string=*/asmStr,
+ auto asmOp = LLVM::InlineAsmOp::create(
+ b, v1.getType(), /*operands=*/asmVals, /*asm_string=*/asmStr,
/*constraints=*/asmCstr, /*has_side_effects=*/false,
/*is_align_stack=*/false, LLVM::TailCallKind::None,
/*asm_dialect=*/asmDialectAttr,
@@ -48,14 +48,14 @@ Value mlir::x86vector::avx2::inline_asm::mm256BlendPsAsm(
Value mlir::x86vector::avx2::intrin::mm256UnpackLoPs(ImplicitLocOpBuilder &b,
Value v1, Value v2) {
- return b.create<vector::ShuffleOp>(
- v1, v2, ArrayRef<int64_t>{0, 8, 1, 9, 4, 12, 5, 13});
+ return vector::ShuffleOp::create(b, v1, v2,
+ ArrayRef<int64_t>{0, 8, 1, 9, 4, 12, 5, 13});
}
Value mlir::x86vector::avx2::intrin::mm256UnpackHiPs(ImplicitLocOpBuilder &b,
Value v1, Value v2) {
- return b.create<vector::ShuffleOp>(
- v1, v2, ArrayRef<int64_t>{2, 10, 3, 11, 6, 14, 7, 15});
+ return vector::ShuffleOp::create(
+ b, v1, v2, ArrayRef<int64_t>{2, 10, 3, 11, 6, 14, 7, 15});
}
/// a a b b a a b b
/// Takes an 8 bit mask, 2 bit for each position of a[0, 3) **and** b[0, 4):
@@ -68,7 +68,7 @@ Value mlir::...
[truncated]
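One wrinkle visible in the ConversionUtils.cpp and AVXTranspose.cpp hunks: when the builder is an `ImplicitLocOpBuilder`, the static `create` takes no explicit `Location`, since the builder already carries one. A hedged sketch of that variant (the `addValues` helper is hypothetical):

```c++
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"

using namespace mlir;

// Hypothetical helper: with an ImplicitLocOpBuilder the location is
// implied, so no Location argument is passed to the static create.
static Value addValues(ImplicitLocOpBuilder &b, Value lhs, Value rhs) {
  // Before: return b.create<arith::AddIOp>(lhs, rhs);
  return arith::AddIOp::create(b, lhs, rhs);
}
```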
kuhar approved these changes on Jul 21, 2025