mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp (5 additions & 5 deletions)
@@ -1927,16 +1927,16 @@ struct AMDGPUPermlaneLowering : public ConvertOpToLLVMPattern<PermlaneSwapOp> {
 else
 llvm_unreachable("unsupported row length");

-const Value vdst0 = LLVM::ExtractValueOp::create(rewriter, loc, res, {0});
-const Value vdst1 = LLVM::ExtractValueOp::create(rewriter, loc, res, {1});
+Value vdst0 = LLVM::ExtractValueOp::create(rewriter, loc, res, {0});
+Value vdst1 = LLVM::ExtractValueOp::create(rewriter, loc, res, {1});

-const Value isEqual =
-rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::eq, vdst0, v);
+Value isEqual = LLVM::ICmpOp::create(rewriter, loc,
+LLVM::ICmpPredicate::eq, vdst0, v);

 // Per `permlane(16|32)` semantics: if the first extracted element equals
 // 'v', the result is the second element; otherwise it is the first.
 Value vdstNew =
-rewriter.create<LLVM::SelectOp>(loc, isEqual, vdst1, vdst0);
+LLVM::SelectOp::create(rewriter, loc, isEqual, vdst1, vdst0);
 permuted.emplace_back(vdstNew);
 }

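Every hunk in this PR applies the same mechanical migration: the member-template builder call `rewriter.create<OpTy>(loc, ...)` becomes the static `OpTy::create(rewriter, loc, ...)` form, with the builder passed as the first argument. A minimal before/after sketch of the shape, modeled on the `arith::AddIOp` change in the TosaToLinalg hunk further down (the helper function and includes here are illustrative, not part of the PR):

```cpp
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Emits an arith.addi; both call styles build the same operation.
static Value emitAdd(OpBuilder &rewriter, Location loc, Value lhs, Value rhs) {
  // Old style, removed throughout this PR:
  //   Value sum = rewriter.create<arith::AddIOp>(loc, lhs.getType(), lhs, rhs);
  // New style the PR migrates to: the op class' static create() takes the
  // builder as its first argument.
  Value sum = arith::AddIOp::create(rewriter, loc, lhs.getType(), lhs, rhs);
  return sum;
}
```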
@@ -93,11 +93,11 @@ struct PowiOpToROCDLLibraryCalls : public OpRewritePattern<complex::PowiOp> {

 Location loc = op.getLoc();
 Value exponentReal =
-rewriter.create<arith::SIToFPOp>(loc, exponentFloatType, op.getRhs());
-Value zeroImag = rewriter.create<arith::ConstantOp>(
-loc, rewriter.getZeroAttr(exponentFloatType));
-Value exponent = rewriter.create<complex::CreateOp>(
-loc, op.getLhs().getType(), exponentReal, zeroImag);
+arith::SIToFPOp::create(rewriter, loc, exponentFloatType, op.getRhs());
+Value zeroImag = arith::ConstantOp::create(
+rewriter, loc, rewriter.getZeroAttr(exponentFloatType));
+Value exponent = complex::CreateOp::create(
+rewriter, loc, op.getLhs().getType(), exponentReal, zeroImag);

 rewriter.replaceOpWithNewOp<complex::PowOp>(op, op.getType(), op.getLhs(),
 exponent, op.getFastmathAttr());
mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp (3 additions & 3 deletions)
@@ -937,14 +937,14 @@ struct PowiOpConversion : public OpConversionPattern<complex::PowiOp> {
 auto elementType = cast<FloatType>(type.getElementType());

 Value floatExponent =
-builder.create<arith::SIToFPOp>(elementType, adaptor.getRhs());
+arith::SIToFPOp::create(builder, elementType, adaptor.getRhs());
 Value zero = arith::ConstantOp::create(
 builder, elementType, builder.getFloatAttr(elementType, 0.0));
 Value complexExponent =
 complex::CreateOp::create(builder, type, floatExponent, zero);

-auto pow = builder.create<complex::PowOp>(
-type, adaptor.getLhs(), complexExponent, op.getFastmathAttr());
+auto pow = complex::PowOp::create(builder, type, adaptor.getLhs(),
+complexExponent, op.getFastmathAttr());
 rewriter.replaceOp(op, pow.getResult());
 return success();
 }
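Both `complex.powi` lowerings above rely on the same identity: an integer power of a complex base is computed by converting the exponent to a complex value with zero imaginary part and reusing `complex.pow`. A scalar sketch of that equivalence in standard C++ (not the MLIR builder code itself):

```cpp
#include <complex>
#include <cstdint>

// powi(base, n) lowered as pow(base, complex(float(n), 0)).
std::complex<float> powiViaPow(std::complex<float> base, int32_t exponent) {
  // arith.sitofp + complex.create(exponentReal, zeroImag)
  std::complex<float> complexExponent(static_cast<float>(exponent), 0.0f);
  // complex.pow (here std::pow on std::complex)
  return std::pow(base, complexExponent);
}
```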
mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp (11 additions & 10 deletions)
@@ -500,19 +500,19 @@ struct SincosOpLowering : public ConvertOpToLLVMPattern<math::SincosOp> {
 op->getParentWithTrait<mlir::OpTrait::AutomaticAllocationScope>();
 assert(scope && "Expected op to be inside automatic allocation scope");
 rewriter.setInsertionPointToStart(&scope->getRegion(0).front());
-auto one = rewriter.create<LLVM::ConstantOp>(
-loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(1));
+auto one = LLVM::ConstantOp::create(rewriter, loc, rewriter.getI32Type(),
+rewriter.getI32IntegerAttr(1));
 sinPtr =
-rewriter.create<LLVM::AllocaOp>(loc, ptrType, computeType, one, 0);
+LLVM::AllocaOp::create(rewriter, loc, ptrType, computeType, one, 0);
 cosPtr =
-rewriter.create<LLVM::AllocaOp>(loc, ptrType, computeType, one, 0);
+LLVM::AllocaOp::create(rewriter, loc, ptrType, computeType, one, 0);
 }

 createSincosCall(rewriter, loc, sincosFunc, convertedInput, sinPtr, cosPtr,
 op);

-auto sinResult = rewriter.create<LLVM::LoadOp>(loc, computeType, sinPtr);
-auto cosResult = rewriter.create<LLVM::LoadOp>(loc, computeType, cosPtr);
+auto sinResult = LLVM::LoadOp::create(rewriter, loc, computeType, sinPtr);
+auto cosResult = LLVM::LoadOp::create(rewriter, loc, computeType, cosPtr);

 rewriter.replaceOp(op, {maybeTrunc(sinResult, inputType, rewriter),
 maybeTrunc(cosResult, inputType, rewriter)});
@@ -522,14 +522,15 @@ struct SincosOpLowering : public ConvertOpToLLVMPattern<math::SincosOp> {
 private:
 Value maybeExt(Value operand, PatternRewriter &rewriter) const {
 if (isa<Float16Type, BFloat16Type>(operand.getType()))
-return rewriter.create<LLVM::FPExtOp>(
-operand.getLoc(), Float32Type::get(rewriter.getContext()), operand);
+return LLVM::FPExtOp::create(rewriter, operand.getLoc(),
+Float32Type::get(rewriter.getContext()),
+operand);
 return operand;
 }

 Value maybeTrunc(Value operand, Type type, PatternRewriter &rewriter) const {
 if (operand.getType() != type)
-return rewriter.create<LLVM::FPTruncOp>(operand.getLoc(), type, operand);
+return LLVM::FPTruncOp::create(rewriter, operand.getLoc(), type, operand);
 return operand;
 }

@@ -556,7 +557,7 @@ struct SincosOpLowering : public ConvertOpToLLVMPattern<math::SincosOp> {
 }

 SmallVector<Value> callOperands = {input, sinPtr, cosPtr};
-rewriter.create<LLVM::CallOp>(loc, funcOp, callOperands);
+LLVM::CallOp::create(rewriter, loc, funcOp, callOperands);
 }
 };

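For orientation, the sincos lowering touched above allocates two stack slots, passes them to the resolved sincos callee, and loads both results back. A C-level analogue of the emitted pattern; `device_sincosf` is a hypothetical stand-in for whatever `sincosFunc` resolves to:

```cpp
#include <cmath>

// Stand-in for the device sincos callee (the name is hypothetical).
static void device_sincosf(float x, float *sinOut, float *cosOut) {
  *sinOut = std::sin(x);
  *cosOut = std::cos(x);
}

// Mirrors the lowering above: two stack slots (llvm.alloca), one call with
// output pointers (llvm.call), then a load of each result (llvm.load).
static void sincosShape(float x, float &sinRes, float &cosRes) {
  float s, c;
  device_sincosf(x, &s, &c);
  sinRes = s;
  cosRes = c;
}
```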
mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp (2 additions & 2 deletions)
@@ -142,8 +142,8 @@ struct SincosOpLowering : public ConvertOpToLLVMPattern<math::SincosOp> {
 auto structType = LLVM::LLVMStructType::getLiteral(
 rewriter.getContext(), {llvmOperandType, llvmOperandType});

-auto sincosOp = rewriter.create<LLVM::SincosOp>(
-loc, structType, adaptor.getOperand(), attrs.getAttrs());
+auto sincosOp = LLVM::SincosOp::create(
+rewriter, loc, structType, adaptor.getOperand(), attrs.getAttrs());

 auto sinValue = LLVM::ExtractValueOp::create(rewriter, loc, sincosOp, 0);
 auto cosValue = LLVM::ExtractValueOp::create(rewriter, loc, sincosOp, 1);
mlir/lib/Conversion/SCFToEmitC/SCFToEmitC.cpp (13 additions & 13 deletions)
@@ -394,9 +394,9 @@ struct WhileLowering : public OpConversionPattern<WhileOp> {
 if (!convertedType)
 return rewriter.notifyMatchFailure(whileOp, "type conversion failed");

-emitc::VariableOp var = rewriter.create<emitc::VariableOp>(
-loc, emitc::LValueType::get(convertedType), noInit);
-rewriter.create<emitc::AssignOp>(loc, var.getResult(), init);
+auto var = emitc::VariableOp::create(
+rewriter, loc, emitc::LValueType::get(convertedType), noInit);
+emitc::AssignOp::create(rewriter, loc, var.getResult(), init);
 loopVars.push_back(var);
 }

@@ -411,11 +411,11 @@ struct WhileLowering : public OpConversionPattern<WhileOp> {
 // Create a global boolean variable to store the loop condition state.
 Type i1Type = IntegerType::get(context, 1);
 auto globalCondition =
-rewriter.create<emitc::VariableOp>(loc, emitc::LValueType::get(i1Type),
-emitc::OpaqueAttr::get(context, ""));
+emitc::VariableOp::create(rewriter, loc, emitc::LValueType::get(i1Type),
+emitc::OpaqueAttr::get(context, ""));
 Value conditionVal = globalCondition.getResult();

-auto loweredDo = rewriter.create<emitc::DoOp>(loc);
+auto loweredDo = emitc::DoOp::create(rewriter, loc);

 // Convert region types to match the target dialect type system.
 if (failed(rewriter.convertRegionTypes(&whileOp.getBefore(),
@@ -450,12 +450,12 @@

 // Convert scf.condition to condition variable assignment.
 Value condition = rewriter.getRemappedValue(condOp.getCondition());
-rewriter.create<emitc::AssignOp>(loc, conditionVal, condition);
+emitc::AssignOp::create(rewriter, loc, conditionVal, condition);

 // Wrap body region in conditional to preserve scf semantics. Only create
 // ifOp if after-region is non-empty.
 if (whileOp.getAfterBody()->getOperations().size() > 1) {
-auto ifOp = rewriter.create<emitc::IfOp>(loc, condition, false, false);
+auto ifOp = emitc::IfOp::create(rewriter, loc, condition, false, false);

 // Prepare the after region (loop body) for merging.
 Block *afterBlock = &whileOp.getAfter().front();
@@ -480,8 +480,8 @@ Block *condBlock = rewriter.createBlock(&condRegion);
 Block *condBlock = rewriter.createBlock(&condRegion);
 rewriter.setInsertionPointToStart(condBlock);

-auto exprOp = rewriter.create<emitc::ExpressionOp>(
-loc, i1Type, conditionVal, /*do_not_inline=*/false);
+auto exprOp = emitc::ExpressionOp::create(
+rewriter, loc, i1Type, conditionVal, /*do_not_inline=*/false);
 Block *exprBlock = rewriter.createBlock(&exprOp.getBodyRegion());

 // Set up the expression block to load the condition variable.
@@ -490,12 +490,12 @@

 // Load the condition value and yield it as the expression result.
 Value cond =
-rewriter.create<emitc::LoadOp>(loc, i1Type, exprBlock->getArgument(0));
-rewriter.create<emitc::YieldOp>(loc, cond);
+emitc::LoadOp::create(rewriter, loc, i1Type, exprBlock->getArgument(0));
+emitc::YieldOp::create(rewriter, loc, cond);

 // Yield the expression as the condition region result.
 rewriter.setInsertionPointToEnd(condBlock);
-rewriter.create<emitc::YieldOp>(loc, exprOp);
+emitc::YieldOp::create(rewriter, loc, exprOp);

 return success();
 }
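For context, the `WhileLowering` pattern these hunks touch turns `scf.while` into an `emitc.do` loop whose condition lives in a boolean lvalue and whose body is guarded by an `emitc.if`. A rough sketch of the C shape being targeted, with one loop-carried value and a placeholder condition and body:

```cpp
// One loop-carried value, lowered shape only; the real pattern handles any
// number of emitc.variable/emitc.assign pairs.
int whileLoweringShape(int init) {
  bool cond;       // the i1 emitc.variable holding the condition state
  int var = init;  // emitc.variable + emitc.assign for a loop-carried value
  do {
    cond = var < 10;  // before-region result, assigned to the condition var
    if (cond) {       // emitc.if guarding the after-region
      var += 1;       // after-region (loop body)
    }
  } while (cond);     // emitc.do condition: load of the condition variable
  return var;
}
```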
mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp (12 additions & 12 deletions)
@@ -232,16 +232,16 @@ static Value createLinalgBodyCalculationForElementwiseOp(
 }

 intermediateType = rewriter.getIntegerType(intermediateBitWidth);
-zpAddValue = rewriter.create<arith::ConstantOp>(
-loc, rewriter.getIntegerAttr(intermediateType, zpAdd));
+zpAddValue = arith::ConstantOp::create(
+rewriter, loc, rewriter.getIntegerAttr(intermediateType, zpAdd));
 } else {
 intermediateType = rewriter.getIntegerType(intermediateBitWidth);
 auto arg1 =
-rewriter.create<arith::ExtSIOp>(loc, intermediateType, args[1]);
+arith::ExtSIOp::create(rewriter, loc, intermediateType, args[1]);
 auto arg2 =
-rewriter.create<arith::ExtSIOp>(loc, intermediateType, args[2]);
+arith::ExtSIOp::create(rewriter, loc, intermediateType, args[2]);
 zpAddValue =
-rewriter.create<arith::AddIOp>(loc, intermediateType, arg1, arg2);
+arith::AddIOp::create(rewriter, loc, intermediateType, arg1, arg2);
 }

 // The negation can be applied by doing:
@@ -1402,8 +1402,8 @@ static Value collapse1xNTensorToN(PatternRewriter &rewriter, Value input,
 auto elemType = inputType.getElementType();
 auto collapsedType = RankedTensorType::get({}, elemType);
 // Emit the collapse op
-return rewriter.create<tensor::CollapseShapeOp>(loc, collapsedType, input,
-reassociation);
+return tensor::CollapseShapeOp::create(rewriter, loc, collapsedType, input,
+reassociation);
 }

 static llvm::SmallVector<int8_t>
@@ -1443,7 +1443,7 @@ static void setupLinalgGenericOpInputAndIndexingMap(
 IntegerAttr intAttr = isShift
 ? rewriter.getI8IntegerAttr(values.front())
 : rewriter.getI32IntegerAttr(values.front());
-constant = rewriter.create<arith::ConstantOp>(loc, intAttr);
+constant = arith::ConstantOp::create(rewriter, loc, intAttr);
 } else {
 auto elementType =
 isShift ? rewriter.getIntegerType(8) : rewriter.getI32Type();
@@ -1511,14 +1511,14 @@ static Value getExtendZp(OpBuilder &builder, Type valueTy,
 .getResult(0);
 }
 if (zpTy.isUnsignedInteger()) {
-return builder.create<arith::ExtUIOp>(loc, extendType, result);
+return arith::ExtUIOp::create(builder, loc, extendType, result);
 } else {
-return builder.create<arith::ExtSIOp>(loc, extendType, result);
+return arith::ExtSIOp::create(builder, loc, extendType, result);
 }
 }
 } else {
-return builder.create<arith::ConstantOp>(
-loc, IntegerAttr::get(extendType, *maybeZp));
+return arith::ConstantOp::create(builder, loc,
+IntegerAttr::get(extendType, *maybeZp));
 }
 return result;
 }
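The first TosaToLinalg hunk above sign-extends both zero points to a wider signed intermediate type before adding them, so the sum cannot overflow the original element type. A scalar sketch of that step (the concrete widths are chosen for illustration only):

```cpp
#include <cstdint>

// arith.extsi each zero point to the intermediate width, then arith.addi.
int32_t addZeroPoints(int8_t inputZp, int8_t outputZp) {
  int32_t zp1 = static_cast<int32_t>(inputZp);   // arith.extsi
  int32_t zp2 = static_cast<int32_t>(outputZp);  // arith.extsi
  return zp1 + zp2;                              // arith.addi, no overflow
}
```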
mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp (8 additions & 6 deletions)
@@ -437,13 +437,15 @@ transform::PromoteTensorOp::apply(transform::TransformRewriter &rewriter,
 for (auto [pos, dim] : llvm::enumerate(type.getShape())) {
 if (!ShapedType::isDynamic(dim))
 continue;
-Value cst = rewriter.create<arith::ConstantIndexOp>(tensor.getLoc(), pos);
-auto dimOp = rewriter.create<tensor::DimOp>(tensor.getLoc(), tensor, cst);
+Value cst =
+arith::ConstantIndexOp::create(rewriter, tensor.getLoc(), pos);
+auto dimOp =
+tensor::DimOp::create(rewriter, tensor.getLoc(), tensor, cst);
 preservedOps.insert(dimOp);
 dynamicDims.push_back(dimOp);
 }
-auto allocation = rewriter.create<bufferization::AllocTensorOp>(
-tensor.getLoc(), type, dynamicDims);
+auto allocation = bufferization::AllocTensorOp::create(
+rewriter, tensor.getLoc(), type, dynamicDims);
 // Set memory space if provided.
 if (getMemorySpaceAttr())
 allocation.setMemorySpaceAttr(getMemorySpaceAttr());
@@ -452,8 +454,8 @@
 // Only insert a materialization (typically bufferizes to a copy) when the
 // value may be read from.
 if (needsMaterialization) {
-auto copy = rewriter.create<bufferization::MaterializeInDestinationOp>(
-tensor.getLoc(), tensor, allocated);
+auto copy = bufferization::MaterializeInDestinationOp::create(
+rewriter, tensor.getLoc(), tensor, allocated);
 preservedOps.insert(copy);
 promoted.push_back(copy.getResult());
 } else {
mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp (7 additions & 6 deletions)
@@ -50,24 +50,25 @@ struct StructuredOpInterface
 auto endValue = getValueOrCreateConstantIndexOp(builder, loc, end);

 // Loop Trip count > 0 iff start < end
-Value dimensionHasNonZeroTripCount = builder.create<index::CmpOp>(
-loc, index::IndexCmpPredicate::SLT, startValue, endValue);
+Value dimensionHasNonZeroTripCount = index::CmpOp::create(
+builder, loc, index::IndexCmpPredicate::SLT, startValue, endValue);

 if (!iterationDomainIsNonDegenerate) {
 iterationDomainIsNonDegenerate = dimensionHasNonZeroTripCount;
 } else {
 // Iteration domain is non-degenerate iff all dimensions have loop trip
 // count > 0
-iterationDomainIsNonDegenerate = builder.create<arith::AndIOp>(
-loc, iterationDomainIsNonDegenerate, dimensionHasNonZeroTripCount);
+iterationDomainIsNonDegenerate =
+arith::AndIOp::create(builder, loc, iterationDomainIsNonDegenerate,
+dimensionHasNonZeroTripCount);
 }
 }

 if (!iterationDomainIsNonDegenerate)
 return;

-auto ifOp = builder.create<scf::IfOp>(loc, iterationDomainIsNonDegenerate,
-/*withElseRegion=*/false);
+auto ifOp = scf::IfOp::create(builder, loc, iterationDomainIsNonDegenerate,
+/*withElseRegion=*/false);
 builder.setInsertionPointToStart(&ifOp.getThenRegion().front());

 // Subtract one from the loop ends before composing with the indexing map
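The runtime verification code above emits the check that every loop dimension has a trip count greater than zero as a chain of `index.cmp slt` and `arith.andi` values, then runs the in-bounds checks only inside an `scf.if` guarded by that conjunction. The scalar logic, as a sketch:

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// True iff start < end for every (start, end) loop range, i.e. the iteration
// domain is non-degenerate and the runtime checks should execute at all.
bool iterationDomainIsNonDegenerate(
    const std::vector<std::pair<int64_t, int64_t>> &loopRanges) {
  bool nonDegenerate = true;
  for (auto [start, end] : loopRanges)
    nonDegenerate = nonDegenerate && (start < end);  // trip count > 0
  return nonDegenerate;
}
```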
mlir/lib/Dialect/SCF/IR/SCF.cpp (4 additions & 4 deletions)
@@ -2490,8 +2490,8 @@ struct ConditionPropagation : public OpRewritePattern<IfOp> {
 changed = true;

 if (!constantTrue)
-constantTrue = rewriter.create<arith::ConstantOp>(
-op.getLoc(), i1Ty, rewriter.getIntegerAttr(i1Ty, 1));
+constantTrue = arith::ConstantOp::create(
+rewriter, op.getLoc(), i1Ty, rewriter.getIntegerAttr(i1Ty, 1));

 rewriter.modifyOpInPlace(use.getOwner(),
 [&]() { use.set(constantTrue); });
@@ -2500,8 +2500,8 @@ struct ConditionPropagation : public OpRewritePattern<IfOp> {
 changed = true;

 if (!constantFalse)
-constantFalse = rewriter.create<arith::ConstantOp>(
-op.getLoc(), i1Ty, rewriter.getIntegerAttr(i1Ty, 0));
+constantFalse = arith::ConstantOp::create(
+rewriter, op.getLoc(), i1Ty, rewriter.getIntegerAttr(i1Ty, 0));

 rewriter.modifyOpInPlace(use.getOwner(),
 [&]() { use.set(constantFalse); });
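For context, the `ConditionPropagation` pattern touched above replaces uses of an `scf.if` condition inside the then-region with constant true and inside the else-region with constant false. A scalar analogy of the intuition (ordinary C++, not the rewrite itself):

```cpp
// Inside the branch taken when `cond` is true, `cond` itself folds to true
// (the arith.constant 1 : i1 built above); in the other branch, to false.
int conditionPropagationShape(bool cond, int a, int b) {
  if (cond)
    return cond ? a : b;  // `cond` is known true here, so this folds to `a`
  return cond ? a : b;    // `cond` is known false here, so this folds to `b`
}
```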
mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp (3 additions & 3 deletions)
@@ -74,9 +74,9 @@ struct MixedSizeInputShuffleOpRewrite final
 for (int64_t i = 0; i < origNumElems; ++i)
 promoteMask[i] = i;

-Value promotedInput = rewriter.create<vector::ShuffleOp>(
-shuffleOp.getLoc(), promotedType, inputToPromote, inputToPromote,
-promoteMask);
+Value promotedInput =
+vector::ShuffleOp::create(rewriter, shuffleOp.getLoc(), promotedType,
+inputToPromote, inputToPromote, promoteMask);

 // Create the final shuffle with the promoted inputs.
 Value promotedV1 = promoteV1 ? promotedInput : shuffleOp.getV1();
mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp (1 addition & 1 deletion)
@@ -736,7 +736,7 @@ OpFoldResult genBinOp(OpFoldResult a, OpFoldResult b, Location loc,
 OpBuilder &builder) {
 auto aVal = getValueOrCreateConstantIndexOp(builder, loc, a);
 auto bVal = getValueOrCreateConstantIndexOp(builder, loc, b);
-return builder.create<ArithOp>(loc, aVal, bVal).getResult();
+return ArithOp::create(builder, loc, aVal, bVal).getResult();
 }

 // a helper utility to perform division operation on OpFoldResult and int64_t.
mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp (5 additions & 7 deletions)
@@ -1525,15 +1525,13 @@ void XeGPUSubgroupDistributePass::runOnOperation() {
 auto warpReduction = [](Location loc, OpBuilder &builder, Value input,
 vector::CombiningKind kind, uint32_t size) {
 // First reduce on a single thread to get per lane reduction value.
-Value laneVal = builder.create<vector::ReductionOp>(loc, kind, input);
+Value laneVal = vector::ReductionOp::create(builder, loc, kind, input);
 // Parallel reduction using butterfly shuffles.
 for (uint64_t i = 1; i < size; i <<= 1) {
-Value shuffled =
-builder
-.create<gpu::ShuffleOp>(loc, laneVal, i,
-/*width=*/size,
-/*mode=*/gpu::ShuffleMode::XOR)
-.getShuffleResult();
+Value shuffled = gpu::ShuffleOp::create(builder, loc, laneVal, i,
+/*width=*/size,
+/*mode=*/gpu::ShuffleMode::XOR)
+.getShuffleResult();
 laneVal = makeArithReduction(builder, loc, kind, laneVal, shuffled);
 }
 return laneVal;
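The `warpReduction` helper above first reduces within each lane, then combines across lanes with XOR ("butterfly") shuffles so that after log2(size) steps every lane holds the full reduction. A standalone sketch of that exchange pattern, using a plain C++ lane array in place of `gpu.shuffle xor` and addition in place of `makeArithReduction`:

```cpp
#include <cstdint>
#include <vector>

// lanes.size() is assumed to be a power of two (the subgroup size).
std::vector<int> butterflyReduce(std::vector<int> lanes) {
  const uint32_t size = static_cast<uint32_t>(lanes.size());
  for (uint32_t i = 1; i < size; i <<= 1) {
    std::vector<int> shuffled(size);
    for (uint32_t lane = 0; lane < size; ++lane)
      shuffled[lane] = lanes[lane ^ i];  // gpu.shuffle xor with offset i
    for (uint32_t lane = 0; lane < size; ++lane)
      lanes[lane] += shuffled[lane];     // makeArithReduction (add case)
  }
  return lanes;  // every lane now holds the reduction over all lanes
}
```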