diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
index 39ae6a016eb41..a9592bcd7814a 100644
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -264,8 +264,7 @@ class MLIRGenImpl {
     // The attribute is a vector with a floating point value per element
     // (number) in the array, see `collectData()` below for more details.
     std::vector<double> data;
-    data.reserve(std::accumulate(lit.getDims().begin(), lit.getDims().end(), 1,
-                                 std::multiplies<int>()));
+    data.reserve(llvm::product_of(lit.getDims()));
     collectData(lit, data);
 
     // The type of this attribute is tensor of 64-bit floating-point with the
diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
index 0573af699c1f4..8c21951948496 100644
--- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
@@ -264,8 +264,7 @@ class MLIRGenImpl {
     // The attribute is a vector with a floating point value per element
     // (number) in the array, see `collectData()` below for more details.
     std::vector<double> data;
-    data.reserve(std::accumulate(lit.getDims().begin(), lit.getDims().end(), 1,
-                                 std::multiplies<int>()));
+    data.reserve(llvm::product_of(lit.getDims()));
     collectData(lit, data);
 
     // The type of this attribute is tensor of 64-bit floating-point with the
diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
index 7d676f1b39200..6b7ab40299be5 100644
--- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
@@ -268,8 +268,7 @@ class MLIRGenImpl {
     // The attribute is a vector with a floating point value per element
     // (number) in the array, see `collectData()` below for more details.
     std::vector<double> data;
-    data.reserve(std::accumulate(lit.getDims().begin(), lit.getDims().end(), 1,
-                                 std::multiplies<int>()));
+    data.reserve(llvm::product_of(lit.getDims()));
     collectData(lit, data);
 
     // The type of this attribute is tensor of 64-bit floating-point with the
diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
index 7d676f1b39200..6b7ab40299be5 100644
--- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
@@ -268,8 +268,7 @@ class MLIRGenImpl {
     // The attribute is a vector with a floating point value per element
     // (number) in the array, see `collectData()` below for more details.
     std::vector<double> data;
-    data.reserve(std::accumulate(lit.getDims().begin(), lit.getDims().end(), 1,
-                                 std::multiplies<int>()));
+    data.reserve(llvm::product_of(lit.getDims()));
     collectData(lit, data);
 
     // The type of this attribute is tensor of 64-bit floating-point with the
diff --git a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
index 7d676f1b39200..6b7ab40299be5 100644
--- a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
@@ -268,8 +268,7 @@ class MLIRGenImpl {
     // The attribute is a vector with a floating point value per element
     // (number) in the array, see `collectData()` below for more details.
     std::vector<double> data;
-    data.reserve(std::accumulate(lit.getDims().begin(), lit.getDims().end(), 1,
-                                 std::multiplies<int>()));
+    data.reserve(llvm::product_of(lit.getDims()));
     collectData(lit, data);
 
     // The type of this attribute is tensor of 64-bit floating-point with the
diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
index 75dbc9104a992..73133247335dc 100644
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -405,8 +405,7 @@ class MLIRGenImpl {
     // The attribute is a vector with a floating point value per element
     // (number) in the array, see `collectData()` below for more details.
     std::vector<double> data;
-    data.reserve(std::accumulate(lit.getDims().begin(), lit.getDims().end(), 1,
-                                 std::multiplies<int>()));
+    data.reserve(llvm::product_of(lit.getDims()));
     collectData(lit, data);
 
     // The type of this attribute is tensor of 64-bit floating-point with the
diff --git a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
index 2b7bdc9a7b7f8..11f866c103639 100644
--- a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
+++ b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
@@ -22,6 +22,7 @@
 #include "mlir/IR/TypeRange.h"
 #include "mlir/IR/Value.h"
 #include "mlir/Transforms/DialectConversion.h"
+#include "llvm/ADT/STLExtras.h"
 
 #include <cstdint>
 #include <numeric>
@@ -110,9 +111,7 @@ static Value calculateMemrefTotalSizeBytes(Location loc, MemRefType memrefType,
                        {TypeAttr::get(memrefType.getElementType())}));
 
   IndexType indexType = builder.getIndexType();
-  int64_t numElements = std::accumulate(memrefType.getShape().begin(),
-                                        memrefType.getShape().end(), int64_t{1},
-                                        std::multiplies<int64_t>());
+  int64_t numElements = llvm::product_of(memrefType.getShape());
   emitc::ConstantOp numElementsValue = emitc::ConstantOp::create(
       builder, loc, indexType, builder.getIndexAttr(numElements));
 
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 802691c1f7d76..9bf9ca3ae7a89 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -18,6 +18,7 @@
 #include "mlir/Dialect/Tosa/Utils/ConversionUtils.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Transforms/DialectConversion.h"
+#include "llvm/ADT/STLExtras.h"
 
 #include <numeric>
 
@@ -70,8 +71,7 @@ TensorType inferReshapeExpandedType(TensorType inputType,
 
   // Calculate the product of all elements in 'newShape' except for the -1
   // placeholder, which we discard by negating the result.
-  int64_t totalSizeNoPlaceholder = -std::accumulate(
-      newShape.begin(), newShape.end(), 1, std::multiplies<int64_t>());
+  int64_t totalSizeNoPlaceholder = -llvm::product_of(newShape);
 
   // If there is a 0 component in 'newShape', resolve the placeholder as
   // 0.
diff --git a/mlir/lib/Conversion/VectorToAMX/VectorToAMX.cpp b/mlir/lib/Conversion/VectorToAMX/VectorToAMX.cpp
index 79c2f23c8e7f3..245a3efe98ecc 100644
--- a/mlir/lib/Conversion/VectorToAMX/VectorToAMX.cpp
+++ b/mlir/lib/Conversion/VectorToAMX/VectorToAMX.cpp
@@ -20,6 +20,7 @@
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/DebugLog.h"
 
 #include <numeric>
@@ -265,8 +266,7 @@ loadStoreFromTransfer(PatternRewriter &rewriter,
   if (isPacked)
     src = collapseLastDim(rewriter, src);
   int64_t rows = vecShape[0];
-  int64_t cols = std::accumulate(vecShape.begin() + 1, vecShape.end(), 1,
-                                 std::multiplies<int64_t>());
+  int64_t cols = llvm::product_of(vecShape.drop_front());
   auto tileType = amx::TileType::get({rows, cols}, vecTy.getElementType());
 
   Value zeroIndex = rewriter.createOrFold<arith::ConstantIndexOp>(loc, 0);
@@ -336,8 +336,7 @@ static TypedValue<amx::TileType> loadTile(PatternRewriter &rewriter,
 
   ArrayRef<int64_t> shape = vecTy.getShape();
   int64_t rows = shape[0];
-  int64_t cols = std::accumulate(shape.begin() + 1, shape.end(), 1,
-                                 std::multiplies<int64_t>());
+  int64_t cols = llvm::product_of(shape.drop_front());
   auto tileType = amx::TileType::get({rows, cols}, vecTy.getElementType());
 
   return amx::TileLoadOp::create(rewriter, loc, tileType, buf,
diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index c45c45e4712f3..c9eba6962e6a4 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -26,6 +26,7 @@
 #include "mlir/IR/Builders.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "llvm/ADT/STLExtras.h"
 
 namespace mlir {
 #define GEN_PASS_DEF_CONVERTVECTORTOSCF
@@ -760,8 +761,7 @@ struct DecomposePrintOpConversion : public VectorToSCFPattern<vector::PrintOp> {
     if (vectorType.getRank() != 1) {
       // Flatten n-D vectors to 1D. This is done to allow indexing with a
       // non-constant value.
-      auto flatLength = std::accumulate(shape.begin(), shape.end(), 1,
-                                        std::multiplies<int64_t>());
+      int64_t flatLength = llvm::product_of(shape);
       auto flatVectorType =
           VectorType::get({flatLength}, vectorType.getElementType());
       value = vector::ShapeCastOp::create(rewriter, loc, flatVectorType, value);
diff --git a/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp b/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp
index 9ead1d89069d6..71687b1479a7d 100644
--- a/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp
+++ b/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp
@@ -23,6 +23,7 @@
 #include "mlir/Dialect/XeGPU/IR/XeGPU.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Support/LLVM.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/FormatVariadic.h"
 
 #include "mlir/IR/BuiltinTypes.h"
@@ -774,9 +775,7 @@ struct ConvertXeGPUToXeVMPass
       if (rank < 1 || type.getNumElements() == 1)
         return elemType;
       // Otherwise, convert the vector to a flat vector type.
-      int64_t sum =
-          std::accumulate(type.getShape().begin(), type.getShape().end(),
-                          int64_t{1}, std::multiplies<int64_t>());
+      int64_t sum = llvm::product_of(type.getShape());
       return VectorType::get(sum, elemType);
     });
     typeConverter.addConversion([&](xegpu::TensorDescType type) -> Type {
diff --git a/mlir/lib/Dialect/Arith/Utils/Utils.cpp b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
index b1fc9aa57c3ba..f54bafff58322 100644
--- a/mlir/lib/Dialect/Arith/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
@@ -351,9 +351,9 @@ Value createProduct(OpBuilder &builder, Location loc, ArrayRef<Value> values,
   Value one = ConstantOp::create(builder, loc, resultType,
                                  builder.getOneAttr(resultType));
   ArithBuilder arithBuilder(builder, loc);
-  return std::accumulate(
-      values.begin(), values.end(), one,
-      [&arithBuilder](Value acc, Value v) { return arithBuilder.mul(acc, v); });
+  return llvm::accumulate(values, one, [&arithBuilder](Value acc, Value v) {
+    return arithBuilder.mul(acc, v);
+  });
 }
 
 /// Map strings to float types.
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 19eba6beacd86..b5f8ddaadacdf 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -2460,8 +2460,7 @@ static LogicalResult verifyDistributedType(Type expanded, Type distributed,
              << dDim << ")";
     scales[i] = eDim / dDim;
   }
-  if (std::accumulate(scales.begin(), scales.end(), 1,
-                      std::multiplies<int64_t>()) != warpSize)
+  if (llvm::product_of(scales) != warpSize)
     return op->emitOpError() << "incompatible distribution dimensions from "
                              << expandedVecType << " to " << distributedVecType
                              << " with warp size = " << warpSize;
diff --git a/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp b/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp
index 88f531f394765..572b746358379 100644
--- a/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp
+++ b/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp
@@ -15,6 +15,7 @@
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/IR/Value.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
 
 #include <numeric>
 
@@ -118,8 +119,7 @@ bool WarpDistributionPattern::delinearizeLaneId(
       return false;
     sizes.push_back(large / small);
   }
-  if (std::accumulate(sizes.begin(), sizes.end(), 1,
-                      std::multiplies<int64_t>()) != warpSize)
+  if (llvm::product_of(sizes) != warpSize)
     return false;
 
   AffineExpr s0, s1;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp
index f277c5f5be5fc..0ae2a9cc0318c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp
@@ -266,9 +266,8 @@ struct StructuredOpShardingInterface
     LinalgOp linalgOp = llvm::cast<LinalgOp>(op);
     SmallVector<utils::IteratorType> iteratorTypes =
         linalgOp.getIteratorTypesArray();
-    unsigned reductionItersCount = std::accumulate(
-        iteratorTypes.begin(), iteratorTypes.end(), 0,
-        [](unsigned count, utils::IteratorType iter) {
+    unsigned reductionItersCount = llvm::accumulate(
+        iteratorTypes, 0u, [](unsigned count, utils::IteratorType iter) {
           return count + (iter == utils::IteratorType::reduction);
         });
     shard::ReductionKind reductionKind = getReductionKindOfLinalgOp(linalgOp);
diff --git a/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp b/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp
index b66390819103e..8c4f80f13e2f6 100644
--- a/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp
+++ b/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp
@@ -8,6 +8,7 @@
 #include "mlir/Dialect/Quant/Utils/UniformSupport.h"
"mlir/Dialect/Quant/Utils/UniformSupport.h" #include "mlir/IR/BuiltinTypes.h" +#include "llvm/ADT/STLExtras.h" #include using namespace mlir; @@ -76,9 +77,7 @@ UniformQuantizedPerAxisValueConverter::convert(DenseFPElementsAttr attr) { // using the right quantization parameters. int64_t flattenIndex = 0; auto shape = type.getShape(); - int64_t chunkSize = - std::accumulate(std::next(shape.begin(), quantizationDim + 1), - shape.end(), 1, std::multiplies()); + int64_t chunkSize = llvm::product_of(shape.drop_front(quantizationDim + 1)); Type newElementType = IntegerType::get(attr.getContext(), storageBitWidth); return attr.mapValues(newElementType, [&](const APFloat &old) { int chunkIndex = (flattenIndex++) / chunkSize; diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp index 55119984d9cc1..fe50865bb7c49 100644 --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp @@ -400,7 +400,7 @@ LogicalResult spirv::CompositeConstructOp::verify() { return emitOpError("operand element type mismatch: expected to be ") << resultType.getElementType() << ", but provided " << elementType; } - unsigned totalCount = std::accumulate(sizes.begin(), sizes.end(), 0); + unsigned totalCount = llvm::sum_of(sizes); if (totalCount != cType.getNumElements()) return emitOpError("has incorrect number of operands: expected ") << cType.getNumElements() << ", but provided " << totalCount; diff --git a/mlir/lib/Dialect/Shard/IR/ShardOps.cpp b/mlir/lib/Dialect/Shard/IR/ShardOps.cpp index 08fccfa25c0c7..135c03311ce0e 100644 --- a/mlir/lib/Dialect/Shard/IR/ShardOps.cpp +++ b/mlir/lib/Dialect/Shard/IR/ShardOps.cpp @@ -1010,18 +1010,6 @@ static LogicalResult verifyInGroupDevice(Location loc, StringRef deviceName, return success(); } -template -static auto product(It begin, It end) { - using ElementType = std::decay_t; - return std::accumulate(begin, end, static_cast(1), - std::multiplies()); -} - -template -static auto product(R &&range) { - return product(adl_begin(range), adl_end(range)); -} - static LogicalResult verifyDimensionCompatibility(Location loc, int64_t expectedDimSize, int64_t resultDimSize, diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp index c51b5e9cbfc78..00f84bc43f444 100644 --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -2368,9 +2368,10 @@ llvm::LogicalResult tosa::ReshapeOp::verify() { } } - int64_t newShapeElementsNum = std::accumulate( - shapeValues.begin(), shapeValues.end(), 1LL, - [](int64_t acc, int64_t dim) { return (dim > 0) ? acc * dim : acc; }); + int64_t newShapeElementsNum = + llvm::accumulate(shapeValues, int64_t(1), [](int64_t acc, int64_t dim) { + return (dim > 0) ? 
+        return (dim > 0) ? acc * dim : acc;
+      });
   bool isStaticNewShape =
       llvm::all_of(shapeValues, [](int64_t s) { return s > 0; });
   if ((isStaticNewShape && inputElementsNum != newShapeElementsNum) ||
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
index d33ebe397cd35..5786f53b8133d 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
@@ -20,6 +20,7 @@
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/DialectResourceBlobManager.h"
 #include "mlir/IR/Matchers.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 
 using namespace mlir;
@@ -375,8 +376,7 @@ llvm::APInt calculateReducedValue(const mlir::ElementsAttr &oldTensorAttr,
   for (int64_t reductionAxisVal = 1; reductionAxisVal < oldShape[reductionAxis];
        ++reductionAxisVal) {
 
-    int64_t stride = std::accumulate(oldShape.begin() + reductionAxis + 1,
-                                     oldShape.end(), 1, std::multiplies());
+    int64_t stride = llvm::product_of(oldShape.drop_front(reductionAxis + 1));
     int64_t index = indexAtOldTensor + stride * reductionAxisVal;
     reducedValue =
         OperationType::calcOneElement(reducedValue, oldTensor[index]);
@@ -424,8 +424,7 @@ struct ReduceConstantOptimization : public OpRewritePattern<OperationType> {
 
     auto oldShape = shapedOldElementsValues.getShape();
     auto newShape = resultType.getShape();
-    auto newNumOfElements = std::accumulate(newShape.begin(), newShape.end(), 1,
-                                            std::multiplies<int64_t>());
+    int64_t newNumOfElements = llvm::product_of(newShape);
     llvm::SmallVector<int64_t> newReducedTensor(newNumOfElements);
 
     for (int64_t reductionIndex = 0; reductionIndex < newNumOfElements;
diff --git a/mlir/lib/Dialect/Utils/IndexingUtils.cpp b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
index e1648ab99ff25..305b06eb38fdf 100644
--- a/mlir/lib/Dialect/Utils/IndexingUtils.cpp
+++ b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
@@ -81,21 +81,10 @@ SmallVector<int64_t> mlir::computeElementwiseMul(ArrayRef<int64_t> v1,
   return computeElementwiseMulImpl<int64_t>(v1, v2);
 }
 
-int64_t mlir::computeSum(ArrayRef<int64_t> basis) {
-  assert(llvm::all_of(basis, [](int64_t s) { return s > 0; }) &&
-         "basis must be nonnegative");
-  if (basis.empty())
-    return 0;
-  return std::accumulate(basis.begin(), basis.end(), 1, std::plus<int64_t>());
-}
-
 int64_t mlir::computeProduct(ArrayRef<int64_t> basis) {
   assert(llvm::all_of(basis, [](int64_t s) { return s > 0; }) &&
          "basis must be nonnegative");
-  if (basis.empty())
-    return 1;
-  return std::accumulate(basis.begin(), basis.end(), 1,
-                         std::multiplies<int64_t>());
+  return llvm::product_of(basis);
 }
 
 int64_t mlir::linearize(ArrayRef<int64_t> offsets, ArrayRef<int64_t> basis) {
@@ -158,19 +147,11 @@ SmallVector<AffineExpr> mlir::computeElementwiseMul(ArrayRef<AffineExpr> v1,
 }
 
 AffineExpr mlir::computeSum(MLIRContext *ctx, ArrayRef<AffineExpr> basis) {
-  if (basis.empty())
-    return getAffineConstantExpr(0, ctx);
-  return std::accumulate(basis.begin(), basis.end(),
-                         getAffineConstantExpr(0, ctx),
-                         std::plus<AffineExpr>());
+  return llvm::sum_of(basis, getAffineConstantExpr(0, ctx));
 }
 
 AffineExpr mlir::computeProduct(MLIRContext *ctx, ArrayRef<AffineExpr> basis) {
-  if (basis.empty())
-    return getAffineConstantExpr(1, ctx);
-  return std::accumulate(basis.begin(), basis.end(),
-                         getAffineConstantExpr(1, ctx),
-                         std::multiplies<AffineExpr>());
+  return llvm::product_of(basis, getAffineConstantExpr(1, ctx));
 }
 
 AffineExpr mlir::linearize(MLIRContext *ctx, ArrayRef<AffineExpr> offsets,
diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
index 7b2734d8c22fc..6e9118e1f7b0b 100644
--- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
+++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
@@ -374,11 +374,11 @@ mlir::composeReassociationIndices(
   if (consumerReassociations.empty())
     return composedIndices;
 
-  size_t consumerDims = std::accumulate(
-      consumerReassociations.begin(), consumerReassociations.end(), 0,
-      [](size_t all, ReassociationIndicesRef indices) {
-        return all + indices.size();
-      });
+  size_t consumerDims =
+      llvm::accumulate(consumerReassociations, size_t(0),
+                       [](size_t all, ReassociationIndicesRef indices) {
+                         return all + indices.size();
+                       });
   if (producerReassociations.size() != consumerDims)
     return std::nullopt;
 
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index a7e3ba8ca3285..58256b0ade9f6 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -2496,8 +2496,7 @@ struct ToElementsOfBroadcast final : OpRewritePattern<ToElementsOp> {
     auto srcElems = vector::ToElementsOp::create(
         rewriter, toElementsOp.getLoc(), bcastOp.getSource());
 
-    int64_t dstCount = std::accumulate(dstShape.begin(), dstShape.end(), 1,
-                                       std::multiplies<int64_t>());
+    int64_t dstCount = llvm::product_of(dstShape);
 
     SmallVector<Value> replacements;
     replacements.reserve(dstCount);
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
index c5f22b2eafeb7..0eba0b12259ac 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
@@ -21,6 +21,7 @@
 #include "mlir/IR/Location.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/IR/TypeUtilities.h"
+#include "llvm/ADT/STLExtras.h"
 #include <numeric>
 
 #define DEBUG_TYPE "vector-shape-cast-lowering"
@@ -166,10 +167,7 @@ class ShapeCastOpRewritePattern : public OpRewritePattern<vector::ShapeCastOp> {
     const VectorType resultType = shapeCast.getResultVectorType();
     const ArrayRef<int64_t> resultShape = resultType.getShape();
 
-    const int64_t nSlices =
-        std::accumulate(sourceShape.begin(), sourceShape.begin() + sourceDim, 1,
-                        std::multiplies<int64_t>());
-
+    const int64_t nSlices = llvm::product_of(sourceShape.take_front(sourceDim));
     SmallVector<int64_t> extractIndex(sourceDim, 0);
     SmallVector<int64_t> insertIndex(resultDim, 0);
     Value result = ub::PoisonOp::create(rewriter, loc, resultType);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
index 963b2c803bc5a..aa2dd89b182e1 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -15,6 +15,7 @@
 #include "mlir/Dialect/Vector/Utils/VectorUtils.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/TypeUtilities.h"
+#include "llvm/ADT/STLExtras.h"
 
 #define DEBUG_TYPE "vector-drop-unit-dim"
 
@@ -557,8 +558,7 @@ struct CastAwayConstantMaskLeadingOneDim
     // If any of the dropped unit dims has a size of `0`, the entire mask is a
     // zero mask, else the unit dim has no effect on the mask.
     int64_t flatLeadingSize =
-        std::accumulate(dimSizes.begin(), dimSizes.begin() + dropDim + 1,
-                        static_cast<int64_t>(1), std::multiplies<int64_t>());
+        llvm::product_of(dimSizes.take_front(dropDim + 1));
     SmallVector<int64_t> newDimSizes = {flatLeadingSize};
     newDimSizes.append(dimSizes.begin() + dropDim + 1, dimSizes.end());
 
diff --git a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
index b72d5648b29f9..2c56a438ea62c 100644
--- a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
+++ b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
@@ -52,8 +52,7 @@ mlir::xegpu::getDistributedVectorType(xegpu::TensorDescType tdescTy) {
   // compute sgSize by multiply elements of laneLayout
   // e.g. for 2D layout, sgSize = laneLayout[0] * laneLayout[1]
   // e.g. for 1D layout, sgSize = laneLayout[0]
-  auto sgSize = std::accumulate(laneLayout.begin(), laneLayout.end(), 1,
-                                std::multiplies<int64_t>());
+  int64_t sgSize = llvm::product_of(laneLayout);
 
   // Case 1: regular loads/stores
   auto scatterAttr = tdescTy.getEncodingOfType<ScatterTensorDescAttr>();
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index 8bcfa465e4a22..ce421f4bf7e0e 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -18,6 +18,7 @@
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/IR/TypeUtilities.h"
 #include "mlir/Interfaces/FoldInterfaces.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <numeric>
@@ -1274,10 +1275,7 @@ LogicalResult OpTrait::impl::verifyValueSizeAttr(Operation *op,
     return op->emitOpError("'")
            << attrName << "' attribute cannot have negative elements";
 
-  size_t totalCount =
-      std::accumulate(sizes.begin(), sizes.end(), 0,
-                      [](unsigned all, int32_t one) { return all + one; });
-
+  size_t totalCount = llvm::sum_of(sizes, size_t(0));
   if (totalCount != expectedCount)
     return op->emitOpError()
            << valueGroupName << " count (" << expectedCount
diff --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp
index 394ac7765ed61..2a37f3860fe00 100644
--- a/mlir/lib/IR/OperationSupport.cpp
+++ b/mlir/lib/IR/OperationSupport.cpp
@@ -406,15 +406,13 @@ OperandRangeRange::OperandRangeRange(OperandRange operands,
 
 OperandRange OperandRangeRange::join() const {
   const OwnerT &owner = getBase();
   ArrayRef<int32_t> sizeData = llvm::cast<DenseI32ArrayAttr>(owner.second);
-  return OperandRange(owner.first,
-                      std::accumulate(sizeData.begin(), sizeData.end(), 0));
+  return OperandRange(owner.first, llvm::sum_of(sizeData));
 }
 
 OperandRange OperandRangeRange::dereference(const OwnerT &object,
                                             ptrdiff_t index) {
   ArrayRef<int32_t> sizeData = llvm::cast<DenseI32ArrayAttr>(object.second);
-  uint32_t startIndex =
-      std::accumulate(sizeData.begin(), sizeData.begin() + index, 0);
+  uint32_t startIndex = llvm::sum_of(sizeData.take_front(index));
   return OperandRange(object.first + startIndex, *(sizeData.begin() + index));
 }
 
@@ -565,8 +563,7 @@ MutableOperandRange
 MutableOperandRangeRange::dereference(const OwnerT &object, ptrdiff_t index) {
   ArrayRef<int32_t> sizeData =
       llvm::cast<DenseI32ArrayAttr>(object.second.getValue());
-  uint32_t startIndex =
-      std::accumulate(sizeData.begin(), sizeData.begin() + index, 0);
+  uint32_t startIndex = llvm::sum_of(sizeData.take_front(index));
   return object.first.slice(
       startIndex, *(sizeData.begin() + index),
       MutableOperandRange::OperandSegment(index, object.second));
diff --git a/mlir/lib/IR/TypeUtilities.cpp b/mlir/lib/IR/TypeUtilities.cpp
index d2d115ea39277..e438631ffe1f5 100644
--- a/mlir/lib/IR/TypeUtilities.cpp
+++ b/mlir/lib/IR/TypeUtilities.cpp
@@ -104,8 +104,8 @@ LogicalResult mlir::verifyCompatibleShapes(TypeRange types1, TypeRange types2) {
 LogicalResult mlir::verifyCompatibleDims(ArrayRef<int64_t> dims) {
   if (dims.empty())
     return success();
-  auto staticDim = std::accumulate(
-      dims.begin(), dims.end(), dims.front(), [](auto fold, auto dim) {
+  auto staticDim =
+      llvm::accumulate(dims, dims.front(), [](auto fold, auto dim) {
         return ShapedType::isDynamic(dim) ? fold : dim;
       });
   return success(llvm::all_of(dims, [&](auto dim) {
diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp
index 33fbd2a9579f0..42843ea1780c4 100644
--- a/mlir/lib/Rewrite/ByteCode.cpp
+++ b/mlir/lib/Rewrite/ByteCode.cpp
@@ -1835,8 +1835,7 @@ executeGetOperandsResults(RangeT values, Operation *op, unsigned index,
       return nullptr;
 
     ArrayRef<int32_t> segments = segmentAttr;
-    unsigned startIndex =
-        std::accumulate(segments.begin(), segments.begin() + index, 0);
+    unsigned startIndex = llvm::sum_of(segments.take_front(index));
    values = values.slice(startIndex, *std::next(segments.begin(), index));
 
     LDBG() << "  * Extracting range[" << startIndex << ", "
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 5a3eb209f0a92..845a14f34c016 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -922,8 +922,7 @@ llvm::CallInst *mlir::LLVM::detail::createIntrinsicCall(
     assert(opBundleSizes.size() == opBundleTagsAttr.size() &&
            "operand bundles and tags do not match");
 
-    numOpBundleOperands =
-        std::accumulate(opBundleSizes.begin(), opBundleSizes.end(), size_t(0));
+    numOpBundleOperands = llvm::sum_of(opBundleSizes);
     assert(numOpBundleOperands <= intrOp->getNumOperands() &&
            "operand bundle operands is more than the number of operands");
 
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
index 969011546985b..daae3c79ffd43 100644
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -3513,9 +3513,9 @@ void OpEmitter::genCodeForAddingArgAndRegionForBuilder(
       body << "(" << operandName << " ? 1 : 0)";
     } else if (operand.isVariadicOfVariadic()) {
       body << llvm::formatv(
-          "static_cast<int32_t>(std::accumulate({0}.begin(), {0}.end(), 0, "
+          "llvm::accumulate({0}, int32_t(0), "
           "[](int32_t curSum, ::mlir::ValueRange range) {{ return curSum + "
-          "static_cast<int32_t>(range.size()); }))",
+          "static_cast<int32_t>(range.size()); })",
          operandName);
     } else {
       body << "static_cast<int32_t>(" << getArgumentName(op, i) << ".size())";
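
Note on the replacement pattern (not part of the patch): `llvm::product_of`, `llvm::sum_of`, and `llvm::accumulate` are the range-based helpers from `llvm/ADT/STLExtras.h` that this patch adopts; they take a range directly instead of an iterator pair, and the optional second argument fixes the accumulator's type and initial value. A minimal sketch of the calls used throughout this diff; the function names `shapeProduct` and `segmentPrefixSum` are hypothetical, chosen only for illustration:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/STLExtras.h"
  #include <cstdint>

  // Product of all dimensions, e.g. the element count of a static shape.
  // Replaces std::accumulate(shape.begin(), shape.end(), int64_t{1},
  //                          std::multiplies<int64_t>()).
  int64_t shapeProduct(llvm::ArrayRef<int64_t> shape) {
    return llvm::product_of(shape);
  }

  // Sum of the first n segment sizes; the explicit init value widens the
  // accumulator to int64_t, mirroring the OperationSupport.cpp changes above.
  int64_t segmentPrefixSum(llvm::ArrayRef<int32_t> segments, size_t n) {
    return llvm::sum_of(segments.take_front(n), int64_t(0));
  }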