Skip to content

Commit

Permalink
[mlir][linalg] New tiling option: Scalarize dynamic dims
Browse files Browse the repository at this point in the history
This tiling option scalarizes all dynamic dimensions, i.e., it tiles all dynamic dimensions by 1.

This option is useful for linalg ops with partly dynamic tensor dimensions. E.g., such ops can appear in the partial iteration after loop peeling. After scalarizing dynamic dims, those ops can be vectorized.

Differential Revision: https://reviews.llvm.org/D109268
  • Loading branch information
matthias-springer committed Sep 14, 2021
1 parent 8faf35c commit fb1def9
Show file tree
Hide file tree
Showing 4 changed files with 75 additions and 4 deletions.
4 changes: 4 additions & 0 deletions mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
Expand Up @@ -479,6 +479,10 @@ struct LinalgTilingOptions {
/// proper interaction with folding.
LinalgTilingOptions &setTileSizes(ArrayRef<int64_t> ts);

/// Tile all dynamic dimensions by 1. I.e., scalarize those dimensions.
/// Note: `scalarizeDynamicDims` and `setTileSizes` cannot be used together.
LinalgTilingOptions &scalarizeDynamicDims();

/// The interchange vector to reorder the tiled loops.
SmallVector<unsigned, 4> interchangeVector = {};

Expand Down
25 changes: 25 additions & 0 deletions mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
Expand Up @@ -107,6 +107,7 @@ void mlir::linalg::LinalgTransformationFilter::

LinalgTilingOptions &
mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
assert(!tileSizeComputationFunction && "tile sizes already set");
SmallVector<int64_t, 4> tileSizes(ts.begin(), ts.end());
tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
OpBuilder::InsertionGuard guard(b);
Expand All @@ -120,6 +121,30 @@ mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
return *this;
}

/// Configure tiling so that every dynamic loop dimension is tiled by 1
/// (scalarized) and every static dimension is left untiled (tile size 0).
LinalgTilingOptions &mlir::linalg::LinalgTilingOptions::scalarizeDynamicDims() {
  assert(!tileSizeComputationFunction && "tile sizes already set");
  tileSizeComputationFunction = [](OpBuilder &b, Operation *op) {
    SmallVector<Value, 4> result;
    // Only Linalg ops are supported; anything else yields no tile sizes.
    auto linalgOp = dyn_cast<LinalgOp>(op);
    if (!linalgOp)
      return result;
    Location loc = linalgOp.getLoc();
    auto allShapeSizes = linalgOp.createFlatListOfOperandDims(b, loc);
    AffineMap shapesToLoops = linalgOp.getShapesToLoopsMap();
    // Bail out if the shapes-to-loops mapping cannot be computed.
    if (!shapesToLoops)
      return result;
    auto loopBounds = applyMapToValues(b, loc, shapesToLoops, allShapeSizes);
    for (Value bound : loopBounds) {
      // A statically-known bound is left untiled (tile size 0); a dynamic
      // bound is scalarized (tile size 1).
      bool isStaticBound = getConstantIntValue(bound).hasValue();
      result.push_back(b.create<ConstantIndexOp>(loc, isStaticBound ? 0 : 1));
    }
    return result;
  };
  return *this;
}

/// Try to compute a static bounding box for `operand`
/// Return success if either:
/// 1. The operand is already statically shaped, `result` is left unchanged.
Expand Down
27 changes: 27 additions & 0 deletions mlir/test/Dialect/Linalg/tile-scalarize-dynamic-dims.mlir
@@ -0,0 +1,27 @@
// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-scalarize-dynamic-dims" -for-loop-canonicalization -canonicalize -split-input-file | \
// RUN: FileCheck %s

// CHECK-LABEL: func @matmul_partly_dynamic_tensor(
// CHECK-SAME: %[[ARG0:.*]]: tensor<?x?xf32>, %[[ARG1:.*]]: tensor<?x2000xf32>
// CHECK-DAG: %[[C0:.*]] = constant 0 : index
// CHECK-DAG: %[[C1:.*]] = constant 1 : index
// CHECK: tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[UB1:.*]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?xf32>
// CHECK: %[[UB2:.*]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?xf32>
// CHECK: scf.for %[[IV0:.*]] = %[[C0]] to %[[UB1]] step %[[C1]]
// CHECK: scf.for %[[IV1:.*]] = %[[C0]] to %[[UB2]] step %[[C1]]
// CHECK: %[[S1:.*]] = tensor.extract_slice %[[ARG0]][%[[IV0]], %[[IV1]]] [1, 1] [1, 1] : tensor<?x?xf32> to tensor<1x1xf32>
// CHECK: %[[S2:.*]] = tensor.extract_slice %[[ARG1]][%[[IV1]], 0] [1, 2000] [1, 1] : tensor<?x2000xf32> to tensor<1x2000xf32>
// CHECK: %[[S3:.*]] = tensor.extract_slice %{{.*}}[%[[IV0]], 0] [1, 2000] [1, 1] : tensor<?x2000xf32> to tensor<1x2000xf32>
// CHECK: linalg.matmul ins(%[[S1]], %[[S2]] : tensor<1x1xf32>, tensor<1x2000xf32>) outs(%[[S3]] : tensor<1x2000xf32>) -> tensor<1x2000xf32>
// Matmul where %arg0 has two dynamic dims and %arg1 has a dynamic first dim
// but a static second dim (2000). Per the CHECK lines above, the two dynamic
// loop dimensions (bounds taken from the dims of %arg0) are tiled by 1 via
// scf.for loops with step 1, while the static 2000 dimension stays untiled.
func @matmul_partly_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x2000xf32>)
    -> tensor<?x2000xf32> {
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
  %out = linalg.init_tensor [%d0, 2000] : tensor<?x2000xf32>
  // The filter attribute selects this op for the test's tiling pattern.
  %r = linalg.matmul {__internal_linalg_transform__ = "tile"}
      ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x2000xf32>)
      outs(%out: tensor<?x2000xf32>) -> tensor<?x2000xf32>
  return %r : tensor<?x2000xf32>
}
23 changes: 19 additions & 4 deletions mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
Expand Up @@ -87,6 +87,10 @@ struct TestLinalgTransforms
Option<bool> testTilePattern{*this, "test-tile-pattern",
llvm::cl::desc("Test tile pattern"),
llvm::cl::init(false)};
Option<bool> testTileScalarizeDynamicDims{
*this, "test-tile-scalarize-dynamic-dims",
llvm::cl::desc("Test tiling of dynamic dims by 1"),
llvm::cl::init(false)};
Option<int> testHoistPadding{*this, "test-hoist-padding",
llvm::cl::desc("Test hoist padding"),
llvm::cl::init(0)};
Expand Down Expand Up @@ -566,12 +570,19 @@ static Value getNeutralOfLinalgOp(OpBuilder &b, OpOperand &op) {
}

static void applyTilePattern(FuncOp funcOp, ArrayRef<int64_t> tileSizes,
bool padTiles, ArrayRef<int64_t> peeledLoops) {
bool padTiles, ArrayRef<int64_t> peeledLoops,
bool scalarizeDynamicDims) {
MLIRContext *context = funcOp.getContext();
RewritePatternSet tilingPattern(context);
auto linalgTilingOptions =
linalg::LinalgTilingOptions().setTileSizes(tileSizes).setPeeledLoops(
peeledLoops);
linalg::LinalgTilingOptions().setPeeledLoops(peeledLoops);
if (scalarizeDynamicDims) {
linalgTilingOptions.scalarizeDynamicDims();
assert(tileSizes.empty() &&
"tileSizes and scalarizeDynamicDims is mutually exclusive");
} else {
linalgTilingOptions.setTileSizes(tileSizes);
}
if (padTiles)
linalgTilingOptions.setPaddingValueComputationFunction(
getNeutralOfLinalgOp);
Expand Down Expand Up @@ -709,7 +720,11 @@ void TestLinalgTransforms::runOnFunction() {
return applyTiledLoopPeelingPattern(getFunction(), testTiledLoopPeeling,
skipPartial);
if (testTilePattern)
return applyTilePattern(getFunction(), tileSizes, padTiles, peeledLoops);
return applyTilePattern(getFunction(), tileSizes, padTiles, peeledLoops,
/*scalarizeDynamicDims=*/false);
if (testTileScalarizeDynamicDims)
return applyTilePattern(getFunction(), tileSizes, padTiles,
/*peeledLoops=*/{}, /*scalarizeDynamicDims=*/true);
if (testHoistPadding) {
getFunction().walk([&](linalg::PadTensorOp padTensorOp) {
(void)linalg::hoistPaddingOnTensors(padTensorOp, testHoistPadding);
Expand Down

0 comments on commit fb1def9

Please sign in to comment.