-
Notifications
You must be signed in to change notification settings - Fork 15.2k
[Linalg] Fix linalg.pack canonicalization priority issue #160340
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Linalg] Fix linalg.pack canonicalization priority issue #160340
Conversation
@llvm/pr-subscribers-mlir Author: Nirvedh Meshram (nirvedhmeshram) Changes: The current canonicalization prioritizes the unpack->pack folder over dropping the padding value when it is not needed, but that folder fails if there is padding and hence blocks all subsequent canonicalizations. Full diff: https://github.com/llvm/llvm-project/pull/160340.diff 2 Files Affected:
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 578931e1351c6..de6324da22445 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -5581,6 +5581,14 @@ static bool inferStaticShape(PackOp packOp, SmallVectorImpl<int64_t> &srcShape,
}
LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
+ // Fold optional PaddingValue operand away if padding is not needed.
+ if (packOp.getPaddingValue() && paddingIsNotNeeded(packOp)) {
+ rewriter.startOpModification(packOp);
+ packOp.getPaddingValueMutable().clear();
+ rewriter.finalizeOpModification(packOp);
+ return success();
+ }
+
// Fold an pack(unpack(x)) to x.
if (auto unPackOp = packOp.getSource().getDefiningOp<UnPackOp>()) {
if (unPackOp.getSourceType() != packOp.getDestType())
@@ -5593,14 +5601,6 @@ LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
return success();
}
- // Fold optional PaddingValue operand away if padding is not needed.
- if (packOp.getPaddingValue() && paddingIsNotNeeded(packOp)) {
- rewriter.startOpModification(packOp);
- packOp.getPaddingValueMutable().clear();
- rewriter.finalizeOpModification(packOp);
- return success();
- }
-
// Insert tensor.cast ops if static shape inference is available..
SmallVector<int64_t> srcShape, destShape;
if (inferStaticShape(packOp, srcShape, destShape)) {
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 5c5f7e861d37d..26d2d98572f47 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -1756,10 +1756,11 @@ func.func @pack_unpack(%t: tensor<16x16x?x?xf32>, %tile1: index, %tile2: index)
// CHECK-SAME: %[[T:.+]]: tensor<16x16x8x8xf32>
// CHECK: return %[[T]] : tensor<16x16x8x8xf32>
func.func @pack_unpack(%t: tensor<16x16x8x8xf32>) -> tensor<16x16x8x8xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
%tensor_empty = tensor.empty() : tensor<128x128xf32>
%unpacked = linalg.unpack %t inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty : tensor<16x16x8x8xf32> -> tensor<128x128xf32>
%tensor_empty1 = tensor.empty() : tensor<16x16x8x8xf32>
- %packed = linalg.pack %unpacked inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty1 : tensor<128x128xf32> -> tensor<16x16x8x8xf32>
+ %packed = linalg.pack %unpacked padding_value(%cst : f32) inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty1 : tensor<128x128xf32> -> tensor<16x16x8x8xf32>
return %packed : tensor<16x16x8x8xf32>
}
|
@llvm/pr-subscribers-mlir-linalg Author: Nirvedh Meshram (nirvedhmeshram) Changes: The current canonicalization prioritizes the unpack->pack folder over dropping the padding value when it is not needed, but that folder fails if there is padding and hence blocks all subsequent canonicalizations. Full diff: https://github.com/llvm/llvm-project/pull/160340.diff 2 Files Affected:
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 578931e1351c6..de6324da22445 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -5581,6 +5581,14 @@ static bool inferStaticShape(PackOp packOp, SmallVectorImpl<int64_t> &srcShape,
}
LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
+ // Fold optional PaddingValue operand away if padding is not needed.
+ if (packOp.getPaddingValue() && paddingIsNotNeeded(packOp)) {
+ rewriter.startOpModification(packOp);
+ packOp.getPaddingValueMutable().clear();
+ rewriter.finalizeOpModification(packOp);
+ return success();
+ }
+
// Fold an pack(unpack(x)) to x.
if (auto unPackOp = packOp.getSource().getDefiningOp<UnPackOp>()) {
if (unPackOp.getSourceType() != packOp.getDestType())
@@ -5593,14 +5601,6 @@ LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) {
return success();
}
- // Fold optional PaddingValue operand away if padding is not needed.
- if (packOp.getPaddingValue() && paddingIsNotNeeded(packOp)) {
- rewriter.startOpModification(packOp);
- packOp.getPaddingValueMutable().clear();
- rewriter.finalizeOpModification(packOp);
- return success();
- }
-
// Insert tensor.cast ops if static shape inference is available..
SmallVector<int64_t> srcShape, destShape;
if (inferStaticShape(packOp, srcShape, destShape)) {
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 5c5f7e861d37d..26d2d98572f47 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -1756,10 +1756,11 @@ func.func @pack_unpack(%t: tensor<16x16x?x?xf32>, %tile1: index, %tile2: index)
// CHECK-SAME: %[[T:.+]]: tensor<16x16x8x8xf32>
// CHECK: return %[[T]] : tensor<16x16x8x8xf32>
func.func @pack_unpack(%t: tensor<16x16x8x8xf32>) -> tensor<16x16x8x8xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
%tensor_empty = tensor.empty() : tensor<128x128xf32>
%unpacked = linalg.unpack %t inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty : tensor<16x16x8x8xf32> -> tensor<128x128xf32>
%tensor_empty1 = tensor.empty() : tensor<16x16x8x8xf32>
- %packed = linalg.pack %unpacked inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty1 : tensor<128x128xf32> -> tensor<16x16x8x8xf32>
+ %packed = linalg.pack %unpacked padding_value(%cst : f32) inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty1 : tensor<128x128xf32> -> tensor<16x16x8x8xf32>
return %packed : tensor<16x16x8x8xf32>
}
|
452f891
to
5c5b64d
Compare
Signed-off-by: Nirvedh Meshram <nirvedh@gmail.com>
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM, thanks
The current canonicalization prioritizes the unpack->pack folder over dropping the padding value when it is not needed, but that folder fails if there is padding and hence blocks all subsequent canonicalizations. We now move the failing checks into the if statement so that canonicalization can proceed when the unpack->pack folder's conditions are not met.