diff --git a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp
index 264e704f914ffd..9a5e2a608df980 100644
--- a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp
+++ b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp
@@ -24,36 +24,17 @@
 using namespace mlir;
 using namespace mlir::loop;
 
-static llvm::cl::OptionCategory clOptionsCategory(PASS_NAME " options");
-static llvm::cl::opt
-    clNumBlockDims("gpu-block-dims",
-                   llvm::cl::desc("Number of GPU block dimensions for mapping"),
-                   llvm::cl::cat(clOptionsCategory), llvm::cl::init(1u));
-static llvm::cl::opt clNumThreadDims(
-    "gpu-thread-dims",
-    llvm::cl::desc("Number of GPU thread dimensions for mapping"),
-    llvm::cl::cat(clOptionsCategory), llvm::cl::init(1u));
-
-static llvm::cl::OptionCategory clLoopOpToGPUCategory(LOOPOP_TO_GPU_PASS_NAME " options");
-static llvm::cl::list
-    clNumWorkGroups("gpu-num-workgroups",
-                    llvm::cl::desc("Num workgroups in the GPU launch"),
-                    llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
-                    llvm::cl::cat(clLoopOpToGPUCategory));
-static llvm::cl::list
-    clWorkGroupSize("gpu-workgroup-size",
-                    llvm::cl::desc("Workgroup Size in the GPU launch"),
-                    llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
-                    llvm::cl::cat(clLoopOpToGPUCategory));
-
 namespace {
 // A pass that traverses top-level loops in the function and converts them to
 // GPU launch operations. Nested launches are not allowed, so this does not
 // walk the function recursively to avoid considering nested loops.
 struct ForLoopMapper : public FunctionPass {
-  ForLoopMapper(unsigned numBlockDims, unsigned numThreadDims)
-      : numBlockDims(numBlockDims), numThreadDims(numThreadDims) {}
+  ForLoopMapper() = default;
+  ForLoopMapper(const ForLoopMapper &) {}
+  ForLoopMapper(unsigned numBlockDims, unsigned numThreadDims) {
+    this->numBlockDims = numBlockDims;
+    this->numThreadDims = numThreadDims;
+  }
 
   void runOnFunction() override {
     for (Block &block : getFunction())
@@ -70,8 +51,14 @@ struct ForLoopMapper : public FunctionPass {
       }
     }
 
-  unsigned numBlockDims;
-  unsigned numThreadDims;
+  Option numBlockDims{
+      *this, "gpu-block-dims",
+      llvm::cl::desc("Number of GPU block dimensions for mapping"),
+      llvm::cl::init(1u)};
+  Option numThreadDims{
+      *this, "gpu-thread-dims",
+      llvm::cl::desc("Number of GPU thread dimensions for mapping"),
+      llvm::cl::init(1u)};
 };
 
 // A pass that traverses top-level loops in the function and convertes them to
@@ -81,10 +68,13 @@ struct ForLoopMapper : public FunctionPass {
 // to be perfectly nested upto depth equal to size of `workGroupSize`.
 struct ImperfectlyNestedForLoopMapper : public FunctionPass {
+  ImperfectlyNestedForLoopMapper() = default;
+  ImperfectlyNestedForLoopMapper(const ImperfectlyNestedForLoopMapper &) {}
   ImperfectlyNestedForLoopMapper(ArrayRef numWorkGroups,
-                                 ArrayRef workGroupSize)
-      : numWorkGroups(numWorkGroups.begin(), numWorkGroups.end()),
-        workGroupSize(workGroupSize.begin(), workGroupSize.end()) {}
+                                 ArrayRef workGroupSize) {
+    this->numWorkGroups->assign(numWorkGroups.begin(), numWorkGroups.end());
+    this->workGroupSize->assign(workGroupSize.begin(), workGroupSize.end());
+  }
 
   void runOnFunction() override {
     // Insert the num work groups and workgroup sizes as constant values. This
@@ -113,8 +103,14 @@
       }
     }
   }
-  SmallVector numWorkGroups;
-  SmallVector workGroupSize;
+  ListOption numWorkGroups{
+      *this, "gpu-num-workgroups",
+      llvm::cl::desc("Num workgroups in the GPU launch"), llvm::cl::ZeroOrMore,
+      llvm::cl::MiscFlags::CommaSeparated};
+  ListOption workGroupSize{
+      *this, "gpu-workgroup-size",
+      llvm::cl::desc("Workgroup Size in the GPU launch"), llvm::cl::ZeroOrMore,
+      llvm::cl::MiscFlags::CommaSeparated};
 };
 
 struct ParallelLoopToGpuPass : public OperationPass {
@@ -152,20 +148,11 @@ std::unique_ptr mlir::createParallelLoopToGpuPass() {
 }
 
 static PassRegistration
-    registration(PASS_NAME, "Convert top-level loops to GPU kernels", [] {
-      return std::make_unique(clNumBlockDims.getValue(),
-                              clNumThreadDims.getValue());
-    });
-
-static PassRegistration loopOpToGPU(
-    LOOPOP_TO_GPU_PASS_NAME, "Convert top-level loop::ForOp to GPU kernels",
-    [] {
-      SmallVector numWorkGroups, workGroupSize;
-      numWorkGroups.assign(clNumWorkGroups.begin(), clNumWorkGroups.end());
-      workGroupSize.assign(clWorkGroupSize.begin(), clWorkGroupSize.end());
-      return std::make_unique(numWorkGroups,
-                              workGroupSize);
-    });
+    registration(PASS_NAME, "Convert top-level loops to GPU kernels");
+
+static PassRegistration
+    loopOpToGPU(LOOPOP_TO_GPU_PASS_NAME,
+                "Convert top-level loop::ForOp to GPU kernels");
 
 static PassRegistration
     pass("convert-parallel-loops-to-gpu", "Convert mapped loop.parallel ops"
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 8da000fa5260d3..b6af16c979c357 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -55,14 +55,6 @@ using llvm::dbgs;
 /// More advanced use cases, analyses as well as profitability heuristics are
 /// left for future work.
 
-static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");
-static llvm::cl::list clTileSizes(
-    "linalg-fusion-tile-sizes",
-    llvm::cl::desc(
-        "Tile sizes by which to tile linalg operations during linalg fusion"),
-    llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
-    llvm::cl::cat(clOptionsCategory));
-
 // Return a cloned version of `op` that operates on `loopRanges`, assumed to be
 // a subset of the original loop ranges of `op`.
 // This is achieved by applying the `loopToOperandRangesMaps` permutation maps
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index bee3f0dff0d286..8a6b5cf8b5dafd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -43,12 +43,6 @@ using folded_linalg_range = folded::ValueBuilder;
 
 #define DEBUG_TYPE "linalg-promotion"
 
-static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");
-static llvm::cl::opt clPromoteDynamic(
-    "test-linalg-promote-dynamic",
-    llvm::cl::desc("Test generation of dynamic promoted buffers"),
-    llvm::cl::cat(clOptionsCategory), llvm::cl::init(false));
-
 static Value allocBuffer(Type elementType, Value size, bool dynamicBuffers) {
   auto *ctx = size.getContext();
   auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
@@ -238,13 +232,19 @@ static void promoteSubViews(FuncOp f, bool dynamicBuffers) {
 namespace {
 struct LinalgPromotionPass : public FunctionPass {
   LinalgPromotionPass() = default;
-  LinalgPromotionPass(bool dynamicBuffers) : dynamicBuffers(dynamicBuffers) {}
+  LinalgPromotionPass(const LinalgPromotionPass &) {}
+  LinalgPromotionPass(bool dynamicBuffers) {
+    this->dynamicBuffers = dynamicBuffers;
+  }
 
   void runOnFunction() override {
     promoteSubViews(getFunction(), dynamicBuffers);
   }
 
-  bool dynamicBuffers;
+  Option dynamicBuffers{
+      *this, "test-promote-dynamic",
+      llvm::cl::desc("Test generation of dynamic promoted buffers"),
+      llvm::cl::init(false)};
 };
 } // namespace
 
@@ -254,6 +254,4 @@ mlir::createLinalgPromotionPass(bool dynamicBuffers) {
 }
 
 static PassRegistration
-    pass("linalg-promote-subviews", "promote subview ops to local buffers", [] {
-      return std::make_unique(clPromoteDynamic);
-    });
+    pass("linalg-promote-subviews", "promote subview ops to local buffers");
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 7b4a5c651d243e..2d9ca16c63b698 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -39,13 +39,6 @@ using folded_affine_min = folded::ValueBuilder;
 
 #define DEBUG_TYPE "linalg-tiling"
 
-static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");
-static llvm::cl::list
-    clTileSizes("linalg-tile-sizes",
-                llvm::cl::desc("Tile sizes by which to tile linalg operations"),
-                llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
-                llvm::cl::cat(clOptionsCategory));
-
 static bool isZero(Value v) {
   return isa_and_nonnull(v.getDefiningOp()) &&
          cast(v.getDefiningOp()).getValue() == 0;
@@ -513,15 +506,19 @@ namespace {
 template
 struct LinalgTilingPass : public FunctionPass> {
   LinalgTilingPass() = default;
+  LinalgTilingPass(const LinalgTilingPass &) {}
   LinalgTilingPass(ArrayRef sizes) {
-    this->tileSizes.assign(sizes.begin(), sizes.end());
+    this->tileSizes->assign(sizes.begin(), sizes.end());
   }
 
   void runOnFunction() override {
     tileLinalgOps(this->getFunction(), tileSizes);
   }
 
-  SmallVector tileSizes;
+  Pass::ListOption tileSizes{
+      *this, "linalg-tile-sizes",
+      llvm::cl::desc("Tile sizes by which to tile linalg operations"),
+      llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated};
 };
 } // namespace
 
@@ -537,17 +534,9 @@ mlir::createLinalgTilingToParallelLoopsPass(ArrayRef tileSizes) {
 }
 
 static PassRegistration>
-    tiling_pass("linalg-tile", "Tile operations in the linalg dialect", [] {
-      auto pass = std::make_unique>();
-      pass->tileSizes.assign(clTileSizes.begin(), clTileSizes.end());
-      return pass;
-    });
+    tiling_pass("linalg-tile", "Tile operations in the linalg dialect");
 
 static PassRegistration> tiling_to_parallel_loops(
     "linalg-tile-to-parallel-loops",
-    "Tile operations in the linalg dialect to parallel loops", [] {
-      auto pass = std::make_unique>();
-      pass->tileSizes.assign(clTileSizes.begin(), clTileSizes.end());
-      return pass;
-    });
+    "Tile operations in the linalg dialect to parallel loops");
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index b89702b501887e..7d091b2f2433c0 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -414,7 +414,7 @@ using namespace mlir;
 ///
 /// The -affine-vectorize pass with the following arguments:
 /// ```
-/// -affine-vectorize -virtual-vector-size 256 --test-fastest-varying=0
+/// -affine-vectorize="virtual-vector-size=256 test-fastest-varying=0"
 /// ```
 ///
 /// produces this standard innermost-loop vectorized code:
@@ -468,8 +468,7 @@ using namespace mlir;
 ///
 /// The -affine-vectorize pass with the following arguments:
 /// ```
-/// -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256
-///     --test-fastest-varying=1 --test-fastest-varying=0
+/// -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0"
 /// ```
 ///
 /// produces this more interesting mixed outer-innermost-loop vectorized code:
@@ -531,21 +530,6 @@ using functional::map;
 using llvm::dbgs;
 using llvm::SetVector;
 
-static llvm::cl::OptionCategory clOptionsCategory("vectorize options");
-
-static llvm::cl::list clVirtualVectorSize(
-    "virtual-vector-size",
-    llvm::cl::desc("Specify an n-D virtual vector size for vectorization"),
-    llvm::cl::ZeroOrMore, llvm::cl::cat(clOptionsCategory));
-
-static llvm::cl::list clFastestVaryingPattern(
-    "test-fastest-varying",
-    llvm::cl::desc(
-        "Specify a 1-D, 2-D or 3-D pattern of fastest varying memory"
-        " dimensions to match. See defaultPatterns in Vectorize.cpp for a"
-        " description and examples. This is used for testing purposes"),
-    llvm::cl::ZeroOrMore, llvm::cl::cat(clOptionsCategory));
-
 /// Forward declaration.
 static FilterFunctionType
 isVectorizableLoopPtrFactory(const DenseSet &parallelLoops,
@@ -590,33 +574,35 @@ namespace {
 /// Base state for the vectorize pass.
 /// Command line arguments are preempted by non-empty pass arguments.
 struct Vectorize : public FunctionPass {
-  Vectorize();
+  Vectorize() = default;
+  Vectorize(const Vectorize &) {}
   Vectorize(ArrayRef virtualVectorSize);
   void runOnFunction() override;
 
-  // The virtual vector size that we vectorize to.
-  SmallVector vectorSizes;
-  // Optionally, the fixed mapping from loop to fastest varying MemRef dimension
-  // for all the MemRefs within a loop pattern:
-  // the index represents the loop depth, the value represents the k^th
-  // fastest varying memory dimension.
-  // This is voluntarily restrictive and is meant to precisely target a
-  // particular loop/op pair, for testing purposes.
-  SmallVector fastestVaryingPattern;
+  /// The virtual vector size that we vectorize to.
+  ListOption vectorSizes{
+      *this, "virtual-vector-size",
+      llvm::cl::desc("Specify an n-D virtual vector size for vectorization"),
+      llvm::cl::ZeroOrMore, llvm::cl::CommaSeparated};
+  /// Optionally, the fixed mapping from loop to fastest varying MemRef
+  /// dimension for all the MemRefs within a loop pattern:
+  /// the index represents the loop depth, the value represents the k^th
+  /// fastest varying memory dimension.
+  /// This is voluntarily restrictive and is meant to precisely target a
+  /// particular loop/op pair, for testing purposes.
+  ListOption fastestVaryingPattern{
+      *this, "test-fastest-varying",
+      llvm::cl::desc(
+          "Specify a 1-D, 2-D or 3-D pattern of fastest varying memory"
+          " dimensions to match. See defaultPatterns in Vectorize.cpp for a"
+          " description and examples. This is used for testing purposes"),
+      llvm::cl::ZeroOrMore, llvm::cl::CommaSeparated};
 };
 
 } // end anonymous namespace
 
-Vectorize::Vectorize()
-    : vectorSizes(clVirtualVectorSize.begin(), clVirtualVectorSize.end()),
-      fastestVaryingPattern(clFastestVaryingPattern.begin(),
-                            clFastestVaryingPattern.end()) {}
-
-Vectorize::Vectorize(ArrayRef virtualVectorSize) : Vectorize() {
-  if (!virtualVectorSize.empty()) {
-    this->vectorSizes.assign(virtualVectorSize.begin(),
-                             virtualVectorSize.end());
-  }
+Vectorize::Vectorize(ArrayRef virtualVectorSize) {
+  vectorSizes->assign(virtualVectorSize.begin(), virtualVectorSize.end());
 }
 
 /////// TODO(ntv): Hoist to a VectorizationStrategy.cpp when appropriate.
diff --git a/mlir/test/Conversion/LoopsToGPU/imperfect_2D.mlir b/mlir/test/Conversion/LoopsToGPU/imperfect_2D.mlir
index 59d18a1d7654e8..49562a7f7840f7 100644
--- a/mlir/test/Conversion/LoopsToGPU/imperfect_2D.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/imperfect_2D.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-loop-op-to-gpu -gpu-num-workgroups=2,2 -gpu-workgroup-size=32,4 %s | FileCheck %s
+// RUN: mlir-opt -convert-loop-op-to-gpu="gpu-num-workgroups=2,2 gpu-workgroup-size=32,4" %s | FileCheck %s
 
 module {
   // arg2 = arg0 * transpose(arg1) ; with intermediate buffer and tile size passed as argument
diff --git a/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir b/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir
index 73f0ab7d71baec..f6cc5e2398b5b9 100644
--- a/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-loop-op-to-gpu -gpu-num-workgroups=4,2,2 -gpu-workgroup-size=32,2,2 %s | FileCheck %s
+// RUN: mlir-opt -convert-loop-op-to-gpu="gpu-num-workgroups=4,2,2 gpu-workgroup-size=32,2,2" %s | FileCheck %s
 
 module {
   func @imperfect_3D(%arg0 : memref, %arg1 : memref, %arg2 : memref, %arg3 : memref, %t1 : index, %t2 : index, %t3 : index, %step1 : index, %step2 : index, %step3 : index) {
@@ -80,4 +80,4 @@
     }
     return
   }
-}
\ No newline at end of file
+}
diff --git a/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir b/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir
index 2c5dd5c0fb2fa3..8858a3e5e63159 100644
--- a/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-loop-op-to-gpu -gpu-num-workgroups=4,2,2 -gpu-workgroup-size=32,2,2 %s | FileCheck %s
+// RUN: mlir-opt -convert-loop-op-to-gpu="gpu-num-workgroups=4,2,2 gpu-workgroup-size=32,2,2" %s | FileCheck %s
 
 module {
   func @imperfect_3D(%arg0 : memref, %arg1 : memref, %arg2 : memref, %arg3 : memref, %t1 : index, %t2 : index, %t3 : index, %t4 : index, %step1 : index, %step2 : index, %step3 : index, %step4 : index) {
@@ -83,4 +83,4 @@
     }
     return
   }
-}
\ No newline at end of file
+}
diff --git a/mlir/test/Conversion/LoopsToGPU/imperfect_linalg.mlir b/mlir/test/Conversion/LoopsToGPU/imperfect_linalg.mlir
index abf8da6b562ea3..4ffb8906d4d6a5 100644
--- a/mlir/test/Conversion/LoopsToGPU/imperfect_linalg.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/imperfect_linalg.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-loop-op-to-gpu -gpu-num-workgroups=2,16 -gpu-workgroup-size=32,4 | FileCheck %s
+// RUN: mlir-opt %s -convert-loop-op-to-gpu="gpu-num-workgroups=2,16 gpu-workgroup-size=32,4" | FileCheck %s
 
 module {
   func @fmul(%arg0: memref, %arg1: memref, %arg2: memref) {
diff --git a/mlir/test/Conversion/LoopsToGPU/no_blocks_no_threads.mlir b/mlir/test/Conversion/LoopsToGPU/no_blocks_no_threads.mlir
index 51cedeb63cf464..6100a10e704f83 100644
--- a/mlir/test/Conversion/LoopsToGPU/no_blocks_no_threads.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/no_blocks_no_threads.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt -convert-loops-to-gpu -gpu-block-dims=0 -gpu-thread-dims=1 %s | FileCheck --check-prefix=CHECK-THREADS %s --dump-input-on-failure
-// RUN: mlir-opt -convert-loops-to-gpu -gpu-block-dims=1 -gpu-thread-dims=0 %s | FileCheck --check-prefix=CHECK-BLOCKS %s --dump-input-on-failure
+// RUN: mlir-opt -convert-loops-to-gpu="gpu-block-dims=0 gpu-thread-dims=1" %s | FileCheck --check-prefix=CHECK-THREADS %s --dump-input-on-failure
+// RUN: mlir-opt -convert-loops-to-gpu="gpu-block-dims=1 gpu-thread-dims=0" %s | FileCheck --check-prefix=CHECK-BLOCKS %s --dump-input-on-failure
 
 // CHECK-THREADS-LABEL: @one_d_loop
 // CHECK-BLOCKS-LABEL: @one_d_loop
diff --git a/mlir/test/Conversion/LoopsToGPU/perfect_1D_setlaunch.mlir b/mlir/test/Conversion/LoopsToGPU/perfect_1D_setlaunch.mlir
index bf437a348b64f7..2861b33c9e7bdf 100644
--- a/mlir/test/Conversion/LoopsToGPU/perfect_1D_setlaunch.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/perfect_1D_setlaunch.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-loop-op-to-gpu -gpu-num-workgroups=2 -gpu-workgroup-size=32 %s | FileCheck %s
+// RUN: mlir-opt -convert-loop-op-to-gpu="gpu-num-workgroups=2 gpu-workgroup-size=32" %s | FileCheck %s
 
 module {
   func @foo(%arg0: memref, %arg1 : memref, %arg2 : memref) {
@@ -23,4 +23,4 @@
     }
     return
   }
-}
\ No newline at end of file
+}
diff --git a/mlir/test/Conversion/LoopsToGPU/step_one.mlir b/mlir/test/Conversion/LoopsToGPU/step_one.mlir
index e0cdbd456209e2..a088880e5821cb 100644
--- a/mlir/test/Conversion/LoopsToGPU/step_one.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/step_one.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt -convert-loops-to-gpu -gpu-block-dims=1 -gpu-thread-dims=1 %s | FileCheck --check-prefix=CHECK-11 %s
-// RUN: mlir-opt -convert-loops-to-gpu -gpu-block-dims=2 -gpu-thread-dims=2 %s | FileCheck --check-prefix=CHECK-22 %s
+// RUN: mlir-opt -convert-loops-to-gpu="gpu-block-dims=1 gpu-thread-dims=1" %s | FileCheck --check-prefix=CHECK-11 %s
+// RUN: mlir-opt -convert-loops-to-gpu="gpu-block-dims=2 gpu-thread-dims=2" %s | FileCheck --check-prefix=CHECK-22 %s
 
 // CHECK-11-LABEL: @step_1
 // CHECK-22-LABEL: @step_1
diff --git a/mlir/test/Conversion/LoopsToGPU/step_positive.mlir b/mlir/test/Conversion/LoopsToGPU/step_positive.mlir
index 6bedc92abca627..9037eace6584b5 100644
--- a/mlir/test/Conversion/LoopsToGPU/step_positive.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/step_positive.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-loops-to-gpu -gpu-block-dims=1 -gpu-thread-dims=1 %s | FileCheck %s
+// RUN: mlir-opt -convert-loops-to-gpu="gpu-block-dims=1 gpu-thread-dims=1" %s | FileCheck %s
 
 // CHECK-LABEL: @step_var
 func @step_var(%A : memref, %B : memref) {
diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index e9eea206b26e66..a9b860d8f28b0a 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -1,5 +1,5 @@
 // RUN: mlir-opt %s -linalg-promote-subviews | FileCheck %s
-// RUN: mlir-opt %s -linalg-promote-subviews -test-linalg-promote-dynamic | FileCheck %s --check-prefix=DYNAMIC
+// RUN: mlir-opt %s -linalg-promote-subviews="test-promote-dynamic" | FileCheck %s --check-prefix=DYNAMIC
 
 #map0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 #map1 = affine_map<(d0) -> (d0 + 2)>
diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index c06447f29c0f9a..c1903cbd4d34af 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -1,7 +1,7 @@
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2 | FileCheck %s -check-prefix=TILE-2
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=0,2 | FileCheck %s -check-prefix=TILE-02
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=0,0,2 | FileCheck %s -check-prefix=TILE-002
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,4 | FileCheck %s -check-prefix=TILE-234
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2" | FileCheck %s -check-prefix=TILE-2
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,2" | FileCheck %s -check-prefix=TILE-02
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,0,2" | FileCheck %s -check-prefix=TILE-002
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" | FileCheck %s -check-prefix=TILE-234
 
 // TILE-2-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 // TILE-02-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
diff --git a/mlir/test/Dialect/Linalg/tile_conv.mlir b/mlir/test/Dialect/Linalg/tile_conv.mlir
index 25cabc02efb057..c62b240511e79b 100644
--- a/mlir/test/Dialect/Linalg/tile_conv.mlir
+++ b/mlir/test/Dialect/Linalg/tile_conv.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,0,0,4 | FileCheck %s -check-prefix=TILE-23004
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,0,0,4" | FileCheck %s -check-prefix=TILE-23004
 
 // TILE-23004-DAG: #[[D0x30pS0x10:.*]] = affine_map<(d0) -> (d0 * 30)>
 // TILE-23004-DAG: #[[S0x10p90:.*]] = affine_map<()[s0] -> (s0 * 10 + 90)>
diff --git a/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir b/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir
index 24619bf404b884..fc1d27a5a26864 100644
--- a/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir
+++ b/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir
@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=10,25 | FileCheck %s -check-prefix=TILE-10n25
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=25,0 | FileCheck %s -check-prefix=TILE-25n0
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=0,25 | FileCheck %s -check-prefix=TILE-0n25
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=10,25" | FileCheck %s -check-prefix=TILE-10n25
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=25,0" | FileCheck %s -check-prefix=TILE-25n0
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,25" | FileCheck %s -check-prefix=TILE-0n25
 
 #id_1d = affine_map<(i) -> (i)>
 #pointwise_1d_trait = {
diff --git a/mlir/test/Dialect/Linalg/tile_parallel.mlir b/mlir/test/Dialect/Linalg/tile_parallel.mlir
index 7db9da0715aa17..caca3a0e795e0d 100644
--- a/mlir/test/Dialect/Linalg/tile_parallel.mlir
+++ b/mlir/test/Dialect/Linalg/tile_parallel.mlir
@@ -1,7 +1,7 @@
-// RUN: mlir-opt %s -linalg-tile-to-parallel-loops -linalg-tile-sizes=2 | FileCheck %s -check-prefix=TILE-2 --dump-input-on-failure
-// RUN: mlir-opt %s -linalg-tile-to-parallel-loops -linalg-tile-sizes=0,2 | FileCheck %s -check-prefix=TILE-02 --dump-input-on-failure
-// RUN: mlir-opt %s -linalg-tile-to-parallel-loops -linalg-tile-sizes=0,0,2 | FileCheck %s -check-prefix=TILE-002 --dump-input-on-failure
-// RUN: mlir-opt %s -linalg-tile-to-parallel-loops -linalg-tile-sizes=2,3,4 | FileCheck %s -check-prefix=TILE-234 --dump-input-on-failure
+// RUN: mlir-opt %s -linalg-tile-to-parallel-loops="linalg-tile-sizes=2" | FileCheck %s -check-prefix=TILE-2 --dump-input-on-failure
+// RUN: mlir-opt %s -linalg-tile-to-parallel-loops="linalg-tile-sizes=0,2" | FileCheck %s -check-prefix=TILE-02 --dump-input-on-failure
+// RUN: mlir-opt %s -linalg-tile-to-parallel-loops="linalg-tile-sizes=0,0,2" | FileCheck %s -check-prefix=TILE-002 --dump-input-on-failure
+// RUN: mlir-opt %s -linalg-tile-to-parallel-loops="linalg-tile-sizes=2,3,4" | FileCheck %s -check-prefix=TILE-234 --dump-input-on-failure
 
 #id_2d = affine_map<(i, j) -> (i, j)>
 #pointwise_2d_trait = {
diff --git a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
index 7fbb6fe0b226f1..da9c214edb1aaa 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 128 --test-fastest-varying=0 | FileCheck %s
+// RUN: mlir-opt %s -affine-vectorize="virtual-vector-size=128 test-fastest-varying=0" | FileCheck %s
 
 // Permutation maps used in vectorization.
 // CHECK: #[[map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
index 8fa3842edea5ce..f4d83baa520f9d 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 4 -virtual-vector-size 8 | FileCheck %s -check-prefix=VECT
-// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=1 --test-fastest-varying=0 | FileCheck %s
+// RUN: mlir-opt %s -affine-vectorize="virtual-vector-size=4,8" | FileCheck %s -check-prefix=VECT
+// RUN: mlir-opt %s -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0" | FileCheck %s
 
 // Permutation maps used in vectorization.
 // CHECK-DAG: #[[map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
index b7355c6e3cf694..2fbe0fe7c45252 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 64 -virtual-vector-size 256 --test-fastest-varying=2 --test-fastest-varying=1 --test-fastest-varying=0 | FileCheck %s
+// RUN: mlir-opt %s -affine-vectorize="virtual-vector-size=32,64,256 test-fastest-varying=2,1,0" | FileCheck %s
 
 // Permutation maps used in vectorization.
 // CHECK: #[[map_proj_d0d1d2_d0d1d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
index 39350c88610bcb..0d5abdd70f5559 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=2 --test-fastest-varying=0 | FileCheck %s
+// RUN: mlir-opt %s -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=2,0" | FileCheck %s
 
 // Permutation maps used in vectorization.
 // CHECK: #[[map_proj_d0d1d2_d0d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
index bac0c0cdb58c93..e9c1bbdfde3d60 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=0 --test-fastest-varying=2 | FileCheck %s
+// RUN: mlir-opt %s -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=0,2" | FileCheck %s
 
 // Permutation maps used in vectorization.
 // CHECK: #[[map_proj_d0d1d2_d2d0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d0)>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
index d86ad1ccbde3bb..d4a721f7bf9999 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=0 --test-fastest-varying=1 | FileCheck %s
+// RUN: mlir-opt %s -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=0,1" | FileCheck %s
 
 // Permutation maps used in vectorization.
 // CHECK-DAG: #[[map_proj_d0d1d2_d2d1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
diff --git a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
index 306d86f03739bc..c7676f7031f325 100644
--- a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
+++ b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
@@ -2,8 +2,8 @@
 // RUN: mlir-opt %s -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e dot -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
 // RUN: mlir-opt %s -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
 // RUN: mlir-opt %s -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,4 -linalg-promote-subviews -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
-// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,4 -linalg-promote-subviews -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -linalg-promote-subviews -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -linalg-promote-subviews -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
 
 #strided1D = affine_map<(d0) -> (d0)>
 #strided2D = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
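Editor's note (not part of the patch): the change above replaces file-scope llvm::cl globals with options owned by each pass instance, which is why the RUN lines switch to the quoted -pass-name="key=value" form. The sketch below is a minimal illustration of that declaration style under the MLIR pass API assumed by this patch; the pass name "example-tile", the option names, and the pass body are hypothetical and exist only for the example.

#include "mlir/Pass/Pass.h"

using namespace mlir;

namespace {
// Hypothetical pass used only to illustrate instance-level pass options.
struct ExampleTilingPass : public FunctionPass<ExampleTilingPass> {
  ExampleTilingPass() = default;
  // Options hold a reference to their owning pass, so the copy constructor
  // re-initializes fresh options instead of copying members (the same idiom
  // the patch adds to each updated pass).
  ExampleTilingPass(const ExampleTilingPass &) {}

  void runOnFunction() override {
    // The options read like plain values: ListOption iterates like a vector,
    // Option converts to its underlying type.
    for (int64_t ts : tileSizes)
      (void)ts;
    if (useDynamicBuffers) {
      // ... behavior controlled by the flag would go here (illustrative only).
    }
  }

  ListOption<int64_t> tileSizes{
      *this, "tile-sizes", llvm::cl::desc("Tile sizes to apply"),
      llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated};
  Option<bool> useDynamicBuffers{
      *this, "use-dynamic-buffers",
      llvm::cl::desc("Use dynamically sized buffers"), llvm::cl::init(false)};
};
} // namespace

static PassRegistration<ExampleTilingPass>
    reg("example-tile", "Hypothetical pass illustrating pass options");

With a registration like this, a test would invoke it as mlir-opt %s -example-tile="tile-sizes=2,4 use-dynamic-buffers=true", matching the quoted option syntax used throughout the updated RUN lines above.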