33 changes: 17 additions & 16 deletions mlir/include/mlir/Conversion/Passes.td
@@ -15,7 +15,7 @@ include "mlir/Pass/PassBase.td"
// AffineToStandard
//===----------------------------------------------------------------------===//

def ConvertAffineToStandard : Pass<"lower-affine"> {
def ConvertAffineToStandard : FunctionPass<"lower-affine"> {
let summary = "Lower Affine operations to a combination of Standard and Loop "
"operations";
let description = [{
@@ -72,7 +72,7 @@ def ConvertAffineToStandard : Pass<"lower-affine"> {
// AVX512ToLLVM
//===----------------------------------------------------------------------===//

def ConvertAVX512ToLLVM : Pass<"convert-avx512-to-llvm"> {
def ConvertAVX512ToLLVM : Pass<"convert-avx512-to-llvm", "ModuleOp"> {
let summary = "Convert the operations from the avx512 dialect into the LLVM "
"dialect";
let constructor = "mlir::createConvertAVX512ToLLVMPass()";
@@ -82,7 +82,7 @@ def ConvertAVX512ToLLVM : Pass<"convert-avx512-to-llvm"> {
// GPUToCUDA
//===----------------------------------------------------------------------===//

def ConvertGpuLaunchFuncToCudaCalls : Pass<"launch-func-to-cuda"> {
def ConvertGpuLaunchFuncToCudaCalls : Pass<"launch-func-to-cuda", "ModuleOp"> {
let summary = "Convert all launch_func ops to CUDA runtime calls";
let constructor = "mlir::createConvertGpuLaunchFuncToCudaCallsPass()";
}
@@ -91,7 +91,7 @@ def ConvertGpuLaunchFuncToCudaCalls : Pass<"launch-func-to-cuda"> {
// GPUToNVVM
//===----------------------------------------------------------------------===//

def ConvertGpuOpsToNVVMOps : Pass<"convert-gpu-to-nvvm"> {
def ConvertGpuOpsToNVVMOps : Pass<"convert-gpu-to-nvvm", "gpu::GPUModuleOp"> {
let summary = "Generate NVVM operations for gpu operations";
let constructor = "mlir::createLowerGpuOpsToNVVMOpsPass()";
}
@@ -100,7 +100,7 @@ def ConvertGpuOpsToNVVMOps : Pass<"convert-gpu-to-nvvm"> {
// GPUToROCDL
//===----------------------------------------------------------------------===//

def ConvertGpuOpsToROCDLOps : Pass<"convert-gpu-to-rocdl"> {
def ConvertGpuOpsToROCDLOps : Pass<"convert-gpu-to-rocdl", "gpu::GPUModuleOp"> {
let summary = "Generate ROCDL operations for gpu operations";
let constructor = "mlir::createLowerGpuOpsToROCDLOpsPass()";
}
@@ -109,7 +109,7 @@ def ConvertGpuOpsToROCDLOps : Pass<"convert-gpu-to-rocdl"> {
// GPUToSPIRV
//===----------------------------------------------------------------------===//

def ConvertGPUToSPIRV : Pass<"convert-gpu-to-spirv"> {
def ConvertGPUToSPIRV : Pass<"convert-gpu-to-spirv", "ModuleOp"> {
let summary = "Convert GPU dialect to SPIR-V dialect";
let constructor = "mlir::createConvertGPUToSPIRVPass()";
}
@@ -119,12 +119,13 @@ def ConvertGPUToSPIRV : Pass<"convert-gpu-to-spirv"> {
//===----------------------------------------------------------------------===//

def ConvertGpuLaunchFuncToVulkanLaunchFunc
: Pass<"convert-gpu-launch-to-vulkan-launch"> {
: Pass<"convert-gpu-launch-to-vulkan-launch", "ModuleOp"> {
let summary = "Convert gpu.launch_func to vulkanLaunch external call";
let constructor = "mlir::createConvertGpuLaunchFuncToVulkanLaunchFuncPass()";
}

def ConvertVulkanLaunchFuncToVulkanCalls : Pass<"launch-func-to-vulkan"> {
def ConvertVulkanLaunchFuncToVulkanCalls
: Pass<"launch-func-to-vulkan", "ModuleOp"> {
let summary = "Convert vulkanLaunch external call to Vulkan runtime external "
"calls";
let constructor = "mlir::createConvertVulkanLaunchFuncToVulkanCallsPass()";
@@ -134,7 +135,7 @@ def ConvertVulkanLaunchFuncToVulkanCalls : Pass<"launch-func-to-vulkan"> {
// LinalgToLLVM
//===----------------------------------------------------------------------===//

def ConvertLinalgToLLVM : Pass<"convert-linalg-to-llvm"> {
def ConvertLinalgToLLVM : Pass<"convert-linalg-to-llvm", "ModuleOp"> {
let summary = "Convert the operations from the linalg dialect into the LLVM "
"dialect";
let constructor = "mlir::createConvertLinalgToLLVMPass()";
@@ -144,7 +145,7 @@ def ConvertLinalgToLLVM : Pass<"convert-linalg-to-llvm"> {
// LinalgToSPIRV
//===----------------------------------------------------------------------===//

def ConvertLinalgToSPIRV : Pass<"convert-linalg-to-spirv"> {
def ConvertLinalgToSPIRV : Pass<"convert-linalg-to-spirv", "ModuleOp"> {
let summary = "Convert Linalg ops to SPIR-V ops";
let constructor = "mlir::createLinalgToSPIRVPass()";
}
@@ -163,7 +164,7 @@ def ConvertLoopToStandard : Pass<"convert-loop-to-std"> {
// LoopsToGPU
//===----------------------------------------------------------------------===//

def ConvertSimpleLoopsToGPU : Pass<"convert-loops-to-gpu"> {
def ConvertSimpleLoopsToGPU : FunctionPass<"convert-loops-to-gpu"> {
let summary = "Convert top-level loops to GPU kernels";
let constructor = "mlir::createSimpleLoopsToGPUPass()";
let options = [
@@ -174,7 +175,7 @@ def ConvertSimpleLoopsToGPU : Pass<"convert-loops-to-gpu"> {
];
}

def ConvertLoopsToGPU : Pass<"convert-loop-op-to-gpu"> {
def ConvertLoopsToGPU : FunctionPass<"convert-loop-op-to-gpu"> {
let summary = "Convert top-level loop::ForOp to GPU kernels";
let constructor = "mlir::createLoopToGPUPass()";
let options = [
@@ -196,7 +197,7 @@ def ConvertParallelLoopToGpu : Pass<"convert-parallel-loops-to-gpu"> {
// StandardToLLVM
//===----------------------------------------------------------------------===//

def ConvertStandardToLLVM : Pass<"convert-std-to-llvm"> {
def ConvertStandardToLLVM : Pass<"convert-std-to-llvm", "ModuleOp"> {
let summary = "Convert scalar and vector operations from the Standard to the "
"LLVM dialect";
let description = [{
@@ -233,7 +234,7 @@ def ConvertStandardToLLVM : Pass<"convert-std-to-llvm"> {
"Emit wrappers for C-compatible pointer-to-struct memref "
"descriptors">,
Option<"indexBitwidth", "index-bitwidth", "unsigned",
/*default=*/"kDeriveIndexBitwidthFromDataLayout",
/*default=kDeriveIndexBitwidthFromDataLayout*/"0",
"Bitwidth of the index type, 0 to use size of machine word">,
];
}
@@ -247,7 +248,7 @@ def LegalizeStandardForSPIRV : Pass<"legalize-std-for-spirv"> {
let constructor = "mlir::createLegalizeStdOpsForSPIRVLoweringPass()";
}

def ConvertStandardToSPIRV : Pass<"convert-std-to-spirv"> {
def ConvertStandardToSPIRV : Pass<"convert-std-to-spirv", "ModuleOp"> {
let summary = "Convert Standard Ops to SPIR-V dialect";
let constructor = "mlir::createConvertStandardToSPIRVPass()";
}
@@ -256,7 +257,7 @@ def ConvertStandardToSPIRV : Pass<"convert-std-to-spirv"> {
// VectorToLLVM
//===----------------------------------------------------------------------===//

def ConvertVectorToLLVM : Pass<"convert-vector-to-llvm"> {
def ConvertVectorToLLVM : Pass<"convert-vector-to-llvm", "ModuleOp"> {
let summary = "Lower the operations from the vector dialect into the LLVM "
"dialect";
let constructor = "mlir::createConvertVectorToLLVMPass()";
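Illustrative aside (not part of the diff): the second template argument these tablegen defs now carry names the op the pass is anchored on, which is what decides where the pass can be scheduled. A minimal sketch, assuming the usual PassManager API from mlir/Pass/PassManager.h; header paths are assumptions except where they already appear in this patch.

```cpp
// Sketch only: schedule two of the conversion passes declared above.
// Function-anchored passes are nested under FuncOp; module-anchored passes
// are added at the top level of the pipeline.
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h" // createConvertVectorToLLVMPass
#include "mlir/IR/Function.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h" // createLowerAffinePass

static mlir::LogicalResult runConversions(mlir::MLIRContext &context,
                                          mlir::ModuleOp module) {
  mlir::PassManager pm(&context);
  // Anchored on FuncOp, so it runs nested on every function in the module.
  pm.nest<mlir::FuncOp>().addPass(mlir::createLowerAffinePass());
  // Anchored on ModuleOp, so it is added directly to the top-level manager.
  pm.addPass(mlir::createConvertVectorToLLVMPass());
  return pm.run(module);
}
```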
(new file)
@@ -14,7 +14,7 @@
namespace mlir {
class LLVMTypeConverter;
class ModuleOp;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;
class OwningRewritePatternList;

/// Collect a set of patterns to convert memory-related operations from the
@@ -61,7 +61,7 @@ struct LowerToLLVMOptions {
/// Creates a pass to convert the Standard dialect into the LLVMIR dialect.
/// stdlib malloc/free is used for allocating memrefs allocated with std.alloc,
/// while LLVM's alloca is used for those allocated with std.alloca.
std::unique_ptr<OpPassBase<ModuleOp>> createLowerToLLVMPass(
std::unique_ptr<OperationPass<ModuleOp>> createLowerToLLVMPass(
const LowerToLLVMOptions &options = {
/*useBarePtrCallConv=*/false, /*emitCWrappers=*/false,
/*indexBitwidth=*/kDeriveIndexBitwidthFromDataLayout});
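Illustrative aside (not part of the diff): callers that need a non-default lowering can pass an options struct explicitly; the field order below simply mirrors the default argument above.

```cpp
// Sketch only: request C-wrapper emission and 32-bit index lowering.
mlir::LowerToLLVMOptions options = {/*useBarePtrCallConv=*/false,
                                    /*emitCWrappers=*/true,
                                    /*indexBitwidth=*/32};
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> lowerToLLVM =
    mlir::createLowerToLLVMPass(options);
```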
(new file)
@@ -18,7 +18,7 @@
namespace mlir {

/// Pass to convert StandardOps to SPIR-V ops.
std::unique_ptr<OpPassBase<ModuleOp>> createConvertStandardToSPIRVPass();
std::unique_ptr<OperationPass<ModuleOp>> createConvertStandardToSPIRVPass();

/// Pass to legalize ops that are not directly lowered to SPIR-V.
std::unique_ptr<Pass> createLegalizeStdOpsForSPIRVLoweringPass();
(new file)
@@ -13,7 +13,7 @@
namespace mlir {
class LLVMTypeConverter;
class ModuleOp;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;

/// Collect a set of patterns to convert from Vector contractions to LLVM Matrix
/// Intrinsics. To lower to assembly, the LLVM flag -lower-matrix-intrinsics
@@ -26,7 +26,7 @@ void populateVectorToLLVMConversionPatterns(LLVMTypeConverter &converter,
OwningRewritePatternList &patterns);

/// Create a pass to convert vector operations to the LLVMIR dialect.
std::unique_ptr<OpPassBase<ModuleOp>> createConvertVectorToLLVMPass();
std::unique_ptr<OperationPass<ModuleOp>> createConvertVectorToLLVMPass();

} // namespace mlir

23 changes: 12 additions & 11 deletions mlir/include/mlir/Dialect/Affine/Passes.h
@@ -24,55 +24,56 @@ class AffineForOp;
class FuncOp;
class ModuleOp;
class Pass;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;

/// Creates a simplification pass for affine structures (maps and sets). In
/// addition, this pass also normalizes memrefs to have the trivial (identity)
/// layout map.
std::unique_ptr<OpPassBase<FuncOp>> createSimplifyAffineStructuresPass();
std::unique_ptr<OperationPass<FuncOp>> createSimplifyAffineStructuresPass();

/// Creates a loop invariant code motion pass that hoists loop invariant
/// operations out of affine loops.
std::unique_ptr<OpPassBase<FuncOp>> createAffineLoopInvariantCodeMotionPass();
std::unique_ptr<OperationPass<FuncOp>>
createAffineLoopInvariantCodeMotionPass();

/// Performs packing (or explicit copying) of accessed memref regions into
/// buffers in the specified faster memory space through either pointwise copies
/// or DMA operations.
std::unique_ptr<OpPassBase<FuncOp>> createAffineDataCopyGenerationPass(
std::unique_ptr<OperationPass<FuncOp>> createAffineDataCopyGenerationPass(
unsigned slowMemorySpace, unsigned fastMemorySpace,
unsigned tagMemorySpace = 0, int minDmaTransferSize = 1024,
uint64_t fastMemCapacityBytes = std::numeric_limits<uint64_t>::max());
/// Overload relying on pass options for initialization.
std::unique_ptr<OpPassBase<FuncOp>> createAffineDataCopyGenerationPass();
std::unique_ptr<OperationPass<FuncOp>> createAffineDataCopyGenerationPass();

/// Creates a pass to perform tiling on loop nests.
std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
createLoopTilingPass(uint64_t cacheSizeBytes);
/// Overload relying on pass options for initialization.
std::unique_ptr<OpPassBase<FuncOp>> createLoopTilingPass();
std::unique_ptr<OperationPass<FuncOp>> createLoopTilingPass();

/// Creates a loop unrolling pass with the provided parameters.
/// 'getUnrollFactor' is a function callback for clients to supply a function
/// that computes an unroll factor - the callback takes precedence over unroll
/// factors supplied through other means. If -1 is passed as the unrollFactor
/// and no callback is provided, anything passed from the command-line (if at
/// all) or the default unroll factor is used (LoopUnroll::kDefaultUnrollFactor).
std::unique_ptr<OpPassBase<FuncOp>> createLoopUnrollPass(
std::unique_ptr<OperationPass<FuncOp>> createLoopUnrollPass(
int unrollFactor = -1, int unrollFull = -1,
const std::function<unsigned(AffineForOp)> &getUnrollFactor = nullptr);
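Illustrative aside (not part of the diff): the callback overload takes precedence over any fixed factor, for example:

```cpp
// Sketch only: the unroll factor returned here (4) is an arbitrary placeholder,
// not a recommendation from this patch.
std::unique_ptr<mlir::OperationPass<mlir::FuncOp>> unroll =
    mlir::createLoopUnrollPass(
        /*unrollFactor=*/-1, /*unrollFull=*/-1,
        [](mlir::AffineForOp) -> unsigned { return 4; });
```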

/// Creates a loop unroll jam pass to unroll jam by the specified factor. A
/// factor of -1 lets the pass use the default factor or the one on the command
/// line if provided.
std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
createLoopUnrollAndJamPass(int unrollJamFactor = -1);

/// Creates a pass to vectorize loops, operations and data types using a
/// target-independent, n-D super-vector abstraction.
std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize);
/// Overload relying on pass options for initialization.
std::unique_ptr<OpPassBase<FuncOp>> createSuperVectorizePass();
std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass();

} // end namespace mlir

15 changes: 8 additions & 7 deletions mlir/include/mlir/Dialect/Affine/Passes.td
@@ -15,32 +15,33 @@

include "mlir/Pass/PassBase.td"

def AffineDataCopyGeneration : Pass<"affine-data-copy-generate"> {
def AffineDataCopyGeneration : FunctionPass<"affine-data-copy-generate"> {
let summary = "Generate explicit copying for affine memory operations";
let constructor = "mlir::createAffineDataCopyGenerationPass()";
}

def AffineLoopInvariantCodeMotion : Pass<"affine-loop-invariant-code-motion"> {
def AffineLoopInvariantCodeMotion
: FunctionPass<"affine-loop-invariant-code-motion"> {
let summary = "Hoist loop invariant instructions outside of affine loops";
let constructor = "mlir::createAffineLoopInvariantCodeMotionPass()";
}

def AffineLoopTiling : Pass<"affine-loop-tile"> {
def AffineLoopTiling : FunctionPass<"affine-loop-tile"> {
let summary = "Tile affine loop nests";
let constructor = "mlir::createLoopTilingPass()";
}

def AffineLoopUnroll : Pass<"affine-loop-unroll"> {
def AffineLoopUnroll : FunctionPass<"affine-loop-unroll"> {
let summary = "Unroll affine loops";
let constructor = "mlir::createLoopUnrollPass()";
}

def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam"> {
def AffineLoopUnrollAndJam : FunctionPass<"affine-loop-unroll-jam"> {
let summary = "Unroll and jam affine loops";
let constructor = "mlir::createLoopUnrollAndJamPass()";
}

def AffineVectorize : Pass<"affine-super-vectorize"> {
def AffineVectorize : FunctionPass<"affine-super-vectorize"> {
let summary = "Vectorize to a target independent n-D vector abstraction";
let constructor = "mlir::createSuperVectorizePass()";
let options = [
@@ -61,7 +62,7 @@ def AffineVectorize : Pass<"affine-super-vectorize"> {
];
}

def SimplifyAffineStructures : Pass<"simplify-affine-structures"> {
def SimplifyAffineStructures : FunctionPass<"simplify-affine-structures"> {
let summary = "Simplify affine expressions in maps/sets and normalize "
"memrefs";
let constructor = "mlir::createSimplifyAffineStructuresPass()";
4 changes: 2 additions & 2 deletions mlir/include/mlir/Dialect/GPU/Passes.h
@@ -19,10 +19,10 @@ namespace mlir {

class MLIRContext;
class ModuleOp;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;
class OwningRewritePatternList;

std::unique_ptr<OpPassBase<ModuleOp>> createGpuKernelOutliningPass();
std::unique_ptr<OperationPass<ModuleOp>> createGpuKernelOutliningPass();

/// Collect a set of patterns to rewrite ops within the GPU dialect.
void populateGpuRewritePatterns(MLIRContext *context,
2 changes: 1 addition & 1 deletion mlir/include/mlir/Dialect/GPU/Passes.td
@@ -11,7 +11,7 @@

include "mlir/Pass/PassBase.td"

def GpuKernelOutlining : Pass<"gpu-kernel-outlining"> {
def GpuKernelOutlining : Pass<"gpu-kernel-outlining", "ModuleOp"> {
let summary = "Outline gpu.launch bodies to kernel functions";
let constructor = "mlir::createGpuKernelOutliningPass()";
}
18 changes: 9 additions & 9 deletions mlir/include/mlir/Dialect/Linalg/Passes.h
@@ -19,34 +19,34 @@
namespace mlir {
class FuncOp;
class ModuleOp;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;
class Pass;

std::unique_ptr<OpPassBase<FuncOp>> createLinalgFusionPass();
std::unique_ptr<OperationPass<FuncOp>> createLinalgFusionPass();
std::unique_ptr<Pass> createLinalgFusionOfTensorOpsPass();

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
createLinalgTilingPass(ArrayRef<int64_t> tileSizes = {});

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
createLinalgTilingToParallelLoopsPass(ArrayRef<int64_t> tileSizes = {});

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
createLinalgPromotionPass(bool dynamicBuffers);
std::unique_ptr<OpPassBase<FuncOp>> createLinalgPromotionPass();
std::unique_ptr<OperationPass<FuncOp>> createLinalgPromotionPass();

/// Create a pass to convert Linalg operations to loop.for loops and
/// std.load/std.store accesses.
std::unique_ptr<OpPassBase<FuncOp>> createConvertLinalgToLoopsPass();
std::unique_ptr<OperationPass<FuncOp>> createConvertLinalgToLoopsPass();

/// Create a pass to convert Linalg operations to loop.parallel loops and
/// std.load/std.store accesses.
std::unique_ptr<OpPassBase<FuncOp>> createConvertLinalgToParallelLoopsPass();
std::unique_ptr<OperationPass<FuncOp>> createConvertLinalgToParallelLoopsPass();

/// Create a pass to convert Linalg operations to affine.for loops and
/// affine_load/affine_store accesses.
/// Placeholder for now, this is NYI.
std::unique_ptr<OpPassBase<FuncOp>> createConvertLinalgToAffineLoopsPass();
std::unique_ptr<OperationPass<FuncOp>> createConvertLinalgToAffineLoopsPass();

} // namespace mlir

16 changes: 9 additions & 7 deletions mlir/include/mlir/Dialect/Linalg/Passes.td
@@ -11,7 +11,7 @@

include "mlir/Pass/PassBase.td"

def LinalgFusion : Pass<"linalg-fusion"> {
def LinalgFusion : FunctionPass<"linalg-fusion"> {
let summary = "Fuse operations in the linalg dialect";
let constructor = "mlir::createLinalgFusionPass()";
}
@@ -21,24 +21,25 @@ def LinalgFusionOfTensorOps : Pass<"linalg-fusion-for-tensor-ops"> {
let constructor = "mlir::createLinalgFusionOfTensorOpsPass()";
}

def LinalgLowerToAffineLoops : Pass<"convert-linalg-to-affine-loops"> {
def LinalgLowerToAffineLoops : FunctionPass<"convert-linalg-to-affine-loops"> {
let summary = "Lower the operations from the linalg dialect into affine "
"loops";
let constructor = "mlir::createConvertLinalgToAffineLoopsPass()";
}

def LinalgLowerToLoops : Pass<"convert-linalg-to-loops"> {
def LinalgLowerToLoops : FunctionPass<"convert-linalg-to-loops"> {
let summary = "Lower the operations from the linalg dialect into loops";
let constructor = "mlir::createConvertLinalgToLoopsPass()";
}

def LinalgLowerToParallelLoops : Pass<"convert-linalg-to-parallel-loops"> {
def LinalgLowerToParallelLoops
: FunctionPass<"convert-linalg-to-parallel-loops"> {
let summary = "Lower the operations from the linalg dialect into parallel "
"loops";
let constructor = "mlir::createConvertLinalgToParallelLoopsPass()";
}

def LinalgPromotion : Pass<"linalg-promote-subviews"> {
def LinalgPromotion : FunctionPass<"linalg-promote-subviews"> {
let summary = "Promote subview ops to local buffers";
let constructor = "mlir::createLinalgPromotionPass()";
let options = [
@@ -47,7 +48,7 @@ def LinalgPromotion : Pass<"linalg-promote-subviews"> {
];
}

def LinalgTiling : Pass<"linalg-tile"> {
def LinalgTiling : FunctionPass<"linalg-tile"> {
let summary = "Tile operations in the linalg dialect";
let constructor = "mlir::createLinalgTilingPass()";
let options = [
@@ -57,7 +58,7 @@ def LinalgTiling : Pass<"linalg-tile"> {
];
}

def LinalgTilingToParallelLoops : Pass<"linalg-tile-to-parallel-loops"> {
def LinalgTilingToParallelLoops
: FunctionPass<"linalg-tile-to-parallel-loops"> {
let summary = "Tile operations in the linalg dialect to parallel loops";
let constructor = "mlir::createLinalgTilingToParallelLoopsPass()";
let options = [
5 changes: 3 additions & 2 deletions mlir/include/mlir/Dialect/LoopOps/Passes.td
@@ -16,12 +16,13 @@ def LoopParallelLoopFusion : Pass<"parallel-loop-fusion"> {
let constructor = "mlir::createParallelLoopFusionPass()";
}

def LoopParallelLoopSpecialization : Pass<"parallel-loop-specialization"> {
def LoopParallelLoopSpecialization
: FunctionPass<"parallel-loop-specialization"> {
let summary = "Specialize parallel loops for vectorization";
let constructor = "mlir::createParallelLoopSpecializationPass()";
}

def LoopParallelLoopTiling : Pass<"parallel-loop-tiling"> {
def LoopParallelLoopTiling : FunctionPass<"parallel-loop-tiling"> {
let summary = "Tile parallel loops";
let constructor = "mlir::createParallelLoopTilingPass()";
let options = [
6 changes: 3 additions & 3 deletions mlir/include/mlir/Dialect/Quant/Passes.h
@@ -20,20 +20,20 @@

namespace mlir {
class FuncOp;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;

namespace quant {

/// Creates a pass that converts quantization simulation operations (i.e.
/// FakeQuant and those like it) to casts into/out of supported QuantizedTypes.
std::unique_ptr<OpPassBase<FuncOp>> createConvertSimulatedQuantPass();
std::unique_ptr<OperationPass<FuncOp>> createConvertSimulatedQuantPass();

/// Creates a pass that converts constants followed by a qbarrier to a
/// constant whose value is quantized. This is typically one of the last
/// passes done when lowering to express actual quantized arithmetic in a
/// low level representation. Because it modifies the constant, it is
/// destructive and cannot be undone.
std::unique_ptr<OpPassBase<FuncOp>> createConvertConstPass();
std::unique_ptr<OperationPass<FuncOp>> createConvertConstPass();

} // namespace quant
} // namespace mlir
5 changes: 3 additions & 2 deletions mlir/include/mlir/Dialect/Quant/Passes.td
@@ -11,13 +11,14 @@

include "mlir/Pass/PassBase.td"

def QuantConvertConst : Pass<"quant-convert-const"> {
def QuantConvertConst : FunctionPass<"quant-convert-const"> {
let summary = "Converts constants followed by qbarrier to actual quantized "
"values";
let constructor = "mlir::quant::createConvertConstPass()";
}

def QuantConvertSimulatedQuant : Pass<"quant-convert-simulated-quantization"> {
def QuantConvertSimulatedQuant
: FunctionPass<"quant-convert-simulated-quantization"> {
let summary = "Converts training-time simulated quantization ops to "
"corresponding quantize/dequantize casts";
let constructor = "mlir::quant::createConvertSimulatedQuantPass()";
6 changes: 3 additions & 3 deletions mlir/include/mlir/Dialect/SPIRV/Passes.h
@@ -23,7 +23,7 @@ class ModuleOp;
/// StorageBuffer, PhysicalStorageBuffer, Uniform, and PushConstant storage
/// classes with layout information.
/// Right now this pass only supports Vulkan layout rules.
std::unique_ptr<OpPassBase<mlir::ModuleOp>>
std::unique_ptr<OperationPass<mlir::ModuleOp>>
createDecorateSPIRVCompositeTypeLayoutPass();

/// Creates an operation pass that deduces and attaches the minimal version/
@@ -34,7 +34,7 @@ createDecorateSPIRVCompositeTypeLayoutPass();
/// to know which one to pick. `spv.target_env` gives the hard limit as for
/// what the target environment can support; this pass deduces what are
/// actually needed for a specific spv.module op.
std::unique_ptr<OpPassBase<spirv::ModuleOp>>
std::unique_ptr<OperationPass<spirv::ModuleOp>>
createUpdateVersionCapabilityExtensionPass();

/// Creates an operation pass that lowers the ABI attributes specified during
@@ -44,7 +44,7 @@ createUpdateVersionCapabilityExtensionPass();
/// argument.
/// 2. Inserts the EntryPointOp and the ExecutionModeOp for entry point
/// functions using the specification in the `spv.entry_point_abi` attribute.
std::unique_ptr<OpPassBase<spirv::ModuleOp>> createLowerABIAttributesPass();
std::unique_ptr<OperationPass<spirv::ModuleOp>> createLowerABIAttributesPass();

} // namespace spirv
} // namespace mlir
7 changes: 4 additions & 3 deletions mlir/include/mlir/Dialect/SPIRV/Passes.td
@@ -11,17 +11,18 @@

include "mlir/Pass/PassBase.td"

def SPIRVCompositeTypeLayout : Pass<"decorate-spirv-composite-type-layout"> {
def SPIRVCompositeTypeLayout
: Pass<"decorate-spirv-composite-type-layout", "ModuleOp"> {
let summary = "Decorate SPIR-V composite type with layout info";
let constructor = "mlir::spirv::createDecorateSPIRVCompositeTypeLayoutPass()";
}

def SPIRVLowerABIAttributes : Pass<"spirv-lower-abi-attrs"> {
def SPIRVLowerABIAttributes : Pass<"spirv-lower-abi-attrs", "spirv::ModuleOp"> {
let summary = "Decorate SPIR-V composite type with layout info";
let constructor = "mlir::spirv::createLowerABIAttributesPass()";
}

def SPIRVUpdateVCE : Pass<"spirv-update-vce"> {
def SPIRVUpdateVCE : Pass<"spirv-update-vce", "spirv::ModuleOp"> {
let summary = "Deduce and attach minimal (version, capabilities, extensions) "
"requirements to spv.module ops";
let constructor = "mlir::spirv::createUpdateVersionCapabilityExtensionPass()";
230 changes: 108 additions & 122 deletions mlir/include/mlir/Pass/Pass.h
@@ -139,161 +139,119 @@ class Pass {
virtual void runOnOperation() = 0;

/// A clone method to create a copy of this pass.
virtual std::unique_ptr<Pass> clone() const = 0;
std::unique_ptr<Pass> clone() const {
auto newInst = clonePass();
newInst->copyOptionValuesFrom(this);
return newInst;
}

/// Return the current operation being transformed.
Operation *getOperation() {
return getPassState().irAndPassFailed.getPointer();
}

/// Returns the current analysis manager.
AnalysisManager getAnalysisManager() {
return getPassState().analysisManager;
}

/// Copy the option values from 'other', which is another instance of this
/// pass.
void copyOptionValuesFrom(const Pass *other);

private:
/// Forwarding function to execute this pass on the given operation.
LLVM_NODISCARD
LogicalResult run(Operation *op, AnalysisManager am);

/// Out of line virtual method to ensure vtables and metadata are emitted to a
/// single .o file.
virtual void anchor();

/// Represents a unique identifier for the pass.
const PassID *passID;

/// The name of the operation that this pass operates on, or None if this is a
/// generic OperationPass.
Optional<StringRef> opName;

/// The current execution state for the pass.
Optional<detail::PassExecutionState> passState;

/// The set of statistics held by this pass.
std::vector<Statistic *> statistics;

/// The pass options registered to this pass instance.
detail::PassOptions passOptions;

/// Allow access to 'clone' and 'run'.
friend class OpPassManager;

/// Allow access to 'passOptions'.
friend class PassInfo;
};

//===----------------------------------------------------------------------===//
// Pass Model Definitions
//===----------------------------------------------------------------------===//
namespace detail {
/// The opaque CRTP model of a pass. This class provides utilities for derived
/// pass execution and handles all of the necessary polymorphic API.
template <typename PassT, typename BasePassT>
class PassModel : public BasePassT {
public:
/// Support isa/dyn_cast functionality for the derived pass class.
static bool classof(const Pass *pass) {
return pass->getPassID() == PassID::getID<PassT>();
}

protected:
explicit PassModel(Optional<StringRef> opName = llvm::None)
: BasePassT(PassID::getID<PassT>(), opName) {}

/// Signal that some invariant was broken when running. The IR is allowed to
/// be in an invalid state.
void signalPassFailure() {
this->getPassState().irAndPassFailed.setInt(true);
}
void signalPassFailure() { getPassState().irAndPassFailed.setInt(true); }

/// Query an analysis for the current ir unit.
template <typename AnalysisT> AnalysisT &getAnalysis() {
return this->getAnalysisManager().template getAnalysis<AnalysisT>();
return getAnalysisManager().getAnalysis<AnalysisT>();
}

/// Query a cached instance of an analysis for the current ir unit if one
/// exists.
template <typename AnalysisT>
Optional<std::reference_wrapper<AnalysisT>> getCachedAnalysis() {
return this->getAnalysisManager().template getCachedAnalysis<AnalysisT>();
return getAnalysisManager().getCachedAnalysis<AnalysisT>();
}

/// Mark all analyses as preserved.
void markAllAnalysesPreserved() {
this->getPassState().preservedAnalyses.preserveAll();
getPassState().preservedAnalyses.preserveAll();
}

/// Mark the provided analyses as preserved.
template <typename... AnalysesT> void markAnalysesPreserved() {
this->getPassState().preservedAnalyses.template preserve<AnalysesT...>();
getPassState().preservedAnalyses.preserve<AnalysesT...>();
}
void markAnalysesPreserved(const AnalysisID *id) {
this->getPassState().preservedAnalyses.preserve(id);
}

/// Returns the derived pass name.
StringRef getName() override {
StringRef name = llvm::getTypeName<PassT>();
if (!name.consume_front("mlir::"))
name.consume_front("(anonymous namespace)::");
return name;
}

/// A clone method to create a copy of this pass.
std::unique_ptr<Pass> clone() const override {
auto newInst = std::make_unique<PassT>(*static_cast<const PassT *>(this));
newInst->copyOptionValuesFrom(this);
return newInst;
getPassState().preservedAnalyses.preserve(id);
}

/// Returns the analysis for the parent operation if it exists.
template <typename AnalysisT>
Optional<std::reference_wrapper<AnalysisT>>
getCachedParentAnalysis(Operation *parent) {
return this->getAnalysisManager()
.template getCachedParentAnalysis<AnalysisT>(parent);
return getAnalysisManager().getCachedParentAnalysis<AnalysisT>(parent);
}
template <typename AnalysisT>
Optional<std::reference_wrapper<AnalysisT>> getCachedParentAnalysis() {
return this->getAnalysisManager()
.template getCachedParentAnalysis<AnalysisT>(
this->getOperation()->getParentOp());
return getAnalysisManager().getCachedParentAnalysis<AnalysisT>(
getOperation()->getParentOp());
}

/// Returns the analysis for the given child operation if it exists.
template <typename AnalysisT>
Optional<std::reference_wrapper<AnalysisT>>
getCachedChildAnalysis(Operation *child) {
return this->getAnalysisManager()
.template getCachedChildAnalysis<AnalysisT>(child);
return getAnalysisManager().getCachedChildAnalysis<AnalysisT>(child);
}

/// Returns the analysis for the given child operation, or creates it if it
/// doesn't exist.
template <typename AnalysisT> AnalysisT &getChildAnalysis(Operation *child) {
return this->getAnalysisManager().template getChildAnalysis<AnalysisT>(
child);
return getAnalysisManager().getChildAnalysis<AnalysisT>(child);
}
};
} // end namespace detail

/// Utility base class for OpPass below to denote an opaque pass operating on a
/// specific operation type.
template <typename OpT> class OpPassBase : public Pass {
public:
using Pass::Pass;

/// Support isa/dyn_cast functionality.
static bool classof(const Pass *pass) {
return pass->getOpName() == OpT::getOperationName();
/// Returns the current analysis manager.
AnalysisManager getAnalysisManager() {
return getPassState().analysisManager;
}

/// Create a copy of this pass, ignoring statistics and options.
virtual std::unique_ptr<Pass> clonePass() const = 0;

/// Copy the option values from 'other', which is another instance of this
/// pass.
void copyOptionValuesFrom(const Pass *other);

private:
/// Forwarding function to execute this pass on the given operation.
LLVM_NODISCARD
LogicalResult run(Operation *op, AnalysisManager am);

/// Out of line virtual method to ensure vtables and metadata are emitted to a
/// single .o file.
virtual void anchor();

/// Represents a unique identifier for the pass.
const PassID *passID;

/// The name of the operation that this pass operates on, or None if this is a
/// generic OperationPass.
Optional<StringRef> opName;

/// The current execution state for the pass.
Optional<detail::PassExecutionState> passState;

/// The set of statistics held by this pass.
std::vector<Statistic *> statistics;

/// The pass options registered to this pass instance.
detail::PassOptions passOptions;

/// Allow access to 'clone' and 'run'.
friend class OpPassManager;

/// Allow access to 'passOptions'.
friend class PassInfo;
};

//===----------------------------------------------------------------------===//
// Pass Model Definitions
//===----------------------------------------------------------------------===//

/// Pass to transform an operation of a specific type.
///
/// Operation passes must not:
@@ -304,11 +262,16 @@ template <typename OpT> class OpPassBase : public Pass {
///
/// Derived function passes are expected to provide the following:
/// - A 'void runOnOperation()' method.
template <typename PassT, typename OpT = void>
class OperationPass : public detail::PassModel<PassT, OpPassBase<OpT>> {
/// - A 'StringRef getName() const' method.
/// - A 'std::unique_ptr<Pass> clonePass() const' method.
template <typename OpT = void> class OperationPass : public Pass {
protected:
OperationPass()
: detail::PassModel<PassT, OpPassBase<OpT>>(OpT::getOperationName()) {}
OperationPass(const PassID *passID) : Pass(passID, OpT::getOperationName()) {}

/// Support isa/dyn_cast functionality.
static bool classof(const Pass *pass) {
return pass->getOpName() == OpT::getOperationName();
}

/// Return the current operation being transformed.
OpT getOperation() { return cast<OpT>(Pass::getOperation()); }
@@ -324,14 +287,23 @@ class OperationPass : public detail::PassModel<PassT, OpPassBase<OpT>> {
///
/// Derived function passes are expected to provide the following:
/// - A 'void runOnOperation()' method.
template <typename PassT>
struct OperationPass<PassT, void> : public detail::PassModel<PassT, Pass> {};
/// - A 'StringRef getName() const' method.
/// - A 'std::unique_ptr<Pass> clonePass() const' method.
template <> class OperationPass<void> : public Pass {
protected:
OperationPass(const PassID *passID) : Pass(passID) {}
};

/// A model for providing function pass specific utilities.
///
/// Derived function passes are expected to provide the following:
/// - A 'void runOnFunction()' method.
template <typename T> struct FunctionPass : public OperationPass<T, FuncOp> {
/// - A 'StringRef getName() const' method.
/// - A 'std::unique_ptr<Pass> clonePass() const' method.
class FunctionPass : public OperationPass<FuncOp> {
public:
using OperationPass<FuncOp>::OperationPass;

/// The polymorphic API that runs the pass over the currently held function.
virtual void runOnFunction() = 0;

@@ -341,24 +313,38 @@ template <typename T> struct FunctionPass : public OperationPass<T, FuncOp> {
runOnFunction();
}

/// Return the current module being transformed.
/// Return the current function being transformed.
FuncOp getFunction() { return this->getOperation(); }
};

/// A model for providing module pass specific utilities.
///
/// Derived module passes are expected to provide the following:
/// - A 'void runOnModule()' method.
template <typename T> struct ModulePass : public OperationPass<T, ModuleOp> {
/// The polymorphic API that runs the pass over the currently held module.
virtual void runOnModule() = 0;
/// This class provides a CRTP wrapper around a base pass class to define
/// several necessary utility methods. This should only be used for passes that
/// are not suitably represented using the declarative pass specification (i.e.
/// tablegen backend).
template <typename PassT, typename BaseT> class PassWrapper : public BaseT {
public:
/// Support isa/dyn_cast functionality for the derived pass class.
static bool classof(const Pass *pass) {
return pass->getPassID() == PassID::getID<PassT>();
}

/// The polymorphic API that runs the pass over the currently held operation.
void runOnOperation() final { runOnModule(); }
protected:
PassWrapper() : BaseT(PassID::getID<PassT>()) {}

/// Returns the derived pass name.
StringRef getName() override {
StringRef name = llvm::getTypeName<PassT>();
if (!name.consume_front("mlir::"))
name.consume_front("(anonymous namespace)::");
return name;
}

/// Return the current module being transformed.
ModuleOp getModule() { return this->getOperation(); }
/// A clone method to create a copy of this pass.
std::unique_ptr<Pass> clonePass() const override {
return std::make_unique<PassT>(*static_cast<const PassT *>(this));
}
};
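Illustrative aside (not part of the diff): a hand-written pass that is not declared through tablegen now derives from PassWrapper, which supplies getName and clonePass. The pass below is hypothetical; only the base classes come from this patch, and the includes are the ones such a file would normally need.

```cpp
// Sketch only: count the operations in a module using the new base classes.
#include "mlir/IR/Module.h"
#include "mlir/Pass/Pass.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

namespace {
struct CountOpsPass
    : public mlir::PassWrapper<CountOpsPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  void runOnOperation() override {
    unsigned numOps = 0;
    // Walk every nested operation of the anchored ModuleOp.
    getOperation().walk([&](mlir::Operation *) { ++numOps; });
    llvm::errs() << "module contains " << numOps << " ops\n";
  }
};
} // namespace

std::unique_ptr<mlir::Pass> createCountOpsPass() {
  return std::make_unique<CountOpsPass>();
}
```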

} // end namespace mlir

#endif // MLIR_PASS_PASS_H
12 changes: 11 additions & 1 deletion mlir/include/mlir/Pass/PassBase.td
@@ -62,10 +62,13 @@ class Statistic<string varName, string statName, string desc> {
// Pass
//===----------------------------------------------------------------------===//

class Pass<string passArg> {
class PassBase<string passArg, string base> {
// The command line argument of the pass.
string argument = passArg;

// The C++ base class for the pass.
string baseClass = base;

// A short 1-line summary of the pass.
string summary = "";

@@ -82,4 +85,11 @@ class Pass<string passArg> {
list<Statistic> statistics = [];
}

// This class represents an mlir::OperationPass.
class Pass<string passArg, string operation = "">
: PassBase<passArg, "::mlir::OperationPass<" # operation # ">">;

// This class represents an mlir::FunctionPass.
class FunctionPass<string passArg> : PassBase<passArg, "::mlir::FunctionPass">;

#endif // MLIR_PASS_PASSBASE
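Illustrative aside (not part of the diff): how a declarative entry is expected to pair with its C++ implementation. The def and pass below are hypothetical, and the `<def name>Base` naming of the generated class is inferred from how the generated bases are used elsewhere in this patch (e.g. ConvertAffineToStandardBase).

```cpp
// Sketch only. Assume a tablegen entry such as:
//
//   def MyCleanup : FunctionPass<"my-cleanup"> {
//     let summary = "Hypothetical cleanup pass";
//     let constructor = "mlir::createMyCleanupPass()";
//   }
//
// The generated declarations (pulled in through the dialect's Passes.h.inc)
// are assumed to provide a CRTP base class MyCleanupBase deriving from
// mlir::FunctionPass, plus a declaration of createMyCleanupPass(). The
// implementation then looks like:
namespace {
class MyCleanupPass : public MyCleanupBase<MyCleanupPass> {
  void runOnFunction() override {
    // Transform getFunction() here.
  }
};
} // namespace

std::unique_ptr<mlir::OperationPass<mlir::FuncOp>> mlir::createMyCleanupPass() {
  return std::make_unique<MyCleanupPass>();
}
```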
3 changes: 3 additions & 0 deletions mlir/include/mlir/TableGen/Pass.h
@@ -82,6 +82,9 @@ class Pass {
/// Return the command line argument of the pass.
StringRef getArgument() const;

/// Return the name for the C++ base class.
StringRef getBaseClass() const;

/// Return the short 1-line summary of the pass.
StringRef getSummary() const;

14 changes: 7 additions & 7 deletions mlir/include/mlir/Transforms/Passes.h
@@ -24,7 +24,7 @@ class AffineForOp;
class FuncOp;
class ModuleOp;
class Pass;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;

/// Creates an instance of the Canonicalizer pass.
std::unique_ptr<Pass> createCanonicalizerPass();
Expand All @@ -35,7 +35,7 @@ std::unique_ptr<Pass> createCSEPass();
/// Creates a loop fusion pass which fuses loops. Buffers of size less than or
/// equal to `localBufSizeThreshold` are promoted to memory space
/// `fastMemorySpace'.
std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
createLoopFusionPass(unsigned fastMemorySpace = 0,
uint64_t localBufSizeThreshold = 0,
bool maximalFusion = false);
Expand All @@ -46,31 +46,31 @@ std::unique_ptr<Pass> createLoopInvariantCodeMotionPass();

/// Creates a pass to pipeline explicit movement of data across levels of the
/// memory hierarchy.
std::unique_ptr<OpPassBase<FuncOp>> createPipelineDataTransferPass();
std::unique_ptr<OperationPass<FuncOp>> createPipelineDataTransferPass();

/// Lowers affine control flow operations (ForStmt, IfStmt and AffineApplyOp)
/// to equivalent lower-level constructs (flow of basic blocks and arithmetic
/// primitives).
std::unique_ptr<OpPassBase<FuncOp>> createLowerAffinePass();
std::unique_ptr<OperationPass<FuncOp>> createLowerAffinePass();

/// Creates a pass that transforms perfectly nested loops with independent
/// bounds into a single loop.
std::unique_ptr<OpPassBase<FuncOp>> createLoopCoalescingPass();
std::unique_ptr<OperationPass<FuncOp>> createLoopCoalescingPass();

/// Creates a pass that transforms a single ParallelLoop over N induction
/// variables into another ParallelLoop over less than N induction variables.
std::unique_ptr<Pass> createParallelLoopCollapsingPass();

/// Creates a pass to perform optimizations relying on memref dataflow such as
/// store to load forwarding, elimination of dead stores, and dead allocs.
std::unique_ptr<OpPassBase<FuncOp>> createMemRefDataFlowOptPass();
std::unique_ptr<OperationPass<FuncOp>> createMemRefDataFlowOptPass();

/// Creates a pass to strip debug information from a function.
std::unique_ptr<Pass> createStripDebugInfoPass();

/// Creates a pass which prints the list of ops and the number of occurrences in
/// the module.
std::unique_ptr<OpPassBase<ModuleOp>> createPrintOpStatsPass();
std::unique_ptr<OperationPass<ModuleOp>> createPrintOpStatsPass();

/// Creates a pass which inlines calls and callable operations as defined by
/// the CallGraph.
16 changes: 8 additions & 8 deletions mlir/include/mlir/Transforms/Passes.td
@@ -15,8 +15,8 @@

include "mlir/Pass/PassBase.td"

def AffinePipelineDataTransfer : Pass<
"affine-pipeline-data-transfer"> {
def AffinePipelineDataTransfer
: FunctionPass<"affine-pipeline-data-transfer"> {
let summary = "Pipeline non-blocking data transfers between explicitly "
"managed levels of the memory hierarchy";
let description = [{
@@ -84,7 +84,7 @@ def AffinePipelineDataTransfer : Pass<
let constructor = "mlir::createPipelineDataTransferPass()";
}

def AffineLoopFusion : Pass<"affine-loop-fusion"> {
def AffineLoopFusion : FunctionPass<"affine-loop-fusion"> {
let summary = "Fuse affine loop nests";
let constructor = "mlir::createLoopFusionPass()";
}
@@ -120,7 +120,7 @@ def LocationSnapshot : Pass<"snapshot-op-locations"> {
];
}

def LoopCoalescing : Pass<"loop-coalescing"> {
def LoopCoalescing : FunctionPass<"loop-coalescing"> {
let summary = "Coalesce nested loops with independent bounds into a single "
"loop";
let constructor = "mlir::createLoopCoalescingPass()";
@@ -131,7 +131,7 @@ def LoopInvariantCodeMotion : Pass<"loop-invariant-code-motion"> {
let constructor = "mlir::createLoopInvariantCodeMotionPass()";
}

def MemRefDataFlowOpt : Pass<"memref-dataflow-opt"> {
def MemRefDataFlowOpt : FunctionPass<"memref-dataflow-opt"> {
let summary = "Perform store/load forwarding for memrefs";
let description = [{
This pass performs store to load forwarding for memrefs to eliminate memory
@@ -192,17 +192,17 @@ def ParallelLoopCollapsing : Pass<"parallel-loop-collapsing"> {
];
}

def PrintCFG : Pass<"print-cfg-graph"> {
def PrintCFG : FunctionPass<"print-cfg-graph"> {
let summary = "Print CFG graph per-Region";
let constructor = "mlir::createPrintCFGGraphPass()";
}

def PrintOpStats : Pass<"print-op-stats"> {
def PrintOpStats : Pass<"print-op-stats", "ModuleOp"> {
let summary = "Print statistics of operations";
let constructor = "mlir::createPrintOpStatsPass()";
}

def PrintOp : Pass<"print-op-graph"> {
def PrintOp : Pass<"print-op-graph", "ModuleOp"> {
let summary = "Print op graph per-Region";
let constructor = "mlir::createPrintOpGraphPass()";
}
4 changes: 2 additions & 2 deletions mlir/include/mlir/Transforms/ViewOpGraph.h
@@ -20,7 +20,7 @@
namespace mlir {
class Block;
class ModuleOp;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;

/// Displays the graph in a window. This is for use from the debugger and
/// depends on Graphviz to generate the graph.
@@ -32,7 +32,7 @@ raw_ostream &writeGraph(raw_ostream &os, Block &block, bool shortNames = false,
const Twine &title = "");

/// Creates a pass to print op graphs.
std::unique_ptr<OpPassBase<ModuleOp>>
std::unique_ptr<OperationPass<ModuleOp>>
createPrintOpGraphPass(raw_ostream &os = llvm::errs(), bool shortNames = false,
const Twine &title = "");

4 changes: 2 additions & 2 deletions mlir/include/mlir/Transforms/ViewRegionGraph.h
@@ -19,7 +19,7 @@

namespace mlir {
class FuncOp;
template <typename T> class OpPassBase;
template <typename T> class OperationPass;
class Region;

/// Displays the CFG in a window. This is for use from the debugger and
@@ -32,7 +32,7 @@ raw_ostream &writeGraph(raw_ostream &os, Region &region,
bool shortNames = false, const Twine &title = "");

/// Creates a pass to print CFG graphs.
std::unique_ptr<mlir::OpPassBase<mlir::FuncOp>>
std::unique_ptr<mlir::OperationPass<mlir::FuncOp>>
createPrintCFGGraphPass(raw_ostream &os = llvm::errs(), bool shortNames = false,
const Twine &title = "");

18 changes: 8 additions & 10 deletions mlir/lib/Conversion/AVX512ToLLVM/ConvertAVX512ToLLVM.cpp
@@ -8,6 +8,7 @@

#include "mlir/Conversion/AVX512ToLLVM/ConvertAVX512ToLLVM.h"

#include "../PassDetail.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
@@ -163,16 +164,13 @@ void mlir::populateAVX512ToLLVMConversionPatterns(
}

namespace {
struct ConvertAVX512ToLLVMPass : public ModulePass<ConvertAVX512ToLLVMPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertAVX512ToLLVM
#include "mlir/Conversion/Passes.h.inc"

void runOnModule() override;
struct ConvertAVX512ToLLVMPass
: public ConvertAVX512ToLLVMBase<ConvertAVX512ToLLVMPass> {
void runOnOperation() override;
};
} // namespace

void ConvertAVX512ToLLVMPass::runOnModule() {
void ConvertAVX512ToLLVMPass::runOnOperation() {
// Convert to the LLVM IR dialect.
OwningRewritePatternList patterns;
LLVMTypeConverter converter(&getContext());
@@ -186,12 +184,12 @@ void ConvertAVX512ToLLVMPass::runOnModule() {
target.addIllegalDialect<avx512::AVX512Dialect>();
target.addDynamicallyLegalOp<FuncOp>(
[&](FuncOp op) { return converter.isSignatureLegal(op.getType()); });
if (failed(
applyPartialConversion(getModule(), target, patterns, &converter))) {
if (failed(applyPartialConversion(getOperation(), target, patterns,
&converter))) {
signalPassFailure();
}
}

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createConvertAVX512ToLLVMPass() {
std::unique_ptr<OperationPass<ModuleOp>> mlir::createConvertAVX512ToLLVMPass() {
return std::make_unique<ConvertAVX512ToLLVMPass>();
}
9 changes: 3 additions & 6 deletions mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -13,6 +13,7 @@

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"

#include "../PassDetail.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
@@ -577,11 +578,7 @@ void mlir::populateAffineToStdConversionPatterns(
}

namespace {
class LowerAffinePass : public FunctionPass<LowerAffinePass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertAffineToStandard
#include "mlir/Conversion/Passes.h.inc"

class LowerAffinePass : public ConvertAffineToStandardBase<LowerAffinePass> {
void runOnFunction() override {
OwningRewritePatternList patterns;
populateAffineToStdConversionPatterns(patterns, &getContext());
@@ -595,6 +592,6 @@ class LowerAffinePass : public FunctionPass<LowerAffinePass> {

/// Lowers If and For operations within a function into their lower level CFG
/// equivalent blocks.
std::unique_ptr<OpPassBase<FuncOp>> mlir::createLowerAffinePass() {
std::unique_ptr<OperationPass<FuncOp>> mlir::createLowerAffinePass() {
return std::make_unique<LowerAffinePass>();
}
5 changes: 3 additions & 2 deletions mlir/lib/Conversion/GPUToCUDA/ConvertKernelFuncToCubin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,8 @@ static constexpr const char *kCubinAnnotation = "nvvm.cubin";
/// GPU binary code, which is then attached as an attribute to the function. The
/// function body is erased.
class GpuKernelToCubinPass
: public OperationPass<GpuKernelToCubinPass, gpu::GPUModuleOp> {
: public PassWrapper<GpuKernelToCubinPass,
OperationPass<gpu::GPUModuleOp>> {
public:
GpuKernelToCubinPass(CubinGenerator cubinGenerator)
: cubinGenerator(cubinGenerator) {}
@@ -143,7 +144,7 @@ StringAttr GpuKernelToCubinPass::translateGPUModuleToCubinAnnotation(
return StringAttr::get({cubin->data(), cubin->size()}, loc->getContext());
}

std::unique_ptr<OpPassBase<gpu::GPUModuleOp>>
std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createConvertGPUKernelToCubinPass(CubinGenerator cubinGenerator) {
return std::make_unique<GpuKernelToCubinPass>(cubinGenerator);
}
33 changes: 14 additions & 19 deletions mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,14 @@

#include "mlir/Conversion/GPUToCUDA/GPUToCUDAPass.h"

#include "../PassDetail.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/Pass.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DataLayout.h"
@@ -61,12 +61,8 @@ namespace {
///
/// Intermediate data structures are allocated on the stack.
class GpuLaunchFuncToCudaCallsPass
: public ModulePass<GpuLaunchFuncToCudaCallsPass> {
: public ConvertGpuLaunchFuncToCudaCallsBase<GpuLaunchFuncToCudaCallsPass> {
private:
/// Include the generated pass utilities.
#define GEN_PASS_ConvertGpuLaunchFuncToCudaCalls
#include "mlir/Conversion/Passes.h.inc"

LLVM::LLVMDialect *getLLVMDialect() { return llvmDialect; }

llvm::LLVMContext &getLLVMContext() {
@@ -126,20 +122,19 @@ class GpuLaunchFuncToCudaCallsPass

public:
// Run the dialect converter on the module.
void runOnModule() override {
void runOnOperation() override {
// Cache the LLVMDialect for the current module.
llvmDialect = getContext().getRegisteredDialect<LLVM::LLVMDialect>();
// Cache the used LLVM types.
initializeCachedTypes();

getModule().walk([this](mlir::gpu::LaunchFuncOp op) {
translateGpuLaunchCalls(op);
});
getOperation().walk(
[this](mlir::gpu::LaunchFuncOp op) { translateGpuLaunchCalls(op); });

// GPU kernel modules are no longer necessary since we have a global
// constant with the CUBIN data.
for (auto m :
llvm::make_early_inc_range(getModule().getOps<gpu::GPUModuleOp>()))
llvm::make_early_inc_range(getOperation().getOps<gpu::GPUModuleOp>()))
m.erase();
}

@@ -160,7 +155,7 @@ class GpuLaunchFuncToCudaCallsPass
// The types in comments give the actual types expected/returned but the API
// uses void pointers. This is fine as they have the same linkage in C.
void GpuLaunchFuncToCudaCallsPass::declareCudaFunctions(Location loc) {
ModuleOp module = getModule();
ModuleOp module = getOperation();
OpBuilder builder(module.getBody()->getTerminator());
if (!module.lookupSymbol(cuModuleLoadName)) {
builder.create<LLVM::LLVMFuncOp>(
@@ -391,7 +386,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
builder.getI32IntegerAttr(0));
// Create an LLVM global with CUBIN extracted from the kernel annotation and
// obtain a pointer to the first byte in it.
auto kernelModule = getModule().lookupSymbol<gpu::GPUModuleOp>(
auto kernelModule = getOperation().lookupSymbol<gpu::GPUModuleOp>(
launchOp.getKernelModuleName());
assert(kernelModule && "expected a kernel module");

@@ -412,7 +407,7 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
// in the called helper function.
auto cuModule = allocatePointer(builder, loc);
auto cuModuleLoad =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuModuleLoadName);
getOperation().lookupSymbol<LLVM::LLVMFuncOp>(cuModuleLoadName);
builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{getCUResultType()},
builder.getSymbolRefAttr(cuModuleLoad),
ArrayRef<Value>{cuModule, data});
@@ -423,20 +418,20 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
auto kernelName = generateKernelNameConstant(launchOp.kernel(), loc, builder);
auto cuFunction = allocatePointer(builder, loc);
auto cuModuleGetFunction =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuModuleGetFunctionName);
getOperation().lookupSymbol<LLVM::LLVMFuncOp>(cuModuleGetFunctionName);
builder.create<LLVM::CallOp>(
loc, ArrayRef<Type>{getCUResultType()},
builder.getSymbolRefAttr(cuModuleGetFunction),
ArrayRef<Value>{cuFunction, cuOwningModuleRef, kernelName});
// Grab the global stream needed for execution.
auto cuGetStreamHelper =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuGetStreamHelperName);
getOperation().lookupSymbol<LLVM::LLVMFuncOp>(cuGetStreamHelperName);
auto cuStream = builder.create<LLVM::CallOp>(
loc, ArrayRef<Type>{getPointerType()},
builder.getSymbolRefAttr(cuGetStreamHelper), ArrayRef<Value>{});
// Invoke the function with required arguments.
auto cuLaunchKernel =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuLaunchKernelName);
getOperation().lookupSymbol<LLVM::LLVMFuncOp>(cuLaunchKernelName);
auto cuFunctionRef =
builder.create<LLVM::LoadOp>(loc, getPointerType(), cuFunction);
auto paramsArray = setupParamsArray(launchOp, builder);
@@ -458,14 +453,14 @@ void GpuLaunchFuncToCudaCallsPass::translateGpuLaunchCalls(
nullpointer /* extra */});
// Sync on the stream to make it synchronous.
auto cuStreamSync =
getModule().lookupSymbol<LLVM::LLVMFuncOp>(cuStreamSynchronizeName);
getOperation().lookupSymbol<LLVM::LLVMFuncOp>(cuStreamSynchronizeName);
builder.create<LLVM::CallOp>(loc, ArrayRef<Type>{getCUResultType()},
builder.getSymbolRefAttr(cuStreamSync),
ArrayRef<Value>(cuStream.getResult(0)));
launchOp.erase();
}

std::unique_ptr<mlir::OpPassBase<mlir::ModuleOp>>
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createConvertGpuLaunchFuncToCudaCallsPass() {
return std::make_unique<GpuLaunchFuncToCudaCallsPass>();
}
10 changes: 3 additions & 7 deletions mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,12 @@
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/Support/FormatVariadic.h"

#include "../GPUCommon/IndexIntrinsicsOpLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"
#include "../PassDetail.h"

using namespace mlir;

@@ -246,12 +246,8 @@ struct GPUReturnOpLowering : public ConvertToLLVMPattern {
/// This pass only handles device code and is not meant to be run on GPU host
/// code.
class LowerGpuOpsToNVVMOpsPass
: public OperationPass<LowerGpuOpsToNVVMOpsPass, gpu::GPUModuleOp> {
: public ConvertGpuOpsToNVVMOpsBase<LowerGpuOpsToNVVMOpsPass> {
public:
/// Include the generated pass utilities.
#define GEN_PASS_ConvertGpuOpsToNVVMOps
#include "mlir/Conversion/Passes.h.inc"

void runOnOperation() override {
gpu::GPUModuleOp m = getOperation();

@@ -324,7 +320,7 @@ void mlir::populateGpuToNVVMConversionPatterns(
"__nv_tanh");
}

std::unique_ptr<OpPassBase<gpu::GPUModuleOp>>
std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createLowerGpuOpsToNVVMOpsPass() {
return std::make_unique<LowerGpuOpsToNVVMOpsPass>();
}
10 changes: 3 additions & 7 deletions mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@

#include "../GPUCommon/IndexIntrinsicsOpLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"
#include "../PassDetail.h"

using namespace mlir;

@@ -32,12 +33,8 @@ namespace {
// This pass only handles device code and is not meant to be run on GPU host
// code.
class LowerGpuOpsToROCDLOpsPass
: public OperationPass<LowerGpuOpsToROCDLOpsPass, gpu::GPUModuleOp> {
: public ConvertGpuOpsToROCDLOpsBase<LowerGpuOpsToROCDLOpsPass> {
public:
/// Include the generated pass utilities.
#define GEN_PASS_ConvertGpuOpsToROCDLOps
#include "mlir/Conversion/Passes.h.inc"

void runOnOperation() override {
gpu::GPUModuleOp m = getOperation();

@@ -83,8 +80,7 @@ class LowerGpuOpsToROCDLOpsPass

} // anonymous namespace

std::unique_ptr<OpPassBase<gpu::GPUModuleOp>>
std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createLowerGpuOpsToROCDLOpsPass() {
return std::make_unique<LowerGpuOpsToROCDLOpsPass>();
}

16 changes: 6 additions & 10 deletions mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRVPass.cpp
@@ -12,14 +12,14 @@
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUToSPIRV/ConvertGPUToSPIRVPass.h"
#include "../PassDetail.h"
#include "mlir/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.h"
#include "mlir/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SPIRV/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVLowering.h"
#include "mlir/Dialect/SPIRV/SPIRVOps.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

@@ -33,18 +33,14 @@ namespace {
/// replace it).
///
/// 2) Lower the body of the spirv::ModuleOp.
struct GPUToSPIRVPass : public ModulePass<GPUToSPIRVPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertGpuToSPIRV
#include "mlir/Conversion/Passes.h.inc"

void runOnModule() override;
struct GPUToSPIRVPass : public ConvertGPUToSPIRVBase<GPUToSPIRVPass> {
void runOnOperation() override;
};
} // namespace

void GPUToSPIRVPass::runOnModule() {
void GPUToSPIRVPass::runOnOperation() {
MLIRContext *context = &getContext();
ModuleOp module = getModule();
ModuleOp module = getOperation();

SmallVector<Operation *, 1> kernelModules;
OpBuilder builder(context);
@@ -71,6 +67,6 @@ void GPUToSPIRVPass::runOnModule() {
}
}

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createConvertGPUToSPIRVPass() {
std::unique_ptr<OperationPass<ModuleOp>> mlir::createConvertGPUToSPIRVPass() {
return std::make_unique<GPUToSPIRVPass>();
}
@@ -13,6 +13,7 @@
//
//===----------------------------------------------------------------------===//

#include "../PassDetail.h"
#include "mlir/Conversion/GPUToVulkan/ConvertGPUToVulkanPass.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVOps.h"
@@ -23,7 +24,6 @@
#include "mlir/IR/Function.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

@@ -38,13 +38,10 @@ namespace {
/// function and attaching binary data and entry point name as attributes to
/// the created vulkan launch call op.
class ConvertGpuLaunchFuncToVulkanLaunchFunc
: public ModulePass<ConvertGpuLaunchFuncToVulkanLaunchFunc> {
: public ConvertGpuLaunchFuncToVulkanLaunchFuncBase<
ConvertGpuLaunchFuncToVulkanLaunchFunc> {
public:
/// Include the generated pass utilities.
#define GEN_PASS_ConvertGpuLaunchFuncToVulkanLaunchFunc
#include "mlir/Conversion/Passes.h.inc"

void runOnModule() override;
void runOnOperation() override;

private:
/// Creates a SPIR-V binary shader from the given `module` using
@@ -68,14 +65,13 @@ class ConvertGpuLaunchFuncToVulkanLaunchFunc
/// operand is unsupported by Vulkan runtime.
LogicalResult declareVulkanLaunchFunc(Location loc,
gpu::LaunchFuncOp launchOp);

};

} // anonymous namespace

void ConvertGpuLaunchFuncToVulkanLaunchFunc::runOnModule() {
void ConvertGpuLaunchFuncToVulkanLaunchFunc::runOnOperation() {
bool done = false;
getModule().walk([this, &done](gpu::LaunchFuncOp op) {
getOperation().walk([this, &done](gpu::LaunchFuncOp op) {
if (done) {
op.emitError("should only contain one 'gpu::LaunchFuncOp' op");
return signalPassFailure();
@@ -86,17 +82,17 @@ void ConvertGpuLaunchFuncToVulkanLaunchFunc::runOnModule() {

// Erase `gpu::GPUModuleOp` and `spirv::Module` operations.
for (auto gpuModule :
llvm::make_early_inc_range(getModule().getOps<gpu::GPUModuleOp>()))
llvm::make_early_inc_range(getOperation().getOps<gpu::GPUModuleOp>()))
gpuModule.erase();

for (auto spirvModule :
llvm::make_early_inc_range(getModule().getOps<spirv::ModuleOp>()))
llvm::make_early_inc_range(getOperation().getOps<spirv::ModuleOp>()))
spirvModule.erase();
}

LogicalResult ConvertGpuLaunchFuncToVulkanLaunchFunc::declareVulkanLaunchFunc(
Location loc, gpu::LaunchFuncOp launchOp) {
OpBuilder builder(getModule().getBody()->getTerminator());
OpBuilder builder(getOperation().getBody()->getTerminator());
// TODO: Workgroup size is written into the kernel. So to properly model the
// vulkan launch, we cannot have the local workgroup size configuration here.
SmallVector<Type, 8> vulkanLaunchTypes{launchOp.getOperandTypes()};
@@ -138,7 +134,7 @@ LogicalResult ConvertGpuLaunchFuncToVulkanLaunchFunc::createBinaryShader(

void ConvertGpuLaunchFuncToVulkanLaunchFunc::convertGpuLaunchFunc(
gpu::LaunchFuncOp launchOp) {
ModuleOp module = getModule();
ModuleOp module = getOperation();
OpBuilder builder(launchOp);
Location loc = launchOp.getLoc();

@@ -169,7 +165,7 @@ void ConvertGpuLaunchFuncToVulkanLaunchFunc::convertGpuLaunchFunc(
launchOp.erase();
}

std::unique_ptr<mlir::OpPassBase<mlir::ModuleOp>>
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createConvertGpuLaunchFuncToVulkanLaunchFuncPass() {
return std::make_unique<ConvertGpuLaunchFuncToVulkanLaunchFunc>();
}
21 changes: 9 additions & 12 deletions mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
@@ -14,14 +14,14 @@
//
//===----------------------------------------------------------------------===//

#include "../PassDetail.h"
#include "mlir/Conversion/GPUToVulkan/ConvertGPUToVulkanPass.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Module.h"
#include "mlir/Pass/Pass.h"

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/FormatVariadic.h"
@@ -58,12 +58,9 @@ namespace {
/// * deinitVulkan -- deinitializes vulkan runtime
///
class VulkanLaunchFuncToVulkanCallsPass
: public ModulePass<VulkanLaunchFuncToVulkanCallsPass> {
: public ConvertVulkanLaunchFuncToVulkanCallsBase<
VulkanLaunchFuncToVulkanCallsPass> {
private:
/// Include the generated pass utilities.
#define GEN_PASS_ConvertVulkanLaunchFuncToVulkanCalls
#include "mlir/Conversion/Passes.h.inc"

LLVM::LLVMDialect *getLLVMDialect() { return llvmDialect; }

llvm::LLVMContext &getLLVMContext() {
@@ -150,7 +147,7 @@ class VulkanLaunchFuncToVulkanCallsPass
LogicalResult deduceMemRefRank(Value ptrToMemRefDescriptor, uint32_t &rank);

public:
void runOnModule() override;
void runOnOperation() override;

private:
LLVM::LLVMDialect *llvmDialect;
@@ -169,18 +166,18 @@

} // anonymous namespace

void VulkanLaunchFuncToVulkanCallsPass::runOnModule() {
void VulkanLaunchFuncToVulkanCallsPass::runOnOperation() {
initializeCachedTypes();

// Collect SPIR-V attributes such as `spirv_blob` and
// `spirv_entry_point_name`.
getModule().walk([this](LLVM::CallOp op) {
getOperation().walk([this](LLVM::CallOp op) {
if (isVulkanLaunchCallOp(op))
collectSPIRVAttributes(op);
});

// Convert vulkan launch call op into a sequence of Vulkan runtime calls.
getModule().walk([this](LLVM::CallOp op) {
getOperation().walk([this](LLVM::CallOp op) {
if (isCInterfaceVulkanLaunchCallOp(op))
translateVulkanLaunchCall(op);
});
@@ -278,7 +275,7 @@ VulkanLaunchFuncToVulkanCallsPass::deduceMemRefRank(Value ptrToMemRefDescriptor,
}

void VulkanLaunchFuncToVulkanCallsPass::declareVulkanFunctions(Location loc) {
ModuleOp module = getModule();
ModuleOp module = getOperation();
OpBuilder builder(module.getBody()->getTerminator());

if (!module.lookupSymbol(kSetEntryPoint)) {
@@ -436,7 +433,7 @@ void VulkanLaunchFuncToVulkanCallsPass::translateVulkanLaunchCall(
cInterfaceVulkanLaunchCallOp.erase();
}

std::unique_ptr<mlir::OpPassBase<mlir::ModuleOp>>
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createConvertVulkanLaunchFuncToVulkanCallsPass() {
return std::make_unique<VulkanLaunchFuncToVulkanCallsPass>();
}
18 changes: 7 additions & 11 deletions mlir/lib/Conversion/LinalgToLLVM/LinalgToLLVM.cpp
@@ -8,6 +8,7 @@

#include "mlir/Conversion/LinalgToLLVM/LinalgToLLVM.h"

#include "../PassDetail.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
@@ -28,8 +29,6 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
@@ -561,17 +560,14 @@ void mlir::populateLinalgToLLVMConversionPatterns(
}

namespace {
struct ConvertLinalgToLLVMPass : public ModulePass<ConvertLinalgToLLVMPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertLinalgToLLVM
#include "mlir/Conversion/Passes.h.inc"

void runOnModule() override;
struct ConvertLinalgToLLVMPass
: public ConvertLinalgToLLVMBase<ConvertLinalgToLLVMPass> {
void runOnOperation() override;
};
} // namespace

void ConvertLinalgToLLVMPass::runOnModule() {
auto module = getModule();
void ConvertLinalgToLLVMPass::runOnOperation() {
auto module = getOperation();

// Convert to the LLVM IR dialect using the converter defined above.
OwningRewritePatternList patterns;
@@ -592,6 +588,6 @@ void ConvertLinalgToLLVMPass::runOnModule() {
signalPassFailure();
}

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createConvertLinalgToLLVMPass() {
std::unique_ptr<OperationPass<ModuleOp>> mlir::createConvertLinalgToLLVMPass() {
return std::make_unique<ConvertLinalgToLLVMPass>();
}
16 changes: 6 additions & 10 deletions mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRVPass.cpp
@@ -7,27 +7,23 @@
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/LinalgToSPIRV/LinalgToSPIRVPass.h"
#include "../PassDetail.h"
#include "mlir/Conversion/LinalgToSPIRV/LinalgToSPIRV.h"
#include "mlir/Dialect/SPIRV/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVLowering.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

namespace {
/// A pass converting MLIR Linalg ops into SPIR-V ops.
class LinalgToSPIRVPass : public ModulePass<LinalgToSPIRVPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertLinalgToSPIRV
#include "mlir/Conversion/Passes.h.inc"

void runOnModule() override;
class LinalgToSPIRVPass : public ConvertLinalgToSPIRVBase<LinalgToSPIRVPass> {
void runOnOperation() override;
};
} // namespace

void LinalgToSPIRVPass::runOnModule() {
void LinalgToSPIRVPass::runOnOperation() {
MLIRContext *context = &getContext();
ModuleOp module = getModule();
ModuleOp module = getOperation();

auto targetAttr = spirv::lookupTargetEnvOrDefault(module);
std::unique_ptr<ConversionTarget> target =
@@ -47,6 +43,6 @@ void LinalgToSPIRVPass::runOnModule() {
return signalPassFailure();
}

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createLinalgToSPIRVPass() {
std::unique_ptr<OperationPass<ModuleOp>> mlir::createLinalgToSPIRVPass() {
return std::make_unique<LinalgToSPIRVPass>();
}
9 changes: 3 additions & 6 deletions mlir/lib/Conversion/LoopToStandard/LoopToStandard.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//

#include "../PassDetail.h"
#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
@@ -19,7 +20,6 @@
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/Functional.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
@@ -30,11 +30,8 @@ using namespace mlir::loop;

namespace {

struct LoopToStandardPass : public OperationPass<LoopToStandardPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertLoopToStandard
#include "mlir/Conversion/Passes.h.inc"

struct LoopToStandardPass
: public ConvertLoopToStandardBase<LoopToStandardPass> {
void runOnOperation() override;
};

31 changes: 9 additions & 22 deletions mlir/lib/Conversion/LoopsToGPU/LoopsToGPUPass.cpp
@@ -7,12 +7,12 @@
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/LoopsToGPU/LoopsToGPUPass.h"
#include "../PassDetail.h"
#include "mlir/Conversion/LoopsToGPU/LoopsToGPU.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"

#include "llvm/ADT/ArrayRef.h"
@@ -28,13 +28,8 @@ namespace {
// A pass that traverses top-level loops in the function and converts them to
// GPU launch operations. Nested launches are not allowed, so this does not
// walk the function recursively to avoid considering nested loops.
struct ForLoopMapper : public FunctionPass<ForLoopMapper> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertSimpleLoopsToGPU
#include "mlir/Conversion/Passes.h.inc"

struct ForLoopMapper : public ConvertSimpleLoopsToGPUBase<ForLoopMapper> {
ForLoopMapper() = default;
ForLoopMapper(const ForLoopMapper &) {}
ForLoopMapper(unsigned numBlockDims, unsigned numThreadDims) {
this->numBlockDims = numBlockDims;
this->numThreadDims = numThreadDims;
@@ -62,13 +57,8 @@ struct ForLoopMapper : public FunctionPass<ForLoopMapper> {
// nested loops as the size of `numWorkGroups`. Within these, any loop nest has
// to be perfectly nested up to a depth equal to the size of `workGroupSize`.
struct ImperfectlyNestedForLoopMapper
: public FunctionPass<ImperfectlyNestedForLoopMapper> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertLoopsToGPU
#include "mlir/Conversion/Passes.h.inc"

: public ConvertLoopsToGPUBase<ImperfectlyNestedForLoopMapper> {
ImperfectlyNestedForLoopMapper() = default;
ImperfectlyNestedForLoopMapper(const ImperfectlyNestedForLoopMapper &) {}
ImperfectlyNestedForLoopMapper(ArrayRef<int64_t> numWorkGroups,
ArrayRef<int64_t> workGroupSize) {
this->numWorkGroups->assign(numWorkGroups.begin(), numWorkGroups.end());
@@ -104,11 +94,8 @@ struct ImperfectlyNestedForLoopMapper
}
};

struct ParallelLoopToGpuPass : public OperationPass<ParallelLoopToGpuPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertParallelLoopToGpu
#include "mlir/Conversion/Passes.h.inc"

struct ParallelLoopToGpuPass
: public ConvertParallelLoopToGpuBase<ParallelLoopToGpuPass> {
void runOnOperation() override {
OwningRewritePatternList patterns;
populateParallelLoopToGPUPatterns(patterns, &getContext());
@@ -125,22 +112,22 @@

} // namespace

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createSimpleLoopsToGPUPass(unsigned numBlockDims,
unsigned numThreadDims) {
return std::make_unique<ForLoopMapper>(numBlockDims, numThreadDims);
}
std::unique_ptr<OpPassBase<FuncOp>> mlir::createSimpleLoopsToGPUPass() {
std::unique_ptr<OperationPass<FuncOp>> mlir::createSimpleLoopsToGPUPass() {
return std::make_unique<ForLoopMapper>();
}

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopToGPUPass(ArrayRef<int64_t> numWorkGroups,
ArrayRef<int64_t> workGroupSize) {
return std::make_unique<ImperfectlyNestedForLoopMapper>(numWorkGroups,
workGroupSize);
}
std::unique_ptr<OpPassBase<FuncOp>> mlir::createLoopToGPUPass() {
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopToGPUPass() {
return std::make_unique<ImperfectlyNestedForLoopMapper>();
}
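The constructors above show where the option state now lives: numBlockDims, numThreadDims, numWorkGroups and workGroupSize are members declared by the generated ConvertSimpleLoopsToGPUBase and ConvertLoopsToGPUBase classes (the list options are reached through operator->, hence the numWorkGroups->assign(...) call), and the hand-written copy constructors are no longer needed. Programmatic construction only changes in its return type; a small usage sketch, with pass manager wiring left out:

// Build the function-level mapping passes with explicit configurations.
std::unique_ptr<OperationPass<FuncOp>> simple =
    mlir::createSimpleLoopsToGPUPass(/*numBlockDims=*/2, /*numThreadDims=*/2);
std::unique_ptr<OperationPass<FuncOp>> imperfect =
    mlir::createLoopToGPUPass(/*numWorkGroups=*/{8, 8}, /*workGroupSize=*/{16, 16});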

25 changes: 25 additions & 0 deletions mlir/lib/Conversion/PassDetail.h
@@ -0,0 +1,25 @@
//===- PassDetail.h - Conversion Pass class details -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef CONVERSION_PASSDETAIL_H_
#define CONVERSION_PASSDETAIL_H_

#include "mlir/Pass/Pass.h"

namespace mlir {

namespace gpu {
class GPUModuleOp;
} // end namespace gpu

#define GEN_PASS_CLASSES
#include "mlir/Conversion/Passes.h.inc"

} // end namespace mlir

#endif // CONVERSION_PASSDETAIL_H_
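PassDetail.h is what ties the .cpp changes to the tablegen records: defining GEN_PASS_CLASSES before including Passes.h.inc makes the generated file emit one CRTP base class per pass definition, and keeping that include in a private header avoids exposing the generated classes through the public Conversion headers. As a rough illustration only (the real code is produced by mlir-tblgen and carries extra constructor, registration and description plumbing that is omitted here), a generated base for a hypothetical ConvertFooToBar pass anchored on ModuleOp has this shape:

template <typename DerivedT>
class ConvertFooToBarBase : public OperationPass<ModuleOp> {
protected:
  // One member per Option<>/ListOption<> on the tablegen record; the derived
  // pass reads and writes these directly, e.g. this->someFlag = true;.
  Option<bool> someFlag{*this, "some-flag",
                        llvm::cl::desc("illustrative option"),
                        llvm::cl::init(false)};
  ListOption<int64_t> someList{*this, "some-list",
                               llvm::cl::desc("illustrative list option")};
};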
20 changes: 7 additions & 13 deletions mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//

#include "../PassDetail.h"
#include "mlir/ADT/TypeSwitch.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
@@ -22,7 +23,6 @@
#include "mlir/IR/Module.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
@@ -2847,32 +2847,26 @@ LLVMTypeConverter::promoteMemRefDescriptors(Location loc, ValueRange opOperands,

namespace {
/// A pass converting MLIR operations into the LLVM IR dialect.
struct LLVMLoweringPass : public ModulePass<LLVMLoweringPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertStandardToLLVM
#include "mlir/Conversion/Passes.h.inc"

/// Creates an LLVM lowering pass.
struct LLVMLoweringPass : public ConvertStandardToLLVMBase<LLVMLoweringPass> {
LLVMLoweringPass() = default;
LLVMLoweringPass(bool useBarePtrCallConv, bool emitCWrappers,
unsigned indexBitwidth) {
this->useBarePtrCallConv = useBarePtrCallConv;
this->emitCWrappers = emitCWrappers;
this->indexBitwidth = indexBitwidth;
}
explicit LLVMLoweringPass() {}
LLVMLoweringPass(const LLVMLoweringPass &pass) {}

/// Run the dialect converter on the module.
void runOnModule() override {
void runOnOperation() override {
if (useBarePtrCallConv && emitCWrappers) {
getModule().emitError()
getOperation().emitError()
<< "incompatible conversion options: bare-pointer calling convention "
"and C wrapper emission";
signalPassFailure();
return;
}

ModuleOp m = getModule();
ModuleOp m = getOperation();

LLVMTypeConverterCustomization customs;
customs.funcArgConverter = useBarePtrCallConv ? barePtrFuncArgTypeConverter
@@ -2901,7 +2895,7 @@ mlir::LLVMConversionTarget::LLVMConversionTarget(MLIRContext &ctx)
this->addIllegalOp<TanhOp>();
}

std::unique_ptr<OpPassBase<ModuleOp>>
std::unique_ptr<OperationPass<ModuleOp>>
mlir::createLowerToLLVMPass(const LowerToLLVMOptions &options) {
return std::make_unique<LLVMLoweringPass>(
options.useBarePtrCallConv, options.emitCWrappers, options.indexBitwidth);
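The options struct keeps working as before; only the type returned by the factory changes. A usage sketch, assuming LowerToLLVMOptions is default-constructible with the three public fields referenced just above (useBarePtrCallConv, emitCWrappers, indexBitwidth):

mlir::LowerToLLVMOptions options;
options.emitCWrappers = true;  // also emit the C interface wrappers
options.indexBitwidth = 32;    // lower index to a 32-bit integer
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> pass =
    mlir::createLowerToLLVMPass(options);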
17 changes: 7 additions & 10 deletions mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.cpp
@@ -12,28 +12,24 @@
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.h"
#include "../PassDetail.h"
#include "mlir/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.h"
#include "mlir/Dialect/SPIRV/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVLowering.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

namespace {
/// A pass converting MLIR Standard operations into the SPIR-V dialect.
class ConvertStandardToSPIRVPass
: public ModulePass<ConvertStandardToSPIRVPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertStandardToSPIRV
#include "mlir/Conversion/Passes.h.inc"

void runOnModule() override;
: public ConvertStandardToSPIRVBase<ConvertStandardToSPIRVPass> {
void runOnOperation() override;
};
} // namespace

void ConvertStandardToSPIRVPass::runOnModule() {
void ConvertStandardToSPIRVPass::runOnOperation() {
MLIRContext *context = &getContext();
ModuleOp module = getModule();
ModuleOp module = getOperation();

auto targetAttr = spirv::lookupTargetEnvOrDefault(module);
std::unique_ptr<ConversionTarget> target =
@@ -49,6 +45,7 @@ void ConvertStandardToSPIRVPass::runOnModule() {
}
}

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createConvertStandardToSPIRVPass() {
std::unique_ptr<OperationPass<ModuleOp>>
mlir::createConvertStandardToSPIRVPass() {
return std::make_unique<ConvertStandardToSPIRVPass>();
}
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//

#include "../PassDetail.h"
#include "mlir/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.h"
#include "mlir/Conversion/StandardToSPIRV/ConvertStandardToSPIRVPass.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

@@ -160,11 +160,8 @@ void mlir::populateStdLegalizationPatternsForSPIRVLowering(
//===----------------------------------------------------------------------===//

namespace {
struct SPIRVLegalization final : public OperationPass<SPIRVLegalization> {
/// Include the generated pass utilities.
#define GEN_PASS_LegalizeStandardForSPIRV
#include "mlir/Conversion/Passes.h.inc"

struct SPIRVLegalization final
: public LegalizeStandardForSPIRVBase<SPIRVLegalization> {
void runOnOperation() override;
};
} // namespace
22 changes: 9 additions & 13 deletions mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -8,6 +8,7 @@

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "../PassDetail.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
@@ -21,8 +22,6 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/IR/DerivedTypes.h"
@@ -1118,23 +1117,20 @@ void mlir::populateVectorToLLVMMatrixConversionPatterns(
}

namespace {
struct LowerVectorToLLVMPass : public ModulePass<LowerVectorToLLVMPass> {
/// Include the generated pass utilities.
#define GEN_PASS_ConvertVectorToLLVM
#include "mlir/Conversion/Passes.h.inc"

void runOnModule() override;
struct LowerVectorToLLVMPass
: public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> {
void runOnOperation() override;
};
} // namespace

void LowerVectorToLLVMPass::runOnModule() {
void LowerVectorToLLVMPass::runOnOperation() {
// Perform progressive lowering of operations on slices and
// all contraction operations. Also applies folding and DCE.
{
OwningRewritePatternList patterns;
populateVectorSlicesLoweringPatterns(patterns, &getContext());
populateVectorContractLoweringPatterns(patterns, &getContext());
applyPatternsGreedily(getModule(), patterns);
applyPatternsGreedily(getOperation(), patterns);
}

// Convert to the LLVM IR dialect.
@@ -1148,12 +1144,12 @@ void LowerVectorToLLVMPass::runOnModule() {
LLVMConversionTarget target(getContext());
target.addDynamicallyLegalOp<FuncOp>(
[&](FuncOp op) { return converter.isSignatureLegal(op.getType()); });
if (failed(
applyPartialConversion(getModule(), target, patterns, &converter))) {
if (failed(applyPartialConversion(getOperation(), target, patterns,
&converter))) {
signalPassFailure();
}
}

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createConvertVectorToLLVMPass() {
std::unique_ptr<OperationPass<ModuleOp>> mlir::createConvertVectorToLLVMPass() {
return std::make_unique<LowerVectorToLLVMPass>();
}
16 changes: 7 additions & 9 deletions mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
@@ -19,12 +19,12 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/CommandLine.h"
@@ -75,11 +75,7 @@ namespace {
// TODO(bondhugula): We currently can't generate copies correctly when stores
// are strided. Check for strided stores.
struct AffineDataCopyGeneration
: public FunctionPass<AffineDataCopyGeneration> {
/// Include the generated pass utilities.
#define GEN_PASS_AffineDataCopyGeneration
#include "mlir/Dialect/Affine/Passes.h.inc"

: public AffineDataCopyGenerationBase<AffineDataCopyGeneration> {
explicit AffineDataCopyGeneration(
unsigned slowMemorySpace = 0,
unsigned fastMemorySpace = clFastMemorySpace, unsigned tagMemorySpace = 0,
@@ -96,7 +92,8 @@ struct AffineDataCopyGeneration
skipNonUnitStrideLoops(skipNonUnitStrideLoops) {}

explicit AffineDataCopyGeneration(const AffineDataCopyGeneration &other)
: slowMemorySpace(other.slowMemorySpace),
: AffineDataCopyGenerationBase<AffineDataCopyGeneration>(other),
slowMemorySpace(other.slowMemorySpace),
fastMemorySpace(other.fastMemorySpace),
tagMemorySpace(other.tagMemorySpace),
minDmaTransferSize(other.minDmaTransferSize),
@@ -134,14 +131,15 @@ struct AffineDataCopyGeneration
/// buffers in 'fastMemorySpace', and replaces memory operations to the former
/// by the latter. Only load op's handled for now.
/// TODO(bondhugula): extend this to store op's.
std::unique_ptr<OpPassBase<FuncOp>> mlir::createAffineDataCopyGenerationPass(
std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineDataCopyGenerationPass(
unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
return std::make_unique<AffineDataCopyGeneration>(
slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
fastMemCapacityBytes);
}
std::unique_ptr<OpPassBase<FuncOp>> mlir::createAffineDataCopyGenerationPass() {
std::unique_ptr<OperationPass<FuncOp>>
mlir::createAffineDataCopyGenerationPass() {
return std::make_unique<AffineDataCopyGeneration>();
}
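The one non-mechanical detail in this file is the copy constructor: a copy now goes through the generated base's copy constructor instead of default-constructing it, presumably so that the state the base carries (the tablegen-declared options and pass metadata) is preserved when the pass is cloned, while the hand-written members are still copied explicitly. A condensed sketch of the shape, with illustrative names:

struct MyCopyableAffinePass
    : public MyCopyableAffinePassBase<MyCopyableAffinePass> {
  MyCopyableAffinePass() = default;
  MyCopyableAffinePass(const MyCopyableAffinePass &other)
      : MyCopyableAffinePassBase<MyCopyableAffinePass>(other), // base state too
        localThreshold(other.localThreshold) {}                // local state

  uint64_t localThreshold = 0;
};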

@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
@@ -20,7 +21,6 @@
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/ADT/DenseMap.h"
@@ -41,11 +41,8 @@ namespace {
/// TODO(asabne) : Check for the presence of side effects before hoisting.
/// TODO: This code should be removed once the new LICM pass can handle its
/// uses.
struct LoopInvariantCodeMotion : public FunctionPass<LoopInvariantCodeMotion> {
/// Include the generated pass utilities.
#define GEN_PASS_AffineLoopInvariantCodeMotion
#include "mlir/Dialect/Affine/Passes.h.inc"

struct LoopInvariantCodeMotion
: public AffineLoopInvariantCodeMotionBase<LoopInvariantCodeMotion> {
void runOnFunction() override;
void runOnAffineForOp(AffineForOp forOp);
};
@@ -232,7 +229,7 @@ void LoopInvariantCodeMotion::runOnFunction() {
});
}

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createAffineLoopInvariantCodeMotionPass() {
return std::make_unique<LoopInvariantCodeMotion>();
}
12 changes: 4 additions & 8 deletions mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
@@ -19,7 +20,6 @@
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/Support/CommandLine.h"
@@ -58,11 +58,7 @@ static llvm::cl::list<unsigned> clTileSizes(
namespace {

/// A pass to perform loop tiling on all suitable loop nests of a Function.
struct LoopTiling : public FunctionPass<LoopTiling> {
/// Include the generated pass utilities.
#define GEN_PASS_AffineLoopTiling
#include "mlir/Dialect/Affine/Passes.h.inc"

struct LoopTiling : public AffineLoopTilingBase<LoopTiling> {
explicit LoopTiling(uint64_t cacheSizeBytes = kDefaultCacheMemCapacity,
bool avoidMaxMinBounds = true)
: cacheSizeBytes(cacheSizeBytes), avoidMaxMinBounds(avoidMaxMinBounds) {}
@@ -85,11 +81,11 @@ struct LoopTiling : public FunctionPass<LoopTiling> {

/// Creates a pass to perform loop tiling on all suitable loop nests of a
/// Function.
std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
return std::make_unique<LoopTiling>(cacheSizeBytes);
}
std::unique_ptr<OpPassBase<FuncOp>> mlir::createLoopTilingPass() {
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
return std::make_unique<LoopTiling>();
}

10 changes: 3 additions & 7 deletions mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
@@ -10,13 +10,13 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CommandLine.h"
@@ -58,11 +58,7 @@ namespace {
/// full unroll threshold was specified, in which case, fully unrolls all loops
/// with trip count less than the specified threshold. The latter is for testing
/// purposes, especially for testing outer loop unrolling.
struct LoopUnroll : public FunctionPass<LoopUnroll> {
/// Include the generated pass utilities.
#define GEN_PASS_AffineUnroll
#include "mlir/Dialect/Affine/Passes.h.inc"

struct LoopUnroll : public AffineLoopUnrollBase<LoopUnroll> {
const Optional<unsigned> unrollFactor;
const Optional<bool> unrollFull;
// Callback to obtain unroll factors; if this has a callable target, takes
@@ -166,7 +162,7 @@ LogicalResult LoopUnroll::runOnAffineForOp(AffineForOp forOp) {
return loopUnrollByFactor(forOp, kDefaultUnrollFactor);
}

std::unique_ptr<OpPassBase<FuncOp>> mlir::createLoopUnrollPass(
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopUnrollPass(
int unrollFactor, int unrollFull,
const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
return std::make_unique<LoopUnroll>(
11 changes: 4 additions & 7 deletions mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
@@ -32,14 +32,15 @@
// Note: 'if/else' blocks are not jammed. So, if there are loops inside if
// op's, bodies of those loops will not be jammed.
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CommandLine.h"
@@ -60,11 +61,7 @@ static llvm::cl::opt<unsigned>
namespace {
/// Loop unroll jam pass. Currently, this just unroll jams the first
/// outer loop in a Function.
struct LoopUnrollAndJam : public FunctionPass<LoopUnrollAndJam> {
/// Include the generated pass utilities.
#define GEN_PASS_AffineLoopUnrollAndJam
#include "mlir/Dialect/Affine/Passes.h.inc"

struct LoopUnrollAndJam : public AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
Optional<unsigned> unrollJamFactor;
static const unsigned kDefaultUnrollJamFactor = 4;

@@ -76,7 +73,7 @@ struct LoopUnrollAndJam : public FunctionPass<LoopUnrollAndJam> {
};
} // end anonymous namespace

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
return std::make_unique<LoopUnrollAndJam>(
unrollJamFactor == -1 ? None : Optional<unsigned>(unrollJamFactor));
21 changes: 21 additions & 0 deletions mlir/lib/Dialect/Affine/Transforms/PassDetail.h
@@ -0,0 +1,21 @@
//===- PassDetail.h - Affine Pass class details -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef DIALECT_AFFINE_TRANSFORMS_PASSDETAIL_H_
#define DIALECT_AFFINE_TRANSFORMS_PASSDETAIL_H_

#include "mlir/Pass/Pass.h"

namespace mlir {

#define GEN_PASS_CLASSES
#include "mlir/Dialect/Affine/Passes.h.inc"

} // end namespace mlir

#endif // DIALECT_AFFINE_TRANSFORMS_PASSDETAIL_H_
11 changes: 4 additions & 7 deletions mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
@@ -10,10 +10,10 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Utils.h"

#define DEBUG_TYPE "simplify-affine-structure"
@@ -27,11 +27,7 @@ namespace {
/// all memrefs with non-trivial layout maps are converted to ones with trivial
/// identity layout ones.
struct SimplifyAffineStructures
: public FunctionPass<SimplifyAffineStructures> {
/// Include the generated pass utilities.
#define GEN_PASS_SimplifyAffineStructures
#include "mlir/Dialect/Affine/Passes.h.inc"

: public SimplifyAffineStructuresBase<SimplifyAffineStructures> {
void runOnFunction() override;

/// Utility to simplify an affine attribute and update its entry in the parent
@@ -73,7 +69,8 @@ struct SimplifyAffineStructures

} // end anonymous namespace

std::unique_ptr<OpPassBase<FuncOp>> mlir::createSimplifyAffineStructuresPass() {
std::unique_ptr<OperationPass<FuncOp>>
mlir::createSimplifyAffineStructuresPass() {
return std::make_unique<SimplifyAffineStructures>();
}

13 changes: 4 additions & 9 deletions mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Analysis/SliceAnalysis.h"
@@ -24,7 +25,6 @@
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
@@ -573,13 +573,8 @@ namespace {

/// Base state for the vectorize pass.
/// Command line arguments are preempted by non-empty pass arguments.
struct Vectorize : public FunctionPass<Vectorize> {
/// Include the generated pass utilities.
#define GEN_PASS_AffineVectorize
#include "mlir/Dialect/Affine/Passes.h.inc"

struct Vectorize : public AffineVectorizeBase<Vectorize> {
Vectorize() = default;
Vectorize(const Vectorize &) {}
Vectorize(ArrayRef<int64_t> virtualVectorSize);
void runOnFunction() override;
};
@@ -1252,10 +1247,10 @@ void Vectorize::runOnFunction() {
LLVM_DEBUG(dbgs() << "\n");
}

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
return std::make_unique<Vectorize>(virtualVectorSize);
}
std::unique_ptr<OpPassBase<FuncOp>> mlir::createSuperVectorizePass() {
std::unique_ptr<OperationPass<FuncOp>> mlir::createSuperVectorizePass() {
return std::make_unique<Vectorize>();
}
23 changes: 10 additions & 13 deletions mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -10,14 +10,14 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;
@@ -214,16 +214,13 @@ namespace {
/// The gpu.modules are intended to be compiled to a cubin blob independently in
/// a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
class GpuKernelOutliningPass : public ModulePass<GpuKernelOutliningPass> {
class GpuKernelOutliningPass
: public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
/// Include the generated pass utilities.
#define GEN_PASS_GpuKernelOutlining
#include "mlir/Dialect/GPU/Passes.h.inc"

void runOnModule() override {
SymbolTable symbolTable(getModule());
void runOnOperation() override {
SymbolTable symbolTable(getOperation());
bool modified = false;
for (auto func : getModule().getOps<FuncOp>()) {
for (auto func : getOperation().getOps<FuncOp>()) {
// Insert just after the function.
Block::iterator insertPt(func.getOperation()->getNextNode());
auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
@@ -255,8 +252,8 @@ class GpuKernelOutliningPass : public ModulePass<GpuKernelOutliningPass> {
// If any new module was inserted in this module, annotate this module as
// a container module.
if (modified)
getModule().setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
UnitAttr::get(&getContext()));
getOperation().setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
UnitAttr::get(&getContext()));
}

private:
@@ -267,7 +264,7 @@ class GpuKernelOutliningPass : public ModulePass<GpuKernelOutliningPass> {
// a SymbolTable by the caller. SymbolTable needs to be refactored to
// prevent manual building of Ops with symbols in code using SymbolTables
// and then this needs to use the OpBuilder.
auto context = getModule().getContext();
auto context = getOperation().getContext();
Builder builder(context);
OperationState state(kernelFunc.getLoc(),
gpu::GPUModuleOp::getOperationName());
@@ -300,6 +297,6 @@ class GpuKernelOutliningPass : public ModulePass<GpuKernelOutliningPass> {

} // namespace

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createGpuKernelOutliningPass() {
std::unique_ptr<OperationPass<ModuleOp>> mlir::createGpuKernelOutliningPass() {
return std::make_unique<GpuKernelOutliningPass>();
}
21 changes: 21 additions & 0 deletions mlir/lib/Dialect/GPU/Transforms/PassDetail.h
@@ -0,0 +1,21 @@
//===- PassDetail.h - GPU Pass class details --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef DIALECT_GPU_TRANSFORMS_PASSDETAIL_H_
#define DIALECT_GPU_TRANSFORMS_PASSDETAIL_H_

#include "mlir/Pass/Pass.h"

namespace mlir {

#define GEN_PASS_CLASSES
#include "mlir/Dialect/GPU/Passes.h.inc"

} // end namespace mlir

#endif // DIALECT_GPU_TRANSFORMS_PASSDETAIL_H_
9 changes: 3 additions & 6 deletions mlir/lib/Dialect/LLVMIR/Transforms/LegalizeForExport.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/LLVMIR/Transforms/LegalizeForExport.h"
#include "PassDetail.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Module.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

@@ -57,11 +57,8 @@ void mlir::LLVM::ensureDistinctSuccessors(Operation *op) {
}

namespace {
struct LegalizeForExportPass : public OperationPass<LegalizeForExportPass> {
/// Include the generated pass utilities.
#define GEN_PASS_LLVMLegalizeForExport
#include "mlir/Dialect/LLVMIR/Transforms/Passes.h.inc"

struct LegalizeForExportPass
: public LLVMLegalizeForExportBase<LegalizeForExportPass> {
void runOnOperation() override {
LLVM::ensureDistinctSuccessors(getOperation());
}
21 changes: 21 additions & 0 deletions mlir/lib/Dialect/LLVMIR/Transforms/PassDetail.h
@@ -0,0 +1,21 @@
//===- PassDetail.h - LLVM Pass class details -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef DIALECT_LLVMIR_TRANSFORMS_PASSDETAIL_H_
#define DIALECT_LLVMIR_TRANSFORMS_PASSDETAIL_H_

#include "mlir/Pass/Pass.h"

namespace mlir {

#define GEN_PASS_CLASSES
#include "mlir/Dialect/LLVMIR/Transforms/Passes.h.inc"

} // end namespace mlir

#endif // DIALECT_LLVMIR_TRANSFORMS_PASSDETAIL_H_
18 changes: 5 additions & 13 deletions mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/Dominance.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
@@ -20,9 +21,7 @@
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/STLExtras.h"
#include "mlir/Transforms/FoldUtils.h"
@@ -567,11 +566,8 @@ struct FuseGenericTensorOps : public OpRewritePattern<GenericOp> {
};

/// Pass that fuses generic ops on tensors. Used only for testing.
struct FusionOfTensorOpsPass : public OperationPass<FusionOfTensorOpsPass> {
/// Include the generated pass utilities.
#define GEN_PASS_LinalgFusionOfTensorOps
#include "mlir/Dialect/Linalg/Passes.h.inc"

struct FusionOfTensorOpsPass
: public LinalgFusionOfTensorOpsBase<FusionOfTensorOpsPass> {
void runOnOperation() override {
OwningRewritePatternList patterns;
Operation *op = getOperation();
@@ -580,16 +576,12 @@ struct FusionOfTensorOpsPass : public OperationPass<FusionOfTensorOpsPass> {
};
};

struct LinalgFusionPass : public FunctionPass<LinalgFusionPass> {
/// Include the generated pass utilities.
#define GEN_PASS_LinalgFusion
#include "mlir/Dialect/Linalg/Passes.h.inc"

struct LinalgFusionPass : public LinalgFusionBase<LinalgFusionPass> {
void runOnFunction() override { fuseLinalgOpsGreedily(getFunction()); }
};
} // namespace

std::unique_ptr<OpPassBase<FuncOp>> mlir::createLinalgFusionPass() {
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgFusionPass() {
return std::make_unique<LinalgFusionPass>();
}

29 changes: 9 additions & 20 deletions mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
@@ -18,8 +19,6 @@
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/STLExtras.h"
@@ -693,48 +692,38 @@ static void lowerLinalgToLoopsImpl(Operation *op, MLIRContext *context) {
}

namespace {
struct LowerToAffineLoops : public FunctionPass<LowerToAffineLoops> {
/// Include the generated pass utilities.
#define GEN_PASS_LinalgLowerToAffineLoops
#include "mlir/Dialect/Linalg/Passes.h.inc"

struct LowerToAffineLoops
: public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
void runOnFunction() override {
lowerLinalgToLoopsImpl<AffineForOp, AffineIndexedValue>(getFunction(),
&getContext());
}
};
struct LowerToLoops : public FunctionPass<LowerToLoops> {
/// Include the generated pass utilities.
#define GEN_PASS_LinalgLowerToLoops
#include "mlir/Dialect/Linalg/Passes.h.inc"

struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
void runOnFunction() override {
lowerLinalgToLoopsImpl<loop::ForOp, StdIndexedValue>(getFunction(),
&getContext());
}
};
struct LowerToParallelLoops : public FunctionPass<LowerToParallelLoops> {
/// Include the generated pass utilities.
#define GEN_PASS_LinalgLowerToParallelLoops
#include "mlir/Dialect/Linalg/Passes.h.inc"

struct LowerToParallelLoops
: public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
void runOnFunction() override {
lowerLinalgToLoopsImpl<loop::ParallelOp, StdIndexedValue>(getFunction(),
&getContext());
}
};
} // namespace

std::unique_ptr<OpPassBase<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
return std::make_unique<LowerToLoops>();
}

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
return std::make_unique<LowerToParallelLoops>();
}

std::unique_ptr<OpPassBase<FuncOp>>
std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
return std::make_unique<LowerToAffineLoops>();
}
21 changes: 21 additions & 0 deletions mlir/lib/Dialect/Linalg/Transforms/PassDetail.h
@@ -0,0 +1,21 @@
//===- PassDetail.h - Linalg Pass class details -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef DIALECT_LINALG_TRANSFORMS_PASSDETAIL_H_
#define DIALECT_LINALG_TRANSFORMS_PASSDETAIL_H_

#include "mlir/Pass/Pass.h"

namespace mlir {

#define GEN_PASS_CLASSES
#include "mlir/Dialect/Linalg/Passes.h.inc"

} // end namespace mlir

#endif // DIALECT_LINALG_TRANSFORMS_PASSDETAIL_H_