Skip to content

Commit

Permalink
[mlir][spirv] Use spv.entry_point_abi in GPU to SPIR-V conversions
Browse files Browse the repository at this point in the history
We have spv.entry_point_abi for specifying the local workgroup size.
It should be decorated onto input gpu.func ops to drive the SPIR-V
CodeGen to generate the proper SPIR-V module execution mode. Compared
to using command-line options for specifying the configuration, using
attributes also has the benefits that 1) we are now able to use
different local workgroup sizes for different entry points and 2) the
tests contain the configuration directly.

Differential Revision: https://reviews.llvm.org/D74012
  • Loading branch information
antiagainst committed Feb 10, 2020
1 parent 9559834 commit 50aeeed
Show file tree
Hide file tree
Showing 11 changed files with 104 additions and 97 deletions.
8 changes: 4 additions & 4 deletions mlir/include/mlir/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.h
Expand Up @@ -17,13 +17,13 @@

namespace mlir {
class SPIRVTypeConverter;

/// Appends to a pattern list additional patterns for translating GPU Ops to
/// SPIR-V ops. Needs the workgroup size as input since SPIR-V/Vulkan requires
/// the workgroup size to be statically specified.
/// SPIR-V ops. For a gpu.func to be converted, it should have a
/// spv.entry_point_abi attribute.
void populateGPUToSPIRVPatterns(MLIRContext *context,
SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns,
ArrayRef<int64_t> workGroupSize);
OwningRewritePatternList &patterns);
} // namespace mlir

#endif // MLIR_CONVERSION_GPUTOSPIRV_CONVERTGPUTOSPIRV_H
Expand Up @@ -22,10 +22,9 @@ namespace mlir {
class ModuleOp;
template <typename T> class OpPassBase;

/// Pass to convert GPU Ops to SPIR-V ops. Needs the workgroup size as input
/// since SPIR-V/Vulkan requires the workgroup size to be statically specified.
std::unique_ptr<OpPassBase<ModuleOp>>
createConvertGPUToSPIRVPass(ArrayRef<int64_t> workGroupSize);
/// Pass to convert GPU Ops to SPIR-V ops. For a gpu.func to be converted, it
/// should have a spv.entry_point_abi attribute.
std::unique_ptr<OpPassBase<ModuleOp>> createConvertGPUToSPIRVPass();

} // namespace mlir
#endif // MLIR_CONVERSION_GPUTOSPIRV_CONVERTGPUTOSPIRVPASS_H
14 changes: 9 additions & 5 deletions mlir/include/mlir/Dialect/SPIRV/TargetAndABI.h
Expand Up @@ -112,6 +112,15 @@ StringRef getEntryPointABIAttrName();
EntryPointABIAttr getEntryPointABIAttr(ArrayRef<int32_t> localSize,
MLIRContext *context);

/// Queries the entry point ABI on the nearest function-like op containing the
/// given `op`. Returns null attribute if not found.
EntryPointABIAttr lookupEntryPointABI(Operation *op);

/// Queries the local workgroup size from entry point ABI on the nearest
/// function-like op containing the given `op`. Returns null attribute if not
/// found.
DenseIntElementsAttr lookupLocalWorkGroupSize(Operation *op);

/// Returns a default resource limits attribute that uses numbers from
/// "Table 46. Required Limits" of the Vulkan spec.
ResourceLimitsAttr getDefaultResourceLimits(MLIRContext *context);
Expand All @@ -128,11 +137,6 @@ TargetEnvAttr getDefaultTargetEnv(MLIRContext *context);
/// extensions) if not provided.
TargetEnvAttr lookupTargetEnvOrDefault(Operation *op);

/// Queries the local workgroup size from entry point ABI on the nearest
/// function-like op containing the given `op`. Returns null attribute if not
/// found.
DenseIntElementsAttr lookupLocalWorkGroupSize(Operation *op);

} // namespace spirv
} // namespace mlir

Expand Down
40 changes: 15 additions & 25 deletions mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.cpp
Expand Up @@ -82,16 +82,9 @@ class WorkGroupSizeConversion : public SPIRVOpLowering<gpu::BlockDimOp> {
};

/// Pattern to convert a kernel function in GPU dialect within a spv.module.
class KernelFnConversion final : public SPIRVOpLowering<gpu::GPUFuncOp> {
class GPUFuncOpConversion final : public SPIRVOpLowering<gpu::GPUFuncOp> {
public:
KernelFnConversion(MLIRContext *context, SPIRVTypeConverter &converter,
ArrayRef<int64_t> workGroupSize,
PatternBenefit benefit = 1)
: SPIRVOpLowering<gpu::GPUFuncOp>(context, converter, benefit) {
auto config = workGroupSize.take_front(3);
workGroupSizeAsInt32.assign(config.begin(), config.end());
workGroupSizeAsInt32.resize(3, 1);
}
using SPIRVOpLowering<gpu::GPUFuncOp>::SPIRVOpLowering;

PatternMatchResult
matchAndRewrite(gpu::GPUFuncOp funcOp, ArrayRef<Value> operands,
Expand Down Expand Up @@ -352,28 +345,27 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, SPIRVTypeConverter &typeConverter,
return newFuncOp;
}

PatternMatchResult
KernelFnConversion::matchAndRewrite(gpu::GPUFuncOp funcOp,
ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const {
if (!gpu::GPUDialect::isKernel(funcOp)) {
PatternMatchResult GPUFuncOpConversion::matchAndRewrite(
gpu::GPUFuncOp funcOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const {
if (!gpu::GPUDialect::isKernel(funcOp))
return matchFailure();
}

SmallVector<spirv::InterfaceVarABIAttr, 4> argABI;
for (auto argNum : llvm::seq<unsigned>(0, funcOp.getNumArguments())) {
argABI.push_back(spirv::getInterfaceVarABIAttr(
0, argNum, spirv::StorageClass::StorageBuffer, rewriter.getContext()));
}

auto context = rewriter.getContext();
auto entryPointAttr =
spirv::getEntryPointABIAttr(workGroupSizeAsInt32, context);
auto entryPointAttr = spirv::lookupEntryPointABI(funcOp);
if (!entryPointAttr) {
funcOp.emitRemark("match failure: missing 'spv.entry_point_abi' attribute");
return matchFailure();
}
FuncOp newFuncOp = lowerAsEntryFunction(funcOp, typeConverter, rewriter,
entryPointAttr, argABI);
if (!newFuncOp) {
if (!newFuncOp)
return matchFailure();
}
newFuncOp.removeAttr(Identifier::get(gpu::GPUDialect::getKernelFuncAttrName(),
rewriter.getContext()));
return matchSuccess();
Expand Down Expand Up @@ -429,13 +421,11 @@ namespace {

void mlir::populateGPUToSPIRVPatterns(MLIRContext *context,
SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns,
ArrayRef<int64_t> workGroupSize) {
OwningRewritePatternList &patterns) {
populateWithGenerated(context, &patterns);
patterns.insert<KernelFnConversion>(context, typeConverter, workGroupSize);
patterns.insert<
ForOpConversion, GPUModuleConversion, GPUReturnOpConversion,
IfOpConversion,
ForOpConversion, GPUFuncOpConversion, GPUModuleConversion,
GPUReturnOpConversion, IfOpConversion,
LaunchConfigConversion<gpu::BlockIdOp, spirv::BuiltIn::WorkgroupId>,
LaunchConfigConversion<gpu::GridDimOp, spirv::BuiltIn::NumWorkgroups>,
LaunchConfigConversion<gpu::ThreadIdOp,
Expand Down
31 changes: 7 additions & 24 deletions mlir/lib/Conversion/GPUToSPIRV/ConvertGPUToSPIRVPass.cpp
Expand Up @@ -24,33 +24,17 @@
using namespace mlir;

namespace {
/// Pass to lower GPU Dialect to SPIR-V. The pass only converts those functions
/// that have the "gpu.kernel" attribute, i.e. those functions that are
/// referenced in gpu::LaunchKernelOp operations. For each such function
/// Pass to lower GPU Dialect to SPIR-V. The pass only converts the gpu.func ops
/// inside gpu.module ops. i.e., the function that are referenced in
/// gpu.launch_func ops. For each such function
///
/// 1) Create a spirv::ModuleOp, and clone the function into spirv::ModuleOp
/// (the original function is still needed by the gpu::LaunchKernelOp, so cannot
/// replace it).
///
/// 2) Lower the body of the spirv::ModuleOp.
class GPUToSPIRVPass : public ModulePass<GPUToSPIRVPass> {
public:
GPUToSPIRVPass() = default;
GPUToSPIRVPass(const GPUToSPIRVPass &) {}
GPUToSPIRVPass(ArrayRef<int64_t> workGroupSize) {
this->workGroupSize = workGroupSize;
}

struct GPUToSPIRVPass : public ModulePass<GPUToSPIRVPass> {
void runOnModule() override;

private:
/// Command line option to specify the workgroup size.
ListOption<int64_t> workGroupSize{
*this, "workgroup-size",
llvm::cl::desc(
"Workgroup Sizes in the SPIR-V module for x, followed by y, followed "
"by z dimension of the dispatch (others will be ignored)"),
llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated};
};
} // namespace

Expand All @@ -70,7 +54,7 @@ void GPUToSPIRVPass::runOnModule() {

SPIRVTypeConverter typeConverter;
OwningRewritePatternList patterns;
populateGPUToSPIRVPatterns(context, typeConverter, patterns, workGroupSize);
populateGPUToSPIRVPatterns(context, typeConverter, patterns);
populateStandardToSPIRVPatterns(context, typeConverter, patterns);

std::unique_ptr<ConversionTarget> target = spirv::SPIRVConversionTarget::get(
Expand All @@ -84,9 +68,8 @@ void GPUToSPIRVPass::runOnModule() {
}
}

std::unique_ptr<OpPassBase<ModuleOp>>
mlir::createConvertGPUToSPIRVPass(ArrayRef<int64_t> workGroupSize) {
return std::make_unique<GPUToSPIRVPass>(workGroupSize);
std::unique_ptr<OpPassBase<ModuleOp>> mlir::createConvertGPUToSPIRVPass() {
return std::make_unique<GPUToSPIRVPass>();
}

static PassRegistration<GPUToSPIRVPass>
Expand Down
33 changes: 20 additions & 13 deletions mlir/lib/Dialect/SPIRV/TargetAndABI.cpp
Expand Up @@ -158,6 +158,26 @@ spirv::getEntryPointABIAttr(ArrayRef<int32_t> localSize, MLIRContext *context) {
context);
}

spirv::EntryPointABIAttr spirv::lookupEntryPointABI(Operation *op) {
  // Walk up the ancestor chain to the nearest op carrying the FunctionLike
  // trait (which may be `op` itself).
  Operation *funcLikeOp = op;
  while (funcLikeOp && !funcLikeOp->hasTrait<OpTrait::FunctionLike>())
    funcLikeOp = funcLikeOp->getParentOp();
  if (!funcLikeOp)
    return {};

  // getAttrOfType returns a null attribute when the attribute is absent or of
  // a different type, which matches the documented "null if not found"
  // contract.
  return funcLikeOp->getAttrOfType<spirv::EntryPointABIAttr>(
      spirv::getEntryPointABIAttrName());
}

DenseIntElementsAttr spirv::lookupLocalWorkGroupSize(Operation *op) {
  // Derive the workgroup size from the entry point ABI attribute on the
  // nearest enclosing function-like op; null attribute when not found.
  auto entryPointAttr = spirv::lookupEntryPointABI(op);
  return entryPointAttr ? entryPointAttr.local_size()
                        : DenseIntElementsAttr();
}

spirv::ResourceLimitsAttr
spirv::getDefaultResourceLimits(MLIRContext *context) {
auto i32Type = IntegerType::get(32, context);
Expand Down Expand Up @@ -187,16 +207,3 @@ spirv::TargetEnvAttr spirv::lookupTargetEnvOrDefault(Operation *op) {
return attr;
return getDefaultTargetEnv(op->getContext());
}

/// Queries the local workgroup size from the entry point ABI attribute on the
/// nearest function-like op containing (or being) `op`. Returns a null
/// attribute when no such op or attribute exists.
DenseIntElementsAttr spirv::lookupLocalWorkGroupSize(Operation *op) {
  // Walk up to the closest op carrying the FunctionLike trait.
  while (op && !op->hasTrait<OpTrait::FunctionLike>())
    op = op->getParentOp();
  if (!op)
    return {};

  // Extract only the local_size field of the entry point ABI attribute.
  if (auto attr = op->getAttrOfType<spirv::EntryPointABIAttr>(
          spirv::getEntryPointABIAttrName()))
    return attr.local_size();

  return {};
}
27 changes: 15 additions & 12 deletions mlir/test/Conversion/GPUToSPIRV/builtins.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -split-input-file -pass-pipeline='convert-gpu-to-spirv{workgroup-size=32,4}' %s -o - | FileCheck %s
// RUN: mlir-opt -split-input-file -convert-gpu-to-spirv %s -o - | FileCheck %s

module attributes {gpu.container_module} {
func @builtin() {
Expand All @@ -11,7 +11,7 @@ module attributes {gpu.container_module} {
// CHECK: spv.globalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
gpu.module @kernels {
gpu.func @builtin_workgroup_id_x()
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
// CHECK: [[ADDRESS:%.*]] = spv._address_of [[WORKGROUPID]]
// CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
// CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
Expand All @@ -34,7 +34,7 @@ module attributes {gpu.container_module} {
// CHECK: spv.globalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
gpu.module @kernels {
gpu.func @builtin_workgroup_id_y()
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
// CHECK: [[ADDRESS:%.*]] = spv._address_of [[WORKGROUPID]]
// CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
// CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}1 : i32{{\]}}
Expand All @@ -57,7 +57,7 @@ module attributes {gpu.container_module} {
// CHECK: spv.globalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
gpu.module @kernels {
gpu.func @builtin_workgroup_id_z()
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
// CHECK: [[ADDRESS:%.*]] = spv._address_of [[WORKGROUPID]]
// CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
// CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}2 : i32{{\]}}
Expand All @@ -79,8 +79,11 @@ module attributes {gpu.container_module} {
// CHECK-LABEL: spv.module "Logical" "GLSL450"
gpu.module @kernels {
gpu.func @builtin_workgroup_size_x()
attributes {gpu.kernel} {
// The constant value is obtained fomr the command line option above.
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[32, 1, 1]>: vector<3xi32>}} {
// The constant value is obtained from the spv.entry_point_abi.
// Note that this ignores the workgroup size specification in gpu.launch.
// We may want to define gpu.workgroup_size and convert it to the entry
// point ABI we want here.
// CHECK: spv.constant 32 : i32
%0 = "gpu.block_dim"() {dimension = "x"} : () -> index
gpu.return
Expand All @@ -100,8 +103,8 @@ module attributes {gpu.container_module} {
// CHECK-LABEL: spv.module "Logical" "GLSL450"
gpu.module @kernels {
gpu.func @builtin_workgroup_size_y()
attributes {gpu.kernel} {
// The constant value is obtained fomr the command line option above.
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[32, 4, 1]>: vector<3xi32>}} {
// The constant value is obtained from the spv.entry_point_abi.
// CHECK: spv.constant 4 : i32
%0 = "gpu.block_dim"() {dimension = "y"} : () -> index
gpu.return
Expand All @@ -121,8 +124,8 @@ module attributes {gpu.container_module} {
// CHECK-LABEL: spv.module "Logical" "GLSL450"
gpu.module @kernels {
gpu.func @builtin_workgroup_size_z()
attributes {gpu.kernel} {
// The constant value is obtained fomr the command line option above (1 is default).
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[32, 4, 1]>: vector<3xi32>}} {
// The constant value is obtained from the spv.entry_point_abi.
// CHECK: spv.constant 1 : i32
%0 = "gpu.block_dim"() {dimension = "z"} : () -> index
gpu.return
Expand All @@ -143,7 +146,7 @@ module attributes {gpu.container_module} {
// CHECK: spv.globalVariable [[LOCALINVOCATIONID:@.*]] built_in("LocalInvocationId")
gpu.module @kernels {
gpu.func @builtin_local_id_x()
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
// CHECK: [[ADDRESS:%.*]] = spv._address_of [[LOCALINVOCATIONID]]
// CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
// CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
Expand All @@ -166,7 +169,7 @@ module attributes {gpu.container_module} {
// CHECK: spv.globalVariable [[NUMWORKGROUPS:@.*]] built_in("NumWorkgroups")
gpu.module @kernels {
gpu.func @builtin_num_workgroups_x()
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
// CHECK: [[ADDRESS:%.*]] = spv._address_of [[NUMWORKGROUPS]]
// CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
// CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/Conversion/GPUToSPIRV/if.mlir
Expand Up @@ -10,7 +10,7 @@ module attributes {gpu.container_module} {
gpu.module @kernels {
// CHECK-LABEL: @kernel_simple_selection
gpu.func @kernel_simple_selection(%arg2 : memref<10xf32>, %arg3 : i1)
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
%value = constant 0.0 : f32
%i = constant 0 : index

Expand All @@ -31,7 +31,7 @@ module attributes {gpu.container_module} {

// CHECK-LABEL: @kernel_nested_selection
gpu.func @kernel_nested_selection(%arg3 : memref<10xf32>, %arg4 : memref<10xf32>, %arg5 : i1, %arg6 : i1)
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
%i = constant 0 : index
%j = constant 9 : index

Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Conversion/GPUToSPIRV/load-store.mlir
Expand Up @@ -29,7 +29,7 @@ module attributes {gpu.container_module} {
// CHECK-SAME: [[ARG5:%.*]]: i32 {spv.interface_var_abi = {binding = 5 : i32, descriptor_set = 0 : i32, storage_class = 12 : i32{{[}][}]}}
// CHECK-SAME: [[ARG6:%.*]]: i32 {spv.interface_var_abi = {binding = 6 : i32, descriptor_set = 0 : i32, storage_class = 12 : i32{{[}][}]}}
gpu.func @load_store_kernel(%arg0: memref<12x4xf32>, %arg1: memref<12x4xf32>, %arg2: memref<12x4xf32>, %arg3: index, %arg4: index, %arg5: index, %arg6: index)
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
// CHECK: [[ADDRESSWORKGROUPID:%.*]] = spv._address_of [[WORKGROUPIDVAR]]
// CHECK: [[WORKGROUPID:%.*]] = spv.Load "Input" [[ADDRESSWORKGROUPID]]
// CHECK: [[WORKGROUPIDX:%.*]] = spv.CompositeExtract [[WORKGROUPID]]{{\[}}0 : i32{{\]}}
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Conversion/GPUToSPIRV/loop.mlir
Expand Up @@ -9,7 +9,7 @@ module attributes {gpu.container_module} {

gpu.module @kernels {
gpu.func @loop_kernel(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>)
attributes {gpu.kernel} {
attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
// CHECK: [[LB:%.*]] = spv.constant 4 : i32
%lb = constant 4 : index
// CHECK: [[UB:%.*]] = spv.constant 42 : i32
Expand Down

0 comments on commit 50aeeed

Please sign in to comment.