Commit 89d1143

[mlir][gpu] Add GPUToXeVM lowering pipeline pass. (#161216)
This is the default GPU to XeVM lowering pipeline. It starts by lowering the GPU code to the specified compilation target (fatbin by default), then lowers the host code. If XeGPU ops are used, the pipeline expects the MLIR input to already have the XeGPU ops embedded in the gpu code.
1 parent 024dd56 commit 89d1143
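
The pipeline added here is registered as gpu-lower-to-xevm-pipeline (see GPUToXeVMPipeline.cpp below); the integration test in this commit drives it through mlir-opt with xegpu-op-level=lane and pipes the result into mlir-runner. For in-process use, the following is a minimal sketch, not part of the commit: the helper name lowerToXeVM and the option values are illustrative, and the caller is assumed to have parsed the module and registered the dialects it needs.

#include "mlir/Dialect/GPU/Pipelines/Passes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"

// Hypothetical helper: lowers a module the same way the registered
// gpu-lower-to-xevm-pipeline would; option values are illustrative.
static mlir::LogicalResult lowerToXeVM(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  mlir::gpu::GPUToXeVMPipelineOptions options;
  options.xegpuOpLevel = "lane";   // workgroup | subgroup | lane
  options.binaryFormat = "fatbin"; // final gpu.binary emission format
  options.zebinChip = "bmg";       // target chip, e.g. pvc or bmg
  mlir::gpu::buildLowerToXeVMPassPipeline(pm, options);
  return pm.run(module);
}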

File tree

10 files changed: +605 −2 lines

mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h

Lines changed: 55 additions & 2 deletions
@@ -1,4 +1,4 @@
-//===- Passes.h - GPU NVVM pipeline entry points --------------------------===//
+//===- Passes.h - GPU pipeline entry points--------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -60,6 +60,52 @@ struct GPUToNVVMPipelineOptions
       llvm::cl::init(false)};
 };
 
+// Options for the gpu to xevm pipeline.
+struct GPUToXeVMPipelineOptions
+    : public PassPipelineOptions<GPUToXeVMPipelineOptions> {
+  PassOptions::Option<std::string> xegpuOpLevel{
+      *this, "xegpu-op-level",
+      llvm::cl::desc("Granularity of XeGPU operations to target: workgroup | "
+                     "subgroup | lane"),
+      llvm::cl::init("workgroup")};
+  // General lowering controls.
+  PassOptions::Option<bool> use64bitIndex{
+      *this, "use-64bit-index",
+      llvm::cl::desc("Bitwidth of the index type (host & device)"),
+      llvm::cl::init(true)};
+  PassOptions::Option<bool> kernelBarePtrCallConv{
+      *this, "kernel-bare-ptr-calling-convention",
+      llvm::cl::desc("Use bare pointer calling convention for device kernels"),
+      llvm::cl::init(false)};
+  PassOptions::Option<bool> hostBarePtrCallConv{
+      *this, "host-bare-ptr-calling-convention",
+      llvm::cl::desc("Use bare pointer calling convention for host launches"),
+      llvm::cl::init(false)};
+  PassOptions::Option<std::string> binaryFormat{
+      *this, "binary-format",
+      llvm::cl::desc("Final GPU binary emission format (e.g. fatbin)"),
+      llvm::cl::init("fatbin")};
+  // Options mirroring xevm-attach-target (GpuXeVMAttachTarget).
+  PassOptions::Option<std::string> xevmModuleMatcher{
+      *this, "xevm-module-matcher",
+      llvm::cl::desc("Regex to match gpu.module names for XeVM target attach"),
+      llvm::cl::init("")};
+  PassOptions::Option<std::string> zebinTriple{
+      *this, "zebin-triple", llvm::cl::desc("Target triple for XeVM codegen"),
+      llvm::cl::init("spirv64-unknown-unknown")};
+  PassOptions::Option<std::string> zebinChip{
+      *this, "zebin-chip", llvm::cl::desc("Target chip (e.g. pvc, bmg)"),
+      llvm::cl::init("bmg")};
+  PassOptions::Option<unsigned> optLevel{
+      *this, "opt-level",
+      llvm::cl::desc("Optimization level for attached target/codegen"),
+      llvm::cl::init(2)};
+  PassOptions::Option<std::string> cmdOptions{
+      *this, "igc-cmd-options",
+      llvm::cl::desc("Additional downstream compiler command line options"),
+      llvm::cl::init("")};
+};
+
 //===----------------------------------------------------------------------===//
 // Building and Registering.
 //===----------------------------------------------------------------------===//
@@ -70,8 +116,15 @@ struct GPUToNVVMPipelineOptions
 void buildLowerToNVVMPassPipeline(OpPassManager &pm,
                                   const GPUToNVVMPipelineOptions &options);
 
-/// Register all pipeleines for the `gpu` dialect.
+/// Adds the GPU to XeVM pipeline to the given pass manager. Transforms main
+/// dialects into XeVM targets. Begins with GPU code regions, then handles host
+/// code.
+void buildLowerToXeVMPassPipeline(OpPassManager &pm,
+                                  const GPUToXeVMPipelineOptions &options);
+
+/// Register all pipelines for the `gpu` dialect.
 void registerGPUToNVVMPipeline();
+void registerGPUToXeVMPipeline();
 
 } // namespace gpu
 } // namespace mlir

mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt

Lines changed: 6 additions & 0 deletions
@@ -1,5 +1,6 @@
 add_mlir_dialect_library(MLIRGPUPipelines
   GPUToNVVMPipeline.cpp
+  GPUToXeVMPipeline.cpp
 
   ADDITIONAL_HEADER_DIRS
   ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/GPU
@@ -11,12 +12,17 @@ add_mlir_dialect_library(MLIRGPUPipelines
   MLIRTransforms
   MLIRLinalgTransforms
   MLIRAffineToStandard
+  MLIRGPUToLLVMSPV
   MLIRGPUToNVVMTransforms
   MLIRIndexToLLVM
   MLIRMathToLLVM
+  MLIRMathToXeVM
   MLIRNVGPUToNVVM
   MLIRNVVMToLLVM
   MLIRReconcileUnrealizedCasts
   MLIRSCFToControlFlow
   MLIRVectorToSCF
+  MLIRXeGPUTransforms
+  MLIRXeGPUToXeVM
+  MLIRXeVMToLLVM
 )
mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp

Lines changed: 139 additions & 0 deletions
@@ -0,0 +1,139 @@
+//===- GPUToXeVMPipeline.cpp - Lowering pipeline to XeVM/LLVM -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass for testing the lowering to XeVM as a generally
+// usable sink pass. If XeGPU ops are used, it expects the MLIR code to have
+// XeGPU ops already embedded in gpu code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
+#include "mlir/Conversion/MathToXeVM/MathToXeVM.h"
+#include "mlir/Conversion/Passes.h"
+#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h"
+#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Conversion/XeGPUToXeVM/XeGPUToXeVM.h"
+#include "mlir/Conversion/XeVMToLLVM/XeVMToLLVM.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/GPU/IR/GPUDialect.h"
+#include "mlir/Dialect/GPU/Pipelines/Passes.h"
+#include "mlir/Dialect/GPU/Transforms/Passes.h"
+#include "mlir/Dialect/LLVMIR/Transforms/RequestCWrappers.h"
+#include "mlir/Dialect/MemRef/Transforms/Passes.h"
+#include "mlir/Dialect/XeGPU/Transforms/Passes.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Pass/PassOptions.h"
+#include "mlir/Target/LLVM/XeVM/Target.h"
+#include "mlir/Transforms/Passes.h"
+
+using namespace mlir;
+
+namespace {
+//===----------------------------------------------------------------------===//
+// Pre-GPU common pipeline for both Host and GPU.
+//===----------------------------------------------------------------------===//
+void buildPreGPUCommonPassPipeline(
+    OpPassManager &pm, const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+  // builtin.module scope passes.
+  pm.addPass(createCSEPass());
+  pm.addPass(createConvertVectorToSCFPass());
+  {
+    GpuXeVMAttachTargetOptions xevmTargetOptions;
+    xevmTargetOptions.moduleMatcher = options.xevmModuleMatcher;
+    xevmTargetOptions.triple = options.zebinTriple;
+    xevmTargetOptions.chip = options.zebinChip;
+    xevmTargetOptions.optLevel = options.optLevel;
+    xevmTargetOptions.cmdOptions = options.cmdOptions;
+    pm.addPass(createGpuXeVMAttachTarget(xevmTargetOptions));
+  }
+  pm.addPass(createLowerAffinePass());
+  pm.addNestedPass<func::FuncOp>(createGpuAsyncRegionPass());
+}
+
+//===----------------------------------------------------------------------===//
+// GPUModule-specific stuff.
+//===----------------------------------------------------------------------===//
+void buildGPUPassPipeline(OpPassManager &pm,
+                          const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+  if (options.xegpuOpLevel == "workgroup") {
+    pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUWgToSgDistribute());
+    pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+    pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUBlocking());
+    pm.addNestedPass<gpu::GPUModuleOp>(createCanonicalizerPass());
+    pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+  }
+  if (options.xegpuOpLevel == "subgroup" ||
+      options.xegpuOpLevel == "workgroup") {
+    pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUPropagateLayout());
+    pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUSubgroupDistribute());
+    pm.addNestedPass<gpu::GPUModuleOp>(createCanonicalizerPass());
+    pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+    pm.addNestedPass<gpu::GPUModuleOp>(createLoopInvariantCodeMotionPass());
+    pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+    pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUVectorLinearize());
+  }
+  pm.addNestedPass<gpu::GPUModuleOp>(createConvertMathToXeVM());
+  pm.addNestedPass<gpu::GPUModuleOp>(createConvertXeGPUToXeVMPass());
+  {
+    ConvertGpuOpsToLLVMSPVOpsOptions gpuToLLVMSPVOptions;
+    gpuToLLVMSPVOptions.use64bitIndex = options.use64bitIndex;
+    pm.addNestedPass<gpu::GPUModuleOp>(
+        createConvertGpuOpsToLLVMSPVOps(gpuToLLVMSPVOptions));
+  }
+  pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+  pm.addNestedPass<gpu::GPUModuleOp>(createReconcileUnrealizedCastsPass());
+}
+
+//===----------------------------------------------------------------------===//
+// Post-GPU pipeline for both Host and GPU.
+//===----------------------------------------------------------------------===//
+void buildPostGPUCommonPassPipeline(
+    OpPassManager &pm, const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+  // builtin.module scope passes.
+  pm.addPass(createSCFToControlFlowPass());
+  pm.addPass(memref::createExpandStridedMetadataPass());
+  {
+    GpuToLLVMConversionPassOptions gpuToLLVMOptions;
+    gpuToLLVMOptions.hostBarePtrCallConv = options.hostBarePtrCallConv;
+    gpuToLLVMOptions.kernelBarePtrCallConv = options.kernelBarePtrCallConv;
+    pm.addPass(createGpuToLLVMConversionPass(gpuToLLVMOptions));
+  }
+  pm.addPass(createLowerAffinePass());
+  pm.addPass(createConvertToLLVMPass());
+  pm.addPass(createReconcileUnrealizedCastsPass());
+  // gpu-module-to-binary
+  {
+    GpuModuleToBinaryPassOptions gpuToModuleBinOptions;
+    gpuToModuleBinOptions.compilationTarget = options.binaryFormat;
+    gpuToModuleBinOptions.cmdOptions = options.cmdOptions;
+    pm.addPass(createGpuModuleToBinaryPass(gpuToModuleBinOptions));
+  }
+}
+} // namespace
+
+void mlir::gpu::buildLowerToXeVMPassPipeline(
+    OpPassManager &pm, const GPUToXeVMPipelineOptions &options) {
+  // Pre-GPU common pipelines.
+  buildPreGPUCommonPassPipeline(pm, options);
+
+  // GPUModule-specific stuff.
+  buildGPUPassPipeline(pm, options);
+
+  // Post-GPU pipeline for both Host and GPU.
+  buildPostGPUCommonPassPipeline(pm, options);
+}
+
+void mlir::gpu::registerGPUToXeVMPipeline() {
+  PassPipelineRegistration<GPUToXeVMPipelineOptions>(
+      "gpu-lower-to-xevm-pipeline",
+      "The default GPU to XeVM lowering pipeline. It starts by lowering GPU "
+      "code to the "
+      "specified compilation target (default is fatbin) then lowers the host "
+      "code.",
+      buildLowerToXeVMPassPipeline);
+}

mlir/lib/RegisterAllPasses.cpp

Lines changed: 1 addition & 0 deletions
@@ -98,4 +98,5 @@ void mlir::registerAllPasses() {
   sparse_tensor::registerSparseTensorPipelines();
   tosa::registerTosaToLinalgPipelines();
   gpu::registerGPUToNVVMPipeline();
+  gpu::registerGPUToXeVMPipeline();
 }
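
Besides registerAllPasses(), a downstream mlir-opt-style tool can register only this pipeline. A minimal sketch under that assumption follows; the tool name "xevm-opt" and the blanket dialect registration are illustrative, not part of the commit.

#include "mlir/Dialect/GPU/Pipelines/Passes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"

int main(int argc, char **argv) {
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry); // or only the dialects actually used
  // Makes gpu-lower-to-xevm-pipeline available on the command line.
  mlir::gpu::registerGPUToXeVMPipeline();
  return mlir::asMainReturnCode(
      mlir::MlirOptMain(argc, argv, "xevm-opt driver\n", registry));
}
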
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+    config.unsupported = True
+if not config.enable_levelzero_runner:
+    config.unsupported = True
Lines changed: 121 additions & 0 deletions
@@ -0,0 +1,121 @@
+// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=lane" \
+// RUN: | mlir-runner \
+// RUN:   --shared-libs=%mlir_levelzero_runtime \
+// RUN:   --shared-libs=%mlir_runner_utils \
+// RUN:   --entry-point-result=void \
+// RUN: | FileCheck %s
+
+module @gemm attributes {gpu.container_module} {
+  gpu.module @kernel {
+    gpu.func @simple_gemm(%a: memref<256x256xf16>, %b: memref<256x256xf16>, %c: memref<256x256xf32>) kernel {
+      %c0 = arith.constant 0 : index
+      %c1 = arith.constant 1 : index
+      %c8 = arith.constant 8 : index
+      %c16 = arith.constant 16 : index
+      %c32 = arith.constant 32 : index
+      %c256 = arith.constant 256 : index
+      %block_x = gpu.block_id x
+      %block_y = gpu.block_id y
+      %x_block_offset = arith.muli %block_x, %c8 : index
+      %y_block_offset = arith.muli %block_y, %c16 : index
+
+      %c_tdesc = xegpu.create_nd_tdesc %c : memref<256x256xf32> -> !xegpu.tensor_desc<8x16xf32>
+      %c_init_value = xegpu.load_nd %c_tdesc[%x_block_offset, %y_block_offset] : !xegpu.tensor_desc<8x16xf32> -> vector<8xf32>
+      %a_tdesc = xegpu.create_nd_tdesc %a : memref<256x256xf16> -> !xegpu.tensor_desc<8x16xf16>
+      %b_tdesc = xegpu.create_nd_tdesc %b : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>
+
+      %r = scf.for %k = %c0 to %c256 step %c16 iter_args(%arg_c = %c_init_value) -> (vector<8xf32>) {
+        %a_val = xegpu.load_nd %a_tdesc[%x_block_offset, %k] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16>
+        %b_val = xegpu.load_nd %b_tdesc[%k, %y_block_offset] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16>
+        %dpas = xegpu.dpas %a_val, %b_val, %arg_c : vector<8xf16>, vector<16xf16>, vector<8xf32> -> vector<8xf32>
+        scf.yield %dpas : vector<8xf32>
+      }
+      xegpu.store_nd %r, %c_tdesc[%x_block_offset, %y_block_offset] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<8xf32>, !xegpu.tensor_desc<8x16xf32>
+      gpu.return
+    }
+  }
+
+  func.func @test(%a : memref<256x256xf16>, %b : memref<256x256xf16>, %c : memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
+    %c1 = arith.constant 1 : index
+    %c16 = arith.constant 16 : index
+    %c32 = arith.constant 32 : index
+    %memref_a = gpu.alloc () : memref<256x256xf16>
+    gpu.memcpy %memref_a, %a : memref<256x256xf16>, memref<256x256xf16>
+    %memref_b = gpu.alloc () : memref<256x256xf16>
+    gpu.memcpy %memref_b, %b : memref<256x256xf16>, memref<256x256xf16>
+    %memref_c = gpu.alloc () : memref<256x256xf32>
+    gpu.memcpy %memref_c, %c : memref<256x256xf32>, memref<256x256xf32>
+    gpu.launch_func @kernel::@simple_gemm blocks in (%c32, %c16, %c1) threads in (%c16, %c1, %c1) args(%memref_a : memref<256x256xf16>, %memref_b : memref<256x256xf16>, %memref_c : memref<256x256xf32>)
+    gpu.wait // Wait for the kernel to finish.
+    gpu.memcpy %c, %memref_c : memref<256x256xf32>, memref<256x256xf32>
+    gpu.dealloc %memref_a : memref<256x256xf16>
+    gpu.dealloc %memref_b : memref<256x256xf16>
+    gpu.dealloc %memref_c : memref<256x256xf32>
+    return %c : memref<256x256xf32>
+  }
+
+  func.func @main() attributes {llvm.emit_c_interface} {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %c1_f16 = arith.constant 1.0 : f16
+    %c2_f16 = arith.constant 2.0 : f16
+    %c256 = arith.constant 256 : index
+    %cf_0 = arith.constant 0.0 : f16
+    %cf_1 = arith.constant 1.0 : f16
+    %A = memref.alloc() : memref<256x256xf16>
+    %B = memref.alloc() : memref<256x256xf16>
+    %C = memref.alloc() : memref<256x256xf32>
+    %C_ref = memref.alloc() : memref<256x256xf32>
+    %c_gen_int = arith.constant 0 : i1
+    %cf_lower = arith.constant -0.5 : f32
+    %cf_upper = arith.constant 0.5 : f32
+
+    // Initialize matrix A ; A[i, j] = j
+    scf.for %i = %c0 to %c256 step %c1 {
+      scf.for %j = %c0 to %c256 step %c1 {
+        %t = index.castu %j : index to i16
+        %val = arith.uitofp %t : i16 to f16
+        memref.store %val, %A[%i, %j] : memref<256x256xf16>
+      }
+    }
+
+    // Initialize the B matrix.
+    // Make matrix B an identity matrix.
+    scf.for %i = %c0 to %c256 step %c1 {
+      scf.for %j = %c0 to %c256 step %c1 {
+        %i_i32 = index.castu %i : index to i32
+        %j_i32 = index.castu %j : index to i32
+        %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
+
+        scf.if %i_j_same {
+          memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
+        } else {
+          memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
+        }
+      }
+    }
+
+    // Initialize matrix C and C_ref ; C[i, j] = 0
+    %c0_f32 = arith.constant 0.0 : f32
+    scf.for %i = %c0 to %c256 step %c1 {
+      scf.for %j = %c0 to %c256 step %c1 {
+        memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
+        memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
+      }
+    }
+
+    // Run GPU version.
+    %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
+    %gpu_result_cast = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
+
+    // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
+    // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
+    call @printMemrefF32(%gpu_result_cast) : (memref<*xf32>) -> ()
+    memref.dealloc %A : memref<256x256xf16>
+    memref.dealloc %B : memref<256x256xf16>
+    memref.dealloc %C : memref<256x256xf32>
+    memref.dealloc %C_ref : memref<256x256xf32>
+    return
+  }
+  func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
+}
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+    config.unsupported = True
+if not config.enable_levelzero_runner:
+    config.unsupported = True
