forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
executor_utils.h
55 lines (42 loc) · 1.24 KB
/
executor_utils.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
#pragma once
#include <ATen/core/ivalue.h>
#include <c10/core/DeviceType.h>
#include <c10/util/Exception.h>
#include <cuda.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/codegen/cuda/expr_evaluator.h>
#include <torch/csrc/jit/codegen/cuda/fusion.h>
#include <torch/csrc/jit/codegen/cuda/ir_all_nodes.h>
#include <torch/csrc/jit/codegen/cuda/lower2device.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Utilities used by the fusion executor: input/output validation,
// expression-evaluator binding, and NVRTC compilation of generated kernels.
namespace executor_utils {
// Include all the functions we might need in generated code
// (i.e. the source text prepended to every generated CUDA kernel
// before it is handed to NVRTC).
std::string kernelPreamble();
// Checks runtime kernel inputs against the fusion's expectations.
// NOTE(review): presumably verifies arity/type and that tensor inputs live
// on `device` — confirm against the definition in executor_utils.cpp.
void validateKernelInputs(
Fusion* fusion,
const at::ArrayRef<IValue>& inputs,
const c10::Device& device);
// Checks runtime kernel outputs against the fusion's expectations.
// NOTE(review): same caveat as validateKernelInputs — semantics live in the
// .cpp; this header only fixes the signature.
void validateKernelOutputs(
Fusion* fusion,
const std::vector<at::Tensor>& outputs,
const c10::Device& device);
// Builds a StatefulExpressionEvaluator with the concrete aten input values
// bound into `fusion`. `lower` is optional (defaults to nullptr); when
// provided, presumably binding happens against the lowered kernel IR —
// confirm in the .cpp definition.
StatefulExpressionEvaluator statefulBindInputs(
const at::ArrayRef<IValue>& aten_inputs,
Fusion* fusion,
GpuLower* lower = nullptr);
// Handle pair for a compiled kernel: the CUDA driver module it was loaded
// into and the entry-point function within it. Both default to null handles
// (value-initialized) until nvrtcCompile fills them in.
struct NvrtcFunction {
CUmodule module = CUmodule();
CUfunction function = CUfunction();
};
// Compiles the generated kernel source `code` and returns handles to the
// loaded module and the function named `func_name`. `id` is an identifier
// for this compilation — presumably the fusion/kernel id used for
// caching or debug naming; confirm in the .cpp definition.
NvrtcFunction nvrtcCompile(
const std::string& code,
const std::string& func_name,
int id);
} // namespace executor_utils
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch