Commit 307e0b1

[refactor] [llvm] Rename CodeGenCPU/CUDA/WASM and CodeGenLLVMCPU/CUDA/WASM (#5500)

* [refactor] [llvm] Rename CodeGenCPU/CUDA/WASM and CodeGenLLVMCPU/CUDA/WASM

* module->task

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
lin-hitonami and pre-commit-ci[bot] committed Jul 25, 2022
1 parent c1f4519 commit 307e0b1
Showing 14 changed files with 246 additions and 232 deletions.
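
The rename separates the two codegen layers by the unit they compile: a KernelCodeGen* subclass drives code generation for one whole kernel on a given backend, while a TaskCodeGen* subclass (formerly CodeGenLLVM*) lowers a single offloaded task. Below is a minimal standalone sketch of that shape, assuming simplified stand-in types; only the class names and the create() dispatch pattern come from this commit, everything else is illustrative:

#include <memory>

// Illustrative stand-ins; Taichi's real Kernel and Arch are richer.
struct Kernel {};
enum class Arch { x64, cuda, wasm };

// Kernel-level code generator: one per kernel, per backend.
class KernelCodeGen {
 public:
  explicit KernelCodeGen(Kernel *kernel) : kernel_(kernel) {}
  virtual ~KernelCodeGen() = default;
  static std::unique_ptr<KernelCodeGen> create(Arch arch, Kernel *kernel);

 protected:
  Kernel *kernel_;
};

class KernelCodeGenCPU : public KernelCodeGen {
 public:
  using KernelCodeGen::KernelCodeGen;
};

class KernelCodeGenCUDA : public KernelCodeGen {
 public:
  using KernelCodeGen::KernelCodeGen;
};

// Dispatch in the style of KernelCodeGen::create (taichi/codegen/codegen.cpp).
std::unique_ptr<KernelCodeGen> KernelCodeGen::create(Arch arch,
                                                     Kernel *kernel) {
  if (arch == Arch::cuda) {
    return std::make_unique<KernelCodeGenCUDA>(kernel);
  }
  return std::make_unique<KernelCodeGenCPU>(kernel);
}

int main() {
  Kernel k;
  auto cg = KernelCodeGen::create(Arch::x64, &k);  // picks KernelCodeGenCPU
  return 0;
}
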
6 changes: 3 additions & 3 deletions taichi/codegen/codegen.cpp
@@ -37,12 +37,12 @@ std::unique_ptr<KernelCodeGen> KernelCodeGen::create(Arch arch,
                                                      Stmt *stmt) {
 #ifdef TI_WITH_LLVM
   if (arch_is_cpu(arch) && arch != Arch::wasm) {
-    return std::make_unique<CodeGenCPU>(kernel, stmt);
+    return std::make_unique<KernelCodeGenCPU>(kernel, stmt);
   } else if (arch == Arch::wasm) {
-    return std::make_unique<CodeGenWASM>(kernel, stmt);
+    return std::make_unique<KernelCodeGenWASM>(kernel, stmt);
   } else if (arch == Arch::cuda) {
 #if defined(TI_WITH_CUDA)
-    return std::make_unique<CodeGenCUDA>(kernel, stmt);
+    return std::make_unique<KernelCodeGenCUDA>(kernel, stmt);
 #else
     TI_NOT_IMPLEMENTED
 #endif
26 changes: 14 additions & 12 deletions taichi/codegen/cpu/codegen_cpu.cpp
@@ -15,12 +15,12 @@ TLANG_NAMESPACE_BEGIN

 namespace {

-class CodeGenLLVMCPU : public CodeGenLLVM {
+class TaskCodeGenCPU : public TaskCodeGenLLVM {
  public:
   using IRVisitor::visit;

-  CodeGenLLVMCPU(Kernel *kernel, IRNode *ir)
-      : CodeGenLLVM(kernel, ir, nullptr) {
+  TaskCodeGenCPU(Kernel *kernel, IRNode *ir)
+      : TaskCodeGenLLVM(kernel, ir, nullptr) {
     TI_AUTO_PROF
   }

@@ -212,9 +212,9 @@ class CodeGenLLVMCPU : public CodeGenLLVM {

   void visit(ExternalFuncCallStmt *stmt) override {
     if (stmt->type == ExternalFuncCallStmt::BITCODE) {
-      CodeGenLLVM::visit_call_bitcode(stmt);
+      TaskCodeGenLLVM::visit_call_bitcode(stmt);
     } else if (stmt->type == ExternalFuncCallStmt::SHARED_OBJECT) {
-      CodeGenLLVM::visit_call_shared_object(stmt);
+      TaskCodeGenLLVM::visit_call_shared_object(stmt);
     } else {
       TI_NOT_IMPLEMENTED
     }

@@ -225,9 +225,10 @@ class CodeGenLLVMCPU : public CodeGenLLVM {

 #ifdef TI_WITH_LLVM
 // static
-std::unique_ptr<CodeGenLLVM> CodeGenCPU::make_codegen_llvm(Kernel *kernel,
-                                                           IRNode *ir) {
-  return std::make_unique<CodeGenLLVMCPU>(kernel, ir);
+std::unique_ptr<TaskCodeGenLLVM> KernelCodeGenCPU::make_codegen_llvm(
+    Kernel *kernel,
+    IRNode *ir) {
+  return std::make_unique<TaskCodeGenCPU>(kernel, ir);
 }

 FunctionType CPUModuleToFunctionConverter::convert(

@@ -274,14 +275,15 @@ FunctionType CPUModuleToFunctionConverter::convert(
   };
 }

-LLVMCompiledData CodeGenCPU::modulegen(std::unique_ptr<llvm::Module> &&module,
-                                       OffloadedStmt *stmt) {
-  CodeGenLLVMCPU gen(kernel, stmt);
+LLVMCompiledData KernelCodeGenCPU::modulegen(
+    std::unique_ptr<llvm::Module> &&module,
+    OffloadedStmt *stmt) {
+  TaskCodeGenCPU gen(kernel, stmt);
   return gen.run_compilation();
 }
 #endif  // TI_WITH_LLVM

-FunctionType CodeGenCPU::codegen() {
+FunctionType KernelCodeGenCPU::codegen() {
   TI_AUTO_PROF;
   // TODO(PGZXB): move the offline cache part to the base class
   auto *llvm_prog = get_llvm_program(prog);
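
The "module->task" bullet in the commit message lands here: modulegen constructs one task-level generator per offloaded statement and compiles it into a module. A self-contained sketch of that flow, assuming stand-in types (OffloadedStmt, LLVMCompiledData, and the empty bodies are placeholders, not Taichi's real definitions):

// Illustrative stand-ins; Taichi's real types are richer.
struct Kernel {};
struct OffloadedStmt {};
struct LLVMCompiledData {};

// Task-level code generator (formerly CodeGenLLVMCPU): one per offloaded task.
class TaskCodeGenCPU {
 public:
  TaskCodeGenCPU(Kernel *kernel, OffloadedStmt *stmt)
      : kernel_(kernel), stmt_(stmt) {}

  // In Taichi this would lower the task's IR into one LLVM module.
  LLVMCompiledData run_compilation() { return {}; }

 private:
  Kernel *kernel_;
  OffloadedStmt *stmt_;
};

// Shape of KernelCodeGenCPU::modulegen: the kernel-level codegen hands each
// offloaded task to a fresh task-level generator.
LLVMCompiledData compile_one_task(Kernel *kernel, OffloadedStmt *stmt) {
  TaskCodeGenCPU gen(kernel, stmt);
  return gen.run_compilation();
}
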
9 changes: 5 additions & 4 deletions taichi/codegen/cpu/codegen_cpu.h
@@ -9,15 +9,16 @@

 TLANG_NAMESPACE_BEGIN

-class CodeGenCPU : public KernelCodeGen {
+class KernelCodeGenCPU : public KernelCodeGen {
  public:
-  CodeGenCPU(Kernel *kernel, IRNode *ir = nullptr) : KernelCodeGen(kernel, ir) {
+  KernelCodeGenCPU(Kernel *kernel, IRNode *ir = nullptr)
+      : KernelCodeGen(kernel, ir) {
   }

   // TODO: Stop defining this macro guards in the headers
 #ifdef TI_WITH_LLVM
-  static std::unique_ptr<CodeGenLLVM> make_codegen_llvm(Kernel *kernel,
-                                                        IRNode *ir);
+  static std::unique_ptr<TaskCodeGenLLVM> make_codegen_llvm(Kernel *kernel,
+                                                            IRNode *ir);

   bool supports_offline_cache() const override {
     return true;
25 changes: 13 additions & 12 deletions taichi/codegen/cuda/codegen_cuda.cpp
@@ -23,12 +23,12 @@ using namespace llvm;
 // NVVM IR Spec:
 // https://docs.nvidia.com/cuda/archive/10.0/pdf/NVVM_IR_Specification.pdf

-class CodeGenLLVMCUDA : public CodeGenLLVM {
+class TaskCodeGenCUDA : public TaskCodeGenLLVM {
  public:
   using IRVisitor::visit;

-  CodeGenLLVMCUDA(Kernel *kernel, IRNode *ir = nullptr)
-      : CodeGenLLVM(kernel, ir) {
+  TaskCodeGenCUDA(Kernel *kernel, IRNode *ir = nullptr)
+      : TaskCodeGenLLVM(kernel, ir) {
   }

   llvm::Value *create_print(std::string tag,

@@ -627,7 +627,7 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {

   void visit(ExternalFuncCallStmt *stmt) override {
     if (stmt->type == ExternalFuncCallStmt::BITCODE) {
-      CodeGenLLVM::visit_call_bitcode(stmt);
+      TaskCodeGenLLVM::visit_call_bitcode(stmt);
     } else {
       TI_NOT_IMPLEMENTED
     }

@@ -644,7 +644,7 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {
   void visit(BinaryOpStmt *stmt) override {
     auto op = stmt->op_type;
     if (op != BinaryOpType::atan2 && op != BinaryOpType::pow) {
-      return CodeGenLLVM::visit(stmt);
+      return TaskCodeGenLLVM::visit(stmt);
     }

     auto ret_type = stmt->ret_type;

@@ -699,9 +699,10 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {

 #ifdef TI_WITH_LLVM
 // static
-std::unique_ptr<CodeGenLLVM> CodeGenCUDA::make_codegen_llvm(Kernel *kernel,
-                                                            IRNode *ir) {
-  return std::make_unique<CodeGenLLVMCUDA>(kernel, ir);
+std::unique_ptr<TaskCodeGenLLVM> KernelCodeGenCUDA::make_codegen_llvm(
+    Kernel *kernel,
+    IRNode *ir) {
+  return std::make_unique<TaskCodeGenCUDA>(kernel, ir);
 }
 #endif  // TI_WITH_LLVM

@@ -724,7 +725,7 @@ static void set_arg_external_array(RuntimeContext *ctx,
                       : RuntimeContext::DevAllocType::kNone);
 }

-FunctionType CodeGenCUDA::codegen() {
+FunctionType KernelCodeGenCUDA::codegen() {
   TI_AUTO_PROF
   // TODO: move the offline cache part to the base class
   auto *llvm_prog = get_llvm_program(prog);

@@ -748,7 +749,7 @@ FunctionType CodeGenCUDA::codegen() {
     kernel->lower(/*to_executable=*/false);
   }

-  CodeGenLLVMCUDA gen(kernel, ir);
+  TaskCodeGenCUDA gen(kernel, ir);
   auto compiled_res = gen.run_compilation();

   CUDAModuleToFunctionConverter converter{gen.tlctx,

@@ -826,8 +827,8 @@ FunctionType CUDAModuleToFunctionConverter::convert(

       } else if (arr_sz > 0) {
         // arg_buffers[i] is a DeviceAllocation*
-        // TODO: Unwraps DeviceAllocation* can be done at CodeGenLLVM since
-        // it's shared by cpu and cuda.
+        // TODO: Unwraps DeviceAllocation* can be done at TaskCodeGenLLVM
+        // since it's shared by cpu and cuda.
         DeviceAllocation *ptr =
             static_cast<DeviceAllocation *>(arg_buffers[i]);
         device_buffers[i] = executor->get_ndarray_alloc_info_ptr(*ptr);
8 changes: 4 additions & 4 deletions taichi/codegen/cuda/codegen_cuda.h
@@ -7,16 +7,16 @@

 TLANG_NAMESPACE_BEGIN

-class CodeGenCUDA : public KernelCodeGen {
+class KernelCodeGenCUDA : public KernelCodeGen {
  public:
-  CodeGenCUDA(Kernel *kernel, IRNode *ir = nullptr)
+  KernelCodeGenCUDA(Kernel *kernel, IRNode *ir = nullptr)
       : KernelCodeGen(kernel, ir) {
   }

   // TODO: Stop defining this macro guards in the headers
 #ifdef TI_WITH_LLVM
-  static std::unique_ptr<CodeGenLLVM> make_codegen_llvm(Kernel *kernel,
-                                                        IRNode *ir);
+  static std::unique_ptr<TaskCodeGenLLVM> make_codegen_llvm(Kernel *kernel,
+                                                            IRNode *ir);
 #endif  // TI_WITH_LLVM

   bool supports_offline_cache() const override {
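
Both backend headers now declare the same factory shape: the kernel-level class vends its matching task-level generator behind TI_WITH_LLVM. A hedged caller-side sketch; only make_codegen_llvm, KernelCodeGenCPU, TaskCodeGenCPU, and TaskCodeGenLLVM are names from the diff, the rest is scaffolding for illustration:

#include <memory>

// Illustrative stand-ins.
struct Kernel {};
struct IRNode {};

class TaskCodeGenLLVM {
 public:
  virtual ~TaskCodeGenLLVM() = default;
};

class TaskCodeGenCPU : public TaskCodeGenLLVM {
 public:
  TaskCodeGenCPU(Kernel *kernel, IRNode *ir) {}
};

class KernelCodeGenCPU {
 public:
  // Same shape as the declaration in taichi/codegen/cpu/codegen_cpu.h.
  static std::unique_ptr<TaskCodeGenLLVM> make_codegen_llvm(Kernel *kernel,
                                                            IRNode *ir) {
    return std::make_unique<TaskCodeGenCPU>(kernel, ir);
  }
};

int main() {
  Kernel k;
  IRNode ir;
  auto task_gen = KernelCodeGenCPU::make_codegen_llvm(&k, &ir);
  return 0;
}
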
(Diffs for the remaining 9 of the 14 changed files are not shown.)
