[refactor] Fix "const CompileConfig *" to "const CompileConfig &" (Part2) #7253

Merged
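The pattern is the same across every file in this diff: interfaces that used to take or return a mutable CompileConfig * now take or return a const CompileConfig &, so call sites switch from config->field to config.field and lose the ability to mutate the config. A minimal sketch of the before/after shape — the Executor class, its fields, and the use() function below are simplified illustrations for this note, not the actual Taichi API:

struct CompileConfig {
  int arch = 0;
  bool fast_math = true;
};

class Executor {
 public:
  // Before: CompileConfig *get_config() { return &config_; }
  const CompileConfig &get_config() const {
    return config_;
  }

 private:
  CompileConfig config_;
};

void use(const Executor &executor) {
  // Before: auto *config = executor.get_config(); ... config->arch ...
  const auto &config = executor.get_config();
  int arch = config.arch;  // members are read through '.', and the config
  (void)arch;              // can no longer be modified through this handle
}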
18 changes: 9 additions & 9 deletions c_api/src/taichi_llvm_impl.cpp
@@ -54,44 +54,44 @@ taichi::lang::Device &LlvmRuntime::get() {

TiMemory LlvmRuntime::allocate_memory(
const taichi::lang::Device::AllocParams &params) {
-taichi::lang::CompileConfig *config = executor_->get_config();
+const taichi::lang::CompileConfig &config = executor_->get_config();
taichi::lang::TaichiLLVMContext *tlctx =
-executor_->get_llvm_context(config->arch);
+executor_->get_llvm_context(config.arch);
taichi::lang::LLVMRuntime *llvm_runtime = executor_->get_llvm_runtime();
taichi::lang::LlvmDevice *llvm_device = executor_->llvm_device();

taichi::lang::DeviceAllocation devalloc =
llvm_device->allocate_memory_runtime(
-{params, config->ndarray_use_cached_allocator,
+{params, config.ndarray_use_cached_allocator,
tlctx->runtime_jit_module, llvm_runtime, result_buffer});
return devalloc2devmem(*this, devalloc);
}

void LlvmRuntime::free_memory(TiMemory devmem) {
-taichi::lang::CompileConfig *config = executor_->get_config();
+const taichi::lang::CompileConfig &config = executor_->get_config();
// For memory allocated through Device::allocate_memory_runtime(),
// the corresponding Device::free_memory() interface has not been
// implemented yet...
-if (taichi::arch_is_cpu(config->arch)) {
-TI_CAPI_NOT_SUPPORTED_IF(taichi::arch_is_cpu(config->arch));
+if (taichi::arch_is_cpu(config.arch)) {
+TI_CAPI_NOT_SUPPORTED_IF(taichi::arch_is_cpu(config.arch));
}

Runtime::free_memory(devmem);
}

TiAotModule LlvmRuntime::load_aot_module(const char *module_path) {
-auto *config = executor_->get_config();
+const auto &config = executor_->get_config();
std::unique_ptr<taichi::lang::aot::Module> aot_module{nullptr};

-if (taichi::arch_is_cpu(config->arch)) {
+if (taichi::arch_is_cpu(config.arch)) {
taichi::lang::cpu::AotModuleParams aot_params;
aot_params.executor_ = executor_.get();
aot_params.module_path = module_path;
aot_module = taichi::lang::cpu::make_aot_module(aot_params);

} else {
#ifdef TI_WITH_CUDA
-TI_ASSERT(config->arch == taichi::Arch::cuda);
+TI_ASSERT(config.arch == taichi::Arch::cuda);
taichi::lang::cuda::AotModuleParams aot_params;
aot_params.executor_ = executor_.get();
aot_params.module_path = module_path;
2 changes: 1 addition & 1 deletion taichi/codegen/amdgpu/codegen_amdgpu.cpp
@@ -424,7 +424,7 @@ FunctionType AMDGPUModuleToFunctionConverter::convert(
auto &tasks = data.tasks;
auto jit = tlctx_->jit.get();
auto amdgpu_module =
-jit->add_module(std::move(mod), executor_->get_config()->gpu_max_reg);
+jit->add_module(std::move(mod), executor_->get_config().gpu_max_reg);

return [amdgpu_module, kernel_name, args, offloaded_tasks = tasks,
executor = this->executor_](RuntimeContext &context) {
4 changes: 2 additions & 2 deletions taichi/codegen/cuda/codegen_cuda.cpp
@@ -615,7 +615,7 @@ FunctionType CUDAModuleToFunctionConverter::convert(
#ifdef TI_WITH_CUDA
auto jit = tlctx_->jit.get();
auto cuda_module =
-jit->add_module(std::move(mod), executor_->get_config()->gpu_max_reg);
+jit->add_module(std::move(mod), executor_->get_config().gpu_max_reg);

return [cuda_module, kernel_name, args, offloaded_tasks = tasks,
executor = this->executor_](RuntimeContext &context) {
@@ -688,7 +688,7 @@ FunctionType CUDAModuleToFunctionConverter::convert(
CUDADriver::get_instance().stream_synchronize(nullptr);
}
CUDADriver::get_instance().context_set_limit(
-CU_LIMIT_STACK_SIZE, executor->get_config()->cuda_stack_limit);
+CU_LIMIT_STACK_SIZE, executor->get_config().cuda_stack_limit);

for (auto task : offloaded_tasks) {
TI_TRACE("Launching kernel {}<<<{}, {}>>>", task.name, task.grid_dim,
8 changes: 4 additions & 4 deletions taichi/jit/jit_session.cpp
@@ -9,21 +9,21 @@ namespace taichi::lang {
#ifdef TI_WITH_LLVM
std::unique_ptr<JITSession> create_llvm_jit_session_cpu(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch);

std::unique_ptr<JITSession> create_llvm_jit_session_cuda(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch);
#endif

-JITSession::JITSession(TaichiLLVMContext *tlctx, CompileConfig *config)
+JITSession::JITSession(TaichiLLVMContext *tlctx, const CompileConfig &config)
: tlctx_(tlctx), config_(config) {
}

std::unique_ptr<JITSession> JITSession::create(TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch) {
#ifdef TI_WITH_LLVM
if (arch_is_cpu(arch)) {
6 changes: 3 additions & 3 deletions taichi/jit/jit_session.h
@@ -17,12 +17,12 @@ struct CompileConfig;
class JITSession {
protected:
TaichiLLVMContext *tlctx_;
-CompileConfig *config_;
+const CompileConfig &config_;

std::vector<std::unique_ptr<JITModule>> modules;

public:
-JITSession(TaichiLLVMContext *tlctx, CompileConfig *config);
+JITSession(TaichiLLVMContext *tlctx, const CompileConfig &config);

virtual JITModule *add_module(std::unique_ptr<llvm::Module> M,
int max_reg = 0) = 0;
@@ -36,7 +36,7 @@ class JITSession {
virtual llvm::DataLayout get_data_layout() = 0;

static std::unique_ptr<JITSession> create(TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch);

virtual void global_optimize_module(llvm::Module *module) {
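Note that jit_session.h now stores the configuration as a reference member (const CompileConfig &config_) rather than a pointer. A reference member must be bound in the constructor's initializer list — which jit_session.cpp already does with ": tlctx_(tlctx), config_(config)" — cannot be reseated afterwards, and implicitly deletes the class's copy assignment. A stripped-down sketch of that shape; JITSessionSketch is a made-up name for illustration, not a class in this PR:

struct CompileConfig;  // a forward declaration is enough for a reference member

class JITSessionSketch {
 public:
  explicit JITSessionSketch(const CompileConfig &config)
      : config_(config) {  // the reference must be bound here, exactly once
  }

 private:
  // Read-only view of a config owned elsewhere; it must outlive this object.
  // Holding a reference also makes the class non-copy-assignable.
  const CompileConfig &config_;
};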
8 changes: 4 additions & 4 deletions taichi/runtime/amdgpu/jit_amdgpu.cpp
@@ -27,7 +27,7 @@ std::string JITSessionAMDGPU::compile_module_to_hsaco(
}
using namespace llvm;

-if (this->config_->print_kernel_llvm_ir) {
+if (this->config_.print_kernel_llvm_ir) {
static FileSequenceWriter writer("taichi_kernel_amdgpu_llvm_ir_{:04d}.ll",
"unoptimized LLVM IR (AMDGPU)");
writer.write(llvm_module.get());
@@ -93,7 +93,7 @@ std::string JITSessionAMDGPU::compile_module_to_hsaco(

std::string hsaco_str = load_hsaco(hsaco_path);

-if (this->config_->print_kernel_llvm_ir_optimized) {
+if (this->config_.print_kernel_llvm_ir_optimized) {
static FileSequenceWriter writer(
"taichi_kernel_amdgpu_llvm_ir_optimized_{:04d}.ll",
"unoptimized LLVM IR (AMDGPU)");
@@ -104,7 +104,7 @@ std::string JITSessionAMDGPU::compile_module_to_hsaco(

std::unique_ptr<JITSession> create_llvm_jit_session_amdgpu(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch) {
TI_ASSERT(arch == Arch::amdgpu);
auto data_layout = llvm::DataLayout(
@@ -117,7 +117,7 @@ std::unique_ptr<JITSession> create_llvm_jit_session_amdgpu(
#else
std::unique_ptr<JITSession> create_llvm_jit_session_amdgpu(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch) {
TI_NOT_IMPLEMENTED
}
4 changes: 2 additions & 2 deletions taichi/runtime/amdgpu/jit_amdgpu.h
@@ -93,7 +93,7 @@ class JITSessionAMDGPU : public JITSession {
llvm::DataLayout data_layout;

JITSessionAMDGPU(TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
llvm::DataLayout data_layout)
: JITSession(tlctx, config), data_layout(data_layout) {
random_num_ = get_random_num();
@@ -145,7 +145,7 @@

std::unique_ptr<JITSession> create_llvm_jit_session_amdgpu(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch);

} // namespace lang
2 changes: 1 addition & 1 deletion taichi/runtime/cpu/aot_module_loader_impl.cpp
@@ -18,7 +18,7 @@ class AotModuleImpl : public LlvmAotModule {
FunctionType convert_module_to_function(
const std::string &name,
LlvmOfflineCache::KernelCacheData &&loaded) override {
-Arch arch = executor_->get_config()->arch;
+Arch arch = executor_->get_config().arch;
TI_ASSERT(arch == Arch::x64 || arch == Arch::arm64);
auto *tlctx = executor_->get_llvm_context(arch);

8 changes: 4 additions & 4 deletions taichi/runtime/cpu/jit_cpu.cpp
@@ -109,7 +109,7 @@ class JITSessionCPU : public JITSession {
public:
JITSessionCPU(TaichiLLVMContext *tlctx,
std::unique_ptr<ExecutorProcessControl> EPC,
-CompileConfig *config,
+const CompileConfig &config,
JITTargetMachineBuilder JTMB,
DataLayout DL)
: JITSession(tlctx, config),
@@ -224,7 +224,7 @@ void JITSessionCPU::global_optimize_module_cpu(llvm::Module *module) {
TI_ERROR_UNLESS(target, err_str);

TargetOptions options;
-if (this->config_->fast_math) {
+if (this->config_.fast_math) {
options.AllowFPOpFusion = FPOpFusion::Fast;
options.UnsafeFPMath = 1;
options.NoInfsFPMath = 1;
@@ -294,7 +294,7 @@ void JITSessionCPU::global_optimize_module_cpu(llvm::Module *module) {
module_pass_manager.run(*module);
}

-if (this->config_->print_kernel_llvm_ir_optimized) {
+if (this->config_.print_kernel_llvm_ir_optimized) {
if (false) {
TI_INFO("Functions with > 100 instructions in optimized LLVM IR:");
TaichiLLVMContext::print_huge_functions(module);
@@ -308,7 +308,7 @@

std::unique_ptr<JITSession> create_llvm_jit_session_cpu(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch) {
TI_ASSERT(arch_is_cpu(arch));
auto target_info = get_host_target_info();
2 changes: 1 addition & 1 deletion taichi/runtime/cuda/aot_module_loader_impl.cpp
@@ -18,7 +18,7 @@ class AotModuleImpl : public LlvmAotModule {
FunctionType convert_module_to_function(
const std::string &name,
LlvmOfflineCache::KernelCacheData &&loaded) override {
-Arch arch = executor_->get_config()->arch;
+Arch arch = executor_->get_config().arch;
TI_ASSERT(arch == Arch::cuda);
auto *tlctx = executor_->get_llvm_context(arch);

12 changes: 6 additions & 6 deletions taichi/runtime/cuda/jit_cuda.cpp
@@ -8,7 +8,7 @@ namespace taichi::lang {
JITModule *JITSessionCUDA ::add_module(std::unique_ptr<llvm::Module> M,
int max_reg) {
auto ptx = compile_module_to_ptx(M);
-if (this->config_->print_kernel_nvptx) {
+if (this->config_.print_kernel_nvptx) {
static FileSequenceWriter writer("taichi_kernel_nvptx_{:04d}.ptx",
"module NVPTX");
writer.write(ptx);
@@ -82,7 +82,7 @@ std::string JITSessionCUDA::compile_module_to_ptx(

using namespace llvm;

-if (this->config_->print_kernel_llvm_ir) {
+if (this->config_.print_kernel_llvm_ir) {
static FileSequenceWriter writer("taichi_kernel_cuda_llvm_ir_{:04d}.ll",
"unoptimized LLVM IR (CUDA)");
writer.write(module.get());
@@ -103,7 +103,7 @@ std::string JITSessionCUDA::compile_module_to_ptx(
TI_ERROR_UNLESS(target, err_str);

TargetOptions options;
-if (this->config_->fast_math) {
+if (this->config_.fast_math) {
options.AllowFPOpFusion = FPOpFusion::Fast;
// See NVPTXISelLowering.cpp
// Setting UnsafeFPMath true will result in approximations such as
@@ -223,7 +223,7 @@ std::string JITSessionCUDA::compile_module_to_ptx(
module_pass_manager.run(*module);
}

-if (this->config_->print_kernel_llvm_ir_optimized) {
+if (this->config_.print_kernel_llvm_ir_optimized) {
static FileSequenceWriter writer(
"taichi_kernel_cuda_llvm_ir_optimized_{:04d}.ll",
"optimized LLVM IR (CUDA)");
@@ -239,7 +239,7 @@ std::string JITSessionCUDA::compile_module_to_ptx(

std::unique_ptr<JITSession> create_llvm_jit_session_cuda(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch) {
TI_ASSERT(arch == Arch::cuda);
// https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#data-layout
@@ -251,7 +251,7 @@ std::unique_ptr<JITSession> create_llvm_jit_session_cuda(
#else
std::unique_ptr<JITSession> create_llvm_jit_session_cuda(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch) {
TI_NOT_IMPLEMENTED
}
4 changes: 2 additions & 2 deletions taichi/runtime/cuda/jit_cuda.h
@@ -87,7 +87,7 @@ class JITSessionCUDA : public JITSession {
llvm::DataLayout data_layout;

JITSessionCUDA(TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
llvm::DataLayout data_layout)
: JITSession(tlctx, config), data_layout(data_layout) {
}
@@ -106,7 +106,7 @@

std::unique_ptr<JITSession> create_llvm_jit_session_cuda(
TaichiLLVMContext *tlctx,
-CompileConfig *config,
+const CompileConfig &config,
Arch arch);

} // namespace taichi::lang
2 changes: 1 addition & 1 deletion taichi/runtime/llvm/llvm_aot_module_loader.cpp
@@ -6,7 +6,7 @@ namespace taichi::lang {
LlvmOfflineCache::KernelCacheData LlvmAotModule::load_kernel_from_cache(
const std::string &name) {
TI_ASSERT(cache_reader_ != nullptr);
-auto *tlctx = executor_->get_llvm_context(executor_->get_config()->arch);
+auto *tlctx = executor_->get_llvm_context(executor_->get_config().arch);
LlvmOfflineCache::KernelCacheData loaded;
auto ok = cache_reader_->get_kernel_cache(loaded, name,
*tlctx->get_this_thread_context());
2 changes: 1 addition & 1 deletion taichi/runtime/llvm/llvm_aot_module_loader.h
@@ -26,7 +26,7 @@ class LlvmAotModule : public aot::Module {
}

Arch arch() const override {
-return executor_->get_config()->arch;
+return executor_->get_config().arch;
}

uint64_t version() const override {
4 changes: 2 additions & 2 deletions taichi/runtime/llvm/llvm_context.cpp
@@ -69,7 +69,7 @@ namespace taichi::lang {

using namespace llvm;

-TaichiLLVMContext::TaichiLLVMContext(CompileConfig *config, Arch arch)
+TaichiLLVMContext::TaichiLLVMContext(const CompileConfig &config, Arch arch)
: config_(config), arch_(arch) {
TI_TRACE("Creating Taichi llvm context for arch: {}", arch_name(arch));
main_thread_id_ = std::this_thread::get_id();
@@ -153,7 +153,7 @@ llvm::Type *TaichiLLVMContext::get_data_type(DataType dt) {
auto num_elements = tensor_type->get_num_elements();
// Return type is <element_type * num_elements> if real matrix is used,
// otherwise [element_type * num_elements].
-if (codegen_vector_type(*config_)) {
+if (codegen_vector_type(config_)) {
return llvm::VectorType::get(element_type, num_elements,
/*scalable=*/false);
}
4 changes: 2 additions & 2 deletions taichi/runtime/llvm/llvm_context.h
@@ -34,7 +34,7 @@ class TaichiLLVMContext {
explicit ThreadLocalData(std::unique_ptr<llvm::orc::ThreadSafeContext> ctx);
~ThreadLocalData();
};
-CompileConfig *config_;
+const CompileConfig &config_;

public:
std::unique_ptr<JITSession> jit{nullptr};
@@ -43,7 +43,7 @@

std::unique_ptr<ThreadLocalData> linking_context_data{nullptr};

-TaichiLLVMContext(CompileConfig *config, Arch arch);
+TaichiLLVMContext(const CompileConfig &config, Arch arch);

virtual ~TaichiLLVMContext();
