diff --git a/controllers/llamaCPP.cc b/controllers/llamaCPP.cc
index 39696c549..fc2705cdc 100644
--- a/controllers/llamaCPP.cc
+++ b/controllers/llamaCPP.cc
@@ -452,10 +452,11 @@ bool llamaCPP::loadModelImpl(const Json::Value &jsonBody) {
     this->pre_prompt = jsonBody.get("pre_prompt", "").asString();
     this->repeat_last_n = jsonBody.get("repeat_last_n", 32).asInt();

-    // Set folder for llama log
-    std::string llama_log_folder =
-        jsonBody.get("llama_log_folder", "log/").asString();
-    log_set_target(llama_log_folder + "llama.log");
+    if (!jsonBody["llama_log_folder"].isNull()) {
+      log_enable();
+      std::string llama_log_folder = jsonBody["llama_log_folder"].asString();
+      log_set_target(llama_log_folder + "llama.log");
+    } // Set folder for llama log
   }
 #ifdef GGML_USE_CUBLAS
   LOG_INFO << "Setting up GGML CUBLAS PARAMS";
diff --git a/controllers/llamaCPP.h b/controllers/llamaCPP.h
index 5dc693de5..e7e084e7c 100644
--- a/controllers/llamaCPP.h
+++ b/controllers/llamaCPP.h
@@ -5,6 +5,7 @@
 #endif

 #pragma once
+#define LOG_TARGET stdout
 #include "log.h"
 #include "utils/nitro_utils.h"

@@ -2486,7 +2487,7 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
 public:
   llamaCPP() {
     // Some default values for now below
-    log_enable(); // Disable the log to file feature, reduce bloat for
+    log_disable(); // Disable the log to file feature, reduce bloat for
                   // target
                   // system ()
     std::vector<std::string> llama_models =
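
Reviewer note (not part of the patch): with this change, file logging becomes opt-in. Defining `LOG_TARGET stdout` before including llama.cpp's `log.h` pins the default log target to stdout, the constructor now calls `log_disable()` so the comment finally matches the code, and `loadModelImpl` re-enables file logging only when the request body carries a `llama_log_folder` key. Below is a minimal standalone sketch of that gating logic, assuming jsoncpp is available; the `llama_model_path` value is purely illustrative and the prints stand in for the real `log_enable()`/`log_set_target()` calls.

```cpp
// sketch.cc -- mirrors the patched check in loadModelImpl; not nitro code.
#include <json/json.h>
#include <iostream>
#include <sstream>
#include <string>

int main() {
  // Hypothetical loadmodel request body; only "llama_log_folder" matters here.
  const std::string body = R"({
    "llama_model_path": "/models/model.gguf",
    "llama_log_folder": "/tmp/nitro/"
  })";

  Json::Value jsonBody;
  std::istringstream in(body);
  in >> jsonBody; // jsoncpp's stream extractor parses the JSON text

  if (!jsonBody["llama_log_folder"].isNull()) {
    // Patched path: enable logging and point it at <folder>llama.log.
    std::string llama_log_folder = jsonBody["llama_log_folder"].asString();
    std::cout << "would call log_enable() and log_set_target(\""
              << llama_log_folder + "llama.log" << "\")\n";
  } else {
    // Default path after this patch: log_disable() in the constructor means
    // no llama.log is created; LOG() output falls back to LOG_TARGET.
    std::cout << "file logging stays disabled\n";
  }
}
```

One caveat visible in the diff: `log_set_target(llama_log_folder + "llama.log")` is plain string concatenation, so the folder value must end with a trailing slash (e.g. `"/tmp/nitro/"`, not `"/tmp/nitro"`), or the file lands next to the folder with a fused name.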