
Commit a2cb6ba

fix: log_disable

1 parent 30abf3a commit a2cb6ba

File tree

1 file changed, +8 -7 lines changed

controllers/llamaCPP.cc

Lines changed: 8 additions & 7 deletions
@@ -145,12 +145,12 @@ std::string create_return_json(const std::string& id, const std::string& model,
 
 llamaCPP::llamaCPP()
     : queue(new trantor::ConcurrentTaskQueue(llama.params.n_parallel,
-                                             "llamaCPP")){
-  // Some default values for now below
-  // log_disable(); // Disable the log to file feature, reduce bloat for
-  // target
-  // system ()
-};
+                                             "llamaCPP")) {
+  // Some default values for now below
+  log_disable(); // Disable the log to file feature, reduce bloat for
+  // target
+  // system ()
+};
 
 llamaCPP::~llamaCPP() {
   StopBackgroundTask();
@@ -163,7 +163,8 @@ void llamaCPP::WarmupModel(bool is_embedded_model) {
   pseudo["prompt"] = "Hello";
   pseudo["n_predict"] = 2;
   pseudo["stream"] = false;
-  const int task_id = llama.request_completion(pseudo, false, is_embedded_model, -1);
+  const int task_id =
+      llama.request_completion(pseudo, false, is_embedded_model, -1);
   std::string completion_text;
   task_result result = llama.next_result(task_id);
   if (!result.error && result.stop) {
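The change is small but behavioral: the log_disable() call in the llamaCPP constructor, previously commented out, is now executed, so llama.cpp's log-to-file output is switched off as soon as the controller is constructed (hence the commit message "fix: log_disable"). The second hunk in WarmupModel() only re-wraps the request_completion() call and does not change behavior. Below is a minimal sketch of the constructor as it reads after this commit, consolidated from the '+' lines of the diff above; it assumes trantor::ConcurrentTaskQueue is the task queue class from the trantor library bundled with Drogon, and that log_disable() is the helper from llama.cpp's common/log.h; neither is verified against this repository beyond what the diff shows.

    // Sketch only: llamaCPP::llamaCPP() as it reads after this commit,
    // consolidated from the '+' lines of the diff above (not copied verbatim
    // from the repository).
    llamaCPP::llamaCPP()
        : queue(new trantor::ConcurrentTaskQueue(llama.params.n_parallel,
                                                 "llamaCPP")) {
      // Some default values for now below
      log_disable(); // Disable the log-to-file feature, reduce bloat for the
                     // target system
    };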
