28 changes: 15 additions & 13 deletions controllers/llamaCPP.cc
@@ -96,18 +96,19 @@ std::string create_return_json(const std::string &id, const std::string &model,
 }
 
 void llamaCPP::warmupModel() {
-  // json pseudo;
-  //
-  // pseudo["prompt"] = "Hello";
-  // pseudo["n_predict"] = 10;
-  // const int task_id = llama.request_completion(pseudo, false);
-  // std::string completion_text;
-  // task_result result = llama.next_result(task_id);
-  // if (!result.error && result.stop) {
-  //   LOG_INFO << result.result_json.dump(-1, ' ', false,
-  //   json::error_handler_t::replace);
-  // }
-  // return;
+  json pseudo;
+
+  pseudo["prompt"] = "Hello";
+  pseudo["n_predict"] = 10;
+  pseudo["stream"] = false;
+  const int task_id = llama.request_completion(pseudo, false, false);
+  std::string completion_text;
+  task_result result = llama.next_result(task_id);
+  if (!result.error && result.stop) {
+    LOG_INFO << result.result_json.dump(-1, ' ', false,
+                                        json::error_handler_t::replace);
+  }
+  return;
 }
 
 void llamaCPP::chatCompletion(
@@ -365,10 +366,11 @@ void llamaCPP::loadModel(
   jsonResp["message"] = "Model loaded successfully";
   model_loaded = true;
   auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
-  // warmupModel();
 
   LOG_INFO << "Started background task here!";
   backgroundThread = std::thread(&llamaCPP::backgroundTask, this);
+  warmupModel();
+
   callback(resp);
 }
 
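A note on the ordering the second hunk settles on: the previously commented-out warmup call is re-enabled, but only after `backgroundThread` is started, presumably because `warmupModel()` blocks in `next_result()` until the background loop has processed the queued completion. The standalone sketch below (hypothetical names, not Nitro code) reproduces that producer/consumer dependency: a blocking warmup request is only answered once a worker thread drains the task queue, so the worker must be running first.

```cpp
// Standalone sketch (hypothetical, not Nitro code) of the ordering
// constraint: the warmup request blocks until a worker thread consumes
// the task queue, so the worker must be started before warming up.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

std::mutex mu;
std::condition_variable cv;
std::queue<int> tasks;  // stands in for the server's completion queue
bool done = false;      // set once the worker has answered the task

void backgroundTask() {  // stands in for llamaCPP::backgroundTask
  std::unique_lock<std::mutex> lk(mu);
  cv.wait(lk, [] { return !tasks.empty(); });
  tasks.pop();  // "run" the queued completion
  done = true;
  cv.notify_all();
}

void warmupModel() {  // stands in for llamaCPP::warmupModel
  {
    std::lock_guard<std::mutex> lk(mu);
    tasks.push(1);  // request_completion(...): enqueue the warmup prompt
  }
  cv.notify_all();
  std::unique_lock<std::mutex> lk(mu);
  cv.wait(lk, [] { return done; });  // next_result(task_id) blocks here
  std::cout << "warmup finished\n";
}

int main() {
  std::thread worker(backgroundTask);  // must start before warmupModel()
  warmupModel();  // would block forever if the worker were never started
  worker.join();
}
```

If `warmupModel()` were called before the worker thread existed, as in the pre-diff ordering with the call above the thread launch, the blocking wait would never be satisfied; moving the call after the `std::thread` construction avoids that.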