Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 8527a17

Browse files
committed
fix: reorder is_embedded_model and params.embedding
1 parent b47f6e9 commit 8527a17

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

controllers/llamaCPP.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -654,9 +654,9 @@ bool llamaCPP::LoadModelImpl(std::shared_ptr<Json::Value> jsonBody) {
654654

655655
params.n_gpu_layers = jsonBody->get("ngl", 100).asInt();
656656
params.n_ctx = jsonBody->get("ctx_len", 2048).asInt();
657+
params.embedding = jsonBody->get("embedding", true).asBool();
657658
is_embedded_model =
658659
!(*jsonBody)["embedding"].isNull() && (*jsonBody)["embedding"].asBool();
659-
params.embedding = jsonBody->get("embedding", true).asBool();
660660
// Check if n_parallel exists in jsonBody, if not, set to drogon_thread
661661
params.n_batch = jsonBody->get("n_batch", 512).asInt();
662662
params.n_parallel = jsonBody->get("n_parallel", 1).asInt();

0 commit comments

Comments (0)