
Commit b7cf124

vansangpfiev, hiento09, and sangjanai authored

chore: rename engines (#1406)

* chore: rename engines
* ci: update engine name in postinstaller scripts
* fix: version and variant

Co-authored-by: Hien To <tominhhien97@gmail.com>
Co-authored-by: vansangpfiev <sang@jan.ai>

1 parent f9a1a4d commit b7cf124

27 files changed: 190 additions, 129 deletions

engine/commands/chat_completion_cmd.cc

Lines changed: 2 additions & 1 deletion

@@ -77,7 +77,8 @@ void ChatCompletionCmd::Exec(const std::string& host, int port,
   }
 
   // Only check if llamacpp engine
-  if ((mc.engine.find("llamacpp") != std::string::npos) &&
+  if ((mc.engine.find(kLlamaEngine) != std::string::npos ||
+       mc.engine.find(kLlamaRepo) != std::string::npos) &&
       !commands::ModelStatusCmd().IsLoaded(host, port, model_handle)) {
     CLI_LOG("Model is not loaded yet!");
     return;
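
A minimal standalone sketch of the widened check: the old code matched only the "llamacpp" substring, while the new one accepts both the renamed engine and the legacy repo-style name. The concrete string values below are assumptions for illustration; the real definitions live in utils/engine_constants.h.

#include <iostream>
#include <string>

// Assumed values, for illustration only.
constexpr auto kLlamaEngine = "llama-cpp";
constexpr auto kLlamaRepo = "cortex.llamacpp";

// Mirrors the condition in the diff: substring match against either name.
bool IsLlamaEngine(const std::string& engine) {
  return engine.find(kLlamaEngine) != std::string::npos ||
         engine.find(kLlamaRepo) != std::string::npos;
}

int main() {
  std::cout << IsLlamaEngine("cortex.llamacpp") << "\n";  // 1: legacy name
  std::cout << IsLlamaEngine("llama-cpp") << "\n";        // 1: new name
  std::cout << IsLlamaEngine("onnxruntime") << "\n";      // 0: other engine
}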

engine/commands/ps_cmd.cc

Lines changed: 3 additions & 1 deletion

@@ -4,6 +4,7 @@
 #include <string>
 #include <tabulate/table.hpp>
 #include "nlohmann/json.hpp"
+#include "utils/engine_constants.h"
 #include "utils/format_utils.h"
 #include "utils/logging_utils.h"
 #include "utils/string_utils.h"
@@ -26,7 +27,8 @@ void PsCmd::Exec(const std::string& host, int port) {
   try {
     for (const auto& item : data) {
       ModelLoadedStatus model_status;
-      model_status.engine = item["engine"];
+      // TODO(sang) hardcode for now
+      model_status.engine = kLlamaEngine;
       model_status.model = item["id"];
       model_status.ram = item["ram"];
       model_status.start_time = item["start_time"];
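
Several of the touched files now include utils/engine_constants.h. That header is not part of this diff; a plausible shape for it, inferred from the old string literals and the rename, would be:

// utils/engine_constants.h -- sketch only; the actual header is not
// shown in this commit, and these values are inferred, not confirmed.
#pragma once

constexpr const auto kLlamaEngine = "llama-cpp";
constexpr const auto kOnnxEngine = "onnxruntime";
constexpr const auto kTrtLlmEngine = "tensorrt-llm";

constexpr const auto kLlamaRepo = "cortex.llamacpp";
constexpr const auto kOnnxRepo = "cortex.onnx";
constexpr const auto kTrtLlmRepo = "cortex.tensorrt-llm";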

engine/commands/run_cmd.cc

Lines changed: 16 additions & 1 deletion

@@ -11,6 +11,19 @@
 
 namespace commands {
 
+namespace {
+std::string Repo2Engine(const std::string& r) {
+  if (r == kLlamaRepo) {
+    return kLlamaEngine;
+  } else if (r == kOnnxRepo) {
+    return kOnnxEngine;
+  } else if (r == kTrtLlmRepo) {
+    return kTrtLlmEngine;
+  }
+  return r;
+};
+}  // namespace
+
 void RunCmd::Exec(bool chat_flag) {
   std::optional<std::string> model_id = model_handle_;
 
@@ -47,7 +60,9 @@ void RunCmd::Exec(bool chat_flag) {
 
   // Check if engine existed. If not, download it
   {
-    auto required_engine = engine_service_.GetEngineInfo(mc.engine);
+    auto required_engine =
+        engine_service_.GetEngineInfo(Repo2Engine(mc.engine));
+
     if (!required_engine.has_value()) {
       throw std::runtime_error("Engine not found: " + mc.engine);
     }
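
The anonymous-namespace helper translates a legacy repo-style name into the renamed engine before the lookup, so model configs written before the rename still resolve. A runnable sketch of that mapping, reusing the assumed constant values from the header sketch above:

#include <iostream>
#include <string>

// Assumed constant values (see the engine_constants.h sketch above).
constexpr auto kLlamaEngine = "llama-cpp";
constexpr auto kOnnxEngine = "onnxruntime";
constexpr auto kTrtLlmEngine = "tensorrt-llm";
constexpr auto kLlamaRepo = "cortex.llamacpp";
constexpr auto kOnnxRepo = "cortex.onnx";
constexpr auto kTrtLlmRepo = "cortex.tensorrt-llm";

// Same shape as the helper in the diff: map legacy repo names to the
// renamed engines, and pass anything unrecognized through unchanged.
std::string Repo2Engine(const std::string& r) {
  if (r == kLlamaRepo) return kLlamaEngine;
  if (r == kOnnxRepo) return kOnnxEngine;
  if (r == kTrtLlmRepo) return kTrtLlmEngine;
  return r;
}

int main() {
  std::cout << Repo2Engine("cortex.llamacpp") << "\n";  // llama-cpp
  std::cout << Repo2Engine("cortex.onnx") << "\n";      // onnxruntime
  std::cout << Repo2Engine("anything-else") << "\n";    // passes through
}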

engine/config/gguf_parser.cc

Lines changed: 2 additions & 1 deletion

@@ -25,6 +25,7 @@
 
 #include "gguf_parser.h"
 #include "trantor/utils/Logger.h"
+#include "utils/engine_constants.h"
 
 namespace config {
 #define NOMINMAX
@@ -401,7 +402,7 @@ void GGUFHandler::ModelConfigFromMetadata() {
   model_config_.frequency_penalty = 0;
   model_config_.presence_penalty = 0;
   model_config_.stream = true;
-  model_config_.engine = "cortex.llamacpp";
+  model_config_.engine = kLlamaEngine;
   model_config_.created = std::time(nullptr);
   model_config_.model = "model";
   model_config_.owned_by = "";

engine/config/yaml_config.cc

Lines changed: 7 additions & 3 deletions

@@ -4,8 +4,9 @@
 #include <iostream>
 #include <string>
 
-#include "utils/format_utils.h"
+#include "utils/engine_constants.h"
 #include "utils/file_manager_utils.h"
+#include "utils/format_utils.h"
 #include "yaml_config.h"
 namespace config {
 // Method to read YAML file
@@ -25,10 +26,12 @@ void YamlHandler::ReadYamlFile(const std::string& file_path) {
   std::replace(s.begin(), s.end(), '\\', '/');
   std::vector<std::string> v;
   if (yaml_node_["engine"] &&
-      yaml_node_["engine"].as<std::string>() == "cortex.llamacpp") {
+      (yaml_node_["engine"].as<std::string>() == kLlamaRepo ||
+       (yaml_node_["engine"].as<std::string>() == kLlamaEngine))) {
     auto abs_path = s.substr(0, s.find_last_of('/')) + "/model.gguf";
     auto rel_path = fmu::ToRelativeCortexDataPath(fs::path(abs_path));
     v.emplace_back(rel_path.string());
+
   } else {
     v.emplace_back(s.substr(0, s.find_last_of('/')));
   }
@@ -289,7 +292,8 @@ void YamlHandler::WriteYamlFile(const std::string& file_path) const {
     outFile << "version: " << yaml_node_["version"].as<std::string>() << "\n";
   }
   if (yaml_node_["files"] && yaml_node_["files"].size()) {
-    outFile << "files: # Can be relative OR absolute local file path\n";
+    outFile << "files: # Can be relative OR absolute local file "
+               "path\n";
     for (const auto& source : yaml_node_["files"]) {
      outFile << " - " << source << "\n";
    }
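
The effect of the widened condition in ReadYamlFile is that a model.yml using either spelling of the llama.cpp engine gets its files entry rewritten to point at model.gguf. A small yaml-cpp sketch of that branch, again using the assumed constant values from above:

#include <iostream>
#include <string>
#include <yaml-cpp/yaml.h>

// Assumed values, as in the sketches above.
constexpr auto kLlamaEngine = "llama-cpp";
constexpr auto kLlamaRepo = "cortex.llamacpp";

int main() {
  // Hypothetical model.yml fragments: both spellings should take the
  // llama.cpp branch, mirroring the widened condition in the diff.
  for (const auto* doc : {"engine: cortex.llamacpp", "engine: llama-cpp",
                          "engine: onnxruntime"}) {
    auto node = YAML::Load(doc);
    auto engine = node["engine"].as<std::string>();
    if (engine == kLlamaRepo || engine == kLlamaEngine) {
      std::cout << engine << " -> rewrite files[] to .../model.gguf\n";
    } else {
      std::cout << engine << " -> keep directory path\n";
    }
  }
}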
