Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit a6b64f8

Browse files
authored
Merge pull request #1520 from janhq/j/fix-streamlining-cortex-run
fix(#1425): Streamlining cortex run behavior
2 parents 620db2a + 989bf55 commit a6b64f8

File tree

4 files changed

+8
-8
lines changed

4 files changed

+8
-8
lines changed

engine/cli/command_line_parser.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ void CommandLineParser::SetupCommonCommands() {
139139
run_cmd->usage("Usage:\n" + commands::GetCortexBinary() +
140140
" run [options] [model_id]");
141141
run_cmd->add_option("model_id", cml_data_.model_id, "");
142-
run_cmd->add_flag("--chat", cml_data_.chat_flag, "Flag for interactive mode");
142+
run_cmd->add_flag("-d,--detach", cml_data_.run_detach, "Detached mode");
143143
run_cmd->callback([this, run_cmd] {
144144
if (std::exchange(executed_, true))
145145
return;
@@ -151,7 +151,7 @@ void CommandLineParser::SetupCommonCommands() {
151151
commands::RunCmd rc(cml_data_.config.apiServerHost,
152152
std::stoi(cml_data_.config.apiServerPort),
153153
cml_data_.model_id, download_service_);
154-
rc.Exec(cml_data_.chat_flag);
154+
rc.Exec(cml_data_.run_detach);
155155
});
156156

157157
auto chat_cmd = app_.add_subcommand(

engine/cli/command_line_parser.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ class CommandLineParser {
4444
std::string engine_src;
4545
std::string cortex_version;
4646
bool check_upd = true;
47-
bool chat_flag = false;
47+
bool run_detach = false;
4848
int port;
4949
config_yaml_utils::CortexConfig config;
5050
std::unordered_map<std::string, std::string> model_update_options;

engine/cli/commands/chat_cmd.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,6 @@ void ChatCmd::Exec(const std::string& host, int port,
66
const std::string& model_handle,
77
std::shared_ptr<DownloadService> download_service) {
88
RunCmd rc(host, port, model_handle, download_service);
9-
rc.Exec(true /*chat_flag*/);
9+
rc.Exec(false /*detach mode*/);
1010
}
1111
}; // namespace commands

engine/cli/commands/run_cmd.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ std::string Repo2Engine(const std::string& r) {
2323
};
2424
} // namespace
2525

26-
void RunCmd::Exec(bool chat_flag) {
26+
void RunCmd::Exec(bool run_detach) {
2727
std::optional<std::string> model_id = model_handle_;
2828

2929
cortex::db::Models modellist_handler;
@@ -115,12 +115,12 @@ void RunCmd::Exec(bool chat_flag) {
115115
}
116116

117117
// Chat
118-
if (chat_flag) {
119-
ChatCompletionCmd(model_service_).Exec(host_, port_, *model_id, mc, "");
120-
} else {
118+
if (run_detach) {
121119
CLI_LOG(*model_id << " model started successfully. Use `"
122120
<< commands::GetCortexBinary() << " chat " << *model_id
123121
<< "` for interactive chat shell");
122+
} else {
123+
ChatCompletionCmd(model_service_).Exec(host_, port_, *model_id, mc, "");
124124
}
125125
} catch (const std::exception& e) {
126126
CLI_LOG("Fail to run model with ID '" + model_handle_ + "': " + e.what());

0 commit comments

Comments (0)