diff --git a/engine/cli/command_line_parser.cc b/engine/cli/command_line_parser.cc index f0a41a12e..51bd121b2 100644 --- a/engine/cli/command_line_parser.cc +++ b/engine/cli/command_line_parser.cc @@ -139,7 +139,7 @@ void CommandLineParser::SetupCommonCommands() { run_cmd->usage("Usage:\n" + commands::GetCortexBinary() + " run [options] [model_id]"); run_cmd->add_option("model_id", cml_data_.model_id, ""); - run_cmd->add_flag("--chat", cml_data_.chat_flag, "Flag for interactive mode"); + run_cmd->add_flag("-d,--detach", cml_data_.run_detach, "Detached mode"); run_cmd->callback([this, run_cmd] { if (std::exchange(executed_, true)) return; @@ -151,7 +151,7 @@ void CommandLineParser::SetupCommonCommands() { commands::RunCmd rc(cml_data_.config.apiServerHost, std::stoi(cml_data_.config.apiServerPort), cml_data_.model_id, download_service_); - rc.Exec(cml_data_.chat_flag); + rc.Exec(cml_data_.run_detach); }); auto chat_cmd = app_.add_subcommand( diff --git a/engine/cli/command_line_parser.h b/engine/cli/command_line_parser.h index 0fc3f1aa7..7a9581f1f 100644 --- a/engine/cli/command_line_parser.h +++ b/engine/cli/command_line_parser.h @@ -44,7 +44,7 @@ class CommandLineParser { std::string engine_src; std::string cortex_version; bool check_upd = true; - bool chat_flag = false; + bool run_detach = false; int port; config_yaml_utils::CortexConfig config; std::unordered_map<std::string, std::string> model_update_options; diff --git a/engine/cli/commands/chat_cmd.cc b/engine/cli/commands/chat_cmd.cc index 381aa91dc..d0f6cd8ee 100644 --- a/engine/cli/commands/chat_cmd.cc +++ b/engine/cli/commands/chat_cmd.cc @@ -6,6 +6,6 @@ void ChatCmd::Exec(const std::string& host, int port, const std::string& model_handle, std::shared_ptr<DownloadService> download_service) { RunCmd rc(host, port, model_handle, download_service); - rc.Exec(true /*chat_flag*/); + rc.Exec(false /*detach mode*/); } }; // namespace commands diff --git a/engine/cli/commands/run_cmd.cc b/engine/cli/commands/run_cmd.cc index da295e360..074c12709 100644 
--- a/engine/cli/commands/run_cmd.cc +++ b/engine/cli/commands/run_cmd.cc @@ -23,7 +23,7 @@ std::string Repo2Engine(const std::string& r) { }; } // namespace -void RunCmd::Exec(bool chat_flag) { +void RunCmd::Exec(bool run_detach) { std::optional<std::string> model_id = model_handle_; cortex::db::Models modellist_handler; @@ -115,12 +115,12 @@ void RunCmd::Exec(bool run_detach) { } // Chat - if (chat_flag) { - ChatCompletionCmd(model_service_).Exec(host_, port_, *model_id, mc, ""); - } else { + if (run_detach) { CLI_LOG(*model_id << " model started successfully. Use `" << commands::GetCortexBinary() << " chat " << *model_id << "` for interactive chat shell"); + } else { + ChatCompletionCmd(model_service_).Exec(host_, port_, *model_id, mc, ""); } } catch (const std::exception& e) { CLI_LOG("Fail to run model with ID '" + model_handle_ + "': " + e.what());