diff --git a/engine/commands/cortex_upd_cmd.h b/engine/commands/cortex_upd_cmd.h index 682fb95d2..4a94ba01a 100644 --- a/engine/commands/cortex_upd_cmd.h +++ b/engine/commands/cortex_upd_cmd.h @@ -127,7 +127,7 @@ inline bool ReplaceBinaryInflight(const std::filesystem::path& src, std::filesystem::perms::others_read | std::filesystem::perms::others_exec); } catch (const std::exception& e) { - CTL_ERR("Something wrong happened: " << e.what()); + CTL_ERR("Something went wrong: " << e.what()); if (std::filesystem::exists(temp)) { std::rename(temp.string().c_str(), dst.string().c_str()); CLI_LOG("Restored binary file"); diff --git a/engine/controllers/command_line_parser.cc b/engine/controllers/command_line_parser.cc index 3046d1e70..9bee6b3fe 100644 --- a/engine/controllers/command_line_parser.cc +++ b/engine/controllers/command_line_parser.cc @@ -79,11 +79,10 @@ bool CommandLineParser::SetupCommand(int argc, char** argv) { { auto model_pull_cmd = - app_.add_subcommand("pull", - "Download a model from a registry. Working with " - "HuggingFace repositories. 
For available models, " - "please visit https://huggingface.co/cortexso"); - model_pull_cmd->add_option("model_id", model_id, ""); + app_.add_subcommand("pull", + "Download a model by URL (or HuggingFace ID). " + "See built-in models: https://huggingface.co/cortexso"); + model_pull_cmd->add_option("model_id", model_id, ""); model_pull_cmd->callback([&model_id]() { try { commands::ModelPullCmd().Exec(model_id); @@ -109,7 +108,7 @@ bool CommandLineParser::SetupCommand(int argc, char** argv) { std::string msg; { auto chat_cmd = - app_.add_subcommand("chat", "Send a chat request to a model"); + app_.add_subcommand("chat", "Send a chat completion request"); chat_cmd->add_option("model_id", model_id, ""); chat_cmd->add_option("-m,--message", msg, "Message to chat with model"); diff --git a/engine/e2e-test/test_cortex_update.py b/engine/e2e-test/test_cortex_update.py index fc1fe25cb..0c4d3a774 100644 --- a/engine/e2e-test/test_cortex_update.py +++ b/engine/e2e-test/test_cortex_update.py @@ -1,5 +1,3 @@ -import platform - import pytest from test_runner import run @@ -10,5 +8,5 @@ class TestCortexUpdate: @pytest.mark.skip(reason="Stable release is not available yet") def test_cortex_update(self): exit_code, output, error = run("Update cortex", ["update"]) - assert exit_code == 0, f"Something wrong happened" - assert "Update cortex sucessfully" in output + assert exit_code == 0, "Something went wrong" + assert "Updated cortex sucessfully" in output diff --git a/platform/src/infrastructure/commanders/chat.command.ts b/platform/src/infrastructure/commanders/chat.command.ts index 35b2ef5ba..1728f7dba 100644 --- a/platform/src/infrastructure/commanders/chat.command.ts +++ b/platform/src/infrastructure/commanders/chat.command.ts @@ -29,7 +29,7 @@ type ChatOptions = { @SubCommand({ name: 'chat', - description: 'Send a chat request to a model', + description: 'Send a chat completion request', arguments: '[model_id] [message]', argsDescription: { model_id: diff --git 
a/platform/src/infrastructure/commanders/models/model-pull.command.ts b/platform/src/infrastructure/commanders/models/model-pull.command.ts index 179484845..2d5b03799 100644 --- a/platform/src/infrastructure/commanders/models/model-pull.command.ts +++ b/platform/src/infrastructure/commanders/models/model-pull.command.ts @@ -26,7 +26,7 @@ import { fileManagerService } from '@/infrastructure/services/file-manager/file- arguments: '', argsDescription: { model_id: 'Model repo to pull' }, description: - 'Download a model from a registry. Working with HuggingFace repositories. For available models, please visit https://huggingface.co/cortexso', + 'Download a model by URL (or HuggingFace ID). See built-in models: https://huggingface.co/cortexso', }) @SetCommandContext() export class ModelPullCommand extends BaseCommand { diff --git a/platform/src/infrastructure/commanders/run.command.ts b/platform/src/infrastructure/commanders/run.command.ts index 3d30b6a16..784a8443b 100644 --- a/platform/src/infrastructure/commanders/run.command.ts +++ b/platform/src/infrastructure/commanders/run.command.ts @@ -32,7 +32,7 @@ type RunOptions = { model_id: 'Model to run. If the model is not available, it will attempt to pull.', }, - description: 'Shortcut to start a model and chat', + description: 'Shortcut: start a model and interactive chat shell', }) export class RunCommand extends BaseCommand { chatClient: ChatClient;