diff --git a/.github/workflows/cortex-cpp-quality-gate.yml b/.github/workflows/cortex-cpp-quality-gate.yml
index 39c5e7b42..279dd77d6 100644
--- a/.github/workflows/cortex-cpp-quality-gate.yml
+++ b/.github/workflows/cortex-cpp-quality-gate.yml
@@ -149,6 +149,7 @@ jobs:
         if: runner.os == 'Linux'
         run: |
           cd engine
+          mkdir -p ~/.config/cortexcpp/
           echo "huggingFaceToken: ${{ secrets.HUGGINGFACE_TOKEN_READ }}" > ~/.config/cortexcpp/.cortexrc
           echo "gitHubToken: ${{ secrets.PAT_SERVICE_ACCOUNT }}" >> ~/.config/cortexcpp/.cortexrc
           # ./build/cortex
@@ -175,6 +176,7 @@ jobs:
         if: runner.os == 'Linux'
         run: |
           cd engine
+          mkdir -p ~/.config/cortexcpp/
           echo "apiServerPort: 3928" > ~/.config/cortexcpp/.cortexrc
           echo "huggingFaceToken: ${{ secrets.HUGGINGFACE_TOKEN_READ }}" >> ~/.config/cortexcpp/.cortexrc
           echo "gitHubToken: ${{ secrets.PAT_SERVICE_ACCOUNT }}" >> ~/.config/cortexcpp/.cortexrc
@@ -453,6 +455,7 @@ jobs:
         if: runner.os == 'Linux'
         run: |
           cd engine
+          mkdir -p ~/.config/cortexcpp/
           echo "gitHubToken: ${{ secrets.GITHUB_TOKEN }}" > ~/.config/cortexcpp/.cortexrc
           # ./build/cortex
           cat ~/.config/cortexcpp/.cortexrc
@@ -477,6 +480,7 @@ jobs:
         if: runner.os == 'Linux'
         run: |
           cd engine
+          mkdir -p ~/.config/cortexcpp/
          echo "apiServerPort: 3928" > ~/.config/cortexcpp/.cortexrc
           echo "gitHubToken: ${{ secrets.GITHUB_TOKEN }}" > ~/.config/cortexcpp/.cortexrc
           # ./build/cortex
diff --git a/docs/docs/architecture/cortexrc.mdx b/docs/docs/architecture/cortexrc.mdx
index a19c23afe..c5c776f74 100644
--- a/docs/docs/architecture/cortexrc.mdx
+++ b/docs/docs/architecture/cortexrc.mdx
@@ -44,7 +44,6 @@ Example of the `.cortexrc` file:
 ```
 logFolderPath: /home/<username>/cortexcpp
 logLlamaCppPath: ./logs/cortex.log
-logTensorrtLLMPath: ./logs/cortex.log
 logOnnxPath: ./logs/cortex.log
 dataFolderPath: /home/<username>/cortexcpp
 maxLogLines: 100000
diff --git a/engine/cli/command_line_parser.cc b/engine/cli/command_line_parser.cc
index b423a6896..6963c5266 100644
--- a/engine/cli/command_line_parser.cc
+++ b/engine/cli/command_line_parser.cc
@@ -437,7 +437,7 @@ void CommandLineParser::SetupConfigsCommands() {
 
     auto is_empty = true;
     for (const auto& [key, value] : config_update_opts_) {
-      if (!value.empty()) {
+      if (!value.empty() || CONFIGURATIONS.at(key).allow_empty) {
         is_empty = false;
         break;
       }
@@ -656,36 +656,47 @@ void CommandLineParser::SetupHardwareCommands() {
 void CommandLineParser::SetupSystemCommands() {
   auto start_cmd = app_.add_subcommand("start", "Start the API server");
   start_cmd->group(kSystemGroup);
-  cml_data_.port = std::stoi(cml_data_.config.apiServerPort);
-  start_cmd->add_option("-p, --port", cml_data_.port, "Server port to listen");
-  start_cmd->add_option("--loglevel", cml_data_.log_level,
-                        "Set up log level for server, accepted TRACE, DEBUG, "
-                        "INFO, WARN, ERROR");
-  if (cml_data_.log_level != "INFO" && cml_data_.log_level != "TRACE" &&
-      cml_data_.log_level != "DEBUG" && cml_data_.log_level != "WARN" &&
-      cml_data_.log_level != "ERROR") {
-    CLI_LOG("Invalid log level: " << cml_data_.log_level
-                                  << ", Set Loglevel to INFO");
-    cml_data_.log_level = "INFO";
+
+  // Add options dynamically
+  std::vector<std::pair<std::string, std::string>> option_names = {
+      {"logspath", "The directory where logs are stored"},
+      {"logsllama", "The directory where llama-cpp engine logs are stored"},
+      {"logsonnx", "The directory where onnx engine logs are stored"},
+      {"datapath", "The directory for storing data"},
+      {"loglines", "Log size limit"},
+      {"host", "The host IP for the API server"},
+      {"port", "The port used by the API server"},
+      {"hf-token", "HuggingFace authentication token"},
+      {"gh-agent", "Github user agent"},
+      {"gh-token", "Github authentication token"},
+      {"cors", "Cross-Origin Resource Sharing"},
+      {"origins", "Lists allowed origins for CORS requests"},
+      {"proxy-url", "Proxy URL"},
+      {"verify-proxy", "SSL verification for client proxy connections"},
+      {"verify-proxy-host", "SSL verification for host proxy connections"},
+      {"proxy-username", "Proxy username"},
+      {"proxy-password", "Proxy password"},
+      {"no-proxy", "Specifies exceptions for proxy usage"},
+      {"verify-ssl-peer", "SSL/TLS verification for peer connections"},
+      {"verify-ssl-host", "SSL/TLS verification for host connections"},
+      {"ssl-cert-path", "Path to SSL certificates"},
+      {"ssl-key-path", "Path to SSL and keys"},
+      {"loglevel", "Log level"}};
+  cml_data_.server_start_options["loglevel"] = "INFO";
+  for (const auto& option_name : option_names) {
+    start_cmd->add_option(
+        "--" + std::get<0>(option_name),
+        cml_data_.server_start_options[std::get<0>(option_name)],
+        std::get<1>(option_name));
   }
+
   start_cmd->callback([this] {
     if (std::exchange(executed_, true))
       return;
-    if (cml_data_.port != stoi(cml_data_.config.apiServerPort)) {
-      CTL_INF("apiServerPort changed from " << cml_data_.config.apiServerPort
-                                            << " to " << cml_data_.port);
-      auto config_path = file_manager_utils::GetConfigurationPath();
-      cml_data_.config.apiServerPort = std::to_string(cml_data_.port);
-      auto result =
-          config_yaml_utils::CortexConfigMgr::GetInstance().DumpYamlConfig(
-              cml_data_.config, config_path.string());
-      if (result.has_error()) {
-        CLI_LOG("Error update " << config_path.string() << result.error());
-      }
-    }
+
     commands::ServerStartCmd ssc;
-    ssc.Exec(cml_data_.config.apiServerHost,
-             std::stoi(cml_data_.config.apiServerPort), cml_data_.log_level);
+    ssc.Exec(cml_data_.server_start_options["loglevel"],
+             cml_data_.server_start_options, cml_data_.config);
   });
 
   auto stop_cmd = app_.add_subcommand("stop", "Stop the API server");
diff --git a/engine/cli/command_line_parser.h b/engine/cli/command_line_parser.h
index 5b64f7f4d..75b6b144c 100644
--- a/engine/cli/command_line_parser.h
+++ b/engine/cli/command_line_parser.h
@@ -67,13 +67,12 @@ class CommandLineParser {
     bool display_gpu_mode = false;
     bool display_available_model = false;
    std::string filter = "";
-    std::string log_level = "INFO";
 
     bool show_menu = false;
 
-    int port;
     config_yaml_utils::CortexConfig config;
     std::unordered_map<std::string, std::string> model_update_options;
+    std::unordered_map<std::string, std::string> server_start_options;
     std::string model_src;
   };
   CmlData cml_data_;
diff --git a/engine/cli/commands/config_upd_cmd.cc b/engine/cli/commands/config_upd_cmd.cc
index 58bedb2e5..9866fbfa0 100644
--- a/engine/cli/commands/config_upd_cmd.cc
+++ b/engine/cli/commands/config_upd_cmd.cc
@@ -56,7 +56,7 @@ void commands::ConfigUpdCmd::Exec(
   auto non_null_opts = std::unordered_map<std::string, std::string>();
 
   for (const auto& [key, value] : options) {
-    if (value.empty()) {
+    if (value.empty() && !CONFIGURATIONS.at(key).allow_empty) {
       continue;
     }
     non_null_opts[key] = value;
diff --git a/engine/cli/commands/server_start_cmd.cc b/engine/cli/commands/server_start_cmd.cc
index a4bcb1eb5..e2b14e70e 100644
--- a/engine/cli/commands/server_start_cmd.cc
+++ b/engine/cli/commands/server_start_cmd.cc
@@ -66,7 +66,7 @@ bool ServerStartCmd::Exec(const std::string& host, int port,
   si.cb = sizeof(si);
   ZeroMemory(&pi, sizeof(pi));
   std::wstring params = L"--start-server";
-  params += L" --config_file_path \"" + 
+  params += L" --config_file_path \"" +
             file_manager_utils::GetConfigurationPath().wstring() + L"\"";
L" --data_folder_path \"" + file_manager_utils::GetCortexDataPath().wstring() + L"\""; @@ -80,17 +80,17 @@ bool ServerStartCmd::Exec(const std::string& host, int port, mutable_cmds.push_back(L'\0'); // Create child process if (!CreateProcess( - NULL, // No module name (use command line) + NULL, // No module name (use command line) mutable_cmds - .data(), // Command line (replace with your actual executable) - NULL, // Process handle not inheritable - NULL, // Thread handle not inheritable - FALSE, // Set handle inheritance - CREATE_NO_WINDOW, // No new console - NULL, // Use parent's environment block - NULL, // Use parent's starting directory - &si, // Pointer to STARTUPINFO structure - &pi)) // Pointer to PROCESS_INFORMATION structure + .data(), // Command line (replace with your actual executable) + NULL, // Process handle not inheritable + NULL, // Thread handle not inheritable + FALSE, // Set handle inheritance + CREATE_NO_WINDOW, // No new console + NULL, // Use parent's environment block + NULL, // Use parent's starting directory + &si, // Pointer to STARTUPINFO structure + &pi)) // Pointer to PROCESS_INFORMATION structure { std::cout << "Could not start server: " << GetLastError() << std::endl; return false; @@ -136,4 +136,171 @@ bool ServerStartCmd::Exec(const std::string& host, int port, #endif return true; } + +bool ServerStartCmd::Exec( + const std::optional& log_level, + const std::unordered_map& options, + CortexConfig& data) { + for (const auto& [key, value] : options) { + if (!value.empty()) { + UpdateConfig(data, key, value); + } + } + + auto config_path = file_manager_utils::GetConfigurationPath(); + auto result = + config_yaml_utils::CortexConfigMgr::GetInstance().DumpYamlConfig( + data, config_path.string()); + if (result.has_error()) { + CTL_WRN("Error update " << config_path.string() << result.error()); + } + return Exec(data.apiServerHost, std::stoi(data.apiServerPort), log_level); +} + +void ServerStartCmd::UpdateConfig(CortexConfig& data, const std::string& key, + const std::string& value) { + static const std::unordered_map< + std::string, std::function> + updaters = { + {"logspath", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.logFolderPath = v; + }}, + {"logsllama", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.logLlamaCppPath = v; + }}, + {"logsonnx", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.logOnnxPath = v; + }}, + {"loglines", + [this](CortexConfig& data, const std::string& k, + const std::string& v) { + UpdateNumericField(k, v, [&data](float f) { + data.maxLogLines = static_cast(f); + }); + }}, + {"host", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.apiServerHost = v; + }}, + {"port", + [](CortexConfig& data, const std::string& k, const std::string& v) { + data.apiServerPort = v; + }}, + {"hf-token", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.huggingFaceToken = v; + }}, + {"gh-agent", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.gitHubUserAgent = v; + }}, + {"gh-token", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.gitHubToken = v; + }}, + {"cors", + [this](CortexConfig& data, const std::string& k, + const std::string& v) { + UpdateBooleanField(k, v, [&data](bool b) { data.enableCors = b; }); + }}, + {"origins", + [this](CortexConfig& data, const std::string& k, + const std::string& v) { + UpdateVectorField(k, v, + [&data](const std::vector& 
orgs) { + data.allowedOrigins = orgs; + }); + }}, + {"proxy-url", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.proxyUrl = v; + }}, + {"verify-proxy", + [this](CortexConfig& data, const std::string& k, + const std::string& v) { + UpdateBooleanField(k, v, + [&data](bool b) { data.verifyProxySsl = b; }); + }}, + {"verify-proxy-host", + [this](CortexConfig& data, const std::string& k, + const std::string& v) { + UpdateBooleanField( + k, v, [&data](bool b) { data.verifyProxyHostSsl = b; }); + }}, + {"proxy-username", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.proxyUsername = v; + }}, + {"proxy-password", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.proxyPassword = v; + }}, + {"no-proxy", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.noProxy = v; + }}, + {"verify-ssl-peer", + [this](CortexConfig& data, const std::string& k, + const std::string& v) { + UpdateBooleanField(k, v, + [&data](bool b) { data.verifyPeerSsl = b; }); + }}, + {"verify-ssl-host", + [this](CortexConfig& data, const std::string& k, + const std::string& v) { + UpdateBooleanField(k, v, + [&data](bool b) { data.verifyHostSsl = b; }); + }}, + {"ssl-cert-path", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.sslCertPath = v; + }}, + {"ssl-key-path", + [](CortexConfig& data, const std::string&, const std::string& v) { + data.sslKeyPath = v; + }}, + }; + + if (auto it = updaters.find(key); it != updaters.end()) { + it->second(data, key, value); + CTL_INF("Updated " << key << " to: " << value); + } else { + CTL_WRN("Warning: Unknown configuration key '" << key << "' ignored."); + } +} + +void ServerStartCmd::UpdateVectorField( + const std::string& key, const std::string& value, + std::function&)> setter) { + std::vector tokens; + std::istringstream iss(value); + std::string token; + while (std::getline(iss, token, ',')) { + tokens.push_back(token); + } + setter(tokens); +} + +void ServerStartCmd::UpdateNumericField(const std::string& key, + const std::string& value, + std::function setter) { + try { + float numeric_val = std::stof(value); + setter(numeric_val); + } catch (const std::exception& e) { + CLI_LOG("Failed to parse numeric value for " << key << ": " << e.what()); + } +} + +void ServerStartCmd::UpdateBooleanField(const std::string& key, + const std::string& value, + std::function setter) { + bool bool_value = (value == "true" || value == "1"); + setter(bool_value); +} + }; // namespace commands diff --git a/engine/cli/commands/server_start_cmd.h b/engine/cli/commands/server_start_cmd.h index f3880532e..8807fc1ef 100644 --- a/engine/cli/commands/server_start_cmd.h +++ b/engine/cli/commands/server_start_cmd.h @@ -2,11 +2,13 @@ #include #include +#include "utils/config_yaml_utils.h" #include "utils/curl_utils.h" #include "utils/logging_utils.h" #include "utils/url_parser.h" namespace commands { +using CortexConfig = config_yaml_utils::CortexConfig; inline bool IsServerAlive(const std::string& host, int port) { auto url = url_parser::Url{ @@ -26,5 +28,23 @@ class ServerStartCmd { public: bool Exec(const std::string& host, int port, const std::optional& log_level = std::nullopt); + + bool Exec(const std::optional& log_level, + const std::unordered_map& options, + CortexConfig& data); + + private: + void UpdateConfig(CortexConfig& data, const std::string& key, + const std::string& value); + + void UpdateVectorField( + const std::string& key, const std::string& value, + 
+      std::function<void(const std::vector<std::string>&)> setter);
+
+  void UpdateNumericField(const std::string& key, const std::string& value,
+                          std::function<void(float)> setter);
+
+  void UpdateBooleanField(const std::string& key, const std::string& value,
+                          std::function<void(bool)> setter);
 };
 }  // namespace commands
diff --git a/engine/common/api_server_configuration.h b/engine/common/api_server_configuration.h
index 63383301b..65841859c 100644
--- a/engine/common/api_server_configuration.h
+++ b/engine/common/api_server_configuration.h
@@ -97,6 +97,12 @@ static const std::unordered_map<std::string, ApiConfigurationMetadata>
                                 .accept_value = "string",
                                 .default_value = "",
                                 .allow_empty = true}},
+    {"github_token", ApiConfigurationMetadata{.name = "github_token",
+                                              .desc = "Github token",
+                                              .group = "Token",
+                                              .accept_value = "string",
+                                              .default_value = "",
+                                              .allow_empty = true}},
 };
 
 class ApiServerConfiguration {
@@ -107,7 +113,7 @@ class ApiServerConfiguration {
       const std::string& proxy_url = "", const std::string& proxy_username = "",
       const std::string& proxy_password = "", const std::string& no_proxy = "",
       bool verify_peer_ssl = true, bool verify_host_ssl = true,
-      const std::string& hf_token = "", std::vector<std::string> api_keys = {})
+      const std::string& hf_token = "", const std::string& gh_token = "", std::vector<std::string> api_keys = {})
       : cors{cors},
         allowed_origins{allowed_origins},
         verify_proxy_ssl{verify_proxy_ssl},
@@ -119,6 +125,7 @@ class ApiServerConfiguration {
         verify_peer_ssl{verify_peer_ssl},
         verify_host_ssl{verify_host_ssl},
         hf_token{hf_token},
+        gh_token{gh_token},
         api_keys{api_keys} {}
 
   // cors
@@ -139,6 +146,7 @@ class ApiServerConfiguration {
 
   // token
   std::string hf_token{""};
+  std::string gh_token{""};
 
   // authentication
   std::vector<std::string> api_keys;
@@ -159,6 +167,7 @@ class ApiServerConfiguration {
     root["verify_peer_ssl"] = verify_peer_ssl;
     root["verify_host_ssl"] = verify_host_ssl;
     root["huggingface_token"] = hf_token;
+    root["github_token"] = gh_token;
     root["api_keys"] = Json::Value(Json::arrayValue);
     for (const auto& api_key : api_keys) {
       root["api_keys"].append(api_key);
@@ -255,6 +264,15 @@ class ApiServerConfiguration {
           return true;
         }},
 
+        {"github_token",
+         [this](const Json::Value& value) -> bool {
+           if (!value.isString()) {
+             return false;
+           }
+           gh_token = value.asString();
+           return true;
+         }},
+
         {"cors",
          [this](const Json::Value& value) -> bool {
           if (!value.isBool()) {
diff --git a/engine/e2e-test/cli/model/test_cli_model.py b/engine/e2e-test/cli/model/test_cli_model.py
index 8577b3a58..aa6e99e4a 100644
--- a/engine/e2e-test/cli/model/test_cli_model.py
+++ b/engine/e2e-test/cli/model/test_cli_model.py
@@ -1,5 +1,6 @@
 import pytest
 import requests
+import platform
 import os
 from pathlib import Path
 from utils.test_runner import (
@@ -9,6 +10,16 @@
     wait_for_websocket_download_success_event,
 )
 
+
+def get_root_path():
+    if platform.system() == "Linux":
+        # For Linux, use the XDG base directory.
+        # Here we use XDG_DATA_HOME if set, otherwise default to ~/.local/share.
+        return Path(os.environ.get("XDG_DATA_HOME", Path.home() / ".local" / "share"))
+    else:
+        return Path.home()
+
+
 class TestCliModel:
 
     @pytest.fixture(autouse=True)
@@ -24,7 +35,7 @@ def setup_and_teardown(self):
         # Clean up
         run("Delete model", ["models", "delete", "tinyllama:1b"])
         stop_server()
-        
+
     def test_model_pull_with_direct_url_should_be_success(self):
         exit_code, output, error = run(
             "Pull model",
@@ -32,12 +43,18 @@ def test_model_pull_with_direct_url_should_be_success(self):
                 "pull",
                 "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/blob/main/tinyllama-1.1b-chat-v0.3.Q2_K.gguf",
             ],
-            timeout=None, capture=False
+            timeout=None,
+            capture=False,
+        )
+        root = get_root_path()
+        assert os.path.exists(
+            root
+            / "cortexcpp"
+            / "models"
+            / "huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/tinyllama-1.1b-chat-v0.3.Q2_K.gguf"
         )
-        root = Path.home()
-        assert os.path.exists(root / "cortexcpp" / "models" / "huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/tinyllama-1.1b-chat-v0.3.Q2_K.gguf")
         assert exit_code == 0, f"Model pull failed with error: {error}"
-        
+
     @pytest.mark.asyncio
     async def test_models_delete_should_be_successful(self):
         json_body = {"model": "tinyllama:1b"}
@@ -49,4 +66,4 @@ async def test_models_delete_should_be_successful(self):
             "Delete model", ["models", "delete", "tinyllama:1b"]
         )
         assert "Model tinyllama:1b deleted successfully" in output
-        assert exit_code == 0, f"Model does not exist: {error}"
\ No newline at end of file
+        assert exit_code == 0, f"Model does not exist: {error}"
diff --git a/engine/e2e-test/utils/test_runner.py b/engine/e2e-test/utils/test_runner.py
index f25fc2bc0..bafcfda46 100644
--- a/engine/e2e-test/utils/test_runner.py
+++ b/engine/e2e-test/utils/test_runner.py
@@ -112,7 +112,7 @@ def pull_model_if_needed(model_id: str = "tinyllama:1b"):
 def start_server_nix() -> bool:
     executable = getExecutablePath()
     process = subprocess.Popen(
-        [executable] + ["start", "-p", "3928"],
+        [executable] + ["start", "--port", "3928"],
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         text=True,
@@ -142,7 +142,7 @@ def start_server_nix() -> bool:
 def start_server_windows() -> bool:
     executable = getExecutablePath()
     process = subprocess.Popen(
-        [executable] + ["start", "-p", "3928"],
+        [executable] + ["start", "--port", "3928"],
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         text=True,
diff --git a/engine/services/config_service.cc b/engine/services/config_service.cc
index ae90e93fb..526fe94b0 100644
--- a/engine/services/config_service.cc
+++ b/engine/services/config_service.cc
@@ -6,10 +6,13 @@ cpp::result<bool, std::string>
 ConfigService::UpdateApiServerConfiguration(const Json::Value& json) {
   auto config = file_manager_utils::GetCortexConfig();
   ApiServerConfiguration api_server_config{
-      config.enableCors,         config.allowedOrigins, config.verifyProxySsl,
-      config.verifyProxyHostSsl, config.proxyUrl,       config.proxyUsername,
-      config.proxyPassword,      config.noProxy,        config.verifyPeerSsl,
-      config.verifyHostSsl,      config.huggingFaceToken, config.apiKeys};
+      config.enableCors,       config.allowedOrigins,
+      config.verifyProxySsl,   config.verifyProxyHostSsl,
+      config.proxyUrl,         config.proxyUsername,
+      config.proxyPassword,    config.noProxy,
+      config.verifyPeerSsl,    config.verifyHostSsl,
+      config.huggingFaceToken, config.gitHubToken,
+      config.apiKeys};
 
   std::vector<std::string> updated_fields;
   std::vector<std::string> invalid_fields;
@@ -36,6 +39,7 @@ ConfigService::UpdateApiServerConfiguration(const Json::Value& json) {
   config.verifyHostSsl = api_server_config.verify_host_ssl;
   config.huggingFaceToken = api_server_config.hf_token;
+  config.gitHubToken = api_server_config.gh_token;
   config.apiKeys = api_server_config.api_keys;
 
   auto result = file_manager_utils::UpdateCortexConfig(config);
@@ -46,8 +50,11 @@ cpp::result<ApiServerConfiguration, std::string>
 ConfigService::GetApiServerConfiguration() {
   auto config = file_manager_utils::GetCortexConfig();
   return ApiServerConfiguration{
-      config.enableCors,         config.allowedOrigins, config.verifyProxySsl,
-      config.verifyProxyHostSsl, config.proxyUrl,       config.proxyUsername,
-      config.proxyPassword,      config.noProxy,        config.verifyPeerSsl,
-      config.verifyHostSsl,      config.huggingFaceToken, config.apiKeys};
+      config.enableCors,       config.allowedOrigins,
+      config.verifyProxySsl,   config.verifyProxyHostSsl,
+      config.proxyUrl,         config.proxyUsername,
+      config.proxyPassword,    config.noProxy,
+      config.verifyPeerSsl,    config.verifyHostSsl,
+      config.huggingFaceToken, config.gitHubToken,
+      config.apiKeys};
 }
diff --git a/engine/test/components/test_cortex_config.cc b/engine/test/components/test_cortex_config.cc
index f4bb7c1dc..f48b5c674 100644
--- a/engine/test/components/test_cortex_config.cc
+++ b/engine/test/components/test_cortex_config.cc
@@ -16,7 +16,6 @@ class CortexConfigTest : public ::testing::Test {
     // Set up default configuration
     default_config = {"default_log_path",
                       "default_llamacpp_log_path",
-                      "default_tensorrtllm_log_path",
                       "default_onnx_log_path",
                       "default_data_path",
                       1000,
@@ -38,7 +37,6 @@ class CortexConfigTest : public ::testing::Test {
 TEST_F(CortexConfigTest, DumpYamlConfig_WritesCorrectly) {
   CortexConfig config = {"log_path",
                          "default_llamacpp_log_path",
-                         "default_tensorrtllm_log_path",
                          "default_onnx_log_path",
                          "data_path",
                          5000,
@@ -68,7 +66,6 @@ TEST_F(CortexConfigTest, FromYaml_ReadsCorrectly) {
   // First, create a valid YAML configuration file
   CortexConfig config = {"log_path",
                          "default_llamacpp_log_path",
-                         "default_tensorrtllm_log_path",
                          "default_onnx_log_path",
                          "data_path",
                          5000,
diff --git a/engine/utils/config_yaml_utils.cc b/engine/utils/config_yaml_utils.cc
index 49b31acd0..dc47590c4 100644
--- a/engine/utils/config_yaml_utils.cc
+++ b/engine/utils/config_yaml_utils.cc
@@ -22,7 +22,6 @@ cpp::result<void, std::string> CortexConfigMgr::DumpYamlConfig(
   YAML::Node node;
   node["logFolderPath"] = config.logFolderPath;
   node["logLlamaCppPath"] = config.logLlamaCppPath;
-  node["logTensorrtLLMPath"] = config.logTensorrtLLMPath;
   node["logOnnxPath"] = config.logOnnxPath;
   node["dataFolderPath"] = config.dataFolderPath;
   node["maxLogLines"] = config.maxLogLines;
@@ -78,11 +77,10 @@ CortexConfig CortexConfigMgr::FromYaml(const std::string& path,
         !node["apiServerPort"] || !node["checkedForUpdateAt"] ||
         !node["checkedForLlamacppUpdateAt"] || !node["latestRelease"] ||
         !node["latestLlamacppRelease"] || !node["logLlamaCppPath"] ||
-        !node["logOnnxPath"] || !node["logTensorrtLLMPath"] ||
-        !node["huggingFaceToken"] || !node["gitHubUserAgent"] ||
-        !node["gitHubToken"] || !node["llamacppVariant"] ||
-        !node["llamacppVersion"] || !node["enableCors"] ||
-        !node["allowedOrigins"] || !node["proxyUrl"] ||
+        !node["logOnnxPath"] || !node["huggingFaceToken"] ||
+        !node["gitHubUserAgent"] || !node["gitHubToken"] ||
+        !node["llamacppVariant"] || !node["llamacppVersion"] ||
+        !node["enableCors"] || !node["allowedOrigins"] || !node["proxyUrl"] ||
         !node["proxyUsername"] || !node["proxyPassword"] ||
         !node["verifyPeerSsl"] || !node["verifyHostSsl"] ||
         !node["verifyProxySsl"] || !node["verifyProxyHostSsl"] ||
@@ -97,9 +95,6 @@ CortexConfig CortexConfigMgr::FromYaml(const std::string& path,
         .logLlamaCppPath = node["logLlamaCppPath"]
                                ? node["logLlamaCppPath"].as<std::string>()
                                : default_cfg.logLlamaCppPath,
-        .logTensorrtLLMPath = node["logTensorrtLLMPath"]
-                                  ? node["logTensorrtLLMPath"].as<std::string>()
-                                  : default_cfg.logTensorrtLLMPath,
         .logOnnxPath = node["logOnnxPath"]
                            ? node["logOnnxPath"].as<std::string>()
                            : default_cfg.logOnnxPath,
@@ -183,10 +178,9 @@ CortexConfig CortexConfigMgr::FromYaml(const std::string& path,
         .checkedForSyncHubAt = node["checkedForSyncHubAt"]
                                    ? node["checkedForSyncHubAt"].as<uint64_t>()
                                    : default_cfg.checkedForSyncHubAt,
-        .apiKeys =
-            node["apiKeys"]
-                ? node["apiKeys"].as<std::vector<std::string>>()
-                : default_cfg.apiKeys,
+        .apiKeys = node["apiKeys"]
+                       ? node["apiKeys"].as<std::vector<std::string>>()
+                       : default_cfg.apiKeys,
     };
 
     if (should_update_config) {
diff --git a/engine/utils/config_yaml_utils.h b/engine/utils/config_yaml_utils.h
index c94b8fe5f..7fb07290f 100644
--- a/engine/utils/config_yaml_utils.h
+++ b/engine/utils/config_yaml_utils.h
@@ -30,7 +30,6 @@ const std::vector<std::string> kDefaultSupportedEngines{kLlamaEngine,
 struct CortexConfig {
   std::string logFolderPath;
   std::string logLlamaCppPath;
-  std::string logTensorrtLLMPath;
   std::string logOnnxPath;
   std::string dataFolderPath;
diff --git a/engine/utils/file_manager_utils.cc b/engine/utils/file_manager_utils.cc
index 575a3cb9b..c479949aa 100644
--- a/engine/utils/file_manager_utils.cc
+++ b/engine/utils/file_manager_utils.cc
@@ -188,7 +188,6 @@ config_yaml_utils::CortexConfig GetDefaultConfig() {
       .logFolderPath = default_data_folder_path.string(),
 #endif
       .logLlamaCppPath = kLogsLlamacppBaseName,
-      .logTensorrtLLMPath = kLogsTensorrtllmBaseName,
       .logOnnxPath = kLogsOnnxBaseName,
 #if defined(_WIN32)
       .dataFolderPath =
diff --git a/engine/utils/file_manager_utils.h b/engine/utils/file_manager_utils.h
index f60edf4b3..088328491 100644
--- a/engine/utils/file_manager_utils.h
+++ b/engine/utils/file_manager_utils.h
@@ -13,7 +13,6 @@ constexpr std::string_view kProdVariant = "prod";
 constexpr std::string_view kBetaVariant = "beta";
 constexpr std::string_view kNightlyVariant = "nightly";
 constexpr char kLogsLlamacppBaseName[] = "./logs/cortex.log";
-constexpr char kLogsTensorrtllmBaseName[] = "./logs/cortex.log";
 constexpr char kLogsOnnxBaseName[] = "./logs/cortex.log";
 
 inline std::string cortex_config_file_path;