Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 0968abe

Browse files
committed
chore: cleanup
1 parent 70caa83 commit 0968abe

File tree

14 files changed

+30
-251
lines changed

14 files changed

+30
-251
lines changed

docs/docs/engines/engine-extension.mdx

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -71,9 +71,6 @@ class EngineI {
7171
std::shared_ptr<Json::Value> json_body,
7272
std::function<void(Json::Value&&, Json::Value&&)>&& callback) = 0;
7373

74-
// Compatibility and model management
75-
virtual bool IsSupported(const std::string& f) = 0;
76-
7774
virtual void GetModels(
7875
std::shared_ptr<Json::Value> jsonBody,
7976
std::function<void(Json::Value&&, Json::Value&&)>&& callback) = 0;

engine/common/base.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,6 @@ class BaseModel {
2020
virtual void GetModels(
2121
const HttpRequestPtr& req,
2222
std::function<void(const HttpResponsePtr&)>&& callback) = 0;
23-
virtual void FineTuning(
24-
const HttpRequestPtr& req,
25-
std::function<void(const HttpResponsePtr&)>&& callback) = 0;
2623
};
2724

2825
class BaseChatCompletion {

engine/controllers/server.cc

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -121,17 +121,6 @@ void server::GetModels(const HttpRequestPtr& req,
121121
LOG_TRACE << "Done get models";
122122
}
123123

124-
void server::FineTuning(
125-
const HttpRequestPtr& req,
126-
std::function<void(const HttpResponsePtr&)>&& callback) {
127-
auto ir = inference_svc_->FineTuning(req->getJsonObject());
128-
auto resp = cortex_utils::CreateCortexHttpJsonResponse(std::get<1>(ir));
129-
resp->setStatusCode(
130-
static_cast<HttpStatusCode>(std::get<0>(ir)["status_code"].asInt()));
131-
callback(resp);
132-
LOG_TRACE << "Done fine-tuning";
133-
}
134-
135124
void server::Inference(const HttpRequestPtr& req,
136125
std::function<void(const HttpResponsePtr&)>&& callback) {
137126

engine/controllers/server.h

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -39,12 +39,8 @@ class server : public drogon::HttpController<server, false>,
3939
METHOD_ADD(server::ModelStatus, "modelstatus", Options, Post);
4040
METHOD_ADD(server::GetModels, "models", Get);
4141

42-
// cortex.python API
43-
METHOD_ADD(server::FineTuning, "finetuning", Options, Post);
44-
4542
// Openai compatible path
4643
ADD_METHOD_TO(server::ChatCompletion, "/v1/chat/completions", Options, Post);
47-
ADD_METHOD_TO(server::FineTuning, "/v1/fine_tuning/job", Options, Post);
4844
ADD_METHOD_TO(server::Embedding, "/v1/embeddings", Options, Post);
4945
ADD_METHOD_TO(server::Inference, "/v1/inference", Options, Post);
5046
ADD_METHOD_TO(server::RouteRequest, "/v1/route/request", Options, Post);
@@ -69,9 +65,6 @@ class server : public drogon::HttpController<server, false>,
6965
void GetModels(
7066
const HttpRequestPtr& req,
7167
std::function<void(const HttpResponsePtr&)>&& callback) override;
72-
void FineTuning(
73-
const HttpRequestPtr& req,
74-
std::function<void(const HttpResponsePtr&)>&& callback) override;
7568
void Inference(const HttpRequestPtr& req,
7669
std::function<void(const HttpResponsePtr&)>&& callback);
7770
void RouteRequest(const HttpRequestPtr& req,

engine/cortex-common/EngineI.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -47,9 +47,6 @@ class EngineI {
4747
std::shared_ptr<Json::Value> json_body,
4848
std::function<void(Json::Value&&, Json::Value&&)>&& callback) = 0;
4949

50-
// For backward compatible checking
51-
virtual bool IsSupported(const std::string& f) = 0;
52-
5350
// Get list of running models
5451
virtual void GetModels(
5552
std::shared_ptr<Json::Value> jsonBody,
@@ -62,7 +59,6 @@ class EngineI {
6259
// Stop inflight chat completion in stream mode
6360
virtual void StopInferencing(const std::string& model_id) = 0;
6461

65-
virtual Json::Value GetRemoteModels() = 0;
6662
virtual void HandleRouteRequest(
6763
std::shared_ptr<Json::Value> json_body,
6864
std::function<void(Json::Value&&, Json::Value&&)>&& callback) = 0;

engine/cortex-common/cortexpythoni.h

Lines changed: 0 additions & 22 deletions
This file was deleted.

engine/cortex-common/local_enginei.h

Lines changed: 0 additions & 32 deletions
This file was deleted.

engine/extensions/local-engine/local_engine.h

Lines changed: 23 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
#include <memory>
55
#include <string>
66
#include <unordered_map>
7-
#include "cortex-common/local_enginei.h"
7+
#include "cortex-common/EngineI.h"
88
#include "json/json.h"
99
#include "services/engine_service.h"
1010
#include "utils/process/utils.h"
@@ -16,11 +16,16 @@ struct ServerAddress {
1616
int port;
1717
cortex::process::ProcessInfo process_info;
1818
};
19-
class LocalEngine : public LocalEngineI {
19+
class LocalEngine : public EngineI {
2020
public:
2121
LocalEngine(EngineService& engine_service, TaskQueue& q)
2222
: engine_service_(engine_service), q_(q) {}
2323
~LocalEngine();
24+
25+
void Load(EngineLoadOption opts) final {}
26+
27+
void Unload(EngineUnloadOption opts) final {}
28+
2429
void HandleChatCompletion(
2530
std::shared_ptr<Json::Value> json_body,
2631
std::function<void(Json::Value&&, Json::Value&&)>&& callback) final;
@@ -42,6 +47,22 @@ class LocalEngine : public LocalEngineI {
4247
std::shared_ptr<Json::Value> jsonBody,
4348
std::function<void(Json::Value&&, Json::Value&&)>&& callback) final;
4449

50+
bool SetFileLogger(int max_log_lines, const std::string& log_path) final {
51+
return true;
52+
}
53+
void SetLogLevel(trantor::Logger::LogLevel logLevel) final {}
54+
55+
// Stop inflight chat completion in stream mode
56+
void StopInferencing(const std::string& model_id) final {}
57+
58+
void HandleRouteRequest(
59+
std::shared_ptr<Json::Value> json_body,
60+
std::function<void(Json::Value&&, Json::Value&&)>&& callback) final {}
61+
62+
void HandleInference(
63+
std::shared_ptr<Json::Value> json_body,
64+
std::function<void(Json::Value&&, Json::Value&&)>&& callback) final {}
65+
4566
private:
4667
std::unordered_map<std::string, ServerAddress> server_map_;
4768
EngineService& engine_service_;

engine/extensions/python-engine/python_engine.cc

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -622,10 +622,6 @@ void PythonEngine::HandleInference(
622622
}
623623
}
624624

625-
Json::Value PythonEngine::GetRemoteModels() {
626-
return Json::Value();
627-
}
628-
629625
void PythonEngine::StopInferencing(const std::string& model_id) {
630626
(void)model_id;
631627
}
@@ -860,15 +856,6 @@ void PythonEngine::HandleEmbedding(
860856
callback(Json::Value(), Json::Value());
861857
}
862858

863-
bool PythonEngine::IsSupported(const std::string& f) {
864-
if (f == "HandleChatCompletion" || f == "LoadModel" || f == "UnloadModel" ||
865-
f == "GetModelStatus" || f == "GetModels" || f == "SetFileLogger" ||
866-
f == "SetLogLevel") {
867-
return true;
868-
}
869-
return false;
870-
}
871-
872859
bool PythonEngine::SetFileLogger(int max_log_lines,
873860
const std::string& log_path) {
874861
if (!async_file_logger_) {

engine/extensions/python-engine/python_engine.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,6 @@ class PythonEngine : public EngineI {
9595
void HandleEmbedding(
9696
std::shared_ptr<Json::Value> json_body,
9797
std::function<void(Json::Value&&, Json::Value&&)>&& callback) override;
98-
bool IsSupported(const std::string& feature) override;
9998
bool SetFileLogger(int max_log_lines, const std::string& log_path) override;
10099
void SetLogLevel(trantor::Logger::LogLevel logLevel) override;
101100
void HandleRouteRequest(
@@ -104,7 +103,6 @@ class PythonEngine : public EngineI {
104103
void HandleInference(
105104
std::shared_ptr<Json::Value> json_body,
106105
std::function<void(Json::Value&&, Json::Value&&)>&& callback) override;
107-
Json::Value GetRemoteModels() override;
108106
void StopInferencing(const std::string& model_id) override;
109107
};
110108
} // namespace python_engine

0 commit comments

Comments (0)