Include in the webui model list: online models not started via a model worker (e.g. openai-api), and local models that are already downloaded (#2060)
liunux4odoo committed Nov 14, 2023
1 parent 21b079d commit 2adfa42
Showing 4 changed files with 39 additions and 27 deletions.
25 changes: 11 additions & 14 deletions server/llm_api.py
@@ -2,7 +2,7 @@
 from configs import logger, log_verbose, LLM_MODELS, HTTPX_DEFAULT_TIMEOUT
 from server.utils import (BaseResponse, fschat_controller_address, list_config_llm_models,
                           get_httpx_client, get_model_worker_config)
-from copy import deepcopy
+from typing import List


 def list_running_models(
@@ -28,26 +28,23 @@ def list_running_models(
             msg=f"failed to get available models from controller: {controller_address}. Error: {e}")


-def list_config_models() -> BaseResponse:
+def list_config_models(
+        types: List[str] = Body(["local", "online"], description="model config categories, e.g. local, online, worker"),
+        placeholder: str = Body(None, description="placeholder, no actual effect")
+) -> BaseResponse:
     '''
     Get the model list configured in the local configs.
     '''
-    configs = {}
-    # remove sensitive information from the ONLINE_MODEL configs
-    for name, config in list_config_llm_models()["online"].items():
-        configs[name] = {}
-        for k, v in config.items():
-            if not (k == "worker_class"
-                    or "key" in k.lower()
-                    or "secret" in k.lower()
-                    or k.lower().endswith("id")):
-                configs[name][k] = v
-    return BaseResponse(data=configs)
+    data = {}
+    for type, models in list_config_llm_models().items():
+        if type in types:
+            data[type] = {m: get_model_config(m).data for m in models}
+    return BaseResponse(data=data)


 def get_model_config(
         model_name: str = Body(description="name of the LLM model in the configs"),
-        placeholder: str = Body(description="placeholder, no actual effect")
+        placeholder: str = Body(None, description="placeholder, no actual effect")
 ) -> BaseResponse:
     '''
     Get the configuration items (merged) of an LLM model.
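The rewritten endpoint filters config categories server-side and delegates per-model details to get_model_config, instead of hard-coding the online list. A minimal sketch of that filtering idea, using stand-in configs rather than the project's real list_config_llm_models()/get_model_config():

```python
from typing import Dict, List

# Stand-in data; the real project builds this from MODEL_PATH,
# ONLINE_LLM_MODEL and FSCHAT_MODEL_WORKERS.
FAKE_CONFIGS: Dict[str, Dict[str, dict]] = {
    "local": {"chatglm2-6b": {"model_path": "/models/chatglm2-6b"}},
    "online": {"openai-api": {"api_base_url": "https://api.openai.com/v1"}},
    "worker": {"chatglm2-6b": {"port": 20002}},
}

def list_config_models(types: List[str] = ["local", "online"]) -> Dict[str, Dict]:
    # Only the requested categories make it into the response, so the
    # webui can fetch local + online models in a single call.
    return {t: models for t, models in FAKE_CONFIGS.items() if t in types}

print(list_config_models())             # local and online entries only
print(list_config_models(["worker"]))   # worker entries only
```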
14 changes: 9 additions & 5 deletions server/utils.py
@@ -342,13 +342,14 @@ def list_embed_models() -> List[str]:
 def list_config_llm_models() -> Dict[str, Dict]:
     '''
     get configured llm models with different types.
-    return [(model_name, config_type), ...]
+    return {config_type: {model_name: config}, ...}
     '''
-    workers = list(FSCHAT_MODEL_WORKERS)
+    workers = FSCHAT_MODEL_WORKERS.copy()
+    workers.pop("default", None)

     return {
-        "local": MODEL_PATH["llm_model"],
-        "online": ONLINE_LLM_MODEL,
+        "local": MODEL_PATH["llm_model"].copy(),
+        "online": ONLINE_LLM_MODEL.copy(),
         "worker": workers,
     }
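The switch to `.copy()` here is defensive: without it, callers of list_config_llm_models would receive the live module-level dicts and could mutate MODEL_PATH or ONLINE_LLM_MODEL in place. A self-contained illustration with a toy dict (not the real configs):

```python
MODEL_PATH = {"llm_model": {"chatglm2-6b": "/models/chatglm2-6b"}}

def list_models_no_copy():
    return {"local": MODEL_PATH["llm_model"]}         # hands out the live dict

def list_models_with_copy():
    return {"local": MODEL_PATH["llm_model"].copy()}  # hands out a shallow copy

list_models_no_copy()["local"]["rogue"] = "x"     # mutates MODEL_PATH itself!
assert "rogue" in MODEL_PATH["llm_model"]

list_models_with_copy()["local"]["rogue2"] = "y"  # only the copy is touched
assert "rogue2" not in MODEL_PATH["llm_model"]
```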

@@ -406,7 +407,10 @@ def get_model_worker_config(model_name: str = None) -> dict:
                          exc_info=e if log_verbose else None)
     # local model
     if model_name in MODEL_PATH["llm_model"]:
-        config["model_path"] = get_model_path(model_name)
+        path = get_model_path(model_name)
+        config["model_path"] = path
+        if path and os.path.isdir(path):
+            config["model_path_exists"] = True
     config["device"] = llm_device(config.get("device"))
     return config
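The new model_path_exists flag is what lets the webui tell a merely configured local model apart from one actually downloaded to disk. A small sketch of the check, with hypothetical paths:

```python
import os

def local_model_downloaded(path: str) -> bool:
    # A local model counts as downloaded only when its configured
    # path resolves to an existing directory on disk.
    return bool(path) and os.path.isdir(path)

print(local_model_downloaded("/models/chatglm2-6b"))  # True only if the folder exists
print(local_model_downloaded(""))                     # False: nothing configured
```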

16 changes: 10 additions & 6 deletions webui_pages/dialogue/dialogue.py
@@ -37,8 +37,8 @@ def filter(msg):


 def dialogue_page(api: ApiRequest, is_lite: bool = False):
+    default_model = api.get_default_llm_model()[0]
     if not chat_box.chat_inited:
-        default_model = api.get_default_llm_model()[0]
         st.toast(
             f"Welcome to [`Langchain-Chatchat`](https://github.com/chatchat-space/Langchain-Chatchat)! \n\n"
             f"The current running model is `{default_model}`. You can start asking questions."
@@ -83,15 +83,19 @@ def llm_model_format_func(x):
         running_models = list(api.list_running_models())
         available_models = []
         config_models = api.list_config_models()
-        worker_models = list(config_models.get("worker", {}))  # only list models configured in FSCHAT_MODEL_WORKERS
-        for m in worker_models:
-            if m not in running_models and m != "default":
-                available_models.append(m)
+        for k, v in config_models.get("local", {}).items():  # list models with a valid local path configured
+            if (v.get("model_path_exists")
+                and k not in running_models):
+                available_models.append(k)
         for k, v in config_models.get("online", {}).items():  # list models in ONLINE_MODELS accessed directly
             if not v.get("provider") and k not in running_models:
                 available_models.append(k)
         llm_models = running_models + available_models
-        index = llm_models.index(st.session_state.get("cur_llm_model", api.get_default_llm_model()[0]))
+        cur_llm_model = st.session_state.get("cur_llm_model", default_model)
+        if cur_llm_model in llm_models:
+            index = llm_models.index(cur_llm_model)
+        else:
+            index = 0
         llm_model = st.selectbox("Select an LLM model:",
                                  llm_models,
                                  index,
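Putting the pieces together, the dialogue page now offers downloaded local models and direct-access online models alongside the running ones, and the guarded index lookup avoids the ValueError a stale cur_llm_model used to raise. A sketch with toy inputs rather than real ApiRequest responses:

```python
# Toy inputs standing in for api.list_running_models()/api.list_config_models().
running_models = ["chatglm2-6b"]
config_models = {
    "local": {"baichuan-7b": {"model_path_exists": True},
              "llama2-7b": {}},                          # configured but not downloaded
    "online": {"openai-api": {},                         # reachable directly
               "qwen-api": {"provider": "QwenWorker"}},  # needs a model worker
}

available_models = []
for k, v in config_models.get("local", {}).items():
    if v.get("model_path_exists") and k not in running_models:
        available_models.append(k)      # downloaded local models
for k, v in config_models.get("online", {}).items():
    if not v.get("provider") and k not in running_models:
        available_models.append(k)      # direct-access online APIs

llm_models = running_models + available_models
# Guarded lookup: a remembered model that disappeared falls back to index 0.
cur_llm_model = "gone-model"
index = llm_models.index(cur_llm_model) if cur_llm_model in llm_models else 0
print(llm_models, index)  # ['chatglm2-6b', 'baichuan-7b', 'openai-api'] 0
```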
11 changes: 9 additions & 2 deletions webui_pages/utils.py
@@ -730,12 +730,19 @@ async def ret_async():
         else:
             return ret_sync()

-    def list_config_models(self) -> Dict[str, List[str]]:
+    def list_config_models(
+        self,
+        types: List[str] = ["local", "online"],
+    ) -> Dict[str, Dict]:
         '''
-        Get the model list configured in the server configs, returned as {"type": [model_name1, model_name2, ...], ...}.
+        Get the model list configured in the server configs, returned as {"type": {model_name: config}, ...}.
         '''
+        data = {
+            "types": types,
+        }
         response = self.post(
             "/llm_model/list_config_models",
+            json=data,
         )
         return self._get_response_value(response, as_json=True, value_func=lambda r:r.get("data", {}))
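Client-side usage might then look like the following; the server address and the shape of the returned configs are illustrative assumptions, not guaranteed by this diff:

```python
# Hypothetical usage of the updated client method; the returned config
# contents depend on the actual deployment.
from webui_pages.utils import ApiRequest

api = ApiRequest(base_url="http://127.0.0.1:7861")  # assumed default api server address
models = api.list_config_models(types=["local", "online"])
for typ, configs in models.items():
    for name, cfg in configs.items():
        # Local entries may carry the new model_path_exists flag;
        # online entries come back with sensitive keys stripped server-side.
        print(typ, name, cfg.get("model_path_exists", False))
```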

