Skip to content

Commit

Permalink
优化目录结构 (#4058)
Browse files Browse the repository at this point in the history
* 优化目录结构

* 修改一些测试问题

---------

Co-authored-by: glide-the <2533736852@qq.com>
  • Loading branch information
liunux4odoo and glide-the committed May 22, 2024
1 parent 842a230 commit 5c650a8
Show file tree
Hide file tree
Showing 753 changed files with 846 additions and 745 deletions.
8 changes: 4 additions & 4 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
*.log
*.log.*
*.bak
/chatchat-server/chatchat/data/*
!/chatchat-server/chatchat/data/knowledge_base/samples
/chatchat-server/chatchat/data/knowledge_base/samples/vector_store
!/chatchat-server/chatchat/data/nltk_data
/libs/chatchat-server/chatchat/data/*
!/libs/chatchat-server/chatchat/data/knowledge_base/samples
/libs/chatchat-server/chatchat/data/knowledge_base/samples/vector_store
!/libs/chatchat-server/chatchat/data/nltk_data

.vscode/

Expand Down
14 changes: 0 additions & 14 deletions chatchat-server/chatchat/copy_config_example.py

This file was deleted.

6 changes: 0 additions & 6 deletions chatchat-server/tests/unit_server/test_init_server.py

This file was deleted.

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,3 @@ ollama:
model_credentials:
base_url: 'http://172.21.192.1:11434'
mode: 'completion'



Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
import streamlit as st

# from chatchat.webui_pages.loom_view_client import update_store
# from chatchat.webui_pages.openai_plugins import openai_plugins_page
from chatchat.webui_pages.utils import *
from streamlit_option_menu import option_menu
from chatchat.webui_pages.dialogue.dialogue import dialogue_page, chat_box
from chatchat.webui_pages.knowledge_base.knowledge_base import knowledge_base_page
import os
import sys
from chatchat.configs import VERSION
from chatchat.server.utils import api_address


# def on_change(key):
#     if key:
#         update_store()

# Directory of this file; bundled images (icon, logo) live in its "img" subdir.
img_dir = os.path.dirname(os.path.abspath(__file__))

# Shared API client bound to the configured chatchat server address.
api = ApiRequest(base_url=api_address())

if __name__ == "__main__":
    # Passing "lite" on the command line puts the pages into lightweight mode.
    is_lite = "lite" in sys.argv

    st.set_page_config(
        "Langchain-Chatchat WebUI",
        os.path.join(img_dir, "img", "chatchat_icon_blue_square_v2.png"),
        initial_sidebar_state="expanded",
        menu_items={
            'Get Help': 'https://github.com/chatchat-space/Langchain-Chatchat',
            'Report a bug': "https://github.com/chatchat-space/Langchain-Chatchat/issues",
            'About': f"""欢迎使用 Langchain-Chatchat WebUI {VERSION}!"""
        },
        layout="wide",
    )

    # Widen the expanded sidebar via injected CSS. The closing </style> tag is
    # required: without it the browser treats the rest of the rendered page
    # markdown as CSS and the UI breaks.
    st.markdown(
        """
        <style>
        [data-testid="stSidebar"][aria-expanded="true"] > div:first-child{
            width: 350px;
        }
        [data-testid="stSidebar"][aria-expanded="false"] > div:first-child{
            width: 600px;
            margin-left: -600px;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )

    # Sidebar navigation entries: display label -> icon + page-render callable.
    pages = {
        "对话": {
            "icon": "chat",
            "func": dialogue_page,
        },
        "知识库管理": {
            "icon": "hdd-stack",
            "func": knowledge_base_page,
        },
        # "模型服务": {
        #     "icon": "hdd-stack",
        #     "func": openai_plugins_page,
        # },
    }

    # Refresh plugin/model state (disabled along with the plugins page above).
    # if "status" not in st.session_state \
    #         or "run_plugins_list" not in st.session_state \
    #         or "launch_subscribe_info" not in st.session_state \
    #         or "list_running_models" not in st.session_state \
    #         or "model_config" not in st.session_state:
    #     update_store()

    with st.sidebar:
        st.image(
            os.path.join(img_dir, "img", 'logo-long-chatchat-trans-v2.png'),
            use_column_width=True,
        )
        st.caption(
            f"""<p align="right">当前版本:{VERSION}</p>""",
            unsafe_allow_html=True,
        )

        options = list(pages)
        icons = [x["icon"] for x in pages.values()]

        default_index = 0
        selected_page = option_menu(
            menu_title="",
            key="selected_page",
            options=options,
            icons=icons,
            # menu_icon="chat-quote",
            default_index=default_index,
        )

    # Dispatch to the selected page's renderer.
    if selected_page in pages:
        pages[selected_page]["func"](api=api, is_lite=is_lite)
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -202,16 +202,16 @@ async def chat_iterator() -> AsyncIterable[OpenAIChatOutput]:
message_id=message_id,
)
yield ret.model_dump_json()
yield OpenAIChatOutput( # return blank text lastly
id=f"chat{uuid.uuid4()}",
object="chat.completion.chunk",
content="",
role="assistant",
model=models["llm_model"].model_name,
status = data["status"],
message_type = data["message_type"],
message_id=message_id,
)
# yield OpenAIChatOutput( # return blank text lastly
# id=f"chat{uuid.uuid4()}",
# object="chat.completion.chunk",
# content="",
# role="assistant",
# model=models["llm_model"].model_name,
# status = data["status"],
# message_type = data["message_type"],
# message_id=message_id,
# )
await task

if stream:
Expand Down
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def run_init_server(
model_providers_cfg_path: str = None,
provider_host: str = None,
provider_port: int = None):
from chatchat.model_loaders.init_server import init_server
from chatchat.init_server import init_server
from chatchat.configs import (MODEL_PROVIDERS_CFG_PATH_CONFIG,
MODEL_PROVIDERS_CFG_HOST,
MODEL_PROVIDERS_CFG_PORT)
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
6 changes: 6 additions & 0 deletions libs/chatchat-server/tests/unit_server/test_init_server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
from chatchat.init_server import init_server


def test_init_server():
    """Smoke test: server initialization runs to completion without raising."""
    init_server()
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -178,33 +178,29 @@ async def workspaces_model_types(self, model_type: str, request: Request):
async def list_models(self, provider: str, request: Request):
logger.info(f"Received list_models request for provider: {provider}")
# 返回ModelType所有的枚举
llm_models: List[AIModelEntity] = []
ai_models: List[AIModelEntity] = []
for model_type in ModelType.__members__.values():
try:
provider_model_bundle = (
self._provider_manager.provider_manager.get_provider_model_bundle(
provider=provider, model_type=model_type
)
)
# 获取预定义模型
llm_models.extend(
provider_model_bundle.model_type_instance.predefined_models()
provider_model_bundle_llm = provider_manager.get_provider_model_bundle(
provider="zhipuai", model_type=model_type
)
# 获取自定义模型
for (
model
) in provider_model_bundle.configuration.custom_configuration.models:
llm_models.append(
provider_model_bundle.model_type_instance.get_model_schema(
model=model.model,
credentials=model.credentials,
for model in provider_model_bundle_llm.configuration.custom_configuration.models:
if model.model_type == model_type:
ai_models.append(
provider_model_bundle_llm.model_type_instance.get_model_schema(
model=model.model,
credentials=model.credentials,
)
)
)
except Exception as e:
logger.error(
logger.warning(
f"Error while fetching models for provider: {provider}, model_type: {model_type}"
)
logger.error(e)

# 获取预定义模型
ai_models.extend(provider_model_bundle_llm.model_type_instance.predefined_models())

logger.info(f"ai_models: {ai_models}")

# modelsList[AIModelEntity]转换称List[ModelCard]

Expand Down
Loading

0 comments on commit 5c650a8

Please sign in to comment.