Skip to content

Commit

Permalink
Dev (#1652)
Browse files Browse the repository at this point in the history
* 更新上agent提示词代码

* 更新部分文档,修复了issue中提到的bge匹配超过1 的bug

* 按需修改

* 解决了部分最新用户用依赖的bug,加了两个工具,移除google工具
  • Loading branch information
zRzRzRzRzRzRzR committed Oct 4, 2023
1 parent 332f8be commit fc6a3b0
Show file tree
Hide file tree
Showing 11 changed files with 108 additions and 31 deletions.
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,8 @@ docker run -d --gpus all -p 80:8501 registry.cn-beijing.aliyuncs.com/chatchat/ch
- [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh)
- [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh)
- [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5)
- [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5)
- [BAAI/bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct)
- [sensenova/piccolo-base-zh](https://huggingface.co/sensenova/piccolo-base-zh)
Expand Down
2 changes: 1 addition & 1 deletion configs/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@
from .prompt_config import *


VERSION = "v0.2.5-preview"
VERSION = "v0.2.6-preview"
8 changes: 4 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
langchain>=0.0.302
fschat[model_worker]==0.2.29
fschat[model_worker]==0.2.30
openai
sentence_transformers
transformers>=4.33.0
torch>=2.0.1
torchvision
torchaudio
fastapi>=0.103.1
fastapi>=0.103.2
nltk~=3.8.1
uvicorn~=0.23.1
starlette~=0.27.0
pydantic~=1.10.11
unstructured[all-docs]>=0.10.4
unstructured[all-docs]>=0.10.12
python-magic-bin; sys_platform == 'win32'
SQLAlchemy==2.0.19
faiss-cpu
Expand All @@ -25,7 +25,7 @@ pathlib
pytest
scikit-learn
numexpr
vllm==0.1.7; sys_platform == "linux"
vllm>=0.2.0; sys_platform == "linux"
# online api libs
# zhipuai
# dashscope>=1.10.0 # qwen
Expand Down
10 changes: 6 additions & 4 deletions requirements_api.txt
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
langchain>=0.0.302
fschat[model_worker]==0.2.29
fschat[model_worker]==0.2.30
openai
sentence_transformers
transformers>=4.33.0
torch >=2.0.1
transformers>=4.33.3
torch>=2.0.1
torchvision
torchaudio
fastapi>=0.103.1
Expand All @@ -24,14 +24,16 @@ pathlib
pytest
scikit-learn
numexpr
vllm==0.1.7; sys_platform == "linux"

vllm>=0.2.0; sys_platform == "linux"


# online api libs
# zhipuai
# dashscope>=1.10.0 # qwen
# qianfan
# volcengine>=1.0.106 # fangzhou
# duckduckgo-searchd #duckduckgo搜索

# uncomment libs if you want to use corresponding vector store
# pymilvus==2.1.3 # requires milvus==2.1.3
Expand Down
8 changes: 0 additions & 8 deletions server/agent/google_search.py

This file was deleted.

35 changes: 35 additions & 0 deletions server/agent/search_internet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
## 单独运行的时候需要添加
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import json
from server.chat import search_engine_chat
from configs import LLM_MODEL, TEMPERATURE, VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD

import asyncio


async def search_engine_iter(query: str, search_engine_name: str = "bing"):
    """Query a search-engine chat endpoint and return the final answer text.

    Args:
        query: The user question to search for.
        search_engine_name: Which configured search engine to use
            (defaults to "bing", preserving the original behavior).

    Returns:
        The answer string extracted from the (non-streaming) response body;
        an empty string if the response yielded no chunks.
    """
    response = await search_engine_chat(query=query,
                                        search_engine_name=search_engine_name,
                                        model_name=LLM_MODEL,
                                        temperature=TEMPERATURE,
                                        history=[],
                                        top_k=VECTOR_SEARCH_TOP_K,
                                        prompt_name="knowledge_base_chat",
                                        stream=False)

    contents = ""
    # Each chunk yielded by the body iterator is a JSON string; with
    # stream=False there is effectively a single chunk holding the answer.
    async for data in response.body_iterator:
        payload = json.loads(data)
        contents = payload["answer"]  # keep the last answer seen
    return contents

def search_internet(query: str):
    """Synchronous wrapper: run the async Bing search and return its answer text."""
    answer = asyncio.run(search_engine_iter(query))
    return answer


if __name__ == "__main__":
    # Ad-hoc manual check when running this module directly.
    answer = search_internet("大数据男女比例")
    print("答案:", answer)
37 changes: 37 additions & 0 deletions server/agent/search_knowledge.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
## 单独运行的时候需要添加
import sys
import os
import json

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import LLM_MODEL, TEMPERATURE, VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD

import asyncio


async def search_knowledge_base_iter(query: str, knowledge_base_name: str = "tcqa"):
    """Query a local knowledge base via the chat endpoint and return the answer.

    Args:
        query: The user question to answer from the knowledge base.
        knowledge_base_name: Name of the knowledge base to search
            (defaults to "tcqa", preserving the original behavior).

    Returns:
        The answer string extracted from the (non-streaming) response body;
        an empty string if the response yielded no chunks.
    """
    response = await knowledge_base_chat(query=query,
                                         knowledge_base_name=knowledge_base_name,
                                         model_name=LLM_MODEL,
                                         temperature=TEMPERATURE,
                                         history=[],
                                         top_k=VECTOR_SEARCH_TOP_K,
                                         prompt_name="knowledge_base_chat",
                                         score_threshold=SCORE_THRESHOLD,
                                         stream=False)

    contents = ""
    # Each chunk yielded by the body iterator is a JSON string; with
    # stream=False there is effectively a single chunk holding the answer.
    async for data in response.body_iterator:
        payload = json.loads(data)
        contents = payload["answer"]  # keep the last answer seen
    return contents

def search_knowledge(query: str):
    """Synchronous wrapper: run the async knowledge-base lookup and return its answer."""
    answer = asyncio.run(search_knowledge_base_iter(query))
    return answer


if __name__ == "__main__":
    # Ad-hoc manual check when running this module directly.
    answer = search_knowledge("大数据男女比例")
    print("答案:", answer)
17 changes: 12 additions & 5 deletions server/agent/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,9 @@
from server.agent.translator import translate
from server.agent.weather import weathercheck
from server.agent.shell import shell
from server.agent.google_search import google_search
from langchain.agents import Tool
from server.agent.search_knowledge import search_knowledge
from server.agent.search_internet import search_internet

tools = [
Tool.from_function(
Expand All @@ -32,9 +33,15 @@
description="使用命令行工具输出",
),
Tool.from_function(
func=google_search,
name="谷歌搜索工具",
description="使用谷歌搜索",
)
func=search_knowledge,
name="知识库查询工具",
description="使用西交利物浦大学大数据专业的本专业数据库来解答问题",
),
Tool.from_function(
func=search_internet,
name="互联网查询工具",
description="访问Bing互联网来解答问题",
),

]
tool_names = [tool.name for tool in tools]
2 changes: 1 addition & 1 deletion server/agent/translator.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,4 +52,4 @@ def translate(query: str):
llm_translate = LLMChain(llm=model, prompt=PROMPT)
ans = llm_translate.run(query)

return ans
return ans
9 changes: 8 additions & 1 deletion startup.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
args.block_size = 16
args.swap_space = 4 # GiB
args.gpu_memory_utilization = 0.90
args.max_num_batched_tokens = 2560
args.max_num_batched_tokens = 16384 # 一个批次中的最大令牌(tokens)数量,这个取决于你的显卡和大模型设置,设置太大显存会不够
args.max_num_seqs = 256
args.disable_log_stats = False
args.conv_template = None
Expand All @@ -123,6 +123,13 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
args.num_gpus = 1 # vllm worker的切分是tensor并行,这里填写显卡的数量
args.engine_use_ray = False
args.disable_log_requests = False

# 0.2.0 vllm后要加的参数
args.max_model_len = 8192 # 模型可以处理的最大序列长度。请根据你的大模型设置,
args.revision = None
args.quantization = None
args.max_log_len = None

if args.model_path:
args.model = args.model_path
if args.num_gpus > 1:
Expand Down
8 changes: 2 additions & 6 deletions webui_pages/dialogue/dialogue.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ def on_mode_change():
"搜索引擎问答",
"自定义Agent问答",
],
index=1,
index=3,
on_change=on_mode_change,
key="dialogue_mode",
)
Expand Down Expand Up @@ -101,9 +101,8 @@ def llm_model_format_func(x):
st.success(msg)
st.session_state["prev_llm_model"] = llm_model

temperature = st.slider("Temperature:", 0.0, 1.0, TEMPERATURE, 0.01)
temperature = st.slider("Temperature:", 0.0, 1.0, TEMPERATURE, 0.05)

## 部分模型可以超过10抡对话
history_len = st.number_input("历史对话轮数:", 0, 20, HISTORY_LEN)

def on_kb_change():
Expand All @@ -119,10 +118,7 @@ def on_kb_change():
key="selected_kb",
)
kb_top_k = st.number_input("匹配知识条数:", 1, 20, VECTOR_SEARCH_TOP_K)

## Bge 模型会超过1
score_threshold = st.slider("知识匹配分数阈值:", 0.0, 1.0, float(SCORE_THRESHOLD), 0.01)

# chunk_content = st.checkbox("关联上下文", False, disabled=True)
# chunk_size = st.slider("关联长度:", 0, 500, 250, disabled=True)
elif dialogue_mode == "搜索引擎问答":
Expand Down

0 comments on commit fc6a3b0

Please sign in to comment.