Merge branch 'GaiZhenbiao:main' into main
xingfanxia committed Nov 1, 2023
2 parents 72f403e + f9abb09 commit 43bc43b
Showing 16 changed files with 294 additions and 60 deletions.
2 changes: 1 addition & 1 deletion ChuanhuChatbot.py
@@ -434,7 +434,7 @@ def create_new_model():

with gr.Tab(label=i18n("准备数据集")):
dataset_preview_json = gr.JSON(
-            label=i18n("数据集预览"), readonly=True)
+            label=i18n("数据集预览"))
dataset_selection = gr.Files(label=i18n("选择数据集"), file_types=[
".xlsx", ".jsonl"], file_count="single")
upload_to_openai_btn = gr.Button(
Expand Down
7 changes: 4 additions & 3 deletions README.md
@@ -70,10 +70,11 @@
| [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) | | [LLaMA](https://github.com/facebookresearch/llama) | Supports LoRA models
| [Google PaLM](https://developers.generativeai.google/products/palm) | Streaming not supported | [StableLM](https://github.com/Stability-AI/StableLM)
| [讯飞星火认知大模型](https://xinghuo.xfyun.cn) | | [MOSS](https://github.com/OpenLMLab/MOSS)
-| [Inspur Yuan 1.0](https://air.inspur.com/home) | |
-| [MiniMax](https://api.minimax.chat/) |
+| [Inspur Yuan 1.0](https://air.inspur.com/home) | | [通义千问](https://github.com/QwenLM/Qwen/tree/main)
+| [MiniMax](https://api.minimax.chat/) |
| [XMChat](https://github.com/MILVLG/xmchat) | Streaming not supported
| [Midjourney](https://www.midjourney.com/) | Streaming not supported
+| [Claude](https://www.anthropic.com/) |

## Tips

@@ -141,7 +142,7 @@ python ChuanhuChatbot.py
A browser window will open automatically; you can then chat with ChatGPT or other models through **川虎Chat**.

> **Note**
>
> For detailed installation and usage instructions, see [the project's wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程)
## Troubleshooting
1 change: 1 addition & 0 deletions config_example.json
@@ -14,6 +14,7 @@
"spark_appid": "", // 你的 讯飞星火大模型 API AppID,用于讯飞星火大模型对话模型
"spark_api_key": "", // 你的 讯飞星火大模型 API Key,用于讯飞星火大模型对话模型
"spark_api_secret": "", // 你的 讯飞星火大模型 API Secret,用于讯飞星火大模型对话模型
"claude_api_secret":"",// 你的 Claude API Secret,用于 Claude 对话模型


//== Azure ==
2 changes: 1 addition & 1 deletion locale/en_US.json
@@ -126,7 +126,7 @@
"重命名该对话": "Rename this chat",
"重新生成": "Regenerate",
"高级": "Advanced",
",本次对话累计消耗了 ": ", Total cost for this dialogue is ",
",本次对话累计消耗了 ": ", total cost: ",
"💾 保存对话": "💾 Save Dialog",
"📝 导出为 Markdown": "📝 Export as Markdown",
"🔄 切换API地址": "🔄 Switch API Address",
3 changes: 3 additions & 0 deletions modules/config.py
@@ -128,6 +128,9 @@ def load_config_to_environ(key_list):
spark_api_secret = config.get("spark_api_secret", "")
os.environ["SPARK_API_SECRET"] = spark_api_secret

+claude_api_secret = config.get("claude_api_secret", "")
+os.environ["CLAUDE_API_SECRET"] = claude_api_secret

load_config_to_environ(["openai_api_type", "azure_openai_api_key", "azure_openai_api_base_url",
"azure_openai_api_version", "azure_deployment_name", "azure_embedding_deployment_name", "azure_embedding_model_name"])

55 changes: 55 additions & 0 deletions modules/models/Claude.py
@@ -0,0 +1,55 @@

from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from ..presets import *
from ..utils import *

from .base_model import BaseLLMModel


class Claude_Client(BaseLLMModel):
def __init__(self, model_name, api_secret) -> None:
super().__init__(model_name=model_name)
self.api_secret = api_secret
if None in [self.api_secret]:
raise Exception("请在配置文件或者环境变量中设置Claude的API Secret")
self.claude_client = Anthropic(api_key=self.api_secret)


def get_answer_stream_iter(self):
system_prompt = self.system_prompt
history = self.history
if system_prompt is not None:
history = [construct_system(system_prompt), *history]

completion = self.claude_client.completions.create(
model=self.model_name,
max_tokens_to_sample=300,
prompt=f"{HUMAN_PROMPT}{history}{AI_PROMPT}",
stream=True,
)
if completion is not None:
partial_text = ""
for chunk in completion:
partial_text += chunk.completion
yield partial_text
else:
yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG


def get_answer_at_once(self):
system_prompt = self.system_prompt
history = self.history
if system_prompt is not None:
history = [construct_system(system_prompt), *history]

completion = self.claude_client.completions.create(
model=self.model_name,
max_tokens_to_sample=300,
prompt=f"{HUMAN_PROMPT}{history}{AI_PROMPT}",
)
if completion is not None:
return completion.completion, len(completion.completion)
else:
return "获取资源错误", 0


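For context, `completions.create` with `HUMAN_PROMPT`/`AI_PROMPT` is the legacy Anthropic completions API, and the f-string above interpolates `history` (a Python list) directly into the prompt, so the model receives the list's repr rather than formatted turns. Below is a minimal smoke test for the new client — not part of the commit, and assuming a valid `CLAUDE_API_SECRET` plus an `anthropic` SDK version that still ships this API:

```python
# Hypothetical smoke test for Claude_Client; not part of the commit.
import os

from modules.models.Claude import Claude_Client

client = Claude_Client(model_name="claude-2",
                       api_secret=os.getenv("CLAUDE_API_SECRET"))
# history lives on BaseLLMModel; the message format here is assumed.
client.history = [{"role": "user", "content": "Say hello in one word."}]

answer, length = client.get_answer_at_once()
print(answer, length)
```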
57 changes: 57 additions & 0 deletions modules/models/Qwen.py
@@ -0,0 +1,57 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import logging
import colorama
from .base_model import BaseLLMModel
from ..presets import MODEL_METADATA


class Qwen_Client(BaseLLMModel):
def __init__(self, model_name, user_name="") -> None:
super().__init__(model_name=model_name, user=user_name)
self.tokenizer = AutoTokenizer.from_pretrained(MODEL_METADATA[model_name]["repo_id"], trust_remote_code=True, resume_download=True)
self.model = AutoModelForCausalLM.from_pretrained(MODEL_METADATA[model_name]["repo_id"], device_map="auto", trust_remote_code=True, resume_download=True).eval()

def generation_config(self):
return GenerationConfig.from_dict({
"chat_format": "chatml",
"do_sample": True,
"eos_token_id": 151643,
"max_length": self.token_upper_limit,
"max_new_tokens": 512,
"max_window_size": 6144,
"pad_token_id": 151643,
"top_k": 0,
"top_p": self.top_p,
"transformers_version": "4.33.2",
"trust_remote_code": True,
"temperature": self.temperature,
})

def _get_glm_style_input(self):
history = [x["content"] for x in self.history]
query = history.pop()
logging.debug(colorama.Fore.YELLOW +
f"{history}" + colorama.Fore.RESET)
assert (
len(history) % 2 == 0
), f"History should be even length. current history is: {history}"
history = [[history[i], history[i + 1]]
for i in range(0, len(history), 2)]
return history, query

def get_answer_at_once(self):
history, query = self._get_glm_style_input()
self.model.generation_config = self.generation_config()
response, history = self.model.chat(self.tokenizer, query, history=history)
return response, len(response)

def get_answer_stream_iter(self):
history, query = self._get_glm_style_input()
self.model.generation_config = self.generation_config()
for response in self.model.chat_stream(
self.tokenizer,
query,
history,
):
yield response
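`_get_glm_style_input` flattens the stored history into (user, assistant) pairs plus a trailing query, matching the ChatGLM-style `model.chat` interface that Qwen's remote code exposes. A standalone sketch of that pairing, with made-up turns:

```python
# Standalone sketch of the pairing done by _get_glm_style_input();
# the turns are hypothetical.
history = ["Hi", "Hello! How can I help?", "What is Qwen?"]

query = history.pop()           # trailing user turn becomes the query
assert len(history) % 2 == 0    # the rest must pair up user/assistant

pairs = [[history[i], history[i + 1]] for i in range(0, len(history), 2)]
print(pairs)  # [['Hi', 'Hello! How can I help?']]
print(query)  # What is Qwen?
```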
13 changes: 9 additions & 4 deletions modules/models/base_model.py
@@ -145,6 +145,8 @@ class ModelType(Enum):
Midjourney = 11
Spark = 12
OpenAIInstruct = 13
+    Claude = 14
+    Qwen = 15

@classmethod
def get_type(cls, model_name: str):
@@ -179,6 +181,10 @@ def get_type(cls, model_name: str):
model_type = ModelType.LangchainChat
elif "星火大模型" in model_name_lower:
model_type = ModelType.Spark
elif "claude" in model_name_lower:
model_type = ModelType.Claude
elif "qwen" in model_name_lower:
model_type = ModelType.Qwen
else:
model_type = ModelType.LLaMA
return model_type
@@ -653,14 +659,13 @@ def delete_first_conversation(self):
def delete_last_conversation(self, chatbot):
if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
msg = "由于包含报错信息,只删除chatbot记录"
-            chatbot.pop()
+            chatbot = chatbot[:-1]
return chatbot, self.history
if len(self.history) > 0:
-            self.history.pop()
-            self.history.pop()
+            self.history = self.history[:-2]
if len(chatbot) > 0:
msg = "删除了一组chatbot对话"
-            chatbot.pop()
+            chatbot = chatbot[:-1]
if len(self.all_token_counts) > 0:
msg = "删除了一组对话的token计数记录"
self.all_token_counts.pop()
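The switch from `pop()` to slicing is behavioral, not cosmetic: `list.pop()` mutates the list object in place, so every other reference to it (such as the list Gradio passed into the callback) shrinks too, whereas `chatbot[:-1]` rebinds the local name to a fresh copy and leaves the caller's object untouched. A standalone illustration:

```python
# pop() mutates the shared object; slicing rebinds to a fresh copy.
chatbot = [["q1", "a1"], ["q2", "a2"]]
outside_ref = chatbot

chatbot.pop()             # old behavior: outside_ref shrinks as well
print(outside_ref)        # [['q1', 'a1']]

chatbot = [["q1", "a1"], ["q2", "a2"]]
outside_ref = chatbot

chatbot = chatbot[:-1]    # new behavior: only the local name changes
print(outside_ref)        # [['q1', 'a1'], ['q2', 'a2']]
```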
6 changes: 6 additions & 0 deletions modules/models/models.py
@@ -116,6 +116,12 @@ def get_model(
from .spark import Spark_Client
model = Spark_Client(model_name, os.getenv("SPARK_APPID"), os.getenv(
"SPARK_API_KEY"), os.getenv("SPARK_API_SECRET"), user_name=user_name)
+    elif model_type == ModelType.Claude:
+        from .Claude import Claude_Client
+        model = Claude_Client(model_name="claude-2", api_secret=os.getenv("CLAUDE_API_SECRET"))
+    elif model_type == ModelType.Qwen:
+        from .Qwen import Qwen_Client
+        model = Qwen_Client(model_name, user_name=user_name)
elif model_type == ModelType.Unknown:
raise ValueError(f"未知模型: {model_name}")
logging.info(msg)
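With this wiring, the new backends are selectable purely by model name; a hypothetical call, assuming `get_model`'s remaining parameters keep their defaults:

```python
# Hypothetical: routing by name through get_model().
claude_model = get_model("Claude")   # "claude" in name -> ModelType.Claude
qwen_model = get_model("Qwen 7B")    # "qwen" in name  -> ModelType.Qwen
```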
14 changes: 12 additions & 2 deletions modules/presets.py
@@ -74,7 +74,8 @@
"minimax-abab5-chat",
"midjourney",
"讯飞星火大模型V2.0",
"讯飞星火大模型V1.5"
"讯飞星火大模型V1.5",
"Claude"
]

LOCAL_MODELS = [
@@ -86,6 +87,8 @@
"StableLM",
"MOSS",
"Llama-2-7B-Chat",
"Qwen 7B",
"Qwen 14B"
]

# Additional metadata for local models
@@ -97,6 +100,12 @@
"Llama-2-7B-Chat":{
"repo_id": "TheBloke/Llama-2-7b-Chat-GGUF",
"filelist": ["llama-2-7b-chat.Q6_K.gguf"],
},
"Qwen 7B": {
"repo_id": "Qwen/Qwen-7B-Chat-Int4",
},
"Qwen 14B": {
"repo_id": "Qwen/Qwen-14B-Chat-Int4",
}
}

@@ -125,7 +134,8 @@
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-32k-0613": 32768
"gpt-4-32k-0613": 32768,
"Claude": 4096
}

TOKEN_OFFSET = 1000 # Subtracting this from a model's token limit gives the soft limit; once the soft limit is reached, the app automatically tries to reduce token usage.
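In other words, trimming starts once a conversation crosses `token_upper_limit - TOKEN_OFFSET`; for the "Claude" entry added above:

```python
TOKEN_OFFSET = 1000
token_upper_limit = 4096                       # "Claude" from the table above
soft_limit = token_upper_limit - TOKEN_OFFSET  # 3096: auto-trimming starts here
```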
