Skip to content

Commit 9266206

Browse files
committed
add qwen model
1 parent 4d16534 commit 9266206

File tree

4 files changed

+32
-3
lines changed

4 files changed

+32
-3
lines changed

Qwen_7b.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
import requests
2+
import json
3+
import datetime
4+
5+
6+
def getAnswerFromQwen7b_v2(contextx):
    """Send *contextx* to the Qwen-7b streaming endpoint and return its JSON reply.

    Parameters
    ----------
    contextx : object
        JSON-serializable request payload (presumably the chat prompt plus
        history — TODO confirm the schema against the caller in
        codegen_stream.py).

    Returns
    -------
    dict
        The model server's parsed JSON response on success; otherwise a
        fallback dict mimicking the server's response shape, carrying a
        Chinese "insufficient compute, please retry" message.
    """
    data = json.dumps(contextx)
    url = 'http://172.16.62.66:8008/stream'
    headers = {'content-type': 'application/json;charset=utf-8'}
    try:
        # Timeout added so a hung model server cannot block the caller forever.
        r = requests.post(url, data=data, headers=headers, timeout=120)
        if r.status_code == 200:
            # Parse the body only on success: the original called r.json()
            # unconditionally, which raises JSONDecodeError on non-JSON
            # error pages instead of reaching the fallback branch.
            return r.json()
    except (requests.RequestException, ValueError):
        # Network failure or malformed JSON body — degrade to the fallback
        # below rather than crashing the caller, which immediately indexes
        # result["response"].
        pass
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    return {'response': '算力不足,请稍候再试![stop]', 'history': [], 'status': 200, 'time': now}

chat/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "aiitchat",
3-
"version": "1.3.20",
3+
"version": "1.5.0",
44
"private": true,
55
"dependencies": {
66
"@chatui/core": "^2.4.2",

chat/src/App.js

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,19 @@ var history = []
1515
const defaultQuickReplies = [
1616
{
1717
icon: 'message',
18-
name: 'ChatGLM2模型',
18+
name: 'ChatGLM2',
1919
isNew: false,
2020
isHighlight: true,
2121
},
2222
{
2323
icon: 'file',
24-
name: 'Vicuna模型',
24+
name: '通义千问',
25+
isNew: true,
26+
isHighlight: true,
27+
},
28+
{
29+
icon: 'file',
30+
name: 'Vicuna',
2531
isNew: false,
2632
isHighlight: true,
2733
},
@@ -111,6 +117,10 @@ function App() {
111117
modelname = "ChatGLM-6b";
112118
changeTitleStyle(0);
113119
}
120+
else if (item.name.startsWith("通义千问")) {
121+
modelname = "Qwen-7b";
122+
changeTitleStyle(2);
123+
}
114124
else if (item.name.startsWith("Llama2")) {
115125
modelname = "Llama-7b";
116126
changeTitleStyle(2);

codegen_stream.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from ChatGLM_6b import getAnswerFromChatGLM6b, getAnswerFromChatGLM6b_v2
88
from Vicuna_7b import getAnswerFromVicuna7b, getAnswerFromVicuna7b_v2
99
from LlaMA2_7b import getAnswerFromLLaMA_v2
10+
from Qwen_7b import getAnswerFromQwen7b_v2
1011

1112
filter_string = None
1213

@@ -106,6 +107,8 @@ async def codegen_stream_v2(request):
106107
result = getAnswerFromVicuna7b_v2(context)
107108
elif modelname == 'Llama-7b':
108109
result = getAnswerFromLLaMA_v2(context)
110+
elif modelname == 'Qwen-7b':
111+
result = getAnswerFromQwen7b_v2(context)
109112
else:
110113
result = getAnswerFromChatGLM6b_v2(context)
111114
stop = result["response"] .endswith("[stop]")

0 commit comments

Comments
 (0)