Skip to content

Commit

Permalink
Merge pull request #5 from tudou2/master
Browse files Browse the repository at this point in the history
update
  • Loading branch information
congxuma committed Apr 17, 2023
2 parents 614d7eb + b797ee4 commit 7264827
Show file tree
Hide file tree
Showing 9 changed files with 72 additions and 56 deletions.
40 changes: 16 additions & 24 deletions bot/chatgpt/chat_gpt_bot.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,16 @@ def __init__(self):
self.tb4chatgpt = TokenBucket(conf().get('rate_limit_chatgpt', 20))

self.sessions = SessionManager(ChatGPTSession, model= conf().get("model") or "gpt-3.5-turbo")
self.args ={
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"temperature":conf().get('temperature', 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p":1,
"frequency_penalty":conf().get('frequency_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty":conf().get('presence_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get('request_timeout', None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get('request_timeout', None), #重试超时时间,在这个时间内,将会自动重试
}

def reply(self, query, context=None):
# acquire reply content
Expand Down Expand Up @@ -58,7 +68,7 @@ def reply(self, query, context=None):
# # reply in stream
# return self.reply_text_stream(query, new_query, session_id)

reply_content = self.reply_text(session, session_id, api_key, 0)
reply_content = self.reply_text(session, api_key)
logger.debug("[CHATGPT] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session.messages, session_id, reply_content["content"], reply_content["completion_tokens"]))
if reply_content['completion_tokens'] == 0 and len(reply_content['content']) > 0:
reply = Reply(ReplyType.ERROR, reply_content['content'])
Expand All @@ -82,19 +92,7 @@ def reply(self, query, context=None):
reply = Reply(ReplyType.ERROR, 'Bot不支持处理{}类型的消息'.format(context.type))
return reply

def compose_args(self):
return {
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"temperature":conf().get('temperature', 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p":1,
"frequency_penalty":conf().get('frequency_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty":conf().get('presence_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get('request_timeout', None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get('request_timeout', None), #重试超时时间,在这个时间内,将会自动重试
}

def reply_text(self, session:ChatGPTSession, session_id, api_key, retry_count=0) -> dict:
def reply_text(self, session:ChatGPTSession, api_key=None, retry_count=0) -> dict:
'''
call openai's ChatCompletion to get the answer
:param session: a conversation session
Expand All @@ -107,7 +105,7 @@ def reply_text(self, session:ChatGPTSession, session_id, api_key, retry_count=0)
raise openai.error.RateLimitError("RateLimitError: rate limit exceeded")
# if api_key == None, the default openai.api_key will be used
response = openai.ChatCompletion.create(
api_key=api_key, messages=session.messages, **self.compose_args()
api_key=api_key, messages=session.messages, **self.args
)
# logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
return {"total_tokens": response["usage"]["total_tokens"],
Expand All @@ -133,11 +131,11 @@ def reply_text(self, session:ChatGPTSession, session_id, api_key, retry_count=0)
else:
logger.warn("[CHATGPT] Exception: {}".format(e))
need_retry = False
self.sessions.clear_session(session_id)
self.sessions.clear_session(session.session_id)

if need_retry:
logger.warn("[CHATGPT] 第{}次重试".format(retry_count+1))
return self.reply_text(session, session_id, api_key, retry_count+1)
return self.reply_text(session, api_key, retry_count+1)
else:
return result

Expand All @@ -147,10 +145,4 @@ def __init__(self):
super().__init__()
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"

def compose_args(self):
args = super().compose_args()
args["deployment_id"] = conf().get("azure_deployment_id")
#args["engine"] = args["model"]
#del(args["model"])
return args
self.args["deployment_id"] = conf().get("azure_deployment_id")
9 changes: 6 additions & 3 deletions bot/chatgpt/chat_gpt_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def __init__(self, session_id, system_prompt=None, model= "gpt-3.5-turbo"):
def discard_exceeding(self, max_tokens, cur_tokens= None):
precise = True
try:
cur_tokens = num_tokens_from_messages(self.messages, self.model)
cur_tokens = self.calc_tokens()
except Exception as e:
precise = False
if cur_tokens is None:
Expand All @@ -29,7 +29,7 @@ def discard_exceeding(self, max_tokens, cur_tokens= None):
elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
self.messages.pop(1)
if precise:
cur_tokens = num_tokens_from_messages(self.messages, self.model)
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
break
Expand All @@ -40,11 +40,14 @@ def discard_exceeding(self, max_tokens, cur_tokens= None):
logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
break
if precise:
cur_tokens = num_tokens_from_messages(self.messages, self.model)
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
return cur_tokens

def calc_tokens(self):
    """Return the token count of this session's message history.

    Delegates to the module-level helper, which applies the
    model-specific tiktoken accounting rules.
    """
    token_total = num_tokens_from_messages(self.messages, self.model)
    return token_total


# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model):
Expand Down
46 changes: 25 additions & 21 deletions bot/openai/open_ai_bot.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,17 @@ def __init__(self):
openai.proxy = proxy

self.sessions = SessionManager(OpenAISession, model= conf().get("model") or "text-davinci-003")
self.args = {
"model": conf().get("model") or "text-davinci-003", # 对话模型的名称
"temperature":conf().get('temperature', 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
"max_tokens":1200, # 回复最大的字符数
"top_p":1,
"frequency_penalty":conf().get('frequency_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty":conf().get('presence_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get('request_timeout', None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get('request_timeout', None), #重试超时时间,在这个时间内,将会自动重试
"stop":["\n\n\n"]
}

def reply(self, query, context=None):
# acquire reply content
Expand All @@ -42,11 +53,9 @@ def reply(self, query, context=None):
reply = Reply(ReplyType.INFO, '所有人记忆已清除')
else:
session = self.sessions.session_query(query, session_id)
new_query = str(session)
logger.debug("[OPEN_AI] session query={}".format(new_query))

total_tokens, completion_tokens, reply_content = self.reply_text(new_query, session_id, 0)
logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(new_query, session_id, reply_content, completion_tokens))
result = self.reply_text(session)
total_tokens, completion_tokens, reply_content = result['total_tokens'], result['completion_tokens'], result['content']
logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens))

if total_tokens == 0 :
reply = Reply(ReplyType.ERROR, reply_content)
Expand All @@ -63,47 +72,42 @@ def reply(self, query, context=None):
reply = Reply(ReplyType.ERROR, retstring)
return reply

def reply_text(self, query, session_id, retry_count=0):
def reply_text(self, session:OpenAISession, retry_count=0):
try:
response = openai.Completion.create(
model= conf().get("model") or "text-davinci-003", # 对话模型的名称
prompt=query,
temperature=0.9, # 值在[0,1]之间,越大表示回复越具有不确定性
max_tokens=1200, # 回复最大的字符数
top_p=1,
frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
stop=["\n\n\n"]
prompt=str(session), **self.args
)
res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
total_tokens = response["usage"]["total_tokens"]
completion_tokens = response["usage"]["completion_tokens"]
logger.info("[OPEN_AI] reply={}".format(res_content))
return total_tokens, completion_tokens, res_content
return {"total_tokens": total_tokens,
"completion_tokens": completion_tokens,
"content": res_content}
except Exception as e:
need_retry = retry_count < 2
result = [0,0,"我现在有点累了,等会再来吧"]
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if isinstance(e, openai.error.RateLimitError):
logger.warn("[OPEN_AI] RateLimitError: {}".format(e))
result[2] = "提问太快啦,请休息一下再问我吧"
result['content'] = "提问太快啦,请休息一下再问我吧"
if need_retry:
time.sleep(5)
elif isinstance(e, openai.error.Timeout):
logger.warn("[OPEN_AI] Timeout: {}".format(e))
result[2] = "我没有收到你的消息"
result['content'] = "我没有收到你的消息"
if need_retry:
time.sleep(5)
elif isinstance(e, openai.error.APIConnectionError):
logger.warn("[OPEN_AI] APIConnectionError: {}".format(e))
need_retry = False
result[2] = "我连接不到你的网络"
result['content'] = "我连接不到你的网络"
else:
logger.warn("[OPEN_AI] Exception: {}".format(e))
need_retry = False
self.sessions.clear_session(session_id)
self.sessions.clear_session(session.session_id)

if need_retry:
logger.warn("[OPEN_AI] 第{}次重试".format(retry_count+1))
return self.reply_text(query, session_id, retry_count+1)
return self.reply_text(session, retry_count+1)
else:
return result
8 changes: 5 additions & 3 deletions bot/openai/open_ai_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ def __str__(self):
def discard_exceeding(self, max_tokens, cur_tokens= None):
precise = True
try:
cur_tokens = num_tokens_from_string(str(self), self.model)
cur_tokens = self.calc_tokens()
except Exception as e:
precise = False
if cur_tokens is None:
Expand All @@ -41,7 +41,7 @@ def discard_exceeding(self, max_tokens, cur_tokens= None):
elif len(self.messages) == 1 and self.messages[0]["role"] == "assistant":
self.messages.pop(0)
if precise:
cur_tokens = num_tokens_from_string(str(self), self.model)
cur_tokens = self.calc_tokens()
else:
cur_tokens = len(str(self))
break
Expand All @@ -52,11 +52,13 @@ def discard_exceeding(self, max_tokens, cur_tokens= None):
logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages)))
break
if precise:
cur_tokens = num_tokens_from_string(str(self), self.model)
cur_tokens = self.calc_tokens()
else:
cur_tokens = len(str(self))
return cur_tokens

def calc_tokens(self):
    """Return the token count of this session's rendered prompt.

    The whole session is serialized to a single prompt string
    (completion-style API) before counting.
    """
    prompt_text = str(self)
    return num_tokens_from_string(prompt_text, self.model)

# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_string(string: str, model: str) -> int:
Expand Down
5 changes: 5 additions & 0 deletions bot/session_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@ def add_reply(self, reply):
def discard_exceeding(self, max_tokens=None, cur_tokens=None):
raise NotImplementedError

def calc_tokens(self):
    """Return the number of tokens this session currently consumes.

    Abstract hook: concrete Session subclasses must override this with
    a model-appropriate counting strategy (see the chatgpt/openai
    session implementations elsewhere in this changeset).

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError


class SessionManager(object):
Expand All @@ -48,6 +50,9 @@ def build_session(self, session_id, system_prompt=None):
如果session_id不在sessions中,创建一个新的session并添加到sessions中
如果system_prompt不会空,会更新session的system_prompt并重置session
'''
if session_id is None:
return self.sessioncls(session_id, system_prompt, **self.session_args)

if session_id not in self.sessions:
self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)
elif system_prompt is not None: # 如果有新的system_prompt,更新并重置session
Expand Down
4 changes: 2 additions & 2 deletions channel/chat_channel.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,11 +111,11 @@ def _compose_context(self, ctype: ContextType, content, **kwargs):

img_match_prefix = check_prefix(content, conf().get('image_create_prefix'))
if img_match_prefix:
content = content.replace(img_match_prefix, '', 1).strip()
content = content.replace(img_match_prefix, '', 1)
context.type = ContextType.IMAGE_CREATE
else:
context.type = ContextType.TEXT
context.content = content
context.content = content.strip()
if 'desire_rtype' not in context and conf().get('always_reply_voice') and ReplyType.VOICE not in self.NOT_SUPPORT_REPLYTYPE:
context['desire_rtype'] = ReplyType.VOICE
elif context.type == ContextType.VOICE:
Expand Down
2 changes: 1 addition & 1 deletion nixpacks.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,6 @@ providers = ['python']

[phases.setup]
nixPkgs = ['python310']
cmds = ['apt-get update','apt-get install -y --no-install-recommends ffmpeg espeak']
cmds = ['apt-get update','apt-get install -y --no-install-recommends ffmpeg espeak','python -m venv /opt/venv && . /opt/venv/bin/activate && pip install -r requirements-optional.txt']
[start]
cmd = "python ./app.py"
10 changes: 8 additions & 2 deletions plugins/godcmd/godcmd.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,13 @@
COMMANDS = {
"help": {
"alias": ["help", "帮助"],
"desc": "回复此帮助",
"desc": "回复此帮助\n使用技巧回复#tips",
},
"tips": {
"alias": ["tips", "技巧"],
"desc": '''1、私聊bot开头的问题会触发chatGPT,比如:bot 上海明天天气怎么样?\n
2、输入内容中含‘每日新闻’可获取当日新闻;含‘每日摄影’可获取每日的一张摄影作品\n
3、关键字画开头将触发画图,目前需要以特殊的格式输入【画 <模型>:prompt】''',
},
"helpp": {
"alias": ["help", "帮助"], # 与help指令共用别名,根据参数数量区分
Expand Down Expand Up @@ -388,4 +394,4 @@ def authenticate(self, userid, args, isadmin, isgroup) -> Tuple[bool,str] :
return False,"认证失败"

def get_help_text(self, isadmin = False, isgroup = False, **kwargs):
return get_help_text(isadmin, isgroup)
return get_help_text(isadmin, isgroup)
4 changes: 4 additions & 0 deletions plugins/source.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@
"replicate": {
"url": "https://github.com/lanvent/plugin_replicate.git",
"desc": "利用replicate api画图的插件"
},
"summary": {
"url": "https://github.com/lanvent/plugin_summary.git",
"desc": "总结聊天记录的插件"
}
,
"getnews": {
Expand Down

0 comments on commit 7264827

Please sign in to comment.