diff --git a/README.md b/README.md
index 851f53680..72e9ea87e 100644
--- a/README.md
+++ b/README.md
@@ -17,9 +17,6 @@
- 个人微信
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/qApznZ?referralCode=RC3znh)
-- 企业微信应用号
-
- [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/-FHS--?referralCode=RC3znh)
# 演示
@@ -27,9 +24,15 @@ https://user-images.githubusercontent.com/26161723/233777277-e3b9928e-b88f-43e2-
Demo made by [Visionn](https://www.wangpc.cc/)
+# 交流群
+
+添加小助手微信进群:
+
+
+
# 更新日志
->**2023.04.26:** 支持企业微信应用号部署,兼容插件,并支持语音图片交互,支持Railway部署,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatcom/README.md)。(contributed by [@lanvent](https://github.com/lanvent) in [#944](https://github.com/zhayujie/chatgpt-on-wechat/pull/944))
+>**2023.04.26:** 支持企业微信应用号部署,兼容插件,并支持语音图片交互,私人助理理想选择,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatcom/README.md)。(contributed by [@lanvent](https://github.com/lanvent) in [#944](https://github.com/zhayujie/chatgpt-on-wechat/pull/944))
>**2023.04.05:** 支持微信公众号部署,兼容插件,并支持语音图片交互,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatmp/README.md)。(contributed by [@JS00000](https://github.com/JS00000) in [#686](https://github.com/zhayujie/chatgpt-on-wechat/pull/686))
@@ -120,6 +123,7 @@ pip3 install azure-cognitiveservices-speech
"speech_recognition": false, # 是否开启语音识别
"group_speech_recognition": false, # 是否开启群组语音识别
"use_azure_chatgpt": false, # 是否使用Azure ChatGPT service代替openai ChatGPT service. 当设置为true时需要设置 open_ai_api_base,如 https://xxx.openai.azure.com/
+ "azure_deployment_id": "", # 采用Azure ChatGPT时,模型部署名称
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", # 人格描述
# 订阅消息,公众号和企业微信channel中请填写,当被订阅时会自动回复,可使用特殊占位符。目前支持的占位符有{trigger_prefix},在程序中它会自动替换成bot的触发词。
"subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持角色扮演和文字冒险等丰富插件。\n输入{trigger_prefix}#help 查看详细指令。"
@@ -147,11 +151,11 @@ pip3 install azure-cognitiveservices-speech
**4.其他配置**
-+ `model`: 模型名称,目前支持 `gpt-3.5-turbo`, `text-davinci-003`, `gpt-4`, `gpt-4-32k` (其中gpt-4 api暂未开放)
++ `model`: 模型名称,目前支持 `gpt-3.5-turbo`, `text-davinci-003`, `gpt-4`, `gpt-4-32k` (其中gpt-4 api暂未完全开放,申请通过后可使用)
+ `temperature`,`frequency_penalty`,`presence_penalty`: Chat API接口参数,详情参考[OpenAI官方文档。](https://platform.openai.com/docs/api-reference/chat)
+ `proxy`:由于目前 `openai` 接口国内无法访问,需配置代理客户端的地址,详情参考 [#351](https://github.com/zhayujie/chatgpt-on-wechat/issues/351)
+ 对于图像生成,在满足个人或群组触发条件外,还需要额外的关键词前缀来触发,对应配置 `image_create_prefix `
-+ 关于OpenAI对话及图片接口的参数配置(内容自由度、回复字数限制、图片大小等),可以参考 [对话接口](https://beta.openai.com/docs/api-reference/completions) 和 [图像接口](https://beta.openai.com/docs/api-reference/completions) 文档直接在 [代码](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/bot/openai/open_ai_bot.py) `bot/openai/open_ai_bot.py` 中进行调整。
++ 关于OpenAI对话及图片接口的参数配置(内容自由度、回复字数限制、图片大小等),可以参考 [对话接口](https://beta.openai.com/docs/api-reference/completions) 和 [图像接口](https://beta.openai.com/docs/api-reference/images) 文档,在[`config.py`](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/config.py)中检查哪些参数在本项目中是可配置的。
+ `conversation_max_tokens`:表示能够记忆的上下文最大字数(一问一答为一组对话,如果累积的对话字数超出限制,就会优先移除最早的一组对话)
+ `rate_limit_chatgpt`,`rate_limit_dalle`:每分钟最高问答速率、画图速率,超速后排队按序处理。
+ `clear_memory_commands`: 对话内指令,主动清空前文记忆,字符串数组可自定义指令别名。
@@ -159,7 +163,7 @@ pip3 install azure-cognitiveservices-speech
+ `character_desc` 配置中保存着你对机器人说的一段话,他会记住这段话并作为他的设定,你可以为他定制任何人格 (关于会话上下文的更多内容参考该 [issue](https://github.com/zhayujie/chatgpt-on-wechat/issues/43))
+ `subscribe_msg`:订阅消息,公众号和企业微信channel中请填写,当被订阅时会自动回复, 可使用特殊占位符。目前支持的占位符有{trigger_prefix},在程序中它会自动替换成bot的触发词。
-**所有可选的配置项均在该[文件](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/config.py)中列出。**
+**本说明文档可能未及时更新,当前所有可选的配置项均在[`config.py`](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/config.py)中列出。**
## 运行
@@ -205,6 +209,8 @@ FAQs:
## 联系
-欢迎提交PR、Issues,以及Star支持一下。程序运行遇到问题优先查看 [常见问题列表](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs) ,其次前往 [Issues](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中搜索。如果你想了解更多项目细节,并与开发者们交流更多关于AI技术的实践,欢迎加入星球:
+欢迎提交PR、Issues,以及Star支持一下。程序运行遇到问题可以查看 [常见问题列表](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs) ,其次前往 [Issues](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中搜索。
+
+如果你想了解更多项目细节,与开发者们交流更多关于AI技术的实践,欢迎加入星球:
diff --git a/bot/chatgpt/chat_gpt_bot.py b/bot/chatgpt/chat_gpt_bot.py
index 60fc3f845..d5db6e950 100644
--- a/bot/chatgpt/chat_gpt_bot.py
+++ b/bot/chatgpt/chat_gpt_bot.py
@@ -36,7 +36,7 @@ def __init__(self):
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
- "top_p": 1,
+ "top_p": conf().get("top_p", 1),
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
diff --git a/bot/chatgpt/chat_gpt_session.py b/bot/chatgpt/chat_gpt_session.py
index e6c319b36..dd35832a8 100644
--- a/bot/chatgpt/chat_gpt_session.py
+++ b/bot/chatgpt/chat_gpt_session.py
@@ -57,16 +57,17 @@ def num_tokens_from_messages(messages, model):
"""Returns the number of tokens used by a list of messages."""
import tiktoken
+ if model == "gpt-3.5-turbo" or model == "gpt-35-turbo":
+ return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
+ elif model == "gpt-4":
+ return num_tokens_from_messages(messages, model="gpt-4-0314")
+
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.debug("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
- if model == "gpt-3.5-turbo" or model == "gpt-35-turbo":
- return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
- elif model == "gpt-4":
- return num_tokens_from_messages(messages, model="gpt-4-0314")
- elif model == "gpt-3.5-turbo-0301":
+ if model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif model == "gpt-4-0314":
diff --git a/bot/linkai/link_ai_bot.py b/bot/linkai/link_ai_bot.py
index a92466db1..12fcc1fd8 100644
--- a/bot/linkai/link_ai_bot.py
+++ b/bot/linkai/link_ai_bot.py
@@ -1,25 +1,43 @@
+# access LinkAI knowledge base platform
+# docs: https://link-ai.tech/platform/link-app/wechat
+
+import time
+
+import requests
+
from bot.bot import Bot
-from bridge.context import ContextType
-from bridge.reply import Reply, ReplyType
-from common.log import logger
-from bridge.context import Context
from bot.chatgpt.chat_gpt_session import ChatGPTSession
+from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import SessionManager
+from bridge.context import Context, ContextType
+from bridge.reply import Reply, ReplyType
+from common.log import logger
from config import conf
-import requests
-import time
-class LinkAIBot(Bot):
+class LinkAIBot(Bot, OpenAIImage):
# authentication failed
AUTH_FAILED_CODE = 401
+ NO_QUOTA_CODE = 406
def __init__(self):
self.base_url = "https://api.link-ai.chat/v1"
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
def reply(self, query, context: Context = None) -> Reply:
- return self._chat(query, context)
+ if context.type == ContextType.TEXT:
+ return self._chat(query, context)
+ elif context.type == ContextType.IMAGE_CREATE:
+ ok, retstring = self.create_img(query, 0)
+ reply = None
+ if ok:
+ reply = Reply(ReplyType.IMAGE_URL, retstring)
+ else:
+ reply = Reply(ReplyType.ERROR, retstring)
+ return reply
+ else:
+ reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
+ return reply
def _chat(self, query, context, retry_count=0):
if retry_count >= 2:
@@ -28,22 +46,32 @@ def _chat(self, query, context, retry_count=0):
return Reply(ReplyType.ERROR, "请再问我一次吧")
try:
+ # load config
+ if context.get("generate_breaked_by"):
+ logger.info(f"[LINKAI] won't set appcode because a plugin ({context['generate_breaked_by']}) affected the context")
+ app_code = None
+ else:
+ app_code = conf().get("linkai_app_code")
+ linkai_api_key = conf().get("linkai_api_key")
+
session_id = context["session_id"]
session = self.sessions.session_query(query, session_id)
# remove system message
- if session.messages[0].get("role") == "system":
+ if app_code and session.messages[0].get("role") == "system":
session.messages.pop(0)
- # load config
- app_code = conf().get("linkai_app_code")
- linkai_api_key = conf().get("linkai_api_key")
logger.info(f"[LINKAI] query={query}, app_code={app_code}")
body = {
"appCode": app_code,
- "messages": session.messages
+ "messages": session.messages,
+ "model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
+ "temperature": conf().get("temperature"),
+ "top_p": conf().get("top_p", 1),
+ "frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
+ "presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
}
headers = {"Authorization": "Bearer " + linkai_api_key}
@@ -54,16 +82,23 @@ def _chat(self, query, context, retry_count=0):
if res.get("code") == self.AUTH_FAILED_CODE:
logger.exception(f"[LINKAI] please check your linkai_api_key, res={res}")
return Reply(ReplyType.ERROR, "请再问我一次吧")
+
+ elif res.get("code") == self.NO_QUOTA_CODE:
+ logger.exception(f"[LINKAI] please check your account quota, https://chat.link-ai.tech/console/account")
+ return Reply(ReplyType.ERROR, "提问太快啦,请休息一下再问我吧")
+
else:
# retry
time.sleep(2)
logger.warn(f"[LINKAI] do retry, times={retry_count}")
return self._chat(query, context, retry_count + 1)
+
# execute success
reply_content = res["data"]["content"]
logger.info(f"[LINKAI] reply={reply_content}")
self.sessions.session_reply(reply_content, session_id)
return Reply(ReplyType.TEXT, reply_content)
+
except Exception as e:
logger.exception(e)
# retry
diff --git a/bridge/bridge.py b/bridge/bridge.py
index 119e855c4..d3fbd9565 100644
--- a/bridge/bridge.py
+++ b/bridge/bridge.py
@@ -23,7 +23,7 @@ def __init__(self):
self.btype["chat"] = const.OPEN_AI
if conf().get("use_azure_chatgpt", False):
self.btype["chat"] = const.CHATGPTONAZURE
- if conf().get("linkai_api_key") and conf().get("linkai_app_code"):
+ if conf().get("use_linkai") and conf().get("linkai_api_key"):
self.btype["chat"] = const.LINKAI
self.bots = {}
diff --git a/channel/chat_channel.py b/channel/chat_channel.py
index 795787b2b..6a2f11e81 100644
--- a/channel/chat_channel.py
+++ b/channel/chat_channel.py
@@ -162,6 +162,8 @@ def _generate_reply(self, context: Context, reply: Reply = Reply()) -> Reply:
reply = e_context["reply"]
if not e_context.is_pass():
logger.debug("[WX] ready to handle context: type={}, content={}".format(context.type, context.content))
+ if e_context.is_break():
+ context["generate_breaked_by"] = e_context["breaked_by"]
if context.type == ContextType.TEXT or context.type == ContextType.IMAGE_CREATE: # 文字和图片消息
reply = super().build_reply_content(context.content, context)
elif context.type == ContextType.VOICE: # 语音消息
@@ -220,7 +222,7 @@ def _decorate_reply(self, context: Context, reply: Reply) -> Reply:
reply = super().build_text_to_voice(reply.content)
return self._decorate_reply(context, reply)
if context.get("isgroup", False):
- reply_text = "@" + context["msg"].actual_user_nickname + " " + reply_text.strip()
+ reply_text = "@" + context["msg"].actual_user_nickname + "\n" + reply_text.strip()
reply_text = conf().get("group_chat_reply_prefix", "") + reply_text
else:
reply_text = conf().get("single_chat_reply_prefix", "") + reply_text
diff --git a/channel/wechatcom/README.md b/channel/wechatcom/README.md
index 1b2fe1aa6..2f54a79fd 100644
--- a/channel/wechatcom/README.md
+++ b/channel/wechatcom/README.md
@@ -1,6 +1,6 @@
# 企业微信应用号channel
-企业微信官方提供了客服、应用等API,本channel使用的是企业微信的应用API的能力。
+企业微信官方提供了客服、应用等API,本channel使用的是企业微信的自建应用API的能力。
因为未来可能还会开发客服能力,所以本channel的类型名叫作`wechatcom_app`。
@@ -72,13 +72,11 @@ Error code: 60020, message: "not allow to access from your ip, ...from ip: xx.xx
意思是IP不可信,需要参考上一步的`企业可信IP`配置,把这里的IP加进去。
-### Railway部署方式
+### ~~Railway部署方式~~(2023-06-08已失效)
-公众号不能在`Railway`上部署,但企业微信应用[可以](https://railway.app/template/-FHS--?referralCode=RC3znh)!
+~~公众号不能在`Railway`上部署,但企业微信应用[可以](https://railway.app/template/-FHS--?referralCode=RC3znh)!~~
-[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/-FHS--?referralCode=RC3znh)
-
-填写配置后,将部署完成后的网址```**.railway.app/wxcomapp```,填写在上一步的URL中。发送信息后观察日志,把报错的IP加入到可信IP。(每次重启后都需要加入可信IP)
+~~填写配置后,将部署完成后的网址```**.railway.app/wxcomapp```,填写在上一步的URL中。发送信息后观察日志,把报错的IP加入到可信IP。(每次重启后都需要加入可信IP)~~
## 测试体验
diff --git a/config-template.json b/config-template.json
index d179ff813..e2a6defc9 100644
--- a/config-template.json
+++ b/config-template.json
@@ -28,5 +28,8 @@
"conversation_max_tokens": 1000,
"expires_in_seconds": 3600,
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
- "subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。"
+ "subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。",
+ "use_linkai": false,
+ "linkai_api_key": "",
+ "linkai_app_code": ""
}
diff --git a/config.py b/config.py
index 70de3b312..c964cd9de 100644
--- a/config.py
+++ b/config.py
@@ -102,6 +102,7 @@
# 插件配置
"plugin_trigger_prefix": "$", # 规范插件提供聊天相关指令的前缀,建议不要和管理员指令前缀"#"冲突
# 知识库平台配置
+ "use_linkai": False,
"linkai_api_key": "",
"linkai_app_code": ""
}
diff --git a/docs/images/contact.jpg b/docs/images/contact.jpg
new file mode 100644
index 000000000..3a8a4123b
Binary files /dev/null and b/docs/images/contact.jpg differ
diff --git a/plugins/dungeon/dungeon.py b/plugins/dungeon/dungeon.py
index 07fbde713..dce62cdfb 100644
--- a/plugins/dungeon/dungeon.py
+++ b/plugins/dungeon/dungeon.py
@@ -64,7 +64,7 @@ def on_handle_context(self, e_context: EventContext):
if e_context["context"].type != ContextType.TEXT:
return
bottype = Bridge().get_bot_type("chat")
- if bottype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
+ if bottype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI]:
return
bot = Bridge().get_bot("chat")
content = e_context["context"].content[:]
diff --git a/plugins/event.py b/plugins/event.py
index df8c609e7..719e6fcac 100644
--- a/plugins/event.py
+++ b/plugins/event.py
@@ -50,3 +50,6 @@ def __delitem__(self, key):
def is_pass(self):
return self.action == EventAction.BREAK_PASS
+
+ def is_break(self):
+ return self.action == EventAction.BREAK or self.action == EventAction.BREAK_PASS
diff --git a/plugins/godcmd/godcmd.py b/plugins/godcmd/godcmd.py
index cc753cc16..0503b173f 100644
--- a/plugins/godcmd/godcmd.py
+++ b/plugins/godcmd/godcmd.py
@@ -285,9 +285,9 @@ def on_handle_context(self, e_context: EventContext):
ok, result = False, "请提供一个GPT模型"
elif cmd == "gpt_model":
user_data = conf().get_user_data(user)
- model = conf().get('model')
- if 'gpt_model' in user_data:
- model = user_data['gpt_model']
+ model = conf().get("model")
+ if "gpt_model" in user_data:
+ model = user_data["gpt_model"]
ok, result = True, "你的GPT模型为" + str(model)
elif cmd == "reset_gpt_model":
try:
@@ -297,7 +297,7 @@ def on_handle_context(self, e_context: EventContext):
except Exception as e:
ok, result = False, "你没有设置私有GPT模型"
elif cmd == "reset":
- if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
+ if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI]:
bot.sessions.clear_session(session_id)
channel.cancel_session(session_id)
ok, result = True, "会话已重置"
@@ -320,7 +320,7 @@ def on_handle_context(self, e_context: EventContext):
load_config()
ok, result = True, "配置已重载"
elif cmd == "resetall":
- if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
+ if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI]:
channel.cancel_all_session()
bot.sessions.clear_all_session()
ok, result = True, "重置所有会话成功"
diff --git a/plugins/plugin_manager.py b/plugins/plugin_manager.py
index d8ad2c3e7..269695478 100644
--- a/plugins/plugin_manager.py
+++ b/plugins/plugin_manager.py
@@ -163,6 +163,9 @@ def emit_event(self, e_context: EventContext, *args, **kwargs):
logger.debug("Plugin %s triggered by event %s" % (name, e_context.event))
instance = self.instances[name]
instance.handlers[e_context.event](e_context, *args, **kwargs)
+ if e_context.is_break():
+ e_context["breaked_by"] = name
+ logger.debug("Plugin %s breaked event %s" % (name, e_context.event))
return e_context
def set_plugin_priority(self, name: str, priority: int):
diff --git a/plugins/role/role.py b/plugins/role/role.py
index 4f1dc879b..c75aa905a 100644
--- a/plugins/role/role.py
+++ b/plugins/role/role.py
@@ -99,7 +99,7 @@ def on_handle_context(self, e_context: EventContext):
if e_context["context"].type != ContextType.TEXT:
return
btype = Bridge().get_bot_type("chat")
- if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
+ if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI]:
return
bot = Bridge().get_bot("chat")
content = e_context["context"].content[:]
diff --git a/plugins/tool/tool.py b/plugins/tool/tool.py
index 4caa083cc..cba50daee 100644
--- a/plugins/tool/tool.py
+++ b/plugins/tool/tool.py
@@ -55,6 +55,7 @@ def on_handle_context(self, e_context: EventContext):
const.CHATGPT,
const.OPEN_AI,
const.CHATGPTONAZURE,
+ const.LINKAI,
):
return
diff --git a/translate/baidu/baidu_translate.py b/translate/baidu/baidu_translate.py
index a87176683..6f99e34e1 100644
--- a/translate/baidu/baidu_translate.py
+++ b/translate/baidu/baidu_translate.py
@@ -17,13 +17,15 @@ def __init__(self) -> None:
self.url = endpoint + path
self.appid = conf().get("baidu_translate_app_id")
self.appkey = conf().get("baidu_translate_app_key")
+ if not self.appid or not self.appkey:
+ raise Exception("baidu translate appid or appkey not set")
# For list of language codes, please refer to `https://api.fanyi.baidu.com/doc/21`, need to convert to ISO 639-1 codes
def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
if not from_lang:
from_lang = "auto" # baidu suppport auto detect
salt = random.randint(32768, 65536)
- sign = self.make_md5(self.appid + query + str(salt) + self.appkey)
+ sign = self.make_md5("{}{}{}{}".format(self.appid, query, salt, self.appkey))
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {"appid": self.appid, "q": query, "from": from_lang, "to": to_lang, "salt": salt, "sign": sign}
diff --git a/voice/audio_convert.py b/voice/audio_convert.py
index 9c9515a28..18fe3c2f3 100644
--- a/voice/audio_convert.py
+++ b/voice/audio_convert.py
@@ -1,7 +1,13 @@
import shutil
import wave
-import pysilk
+from common.log import logger
+
+try:
+ import pysilk
+except ImportError:
+ logger.warn("import pysilk failed, wechaty voice message will not be supported.")
+
from pydub import AudioSegment
sil_supports = [8000, 12000, 16000, 24000, 32000, 44100, 48000] # slk转wav时,支持的采样率
diff --git a/voice/baidu/baidu_voice.py b/voice/baidu/baidu_voice.py
index 406157b96..fbf53ce89 100644
--- a/voice/baidu/baidu_voice.py
+++ b/voice/baidu/baidu_voice.py
@@ -43,9 +43,9 @@ def __init__(self):
with open(config_path, "r") as fr:
bconf = json.load(fr)
- self.app_id = conf().get("baidu_app_id")
- self.api_key = conf().get("baidu_api_key")
- self.secret_key = conf().get("baidu_secret_key")
+ self.app_id = str(conf().get("baidu_app_id"))
+ self.api_key = str(conf().get("baidu_api_key"))
+ self.secret_key = str(conf().get("baidu_secret_key"))
self.dev_id = conf().get("baidu_dev_pid")
self.lang = bconf["lang"]
self.ctp = bconf["ctp"]