diff --git a/api/core/features/assistant_base_runner.py b/api/core/features/assistant_base_runner.py
index 1d9541070f881..aa15c45869b0d 100644
--- a/api/core/features/assistant_base_runner.py
+++ b/api/core/features/assistant_base_runner.py
@@ -566,7 +566,11 @@ def organize_agent_history(self, prompt_messages: list[PromptMessage]) -> list[P
                         tools = tools.split(';')
                         tool_calls: list[AssistantPromptMessage.ToolCall] = []
                         tool_call_response: list[ToolPromptMessage] = []
-                        tool_inputs = json.loads(agent_thought.tool_input)
+                        try:
+                            tool_inputs = json.loads(agent_thought.tool_input)
+                        except Exception as e:
+                            logging.warning("tool execution error: {}, tool_input: {}.".format(str(e), agent_thought.tool_input))
+                            tool_inputs = { agent_thought.tool: agent_thought.tool_input }
                         for tool in tools:
                             # generate a uuid for tool call
                             tool_call_id = str(uuid.uuid4())
@@ -599,4 +603,4 @@ def organize_agent_history(self, prompt_messages: list[PromptMessage]) -> list[P
 
         db.session.close()
 
-        return result
\ No newline at end of file
+        return result
diff --git a/api/core/features/assistant_cot_runner.py b/api/core/features/assistant_cot_runner.py
index 3762ddcf62e7c..6d43d846e473e 100644
--- a/api/core/features/assistant_cot_runner.py
+++ b/api/core/features/assistant_cot_runner.py
@@ -182,7 +182,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
                     delta=LLMResultChunkDelta(
                         index=0,
                         message=AssistantPromptMessage(
-                            content=json.dumps(chunk)
+                            content=json.dumps(chunk, ensure_ascii=False)  # if ensure_ascii=True, the text in webui maybe garbled text
                         ),
                         usage=None
                     )
@@ -667,4 +667,4 @@ def _jsonify_tool_prompt_messages(self, tools: list[PromptMessageTool]) -> str:
         try:
             return json.dumps(tools, ensure_ascii=False)
         except json.JSONDecodeError:
-            return json.dumps(tools)
\ No newline at end of file
+            return json.dumps(tools)
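
A minimal standalone sketch, using only the standard library, of the two behaviors the patch addresses: json.loads raises when tool_input holds plain text instead of JSON (hence the fallback dict keyed by the tool name), and json.dumps escapes non-ASCII characters unless ensure_ascii=False is passed, which is what rendered as garbled text in the web UI. The helper name parse_tool_input and the sample values are illustrative, not taken from the patched files.

import json
import logging

logging.basicConfig(level=logging.WARNING)

def parse_tool_input(tool_name: str, tool_input: str) -> dict:
    # Mirrors the fallback added in the patch: tool_input may be plain text
    # rather than JSON, in which case json.loads raises JSONDecodeError.
    try:
        return json.loads(tool_input)
    except Exception as e:
        logging.warning("tool execution error: {}, tool_input: {}.".format(str(e), tool_input))
        return {tool_name: tool_input}

# JSON input parses normally; plain-text input falls back to {tool_name: raw_text}.
print(parse_tool_input("web_search", '{"query": "天気"}'))  # {'query': '天気'}
print(parse_tool_input("web_search", "天気を調べて"))        # {'web_search': '天気を調べて'}

# ensure_ascii=True (the default) escapes non-ASCII characters, which can show up
# garbled in the web UI; ensure_ascii=False keeps them readable.
chunk = {"thought": "天気は晴れです"}
print(json.dumps(chunk))                      # {"thought": "\u5929\u6c17\u306f\u6674\u308c\u3067\u3059"}
print(json.dumps(chunk, ensure_ascii=False))  # {"thought": "天気は晴れです"}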