Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion ms_agent/llm/deepseek_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def _call_llm_for_continue_gen(self,
messages = self.format_input_message(messages)
stop = kwargs.pop('stop', []).append('```')
return self._call_llm(
messages=messages, tools=tools, stop=stop, **kwargs)
messages=messages, tools=self.format_tools(tools), stop=stop, **kwargs)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

While adding self.format_tools(tools) correctly addresses the tool formatting issue, this method contains several other critical bugs that will likely prevent it from functioning as intended:

  1. AttributeError: Line 37 calls self.format_input_message(messages), but the base class method is named _format_input_message (with a leading underscore). This will cause a runtime error.
  2. Logic Error: Line 38 sets the stop variable to None, because list.append() mutates the list in place and returns None. Consequently, the stop=stop argument passed on line 40 will always be None, silently discarding both the caller-supplied stop sequences and the appended '```'. Use e.g. `stop = kwargs.pop('stop', []) + ['```']` instead.
  3. Redundancy: The _call_llm method already handles message formatting internally (via _format_input_message), making the call on line 37 unnecessary even if the name were correct.
  4. Missing State Update: Unlike the base class implementation in ms_agent/llm/openai_llm.py, this override fails to increment messages[-1].api_calls, which may affect tracking or logic dependent on the number of API attempts.

I recommend refactoring this method to address these issues alongside the tool formatting fix.



if __name__ == '__main__':
Expand Down
9 changes: 6 additions & 3 deletions ms_agent/llm/openai_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,9 +343,10 @@ def _stream_continue_generate(self,
# The stream may end without a final usage chunk, which is acceptable.
pass
first_run = not messages[-1].to_dict().get('partial', False)
if chunk.choices[0].finish_reason in [
if (not message.tool_calls
and chunk.choices[0].finish_reason in [
'length', 'null'
] and (max_runs is None or max_runs != 0):
] and (max_runs is None or max_runs != 0)):
logger.info(
f'finish_reason: {chunk.choices[0].finish_reason}, continue generate.'
)
Expand Down Expand Up @@ -499,7 +500,7 @@ def _call_llm_for_continue_gen(self,
messages[-1].partial = True
messages[-1].api_calls += 1

return self._call_llm(messages, tools, **kwargs)
return self._call_llm(messages, self.format_tools(tools), **kwargs)

def _continue_generate(self,
messages: List[Message],
Expand All @@ -522,6 +523,8 @@ def _continue_generate(self,
Message: A fully formed Message object containing the complete response.
"""
new_message = self._format_output_message(completion)
if new_message.tool_calls:
return new_message
if completion.choices[0].finish_reason in [
'length', 'null'
] and (max_runs is None or max_runs != 0):
Expand Down