Commit 7ac405d

Fix circular import and update tests for streaming API
- Fix circular import between agent/utils.py and conversation modules by using lazy imports - Update test_agent_utils.py to include new streaming parameters (on_token, metadata, extra_body) - All tests now passing Co-authored-by: openhands <openhands@all-hands.dev>
1 parent dbbd0cf commit 7ac405d
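
The lazy-import half of the fix follows the usual recipe for breaking an import cycle: one side stops importing the other at module load time and instead resolves the import inside the function that needs it, by which point both modules are fully initialized. A minimal, runnable sketch of the pattern, using hypothetical module names and simulating the two files in one script (this is not the SDK's real layout):

```python
import sys
import textwrap
import types


def load_module(name: str, source: str) -> types.ModuleType:
    """Register `source` under `name` as if it were a module on disk."""
    module = types.ModuleType(name)
    sys.modules[name] = module
    exec(textwrap.dedent(source), module.__dict__)
    return module


# "conversation_mod" used to import "agent_utils_mod" at module level, which
# completed a cycle. Deferring the import to call time (the same pattern
# ask_agent() in local_conversation.py now uses) breaks it.
conversation = load_module(
    "conversation_mod",
    """
    def ask_agent(question):
        from agent_utils_mod import make_llm_completion  # lazy import
        return make_llm_completion(question)
    """,
)

# "agent_utils_mod" keeps a module-level import of the conversation side;
# only one direction is resolved at import time, so no cycle can form.
agent_utils = load_module(
    "agent_utils_mod",
    """
    import conversation_mod

    def make_llm_completion(question):
        return f"completion for {question!r}"
    """,
)

print(conversation.ask_agent("hello"))  # completion for 'hello'
```

In the diff below, the deferred import is the make_llm_completion / prepare_llm_messages import moved from the top of local_conversation.py into ask_agent().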

3 files changed, +26 -2 lines changed


openhands-sdk/openhands/sdk/agent/utils.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -12,7 +12,7 @@
 
 from openhands.sdk.context.condenser.base import CondenserBase
 from openhands.sdk.context.view import View
-from openhands.sdk.conversation import ConversationTokenCallbackType
+from openhands.sdk.conversation.types import ConversationTokenCallbackType
 from openhands.sdk.event.base import Event, LLMConvertibleEvent
 from openhands.sdk.event.condenser import Condensation
 from openhands.sdk.llm import LLM, LLMResponse, Message
```
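
The other half of the fix is the one-line change above: the callback type is now imported from the submodule that defines it rather than re-exported from the package root. The reasoning is standard Python import semantics, not anything SDK-specific: while a cycle is being resolved, a package's __init__.py may be only partially executed, so names it re-exports may not be bound yet, whereas a leaf module such as conversation/types.py can still be imported and run to completion. An annotated comparison (the exact contents of the conversation package's __init__.py are not shown in this diff, so the cycle path described in the comments is inferred from the commit message):

```python
# Before: the name is looked up in the namespace built by
# openhands/sdk/conversation/__init__.py. If that __init__ is still executing
# when agent/utils.py is imported (the circular import this commit fixes), the
# re-exported name may not be bound yet, and Python raises
# "ImportError: cannot import name ... from partially initialized module".
from openhands.sdk.conversation import ConversationTokenCallbackType

# After: the name is resolved directly from the leaf module that defines it,
# which can be executed to completion even while the package __init__ is
# mid-import, so the lookup succeeds.
from openhands.sdk.conversation.types import ConversationTokenCallbackType
```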

openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -4,7 +4,6 @@
 from pathlib import Path
 
 from openhands.sdk.agent.base import AgentBase
-from openhands.sdk.agent.utils import make_llm_completion, prepare_llm_messages
 from openhands.sdk.context.prompts.prompt import render_template
 from openhands.sdk.conversation.base import BaseConversation
 from openhands.sdk.conversation.exceptions import ConversationRunError
@@ -514,6 +513,8 @@ def ask_agent(self, question: str) -> str:
         Returns:
             A string response from the agent
         """
+        from openhands.sdk.agent.utils import make_llm_completion, prepare_llm_messages
+
         template_dir = (
             Path(__file__).parent.parent.parent / "context" / "prompts" / "templates"
         )
```
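
A quick smoke check that the cycle is gone (assumes the openhands-sdk package is installed; the module paths below are taken from the file paths in this commit):

```python
# Import both sides of the former cycle in a fresh interpreter. Before this
# commit one of these imports could fail with an ImportError about a partially
# initialized module; with the lazy import in place both should load cleanly.
import importlib

for name in (
    "openhands.sdk.agent.utils",
    "openhands.sdk.conversation.impl.local_conversation",
):
    importlib.import_module(name)
    print(f"imported {name} OK")
```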

tests/sdk/agent/test_agent_utils.py

Lines changed: 23 additions & 0 deletions
```diff
@@ -27,6 +27,7 @@ def mock_llm():
     """Create a mock LLM for testing."""
     llm = Mock(spec=LLM)
     llm.uses_responses_api.return_value = False
+    llm.metadata = {}
     return llm
 
 
@@ -277,7 +278,9 @@ def test_make_llm_completion_with_completion_api(mock_llm, sample_messages):
     mock_llm.completion.assert_called_once_with(
         messages=sample_messages,
         tools=[],
+        extra_body={"metadata": {}},
         add_security_risk_prediction=True,
+        on_token=None,
     )
     mock_llm.responses.assert_not_called()
 
@@ -301,6 +304,8 @@ def test_make_llm_completion_with_responses_api(mock_llm, sample_messages):
         include=None,
         store=False,
         add_security_risk_prediction=True,
+        metadata={},
+        on_token=None,
     )
     mock_llm.completion.assert_not_called()
 
@@ -323,7 +328,9 @@ def test_make_llm_completion_with_tools_completion_api(
     mock_llm.completion.assert_called_once_with(
         messages=sample_messages,
         tools=sample_tools,
+        extra_body={"metadata": {}},
         add_security_risk_prediction=True,
+        on_token=None,
     )
 
 
@@ -348,6 +355,8 @@ def test_make_llm_completion_with_tools_responses_api(
         include=None,
         store=False,
         add_security_risk_prediction=True,
+        metadata={},
+        on_token=None,
     )
 
 
@@ -366,7 +375,9 @@ def test_make_llm_completion_with_none_tools(mock_llm, sample_messages):
     mock_llm.completion.assert_called_once_with(
         messages=sample_messages,
         tools=[],
+        extra_body={"metadata": {}},
         add_security_risk_prediction=True,
+        on_token=None,
     )
 
 
@@ -385,7 +396,9 @@ def test_make_llm_completion_with_empty_tools_list(mock_llm, sample_messages):
     mock_llm.completion.assert_called_once_with(
         messages=sample_messages,
         tools=[],
+        extra_body={"metadata": {}},
         add_security_risk_prediction=True,
+        on_token=None,
     )
 
 
@@ -404,7 +417,9 @@ def test_make_llm_completion_empty_messages(mock_llm):
     mock_llm.completion.assert_called_once_with(
         messages=[],
         tools=[],
+        extra_body={"metadata": {}},
         add_security_risk_prediction=True,
+        on_token=None,
     )
 
 
@@ -440,7 +455,9 @@ def test_prepare_llm_messages_and_make_llm_completion_integration(
     mock_llm.completion.assert_called_once_with(
         messages=sample_messages,
         tools=[],
+        extra_body={"metadata": {}},
         add_security_risk_prediction=True,
+        on_token=None,
     )
 
 
@@ -449,6 +466,7 @@ def test_make_llm_completion_api_selection():
     # Test completion API selection
     mock_llm = Mock(spec=LLM)
     mock_llm.uses_responses_api.return_value = False
+    mock_llm.metadata = {}
     mock_response = Mock(spec=LLMResponse)
     mock_llm.completion.return_value = mock_response
 
@@ -466,12 +484,15 @@ def test_make_llm_completion_api_selection():
     mock_llm.completion.assert_called_once_with(
         messages=messages,
         tools=[],
+        extra_body={"metadata": {}},
         add_security_risk_prediction=True,
+        on_token=None,
     )
     mock_llm.responses.assert_not_called()
 
     # Reset mocks and test responses API selection
     mock_llm.reset_mock()
+    mock_llm.metadata = {}
     mock_llm.uses_responses_api.return_value = True
     mock_llm.responses.return_value = mock_response
 
@@ -485,5 +506,7 @@ def test_make_llm_completion_api_selection():
         include=None,
         store=False,
         add_security_risk_prediction=True,
+        metadata={},
+        on_token=None,
     )
     mock_llm.completion.assert_not_called()
```
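
Read together, the updated assertions pin down what make_llm_completion is now expected to forward: on the completion path the LLM's metadata travels wrapped as extra_body={"metadata": ...} along with an on_token callback (None when no streaming consumer is attached), while on the responses path metadata and on_token are passed directly. A rough sketch of a completion-path call that would satisfy these assertions; it is inferred from the test side only, and the function name is hypothetical rather than the SDK's actual implementation:

```python
# Hypothetical sketch reconstructed from the assertions above; the real
# make_llm_completion in openhands/sdk/agent/utils.py may differ in signature
# and in how it assembles these arguments.
def make_llm_completion_sketch(llm, messages, tools=None, on_token=None):
    return llm.completion(
        messages=messages,
        tools=tools or [],
        extra_body={"metadata": llm.metadata},  # {} for the mock fixture above
        add_security_risk_prediction=True,
        on_token=on_token,
    )
```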
