Skip to content

Commit

Permalink
langchain[patch]: Invoke callback prior to yielding token (#18282)
Browse files Browse the repository at this point in the history
## PR title
langchain[patch]: Invoke callback prior to yielding token

## PR message
Description: Invoke the on_llm_new_token callback prior to yielding the token in the
_stream and _astream methods of libs/langchain/tests/unit_tests/llms/fake_chat_model.py.
Issue: #16913
Dependencies: None
Twitter handle: None
  • Loading branch information
williamdevena committed Feb 28, 2024
1 parent cd52433 commit 23722e3
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions libs/langchain/tests/unit_tests/llms/fake_chat_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,9 +120,9 @@ def _stream(

for token in content_chunks:
chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk

if message.additional_kwargs:
for key, value in message.additional_kwargs.items():
Expand All @@ -142,37 +142,37 @@ def _stream(
},
)
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
"",
chunk=chunk, # No token for function call
)
yield chunk
else:
chunk = ChatGenerationChunk(
message=AIMessageChunk(
content="",
additional_kwargs={"function_call": {fkey: fvalue}},
)
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
"",
chunk=chunk, # No token for function call
)
yield chunk
else:
chunk = ChatGenerationChunk(
message=AIMessageChunk(
content="", additional_kwargs={key: value}
)
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
"",
chunk=chunk, # No token for function call
)
yield chunk

async def _astream(
self,
Expand Down

0 comments on commit 23722e3

Please sign in to comment.