Added generic event handler for both tokens and function calls #9263

Merged: 23 commits into langchain-ai:master from general-event-handler on Aug 25, 2023.
Commits (23; the diff below shows changes from 17 of them):
ac38430 - Added generic event handler for both tokens and function calls (andrewBatutin, Aug 15, 2023)
d663cde - Added callback for event handler (andrewBatutin, Aug 16, 2023)
a224cf6 - Merge branch 'master' into general-event-handler (andrewBatutin, Aug 16, 2023)
9a2cf5c - to avoid big PR restored lock file (andrewBatutin, Aug 16, 2023)
6b16d10 - Merge remote-tracking branch 'origin/general-event-handler' into gene… (andrewBatutin, Aug 16, 2023)
706f443 - Merge branch 'master' into general-event-handler (andrewBatutin, Aug 17, 2023)
43f8eb8 - updated poetry files (andrewBatutin, Aug 17, 2023)
6ad397d - updated poetry files for root project (andrewBatutin, Aug 17, 2023)
1dc16cd - updated poetry files for langchain project (andrewBatutin, Aug 17, 2023)
75f4cb6 - Merge remote-tracking branch 'origin/general-event-handler' into gene… (andrewBatutin, Aug 17, 2023)
b18e2ac - removed extra dependency (andrewBatutin, Aug 17, 2023)
966613e - Merge branch 'langchain-ai:master' into general-event-handler (andrewBatutin, Aug 21, 2023)
a72e20c - updating event handler (andrewBatutin, Aug 22, 2023)
0804af4 - removed event handler in favour returning chunk as a param for on new… (andrewBatutin, Aug 22, 2023)
d36e8d0 - Merge remote-tracking branch 'origin/general-event-handler' into gene… (andrewBatutin, Aug 22, 2023)
0801772 - removed event handler in favour returning chunk as a param for on new… (andrewBatutin, Aug 22, 2023)
aea4b8d - Merge branch 'langchain-ai:master' into general-event-handler (andrewBatutin, Aug 22, 2023)
2ada2b9 - added chunk to BaseTracer callback (andrewBatutin, Aug 24, 2023)
bd770ef - Merge remote-tracking branch 'origin/general-event-handler' into gene… (andrewBatutin, Aug 24, 2023)
af0ee18 - Merge branch 'master' into general-event-handler (andrewBatutin, Aug 24, 2023)
72b06ce - lint formatting (andrewBatutin, Aug 24, 2023)
4bf6447 - Merge remote-tracking branch 'origin/general-event-handler' into gene… (andrewBatutin, Aug 24, 2023)
e41a391 - applied reformatting (andrewBatutin, Aug 24, 2023)
libs/langchain/langchain/callbacks/base.py (11 changes: 9 additions, 2 deletions)
@@ -10,7 +10,7 @@
 from langchain.schema.agent import AgentAction, AgentFinish
 from langchain.schema.document import Document
 from langchain.schema.messages import BaseMessage
-from langchain.schema.output import LLMResult
+from langchain.schema.output import LLMResult, GenerationChunk, ChatGenerationChunk


 class RetrieverManagerMixin:
@@ -43,12 +43,19 @@ class LLMManagerMixin:
     def on_llm_new_token(
         self,
         token: str,
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run on new LLM token. Only available when streaming is enabled."""
+        """Run on new LLM token. Only available when streaming is enabled.
+
+        Args:
+            token (str): The new token.
+            chunk (GenerationChunk | ChatGenerationChunk): The new generated
+                chunk, containing content and other information.
+        """

     def on_llm_end(
         self,
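To illustrate the new signature from the subscriber side, here is a minimal sketch of a custom handler that consumes the chunk. The handler name is hypothetical and not part of this PR, and the "function_call" key in additional_kwargs is an assumption about OpenAI-style streaming, where function-call argument deltas arrive with an empty text token:

from typing import Any, Optional, Union
from uuid import UUID

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema.output import ChatGenerationChunk, GenerationChunk


class ChunkLoggingHandler(BaseCallbackHandler):
    """Hypothetical handler: logs plain tokens and function-call deltas."""

    def on_llm_new_token(
        self,
        token: str,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        if token:
            print(f"token: {token!r}")
        # For chat models, function-call arguments stream with an empty
        # token; the chunk's message metadata carries them (assumption:
        # OpenAI-style "function_call" key in additional_kwargs).
        if isinstance(chunk, ChatGenerationChunk):
            fn_call = chunk.message.additional_kwargs.get("function_call")
            if fn_call:
                print(f"function-call delta: {fn_call}")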
libs/langchain/langchain/callbacks/manager.py (3 changes: 3 additions, 0 deletions)
@@ -49,6 +49,7 @@
     LLMResult,
 )
 from langchain.schema.messages import BaseMessage, get_buffer_string
+from langchain.schema.output import GenerationChunk, ChatGenerationChunk

 if TYPE_CHECKING:
     from langsmith import Client as LangSmithClient
@@ -655,6 +656,7 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
     async def on_llm_new_token(
         self,
         token: str,
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
         **kwargs: Any,
     ) -> None:
         """Run when LLM generates a new token.
@@ -667,6 +669,7 @@ async def on_llm_new_token(
             "on_llm_new_token",
             "ignore_llm",
             token,
+            chunk,
             run_id=self.run_id,
             parent_run_id=self.parent_run_id,
             tags=self.tags,
libs/langchain/langchain/chat_models/openai.py (3 changes: 2 additions, 1 deletion)
@@ -392,7 +392,8 @@ async def _astream(
             default_chunk_class = chunk.__class__
             yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
             if run_manager:
-                await run_manager.on_llm_new_token(chunk.content)
+                await run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
+

     async def _agenerate(
         self,
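A rough usage sketch tying the pieces together, assuming the hypothetical ChunkLoggingHandler above and an OPENAI_API_KEY in the environment; this is an illustration of the new callback path, not code from the PR:

import asyncio

from langchain.chat_models.openai import ChatOpenAI
from langchain.schema.messages import HumanMessage


async def main() -> None:
    # Each streamed delta now reaches on_llm_new_token as (token, chunk),
    # via the AsyncCallbackManagerForLLMRun changes above.
    chat = ChatOpenAI(streaming=True, callbacks=[ChunkLoggingHandler()])
    await chat.agenerate([[HumanMessage(content="Say hello")]])


asyncio.run(main())

Passing the whole chunk rather than adding a second callback method keeps existing handlers source-compatible: the new argument defaults to None, so handlers that only care about token text need no changes. The commit history reflects this choice ("removed event handler in favour returning chunk as a param for on new…").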
@@ -1,16 +1,22 @@
"""Test ChatOpenAI wrapper."""

from typing import Any

import pytest

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain.chains.openai_functions import (
    create_openai_fn_chain,
)
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema import (
    ChatGeneration,
    ChatResult,
    LLMResult,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.schema.output import ChatGenerationChunk
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


@@ -161,6 +167,94 @@ async def test_async_chat_openai_streaming() -> None:
            assert generation.text == generation.message.content


@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_async_chat_openai_streaming_with_function() -> None:
    """Test ChatOpenAI wrapper with streaming and a function call."""

    class MyCustomAsyncHandler(AsyncCallbackHandler):
        async def on_llm_new_token(
            self,
            token: str,
            chunk: ChatGenerationChunk,
            **kwargs: Any,
        ) -> Any:
            print(f"I just got a token: {token}")
            print(f"I just got a chunk: {chunk}")

    json_schema = {
        "title": "Person",
        "description": "Identifying information about a person.",
        "type": "object",
        "properties": {
            "name": {
                "title": "Name",
                "description": "The person's name",
                "type": "string",
            },
            "age": {
                "title": "Age",
                "description": "The person's age",
                "type": "integer",
            },
            "fav_food": {
                "title": "Fav Food",
                "description": "The person's favorite food",
                "type": "string",
            },
        },
        "required": ["name", "age"],
    }

    callback_handler = MyCustomAsyncHandler()
    callback_manager = CallbackManager([callback_handler])

    chat = ChatOpenAI(
        max_tokens=10,
        n=1,
        callback_manager=callback_manager,
        streaming=True,
    )

    prompt_msgs = [
        SystemMessage(
            content="You are a world class algorithm for "
            "extracting information in structured formats."
        ),
        HumanMessage(
            content="Use the given format to extract "
            "information from the following input:"
        ),
        HumanMessagePromptTemplate.from_template("{input}"),
        HumanMessage(content="Tips: Make sure to answer in the correct format"),
    ]
    prompt = ChatPromptTemplate(messages=prompt_msgs)

    function: Any = {
        "name": "output_formatter",
        "description": (
            "Output formatter. Should always be used to format your response to the"
            " user."
        ),
        "parameters": json_schema,
    }
    chain = create_openai_fn_chain(
        [function],
        chat,
        prompt,
        output_parser=None,
    )

    message = HumanMessage(content="Sally is 13 years old")
    response = await chain.agenerate([{"input": message}])

    assert isinstance(response, LLMResult)
    assert len(response.generations) == 1
    for generations in response.generations:
        assert len(generations) == 1
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content

def test_chat_openai_extra_kwargs() -> None:
"""Test extra kwargs to chat openai."""
# Check that foo is saved in extra_kwargs.