langchain.py
from threading import Thread
from typing import TYPE_CHECKING, Any, Callable, Generator, Optional, Sequence

if TYPE_CHECKING:
    from langchain.base_language import BaseLanguageModel

from llama_index.bridge.pydantic import PrivateAttr
from llama_index.callbacks import CallbackManager
from llama_index.core.llms.types import (
    ChatMessage,
    ChatResponse,
    ChatResponseAsyncGen,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseAsyncGen,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.llms.generic_utils import (
    completion_response_to_chat_response,
    stream_completion_response_to_chat_response,
)
from llama_index.llms.llm import LLM
from llama_index.types import BaseOutputParser, PydanticProgramMode


class LangChainLLM(LLM):
    """Adapter for a LangChain LLM."""

    _llm: Any = PrivateAttr()

    def __init__(
        self,
        llm: "BaseLanguageModel",
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
    ) -> None:
        self._llm = llm
        super().__init__(
            callback_manager=callback_manager,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
        )

    @classmethod
    def class_name(cls) -> str:
        return "LangChainLLM"

    @property
    def llm(self) -> "BaseLanguageModel":
        return self._llm

    @property
    def metadata(self) -> LLMMetadata:
        from llama_index.llms.langchain_utils import get_llm_metadata

        return get_llm_metadata(self._llm)

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        from llama_index.llms.langchain_utils import (
            from_lc_messages,
            to_lc_messages,
        )

        if not self.metadata.is_chat_model:
            prompt = self.messages_to_prompt(messages)
            completion_response = self.complete(prompt, is_formatted=True, **kwargs)
            return completion_response_to_chat_response(completion_response)

        lc_messages = to_lc_messages(messages)
        lc_message = self._llm.predict_messages(messages=lc_messages, **kwargs)
        message = from_lc_messages([lc_message])[0]
        return ChatResponse(message=message)

    @llm_completion_callback()
    def complete(
        self, prompt: str, is_formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        if not is_formatted:
            prompt = self.completion_to_prompt(prompt)

        output_str = self._llm.predict(prompt, **kwargs)
        return CompletionResponse(text=output_str)

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        if not self.metadata.is_chat_model:
            prompt = self.messages_to_prompt(messages)
            stream_completion = self.stream_complete(
                prompt, is_formatted=True, **kwargs
            )
            return stream_completion_response_to_chat_response(stream_completion)

        from llama_index.langchain_helpers.streaming import (
            StreamingGeneratorCallbackHandler,
        )

        handler = StreamingGeneratorCallbackHandler()

        if not hasattr(self._llm, "streaming"):
            raise ValueError("LLM must support streaming.")
        if not hasattr(self._llm, "callbacks"):
            raise ValueError("LLM must support callbacks to use streaming.")

        self._llm.callbacks = [handler]  # type: ignore
        self._llm.streaming = True  # type: ignore

        # Run the blocking chat call in a background thread; the callback
        # handler exposes the streamed tokens as a generator.
        thread = Thread(target=self.chat, args=[messages], kwargs=kwargs)
        thread.start()

        response_gen = handler.get_response_gen()

        def gen() -> Generator[ChatResponse, None, None]:
            text = ""
            for delta in response_gen:
                text += delta
                yield ChatResponse(
                    message=ChatMessage(content=text),
                    delta=delta,
                )

        return gen()

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, is_formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        if not is_formatted:
            prompt = self.completion_to_prompt(prompt)

        from llama_index.langchain_helpers.streaming import (
            StreamingGeneratorCallbackHandler,
        )

        handler = StreamingGeneratorCallbackHandler()

        if not hasattr(self._llm, "streaming"):
            raise ValueError("LLM must support streaming.")
        if not hasattr(self._llm, "callbacks"):
            raise ValueError("LLM must support callbacks to use streaming.")

        self._llm.callbacks = [handler]  # type: ignore
        self._llm.streaming = True  # type: ignore

        thread = Thread(target=self.complete, args=[prompt], kwargs=kwargs)
        thread.start()

        response_gen = handler.get_response_gen()

        def gen() -> Generator[CompletionResponse, None, None]:
            text = ""
            for delta in response_gen:
                text += delta
                yield CompletionResponse(delta=delta, text=text)

        return gen()

    @llm_chat_callback()
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        # TODO: Implement async chat
        return self.chat(messages, **kwargs)

    @llm_completion_callback()
    async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        # TODO: Implement async complete
        return self.complete(prompt, **kwargs)

    @llm_chat_callback()
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        # TODO: Implement async stream_chat

        async def gen() -> ChatResponseAsyncGen:
            for message in self.stream_chat(messages, **kwargs):
                yield message

        return gen()

    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        # TODO: Implement async stream_complete

        async def gen() -> CompletionResponseAsyncGen:
            for response in self.stream_complete(prompt, **kwargs):
                yield response

        return gen()
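

# Usage sketch (illustrative only, not part of this module): wrapping a
# LangChain model so it can be used anywhere a llama_index LLM is expected.
# `ChatOpenAI` and the model name below are assumptions for the example, not
# requirements of the adapter.
#
#     from langchain.chat_models import ChatOpenAI
#
#     lc_llm = ChatOpenAI(model="gpt-3.5-turbo")
#     llm = LangChainLLM(llm=lc_llm)
#
#     # Plain completion
#     print(llm.complete("Tell me a joke.").text)
#
#     # Streaming completion (requires the wrapped LLM to expose
#     # `streaming` and `callbacks` attributes, as checked above)
#     for chunk in llm.stream_complete("Tell me a story."):
#         print(chunk.delta, end="")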