defmodule LangChain.Chains.LLMChain do
@moduledoc """
Define an LLMChain. This is the heart of the LangChain library.

The chain manages tools, a tool map, delta tracking, `last_message` tracking,
conversation messages, and verbose logging. Keeping these responsibilities
separate from the LLM modules makes it easier to support additional LLMs,
because each LLM implementation can focus on communication and formats
instead of all the extra logic.
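
A minimal sketch of typical usage (the `ChatOpenAI` configuration and the
prompt are illustrative):

    {:ok, updated_chain, response} =
      %{llm: %ChatOpenAI{model: "gpt-3.5-turbo"}}
      |> LLMChain.new!()
      |> LLMChain.add_message(Message.new_user!("Name the capital of France."))
      |> LLMChain.run()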
"""
use Ecto.Schema
import Ecto.Changeset
require Logger
alias LangChain.PromptTemplate
alias __MODULE__
alias LangChain.Message
alias LangChain.Message.ToolCall
alias LangChain.Message.ToolResult
alias LangChain.MessageDelta
alias LangChain.Function
alias LangChain.LangChainError
alias LangChain.Utils
@primary_key false
embedded_schema do
field :llm, :any, virtual: true
field :verbose, :boolean, default: false
# verbosely log each delta message.
field :verbose_deltas, :boolean, default: false
field :tools, {:array, :any}, default: [], virtual: true
# set and managed privately through tools
field :_tool_map, :map, default: %{}, virtual: true
# List of `Message` structs for creating the conversation with the LLM.
field :messages, {:array, :any}, default: [], virtual: true
# Custom context data made available to tools when executed.
# Could include information like account ID, user data, etc.
field :custom_context, :any, virtual: true
# Track the current merged `%MessageDelta{}` struct received when streamed.
# Set to `nil` when there is no current delta being tracked. This happens
# when the final delta is received that completes the message. At that point,
# the delta is converted to a message and the delta is set to nil.
field :delta, :any, virtual: true
# Track the last `%Message{}` received in the chain.
field :last_message, :any, virtual: true
# Track if the state of the chain expects a response from the LLM. This
# happens after sending a user message or when a tool_call is received, or
# when we've provided a tool response and the LLM needs to respond.
field :needs_response, :boolean, default: false
# A callback function to execute when messages are added. Not settable
# through the `new` function; it is set from the `run` function so that
# multiple chain instances (across processes) don't both fire callbacks.
field :callback_fn, :any, virtual: true
end
@type t :: %LLMChain{}
@create_fields [:llm, :tools, :custom_context, :verbose]
@required_fields [:llm]
@doc """
Start a new LLMChain configuration.
    {:ok, chain} = LLMChain.new(%{
      llm: %ChatOpenAI{model: "gpt-3.5-turbo", stream: true}
    })

    chain = LLMChain.add_message(chain, Message.new_system!("You are a helpful assistant."))
"""
@spec new(attrs :: map()) :: {:ok, t} | {:error, Ecto.Changeset.t()}
def new(attrs \\ %{}) do
%LLMChain{}
|> cast(attrs, @create_fields)
|> common_validation()
|> apply_action(:insert)
end
@doc """
Start a new LLMChain configuration and return it or raise an error if invalid.
    chain = LLMChain.new!(%{
      llm: %ChatOpenAI{model: "gpt-3.5-turbo", stream: true}
    })

    chain = LLMChain.add_message(chain, Message.new_system!("You are a helpful assistant."))
"""
@spec new!(attrs :: map()) :: t() | no_return()
def new!(attrs \\ %{}) do
case new(attrs) do
{:ok, chain} ->
chain
{:error, changeset} ->
raise LangChainError, changeset
end
end
def common_validation(changeset) do
changeset
|> validate_required(@required_fields)
|> Utils.validate_llm_is_struct()
|> build_tools_map_from_tools()
end
@doc false
def build_tools_map_from_tools(changeset) do
tools = get_field(changeset, :tools, [])
# index all of the tools into a map, keyed by the tool's name
fun_map =
Enum.reduce(tools, %{}, fn f, acc ->
Map.put(acc, f.name, f)
end)
put_change(changeset, :_tool_map, fun_map)
end
@doc """
Add one or more tools to an LLMChain.
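
For example, registering a hypothetical custom function as a tool (the exact
return contract of the tool function depends on `LangChain.Function`; a plain
string result is assumed here):

    tool =
      Function.new!(%{
        name: "get_time",
        description: "Returns the current UTC time as a string.",
        function: fn _arguments, _context ->
          # hypothetical tool body
          to_string(DateTime.utc_now())
        end
      })

    chain = LLMChain.add_tools(chain, tool)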
"""
@spec add_tools(t(), Function.t() | [Function.t()]) :: t() | no_return()
def add_tools(%LLMChain{tools: existing} = chain, tools) do
updated = existing ++ List.wrap(tools)
chain
|> change()
|> cast(%{tools: updated}, [:tools])
|> build_tools_map_from_tools()
|> apply_action!(:update)
end
@doc """
Run the chain against the LLM using the chain's messages and any registered
tools. The request is formatted for the configured chat model and the
messages are passed to the API.

When successful, it returns `{:ok, updated_chain, message_or_messages}`.

## Options

- `:while_needs_response` - repeatedly executes tool calls and submits the
  results to the LLM for as long as a response is still expected.
- `:callback_fn` - the callback function to execute as messages are received.
  The `callback_fn` is a function that receives one argument: the LangChain
  structure for the received message or event. It may be a `MessageDelta` or
  a `Message`. Use pattern matching to respond as desired.
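
A sketch of a streaming run with a callback (the callback bodies shown are
illustrative):

    callback = fn
      %MessageDelta{} = delta -> IO.write(delta.content || "")
      %Message{} = message -> IO.inspect(message, label: "COMPLETED MESSAGE")
    end

    {:ok, updated_chain, response} =
      LLMChain.run(chain, while_needs_response: true, callback_fn: callback)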
"""
@spec run(t(), Keyword.t()) :: {:ok, t(), Message.t() | [Message.t()]} | {:error, String.t()}
def run(chain, opts \\ [])
def run(%LLMChain{} = chain, opts) do
# set the callback function on the chain
chain = %LLMChain{chain | callback_fn: Keyword.get(opts, :callback_fn)}
if chain.verbose, do: IO.inspect(chain.llm, label: "LLM")
if chain.verbose, do: IO.inspect(chain.messages, label: "MESSAGES")
tools = chain.tools
if chain.verbose, do: IO.inspect(tools, label: "TOOLS")
if Keyword.get(opts, :while_needs_response, false) do
run_while_needs_response(chain)
else
# run the chain and format the return
case do_run(chain) do
{:ok, chain} ->
{:ok, chain, chain.last_message}
{:error, _reason} = error ->
error
end
end
end
# Repeatedly run the chain while `needs_response` is true. This will execute
# tools and re-submit the tool result to the LLM giving the LLM an
# opportunity to execute more tools or return a response.
@spec run_while_needs_response(t()) :: {:ok, t(), Message.t()} | {:error, String.t()}
defp run_while_needs_response(%LLMChain{needs_response: false} = chain) do
{:ok, chain, chain.last_message}
end
defp run_while_needs_response(%LLMChain{needs_response: true} = chain) do
chain
|> execute_tool_calls()
|> do_run()
|> case do
{:ok, updated_chain} ->
run_while_needs_response(updated_chain)
{:error, reason} ->
{:error, reason}
end
end
# internal reusable function for running the chain
@spec do_run(t()) :: {:ok, t()} | {:error, String.t()}
defp do_run(%LLMChain{} = chain) do
# submit to LLM. The "llm" is a struct. Match to get the name of the module
# then execute the `.call` function on that module.
%module{} = chain.llm
# handle and output response
case module.call(chain.llm, chain.messages, chain.tools, chain.callback_fn) do
{:ok, [%Message{} = message]} ->
if chain.verbose, do: IO.inspect(message, label: "SINGLE MESSAGE RESPONSE")
{:ok, add_message(chain, message)}
{:ok, [%Message{} = message | _others] = messages} ->
if chain.verbose, do: IO.inspect(messages, label: "MULTIPLE MESSAGE RESPONSE")
# Happens when multiple "choices" are requested from the LLM. Only the
# first message is added to the chain.
{:ok, add_message(chain, message)}
{:ok, [%MessageDelta{} | _] = deltas} ->
if chain.verbose_deltas, do: IO.inspect(deltas, label: "DELTA MESSAGE LIST RESPONSE")
updated_chain = apply_deltas(chain, deltas)
if chain.verbose,
do: IO.inspect(updated_chain.last_message, label: "COMBINED DELTA MESSAGE RESPONSE")
{:ok, updated_chain}
{:ok, [[%MessageDelta{} | _] | _] = deltas} ->
if chain.verbose_deltas, do: IO.inspect(deltas, label: "DELTA MESSAGE LIST RESPONSE")
updated_chain = apply_deltas(chain, deltas)
if chain.verbose,
do: IO.inspect(updated_chain.last_message, label: "COMBINED DELTA MESSAGE RESPONSE")
{:ok, updated_chain}
{:ok, %Message{} = message} ->
if chain.verbose,
do: IO.inspect(message, label: "SINGLE MESSAGE RESPONSE NO WRAPPED ARRAY")
{:ok, add_message(chain, message)}
{:error, reason} ->
if chain.verbose, do: IO.inspect(reason, label: "ERROR")
Logger.error("Error during chat call. Reason: #{inspect(reason)}")
{:error, reason}
end
end
@doc """
Update the LLMChain's `custom_context` map. Passing in a `context_update` map
will by default merge the map into the existing `custom_context`.
Use the `:as` option to:
- `:merge` - Merge update changes in. Default.
- `:replace` - Replace the context with the `context_update`.
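
For example (assuming a chain whose `custom_context` started as
`%{user_id: 123}`):

    # merge (default): custom_context becomes %{user_id: 123, account_id: 456}
    chain = LLMChain.update_custom_context(chain, %{account_id: 456})

    # replace: custom_context becomes %{locale: "en"}
    chain = LLMChain.update_custom_context(chain, %{locale: "en"}, as: :replace)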
"""
@spec update_custom_context(t(), context_update :: %{atom() => any()}, opts :: Keyword.t()) ::
t() | no_return()
def update_custom_context(chain, context_update, opts \\ [])
def update_custom_context(
%LLMChain{custom_context: %{} = context} = chain,
%{} = context_update,
opts
) do
new_context =
case Keyword.get(opts, :as) || :merge do
:merge ->
Map.merge(context, context_update)
:replace ->
context_update
other ->
raise LangChain.LangChainError,
"Invalid update_custom_context :as option of #{inspect(other)}"
end
%LLMChain{chain | custom_context: new_context}
end
def update_custom_context(
%LLMChain{custom_context: nil} = chain,
%{} = context_update,
_opts
) do
# can't merge a map with `nil`. Replace it.
%LLMChain{chain | custom_context: context_update}
end
@doc """
Apply a received MessageDelta struct to the chain. The LLMChain tracks the
current merged MessageDelta state. When the final delta is received,
completing the message, the chain clears the `delta`, converts it to a
message, and updates both `last_message` and the list of messages.
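
A sketch of how streamed deltas accumulate (the delta contents, and the use
of `MessageDelta.new!/1` to build them by hand, are illustrative):

    chain = LLMChain.apply_delta(chain, MessageDelta.new!(%{role: :assistant, content: "Hel"}))
    chain = LLMChain.apply_delta(chain, MessageDelta.new!(%{content: "lo!", status: :complete}))
    chain.delta
    #=> nil
    chain.last_message.content
    #=> "Hello!"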
"""
@spec apply_delta(t(), MessageDelta.t()) :: t()
def apply_delta(%LLMChain{delta: nil} = chain, %MessageDelta{} = new_delta) do
%LLMChain{chain | delta: new_delta}
end
def apply_delta(%LLMChain{delta: %MessageDelta{} = delta} = chain, %MessageDelta{} = new_delta) do
merged = MessageDelta.merge_delta(delta, new_delta)
delta_to_message_when_complete(%LLMChain{chain | delta: merged})
end
@doc """
Convert any hanging delta on the chain to a message and append it to the
chain. If the delta is `nil` or not yet complete, the chain is returned
unmodified.
"""
@spec delta_to_message_when_complete(t()) :: t()
def delta_to_message_when_complete(
%LLMChain{delta: %MessageDelta{status: status} = delta} = chain
)
when status in [:complete, :length] do
# it's complete. Attempt to convert delta to a message
case MessageDelta.to_message(delta) do
{:ok, %Message{} = message} ->
fire_callback(chain, message)
add_message(%LLMChain{chain | delta: nil}, message)
{:error, reason} ->
# should not have failed, but it did. Log the error and return
# the chain unmodified.
Logger.warning("Error applying delta message. Reason: #{inspect(reason)}")
chain
end
end
def delta_to_message_when_complete(%LLMChain{} = chain) do
# either no delta or incomplete
chain
end
@doc """
Apply a list of deltas to the chain.
"""
@spec apply_deltas(t(), list()) :: t()
def apply_deltas(%LLMChain{} = chain, deltas) when is_list(deltas) do
deltas
|> List.flatten()
|> Enum.reduce(chain, fn d, acc -> apply_delta(acc, d) end)
end
@doc """
Add a received Message struct to the chain. The LLMChain tracks the
`last_message` received and the complete list of messages exchanged. Depending
on the message role, the chain may be in a pending or incomplete state where
a response from the LLM is anticipated.
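
For example, adding a user message flags that a response is expected:

    chain = LLMChain.add_message(chain, Message.new_user!("What is ElixirConf?"))
    chain.needs_response
    #=> true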
"""
@spec add_message(t(), Message.t()) :: t()
def add_message(%LLMChain{} = chain, %Message{} = new_message) do
needs_response =
cond do
new_message.role in [:user, :tool] -> true
Message.is_tool_call?(new_message) -> true
new_message.role in [:system, :assistant] -> false
end
%LLMChain{
chain
| messages: chain.messages ++ [new_message],
last_message: new_message,
needs_response: needs_response
}
end
@doc """
Add a set of Message structs to the chain. This enables quickly building a chain
for submitting to an LLM.
"""
@spec add_messages(t(), [Message.t()]) :: t()
def add_messages(%LLMChain{} = chain, messages) do
Enum.reduce(messages, chain, fn msg, acc ->
add_message(acc, msg)
end)
end
@doc """
Apply a set of PromptTemplates to the chain. The list of templates can also
include plain `Message` structs that need no rendering. Provide the inputs
to apply to the templates when rendering them as messages. The prepared
messages are added to the chain.
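
A sketch (assuming `PromptTemplate.new!/1` and its EEx-style `<%= @subject %>`
substitution):

    templates = [
      Message.new_system!("You are a helpful assistant."),
      PromptTemplate.new!(%{role: :user, text: "Tell me about <%= @subject %>."})
    ]

    chain = LLMChain.apply_prompt_templates(chain, templates, %{subject: "Elixir"})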
"""
@spec apply_prompt_templates(t(), [Message.t() | PromptTemplate.t()], %{atom() => any()}) ::
t() | no_return()
def apply_prompt_templates(%LLMChain{} = chain, templates, %{} = inputs) do
messages = PromptTemplate.to_messages!(templates, inputs)
add_messages(chain, messages)
end
@doc """
Convenience function for setting the prompt text for the LLMChain using
prepared text.
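
For example:

    {:ok, updated_chain, response} =
      chain
      |> LLMChain.quick_prompt("Name the capital of France.")
      |> LLMChain.run()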
"""
@spec quick_prompt(t(), String.t()) :: t()
def quick_prompt(%LLMChain{} = chain, text) do
messages = [
Message.new_system!(),
Message.new_user!(text)
]
add_messages(chain, messages)
end
@doc """
If the `last_message` from the Assistant includes one or more `ToolCall`s,
the linked tools are executed. If there is no `last_message`, or the
`last_message` is not a `tool_call`, the LLMChain is returned with no action
performed. This makes it safe to call at any time.

The `context` is additional data that will be passed to the executed tools.
A value given here overrides any `custom_context` set on the LLMChain. If
not set, the chain's `custom_context` is used.
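
A sketch of the manual tool-execution loop (passing `:while_needs_response`
to `run/2` performs these steps automatically; the context map shown is
illustrative):

    {:ok, chain, _response} = LLMChain.run(chain)

    # If the response contained tool calls, execute them and submit the
    # results back to the LLM.
    chain = LLMChain.execute_tool_calls(chain, %{user_id: 123})
    {:ok, chain, response} = LLMChain.run(chain)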
"""
@spec execute_tool_calls(t(), context :: nil | %{atom() => any()}) :: t()
def execute_tool_calls(chain, context \\ nil)
def execute_tool_calls(%LLMChain{last_message: nil} = chain, _context), do: chain
def execute_tool_calls(
%LLMChain{last_message: %Message{} = message} = chain,
context
) do
if Message.is_tool_call?(message) do
# context to use
use_context = context || chain.custom_context
verbose = chain.verbose
# Group the tool calls for execution, accumulated into a map. Stored as
# %{async: [{call, func}], sync: [{call, func}], invalid: [{call, nil}]}
grouped =
Enum.reduce(message.tool_calls, %{async: [], sync: [], invalid: []}, fn call, acc ->
case chain._tool_map[call.name] do
%Function{async: true} = func ->
Map.put(acc, :async, acc.async ++ [{call, func}])
%Function{async: false} = func ->
Map.put(acc, :sync, acc.sync ++ [{call, func}])
# invalid tool call
nil ->
Map.put(acc, :invalid, acc.invalid ++ [{call, nil}])
end
end)
# execute all the async calls concurrently. Task.await_many/1 returns the
# results in order.
async_results =
grouped[:async]
|> Enum.map(fn {call, func} ->
Task.async(fn ->
execute_tool_call(call, func, verbose: verbose, context: use_context)
end)
end)
|> Task.await_many()
sync_results =
Enum.map(grouped[:sync], fn {call, func} ->
execute_tool_call(call, func, verbose: verbose, context: use_context)
end)
# log invalid tool calls and return error ToolResults for them
invalid_calls =
Enum.map(grouped[:invalid], fn {call, _} ->
text = "Tool call made to #{call.name} but tool not found"
Logger.warning(text)
ToolResult.new!(%{tool_call_id: call.call_id, content: text, is_error: true})
end)
combined_results = async_results ++ sync_results ++ invalid_calls
# create a single tool message that contains all the tool results
message =
Message.new_tool_result!(%{content: message.content, tool_results: combined_results})
if chain.verbose, do: IO.inspect(message, label: "TOOL RESULTS")
fire_callback(chain, message)
# add the single tool response message to the chain
LLMChain.add_message(chain, message)
else
# Not a complete tool call
chain
end
end
@doc """
Execute the tool call using the matched function. Returns a `ToolResult`
containing the function's response or error.
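
Mostly used internally by `execute_tool_calls/2`, but it can be called
directly (here `call` is a `ToolCall`, `function` is the matched `Function`,
and the context map is illustrative):

    %ToolResult{} = LLMChain.execute_tool_call(call, function, context: %{user_id: 123})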
"""
@spec execute_tool_call(ToolCall.t(), Function.t(), Keyword.t()) :: ToolResult.t()
def execute_tool_call(%ToolCall{} = call, %Function{} = function, opts \\ []) do
verbose = Keyword.get(opts, :verbose, false)
context = Keyword.get(opts, :context, nil)
try do
if verbose, do: IO.inspect(function.name, label: "EXECUTING FUNCTION")
case Function.execute(function, call.arguments, context) do
{:ok, result} ->
if verbose, do: IO.inspect(result, label: "FUNCTION RESULT")
# successful execution.
ToolResult.new!(%{
tool_call_id: call.call_id,
content: result,
name: function.name,
display_text: function.display_text
})
{:error, reason} when is_binary(reason) ->
if verbose, do: IO.inspect(reason, label: "FUNCTION ERROR")
ToolResult.new!(%{
tool_call_id: call.call_id,
content: reason,
name: function.name,
display_text: function.display_text,
is_error: true
})
end
rescue
err ->
Logger.error("Function #{function.name} failed in execution. Exception: #{inspect(err)}")
ToolResult.new!(%{
tool_call_id: call.call_id,
content: "ERROR executing tool: #{inspect(err)}",
is_error: true
})
end
end
# Fire the callback if set.
defp fire_callback(%LLMChain{callback_fn: nil}, _data), do: :ok
# Execute the callback function for each item of data.
defp fire_callback(%LLMChain{callback_fn: callback_fn}, data) when is_function(callback_fn) do
data
|> List.wrap()
|> List.flatten()
|> Enum.each(fn item -> callback_fn.(item) end)
:ok
end
@doc """
Remove an in-progress MessageDelta from the chain's `delta`, convert it to a
Message with the desired status, and add it to the chain.
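
For example, when a user stops a streaming response part way through
(assuming `:cancelled` is a supported message status):

    chain = LLMChain.cancel_delta(chain, :cancelled)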
"""
def cancel_delta(%LLMChain{delta: nil} = chain, _message_status), do: chain
def cancel_delta(%LLMChain{delta: delta} = chain, message_status) do
# remove the in-progress delta
updated_chain = %LLMChain{chain | delta: nil}
case MessageDelta.to_message(%MessageDelta{delta | status: :complete}) do
{:ok, message} ->
message = %Message{message | status: message_status}
add_message(updated_chain, message)
{:error, reason} ->
Logger.error("Error attempting to cancel_delta. Reason: #{inspect(reason)}")
chain
end
end
end