"""
PromptRefinerNode Module
"""
from typing import List, Optional
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from ..prompts import TEMPLATE_REFINER, TEMPLATE_REFINER_WITH_CONTEXT
from ..utils import transform_schema
from .base_node import BaseNode


class PromptRefinerNode(BaseNode):
    """
    A node that refines the user prompt using the output schema and any additional
    context, producing a precise prompt for subsequent steps that explicitly links
    elements of the user's original input to their corresponding representations
    in the JSON schema.

    Attributes:
        llm_model: An instance of a language model client, configured for generating answers.
        verbose (bool): A flag indicating whether to show print statements during execution.

    Args:
        input (str): Boolean expression defining the input keys needed from the state.
        output (List[str]): List of output keys to be updated in the state.
        node_config (dict): Additional configuration for the node.
        node_name (str): The unique identifier name for the node, defaulting to "PromptRefiner".
    """

    def __init__(
        self,
        input: str,
        output: List[str],
        node_config: Optional[dict] = None,
        node_name: str = "PromptRefiner",
    ):
        super().__init__(node_name, "node", input, output, 2, node_config)

        # node_config is effectively required: it must supply the llm_model client.
        self.llm_model = node_config["llm_model"]

        # Ollama chat models need the format hint to reliably emit JSON.
        if isinstance(self.llm_model, ChatOllama):
            self.llm_model.format = "json"

        self.verbose = node_config.get("verbose", False)
        self.force = node_config.get("force", False)
        self.script_creator = node_config.get("script_creator", False)
        self.is_md_scraper = node_config.get("is_md_scraper", False)
        self.additional_info = node_config.get("additional_info")
        self.output_schema = node_config.get("schema")

    def execute(self, state: dict) -> dict:
        """
        Generate a refined prompt using the user's prompt, the schema, and additional context.

        Args:
            state (dict): The current state of the graph. The input keys will be used
                to fetch the correct data from the state.

        Returns:
            dict: The updated state with the output key containing the refined prompt.

        Raises:
            KeyError: If the input keys are not found in the state, indicating
                that the necessary information for generating an answer is missing.
        """
        self.logger.info(f"--- Executing {self.node_name} Node ---")

        user_prompt = state["user_prompt"]

        # Flatten the Pydantic output schema into a plain structure the LLM can read.
        self.simplified_schema = transform_schema(self.output_schema.schema())
        if self.additional_info is not None:
            prompt = PromptTemplate(
                template=TEMPLATE_REFINER_WITH_CONTEXT,
                partial_variables={
                    "user_input": user_prompt,
                    "json_schema": str(self.simplified_schema),
                    "additional_context": self.additional_info,
                },
            )
        else:
            prompt = PromptTemplate(
                template=TEMPLATE_REFINER,
                partial_variables={
                    "user_input": user_prompt,
                    "json_schema": str(self.simplified_schema),
                },
            )
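        # Every template variable is already bound via partial_variables, so the
        # LCEL chain below is invoked with an empty input dict.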
        output_parser = StrOutputParser()
        chain = prompt | self.llm_model | output_parser
        refined_prompt = chain.invoke({})

        state.update({self.output[0]: refined_prompt})
        return state
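

# Usage sketch (illustrative only): a minimal example of wiring the node into a
# graph state. The `Product` schema, the Ollama model name, and the state keys
# below are assumptions for demonstration, not part of this module.
#
#     from pydantic import BaseModel
#     from langchain_community.chat_models import ChatOllama
#
#     class Product(BaseModel):  # hypothetical output schema
#         name: str
#         price: float
#
#     node = PromptRefinerNode(
#         input="user_prompt",
#         output=["refined_prompt"],
#         node_config={
#             "llm_model": ChatOllama(model="llama3"),
#             "schema": Product,
#             "verbose": True,
#         },
#     )
#     state = node.execute({"user_prompt": "Extract each product's name and price."})
#     print(state["refined_prompt"])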