11"""
2- generate_answer_node module
2+ GenerateAnswerNode Module
33"""
44from typing import List , Optional
55from langchain .prompts import PromptTemplate
1919
2020class GenerateAnswerNode (BaseNode ):
2121 """
22- Initializes the GenerateAnswerNode class.
23-
24- Args:
25- input (str): The input data type for the node.
26- output (List[str]): The output data type(s) for the node.
27- node_config (Optional[dict]): Configuration dictionary for the node,
28- which includes the LLM model, verbosity, schema, and other settings.
29- Defaults to None.
30- node_name (str): The name of the node. Defaults to "GenerateAnswer".
31-
32- Attributes:
33- llm_model: The language model specified in the node configuration.
34- verbose (bool): Whether verbose mode is enabled.
35- force (bool): Whether to force certain behaviors, overriding defaults.
36- script_creator (bool): Whether the node is in script creation mode.
37- is_md_scraper (bool): Whether the node is scraping markdown data.
38- additional_info (Optional[str]): Any additional information to be
39- included in the prompt templates.
22+ Initializes the GenerateAnswerNode class.
23+
24+ Args:
25+ input (str): The input data type for the node.
26+ output (List[str]): The output data type(s) for the node.
27+ node_config (Optional[dict]): Configuration dictionary for the node,
28+ which includes the LLM model, verbosity, schema, and other settings.
29+ Defaults to None.
30+ node_name (str): The name of the node. Defaults to "GenerateAnswer".
31+
32+ Attributes:
33+ llm_model: The language model specified in the node configuration.
34+ verbose (bool): Whether verbose mode is enabled.
35+ force (bool): Whether to force certain behaviors, overriding defaults.
36+ script_creator (bool): Whether the node is in script creation mode.
37+ is_md_scraper (bool): Whether the node is scraping markdown data.
38+ additional_info (Optional[str]): Any additional information to be
39+ included in the prompt templates.
4040 """
4141 def __init__ (
4242 self ,
@@ -57,7 +57,17 @@ def __init__(
         self.is_md_scraper = node_config.get("is_md_scraper", False)
         self.additional_info = node_config.get("additional_info")

-    def execute(self, state: dict) -> dict:
+    async def execute(self, state: dict) -> dict:
+        """
+        Executes the GenerateAnswerNode.
+
+        Args:
+            state (dict): The current state of the graph. The input keys will be used
+                to fetch the correct data from the state.
+
+        Returns:
+            dict: The updated state with the output key containing the generated answer.
+        """
         self.logger.info(f"--- Executing {self.node_name} Node ---")

         input_keys = self.get_input_keys(state)
@@ -113,7 +123,7 @@ def execute(self, state: dict) -> dict:
         chain = prompt | self.llm_model
         if output_parser:
             chain = chain | output_parser
-        answer = chain.invoke({"question": user_prompt})
+        answer = await chain.ainvoke({"question": user_prompt})

         state.update({self.output[0]: answer})
         return state
@@ -133,7 +143,7 @@ def execute(self, state: dict) -> dict:
             chains_dict[chain_name] = chains_dict[chain_name] | output_parser

         async_runner = RunnableParallel(**chains_dict)
-        batch_results = async_runner.invoke({"question": user_prompt})
+        batch_results = await async_runner.ainvoke({"question": user_prompt})

         merge_prompt = PromptTemplate(
             template=template_merge_prompt,
@@ -144,7 +154,7 @@ def execute(self, state: dict) -> dict:
         merge_chain = merge_prompt | self.llm_model
         if output_parser:
             merge_chain = merge_chain | output_parser
-        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
+        answer = await merge_chain.ainvoke({"context": batch_results, "question": user_prompt})

         state.update({self.output[0]: answer})
         return state
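The change above is the standard LangChain sync-to-async migration: every Runnable exposes both invoke and ainvoke, so the prompt-building code is untouched and only the call sites gain await. The sketch below illustrates the same shape as the node's multi-chunk branch, fanning per-chunk chains out through RunnableParallel.ainvoke and merging the partial answers with a final awaited call. It is a minimal illustration under assumptions, not the node's real code: the prompt texts, the answer() helper, and the ChatOpenAI model are placeholders.

    import asyncio

    from langchain.prompts import PromptTemplate
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.runnables import RunnableParallel
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini")  # placeholder model
    parser = StrOutputParser()

    async def answer(question: str, chunks: list[str]) -> str:
        # One chain per chunk, mirroring GenerateAnswerNode's chains_dict.
        chains_dict = {
            f"chunk{i}": PromptTemplate(
                template="Context:\n{context}\n\nQuestion: {question}",
                input_variables=["question"],
                partial_variables={"context": chunk},
            ) | llm | parser
            for i, chunk in enumerate(chunks)
        }

        # ainvoke() awaits all per-chunk chains concurrently; the old
        # invoke() ran them on a thread pool and blocked until done.
        async_runner = RunnableParallel(**chains_dict)
        batch_results = await async_runner.ainvoke({"question": question})

        # Merge the partial answers with one final (also awaited) call.
        merge_prompt = PromptTemplate(
            template="Merge these partial answers:\n{context}\n\nQuestion: {question}",
            input_variables=["context", "question"],
        )
        merge_chain = merge_prompt | llm | parser
        return await merge_chain.ainvoke(
            {"context": batch_results, "question": question}
        )

    # asyncio.run(answer("What is the page about?", ["chunk one", "chunk two"]))

One knock-on effect of the signature change: execute is now a coroutine, so any caller outside an event loop has to drive it with something like asyncio.run(node.execute(state)) rather than calling it directly.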