|
1 | | -""" |
2 | | -generate_answer_node module |
3 | | -""" |
4 | 1 | from typing import List, Optional |
5 | 2 | from langchain.prompts import PromptTemplate |
6 | 3 | from langchain_core.output_parsers import JsonOutputParser |
|
@@ -18,26 +15,6 @@
 )
 
 class GenerateAnswerNode(BaseNode):
-    """
-    Initializes the GenerateAnswerNode class.
-
-    Args:
-        input (str): The input data type for the node.
-        output (List[str]): The output data type(s) for the node.
-        node_config (Optional[dict]): Configuration dictionary for the node,
-            which includes the LLM model, verbosity, schema, and other settings.
-            Defaults to None.
-        node_name (str): The name of the node. Defaults to "GenerateAnswer".
-
-    Attributes:
-        llm_model: The language model specified in the node configuration.
-        verbose (bool): Whether verbose mode is enabled.
-        force (bool): Whether to force certain behaviors, overriding defaults.
-        script_creator (bool): Whether the node is in script creation mode.
-        is_md_scraper (bool): Whether the node is scraping markdown data.
-        additional_info (Optional[str]): Any additional information to be
-            included in the prompt templates.
-    """
     def __init__(
         self,
         input: str,
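
The removed docstring still describes the constructor contract. For reference, a hypothetical instantiation assembled from its Args section; the import path, input expression, config keys, and model object are illustrative assumptions, not values taken from this diff:

from scrapegraphai.nodes import GenerateAnswerNode  # assumed package layout
from langchain_openai import ChatOpenAI  # stand-in model for illustration

llm_model = ChatOpenAI()  # the "llm_model" attribute from the docstring

# The input expression and node_config keys below are assumptions for
# illustration, following the removed docstring's descriptions.
node = GenerateAnswerNode(
    input="user_prompt & (relevant_chunks | parsed_doc | doc)",
    output=["answer"],
    node_config={
        "llm_model": llm_model,
        "verbose": True,   # verbose mode toggle
        "schema": None,    # optional output schema
    },
    node_name="GenerateAnswer",
)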
@@ -113,7 +90,7 @@ def execute(self, state: dict) -> dict: |
         chain = prompt | self.llm_model
         if output_parser:
             chain = chain | output_parser
-        answer = chain.invoke({"question": user_prompt})
+        answer = await chain.ainvoke({"question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
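
Because `ainvoke` returns a coroutine, the call must be awaited and the enclosing `execute` declared `async def`; the hunk header above still shows the pre-change synchronous signature, as git takes that context from the old file. A minimal, self-contained sketch of the same sync-to-async switch, using a stand-in model and prompt rather than this repository's code:

import asyncio
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI  # stand-in model for illustration

prompt = PromptTemplate.from_template(
    'Answer in JSON with a single key "answer": {question}'
)
chain = prompt | ChatOpenAI() | JsonOutputParser()

# Old, synchronous path:
#     answer = chain.invoke({"question": "..."})

# New, asynchronous path: ainvoke returns a coroutine and must be awaited,
# otherwise state would hold a coroutine object instead of the answer.
async def main() -> None:
    answer = await chain.ainvoke({"question": "What does ainvoke return?"})
    print(answer)

asyncio.run(main())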
@@ -144,7 +121,7 @@ def execute(self, state: dict) -> dict: |
         merge_chain = merge_prompt | self.llm_model
         if output_parser:
             merge_chain = merge_chain | output_parser
-        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
+        answer = await merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
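
The merge step follows the same pattern with two template variables supplied in a single dict. A hedged sketch of that call; only the keys "context" and "question" come from the changed line, while the prompt text and model are stand-ins:

from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # stand-in model for illustration

merge_prompt = PromptTemplate.from_template(
    "Combine these partial answers:\n{context}\n\nOriginal question: {question}"
)
merge_chain = merge_prompt | ChatOpenAI()  # optionally: | output_parser

async def merge_answers(batch_results: list, user_prompt: str) -> str:
    # Both template variables are filled from one dict, as on the "+" line.
    result = await merge_chain.ainvoke(
        {"context": batch_results, "question": user_prompt}
    )
    return result.content  # AIMessage -> text when no parser is attached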