Skip to content

Commit 257f393

Browse files
committed
feat: async invocation
1 parent 3b7b701 commit 257f393

17 files changed

+28
-51
lines changed

scrapegraphai/builders/graph_builder.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ def build_graph(self):
120120
Returns:
121121
dict: A JSON representation of the graph configuration.
122122
"""
123-
return self.chain.invoke(self.prompt)
123+
return self.chain.ainvoke(self.prompt)
124124

125125
@staticmethod
126126
def convert_json_to_graphviz(json_data, format: str = 'pdf'):

scrapegraphai/nodes/generate_answer_csv_node.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def execute(self, state):
126126
)
127127

128128
chain = prompt | self.llm_model | output_parser
129-
answer = chain.invoke({"question": user_prompt})
129+
answer = chain.ainvoke({"question": user_prompt})
130130
state.update({self.output[0]: answer})
131131
return state
132132

@@ -157,7 +157,7 @@ def execute(self, state):
157157
)
158158

159159
merge_chain = merge_prompt | self.llm_model | output_parser
160-
answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
160+
answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
161161

162162
state.update({self.output[0]: answer})
163163
return state

scrapegraphai/nodes/generate_answer_node.py

Lines changed: 2 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,3 @@
1-
"""
2-
generate_answer_node module
3-
"""
41
from typing import List, Optional
52
from langchain.prompts import PromptTemplate
63
from langchain_core.output_parsers import JsonOutputParser
@@ -18,26 +15,6 @@
1815
)
1916

2017
class GenerateAnswerNode(BaseNode):
21-
"""
22-
Initializes the GenerateAnswerNode class.
23-
24-
Args:
25-
input (str): The input data type for the node.
26-
output (List[str]): The output data type(s) for the node.
27-
node_config (Optional[dict]): Configuration dictionary for the node,
28-
which includes the LLM model, verbosity, schema, and other settings.
29-
Defaults to None.
30-
node_name (str): The name of the node. Defaults to "GenerateAnswer".
31-
32-
Attributes:
33-
llm_model: The language model specified in the node configuration.
34-
verbose (bool): Whether verbose mode is enabled.
35-
force (bool): Whether to force certain behaviors, overriding defaults.
36-
script_creator (bool): Whether the node is in script creation mode.
37-
is_md_scraper (bool): Whether the node is scraping markdown data.
38-
additional_info (Optional[str]): Any additional information to be
39-
included in the prompt templates.
40-
"""
4118
def __init__(
4219
self,
4320
input: str,
@@ -113,7 +90,7 @@ def execute(self, state: dict) -> dict:
11390
chain = prompt | self.llm_model
11491
if output_parser:
11592
chain = chain | output_parser
116-
answer = chain.invoke({"question": user_prompt})
93+
answer = chain.ainvoke({"question": user_prompt})
11794

11895
state.update({self.output[0]: answer})
11996
return state
@@ -144,7 +121,7 @@ def execute(self, state: dict) -> dict:
144121
merge_chain = merge_prompt | self.llm_model
145122
if output_parser:
146123
merge_chain = merge_chain | output_parser
147-
answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
124+
answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
148125

149126
state.update({self.output[0]: answer})
150127
return state

scrapegraphai/nodes/generate_answer_node_k_level.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ def execute(self, state: dict) -> dict:
143143
merge_chain = merge_prompt | self.llm_model
144144
if output_parser:
145145
merge_chain = merge_chain | output_parser
146-
answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
146+
answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
147147

148148
state["answer"] = answer
149149

scrapegraphai/nodes/generate_answer_omni_node.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ def execute(self, state: dict) -> dict:
121121
)
122122

123123
chain = prompt | self.llm_model | output_parser
124-
answer = chain.invoke({"question": user_prompt})
124+
answer = chain.ainvoke({"question": user_prompt})
125125

126126
state.update({self.output[0]: answer})
127127
return state
@@ -154,7 +154,7 @@ def execute(self, state: dict) -> dict:
154154
)
155155

156156
merge_chain = merge_prompt | self.llm_model | output_parser
157-
answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
157+
answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
158158

159159
state.update({self.output[0]: answer})
160160
return state

scrapegraphai/nodes/generate_answer_pdf_node.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ def execute(self, state):
128128
},
129129
)
130130
chain = prompt | self.llm_model | output_parser
131-
answer = chain.invoke({"question": user_prompt})
131+
answer = chain.ainvoke({"question": user_prompt})
132132

133133

134134
state.update({self.output[0]: answer})
@@ -162,7 +162,7 @@ def execute(self, state):
162162
)
163163

164164
merge_chain = merge_prompt | self.llm_model | output_parser
165-
answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
165+
answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
166166

167167
state.update({self.output[0]: answer})
168168
return state

scrapegraphai/nodes/generate_code_node.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -325,7 +325,7 @@ def generate_initial_code(self, state: dict) -> str:
325325
output_parser = StrOutputParser()
326326

327327
chain = prompt | self.llm_model | output_parser
328-
generated_code = chain.invoke({})
328+
generated_code = chain.ainvoke({})
329329
return generated_code
330330

331331
def semantic_comparison(self, generated_result: Any, reference_result: Any) -> Dict[str, Any]:
@@ -368,7 +368,7 @@ def semantic_comparison(self, generated_result: Any, reference_result: Any) -> D
368368
)
369369

370370
chain = prompt | self.llm_model | output_parser
371-
return chain.invoke({
371+
return chain.ainvoke({
372372
"generated_result": json.dumps(generated_result, indent=2),
373373
"reference_result": json.dumps(reference_result_dict, indent=2)
374374
})

scrapegraphai/nodes/generate_scraper_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ def execute(self, state: dict) -> dict:
131131
)
132132
map_chain = prompt | self.llm_model | StrOutputParser()
133133

134-
answer = map_chain.invoke({"question": user_prompt})
134+
answer = map_chain.ainvoke({"question": user_prompt})
135135

136136
state.update({self.output[0]: answer})
137137
return state

scrapegraphai/nodes/html_analyzer_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def execute(self, state: dict) -> dict:
9393
output_parser = StrOutputParser()
9494

9595
chain = prompt | self.llm_model | output_parser
96-
html_analysis = chain.invoke({})
96+
html_analysis = chain.ainvoke({})
9797

9898
state.update({self.output[0]: html_analysis, self.output[1]: reduced_html})
9999
return state

scrapegraphai/nodes/merge_answers_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ def execute(self, state: dict) -> dict:
9595
)
9696

9797
merge_chain = prompt_template | self.llm_model | output_parser
98-
answer = merge_chain.invoke({"user_prompt": user_prompt})
98+
answer = merge_chain.ainvoke({"user_prompt": user_prompt})
9999
answer["sources"] = state.get("urls", [])
100100

101101
state.update({self.output[0]: answer})

0 commit comments

Comments (0)