@@ -56,43 +56,34 @@ def _create_graph(self) -> BaseGraph:
 
         fetch_node = FetchNode(
             input="url | local_dir",
-            output=["doc"],
-            node_config={
-                "headless": self.headless,
-                "verbose": self.verbose
-            }
+            output=["doc"]
         )
         parse_node = ParseNode(
             input="doc",
             output=["parsed_doc"],
             node_config={
-                "chunk_size": self.model_token,
-                "verbose": self.verbose
+                "chunk_size": self.model_token
             }
         )
         rag_node = RAGNode(
             input="user_prompt & (parsed_doc | doc)",
             output=["relevant_chunks"],
             node_config={
-                "llm": self.llm_model,
-                "embedder_model": self.embedder_model,
-                "verbose": self.verbose
-            }
+                "llm_model": self.llm_model,
+                "embedder_model": self.embedder_model}
         )
         generate_answer_node = GenerateAnswerNode(
             input="user_prompt & (relevant_chunks | parsed_doc | doc)",
             output=["answer"],
             node_config={
-                "llm": self.llm_model,
-                "verbose": self.verbose
+                "llm_model": self.llm_model
             }
         )
         text_to_speech_node = TextToSpeechNode(
             input="answer",
             output=["audio"],
             node_config={
-                "tts_model": OpenAITextToSpeech(self.config["tts_model"]),
-                "verbose": self.verbose
+                "tts_model": OpenAITextToSpeech(self.config["tts_model"])
             }
         )
 
@@ -131,4 +122,4 @@ def run(self) -> str:
             "output_path", "output.mp3"))
         print(f"Audio saved to {self.config.get('output_path', 'output.mp3')}")
 
-        return self.final_state.get("answer", "No answer found.")
+        return self.final_state.get("answer", "No answer found.")
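For reference, a minimal usage sketch of the graph this file defines (presumably ScrapeGraphAI's `SpeechGraph`, given the fetch → parse → RAG → answer → text-to-speech pipeline). The `tts_model` and `output_path` config keys appear in the diff above; the `llm` key, the constructor arguments, and the model/voice names are illustrative assumptions, not confirmed API:

```python
# Minimal sketch, assuming the class is SpeechGraph taking (prompt, source, config).
# "tts_model" and "output_path" are read by the code in this diff; the "llm" key
# and the model/voice names below are assumptions for illustration.
from scrapegraphai.graphs import SpeechGraph

graph_config = {
    "llm": {
        "api_key": "YOUR_OPENAI_API_KEY",  # assumption: OpenAI-style LLM config
        "model": "gpt-3.5-turbo",
    },
    "tts_model": {
        # self.config["tts_model"] is handed to OpenAITextToSpeech (see diff)
        "api_key": "YOUR_OPENAI_API_KEY",
        "model": "tts-1",
        "voice": "alloy",
    },
    "output_path": "website_summary.mp3",  # run() saves the audio here (see diff)
}

speech_graph = SpeechGraph(
    prompt="Summarize this page in a short spoken paragraph.",
    source="https://example.com",
    config=graph_config,
)

# run() executes the node pipeline and returns the generated answer string;
# the MP3 is written to output_path.
print(speech_graph.run())
```

Note that this commit drops the per-node `"verbose"` flags from every `node_config` and renames the `"llm"` key to `"llm_model"` for `RAGNode` and `GenerateAnswerNode`; top-level usage as above is unaffected, but any code wiring these nodes directly must pass `"llm_model"`.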