@@ -114,24 +114,26 @@ def execute(self, state):
 
         format_instructions = output_parser.get_format_instructions()
 
+        if len(doc) == 1:
+            prompt = PromptTemplate(
+                template=template_no_chunks_pdf_prompt,
+                input_variables=["question"],
+                partial_variables={
122+ "context" :chunk ,
123+ "format_instructions" : format_instructions ,
124+ },
125+ )
126+ chain = prompt | self .llm_model | output_parser
127+ answer = chain .invoke ({"question" : user_prompt })
128+
129+
130+ state .update ({self .output [0 ]: answer })
131+ return state
132+
117133 chains_dict = {}
118134 # Use tqdm to add progress bar
119135 for i , chunk in enumerate (
120- tqdm (doc , desc = "Processing chunks" , disable = not self .verbose )
121- ):
122- if len (doc ) == 1 :
123- prompt = PromptTemplate (
124- template = template_no_chunks_pdf_prompt ,
125- input_variables = ["question" ],
126- partial_variables = {
127- "context" :chunk ,
128- "format_instructions" : format_instructions ,
129- },
130- )
131- chain = prompt | self .llm_model | output_parser
132- answer = chain .invoke ({"question" : user_prompt })
133-
134- break
136+ tqdm (doc , desc = "Processing chunks" , disable = not self .verbose )):
135137 prompt = PromptTemplate (
136138 template = template_chunks_pdf_prompt ,
137139 input_variables = ["question" ],
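For reference, the relocated single-document branch follows the usual LangChain pattern: bind the fixed inputs (`context`, `format_instructions`) as `partial_variables`, leave `question` as the only runtime variable, and pipe prompt, model, and parser together. Below is a minimal, self-contained sketch of that pattern; `FakeListLLM`, the inline template, and the literal context string are stand-ins for `self.llm_model`, `template_no_chunks_pdf_prompt`, and the parsed document, none of which appear in this diff.

```python
from langchain_core.language_models import FakeListLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate

# Stand-ins for self.llm_model and the node's output parser (assumptions,
# not the node's real objects).
llm = FakeListLLM(responses=["The document is a two-page invoice."])
output_parser = StrOutputParser()

prompt = PromptTemplate(
    template="Context: {context}\n{format_instructions}\nQuestion: {question}",
    input_variables=["question"],  # only the question varies per call
    partial_variables={
        "context": "full text of the single parsed document",
        "format_instructions": "Answer in one short sentence.",
    },
)

# Prompt -> model -> parser: the same pipe used in the branch above.
chain = prompt | llm | output_parser
print(chain.invoke({"question": "What kind of document is this?"}))
```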
@@ -146,20 +148,18 @@ def execute(self, state):
             chain_name = f"chunk{i+1}"
             chains_dict[chain_name] = prompt | self.llm_model | output_parser
 
-        if len(chains_dict) > 1:
-            # Use dictionary unpacking to pass the dynamically named chains to RunnableParallel
-            map_chain = RunnableParallel(**chains_dict)
-            # Chain
-            answer = map_chain.invoke({"question": user_prompt})
-            # Merge the answers from the chunks
-            merge_prompt = PromptTemplate(
-                template=template_merge_pdf_prompt,
-                input_variables=["context", "question"],
-                partial_variables={"format_instructions": format_instructions},
-            )
-            merge_chain = merge_prompt | self.llm_model | output_parser
-            answer = merge_chain.invoke({"context": answer, "question": user_prompt})
+        # Use dictionary unpacking to pass the dynamically named chains to RunnableParallel
+        map_chain = RunnableParallel(**chains_dict)
+        # Chain
+        answer = map_chain.invoke({"question": user_prompt})
+        # Merge the answers from the chunks
+        merge_prompt = PromptTemplate(
+            template=template_merge_pdf_prompt,
+            input_variables=["context", "question"],
+            partial_variables={"format_instructions": format_instructions},
+        )
+        merge_chain = merge_prompt | self.llm_model | output_parser
+        answer = merge_chain.invoke({"context": answer, "question": user_prompt})
 
-        # Update the state with the generated answer
         state.update({self.output[0]: answer})
         return state
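To see the fan-out/merge shape the dedented block implements, here is a runnable sketch under the same assumptions as above (`FakeListLLM` and inline templates replace the node's model and PDF prompts). `RunnableParallel(**chains_dict)` invokes every chunk chain with the same input and returns a dict keyed by chain name, which the merge chain then folds into one answer. The synchronous `invoke` is the right call in this synchronous `execute`; `ainvoke` would return a coroutine that has to be awaited.

```python
from langchain_core.language_models import FakeListLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel

# Stand-ins for self.llm_model and the chunk/merge templates (assumptions).
llm = FakeListLLM(
    responses=["answer from a chunk", "answer from a chunk", "merged answer"]
)
output_parser = StrOutputParser()
doc = ["text of chunk one", "text of chunk two"]

# One chain per chunk, keyed chunk1, chunk2, ... as in the node.
chains_dict = {}
for i, chunk in enumerate(doc):
    prompt = PromptTemplate(
        template="Context: {context}\nQuestion: {question}",
        input_variables=["question"],
        partial_variables={"context": chunk},
    )
    chains_dict[f"chunk{i+1}"] = prompt | llm | output_parser

# Fan the same question out to every chunk chain in parallel;
# the result is {"chunk1": "...", "chunk2": "..."}.
map_chain = RunnableParallel(**chains_dict)
answer = map_chain.invoke({"question": "What do the chunks say?"})

# Fold the per-chunk answers into one final response.
merge_prompt = PromptTemplate(
    template="Combine these partial answers: {context}\nQuestion: {question}",
    input_variables=["context", "question"],
)
merge_chain = merge_prompt | llm | output_parser
print(merge_chain.invoke({"context": answer, "question": "What do the chunks say?"}))
```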