@@ -20,7 +20,6 @@
     Approach,
     DataPoints,
     ExtraInfo,
-    LLMInputType,
     ThoughtStep,
 )
 from approaches.promptmanager import PromptManager
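Note: the dropped LLMInputType import pairs with the removal of the enum-driven source selection in the hunks below. For reference, a sketch of the mapping the enum used to drive; the member names come from the removed lines, but the string values and the definition itself are assumptions:

from enum import Enum

class LLMInputType(Enum):  # assumed definition; only the member names are attested in this diff
    TEXT_AND_IMAGES = "textAndImages"
    IMAGES = "images"
    TEXTS = "texts"

def enum_to_flags(llm_inputs_enum: LLMInputType) -> tuple[bool, bool]:
    # Mirrors the removed logic in run_search_approach / run_agentic_retrieval_approach
    use_text_sources = llm_inputs_enum in (LLMInputType.TEXT_AND_IMAGES, LLMInputType.TEXTS)
    use_image_sources = llm_inputs_enum in (LLMInputType.TEXT_AND_IMAGES, LLMInputType.IMAGES)
    return use_text_sources, use_image_sources

print(enum_to_flags(LLMInputType.IMAGES))  # (False, True): images only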
@@ -284,17 +283,10 @@ async def run_search_approach(
         minimum_search_score = overrides.get("minimum_search_score", 0.0)
         minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0)
         search_index_filter = self.build_filter(overrides, auth_claims)
-
-        llm_inputs = overrides.get("llm_inputs")
-        # Use default values based on multimodal_enabled if not provided in overrides
-        if llm_inputs is None:
-            llm_inputs = self.get_default_llm_inputs()
-        llm_inputs_enum = LLMInputType(llm_inputs) if llm_inputs is not None else None
-        use_image_sources = llm_inputs_enum in [LLMInputType.TEXT_AND_IMAGES, LLMInputType.IMAGES]
-        use_text_sources = llm_inputs_enum in [LLMInputType.TEXT_AND_IMAGES, LLMInputType.TEXTS]
-
-        use_image_embeddings = overrides.get("use_image_embeddings", self.multimodal_enabled)
-        use_text_embeddings = overrides.get("use_text_embeddings", True)
+        send_text_sources = overrides.get("send_text_sources", True)
+        send_image_sources = overrides.get("send_image_sources", True)
+        search_text_embeddings = overrides.get("search_text_embeddings", True)
+        search_image_embeddings = overrides.get("search_image_embeddings", self.multimodal_enabled)

         original_user_query = messages[-1]["content"]
         if not isinstance(original_user_query, str):
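Net effect of this hunk: the single llm_inputs enum override is replaced by four independent booleans, two send_* flags for what reaches the LLM and two search_* flags for which vector queries run. A minimal, self-contained sketch of the new resolution (the helper name resolve_flags is hypothetical; multimodal_enabled stands in for the instance attribute):

from typing import Any

def resolve_flags(overrides: dict[str, Any], multimodal_enabled: bool) -> dict[str, bool]:
    # Hypothetical helper mirroring the defaults read in run_search_approach above
    return {
        # What the LLM is shown:
        "send_text_sources": overrides.get("send_text_sources", True),
        "send_image_sources": overrides.get("send_image_sources", True),
        # Which vector queries are issued against the index:
        "search_text_embeddings": overrides.get("search_text_embeddings", True),
        "search_image_embeddings": overrides.get("search_image_embeddings", multimodal_enabled),
    }

# Image embeddings are only searched by default when multimodal support is on:
print(resolve_flags({}, multimodal_enabled=False)["search_image_embeddings"])  # False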
@@ -329,9 +321,9 @@ async def run_search_approach(

         vectors: list[VectorQuery] = []
         if use_vector_search:
-            if use_text_embeddings:
+            if search_text_embeddings:
                 vectors.append(await self.compute_text_embedding(query_text))
-            if use_image_embeddings:
+            if search_image_embeddings:
                 vectors.append(await self.compute_multimodal_embedding(query_text))

         results = await self.search(
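With the rename, each search_* flag independently contributes one vector query. A toy illustration of that gating, with stub coroutines standing in for self.compute_text_embedding and self.compute_multimodal_embedding (the payload dicts are assumptions, not the real VectorQuery type):

import asyncio

async def compute_text_embedding(q: str) -> dict:
    return {"kind": "text", "query": q}  # stand-in for a text VectorQuery

async def compute_multimodal_embedding(q: str) -> dict:
    return {"kind": "multimodal", "query": q}  # stand-in for an image-capable VectorQuery

async def build_vectors(q: str, search_text_embeddings: bool, search_image_embeddings: bool) -> list[dict]:
    vectors: list[dict] = []
    if search_text_embeddings:
        vectors.append(await compute_text_embedding(q))
    if search_image_embeddings:
        vectors.append(await compute_multimodal_embedding(q))
    return vectors

# Both flags on -> two vector queries handed to the search call:
print(asyncio.run(build_vectors("eye exam coverage", True, True)))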
@@ -350,11 +342,11 @@ async def run_search_approach(

         # STEP 3: Generate a contextual and content specific answer using the search results and chat history
         text_sources, image_sources, citations = await self.get_sources_content(
-            results, use_semantic_captions, use_image_sources=use_image_sources, user_oid=auth_claims.get("oid")
+            results, use_semantic_captions, download_image_sources=send_image_sources, user_oid=auth_claims.get("oid")
         )

         extra_info = ExtraInfo(
-            DataPoints(text=text_sources if use_text_sources else [], images=image_sources, citations=citations),
+            DataPoints(text=text_sources if send_text_sources else [], images=image_sources, citations=citations),
             thoughts=[
                 self.format_thought_step_for_chatcompletion(
                     title="Prompt to generate search query",
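The two send_* flags act at different points: send_image_sources is passed through as download_image_sources, so images are not even fetched when it is off, while send_text_sources only filters text out of the DataPoints handed to the LLM. A sketch assuming DataPoints is a plain container (the real class is imported at the top of this file):

from dataclasses import dataclass, field

@dataclass
class DataPoints:  # assumed stand-in for the imported approaches class
    text: list[str] = field(default_factory=list)
    images: list[str] = field(default_factory=list)
    citations: list[str] = field(default_factory=list)

def gate_sources(text_sources: list[str], image_sources: list[str],
                 citations: list[str], send_text_sources: bool) -> DataPoints:
    # Text is always retrieved (it drives citations) but may be withheld here;
    # images were already skipped upstream if send_image_sources was False.
    return DataPoints(
        text=text_sources if send_text_sources else [],
        images=image_sources,
        citations=citations,
    )

print(gate_sources(["chunk A"], [], ["doc.pdf#page=2"], send_text_sources=False).text)  # []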
@@ -376,8 +368,8 @@ async def run_search_approach(
                         "filter": search_index_filter,
                         "use_vector_search": use_vector_search,
                         "use_text_search": use_text_search,
-                        "use_image_embeddings": use_image_embeddings,
-                        "use_image_sources": use_image_sources,
+                        "search_text_embeddings": search_text_embeddings,
+                        "search_image_embeddings": search_image_embeddings,
                     },
                 ),
                 ThoughtStep(
@@ -401,6 +393,8 @@ async def run_agentic_retrieval_approach(
         results_merge_strategy = overrides.get("results_merge_strategy", "interleaved")
         # 50 is the amount of documents that the reranker can process per query
         max_docs_for_reranker = max_subqueries * 50
+        send_text_sources = overrides.get("send_text_sources", True)
+        send_image_sources = overrides.get("send_image_sources", True)

         response, results = await self.run_agentic_retrieval(
             messages=messages,
@@ -413,20 +407,15 @@ async def run_agentic_retrieval_approach(
             results_merge_strategy=results_merge_strategy,
         )

-        # Determine if we should use text/image sources based on overrides or defaults
-        llm_inputs = overrides.get("llm_inputs")
-        if llm_inputs is None:
-            llm_inputs = self.get_default_llm_inputs()
-        llm_inputs_enum = LLMInputType(llm_inputs) if llm_inputs is not None else None
-        use_image_sources = llm_inputs_enum in [LLMInputType.TEXT_AND_IMAGES, LLMInputType.IMAGES]
-        use_text_sources = llm_inputs_enum in [LLMInputType.TEXT_AND_IMAGES, LLMInputType.TEXTS]
-
         text_sources, image_sources, citations = await self.get_sources_content(
-            results, use_semantic_captions=False, use_image_sources=use_image_sources, user_oid=auth_claims.get("oid")
+            results,
+            use_semantic_captions=False,
+            download_image_sources=send_image_sources,
+            user_oid=auth_claims.get("oid"),
         )

         extra_info = ExtraInfo(
-            DataPoints(text=text_sources if use_text_sources else [], images=image_sources, citations=citations),
+            DataPoints(text=text_sources if send_text_sources else [], images=image_sources, citations=citations),
             thoughts=[
                 ThoughtStep(
                     "Use agentic retrieval",