Replies: 2 comments 1 reply
-
Hello, @regybean! I'm a bot here to help you with bugs, questions, and becoming a contributor. Let's tackle this together! To modify the query pipeline to retrieve separate nodes for the description (user_docs and context_info), you can subclass the query engine so that it queries both indexes and combines their results.
Here is an example of how you can achieve this:

class MultiStepQueryEngine(BaseQueryEngine):
def __init__(
    self,
    user_docs_query_engine: BaseQueryEngine,
    context_info_query_engine: BaseQueryEngine,
    query_transform: StepDecomposeQueryTransform,
    response_synthesizer: Optional[BaseSynthesizer] = None,
    num_steps: Optional[int] = 3,
    early_stopping: bool = True,
    index_summary: str = "None",
    stop_fn: Optional[Callable[[Dict], bool]] = None,
) -> None:
    """Set up a two-engine multi-step query engine.

    Args:
        user_docs_query_engine: engine over the user-uploaded documents.
        context_info_query_engine: engine over the context/standards index.
        query_transform: transform used to decompose the query at each step.
        response_synthesizer: optional synthesizer; when omitted, one is
            built from the user-docs engine's callback manager.
        num_steps: maximum number of decomposition steps (None = unbounded).
        early_stopping: whether the stop predicate may end the loop early.
        index_summary: summary string describing the index.
        stop_fn: predicate deciding when to stop; defaults to default_stop_fn.

    Raises:
        ValueError: if early stopping is disabled and no step budget is given
            (the loop would then have no termination guarantee).
    """
    self._user_docs_query_engine = user_docs_query_engine
    self._context_info_query_engine = context_info_query_engine
    self._query_transform = query_transform
    # Reuse the user-docs engine's callback manager for both the fallback
    # synthesizer and the base-class initialization.
    callback_manager = self._user_docs_query_engine.callback_manager
    self._response_synthesizer = response_synthesizer or get_response_synthesizer(
        callback_manager=callback_manager
    )
    self._index_summary = index_summary
    self._num_steps = num_steps
    self._early_stopping = early_stopping
    self._stop_fn = stop_fn or default_stop_fn
    if not self._early_stopping and self._num_steps is None:
        raise ValueError("Must specify num_steps if early_stopping is False.")
    super().__init__(callback_manager)
def _query_multistep(
    self, query_bundle: QueryBundle
) -> Tuple[List[NodeWithScore], List[NodeWithScore], Dict[str, Any]]:
    """Run the iterative decompose-query-combine loop.

    Each step rewrites the query using the reasoning accumulated so far,
    queries both engines, combines the two responses, and records the
    (sub-question, answer) pair until the step budget is exhausted or the
    stop predicate fires.

    Fix over the original: the ``elif should_stop: break`` branch was dead
    code (only reachable when ``should_stop`` was False), and the
    ``should_stop = True`` assignments immediately before ``break`` were
    redundant — the flag is removed entirely with identical behavior.

    Args:
        query_bundle: the original user query.

    Returns:
        Tuple of (QA text nodes, source nodes gathered across all steps,
        metadata dict whose "sub_qa" key lists (sub-question, response)
        pairs).
    """
    prev_reasoning = ""
    cur_steps = 0
    final_response_metadata: Dict[str, Any] = {"sub_qa": []}
    text_chunks: List[str] = []
    source_nodes: List[NodeWithScore] = []
    while True:
        # Stop once the step budget is exhausted.
        if self._num_steps is not None and cur_steps >= self._num_steps:
            break
        updated_query_bundle = self._combine_queries(query_bundle, prev_reasoning)
        # Let the stop predicate end the loop before issuing more queries.
        # NOTE(review): self._early_stopping is not consulted here — the
        # stop predicate always runs; confirm that is intended.
        stop_dict = {"query_bundle": updated_query_bundle}
        if self._stop_fn(stop_dict):
            break
        user_docs_response = self._user_docs_query_engine.query(updated_query_bundle)
        context_info_response = self._context_info_query_engine.query(updated_query_bundle)
        cur_response = self._combine_responses(user_docs_response, context_info_response)
        cur_qa_text = (
            f"\nQuestion: {updated_query_bundle.query_str}\n"
            f"Answer: {cur_response!s}"
        )
        text_chunks.append(cur_qa_text)
        source_nodes.extend(cur_response.source_nodes)
        final_response_metadata["sub_qa"].append(
            (updated_query_bundle.query_str, cur_response)
        )
        # Feed this step's Q/A back into the next query rewrite.
        prev_reasoning += (
            f"- {updated_query_bundle.query_str}\n" f"- {cur_response!s}\n"
        )
        cur_steps += 1
    nodes = [
        NodeWithScore(node=TextNode(text=text_chunk)) for text_chunk in text_chunks
    ]
    return nodes, source_nodes, final_response_metadata
def _combine_responses(self, user_docs_response, context_info_response):
    # Combine the responses from user_docs and context_info
    combined_text = f"{user_docs_response.text}\n{context_info_response.text}"
    combined_source_nodes = user_docs_response.source_nodes + context_info_response.source_nodes
    return TextNode(text=combined_text, source_nodes=combined_source_nodes)

In this example, the MultiStepQueryEngine queries both engines at each step and combines their responses into a single node. Additionally, you can leverage the QueryPipeline to wire the two engines together declaratively:

from llama_index.core.query_pipeline.query import QueryPipeline
from llama_index.core.query_pipeline.components import InputComponent, QueryComponent, ArgPackComponent, FnComponent
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.schema import NodeWithScore
class UserDocsComponent(QueryComponent):
    """Pipeline component that forwards a query bundle to the user-docs engine."""

    def __init__(self, query_engine):
        # Engine over the user-uploaded documents index.
        self.query_engine = query_engine

    def _run_component(self, **kwargs):
        # The pipeline delivers inputs as keyword args; 'query_bundle' is required.
        bundle = kwargs['query_bundle']
        return self.query_engine.query(bundle)
class ContextInfoComponent(QueryComponent):
    """Pipeline component that forwards a query bundle to the context-info engine."""

    def __init__(self, query_engine):
        # Engine over the body-of-standards / context index.
        self.query_engine = query_engine

    def _run_component(self, **kwargs):
        # The pipeline delivers inputs as keyword args; 'query_bundle' is required.
        bundle = kwargs['query_bundle']
        return self.query_engine.query(bundle)
# Initialize query engines
# NOTE(review): both engines below are Ellipsis placeholders — replace them
# with real VectorStoreIndex query engines before running this snippet.
user_docs_query_engine = ... # Your user_docs query engine
context_info_query_engine = ... # Your context_info query engine
# Create query components
user_docs_component = UserDocsComponent(user_docs_query_engine)
context_info_component = ContextInfoComponent(context_info_query_engine)
# Create input component
input_component = InputComponent()
# Create and configure the query pipeline
pipeline = QueryPipeline()
# 'join' packs the two engines' outputs into one collection, 'reranker'
# fuses them, and 'summarizer' synthesizes the final answer.
pipeline.add_modules({
'input': input_component,
'user_docs': user_docs_component,
'context_info': context_info_component,
'join': ArgPackComponent(),
# NOTE(review): reciprocal_rank_fusion is not defined anywhere in this
# snippet — a rank-fusion function must be supplied by the user.
'reranker': FnComponent(fn=reciprocal_rank_fusion),
# NOTE(review): OpenAI is not imported above — this needs
# `from llama_index.llms.openai import OpenAI` (or another LLM).
'summarizer': TreeSummarize(llm=OpenAI(model="gpt-3.5-turbo"))
})
# Link the input component to both query components
pipeline.add_link('input', 'user_docs', src_key='query_bundle', dest_key='query_bundle')
pipeline.add_link('input', 'context_info', src_key='query_bundle', dest_key='query_bundle')
# Fan-in: both engines feed the join, then the reranker, then the summarizer.
pipeline.add_link('user_docs', 'join', dest_key='user_docs')
pipeline.add_link('context_info', 'join', dest_key='context_info')
pipeline.add_link('join', 'reranker')
pipeline.add_link('reranker', 'summarizer', dest_key='nodes')
# The summarizer also needs the original query string, taken directly
# from the input component.
pipeline.add_link('input', 'summarizer', dest_key='query_str')
# Run the pipeline
# NOTE(review): another placeholder — build a real QueryBundle here.
query_bundle = ... # Your query bundle
combined_result = pipeline.run(query_bundle=query_bundle)

This example demonstrates how to set up a query pipeline using the LlamaIndex framework to manage the interactions between different query components and combine their results [2][3][4].
Beta Was this translation helpful? Give feedback.
-
@dosu for the multistep query engine, can you show me how to use it to complete a query and apply a template string as follows: text_qa_template_str = (
Beta Was this translation helpful? Give feedback.
Uh oh!
There was an error while loading. Please reload this page.
-
How can I modify the query pipeline to retrieve separate nodes of the description: user_docs and context_info from two separate VectorStoreIndex query engines and combine them into a full query template string?
i.e. user query -> gets relevant user uploaded doc info -> adds context from body of standards -> outputs contextualised query from user documentation
Beta Was this translation helpful? Give feedback.
All reactions