Skip to content

Commit e8b125f

Browse files
committed
feat: refactor knowledge_pipeline
1 parent 41053eb commit e8b125f

File tree

1 file changed

+0
-25
lines changed

1 file changed

+0
-25
lines changed

aperag/pipeline/knowledge_pipeline.py

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,6 @@ def __init__(self, **kwargs):
7171
DEFAULT_CHINESE_PROMPT_TEMPLATE_V3)
7272
self.prompt = PromptTemplate(template=self.prompt_template, input_variables=["query", "context"])
7373

74-
# No changes needed in new_ai_message
7574
async def new_ai_message(self, message, message_id, response, references, urls):
7675
return Message(
7776
id=message_id,
@@ -90,7 +89,6 @@ async def new_ai_message(self, message, message_id, response, references, urls):
9089
llm_context_window=self.context_window,
9190
)
9291

93-
# No changes needed in filter_by_keywords
9492
async def filter_by_keywords(self, message, candidates):
9593
index = generate_fulltext_index_name(self.collection_id)
9694
async with IKExtractor({"index_name": index, "es_host": settings.ES_HOST}) as extractor:
@@ -125,14 +123,6 @@ async def _run_standard_rag(self, query_with_history: str, vector: List[float],
125123
score_threshold=self.score_threshold, topk=self.topk * 6, vector=vector)
126124
logger.info("[%s] Found %d relevant documents in vector db", log_prefix, len(results))
127125

128-
# Optional: Hyde logic could be added here if needed later
129-
# hyde_message = await self.generate_hyde_message(message)
130-
# new_vector = self.embedding_model.embed_query(hyde_message)
131-
# results2 = await async_run(self.context_manager.query, message,
132-
# score_threshold=self.score_threshold, topk=self.topk * 6, vector=new_vector)
133-
# results_set = set([result.text for result in results])
134-
# results.extend(result for result in results2 if result.text not in results_set)
135-
136126
if self.bot_context != "":
137127
bot_context_result = DocumentWithScore(
138128
text=self.bot_context, # type: ignore
@@ -173,21 +163,6 @@ async def _run_light_rag(self, query_with_history: str, log_prefix: str) -> Opti
173163
It should take the query and return the context string.
174164
"""
175165
logger.info("[%s] Skipping LightRAG pipeline (placeholder)", log_prefix)
176-
# In the future, this will call the LightRAG client:
177-
# try:
178-
# # Example call structure (adjust based on actual LightRAG client)
179-
# lightrag_client = ... # Initialize LightRAG client
180-
# response = await lightrag_client.aquery(
181-
# query_with_history,
182-
# param=QueryParam(mode="global", only_need_context=True), # Example params
183-
# )
184-
# context = response.get_context() # Or however context is retrieved
185-
# logger.info("[%s] LightRAG pipeline returned context (length: %d)", log_prefix, len(context))
186-
# return context
187-
# except Exception as e:
188-
# logger.error("[%s] LightRAG pipeline failed: %s", log_prefix, e)
189-
# return None
190-
await asyncio.sleep(0) # Simulate async operation if needed for testing structure
191166
return None
192167

193168
async def run(self, message, gen_references=False, message_id=""):

0 commit comments

Comments (0)