
Commit 69a0649

fix basic search
1 parent 724eff9 commit 69a0649

File tree: 2 files changed (+0 / -68 lines)


graphrag/api/query.py

Lines changed: 0 additions & 2 deletions
@@ -1108,8 +1108,6 @@ async def basic_search_streaming(
     vector_store_args = {}
     for index, store in config.vector_store.items():
         vector_store_args[index] = store.model_dump()
-    else:
-        vector_store_args = None
     logger.info(f"Vector Store Args: {redact(vector_store_args)}")  # type: ignore # noqa
 
     description_embedding_store = get_embedding_store(
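
Note: the removed `else` was attached to the `for` loop (Python's for/else), so it ran whenever the loop finished without a `break` and reset vector_store_args to None right after the loop had populated it; that appears to be the breakage the commit message refers to. A minimal sketch of that pitfall, with a hypothetical stores dict standing in for config.vector_store:

# Hypothetical illustration (not from the repo) of the for/else behavior removed above.
stores = {"default": {"type": "lancedb"}}  # stand-in for config.vector_store

vector_store_args = {}
for index, store in stores.items():
    vector_store_args[index] = store
else:
    # A for-loop's else clause runs whenever the loop ends without `break`,
    # so this always executed and clobbered the populated dict.
    vector_store_args = None

print(vector_store_args)  # prints None, not the populated dict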

graphrag/query/structured_search/basic_search/search.py

Lines changed: 0 additions & 66 deletions
@@ -121,72 +121,6 @@ async def asearch(
                 output_tokens=0,
             )
 
-    def search(
-        self,
-        query: str,
-        conversation_history: ConversationHistory | None = None,
-        **kwargs,
-    ) -> SearchResult:
-        """Build basic search context that fits a single context window and generate answer for the user question."""
-        start_time = time.time()
-        search_prompt = ""
-        llm_calls, prompt_tokens, output_tokens = {}, {}, {}
-        context_result = self.context_builder.build_context(
-            query=query,
-            conversation_history=conversation_history,
-            **kwargs,
-            **self.context_builder_params,
-        )
-        llm_calls["build_context"] = context_result.llm_calls
-        prompt_tokens["build_context"] = context_result.prompt_tokens
-        output_tokens["build_context"] = context_result.output_tokens
-
-        log.info("GENERATE ANSWER: %d. QUERY: %s", start_time, query)
-        try:
-            search_prompt = self.system_prompt.format(
-                context_data=context_result.context_chunks,
-                response_type=self.response_type,
-            )
-            search_messages = [
-                {"role": "system", "content": search_prompt},
-                {"role": "user", "content": query},
-            ]
-
-            response = self.llm.generate(
-                messages=search_messages,
-                streaming=True,
-                callbacks=self.callbacks,
-                **self.llm_params,
-            )
-            llm_calls["response"] = 1
-            prompt_tokens["response"] = num_tokens(search_prompt, self.token_encoder)
-            output_tokens["response"] = num_tokens(response, self.token_encoder)
-
-            return SearchResult(
-                response=response,
-                context_data=context_result.context_records,
-                context_text=context_result.context_chunks,
-                completion_time=time.time() - start_time,
-                llm_calls=sum(llm_calls.values()),
-                prompt_tokens=sum(prompt_tokens.values()),
-                output_tokens=sum(output_tokens.values()),
-                llm_calls_categories=llm_calls,
-                prompt_tokens_categories=prompt_tokens,
-                output_tokens_categories=output_tokens,
-            )
-
-        except Exception:
-            log.exception("Exception in _map_response_single_batch")
-            return SearchResult(
-                response="",
-                context_data=context_result.context_records,
-                context_text=context_result.context_chunks,
-                completion_time=time.time() - start_time,
-                llm_calls=1,
-                prompt_tokens=num_tokens(search_prompt, self.token_encoder),
-                output_tokens=0,
-            )
-
     async def astream_search(
         self,
         query: str,
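
With the synchronous search() removed, basic search is driven through the remaining async methods (asearch / astream_search). A minimal usage sketch, assuming an already-constructed basic-search engine object; the names `engine` and `run_basic_search` are illustrative, not from this commit:

# Hypothetical caller code: run the remaining async entry point with asyncio.
import asyncio

async def run_basic_search(engine, question: str) -> str:
    # `engine` is assumed to expose the asearch() kept by this diff and to
    # return a SearchResult whose .response holds the generated answer.
    result = await engine.asearch(query=question)
    return result.response

# answer = asyncio.run(run_basic_search(engine, "What does the dataset cover?"))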
