@@ -23,22 +23,12 @@ def __init__(self, conn: sqlite3.Connection, settings: Settings, chunker: Chunke
2323 self ._logger = Logger ()
2424
2525 def load_model (self ):
26- """Load the model model from the specified path
27- or download it from Hugging Face if not found."""
26+ """Load the model from the specified path."""
2827
2928 model_path = Path (self ._settings .model_path )
3029 if not model_path .exists ():
3130 raise FileNotFoundError (f"Model file not found at { model_path } " )
3231
33- # model_path = self.settings.model_path
34- # if not Path(self.settings.model_path).exists():
35- # # check if exists locally or try to download it from Hugging Face
36- # model_path = hf_hub_download(
37- # repo_id=self.settings.model_path,
38- # filename="model-q4_0.gguf", # GGUF format
39- # cache_dir="./models"
40- # )
41-
4232 self ._conn .execute (
4333 "SELECT llm_model_load(?, ?);" ,
4434 (self ._settings .model_path , self ._settings .model_options ),
@@ -105,12 +95,12 @@ def create_new_context(self) -> None:
10595 )
10696
def free_context(self) -> None:
    """Release resources associated with the current LLM context.

    Invokes the SQLite extension function ``llm_context_free()`` on the
    stored connection. Returns nothing; any SQLite error propagates as
    ``sqlite3.OperationalError``.
    """
    # Connection.execute() is the sqlite3 shortcut that creates (and
    # manages) a cursor internally, so the explicit throwaway cursor
    # object is unnecessary.
    self._conn.execute("SELECT llm_context_free();")
112102
113- def search (self , query : str , limit : int = 10 ) -> list [DocumentResult ]:
103+ def search (self , query : str , top_k : int = 10 ) -> list [DocumentResult ]:
114104 """Semantic search and full-text search sorted with Reciprocal Rank Fusion."""
115105 query_embedding = self .generate_embedding ([Chunk (content = query )])[0 ].embedding
116106
@@ -185,7 +175,7 @@ def search(self, query: str, limit: int = 10) -> list[DocumentResult]:
185175 {
186176 "query" : query ,
187177 "query_embedding" : query_embedding ,
188- "k" : limit ,
178+ "k" : top_k ,
189179 "rrf_k" : Engine .DEFAULT_RRF_K ,
190180 "weight_fts" : self ._settings .weight_fts ,
191181 "weight_vec" : self ._settings .weight_vec ,
0 commit comments