Commit 8201a20

Author: Daniele Briggi (committed)

refact(formatter): unified standard and debug view

1 parent 52d68f9 · commit 8201a20

File tree

6 files changed (+187, -181 lines)


.github/workflows/test.yaml

Lines changed: 6 additions & 1 deletion
@@ -50,7 +50,12 @@ jobs:
       - name: Test
         # Using default directory for models
         run: |
-          pytest -v -m "not slow" ./tests
+          pytest --cov --cov-branch --cov-report=xml -v -m "not slow" ./tests
+
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
 
   code-style:
     runs-on: ubuntu-latest

src/sqlite_rag/cli.py

Lines changed: 2 additions & 0 deletions
@@ -386,6 +386,8 @@ def search(
 
     search_time = time.time() - start_time
 
+    results = results[:limit]
+
     # Get the appropriate formatter and display results
     formatter = get_formatter(debug=debug, table_view=peek)
     formatter.format_results(results, query)
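
For orientation, here is a minimal sketch of how the added slice sits in the CLI's search flow. It assumes the command delegates to Engine.search and that limit, debug, and peek are the command's options; only the lines around the change are visible in this diff, and the get_formatter import path is an assumption.

    # Sketch only: get_formatter and format_results appear in the diff's context
    # lines; the engine call, option names, and import path are assumptions.
    import time

    from sqlite_rag.formatter import get_formatter  # import path assumed

    def run_search(engine, query: str, limit: int, debug: bool, peek: bool) -> None:
        start_time = time.time()
        results = engine.search(query, top_k=limit)  # parameter renamed in engine.py below
        search_time = time.time() - start_time  # measured before the slice; reporting omitted here

        results = results[:limit]  # the added line: cap how many results are displayed

        # Get the appropriate formatter and display results
        formatter = get_formatter(debug=debug, table_view=peek)
        formatter.format_results(results, query)

Because the slice happens after the timing capture, search_time still reflects the full query rather than the truncated result list.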

src/sqlite_rag/engine.py

Lines changed: 4 additions & 14 deletions
@@ -23,22 +23,12 @@ def __init__(self, conn: sqlite3.Connection, settings: Settings, chunker: Chunker
         self._logger = Logger()
 
     def load_model(self):
-        """Load the model model from the specified path
-        or download it from Hugging Face if not found."""
+        """Load the model model from the specified path."""
 
         model_path = Path(self._settings.model_path)
         if not model_path.exists():
             raise FileNotFoundError(f"Model file not found at {model_path}")
 
-        # model_path = self.settings.model_path
-        # if not Path(self.settings.model_path).exists():
-        #     # check if exists locally or try to download it from Hugging Face
-        #     model_path = hf_hub_download(
-        #         repo_id=self.settings.model_path,
-        #         filename="model-q4_0.gguf",  # GGUF format
-        #         cache_dir="./models"
-        #     )
-
         self._conn.execute(
             "SELECT llm_model_load(?, ?);",
             (self._settings.model_path, self._settings.model_options),

@@ -105,12 +95,12 @@ def create_new_context(self) -> None:
         )
 
     def free_context(self) -> None:
-        """"""
+        """Release resources associated with the current context."""
         cursor = self._conn.cursor()
 
         cursor.execute("SELECT llm_context_free();")
 
-    def search(self, query: str, limit: int = 10) -> list[DocumentResult]:
+    def search(self, query: str, top_k: int = 10) -> list[DocumentResult]:
         """Semantic search and full-text search sorted with Reciprocal Rank Fusion."""
         query_embedding = self.generate_embedding([Chunk(content=query)])[0].embedding
 

@@ -185,7 +175,7 @@ def search(self, query: str, limit: int = 10) -> list[DocumentResult]:
             {
                 "query": query,
                 "query_embedding": query_embedding,
-                "k": limit,
+                "k": top_k,
                 "rrf_k": Engine.DEFAULT_RRF_K,
                 "weight_fts": self._settings.weight_fts,
                 "weight_vec": self._settings.weight_vec,

0 commit comments

Comments
 (0)