Skip to content

Commit bcea663

Browse files
Authored commit: feat: make pandas an optional dependency (#141)
1 parent 8df2c5d commit bcea663

File tree

2 files changed: +15 additions, −8 deletions

pyproject.toml

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,6 @@ dependencies = [
3636
"sqlmodel-slim (>=0.0.21)",
3737
# Progress:
3838
"tqdm (>=4.66.0)",
39-
# Evaluation:
40-
"pandas (>=2.1.1)",
4139
# CLI:
4240
"typer (>=0.15.1)",
4341
# Model Context Protocol:
@@ -83,7 +81,7 @@ llama-cpp-python = ["llama-cpp-python (>=0.3.9)"]
8381
# Markdown conversion:
8482
pandoc = ["pypandoc-binary (>=1.13)"]
8583
# Evaluation:
86-
ragas = ["ragas (>=0.1.12)"]
84+
ragas = ["pandas (>=2.1.1)", "ragas (>=0.1.12)"]
8785

8886
[tool.commitizen] # https://commitizen-tools.github.io/commitizen/config/
8987
bump_message = "bump: v$current_version → v$new_version"

src/raglite/_eval.py

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,16 @@
11
"""Generation and evaluation of evals."""
22

33
from random import randint
4-
from typing import ClassVar
4+
from typing import TYPE_CHECKING, ClassVar
55

66
import numpy as np
7-
import pandas as pd
87
from pydantic import BaseModel, ConfigDict, Field, field_validator
98
from sqlmodel import Session, func, select
109
from tqdm.auto import tqdm, trange
1110

11+
if TYPE_CHECKING:
12+
import pandas as pd
13+
1214
from raglite._config import RAGLiteConfig
1315
from raglite._database import Chunk, Document, Eval, create_database_engine
1416
from raglite._extract import extract_with_llm
@@ -176,8 +178,14 @@ def answer_evals(
176178
num_evals: int = 100,
177179
*,
178180
config: RAGLiteConfig | None = None,
179-
) -> pd.DataFrame:
181+
) -> "pd.DataFrame":
180182
"""Read evals from the database and answer them with RAG."""
183+
try:
184+
import pandas as pd
185+
except ModuleNotFoundError as import_error:
186+
error_message = "To use the `answer_evals` function, please install the `ragas` extra."
187+
raise ModuleNotFoundError(error_message) from import_error
188+
181189
# Read evals from the database.
182190
config = config or RAGLiteConfig()
183191
engine = create_database_engine(config)
@@ -206,10 +214,11 @@ def answer_evals(
206214

207215

208216
def evaluate(
209-
answered_evals: pd.DataFrame | int = 100, config: RAGLiteConfig | None = None
210-
) -> pd.DataFrame:
217+
answered_evals: "pd.DataFrame | int" = 100, config: RAGLiteConfig | None = None
218+
) -> "pd.DataFrame":
211219
"""Evaluate the performance of a set of answered evals with Ragas."""
212220
try:
221+
import pandas as pd
213222
from datasets import Dataset
214223
from langchain_community.chat_models import ChatLiteLLM
215224
from langchain_community.llms import LlamaCpp

0 commit comments

Comments (0)