Skip to content

Commit 6ff35f7

Browse files
authored
feat: Add support for LlamaIndex in evaluation (#1619)
Added type checks for LlamaIndex LLMs and embeddings in the evaluate function.
1 parent 2a4a5ad commit 6ff35f7

File tree

2 files changed

+12
-3
lines changed

2 files changed

+12
-3
lines changed

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ all = [
2525
"rapidfuzz",
2626
"pandas",
2727
"datacompy",
28+
"llama_index",
2829
]
2930
docs = [
3031
"mkdocs>=1.6.1",

src/ragas/evaluation.py

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@
88
from langchain_core.embeddings import Embeddings as LangchainEmbeddings
99
from langchain_core.language_models import BaseLanguageModel as LangchainLLM
1010

11+
from llama_index.core.base.llms.base import BaseLLM as LlamaIndexLLM
12+
from llama_index.core.base.embeddings.base import BaseEmbedding as LlamaIndexEmbedding
13+
1114
from ragas._analytics import EvaluationEvent, track, track_was_completed
1215
from ragas.callbacks import ChainType, RagasTracer, new_group
1316
from ragas.dataset_schema import (
@@ -19,13 +22,14 @@
1922
from ragas.embeddings.base import (
2023
BaseRagasEmbeddings,
2124
LangchainEmbeddingsWrapper,
25+
LlamaIndexEmbeddingsWrapper,
2226
embedding_factory,
2327
)
2428
from ragas.exceptions import ExceptionInRunner
2529
from ragas.executor import Executor
2630
from ragas.integrations.helicone import helicone_config
2731
from ragas.llms import llm_factory
28-
from ragas.llms.base import BaseRagasLLM, LangchainLLMWrapper
32+
from ragas.llms.base import BaseRagasLLM, LangchainLLMWrapper, LlamaIndexLLMWrapper
2933
from ragas.metrics import AspectCritic
3034
from ragas.metrics._answer_correctness import AnswerCorrectness
3135
from ragas.metrics.base import (
@@ -56,8 +60,8 @@
5660
def evaluate(
5761
dataset: t.Union[Dataset, EvaluationDataset],
5862
metrics: t.Optional[t.Sequence[Metric]] = None,
59-
llm: t.Optional[BaseRagasLLM | LangchainLLM] = None,
60-
embeddings: t.Optional[BaseRagasEmbeddings | LangchainEmbeddings] = None,
63+
llm: t.Optional[BaseRagasLLM | LangchainLLM | LlamaIndexLLM] = None,
64+
embeddings: t.Optional[BaseRagasEmbeddings | LangchainEmbeddings | LlamaIndexEmbedding] = None,
6165
callbacks: Callbacks = None,
6266
in_ci: bool = False,
6367
run_config: RunConfig = RunConfig(),
@@ -182,8 +186,12 @@ def evaluate(
182186
# set the llm and embeddings
183187
if isinstance(llm, LangchainLLM):
184188
llm = LangchainLLMWrapper(llm, run_config=run_config)
189+
elif isinstance(llm, LlamaIndexLLM):
190+
llm = LlamaIndexLLMWrapper(llm, run_config=run_config)
185191
if isinstance(embeddings, LangchainEmbeddings):
186192
embeddings = LangchainEmbeddingsWrapper(embeddings)
193+
elif isinstance(embeddings, LlamaIndexEmbedding):
194+
embeddings = LlamaIndexEmbeddingsWrapper(embeddings)
187195

188196
# init llms and embeddings
189197
binary_metrics = []

0 commit comments

Comments (0)