Skip to content

Commit 1dad492

Browse files
authored
fix(integration): langchain integration and CI fixes (#830)
- removed deprecation warning; fixed CI issues
1 parent a288656 commit 1dad492

File tree

2 files changed

+29
-7
lines changed

2 files changed

+29
-7
lines changed

src/ragas/integrations/langchain.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,7 @@ def evaluate_run(
206206
if example.outputs is None or "ground_truth" not in example.outputs:
207207
raise ValueError("expected `ground_truth` in example outputs.")
208208
chain_eval["ground_truth"] = example.outputs["ground_truth"]
209-
eval_output = self(chain_eval, include_run_info=True)
209+
eval_output = self.invoke(chain_eval, include_run_info=True)
210210

211211
evaluation_result = EvaluationResult(
212212
key=self.metric.name, score=eval_output[self.metric.name]

src/ragas/metrics/base.py

Lines changed: 28 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
C - contexts: context used for generation
55
G - ground_truth: ground truth answer
66
"""
7+
78
from __future__ import annotations
89

910
import asyncio
@@ -25,17 +26,37 @@
2526
EvaluationMode = Enum("EvaluationMode", "qac qa qc gc ga qga qcg")
2627

2728

29+
def get_required_columns(
30+
eval_mod: EvaluationMode, ignore_columns: t.Optional[t.List[str]] = None
31+
) -> t.List[str]:
32+
if eval_mod == EvaluationMode.qac:
33+
keys = ["question", "answer", "contexts"]
34+
elif eval_mod == EvaluationMode.qa:
35+
keys = ["question", "answer"]
36+
elif eval_mod == EvaluationMode.qc:
37+
keys = ["question", "contexts"]
38+
elif eval_mod == EvaluationMode.gc:
39+
keys = ["contexts", "ground_truth"]
40+
elif eval_mod == EvaluationMode.ga:
41+
keys = ["answer", "ground_truth"]
42+
elif eval_mod == EvaluationMode.qga:
43+
keys = ["question", "contexts", "answer", "ground_truth"]
44+
elif eval_mod == EvaluationMode.qcg:
45+
keys = ["question", "contexts", "ground_truth"]
46+
ignore_columns = ignore_columns or []
47+
48+
return [k for k in keys if k not in ignore_columns]
49+
50+
2851
@dataclass
2952
class Metric(ABC):
3053
@property
3154
@abstractmethod
32-
def name(self) -> str:
33-
...
55+
def name(self) -> str: ...
3456

3557
@property
3658
@abstractmethod
37-
def evaluation_mode(self) -> EvaluationMode:
38-
...
59+
def evaluation_mode(self) -> EvaluationMode: ...
3960

4061
@abstractmethod
4162
def init(self, run_config: RunConfig):
@@ -97,8 +118,9 @@ async def ascore(
97118
return score
98119

99120
@abstractmethod
100-
async def _ascore(self, row: t.Dict, callbacks: Callbacks, is_async: bool) -> float:
101-
...
121+
async def _ascore(
122+
self, row: t.Dict, callbacks: Callbacks, is_async: bool
123+
) -> float: ...
102124

103125

104126
@dataclass

0 commit comments

Comments (0)