
Commit ba4e6c8

fix: linting issues (#138)
1 parent 59123b4 commit ba4e6c8

3 files changed: +18 -5 lines changed


docs/integrations/langchain.ipynb

Lines changed: 2 additions & 1 deletion
@@ -51,7 +51,7 @@
 "index = VectorstoreIndexCreator().from_loaders([loader])\n",
 "\n",
 "\n",
-"llm = ChatOpenAI(temperature= 0)\n",
+"llm = ChatOpenAI(temperature=0)\n",
 "qa_chain = RetrievalQA.from_chain_type(\n",
 "    llm,\n",
 "    retriever=index.vectorstore.as_retriever(),\n",
@@ -373,6 +373,7 @@
 ],
 "source": [
 "from langchain.schema import Document\n",
+"\n",
 "fake_result = result.copy()\n",
 "fake_result[\"source_documents\"] = [Document(page_content=\"I love christmas\")]\n",
 "eval_result = context_recall_chain(fake_result)\n",

src/ragas/metrics/answer_relevance.py

Lines changed: 3 additions & 2 deletions
@@ -7,8 +7,9 @@
 from datasets import Dataset
 from langchain.callbacks.manager import trace_as_chain_group
 from langchain.embeddings import OpenAIEmbeddings
-from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain.embeddings.base import Embeddings
+from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
+
 from ragas.metrics.base import EvaluationMode, MetricWithLLM
 from ragas.metrics.llms import generate
 
@@ -45,7 +46,7 @@ class AnswerRelevancy(MetricWithLLM):
         Here indicates the number questions generated per answer.
         Ideal range between 3 to 5.
     embeddings: Embedding
-        The langchain wrapper of Embedding object.
+        The langchain wrapper of Embedding object.
         E.g. HuggingFaceEmbeddings('BAAI/bge-base-en')
     """
 
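The docstring fixed above documents the metric's two knobs: strictness and embeddings. A minimal sketch of setting both, assuming AnswerRelevancy accepts them as constructor arguments as the attribute list suggests (the import paths and model name follow the docstring's own example; treat them as illustrative):

from langchain.embeddings import HuggingFaceEmbeddings
from ragas.metrics.answer_relevance import AnswerRelevancy

# strictness: questions generated per answer (docstring suggests 3-5);
# embeddings: any langchain Embeddings wrapper, here the BGE model
# named in the docstring.
answer_relevancy = AnswerRelevancy(
    strictness=3,
    embeddings=HuggingFaceEmbeddings(model_name="BAAI/bge-base-en"),
)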
tests/benchmarks/benchmark_eval.py

Lines changed: 13 additions & 2 deletions
@@ -2,7 +2,12 @@
 from torch.cuda import is_available
 
 from ragas import evaluate
-from ragas.metrics import answer_relevancy, context_relevancy, faithfulness
+from ragas.metrics import (
+    answer_relevancy,
+    context_recall,
+    context_relevancy,
+    faithfulness,
+)
 from ragas.metrics.critique import harmfulness
 
 DEVICE = "cuda" if is_available() else "cpu"
@@ -13,6 +18,12 @@
 if __name__ == "__main__":
     result = evaluate(
         ds.select(range(5)),
-        metrics=[answer_relevancy, context_relevancy, faithfulness, harmfulness],
+        metrics=[
+            answer_relevancy,
+            context_relevancy,
+            faithfulness,
+            harmfulness,
+            context_recall,
+        ],
     )
     print(result)
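Since context_recall joins the benchmark's metric list, each row now needs ground-truth answers alongside question, answer, and contexts. A self-contained sketch of the same evaluate() call on a tiny hand-built dataset (the rows are invented; column names follow ragas' schema of the time, and the LLM-backed metrics need an OPENAI_API_KEY at runtime):

from datasets import Dataset
from ragas import evaluate
from ragas.metrics import context_recall, faithfulness

# Invented single-row dataset; context_recall checks whether the
# retrieved contexts support the ground-truth answer.
ds = Dataset.from_dict(
    {
        "question": ["What does ragas evaluate?"],
        "answer": ["ragas scores RAG pipelines on retrieval and generation."],
        "contexts": [["ragas is an evaluation framework for RAG pipelines."]],
        "ground_truths": [["ragas evaluates retrieval-augmented generation."]],
    }
)

result = evaluate(ds, metrics=[context_recall, faithfulness])
print(result)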
