
Commit 7a12846

fix: llamaIndex import fixes (#156)
Fixes #144. This commit fixes:
- incorrect llamaIndex import
- unspecified dependencies
- outdated metrics documentation
1 parent 1d01c35 commit 7a12846

File tree

6 files changed (+65 −54 lines)


docs/integrations/llamaindex.ipynb

Lines changed: 44 additions & 44 deletions
@@ -125,7 +125,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 5,
 "id": "751dc988",
 "metadata": {},
 "outputs": [],
@@ -159,7 +159,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 7,
 "id": "9875132a",
 "metadata": {},
 "outputs": [],
@@ -191,7 +191,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": 8,
 "id": "05633cc2",
 "metadata": {},
 "outputs": [
@@ -206,7 +206,7 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"100%|██████████████████████████████████████████████████████████| 1/1 [01:12<00:00, 72.16s/it]\n"
+"100%|██████████████████████████████████████████████████████████| 1/1 [01:00<00:00, 60.01s/it]\n"
 ]
 },
 {
@@ -220,21 +220,21 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"100%|██████████████████████████████████████████████████████████| 1/1 [00:12<00:00, 12.74s/it]\n"
+"100%|██████████████████████████████████████████████████████████| 1/1 [00:11<00:00, 11.67s/it]\n"
 ]
 },
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"evaluating with [context_ relevancy]\n"
+"evaluating with [context_relevancy]\n"
 ]
 },
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"100%|██████████████████████████████████████████████████████████| 1/1 [00:39<00:00, 39.72s/it]\n"
+"100%|██████████████████████████████████████████████████████████| 1/1 [00:46<00:00, 46.11s/it]\n"
 ]
 },
 {
@@ -248,7 +248,7 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"100%|██████████████████████████████████████████████████████████| 1/1 [00:20<00:00, 20.26s/it]\n"
+"100%|██████████████████████████████████████████████████████████| 1/1 [00:13<00:00, 13.82s/it]\n"
 ]
 },
 {
@@ -262,7 +262,7 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"100%|██████████████████████████████████████████████████████████| 1/1 [00:31<00:00, 31.83s/it]\n"
+"100%|██████████████████████████████████████████████████████████| 1/1 [00:39<00:00, 39.68s/it]\n"
 ]
 },
 {
@@ -274,15 +274,15 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": 9,
 "id": "f927a943",
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"{'ragas_score': 0.4150, 'faithfulness': 0.7000, 'answer_relevancy': 0.9550, 'context_ relevancy': 0.1622, 'harmfulness': 0.0000, 'context_recall': 1.0000}\n"
+"{'ragas_score': 0.5228, 'faithfulness': 0.7000, 'answer_relevancy': 0.9565, 'context_relevancy': 0.2406, 'harmfulness': 0.0000, 'context_recall': 0.9800}\n"
 ]
 }
 ],
@@ -301,7 +301,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 14,
+"execution_count": 10,
 "id": "b96311e2",
 "metadata": {},
 "outputs": [
@@ -327,12 +327,12 @@
 " <tr style=\"text-align: right;\">\n",
 " <th></th>\n",
 " <th>question</th>\n",
-" <th>answer</th>\n",
 " <th>contexts</th>\n",
+" <th>answer</th>\n",
 " <th>ground_truths</th>\n",
 " <th>faithfulness</th>\n",
 " <th>answer_relevancy</th>\n",
-" <th>context_ relevancy</th>\n",
+" <th>context_relevancy</th>\n",
 " <th>harmfulness</th>\n",
 " <th>context_recall</th>\n",
 " </tr>\n",
@@ -341,60 +341,60 @@
 " <tr>\n",
 " <th>0</th>\n",
 " <td>What is the population of New York City as of ...</td>\n",
-" <td>\\nThe population of New York City as of 2020 i...</td>\n",
 " <td>[Aeromedical Staging Squadron, and a military ...</td>\n",
+" <td>\\nThe population of New York City as of 2020 i...</td>\n",
 " <td>[8,804,000]</td>\n",
 " <td>1.0</td>\n",
-" <td>0.999999</td>\n",
-" <td>0.161345</td>\n",
+" <td>1.000000</td>\n",
+" <td>0.320000</td>\n",
 " <td>0</td>\n",
 " <td>1.0</td>\n",
 " </tr>\n",
 " <tr>\n",
 " <th>1</th>\n",
 " <td>Which borough of New York City has the highest...</td>\n",
-" <td>\\nThe borough of Manhattan has the highest pop...</td>\n",
 " <td>[co-extensive with New York County, the boroug...</td>\n",
+" <td>\\nThe borough of Manhattan has the highest pop...</td>\n",
 " <td>[Queens]</td>\n",
 " <td>0.0</td>\n",
-" <td>0.998528</td>\n",
-" <td>0.046342</td>\n",
+" <td>0.998525</td>\n",
+" <td>0.038462</td>\n",
 " <td>0</td>\n",
-" <td>1.0</td>\n",
+" <td>0.9</td>\n",
 " </tr>\n",
 " <tr>\n",
 " <th>2</th>\n",
 " <td>What is the economic significance of New York ...</td>\n",
-" <td>\\nNew York City is a major global economic cen...</td>\n",
 " <td>[health care and life sciences, medical techno...</td>\n",
+" <td>\\nNew York City is a major global economic cen...</td>\n",
 " <td>[New York City's economic significance is vast...</td>\n",
 " <td>1.0</td>\n",
-" <td>0.903937</td>\n",
-" <td>0.407880</td>\n",
+" <td>0.911303</td>\n",
+" <td>0.384615</td>\n",
 " <td>0</td>\n",
 " <td>1.0</td>\n",
 " </tr>\n",
 " <tr>\n",
 " <th>3</th>\n",
 " <td>How did New York City get its name?</td>\n",
-" <td>\\nNew York City was named in honor of the Duke...</td>\n",
 " <td>[a US$1 billion research and education center ...</td>\n",
+" <td>\\nNew York City was named in honor of the Duke...</td>\n",
 " <td>[New York City got its name when it came under...</td>\n",
 " <td>1.0</td>\n",
-" <td>0.929809</td>\n",
-" <td>0.057195</td>\n",
+" <td>0.929792</td>\n",
+" <td>0.407407</td>\n",
 " <td>0</td>\n",
 " <td>1.0</td>\n",
 " </tr>\n",
 " <tr>\n",
 " <th>4</th>\n",
 " <td>What is the significance of the Statue of Libe...</td>\n",
-" <td>\\nThe Statue of Liberty is a symbol of the Uni...</td>\n",
 " <td>[(stylized I ❀ NY) is both a logo and a song t...</td>\n",
+" <td>\\nThe Statue of Liberty is a symbol of the Uni...</td>\n",
 " <td>[The Statue of Liberty in New York City holds ...</td>\n",
 " <td>0.5</td>\n",
-" <td>0.942681</td>\n",
-" <td>0.138449</td>\n",
+" <td>0.942658</td>\n",
+" <td>0.052632</td>\n",
 " <td>0</td>\n",
 " <td>1.0</td>\n",
 " </tr>\n",
@@ -410,36 +410,36 @@
 "3 How did New York City get its name? \n",
 "4 What is the significance of the Statue of Libe... \n",
 "\n",
-" answer \\\n",
-"0 \\nThe population of New York City as of 2020 i... \n",
-"1 \\nThe borough of Manhattan has the highest pop... \n",
-"2 \\nNew York City is a major global economic cen... \n",
-"3 \\nNew York City was named in honor of the Duke... \n",
-"4 \\nThe Statue of Liberty is a symbol of the Uni... \n",
-"\n",
 " contexts \\\n",
 "0 [Aeromedical Staging Squadron, and a military ... \n",
 "1 [co-extensive with New York County, the boroug... \n",
 "2 [health care and life sciences, medical techno... \n",
 "3 [a US$1 billion research and education center ... \n",
 "4 [(stylized I ❀ NY) is both a logo and a song t... \n",
 "\n",
+" answer \\\n",
+"0 \\nThe population of New York City as of 2020 i... \n",
+"1 \\nThe borough of Manhattan has the highest pop... \n",
+"2 \\nNew York City is a major global economic cen... \n",
+"3 \\nNew York City was named in honor of the Duke... \n",
+"4 \\nThe Statue of Liberty is a symbol of the Uni... \n",
+"\n",
 " ground_truths faithfulness \\\n",
 "0 [8,804,000] 1.0 \n",
 "1 [Queens] 0.0 \n",
 "2 [New York City's economic significance is vast... 1.0 \n",
 "3 [New York City got its name when it came under... 1.0 \n",
 "4 [The Statue of Liberty in New York City holds ... 0.5 \n",
 "\n",
-" answer_relevancy context_ relevancy harmfulness context_recall \n",
-"0 0.999999 0.161345 0 1.0 \n",
-"1 0.998528 0.046342 0 1.0 \n",
-"2 0.903937 0.407880 0 1.0 \n",
-"3 0.929809 0.057195 0 1.0 \n",
-"4 0.942681 0.138449 0 1.0 "
+" answer_relevancy context_relevancy harmfulness context_recall \n",
+"0 1.000000 0.320000 0 1.0 \n",
+"1 0.998525 0.038462 0 0.9 \n",
+"2 0.911303 0.384615 0 1.0 \n",
+"3 0.929792 0.407407 0 1.0 \n",
+"4 0.942658 0.052632 0 1.0 "
 ]
 },
-"execution_count": 14,
+"execution_count": 10,
 "metadata": {},
 "output_type": "execute_result"
 }

docs/metrics.md

Lines changed: 15 additions & 7 deletions
@@ -4,7 +4,7 @@
 
 This measures the factual consistency of the generated answer against the given context. This is done using a multi step paradigm that includes creation of statements from the generated answer followed by verifying each of these statements against the context. It is calculated from `answer` and `retrieved context`. The answer is scaled to (0,1) range. Higher the better.
 ```python
-from ragas.metrics.factuality import Faithfulness
+from ragas.metrics.faithfulness import Faithfulness
 faithfulness = Faithfulness()
 
 # Dataset({
@@ -19,22 +19,26 @@ results = faithfulness.score(dataset)
 
 This measures how relevant is the retrieved context to the prompt. This is done using a combination of OpenAI models and cross-encoder models. To improve the score one can try to optimize the amount of information present in the retrieved context. It is calculated from `question` and `retrieved context`.
 ```python
-from ragas.metrics.context_relevancy import ContextRelevancy
-context_rel = ContextRelevancy(strictness=3)
+from ragas.metrics import ContextRelevancy
+context_relevancy = ContextRelevancy(strictness=3)
+
+# run init_model to load the models used
+context_relevancy.init_model()
+
 # Dataset({
 # features: ['question','contexts'],
 # num_rows: 25
 # })
 dataset: Dataset
 
-results = context_rel.score(dataset)
+results = context_relevancy.score(dataset)
 ```
 
 ### `Context Recall`
 measures the recall of the retrieved context using annotated answer as ground truth. Annotated answer is taken as proxy for ground truth context. It is calculated from `ground truth` and `retrieved context`.
 
 ```python
-from ragas.metrics.context_recall import ContextRecall
+from ragas.metrics import ContextRecall
 context_recall = ContextRecall()
 # Dataset({
 # features: ['contexts','ground_truths'],
@@ -50,8 +54,12 @@ results = context_recall.score(dataset)
 
 This measures how relevant is the generated answer to the prompt. If the generated answer is incomplete or contains redundant information the score will be low. This is quantified by working out the chance of an LLM generating the given question using the generated answer. It is calculated from `question` and `answer`. Values range (0,1), higher the better.
 ```python
-from ragas.metrics.answer_relevancy import AnswerRelevancy
+from ragas.metrics import AnswerRelevancy
 answer_relevancy = AnswerRelevancy()
+
+# init_model to load models used
+answer_relevancy.init_model()
+
 # Dataset({
 # features: ['question','answer'],
 # num_rows: 25
@@ -74,7 +82,6 @@ from ragas.metrics.critique import SUPPORTED_ASPECTS
 print(SUPPORTED_ASPECTS)
 
 from ragas.metrics.critique import conciseness
-from ragas
 # Dataset({
 # features: ['question','answer'],
 # num_rows: 25
@@ -88,6 +95,7 @@ results = conciseness.score(dataset)
 from ragas.metrics.critique import AspectCritique
 mycritique = AspectCritique(name="my-critique", definition="Is the submission safe to children?", strictness=2)
 
+results = mycritique.score(dataset)
 ```
 
 

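For reference, the corrected usage documented above, pulled together into one minimal sketch. The example rows below are invented; only the `ragas.metrics` import path, `strictness`, `init_model()`, `score()`, and the `['question', 'contexts']` dataset shape come from the updated docs in this commit.

```python
from datasets import Dataset
from ragas.metrics import ContextRelevancy

context_relevancy = ContextRelevancy(strictness=3)
context_relevancy.init_model()  # load the underlying models before scoring

# Hypothetical two-row dataset with the ['question', 'contexts'] features
# described above; each row's contexts is a list of retrieved chunks.
dataset = Dataset.from_dict(
    {
        "question": [
            "What is the population of New York City as of 2020?",
            "Which borough of New York City has the highest population?",
        ],
        "contexts": [
            ["The 2020 census counted 8,804,190 residents in New York City."],
            ["Brooklyn is the most populous of New York City's five boroughs."],
        ],
    }
)

# Scoring calls the OpenAI / cross-encoder models mentioned in the docs,
# so the corresponding credentials must be configured.
results = context_relevancy.score(dataset)
```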
pyproject.toml

Lines changed: 2 additions & 1 deletion
@@ -5,7 +5,8 @@ dependencies = [
 "transformers",
 "sentence-transformers",
 "datasets",
-"langchain>=0.0.218",
+"tiktoken",
+"langchain>=0.0.288",
 "openai",
 "pydantic<2.0",
 "pysbd>=0.3.4",

src/ragas/llama_index/evaluation.py

Lines changed: 1 addition & 1 deletion
@@ -3,9 +3,9 @@
 import typing as t
 
 from datasets import Dataset
-from rich.repr import Result
 
 from ragas import evaluate as ragas_evaluate
+from ragas.evaluation import Result
 from ragas.metrics.base import Metric
 
 if t.TYPE_CHECKING:

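The `Result` swap above matters because the llama_index helper returns ragas' own evaluation result object, so the name must come from `ragas.evaluation` rather than `rich.repr`. A rough sketch of that pattern follows; the `evaluate_with_ragas` wrapper and its body are hypothetical, not the file's actual contents, and it assumes `ragas.evaluate` accepts a `metrics` keyword.

```python
from __future__ import annotations

from datasets import Dataset

from ragas import evaluate as ragas_evaluate
from ragas.evaluation import Result  # the corrected import from this commit
from ragas.metrics.base import Metric


def evaluate_with_ragas(dataset: Dataset, metrics: list[Metric]) -> Result:
    # Hypothetical wrapper: delegate to ragas and annotate the return type,
    # which is why Result must be ragas' class and not rich.repr.Result.
    return ragas_evaluate(dataset, metrics=metrics)
```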
src/ragas/metrics/__init__.py

Lines changed: 3 additions & 1 deletion
@@ -2,7 +2,9 @@
 from ragas.metrics.context_recall import ContextRecall, context_recall
 from ragas.metrics.context_relevance import ContextRelevancy, context_relevancy
 from ragas.metrics.critique import AspectCritique
-from ragas.metrics.faithfulnes import Faithfulness, faithfulness
+from ragas.metrics.faithfulness import Faithfulness, faithfulness
+
+DEFAULT_METRICS = [answer_relevancy, context_relevancy, faithfulness, context_recall]
 
 __all__ = [
 "Faithfulness",
File renamed without changes.
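The new `DEFAULT_METRICS` export bundles the four metric instances imported above, presumably as a convenient default set to pass around. A hypothetical usage sketch: whether `ragas.evaluate` accepts a `metrics` keyword like this, and the exact dataset columns, are assumptions not taken from this commit.

```python
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import DEFAULT_METRICS

# DEFAULT_METRICS holds answer_relevancy, context_relevancy,
# faithfulness, and context_recall, per the __init__.py change above.
print([type(m).__name__ for m in DEFAULT_METRICS])

# Hypothetical single-row dataset covering the columns those metrics read.
dataset = Dataset.from_dict(
    {
        "question": ["How did New York City get its name?"],
        "contexts": [["New York was named after the Duke of York in 1664."]],
        "answer": ["It was named in honor of the Duke of York."],
        "ground_truths": [["The city was renamed New York after the Duke of York."]],
    }
)

results = evaluate(dataset, metrics=DEFAULT_METRICS)
```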
