evaluation_mosaicml_mpt-7b.json
{
  "name": "../eval/heavy",
  "uuid": "96ee1b21-6c3b-4cd7-a3d7-6b5da7815a49",
  "model": "mosaicml/mpt-7b",
  "creation_date": "2024_06_03-23_06_46",
  "eval_metrics": {
    "icl": {
      "mmlu_zeroshot": 0.264640179119612,
      "hellaswag_zeroshot": 0.7613025307655334,
      "jeopardy": 0.46818186044692994,
      "bigbench_qa_wikidata": 0.7109394073486328,
      "arc_easy": 0.7234848737716675,
      "arc_challenge": 0.43430033326148987,
      "mmlu_fewshot": 0.2883020364924481,
      "bigbench_misconceptions": 0.5205479264259338,
      "copa": 0.800000011920929,
      "siqa": 0.5312179923057556,
      "commonsense_qa": 0.22113022208213806,
      "piqa": 0.8046789765357971,
      "openbook_qa": 0.41999998688697815,
      "bigbench_novel_concepts": 0.5625,
      "bigbench_strange_stories": 0.6666666865348816,
      "bigbench_strategy_qa": 0.5648754835128784,
      "lambada_openai": 0.7054142951965332,
      "hellaswag": 0.7654849886894226,
      "winograd": 0.8717948794364929,
      "winogrande": 0.6858721375465393,
      "bigbench_conlang_translation": 0.06707317382097244,
      "bigbench_language_identification": 0.24860000610351562,
      "bigbench_conceptual_combinations": 0.3300970792770386,
      "bigbench_elementary_math_qa": 0.276467502117157,
      "bigbench_dyck_languages": 0.3179999887943268,
      "agi_eval_lsat_ar": 0.25217390060424805,
      "bigbench_cs_algorithms": 0.4840908944606781,
      "bigbench_logical_deduction": 0.23999999463558197,
      "bigbench_operators": 0.34285715222358704,
      "bigbench_repeat_copy_logic": 0.25,
      "simple_arithmetic_nospaces": 0.08100000023841858,
      "simple_arithmetic_withspaces": 0.09200000017881393,
      "math_qa": 0.2628226578235626,
      "logi_qa": 0.27496159076690674,
      "pubmed_qa_labeled": 0.4339999854564667,
      "squad": 0.5940397381782532,
      "agi_eval_lsat_rc": 0.25,
      "agi_eval_lsat_lr": 0.2450980395078659,
      "coqa": 0.45383942127227783,
      "bigbench_understanding_fables": 0.24867725372314453,
      "boolq": 0.7538226246833801,
      "agi_eval_sat_en": 0.2669903039932251,
      "winogender_mc_female": 0.550000011920929,
      "winogender_mc_male": 0.550000011920929,
      "enterprise_pii_classification": 0.593225359916687,
      "bbq": 0.5585124113342979,
      "gpqa_main": 0.234375,
      "gpqa_diamond": 0.2222222238779068,
      "gsm8k_cot": 0.050796058028936386,
      "agi_eval_sat_math_cot": 0.022727273404598236,
      "aqua_cot": 0.02448979578912258,
      "svamp_cot": 0.30666667222976685,
      "triviaqa_sm_sub": 0.49399998784065247
    }
  },
  "aggregated_task_categories_centered": {
    "commonsense reasoning": 0.3344781428816608,
    "language understanding": 0.4421306223843177,
    "reading comprehension": 0.25455210023772035,
    "safety": 0.1258688975464214,
    "symbolic problem solving": 0.151250306933642,
    "world knowledge": 0.26039833038522486
  },
  "aggregated_centered_results": 0.2570849136240029,
  "aggregated_results": 0.41782945136611016,
  "rw_small": 0.6931304186582565,
  "rw_small_centered": 0.48044879185525996,
  "95%_CI_above": 0.5406992905132182,
  "95%_CI_above_centered": 0.3929484872199338,
  "99%_CI_above": 0.5586362421512604,
  "99%_CI_above_centered": 0.4430027846556688,
  "low_variance_datasets": 0.5486367377367887,
  "low_variance_datasets_centered": 0.44281775846540244,
  "_filename": "exp_data/evals/evaluation_mosaicml_mpt-7b.json",
  "missing tasks": "[]",
  "Core": 0.44281775846540244,
  "Extended": 0.2570849136240029
}
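
For reference, a minimal Python sketch (not part of the DCLM codebase) of how a result file like this one could be loaded and its aggregate scores read back; the path simply reuses the "_filename" field recorded in the JSON, and the comments note that "Core" and "Extended" carry the same values as "low_variance_datasets_centered" and "aggregated_centered_results" in this file.

import json

# Load the evaluation record; the path matches the "_filename" field above.
with open("exp_data/evals/evaluation_mosaicml_mpt-7b.json") as f:
    results = json.load(f)

print("model:   ", results["model"])     # mosaicml/mpt-7b
print("Core:    ", results["Core"])      # same value as low_variance_datasets_centered
print("Extended:", results["Extended"])  # same value as aggregated_centered_results

# Per-task in-context-learning accuracies are nested under eval_metrics -> icl.
icl = results["eval_metrics"]["icl"]
for task, score in sorted(icl.items(), key=lambda kv: kv[1], reverse=True)[:5]:
    print(f"{task:35s} {score:.3f}")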