forked from ellipticmarketing/modelrelay
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscores.js
More file actions
99 lines (98 loc) · 3.58 KB
/
scores.js
File metadata and controls
99 lines (98 loc) · 3.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
/**
 * @file scores.js
 * @description Model intelligence scores (single source of truth).
 * Keys are canonical model IDs (no suffix like :free).
 * Values are normalized scores in the range (0, 1].
 * Frozen so consumers cannot mutate the shared table.
 * @type {Readonly<Record<string, number>>}
 */
export const scores = Object.freeze({
  "arcee-ai/trinity-large-preview": 0.778,
  "arcee-ai/trinity-mini": 0.35,
  "bytedance/seed-oss-36b-instruct": 0.38,
  "coder-model": 0.696,
  "codestral-latest": 0.58,
  "cognitivecomputations/dolphin-mistral-24b-venice-edition": 0.335,
  "corethink": 0.623,
  "deepseek-ai/deepseek-r1-distill-llama-8b": 0.282,
  "deepseek-ai/deepseek-r1-distill-qwen-14b": 0.377,
  "deepseek-ai/deepseek-r1-distill-qwen-32b": 0.439,
  "deepseek-ai/deepseek-r1-distill-qwen-7b": 0.226,
  "deepseek-ai/deepseek-v3.1": 0.62,
  "deepseek-ai/deepseek-v3.1-terminus": 0.684,
  "deepseek-ai/deepseek-v3.2": 0.731,
  "deepseek-r1-distill-llama-70b": 0.439,
  "deepseek/deepseek-r1-0528": 0.439,
  "devstral-2-123b-instruct-2512": 0.722,
  "gemma-3-12b-it": 0.18,
  "gemma-3-27b-it": 0.18,
  "gemma-3-4b-it": 0.428,
  "giga-potato-thinking": 0.592,
  "glm-4.6": 0.7,
  "google/gemma-2-9b-it": 0.18,
  "google/gemma-3n-e2b-it": 0.25,
  "google/gemma-3n-e4b-it": 0.38,
  "gpt-oss-120b": 0.6,
  "ibm/granite-34b-code-instruct": 0.3,
  "igenius/colosseum_355b_instruct_16k": 0.52,
  "liquid/lfm-2.5-1.2b-instruct": 0.008,
  "liquid/lfm-2.5-1.2b-thinking": 0.02,
  "llama-3.1-8b-instant": 0.14,
  "llama-3.3-70b-instruct": 0.395,
  "llama-3.3-70b-versatile": 0.395,
  "llama-4-scout-17b-16e-instruct": 0.44,
  "llama3.1-8b": 0.14,
  "llama3.3-70b": 0.395,
  "meta-llama/llama-3.2-3b-instruct": 0.106,
  "meta-llama/llama-3.3-70b-instruct": 0.395,
  "meta-llama/llama-4-maverick-17b-128e-preview": 0.62,
  "meta-llama/llama-4-scout-17b-16e-preview": 0.44,
  "meta/llama-3.1-405b-instruct": 0.44,
  "meta/llama-3.3-70b-instruct": 0.395,
  "meta/llama-4-maverick-17b-128e-instruct": 0.62,
  "meta/llama-4-scout-17b-16e-instruct": 0.44,
  "microsoft/phi-3.5-mini-instruct": 0.12,
  "microsoft/phi-4-mini-instruct": 0.14,
  "minimax/minimax-m2.5": 0.802,
  "minimaxai/minimax-m2": 0.694,
  "minimaxai/minimax-m2.1": 0.74,
  "mistral-small-3.2-24b-instruct-2506": 0.32,
  "mistralai/devstral-2-123b-instruct-2512": 0.722,
  "mistralai/magistral-small-2506": 0.45,
  "mistralai/ministral-14b-instruct-2512": 0.34,
  "mistralai/mistral-large-3-675b-instruct-2512": 0.58,
  "mistralai/mistral-medium-3-instruct": 0.48,
  "mistralai/mistral-small-3.1-24b-instruct": 0.536,
  "mistralai/mixtral-8x22b-instruct-v0.1": 0.32,
  "moonshotai/kimi-k2-instruct": 0.658,
  "moonshotai/kimi-k2-thinking": 0.713,
  "moonshotai/kimi-k2.5": 0.768,
  "nousresearch/hermes-3-llama-3.1-405b": 0.232,
  "nvidia/llama-3.1-nemotron-ultra-253b-v1": 0.56,
  "nvidia/llama-3.3-nemotron-super-49b-v1.5": 0.49,
  "nvidia/nemotron-3-nano-30b-a3b": 0.43,
  "nvidia/nemotron-nano-12b-v2-vl": 0.252,
  "nvidia/nemotron-nano-9b-v2": 0.432,
  "openai/gpt-oss-120b": 0.6,
  "openai/gpt-oss-20b": 0.42,
  "qwen-3-235b-a22b": 0.7,
  "qwen-3-32b": 0.5,
  "qwen-qwq-32b": 0.5,
  "qwen/qwen2.5-coder-32b-instruct": 0.46,
  "qwen/qwen3-235b-a22b": 0.7,
  "qwen/qwen3-32b": 0.5,
  "qwen/qwen3-4b": 0.542,
  "qwen/qwen3-coder": 0.742,
  "qwen/qwen3-coder-480b-a35b-instruct": 0.706,
  "qwen/qwen3-next-80b-a3b-instruct": 0.65,
  "qwen/qwen3-next-80b-a3b-thinking": 0.68,
  "qwen/qwen3.5-397b-a17b": 0.68,
  "qwen/qwq-32b": 0.5,
  "qwen3-235b-a22b-instruct-2507": 0.7,
  "qwen3-coder-30b-a3b-instruct": 0.706,
  "stepfun-ai/step-3.5-flash": 0.744,
  "stepfun/step-3.5-flash": 0.744,
  "stockmark/stockmark-2-100b-instruct": 0.36,
  "upstage/solar-pro-3": 0.42,
  "vision-model": 0.67,
  "z-ai/glm-4.5-air": 0.576,
  "z-ai/glm4.7": 0.738,
  "z-ai/glm5": 0.778,
});