Skip to content

Commit 6bfe107

Browse files
authored
Update monitor.py (#3627)
1 parent 78d7784 commit 6bfe107

File tree

3 files changed

+24
-39
lines changed

3 files changed

+24
-39
lines changed

fastchat/constants.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,8 @@
99

1010
# Survey Link URL (to be removed) #00729c
1111
SURVEY_LINK = """<div style='text-align: left; margin: 20px 0;'>
12-
<div style='display: inline-block; border: 2px solid #C41E3A; padding: 20px; padding-bottom: 10px; padding-top: 10px; border-radius: 5px;'>
13-
<span style='color: #C41E3A; font-weight: bold;'>New Launch! Jailbreak models at <a href='https://redarena.ai' style='color: #C41E3A; text-decoration: underline;'>RedTeam Arena</a>. </span>
12+
<div style='display: inline-block; border: 2px solid #00729c; padding: 20px; padding-bottom: 10px; padding-top: 10px; border-radius: 5px;'>
13+
<span style='color: #00729c; font-weight: bold;'>New Launch! Copilot Arena: <a href='https://marketplace.visualstudio.com/items?itemName=copilot-arena.copilot-arena' style='color: #00729c; text-decoration: underline;'>VS Code Extension</a> to compare Top LLMs</span>
1414
</div>
1515
</div>"""
1616
# SURVEY_LINK = ""

fastchat/serve/monitor/monitor.py

Lines changed: 14 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -54,19 +54,16 @@
5454

5555

5656
def recompute_final_ranking(arena_df):
57-
# compute ranking based on CI
58-
ranking = {}
59-
for i, model_a in enumerate(arena_df.index):
60-
ranking[model_a] = 1
61-
for j, model_b in enumerate(arena_df.index):
62-
if i == j:
63-
continue
64-
if (
65-
arena_df.loc[model_b]["rating_q025"]
66-
> arena_df.loc[model_a]["rating_q975"]
67-
):
68-
ranking[model_a] += 1
69-
return list(ranking.values())
57+
q025 = arena_df["rating_q025"].values
58+
q975 = arena_df["rating_q975"].values
59+
60+
sorted_q025 = np.sort(q025)
61+
insertion_indices = np.searchsorted(sorted_q025, q975, side="right")
62+
counts = len(sorted_q025) - insertion_indices
63+
64+
rankings = 1 + counts
65+
ranking_series = pd.Series(rankings, index=arena_df.index)
66+
return ranking_series.tolist()
7067

7168

7269
def arena_hard_title(date):
@@ -81,22 +78,6 @@ def arena_hard_title(date):
8178
return arena_hard_title
8279

8380

84-
def recompute_final_ranking(arena_df):
85-
# compute ranking based on CI
86-
ranking = {}
87-
for i, model_a in enumerate(arena_df.index):
88-
ranking[model_a] = 1
89-
for j, model_b in enumerate(arena_df.index):
90-
if i == j:
91-
continue
92-
if (
93-
arena_df.loc[model_b]["rating_q025"]
94-
> arena_df.loc[model_a]["rating_q975"]
95-
):
96-
ranking[model_a] += 1
97-
return list(ranking.values())
98-
99-
10081
def update_elo_components(
10182
max_num_files, elo_results_file, ban_ip_file, exclude_model_names
10283
):
@@ -861,14 +842,15 @@ def build_category_leaderboard_tab(
861842
"full_style_control",
862843
"hard_6",
863844
"hard_6_style_control",
864-
"if",
865845
"coding",
866846
"math",
867-
"multiturn",
847+
"creative_writing",
848+
"if",
868849
"long_user",
850+
"multiturn",
869851
# "no_refusal",
870852
]
871-
selected_categories_width = [110, 110, 110, 110, 110, 80, 80, 80, 80]
853+
selected_categories_width = [110, 110, 110, 110, 80, 80, 80, 110, 80, 80]
872854

873855
language_categories = [
874856
"english",

fastchat/serve/monitor/monitor_md.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
"gemini-1.5-pro-api-0409-preview",
1010
"bard-jan-24-gemini-pro",
1111
"chatgpt-4o-latest-20240808",
12+
"chatgpt-4o-latest-20240903",
1213
]
1314

1415
key_to_category_name = {
@@ -18,11 +19,12 @@
1819
"math": "Math",
1920
"if": "Instruction Following",
2021
"multiturn": "Multi-Turn",
22+
"creative_writing": "Creative Writing",
2123
"coding": "Coding",
2224
"coding_style_control": "Coding w/ Style Control",
23-
"hard_6": "Hard Prompts (Overall)",
25+
"hard_6": "Hard Prompts",
2426
"hard_english_6": "Hard Prompts (English)",
25-
"hard_6_style_control": "Hard Prompts (Overall) w/ Style Control",
27+
"hard_6_style_control": "Hard Prompts w/ Style Control",
2628
"long_user": "Longer Query",
2729
"english": "English",
2830
"chinese": "Chinese",
@@ -47,8 +49,8 @@
4749
"Multi-Turn": "Multi-Turn Conversation (>= 2 turns)",
4850
"Coding": "Coding: whether conversation contains code snippets",
4951
"Coding w/ Style Control": "Coding with Style Control",
50-
"Hard Prompts (Overall)": "Hard Prompts (Overall): details in [blog post](https://lmsys.org/blog/2024-05-17-category-hard/)",
51-
"Hard Prompts (Overall) w/ Style Control": "Hard Prompts with Style Control. See details in [blog post](https://lmsys.org/blog/2024-08-28-style-control/).",
52+
"Hard Prompts": "Hard Prompts: details in [blog post](https://lmsys.org/blog/2024-05-17-category-hard/)",
53+
"Hard Prompts w/ Style Control": "Hard Prompts with Style Control. See details in [blog post](https://lmsys.org/blog/2024-08-28-style-control/).",
5254
"Hard Prompts (English)": "Hard Prompts (English), note: the delta is to English Category. details in [blog post](https://lmsys.org/blog/2024-05-17-category-hard/)",
5355
"Longer Query": "Longer Query (>= 500 tokens)",
5456
"English": "English Prompts",
@@ -64,6 +66,7 @@
6466
"Exclude Refusal": 'Exclude model responses with refusal (e.g., "I cannot answer")',
6567
"overall_limit_5_user_vote": "overall_limit_5_user_vote",
6668
"Overall (Deprecated)": "Overall without De-duplicating Top Redundant Queries (top 0.1%). See details in [blog post](https://lmsys.org/blog/2024-05-17-category-hard/#note-enhancing-quality-through-de-duplication).",
69+
"Creative Writing": "Creative Writing",
6770
}
6871
cat_name_to_baseline = {
6972
"Hard Prompts (English)": "English",
@@ -81,7 +84,7 @@ def make_default_md_1(mirror=False):
8184
link_color = "#1976D2" # This color should be clear in both light and dark mode
8285
leaderboard_md = f"""
8386
# 🏆 Chatbot Arena LLM Leaderboard: Community-driven Evaluation for Best LLM and AI chatbots
84-
[Blog](https://blog.lmarena.ai/blog/2023/arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2403.04132) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/6GXcFg3TH8) | [Kaggle Competition](https://www.kaggle.com/competitions/lmsys-chatbot-arena)
87+
[Twitter](https://twitter.com/lmarena_ai) | [Discord](https://discord.gg/6GXcFg3TH8) | [Blog](https://blog.lmarena.ai/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2403.04132) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Kaggle Competition](https://www.kaggle.com/competitions/wsdm-cup-multilingual-chatbot-arena)
8588
"""
8689

8790
return leaderboard_md

0 commit comments

Comments (0)