
Commit 233c11c

[recipe] r1: support R1 Benchmark Evaluation (verl-project#777)
verl-project#708 Support Evaluation:
- [x] GPQA Diamond (English)
- [x] LiveCodeBench (code)
- [x] AIME 2024 (math)
- [x] CNMO 2024 (math)

Test:
- [x] DS-R1-Distill-Qwen2.5-1.5B
- [x] DS-R1

---

Example eval scripts are in `recipes/r1/run_r1_distill_qwen.sh`.

---

Eval results of DS-R1-Distill-Qwen2.5-1.5B (k=8):

| Dataset | Test Results | Reported |
| -- | -- | -- |
| GPQA Diamond | 35.3 | 33.8 |
| LiveCodeBench | 16.9 | 16.9 |
| AIME 2024 | 30.4 | 28.9 |
| CNMO 2024 (en) | 45.1 | - |
| CNMO 2024 (zh) | 41.0 | - |

---

Eval results of DS-R1:

| Dataset | Test Results (k=1) | Test Results (k=4) | Reported |
| -- | -- | -- | -- |
| GPQA Diamond | 67.7 | 69.6 | 71.5 |
| LiveCodeBench | 64.7 | 63.1 | 65.9 |
| AIME 2024 | 86.7 | 79.2 | 79.8 |
| CNMO 2024 | 75.0 | 78.5 | 78.8 |

The final eval results will be placed [here](https://huggingface.co/datasets/dyyyyyyyy/r1-benchmark-eval).
1 parent b6cd6b7 commit 233c11c
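For readers skimming the tables above, here is a minimal sketch of the aggregation that the new `recipe/r1/main_eval.py` performs: each prompt's k sampled responses are scored against the ground truth, the per-prompt scores are averaged, and those means are then averaged per data source. The `toy_score` verifier below is a placeholder, not the recipe's rule-based checker.

```python
# Sketch of the k-sample aggregation mirrored from recipe/r1/main_eval.py.
# toy_score is a placeholder verifier (exact string match), not the real one.
from collections import defaultdict
import numpy as np

def toy_score(data_source, response, ground_truth):
    return float(response.strip() == ground_truth)

rows = [  # (data_source, k sampled responses, ground truth)
    ("Maxwell-Jia/AIME_2024", ["72", "72", "17"], "72"),
    ("Maxwell-Jia/AIME_2024", ["5", "5", "5"], "5"),
]

per_source = defaultdict(list)
for source, responses, gt in rows:
    # per-prompt mean over k samples, as in process_item()
    per_source[source].append(np.mean([toy_score(source, r, gt) for r in responses]))

# per-source mean over prompts, as in main()
print({f"test_score/{s}": float(np.mean(v)) for s, v in per_source.items()})
# -> {'test_score/Maxwell-Jia/AIME_2024': 0.833...}
```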

File tree

14 files changed: +697 −28 lines

.github/workflows/e2e_eval_aime24.yml

Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
name: e2e_eval_aime24

on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch
  push:
    branches:
      - main
      - v0.2.x
    paths:
      - "**/*.py"
      - .github/workflows/e2e_eval_aime24.yml
  pull_request:
    branches:
      - main
      - v0.2.x
    paths:
      - "**/*.py"
      - "verl/trainer/config/*.yaml"
      - .github/workflows/e2e_eval_aime24.yml
      - "tests/e2e/*.sh"

# Declare permissions just read content.
permissions:
  contents: read

jobs:
  e2e_eval_aime24:
    runs-on: [self-hosted, l20-1]
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1"
      HF_HUB_ENABLE_HF_TRANSFER: 1
    container:
      image: hiyouga/verl:ngc-th2.6.0-cu120-vllm0.8.2
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install hf_transfer
          pip3 install -e .[test,gpu,math]
      - name: Prepare aime24 dataset
        run: |
          ray stop --force
          python3 recipe/r1/data_process.py --task aime2024
      - name: Running generation and evaluation in aime2024
        run: |
          ray stop --force
          bash tests/e2e/run_r1_distill_qwen_aime24_eval.sh

recipe/r1/__init__.py

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

recipe/r1/config/evaluation.yaml

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
data:
  path: /tmp/math_Qwen2-7B-Instruct.parquet
  prompt_key: prompt
  response_key: responses
  data_source_key: data_source
  reward_model_key: reward_model

custom_reward_function:
  path: null
  name: compute_score
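The keys above name columns in the generation parquet that `main_eval.py` reads. As a rough illustration only (file path and contents are invented for the example, not produced by this commit), a compatible input could be written like this:

```python
# Illustrative parquet matching the evaluation.yaml column keys
# (prompt_key / response_key / data_source_key / reward_model_key).
import pandas as pd

df = pd.DataFrame({
    "prompt": [[{"role": "user", "content": "What is 2 + 2?"}]],
    "responses": [["4", "4", "five"]],                 # k generations per prompt
    "data_source": ["Maxwell-Jia/AIME_2024"],
    "reward_model": [{"style": "rule", "ground_truth": "4"}],
})
df.to_parquet("/tmp/toy_eval_input.parquet")
```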

recipe/r1/data_process.py

Lines changed: 210 additions & 0 deletions
@@ -0,0 +1,210 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the dataset to parquet format
"""

import os
from datasets import load_dataset, concatenate_datasets
from functools import partial

from verl.utils.hdfs_io import copy, makedirs
import argparse


def example_map_fn(example, idx, process_fn, data_source, ability, split):
    question, solution = process_fn(example)
    data = {
        "data_source": data_source,
        "prompt": [{
            "role": "user",
            "content": question
        }],
        "ability": ability,
        "reward_model": {
            "style": "rule",
            "ground_truth": solution
        },
        "extra_info": {
            'split': split,
            'index': idx
        }
    }
    return data


def build_aime2024_dataset():

    def process_aime2024(example):
        return example["Problem"], str(example["Answer"])

    data_source = 'Maxwell-Jia/AIME_2024'
    print(f"Loading the {data_source} dataset from huggingface...", flush=True)
    dataset = load_dataset(data_source, split="train")
    map_fn = partial(example_map_fn,
                     process_fn=process_aime2024,
                     data_source=data_source,
                     ability="English",
                     split="test")
    dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names)
    return dataset


def build_gpqa_dimond_dataset():
    import random
    GPQA_QUERY_TEMPLATE = "Answer the following multiple choice question. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering.\n\n{Question}\n\nA) {A}\nB) {B}\nC) {C}\nD) {D}"

    def process_gpqa_diamond(example):
        choices = [example["Incorrect Answer 1"], example["Incorrect Answer 2"], example["Incorrect Answer 3"]]
        random.shuffle(choices)
        gold_index = random.randint(0, 3)
        choices.insert(gold_index, example["Correct Answer"])
        query_prompt = GPQA_QUERY_TEMPLATE.format(A=choices[0],
                                                  B=choices[1],
                                                  C=choices[2],
                                                  D=choices[3],
                                                  Question=example["Question"])
        gold_choice = "ABCD"[gold_index]
        return query_prompt, gold_choice

    data_source = 'Idavidrein/gpqa'
    print(f"Loading the {data_source} dataset from huggingface...", flush=True)

    dataset = load_dataset(data_source, "gpqa_diamond", split="train")
    map_fn = partial(example_map_fn,
                     process_fn=process_gpqa_diamond,
                     data_source=data_source,
                     ability="Math",
                     split="test")
    dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names)
    return dataset


def build_cnmo2024_dataset():

    def process_cnmo2024(example):
        return example["question"], example["answer"]

    data_source = 'opencompass/LiveMathBench'
    print(f"Loading the {data_source} dataset from huggingface...", flush=True)

    dataset_en = load_dataset(data_source, "v202412_CNMO_en", split="test")
    map_fn_en = partial(example_map_fn,
                        process_fn=process_cnmo2024,
                        data_source='opencompass/cnmo2024_en',
                        ability="Math",
                        split="test")
    dataset_en = dataset_en.map(map_fn_en, with_indices=True, remove_columns=dataset_en.column_names)

    dataset_zh = load_dataset(data_source, "v202412_CNMO_cn", split="test")
    map_fn_zh = partial(example_map_fn,
                        process_fn=process_cnmo2024,
                        data_source='opencompass/cnmo2024_zh',
                        ability="Math",
                        split="test")
    dataset_zh = dataset_zh.map(map_fn_zh, with_indices=True, remove_columns=dataset_zh.column_names)

    dataset = concatenate_datasets([dataset_en, dataset_zh])
    return dataset


def build_livecodebench_dataset():
    import json, pickle, zlib, base64

    def process_livecodebench(example):
        # Construct Query Prompt
        # From https://github.com/LiveCodeBench/LiveCodeBench/blob/998c52d394b836f15fff3b9a29866191108ff81b/lcb_runner/prompts/code_generation.py#L140
        query_prompt = (
            "You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.\n\n"
            f"Question: {example['question_content']}\n\n")
        if example["starter_code"]:
            query_prompt += (
                "You will use the following starter code to write the solution to the problem and enclose your code within delimiters.\n"
                f"```python\n{example['starter_code']}\n```")
        else:
            query_prompt += (
                "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows. Ensure that when the python program runs, it reads the inputs, runs the algorithm and writes output to STDOUT."
                f"```python\n# YOUR CODE HERE\n```")

        # Construct test cases
        public_test_cases = json.loads(example["public_test_cases"])
        try:
            private_test_cases = json.loads(example["private_test_cases"])
        except:
            private_test_cases = json.loads(
                pickle.loads(zlib.decompress(base64.b64decode(example["private_test_cases"].encode("utf-8")))))
        full_test_cases = public_test_cases + private_test_cases

        metadata = json.loads(example["metadata"])
        test_cases = {
            "inputs": [t["input"] for t in full_test_cases],
            "outputs": [t["output"] for t in full_test_cases],
            "fn_name": metadata.get("func_name", None),
        }
        text_cases_compressed = base64.b64encode(zlib.compress(pickle.dumps(json.dumps(test_cases)))).decode("utf-8")
        return query_prompt, text_cases_compressed

    data_source = 'livecodebench/code_generation_lite'
    print(f"Loading the {data_source} dataset from huggingface...", flush=True)
    dataset = load_dataset(data_source, split="test")
    # R1 Evaluation use LiveCodeBench 24.08-25.01
    dataset = dataset.filter(lambda line: "2024-08-00T00:00:00" <= line["contest_date"] < "2025-01-00T00:00:00")
    map_fn = partial(example_map_fn,
                     process_fn=process_livecodebench,
                     data_source=data_source,
                     ability="Code",
                     split="test")

    dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names, num_proc=8)
    return dataset


TASK2DATA = {
    "aime2024": build_aime2024_dataset,
    "gpqa_diamond": build_gpqa_dimond_dataset,
    "cnmo2024": build_cnmo2024_dataset,
    "livecodebench": build_livecodebench_dataset,
}
SUPPORTED_TASKS = TASK2DATA.keys()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_dir', default='~/data/r1')
    parser.add_argument('--hdfs_dir', default=None)
    parser.add_argument('--tasks', default="all")

    args = parser.parse_args()

    if args.tasks.lower() == "all":
        args.tasks = SUPPORTED_TASKS
    else:
        args.tasks = [task.strip() for task in args.tasks.split(',') if task.strip()]
        for task in args.tasks:
            if task not in SUPPORTED_TASKS:
                raise NotImplementedError(f"{task} has not been supported.")

    datasets = []
    for task in args.tasks:
        datasets.append(TASK2DATA[task]())
    test_dataset = concatenate_datasets(datasets)

    local_dir = args.local_dir
    hdfs_dir = args.hdfs_dir

    test_dataset.to_parquet(os.path.join(local_dir, 'test.parquet'))

    if hdfs_dir is not None:
        makedirs(hdfs_dir)

        copy(src=local_dir, dst=hdfs_dir)
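A hypothetical way to invoke this preprocessing script and sanity-check the schema produced by `example_map_fn`; the output directory and the inspection snippet are illustrative, not part of the commit:

```python
# Build two of the supported splits into /tmp/r1 (path chosen for the example),
# then peek at one row of the resulting parquet.
import os
import subprocess
import pandas as pd

os.makedirs("/tmp/r1", exist_ok=True)
subprocess.run(
    ["python3", "recipe/r1/data_process.py",
     "--tasks", "aime2024,gpqa_diamond", "--local_dir", "/tmp/r1"],
    check=True,
)

row = pd.read_parquet("/tmp/r1/test.parquet").iloc[0]
print(row["data_source"])            # e.g. 'Maxwell-Jia/AIME_2024'
print(row["prompt"][0]["role"])      # 'user'
print(row["reward_model"]["style"])  # 'rule'
```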

recipe/r1/main_eval.py

Lines changed: 106 additions & 0 deletions
@@ -0,0 +1,106 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Offline evaluate the performance of a generated file using reward model and ground truth verifier.
The input is a parquet file that contains N generated sequences and (optional) the ground truth.

"""

import hydra
from verl.utils.fs import copy_to_local
import pandas as pd
import numpy as np
from tqdm import tqdm
from collections import defaultdict
import ray


def get_custom_reward_fn(config):
    import importlib.util, os

    reward_fn_config = config.get("custom_reward_function") or {}
    file_path = reward_fn_config.get("path")
    if not file_path:
        return None

    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Reward function file '{file_path}' not found.")

    spec = importlib.util.spec_from_file_location("custom_module", file_path)
    module = importlib.util.module_from_spec(spec)
    try:
        spec.loader.exec_module(module)
    except Exception as e:
        raise RuntimeError(f"Error loading module from '{file_path}': {e}")

    function_name = reward_fn_config.get("name")

    if not hasattr(module, function_name):
        raise AttributeError(f"Reward function '{function_name}' not found in '{file_path}'.")

    print(f"using customized reward function '{function_name}' from '{file_path}'")

    return getattr(module, function_name)


@ray.remote
def process_item(reward_fn, data_source, response_lst, reward_data):
    ground_truth = reward_data['ground_truth']
    score_lst = [reward_fn(data_source, r, ground_truth) for r in response_lst]
    return data_source, np.mean(score_lst)


@hydra.main(config_path='config', config_name='evaluation', version_base=None)
def main(config):
    local_path = copy_to_local(config.data.path)
    dataset = pd.read_parquet(local_path)
    prompts = dataset[config.data.prompt_key]
    responses = dataset[config.data.response_key]
    data_sources = dataset[config.data.data_source_key]
    reward_model_data = dataset[config.data.reward_model_key]

    total = len(dataset)

    # Initialize Ray
    if not ray.is_initialized():
        ray.init()

    # evaluate test_score based on data source
    data_source_reward = defaultdict(list)
    compute_score = get_custom_reward_fn(config)

    # Create remote tasks
    remote_tasks = [
        process_item.remote(compute_score, data_sources[i], responses[i], reward_model_data[i]) for i in range(total)
    ]

    # Process results as they come in
    with tqdm(total=total) as pbar:
        while len(remote_tasks) > 0:
            # Use ray.wait to get completed tasks
            done_ids, remote_tasks = ray.wait(remote_tasks)
            for result_id in done_ids:
                data_source, score = ray.get(result_id)
                data_source_reward[data_source].append(score)
                pbar.update(1)

    metric_dict = {}
    for data_source, rewards in data_source_reward.items():
        metric_dict[f'test_score/{data_source}'] = np.mean(rewards)

    print(metric_dict)


if __name__ == '__main__':
    main()
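For reference, a minimal custom reward function compatible with the `reward_fn(data_source, response, ground_truth)` call inside `process_item`; the file name and the exact-match rule below are placeholders, not the recipe's actual verifiers:

```python
# my_reward.py -- toy verifier for illustration only; the recipe's real
# checkers are rule-based math/code verifiers, not exact string match.
def compute_score(data_source, solution_str, ground_truth):
    # Treat the last non-empty line of the generation as the prediction.
    lines = [l.strip() for l in solution_str.strip().splitlines() if l.strip()]
    prediction = lines[-1] if lines else ""
    return float(prediction == ground_truth)
```

It could then be selected with hydra overrides such as `custom_reward_function.path=my_reward.py custom_reward_function.name=compute_score` when running `python3 recipe/r1/main_eval.py data.path=<generations.parquet>`.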
