Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
LCBProDataset,
LCBProEvaluator,
)

lcb_pro_reader_cfg = dict(
Expand All @@ -29,7 +30,14 @@
)

lcb_pro_eval_cfg = dict(
evaluator=dict()
evaluator=dict(
type=LCBProEvaluator,
submit_url='http://lightcpverifier.ailab.ailab.ai/submit',
result_url='http://lightcpverifier.ailab.ailab.ai/result/{submission_id}',
timeout=10,
poll_interval=10,
max_retries=3,
)
)

lcb_pro_datasets = [
Expand Down
1 change: 1 addition & 0 deletions opencompass/datasets/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@
from .lcsts import * # noqa: F401, F403
from .leval import * # noqa: F401, F403
from .livecodebench import * # noqa: F401, F403
from .livecodebench_pro import * # noqa: F401, F403
from .livemathbench import * # noqa: F401, F403
from .livereasonbench import * # noqa: F401, F403
from .livestembench import * # noqa: F401, F403
Expand Down
4 changes: 4 additions & 0 deletions opencompass/datasets/livecodebench_pro/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from .livecodebench_pro import LCBProDataset # noqa: F401, F403
from .livecodebench_pro_evaluator import LCBProEvaluator # noqa: F401, F403

__all__ = ['LCBProDataset', 'LCBProEvaluator']
26 changes: 26 additions & 0 deletions opencompass/datasets/livecodebench_pro/livecodebench_pro.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import json

from datasets import Dataset

from opencompass.utils import get_data_path # noqa: F401, F403

from ..base import BaseDataset


class LCBProDataset(BaseDataset):
    """Dataset loader for LiveCodeBench-Pro.

    Reads a JSONL file in which each line is a JSON object carrying an
    ``id_ddm`` identifier and a ``dialogs`` list whose first entry holds
    the problem statement under the ``content`` key.
    """

    @staticmethod
    def load(path, **kwargs):
        """Load the LiveCodeBench-Pro JSONL file into a ``datasets.Dataset``.

        Args:
            path (str): Location of the JSONL data file; resolved through
                ``get_data_path`` before opening.
            **kwargs: Accepted for loader-interface compatibility; unused.

        Returns:
            datasets.Dataset: Rows with ``id_ddm`` and ``problem`` columns.
        """
        path = get_data_path(path)
        dataset_list = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                # Skip blank lines (e.g. a trailing newline at EOF) so
                # json.loads is never handed an empty string, which raises.
                if not line:
                    continue
                data = json.loads(line)
                dataset_list.append({
                    'id_ddm': data['id_ddm'],
                    # The first dialog turn carries the problem statement.
                    'problem': data['dialogs'][0]['content'],
                })
        return Dataset.from_list(dataset_list)
Loading
Loading