Skip to content

Commit 99fd46c

Browse files
authored
fix: fix the Data Evaluation Detail page (#154)
* fix: the Data Evaluation Detail Page should show the model used
* fix: fix the time format displayed
* fix: fix the Data Evaluation Detail page
1 parent 4ed049c commit 99fd46c

File tree

6 files changed

+69
-34
lines changed

6 files changed

+69
-34
lines changed

frontend/src/pages/DataEvaluation/Create/CreateTask.tsx

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
// TypeScript
21
import React, { useState, useEffect } from 'react';
32
import { Button, Form, Input, Select, message, Modal, Row, Col, Table, Space } from 'antd';
43
import { EyeOutlined } from '@ant-design/icons';
@@ -9,6 +8,7 @@ import { ModelI } from "@/pages/SettingsPage/ModelAccess.tsx";
98
import { createEvaluationTaskUsingPost } from "@/pages/DataEvaluation/evaluation.api.ts";
109
import { queryPromptTemplatesUsingGet } from "@/pages/DataEvaluation/evaluation.api.ts";
1110
import PreviewPromptModal from "@/pages/DataEvaluation/Create/PreviewPrompt.tsx";
11+
import { EVAL_METHODS, TASK_TYPES } from "@/pages/DataEvaluation/evaluation.const.tsx";
1212

1313
interface Dataset {
1414
id: string;
@@ -35,15 +35,6 @@ interface CreateTaskModalProps {
3535
onSuccess: () => void;
3636
}
3737

38-
const TASK_TYPES = [
39-
{ label: 'QA评估', value: 'QA' },
40-
{ label: 'COT评估', value: 'COT' },
41-
];
42-
43-
const EVAL_METHODS = [
44-
{ label: '模型自动评估', value: 'AUTO' },
45-
];
46-
4738
const DEFAULT_EVAL_METHOD = 'AUTO';
4839
const DEFAULT_TASK_TYPE = 'QA';
4940

frontend/src/pages/DataEvaluation/Detail/components/Overview.tsx

Lines changed: 26 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,9 @@
11
import { useState } from 'react';
2-
import { Descriptions, Empty, DescriptionsProps, Table, Button, message } from 'antd';
3-
import { CheckCircle, XCircle, Clock as ClockIcon } from 'lucide-react';
2+
import { Descriptions, Empty, DescriptionsProps, Table, Button } from 'antd';
43
import { EyeOutlined } from '@ant-design/icons';
5-
import { EvaluationStatus } from '../../evaluation.model';
64
import PreviewPromptModal from "@/pages/DataEvaluation/Create/PreviewPrompt.tsx";
7-
8-
const statusMap = {
9-
[EvaluationStatus.PENDING]: { color: 'blue', text: '待处理', icon: <ClockIcon className="mr-1" size={14} /> },
10-
[EvaluationStatus.RUNNING]: { color: 'processing', text: '进行中', icon: <ClockIcon className="mr-1" size={14} /> },
11-
[EvaluationStatus.COMPLETED]: { color: 'success', text: '已完成', icon: <CheckCircle className="mr-1" size={14} /> },
12-
[EvaluationStatus.FAILED]: { color: 'error', text: '失败', icon: <XCircle className="mr-1" size={14} /> },
13-
[EvaluationStatus.PAUSED]: { color: 'warning', text: '已暂停', icon: <ClockIcon className="mr-1" size={14} /> },
14-
};
5+
import { formatDateTime } from "@/utils/unit.ts";
6+
import { evalTaskStatusMap, getEvalMethod, getEvalType, getSource } from "@/pages/DataEvaluation/evaluation.const.tsx";
157

168
const Overview = ({ task }) => {
179
const [previewVisible, setPreviewVisible] = useState(false);
@@ -23,8 +15,6 @@ const Overview = ({ task }) => {
2315
setPreviewVisible(true);
2416
};
2517

26-
const statusInfo = statusMap[task.status] || { color: 'default', text: '未知状态' };
27-
2818
// 基本信息
2919
const items: DescriptionsProps["items"] = [
3020
{
@@ -37,10 +27,30 @@ const Overview = ({ task }) => {
3727
label: "名称",
3828
children: task.name,
3929
},
30+
{
31+
key: "evalType",
32+
label: "评估类型",
33+
children: getEvalType(task.taskType),
34+
},
35+
{
36+
key: "evalMethod",
37+
label: "评估方式",
38+
children: getEvalMethod(task.evalMethod),
39+
},
4040
{
4141
key: "status",
4242
label: "状态",
43-
children: statusInfo.text || "未知",
43+
children: evalTaskStatusMap[task.status]?.label || "未知",
44+
},
45+
{
46+
key: "source",
47+
label: "评估数据",
48+
children: getSource(task.sourceType) + task.sourceName,
49+
},
50+
{
51+
key: "evalConfig.modelName",
52+
label: "模型",
53+
children: task.evalConfig?.modelName || task.evalConfig?.modelId,
4454
},
4555
{
4656
key: "createdBy",
@@ -50,12 +60,12 @@ const Overview = ({ task }) => {
5060
{
5161
key: "createdAt",
5262
label: "创建时间",
53-
children: task.createdAt,
63+
children: formatDateTime(task.createdAt),
5464
},
5565
{
5666
key: "updatedAt",
5767
label: "更新时间",
58-
children: task.updatedAt,
68+
children: formatDateTime(task.updatedAt),
5969
},
6070
{
6171
key: "description",

frontend/src/pages/DataEvaluation/evaluation.const.tsx

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,35 @@
1-
import { formatDate } from "@/utils/unit";
1+
import { formatDateTime } from "@/utils/unit";
22
import { BarChart3 } from "lucide-react";
33
import { EvaluationStatus, EvaluationTask } from "@/pages/DataEvaluation/evaluation.model.ts";
44

5+
export const TASK_TYPES = [
6+
{ label: 'QA评估', value: 'QA' },
7+
{ label: 'COT评估', value: 'COT' },
8+
];
9+
10+
export const EVAL_METHODS = [
11+
{ label: '模型自动评估', value: 'AUTO' },
12+
];
13+
14+
export const getEvalType = (type: string) => {
15+
return TASK_TYPES.find((item) => item.value === type)?.label;
16+
};
17+
18+
export const getEvalMethod = (type: string) => {
19+
return EVAL_METHODS.find((item) => item.value === type)?.label;
20+
};
21+
22+
export const getSource = (type: string) => {
23+
switch (type) {
24+
case "DATASET":
25+
return "数据集 - ";
26+
case "SYNTHESIS":
27+
return "合成任务 - ";
28+
default:
29+
return "-";
30+
}
31+
};
32+
533
export const evalTaskStatusMap: Record<
634
string,
735
{
@@ -41,8 +69,8 @@ export function mapEvaluationTask(task: Partial<EvaluationTask>): EvaluationTask
4169
return {
4270
...task,
4371
status: evalTaskStatusMap[task.status || EvaluationStatus.PENDING],
44-
createdAt: formatDate(task.createdAt),
45-
updatedAt: formatDate(task.updatedAt),
72+
createdAt: formatDateTime(task.createdAt),
73+
updatedAt: formatDateTime(task.updatedAt),
4674
description: task.description,
4775
icon: <BarChart3 />,
4876
iconColor: task.ratio_method === "DATASET" ? "bg-blue-100" : "bg-green-100",

frontend/src/pages/RatioTask/ratio.const.tsx

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { formatDate } from "@/utils/unit";
1+
import { formatDateTime } from "@/utils/unit";
22
import { RatioTaskItem, RatioStatus } from "./ratio.model";
33
import { BarChart3, Calendar, Database } from "lucide-react";
44
import { Link } from "react-router";
@@ -43,8 +43,8 @@ export function mapRatioTask(task: Partial<RatioTaskItem>): RatioTaskItem {
4343
return {
4444
...task,
4545
status: ratioTaskStatusMap[task.status || RatioStatus.PENDING],
46-
createdAt: formatDate(task.created_at),
47-
updatedAt: formatDate(task.updated_at),
46+
createdAt: formatDateTime(task.created_at),
47+
updatedAt: formatDateTime(task.updated_at),
4848
description: task.description,
4949
icon: <BarChart3 />,
5050
iconColor: task.ratio_method === "DATASET" ? "bg-blue-100" : "bg-green-100",

runtime/datamate-python/app/module/evaluation/interface/evaluation.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
from app.module.evaluation.service.prompt_template_service import PromptTemplateService
2525
from app.module.evaluation.service.evaluation import EvaluationTaskService
2626
from app.module.shared.schema.common import StandardResponse, TaskStatus
27+
from app.module.system.service.common_service import get_model_by_id
2728

2829
router = APIRouter(
2930
prefix="",
@@ -79,6 +80,10 @@ async def create_evaluation_task(
7980
if existing_task.scalar_one_or_none():
8081
raise HTTPException(status_code=400, detail=f"Evaluation task with name '{request.name}' already exists")
8182

83+
model_config = await get_model_by_id(db, request.eval_config.model_id)
84+
if not model_config:
85+
raise HTTPException(status_code=400, detail=f"Model with id '{request.eval_config.model_id}' not found")
86+
8287
# 创建评估任务
8388
task = EvaluationTask(
8489
id=str(uuid.uuid4()),
@@ -90,7 +95,8 @@ async def create_evaluation_task(
9095
source_name=request.source_name,
9196
eval_prompt=request.eval_prompt,
9297
eval_config=json.dumps({
93-
"model_id": request.eval_config.model_id,
98+
"modelId": request.eval_config.model_id,
99+
"modelName": model_config.model_name,
94100
"dimensions": request.eval_config.dimensions,
95101
}),
96102
status=TaskStatus.PENDING.value,

runtime/datamate-python/app/module/evaluation/service/evaluation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def get_eval_prompt(self, item: EvaluationItem) -> str:
4343

4444
async def execute(self):
4545
eval_config = json.loads(self.task.eval_config)
46-
model_config = await get_model_by_id(self.db, eval_config.get("model_id"))
46+
model_config = await get_model_by_id(self.db, eval_config.get("modelId"))
4747
semaphore = asyncio.Semaphore(10)
4848
files = (await self.db.execute(
4949
select(EvaluationFile).where(EvaluationFile.task_id == self.task.id)

0 commit comments

Comments
 (0)