@@ -43,7 +43,7 @@ async def calculate_scores(self) -> dict[str, np.ndarray]:
         scores = await self.task.generator.calculate_scores(self.task, self.solutions)
         accuracy_scores = scores[ACCURACY_METRIC_NAME]
         seo_scores = scores[SEO_METRIC_NAME]
-        aggregated_scores = np.where(accuracy_scores > 0.9, seo_scores, 0)
+        aggregated_scores = np.where(accuracy_scores > 0.8, seo_scores, 0)
         return aggregated_scores, scores

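For context, both gated challenges use the same pattern: np.where keeps a solution's secondary score only when its accuracy clears the cutoff and zeroes it otherwise, so lowering the cutoff from 0.9 to 0.8 admits more solutions into the secondary ranking. A minimal standalone sketch of the effect, with invented example values (the variable names and numbers are illustrative only, not taken from the repo):

    import numpy as np

    # Hypothetical per-solution scores; values are illustrative only.
    accuracy_scores = np.array([0.95, 0.85, 0.75])
    seo_scores = np.array([0.60, 0.70, 0.80])

    # Old 0.9 cutoff: only the first solution keeps its SEO score.
    old = np.where(accuracy_scores > 0.9, seo_scores, 0)  # [0.6, 0.0, 0.0]

    # New 0.8 cutoff: the second solution now passes the gate as well.
    new = np.where(accuracy_scores > 0.8, seo_scores, 0)  # [0.6, 0.7, 0.0]

The same cutoff change applies to the quality-gated hunk below.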
@@ -54,9 +54,10 @@ async def calculate_scores(self) -> dict[str, np.ndarray]:
         scores = await self.task.generator.calculate_scores(self.task, self.solutions)
         accuracy_scores = scores[ACCURACY_METRIC_NAME]
         quality_scores = scores[QUALITY_METRIC_NAME]
-        aggregated_scores = np.where(accuracy_scores > 0.9, quality_scores, 0)
+        aggregated_scores = np.where(accuracy_scores > 0.8, quality_scores, 0)
         return aggregated_scores, scores

+

 class BalancedChallenge(Challenge):
     competition_type: str = Field(default=BALANCED_COMPETITION_TYPE, description="The type of competition")
@@ -65,13 +66,13 @@ async def calculate_scores(self) -> dict[str, np.ndarray]:
         accuracy_scores = scores[ACCURACY_METRIC_NAME]
         quality_scores = scores[QUALITY_METRIC_NAME]
         seo_scores = scores[SEO_METRIC_NAME]
-        aggregated_scores = accuracy_scores * 0.4 + quality_scores * 0.3 + seo_scores * 0.3
+        aggregated_scores = accuracy_scores * 0.6 + quality_scores * 0.2 + seo_scores * 0.2
         return aggregated_scores, scores


 RESERVED_WEIGHTS = {
-    ACCURACY_COMPETITION_TYPE: 50,
-    BALANCED_COMPETITION_TYPE: 30,
+    ACCURACY_COMPETITION_TYPE: 70,
+    BALANCED_COMPETITION_TYPE: 10,
     SEO_COMPETITION_TYPE: 10,
     QUALITY_COMPETITION_TYPE: 10,
 }
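The balanced blend and the reserved weights move in the same direction: accuracy's share of the blend rises from 0.4 to 0.6 (quality and SEO each drop from 0.3 to 0.2), and the accuracy competition's entry in RESERVED_WEIGHTS rises from 50 to 70 of a total that still sums to 100. A small sketch of how the blend change can shift rankings, again with invented metric values:

    import numpy as np

    # Hypothetical metric vectors for three solutions; values are illustrative.
    accuracy = np.array([0.9, 0.8, 0.7])
    quality  = np.array([0.5, 0.9, 0.6])
    seo      = np.array([0.4, 0.6, 0.9])

    old_blend = accuracy * 0.4 + quality * 0.3 + seo * 0.3  # [0.63, 0.77, 0.73]
    new_blend = accuracy * 0.6 + quality * 0.2 + seo * 0.2  # [0.72, 0.78, 0.72]

Under the old weights the third solution (strong SEO, weak accuracy) outranks the first; under the new weights the two are effectively tied, since accuracy now dominates the blend.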