 from autointent import Context
 from autointent.context.optimization_info import DecisionArtifact
 from autointent.custom_types import ListOfGenericLabels
-from autointent.metrics import PREDICTION_METRICS_MULTICLASS
+from autointent.metrics import DECISION_METRICS_MULTICLASS
 from autointent.modules.abc import BaseModule
 from autointent.schemas import Tag
 
@@ -53,7 +53,7 @@ def score_ho(self, context: Context, metrics: list[str]) -> dict[str, float]:
 
         val_labels, val_scores = get_decision_evaluation_data(context, "validation")
         decisions = self.predict(val_scores)
-        chosen_metrics = {name: fn for name, fn in PREDICTION_METRICS_MULTICLASS.items() if name in metrics}
+        chosen_metrics = {name: fn for name, fn in DECISION_METRICS_MULTICLASS.items() if name in metrics}
         self._artifact = DecisionArtifact(labels=decisions)
         return self.score_metrics_ho((val_labels, decisions), chosen_metrics)
 
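The rename only touches the lookup table; the filtering pattern itself is unchanged. A minimal sketch of that pattern, assuming `DECISION_METRICS_MULTICLASS` maps metric names to callables of `(true_labels, predicted_labels) -> float` — the toy metric and `score_ho_sketch` below are illustrative stand-ins, not the library's own:

```python
from collections.abc import Callable

# Illustrative stand-in for the real registry in autointent.metrics.
DECISION_METRICS_MULTICLASS: dict[str, Callable[[list[int], list[int]], float]] = {
    "decision_accuracy": lambda y_true, y_pred: sum(
        t == p for t, p in zip(y_true, y_pred)
    ) / len(y_true),
}

def score_ho_sketch(metrics: list[str], y_true: list[int], y_pred: list[int]) -> dict[str, float]:
    # Keep only the metrics the caller requested, as in score_ho above.
    chosen_metrics = {name: fn for name, fn in DECISION_METRICS_MULTICLASS.items() if name in metrics}
    return {name: fn(y_true, y_pred) for name, fn in chosen_metrics.items()}

print(score_ho_sketch(["decision_accuracy"], [0, 1, 2], [0, 1, 1]))  # {'decision_accuracy': 0.666...}
```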
@@ -72,7 +72,7 @@ def score_cv(self, context: Context, metrics: list[str]) -> dict[str, float]:
             msg = "No folded scores are found."
             raise RuntimeError(msg)
 
-        chosen_metrics = {name: fn for name, fn in PREDICTION_METRICS_MULTICLASS.items() if name in metrics}
+        chosen_metrics = {name: fn for name, fn in DECISION_METRICS_MULTICLASS.items() if name in metrics}
         metrics_values: dict[str, list[float]] = {name: [] for name in chosen_metrics}
         all_val_decisions = []
         for j in range(context.data_handler.n_folds):
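In the cross-validation path, the same filtered dict is evaluated once per fold and the per-fold values are then averaged. A minimal sketch of that accumulation, assuming each fold yields a `(val_labels, decisions)` pair; `folds` and `score_cv_sketch` are hypothetical names for illustration:

```python
from collections.abc import Callable

def score_cv_sketch(
    folds: list[tuple[list[int], list[int]]],
    chosen_metrics: dict[str, Callable[[list[int], list[int]], float]],
) -> dict[str, float]:
    # One list of per-fold values per chosen metric, as in score_cv above.
    metrics_values: dict[str, list[float]] = {name: [] for name in chosen_metrics}
    for val_labels, decisions in folds:
        for name, fn in chosen_metrics.items():
            metrics_values[name].append(fn(val_labels, decisions))
    # Average each metric over the folds to get a single score.
    return {name: sum(values) / len(values) for name, values in metrics_values.items()}
```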