
Commit 8a8255f

Merge pull request #466 from ahn1340/test_metric

Test metric

2 parents bd8e948 + a99d1d3

4 files changed, +184 -37 lines changed


autosklearn/evaluation/abstract_evaluator.py

Lines changed: 2 additions & 2 deletions
@@ -213,9 +213,9 @@ def _loss(self, y_true, y_hat, all_scoring_functions=None):
                               all_scoring_functions=all_scoring_functions)
 
         if hasattr(score, '__len__'):
-            err = {key: 1 - score[key] for key in score}
+            err = {key: self.metric._optimum - score[key] for key in score}
         else:
-            err = 1 - score
+            err = self.metric._optimum - score
 
         return err
 
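This hunk replaces the hard-coded `1 - score` with the metric's declared optimum, so loss-style metrics whose best value is 0 (e.g. mean squared error) are no longer offset by one. A minimal standalone sketch of the resulting conversion (the function name is illustrative, not the repository's code):

def score_to_loss(score, optimum):
    # Convert a score (or a dict of scores, when several scoring
    # functions were evaluated) into a loss to be minimized.
    if hasattr(score, '__len__'):
        return {key: optimum - score[key] for key in score}
    return optimum - score

# accuracy: optimum 1, score 0.95 -> loss 0.05
print(score_to_loss(0.95, optimum=1))
# mean squared error: the scorer negates it (greater_is_better=False),
# so with optimum 0 a raw MSE of 2.5 arrives as -2.5 -> loss 2.5
print(score_to_loss(-2.5, optimum=0))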

autosklearn/metrics/__init__.py

Lines changed: 35 additions & 15 deletions
@@ -11,10 +11,11 @@
 
 
 class Scorer(object, metaclass=ABCMeta):
-    def __init__(self, name, score_func, sign, kwargs):
+    def __init__(self, name, score_func, optimum, sign, kwargs):
         self.name = name
         self._kwargs = kwargs
         self._score_func = score_func
+        self._optimum = optimum
         self._sign = sign
 
     @abstractmethod
@@ -133,8 +134,8 @@ def __call__(self, y_true, y_pred, sample_weight=None):
         return self._sign * self._score_func(y_true, y_pred, **self._kwargs)
 
 
-def make_scorer(name, score_func, greater_is_better=True, needs_proba=False,
-                needs_threshold=False, **kwargs):
+def make_scorer(name, score_func, optimum=1, greater_is_better=True,
+                needs_proba=False, needs_threshold=False, **kwargs):
     """Make a scorer from a performance metric or loss function.
 
     Factory inspired by scikit-learn which wraps scikit-learn scoring functions
@@ -146,6 +147,10 @@ def make_scorer(name, score_func, greater_is_better=True, needs_proba=False,
         Score function (or loss function) with signature
         ``score_func(y, y_pred, **kwargs)``.
 
+    optimum : int or float, default=1
+        The best score achievable by the score function, i.e. maximum in case of
+        scorer function and minimum in case of loss function.
+
     greater_is_better : boolean, default=True
         Whether score_func is a score function (default), meaning high is good,
         or a loss function, meaning low is good. In the latter case, the
@@ -174,41 +179,56 @@ def make_scorer(name, score_func, greater_is_better=True, needs_proba=False,
         cls = _ThresholdScorer
     else:
         cls = _PredictScorer
-    return cls(name, score_func, sign, kwargs)
+    return cls(name, score_func, optimum, sign, kwargs)
 
 
 # Standard regression scores
-r2 = make_scorer('r2', sklearn.metrics.r2_score)
+r2 = make_scorer('r2',
+                 sklearn.metrics.r2_score)
 mean_squared_error = make_scorer('mean_squared_error',
                                  sklearn.metrics.mean_squared_error,
+                                 optimum=0,
                                  greater_is_better=False)
 mean_absolute_error = make_scorer('mean_absolute_error',
                                   sklearn.metrics.mean_absolute_error,
+                                  optimum=0,
                                   greater_is_better=False)
 median_absolute_error = make_scorer('median_absolute_error',
                                     sklearn.metrics.median_absolute_error,
+                                    optimum=0,
                                     greater_is_better=False)
 
 # Standard Classification Scores
-accuracy = make_scorer('accuracy', sklearn.metrics.accuracy_score)
+accuracy = make_scorer('accuracy',
+                       sklearn.metrics.accuracy_score)
 balanced_accuracy = make_scorer('balanced_accuracy',
                                 classification_metrics.balanced_accuracy)
-f1 = make_scorer('f1', sklearn.metrics.f1_score)
+f1 = make_scorer('f1',
+                 sklearn.metrics.f1_score)
 
 # Score functions that need decision values
-roc_auc = make_scorer('roc_auc', sklearn.metrics.roc_auc_score,
-                      greater_is_better=True, needs_threshold=True)
+roc_auc = make_scorer('roc_auc',
+                      sklearn.metrics.roc_auc_score,
+                      greater_is_better=True,
+                      needs_threshold=True)
 average_precision = make_scorer('average_precision',
                                 sklearn.metrics.average_precision_score,
                                 needs_threshold=True)
-precision = make_scorer('precision', sklearn.metrics.precision_score)
-recall = make_scorer('recall', sklearn.metrics.recall_score)
+precision = make_scorer('precision',
+                        sklearn.metrics.precision_score)
+recall = make_scorer('recall',
+                     sklearn.metrics.recall_score)
 
 # Score function for probabilistic classification
-log_loss = make_scorer('log_loss', sklearn.metrics.log_loss,
-                       greater_is_better=False, needs_proba=True)
-pac_score = make_scorer('pac_score', classification_metrics.pac_score,
-                        greater_is_better=True, needs_proba=True)
+log_loss = make_scorer('log_loss',
+                       sklearn.metrics.log_loss,
+                       optimum=0,
+                       greater_is_better=False,
+                       needs_proba=True)
+pac_score = make_scorer('pac_score',
+                        classification_metrics.pac_score,
+                        greater_is_better=True,
+                        needs_proba=True)
 # TODO what about mathews correlation coefficient etc?
 
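With the new `optimum` argument, a custom loss-style metric can declare its best achievable value instead of implicitly inheriting 1. A hedged usage sketch built only from the signature shown above (the `rmse` name and the lambda are illustrative, not part of this PR):

import sklearn.metrics
import autosklearn.metrics

# A lower-is-better metric whose best achievable value is 0, so the
# internal loss becomes 0 - (-RMSE) = RMSE rather than 1 - (-RMSE).
rmse = autosklearn.metrics.make_scorer(
    name='rmse',
    score_func=lambda y_true, y_pred:
        sklearn.metrics.mean_squared_error(y_true, y_pred) ** 0.5,
    optimum=0,
    greater_is_better=False,
)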

example/example_metrics.py

Lines changed: 2 additions & 0 deletions
@@ -55,6 +55,7 @@ def main():
     accuracy_scorer = autosklearn.metrics.make_scorer(
         name="accu",
         score_func=accuracy,
+        optimum=1,
         greater_is_better=True,
         needs_proba=False,
         needs_threshold=False,
@@ -77,6 +78,7 @@ def main():
     accuracy_scorer = autosklearn.metrics.make_scorer(
         name="accu_add",
         score_func=accuracy_wk,
+        optimum=1,
         greater_is_better=True,
         needs_proba=False,
         needs_threshold=False,
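Once built, the scorer behaves as before when called directly; the explicit `optimum=1` only matters when auto-sklearn converts the score into a loss internally. A small hedged sketch of what the example's `accuracy_scorer` computes (the array values are illustrative):

import numpy as np

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])

# __call__ applies sign * score_func, so this prints 0.75
print(accuracy_scorer(y_true, y_pred))
# Internally, _loss now reports optimum - score = 1 - 0.75 = 0.25,
# identical to the old hard-coded behaviour because optimum is 1 here.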
