diff --git a/isic_challenge_scoring/__init__.py b/isic_challenge_scoring/__init__.py
index 711a643..2d58d5a 100644
--- a/isic_challenge_scoring/__init__.py
+++ b/isic_challenge_scoring/__init__.py
@@ -1,10 +1,16 @@
 from importlib.metadata import PackageNotFoundError, version

 from isic_challenge_scoring.classification import ClassificationMetric, ClassificationScore
-from isic_challenge_scoring.segmentation import SegmentationScore
+from isic_challenge_scoring.segmentation import SegmentationMetric, SegmentationScore
 from isic_challenge_scoring.types import ScoreException

-__all__ = ['ClassificationScore', 'SegmentationScore', 'ScoreException', 'ClassificationMetric']
+__all__ = [
+    'ClassificationMetric',
+    'ClassificationScore',
+    'ScoreException',
+    'SegmentationMetric',
+    'SegmentationScore',
+]

 try:
     __version__ = version('isic-challenge-scoring')
diff --git a/isic_challenge_scoring/__main__.py b/isic_challenge_scoring/__main__.py
index dc27a9b..2a0ad18 100644
--- a/isic_challenge_scoring/__main__.py
+++ b/isic_challenge_scoring/__main__.py
@@ -6,7 +6,7 @@
 import click_pathlib

 from isic_challenge_scoring.classification import ClassificationMetric, ClassificationScore
-from isic_challenge_scoring.segmentation import SegmentationScore
+from isic_challenge_scoring.segmentation import SegmentationMetric, SegmentationScore
 from isic_challenge_scoring.types import ScoreException

 DirectoryPath = click_pathlib.Path(exists=True, file_okay=False, dir_okay=True, readable=True)
@@ -23,9 +23,17 @@ def cli(output: str) -> None:
 @click.pass_context
 @click.argument('truth_dir', type=DirectoryPath)
 @click.argument('prediction_dir', type=DirectoryPath)
-def segmentation(ctx: click.Context, truth_dir: pathlib.Path, prediction_dir: pathlib.Path) -> None:
+@click.option(
+    '-m',
+    '--metric',
+    type=click.Choice([metric.value for metric in SegmentationMetric]),
+    default=SegmentationMetric.THRESHOLD_JACCARD.value,
+)
+def segmentation(
+    ctx: click.Context, truth_dir: pathlib.Path, prediction_dir: pathlib.Path, metric: str
+) -> None:
     try:
-        score = SegmentationScore.from_dir(truth_dir, prediction_dir)
+        score = SegmentationScore.from_dir(truth_dir, prediction_dir, SegmentationMetric(metric))
     except ScoreException as e:
         raise click.ClickException(str(e))

diff --git a/isic_challenge_scoring/classification.py b/isic_challenge_scoring/classification.py
index baf79cd..585bc67 100644
--- a/isic_challenge_scoring/classification.py
+++ b/isic_challenge_scoring/classification.py
@@ -74,7 +74,7 @@ def __init__(
                 truth_probabilities, prediction_probabilities, truth_weights.validation_weight
             )
         elif target_metric == ClassificationMetric.AVERAGE_PRECISION:
-            self.overall = self.macro_average['ap']
+            self.overall = self.macro_average.at['ap']
             per_category_ap = pd.Series(
                 [
                     metrics.average_precision(
@@ -87,7 +87,7 @@
             )
             self.validation = per_category_ap.mean()
         elif target_metric == ClassificationMetric.AUC:
-            self.overall = self.macro_average['auc']
+            self.overall = self.macro_average.at['auc']
             per_category_auc = pd.Series(
                 [
                     metrics.auc(
diff --git a/isic_challenge_scoring/segmentation.py b/isic_challenge_scoring/segmentation.py
index 3df765b..c73b6f4 100644
--- a/isic_challenge_scoring/segmentation.py
+++ b/isic_challenge_scoring/segmentation.py
@@ -1,6 +1,7 @@
 from __future__ import annotations

 from dataclasses import dataclass
+import enum
 import pathlib
 from typing import Iterable, cast

@@ -13,11 +14,17 @@
 from isic_challenge_scoring.unzip import unzip_all


+class SegmentationMetric(enum.Enum):
+    JACCARD = 'jaccard'  # 2016/1 2016/2b 2017/1 2018/2
+    THRESHOLD_JACCARD = 'threshold_jaccard'  # 2018/1
+    AUC = 'auc'  # 2017/2
+
+
 @dataclass(init=False)
 class SegmentationScore(Score):
     macro_average: pd.Series

-    def __init__(self, image_pairs: Iterable[ImagePair]) -> None:
+    def __init__(self, image_pairs: Iterable[ImagePair], target_metric: SegmentationMetric) -> None:
         # TODO: Add weighting
         confusion_matrics = pd.DataFrame(
             [
@@ -53,8 +60,14 @@ def __init__(self, image_pairs: Iterable[ImagePair]) -> None:

         self.macro_average = per_image.mean(axis='index').rename('macro_average', inplace=True)

-        self.overall = self.macro_average.at['threshold_jaccard']
-        self.validation = self.macro_average.at['threshold_jaccard']
+        if target_metric == SegmentationMetric.JACCARD:
+            self.overall = self.macro_average.at['jaccard']
+            self.validation = self.macro_average.at['jaccard']
+        elif target_metric == SegmentationMetric.THRESHOLD_JACCARD:
+            self.overall = self.macro_average.at['threshold_jaccard']
+            self.validation = self.macro_average.at['threshold_jaccard']
+        elif target_metric == SegmentationMetric.AUC:
+            raise NotImplementedError

     def to_string(self) -> str:
         output = super().to_string()
@@ -68,13 +81,21 @@ def to_dict(self) -> ScoreDict:
         return output

     @classmethod
-    def from_dir(cls, truth_path: pathlib.Path, prediction_path: pathlib.Path) -> SegmentationScore:
+    def from_dir(
+        cls,
+        truth_path: pathlib.Path,
+        prediction_path: pathlib.Path,
+        target_metric: SegmentationMetric,
+    ) -> SegmentationScore:
         image_pairs = iter_image_pairs(truth_path, prediction_path)
-        return cls(image_pairs)
+        return cls(image_pairs, target_metric)

     @classmethod
     def from_zip_file(
-        cls, truth_zip_file: pathlib.Path, prediction_zip_file: pathlib.Path
+        cls,
+        truth_zip_file: pathlib.Path,
+        prediction_zip_file: pathlib.Path,
+        target_metric: SegmentationMetric,
     ) -> SegmentationScore:
         truth_path, truth_temp_dir = unzip_all(truth_zip_file)
         # TODO: If an exception occurs while unzipping prediction_zip_file, truth_temp_dir is not
@@ -82,7 +103,7 @@
         prediction_path, prediction_temp_dir = unzip_all(prediction_zip_file)

         try:
-            score = cls.from_dir(truth_path, prediction_path)
+            score = cls.from_dir(truth_path, prediction_path, target_metric)
         finally:
             truth_temp_dir.cleanup()
             prediction_temp_dir.cleanup()
diff --git a/tests/test_classification.py b/tests/test_classification.py
index da536b0..8c71bcb 100644
--- a/tests/test_classification.py
+++ b/tests/test_classification.py
@@ -17,4 +17,5 @@ def test_score(classification_truth_file_path, classification_prediction_file_pa
         classification_prediction_file_path,
         target_metric,
     )
+    assert isinstance(score.overall, float)
     assert isinstance(score.validation, float)
diff --git a/tests/test_segmentation.py b/tests/test_segmentation.py
index b7615db..6afc3ef 100644
--- a/tests/test_segmentation.py
+++ b/tests/test_segmentation.py
@@ -1,5 +1,19 @@
-from isic_challenge_scoring.segmentation import SegmentationScore
+import pytest

+from isic_challenge_scoring.segmentation import SegmentationMetric, SegmentationScore

-def test_score(segmentation_truth_path, segmentation_prediction_path):
-    assert SegmentationScore.from_dir(segmentation_truth_path, segmentation_prediction_path)
+
+@pytest.mark.parametrize(
+    'target_metric',
+    [
+        SegmentationMetric.JACCARD,
+        SegmentationMetric.THRESHOLD_JACCARD,
+        # SegmentationMetric.AUC,
+    ],
+)
+def test_score(segmentation_truth_path, segmentation_prediction_path, target_metric):
+    score = SegmentationScore.from_dir(
+        segmentation_truth_path, segmentation_prediction_path, target_metric
+    )
+    assert isinstance(score.overall, float)
+    assert isinstance(score.validation, float)
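
Usage note (not part of the patch): a minimal sketch of the new API with the changes above applied. The `truth/` and `predictions/` paths are hypothetical placeholders for directories of paired ground-truth and prediction masks; the new `-m`/`--metric` option exposes the same choice on the command line, defaulting to `threshold_jaccard`.

    import pathlib

    from isic_challenge_scoring import SegmentationMetric, SegmentationScore

    # Score prediction masks against ground-truth masks with the plain Jaccard
    # metric; THRESHOLD_JACCARD works the same way, while AUC is declared but
    # not yet implemented and raises NotImplementedError.
    score = SegmentationScore.from_dir(
        pathlib.Path('truth'),
        pathlib.Path('predictions'),
        SegmentationMetric.JACCARD,
    )
    print(score.to_string())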