
Commit fd16925

Refactor TrustyAI fairness metrics namespaces (#156)
1 parent cae4f49 commit fd16925

File tree: 2 files changed, +15 -10 lines

src/trustyai/metrics/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 # pylint: disable = import-error, invalid-name, wrong-import-order, no-name-in-module
 """General model classes"""
 from trustyai import _default_initializer  # pylint: disable=unused-import
-from org.kie.trustyai.explainability.metrics import (
+from org.kie.trustyai.metrics.explainability import (
     ExplainabilityMetrics as _ExplainabilityMetrics,
 )

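The only functional change in this file is the Java package path; the `_ExplainabilityMetrics` alias is unchanged, so Python callers are unaffected. A minimal sanity check for the new namespace, assuming that importing `trustyai` (which pulls in `_default_initializer`, as above) starts the JVM:

import trustyai  # assumption: importing the package runs _default_initializer and boots the JVM

# New Java package path introduced by this commit; the old
# org.kie.trustyai.explainability.metrics path is expected to no longer resolve.
from org.kie.trustyai.metrics.explainability import ExplainabilityMetrics

print(ExplainabilityMetrics)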

src/trustyai/metrics/fairness/group.py

Lines changed: 14 additions & 9 deletions
@@ -5,7 +5,12 @@
 import numpy as np
 import pandas as pd
 from jpype import JInt
-from org.kie.trustyai.explainability.metrics import FairnessMetrics
+from org.kie.trustyai.metrics.fairness.group import (
+    DisparateImpactRatio,
+    GroupStatisticalParityDifference,
+    GroupAverageOddsDifference,
+    GroupAveragePredictiveValueDifference,
+)

 from trustyai.model import Value, PredictionProvider, Model
 from trustyai.utils.data_conversions import (

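The rest of the diff is a mechanical rename: each static method on the old `FairnessMetrics` facade becomes a `calculate` call on a dedicated class from `org.kie.trustyai.metrics.fairness.group`. The mapping, as it appears in the hunks below:

FairnessMetrics.groupStatisticalParityDifference      -> GroupStatisticalParityDifference.calculate
FairnessMetrics.groupDisparateImpactRatio             -> DisparateImpactRatio.calculate
FairnessMetrics.groupAverageOddsDifference            -> GroupAverageOddsDifference.calculate
FairnessMetrics.groupAveragePredictiveValueDifference -> GroupAveragePredictiveValueDifference.calculate
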
@@ -37,7 +42,7 @@ def statistical_parity_difference(
 ) -> float:
     """Calculate Statistical Parity Difference between privileged and unprivileged dataframes"""
     favorable_prediction_object = one_output_convert(favorable)
-    return FairnessMetrics.groupStatisticalParityDifference(
+    return GroupStatisticalParityDifference.calculate(
         to_trusty_dataframe(
             data=privileged, outputs=outputs, feature_names=feature_names
         ),

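The public Python signatures are untouched; only the Java call they delegate to changes. A hedged usage sketch for `statistical_parity_difference`, assuming (beyond what this hunk shows) that the function also takes an `unprivileged` frame and that a plain favorable output value is accepted via `one_output_convert`; the optional-looking `outputs`/`feature_names` arguments seen here are left at their defaults:

import pandas as pd
from trustyai.metrics.fairness.group import statistical_parity_difference

# hypothetical toy data: "approved" is the model output, "gender" the protected attribute
df = pd.DataFrame({"gender": [0, 0, 1, 1], "approved": [0, 1, 1, 1]})
privileged = df[df.gender == 1]
unprivileged = df[df.gender == 0]  # assumption: the function compares against an unprivileged frame

spd = statistical_parity_difference(
    privileged=privileged,
    unprivileged=unprivileged,  # assumed parameter name, per the docstring above
    favorable=1,                # assumption: raw value converted by one_output_convert
)
print(spd)  # 0.0 would indicate statistical parity between the two groups
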
@@ -63,7 +68,7 @@ def statistical_parity_difference_model(
     _jsamples = to_trusty_dataframe(
         data=samples, no_outputs=True, feature_names=feature_names
     )
-    return FairnessMetrics.groupStatisticalParityDifference(
+    return GroupStatisticalParityDifference.calculate(
         _jsamples,
         model,
         _column_selector_to_index(privilege_columns, samples),

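The `_model` variants compute the same metric against live predictions from a `PredictionProvider` over unlabeled samples. A hedged sketch, assuming (beyond what this hunk shows) that the wrapper also takes `privilege_values` and `favorable` arguments and that `trustyai.model.Model` wraps a plain prediction callable:

import pandas as pd
from trustyai.model import Model
from trustyai.metrics.fairness.group import statistical_parity_difference_model

samples = pd.DataFrame({"gender": [0, 1, 0, 1], "age": [25, 40, 31, 52]})

# assumption: Model wraps a callable mapping a feature frame to an output frame
model = Model(lambda df: pd.DataFrame({"approved": (df["age"] > 30).astype(int)}))

spd = statistical_parity_difference_model(
    samples=samples,
    model=model,
    privilege_columns=["gender"],  # resolved to indices via _column_selector_to_index in this hunk
    privilege_values=[1],          # assumed parameter: values identifying the privileged group
    favorable=1,                   # assumed parameter: favorable model output
)
print(spd)
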
@@ -81,7 +86,7 @@ def disparate_impact_ratio(
 ) -> float:
     """Calculate Disparate Impact Ration between privileged and unprivileged dataframes"""
     favorable_prediction_object = one_output_convert(favorable)
-    return FairnessMetrics.groupDisparateImpactRatio(
+    return DisparateImpactRatio.calculate(
         to_trusty_dataframe(
             data=privileged, outputs=outputs, feature_names=feature_names
         ),

@@ -107,7 +112,7 @@ def disparate_impact_ratio_model(
     _jsamples = to_trusty_dataframe(
         data=samples, no_outputs=True, feature_names=feature_names
     )
-    return FairnessMetrics.groupDisparateImpactRatio(
+    return DisparateImpactRatio.calculate(
         _jsamples,
         model,
         _column_selector_to_index(privilege_columns, samples),

@@ -135,7 +140,7 @@ def average_odds_difference(
     _positive_class = [Value(v) for v in positive_class]
     # determine privileged columns
     _privilege_columns = _column_selector_to_index(privilege_columns, test)
-    return FairnessMetrics.groupAverageOddsDifference(
+    return GroupAverageOddsDifference.calculate(
         to_trusty_dataframe(data=test, outputs=outputs, feature_names=feature_names),
         to_trusty_dataframe(data=truth, outputs=outputs, feature_names=feature_names),
         _privilege_columns,

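`average_odds_difference` (and `average_predictive_value_difference` below) compare a frame of model predictions (`test`) with a ground-truth frame (`truth`). A hedged sketch using only parameter names visible in these hunks; raw Python values are assumed to be accepted for `privilege_values` and `positive_class`, since the code wraps them in `Value(...)`:

import pandas as pd
from trustyai.metrics.fairness.group import average_odds_difference

truth = pd.DataFrame({"gender": [0, 0, 1, 1], "approved": [1, 0, 1, 1]})  # ground-truth labels
test = truth.assign(approved=[1, 1, 1, 0])                                # model predictions, same rows

aod = average_odds_difference(
    test=test,
    truth=truth,
    privilege_columns=["gender"],
    privilege_values=[1],  # wrapped into Value(...) by the function, as these hunks show
    positive_class=[1],    # likewise wrapped into Value(...)
)
print(aod)  # 0.0 would mean equal error rates across privileged and unprivileged groups
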
@@ -160,7 +165,7 @@ def average_odds_difference_model(
     _positive_class = [Value(v) for v in positive_class]
     # determine privileged columns
     _privilege_columns = _column_selector_to_index(privilege_columns, samples)
-    return FairnessMetrics.groupAverageOddsDifference(
+    return GroupAverageOddsDifference.calculate(
         _jsamples, model, _privilege_columns, _privilege_values, _positive_class
     )

@@ -182,7 +187,7 @@ def average_predictive_value_difference(
     _privilege_values = [Value(v) for v in privilege_values]
     _positive_class = [Value(v) for v in positive_class]
     _privilege_columns = _column_selector_to_index(privilege_columns, test)
-    return FairnessMetrics.groupAveragePredictiveValueDifference(
+    return GroupAveragePredictiveValueDifference.calculate(
         to_trusty_dataframe(data=test, outputs=outputs, feature_names=feature_names),
         to_trusty_dataframe(data=truth, outputs=outputs, feature_names=feature_names),
         _privilege_columns,

@@ -205,6 +210,6 @@ def average_predictive_value_difference_model(
     _positive_class = [Value(v) for v in positive_class]
     # determine privileged columns
     _privilege_columns = _column_selector_to_index(privilege_columns, samples)
-    return FairnessMetrics.groupAveragePredictiveValueDifference(
+    return GroupAveragePredictiveValueDifference.calculate(
         _jsamples, model, _privilege_columns, _privilege_values, _positive_class
     )
