|
| 1 | +import numpy as np |
| 2 | +from Orange.classification.utils.fasterrisk.fasterrisk import ( |
| 3 | + RiskScoreOptimizer, |
| 4 | + RiskScoreClassifier, |
| 5 | +) |
| 6 | + |
| 7 | +from Orange.classification import Learner, Model |
| 8 | +from Orange.data import Table, Storage |
| 9 | +from Orange.data.filter import HasClass |
| 10 | +from Orange.preprocess import Discretize, Impute, Continuize, SelectBestFeatures |
| 11 | +from Orange.preprocess.discretize import Binning |
| 12 | +from Orange.preprocess.score import ReliefF |
| 13 | + |
| 14 | + |
| 15 | +def _change_class_var_values(y): |
| 16 | + """ |
| 17 | + Changes the class variable values from 0 and 1 to -1 and 1 or vice versa. |
| 18 | + """ |
| 19 | + return np.where(y == 0, -1, np.where(y == -1, 0, y)) |
| 20 | + |
| 21 | + |
class ScoringSheetModel(Model):
    """Prediction wrapper around a fitted fasterrisk RiskScoreClassifier."""

    def __init__(self, model):
        # The underlying fasterrisk RiskScoreClassifier instance.
        self.model = model
        super().__init__()

    def predict_storage(self, table):
        """
        Return (labels, probabilities) for the rows of `table`.

        Labels come from the wrapped classifier, mapped from its {-1, 1}
        encoding back to {0, 1}; probabilities are an (n, 2) array holding
        [P(class 0), P(class 1)] per row.
        """
        if not isinstance(table, Storage):
            raise TypeError("Data is not a subclass of Orange.data.Storage.")

        labels = _change_class_var_values(self.model.predict(table.X))
        positive_prob = self.model.predict_prob(table.X)
        probabilities = np.column_stack((1 - positive_prob, positive_prob))
        return labels, probabilities
| 36 | + |
| 37 | + |
class ScoringSheetLearner(Learner):
    """
    Learner producing a "scoring sheet": a sparse linear risk model with small
    integer coefficients, fitted with fasterrisk's RiskScoreOptimizer.

    Only a single binary target is supported (see `incompatibility_reason`).
    """

    __returns__ = ScoringSheetModel
    # Default pipeline: drop rows without a class value, binarize attributes
    # via discretization, impute missing values, and continuize the result.
    preprocessors = [HasClass(), Discretize(method=Binning()), Impute(), Continuize()]

    def __init__(
        self,
        num_attr_after_selection=20,
        num_decision_params=5,
        max_points_per_param=5,
        num_input_features=None,
        preprocessors=None,
    ):
        """
        Args:
            num_attr_after_selection: number of attributes kept by the
                ReliefF-based feature selection step (only applied when no
                custom preprocessors are given).
            num_decision_params: maximum number of decision parameters
                (non-zero coefficients, fasterrisk's 'k') in the sheet.
            max_points_per_param: bound on each coefficient; coefficients are
                restricted to [-max_points_per_param, max_points_per_param].
            num_input_features: optional cap on the number of *original*
                features used (group sparsity over binarized columns);
                None disables the group constraint.
            preprocessors: optional custom preprocessor list replacing the
                class defaults.
        """
        self.num_decision_params = num_decision_params
        self.max_points_per_param = max_points_per_param
        self.num_input_features = num_input_features
        # Binarized-feature index -> original-feature group index; filled in
        # fit_storage only when num_input_features is set.
        self.feature_to_group = None

        if preprocessors is None:
            # Shadow the class-level defaults with an instance list extended by
            # feature selection; the base Learner is then called with None so it
            # falls back to this instance attribute (verify against the
            # installed Orange Learner.__init__ behavior).
            self.preprocessors = [
                *self.preprocessors,
                SelectBestFeatures(method=ReliefF(), k=num_attr_after_selection),
            ]

        super().__init__(preprocessors=preprocessors)

    def incompatibility_reason(self, domain):
        """Return a human-readable reason why `domain` is unsupported, or None.

        The learner requires exactly one discrete target with at most two values.
        """
        reason = None
        if len(domain.class_vars) > 1 and not self.supports_multiclass:
            reason = "Too many target variables."
        elif not domain.has_discrete_class:
            reason = "Categorical class variable expected."
        elif len(domain.class_vars[0].values) > 2:
            reason = "Too many target variable values."
        return reason

    def fit_storage(self, table):
        """Fit a risk-score model on `table` and return a ScoringSheetModel.

        Raises:
            TypeError: if `table` is not an Orange Storage subclass.
            ValueError: if the class column contains missing values, or if
                the number of decision parameters cannot be reduced to a
                feasible value during optimization.
        """
        if not isinstance(table, Storage):
            raise TypeError("Data is not a subclass of Orange.data.Storage.")
        elif table.get_nan_count_class() > 0:
            raise ValueError("Class variable contains missing values.")

        if self.num_input_features is not None:
            self._generate_feature_group_index(table)

        # Instance weights are extracted but not used by RiskScoreOptimizer.
        X, y, _ = table.X, table.Y, table.W if table.has_weights() else None
        learner = RiskScoreOptimizer(
            X=X,
            # Optimizer is given labels in the {-1, 1} encoding.
            y=_change_class_var_values(y),
            k=self.num_decision_params,
            select_top_m=1,  # keep only the single best model
            lb=-self.max_points_per_param,
            ub=self.max_points_per_param,
            group_sparsity=self.num_input_features,
            featureIndex_to_groupIndex=self.feature_to_group,
        )

        self._optimize_decision_params_adjustment(learner)

        # get_models returns parallel sequences; with select_top_m=1 only the
        # first entry of each is meaningful.
        multipliers, intercepts, coefficients = learner.get_models()

        model = RiskScoreClassifier(
            multiplier=multipliers[0],
            intercept=intercepts[0],
            coefficients=coefficients[0],
            featureNames=[attribute.name for attribute in table.domain.attributes],
            # X_train is supplied only for larger sheets — presumably needed by
            # fasterrisk for score calibration; TODO confirm the >10 threshold.
            X_train=X if self.num_decision_params > 10 else None,
        )

        return ScoringSheetModel(model)

    def _optimize_decision_params_adjustment(self, learner):
        """
        Attempt to optimize (fit) the learner, reducing the number of decision
        parameters ('k') whenever optimization fails because it is too high.

        Sometimes the number of decision parameters is too high for the
        number of input features, which results in a ValueError.
        Continues until successful or 'k' cannot be reduced further.
        """
        while True:
            try:
                learner.optimize()
                return True
            except ValueError as e:
                # Retry with one fewer decision parameter.
                learner.k -= 1
                if learner.k < 1:
                    # Raise a custom error when k falls below 1
                    raise ValueError(
                        "The number of input features is too low for the current settings."
                    ) from e

    def _generate_feature_group_index(self, table):
        """
        Store a feature-index -> group-index mapping in self.feature_to_group.

        Binarized features derived from the same original feature share a group
        index. Assumes every attribute has a compute_value referencing its
        original variable (i.e. the data went through discretization) — TODO
        confirm for custom preprocessor pipelines.
        """
        original_feature_names = [
            attribute.compute_value.variable.name
            for attribute in table.domain.attributes
        ]
        # NOTE: set iteration order is arbitrary, so group numbering may vary
        # between runs; only the grouping itself matters.
        feature_to_group_index = {
            feature: idx for idx, feature in enumerate(set(original_feature_names))
        }
        feature_to_group = [
            feature_to_group_index[feature] for feature in original_feature_names
        ]
        self.feature_to_group = np.asarray(feature_to_group)
| 146 | + |
| 147 | + |
if __name__ == "__main__":
    # Smoke test: fit a scoring sheet on the heart-disease dataset (fetched
    # over the network) and run prediction on the training data.
    learner = ScoringSheetLearner(20, 5, 10, None)
    heart_data = Table("https://datasets.biolab.si/core/heart_disease.tab")
    fitted = learner(heart_data)
    fitted(heart_data)
0 commit comments