-
Notifications
You must be signed in to change notification settings - Fork 27
Add LogScaler transformer #932
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 8 commits
f1dbfa5
1f7541e
d1c054c
3f1bf3d
1c5289d
57e2a94
c7414ec
d5bc2ed
8c41306
d63495b
3c3b211
2e33bc0
e13e166
a1b7753
9ab83b6
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -8,7 +8,7 @@ | |
| import pandas as pd | ||
| import scipy | ||
|
|
||
| from rdt.errors import TransformerInputError | ||
| from rdt.errors import InvalidDataError, TransformerInputError | ||
| from rdt.transformers.base import BaseTransformer | ||
| from rdt.transformers.null import NullTransformer | ||
| from rdt.transformers.utils import learn_rounding_digits | ||
|
|
@@ -626,3 +626,114 @@ def _reverse_transform(self, data): | |
| recovered_data = np.stack([recovered_data, data[:, -1]], axis=1) # noqa: PD013 | ||
|
|
||
| return super()._reverse_transform(recovered_data) | ||
|
|
||
|
|
||
class LogScaler(FloatFormatter):
    """Transformer for numerical data using log.

    This transformer scales numerical values using log and an optional constant.

    Null values are replaced using a ``NullTransformer``.

    Args:
        missing_value_replacement (object):
            Indicate what to replace the null values with. If an integer or float is given,
            replace them with the given value. If the strings ``'mean'`` or ``'mode'``
            are given, replace them with the corresponding aggregation and if ``'random'``
            replace each null value with a random value in the data range. Defaults to
            ``'mean'``.
        missing_value_generation (str or None):
            The way missing values are being handled. There are three strategies:

                * ``random``: Randomly generates missing values based on the percentage of
                  missing values.
                * ``from_column``: Creates a binary column that describes whether the original
                  value was missing. Then use it to recreate missing values.
                * ``None``: Do nothing with the missing values on the reverse transform. Simply
                  pass whatever data we get through.
        constant (float):
            The constant to set as the 0-value for the log-based transform. Defaults to ``0``
            (do not modify the 0-value of the data).
        invert (bool):
            Whether to invert the data with respect to the constant value. If ``False``, do not
            invert the data (all values will be greater than the constant value). If ``True``,
            invert the data (all the values will be less than the constant value).
            Defaults to ``False``.
        learn_rounding_scheme (bool):
            Whether or not to learn what place to round to based on the data seen during
            ``fit``. If ``True``, the data returned by ``reverse_transform`` will be rounded
            to that place. Defaults to ``False``.
    """

    def __init__(
        self,
        missing_value_replacement='mean',
        missing_value_generation='random',
        constant: float = 0,
        invert: bool = False,
        learn_rounding_scheme: bool = False,
    ):
        self.constant = constant
        self.invert = invert
        super().__init__(
            missing_value_replacement=missing_value_replacement,
            missing_value_generation=missing_value_generation,
            learn_rounding_scheme=learn_rounding_scheme,
        )

    def _validate_data(self, data: pd.Series):
        """Validate that the data can be log-transformed with the configured constant.

        Args:
            data (pandas.Series):
                The numerical values to validate (nulls already replaced).

        Raises:
            InvalidDataError:
                If ``invert`` is ``True`` and any value is not strictly below ``constant``,
                or if ``invert`` is ``False`` and any value is not strictly above it —
                either case would make ``np.log`` receive a non-positive argument.
        """
        column_name = self.get_input_column()
        if self.invert:
            if not all(data < self.constant):
                raise InvalidDataError(
                    f"Unable to apply a log transform to column '{column_name}' due to constant"
                    ' being too small.'
                )
        else:
            if not all(data > self.constant):
                raise InvalidDataError(
                    f"Unable to apply a log transform to column '{column_name}' due to constant"
                    ' being too large.'
                )

    def _fit(self, data):
        """Fit the transformer to the data and validate it against ``constant``.

        Args:
            data (pandas.Series):
                Data to fit.
        """
        super()._fit(data)
        data = super()._transform(data)

        # ``super()._transform`` may append a null-indicator column; only the first
        # column holds the numerical values to validate.
        if data.ndim > 1:
            self._validate_data(data[:, 0])
        else:
            self._validate_data(data)

    def _transform(self, data):
        """Apply the log transform after the parent's numerical transform.

        Args:
            data (pandas.Series):
                Data to transform.

        Returns:
            numpy.ndarray:
                Log-transformed values (first column only when a null-indicator
                column is present).
        """
        data = super()._transform(data)

        if data.ndim > 1:
            self._validate_data(data[:, 0])
            if self.invert:
                data[:, 0] = np.log(self.constant - data[:, 0])
            else:
                data[:, 0] = np.log(data[:, 0] - self.constant)
        else:
            self._validate_data(data)
            if self.invert:
                data = np.log(self.constant - data)
            else:
                data = np.log(data - self.constant)

        return data

    def _reverse_transform(self, data):
        """Invert the log transform, then delegate to the parent's reverse transform.

        Args:
            data (pandas.Series or numpy.ndarray):
                Data to revert.

        Returns:
            pandas.Series:
                Reverted data.
        """
        if not isinstance(data, np.ndarray):
            data = data.to_numpy()

        # NOTE(review): when ``data`` arrives as an ndarray, the exponentiation below
        # mutates it in place — confirm callers do not rely on their input being unchanged.
        if data.ndim > 1:
            if self.invert:
                data[:, 0] = self.constant - np.exp(data[:, 0])
            else:
                data[:, 0] = np.exp(data[:, 0]) + self.constant
        else:
            if self.invert:
                data = self.constant - np.exp(data)
            else:
                data = np.exp(data) + self.constant

        return super()._reverse_transform(data)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -7,6 +7,7 @@ | |
| ClusterBasedNormalizer, | ||
| FloatFormatter, | ||
| GaussianNormalizer, | ||
| LogScaler, | ||
| ) | ||
|
|
||
|
|
||
|
|
@@ -560,3 +561,58 @@ def test_out_of_bounds_reverse_transform(self): | |
|
|
||
| # Assert | ||
| assert isinstance(reverse, pd.DataFrame) | ||
|
|
||
|
|
||
class TestLogScaler:
    def test_learn_rounding(self):
        """Test that the rounding scheme learned during ``fit`` is applied on reverse."""
        # Setup
        data = pd.DataFrame({'test': [1.0, np.nan, 1.5]})
        transformer = LogScaler(
            missing_value_generation=None,
            missing_value_replacement='mean',
            learn_rounding_scheme=True,
        )
        expected = pd.DataFrame({'test': [1.0, 1.2, 1.5]})

        # Run
        transformer.fit(data, 'test')
        transformed = transformer.transform(data)
        reverse = transformer.reverse_transform(transformed)

        # Assert
        np.testing.assert_array_equal(reverse, expected)

    def test_missing_value_generation_from_column(self):
        """Test that missing values are recreated from the generated null column."""
        # Setup
        data = pd.DataFrame({'test': [1.0, np.nan, 1.5]})
        transformer = LogScaler(
            missing_value_generation='from_column',
            missing_value_replacement='mean',
        )

        # Run
        transformer.fit(data, 'test')
        transformed = transformer.transform(data)
        reverse = transformer.reverse_transform(transformed)

        # Assert
        np.testing.assert_array_equal(reverse, data)

    def test_missing_value_generation_random(self):
        """Test random missing-value generation with ``invert`` and a custom constant."""
        # Setup
        data = pd.DataFrame({'test': [1.0, np.nan, 1.5, 1.5]})
        transformer = LogScaler(
            missing_value_generation='random',
            missing_value_replacement='mode',
            invert=True,
            constant=3,
        )
        expected = pd.DataFrame({'test': [np.nan, 1.5, 1.5, 1.5]})

        # Run
        transformer.fit(data, 'test')
        transformed = transformer.transform(data)
        reverse = transformer.reverse_transform(transformed)

        # Assert
        np.testing.assert_array_equal(reverse, expected)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
"Default to 0" -> "Defaults to 0".
Also, either add the `` quotation marks around the 0, False, True values here, or remove them from the other values in the docstring, so it's consistent.