Commit 9fdf203

fix linter
1 parent: c86fd0d

3 files changed, +42 -22 lines changed

skglm/experimental/quantile_huber.py (19 additions, 8 deletions)
@@ -17,7 +17,8 @@ class QuantileHuber(BaseDatafit):
     ----------
     delta : float, positive
         Width of the quadratic region around the origin. Larger values create
-        more smoothing. As delta approaches 0, this approaches the standard Pinball loss.
+        more smoothing. As delta approaches 0, this approaches the standard
+        Pinball loss.

     quantile : float, between 0 and 1
         The desired quantile level. For example, 0.5 corresponds to the median.
@@ -34,8 +35,8 @@ class QuantileHuber(BaseDatafit):
             (1-\tau) (-r - \frac{\delta}{2}) & \text{if } r < -\delta
         \end{cases}

-    where :math:`r = y - Xw` is the residual, :math:`\tau` is the target quantile,
-    and :math:`\delta` controls the smoothing region width.
+    where :math:`r = y - Xw` is the residual, :math:`\tau` is the target
+    quantile, and :math:`\delta` controls the smoothing region width.

     The gradient is given by:

@@ -47,11 +48,15 @@ class QuantileHuber(BaseDatafit):
             -(1-\tau) & \text{if } r < -\delta
         \end{cases}

-    This formulation provides twice-differentiable smoothing while maintaining quantile estimation properties. The approach is similar to convolution smoothing with a uniform kernel.
+    This formulation provides twice-differentiable smoothing while maintaining
+    quantile estimation properties. The approach is similar to convolution
+    smoothing with a uniform kernel.

     Special cases:
-    - When :math:`\\tau = 0.5`, this reduces to the symmetric Huber loss used for median regression.
-    - As :math:`\\delta \\to 0`, it converges to the standard Pinball loss.
+    - When :math:`\\tau = 0.5`, this reduces to the symmetric Huber
+      loss used for median regression.
+    - As :math:`\\delta \\to 0`, it converges to the standard
+      Pinball loss.

     References
     ----------
@@ -105,7 +110,11 @@ def get_global_lipschitz(self, X, y):
     def get_global_lipschitz_sparse(self, X_data, X_indptr, X_indices, y):
         n_samples = len(y)
         weight = max(self.quantile, 1 - self.quantile)
-        return weight * spectral_norm(X_data, X_indptr, X_indices, n_samples) ** 2 / (n_samples * self.delta)
+        return (
+            weight
+            * spectral_norm(X_data, X_indptr, X_indices, n_samples) ** 2
+            / (n_samples * self.delta)
+        )

     def _loss_and_grad_scalar(self, residual):
         tau, delta = self.quantile, self.delta
@@ -116,7 +125,9 @@ def _loss_and_grad_scalar(self, residual):
             if residual > 0:
                 return tau * residual**2 / (2 * delta), tau * residual / delta
             else:
-                return (1 - tau) * residual**2 / (2 * delta), (1 - tau) * residual / delta
+                return ((1 - tau) * residual**2 / (2 * delta), (1 - tau)
+                        * residual / delta
+                        )

         # Linear tails
         if residual > delta:
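Aside, not part of the commit: the piecewise loss and gradient in the docstring above vectorize directly. A minimal NumPy sketch, using the hypothetical helper name quantile_huber_loss_grad, that evaluates both for an array of residuals:

import numpy as np


def quantile_huber_loss_grad(r, tau=0.5, delta=1.0):
    """Quantile Huber loss and gradient for residuals r = y - Xw.

    Quadratic core weighted by tau (r > 0) or 1 - tau (r <= 0) on
    |r| <= delta, linear tails with slopes tau and -(1 - tau) beyond.
    """
    r = np.asarray(r, dtype=float)
    side = np.where(r > 0, tau, 1 - tau)
    loss = np.where(np.abs(r) <= delta,
                    side * r ** 2 / (2 * delta),      # smooth quadratic core
                    side * (np.abs(r) - delta / 2))   # linear tails
    grad = np.where(np.abs(r) <= delta,
                    side * r / delta,
                    np.sign(r) * side)
    return loss, grad

At tau = 0.5 this is a scaled symmetric Huber loss, and as delta shrinks it approaches the pinball loss, matching the docstring's special cases. The core's curvature is at most max(tau, 1 - tau) / delta, which is why get_global_lipschitz_sparse above scales the squared spectral norm of X by max(self.quantile, 1 - self.quantile) / (n_samples * self.delta).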

skglm/experimental/smooth_quantile_regressor.py (13 additions, 9 deletions)
@@ -1,12 +1,9 @@
 import copy
-import warnings
 import numpy as np
 from scipy import sparse

 from skglm import GeneralizedLinearEstimator
 from skglm.penalties import L1
-from skglm.datafits import Huber
-from skglm.experimental.quantile_regression import Pinball
 from skglm.solvers import FISTA, LBFGS


@@ -144,16 +141,21 @@ def fit(self, X, y):

         extended_sequence = self.smoothing_sequence[:]
         fine_deltas = [0.15, 0.12, 0.1, 0.08, 0.06, 0.04, 0.02, 0.01]
-        extended_sequence = sorted(set(extended_sequence + fine_deltas), reverse=True)
+        extended_sequence = sorted(set(extended_sequence + fine_deltas),
+                                   reverse=True)

         # Progressive smoothing stages
         for stage, delta in enumerate(extended_sequence):
             if self.verbose:
-                print(f"[ProgressiveSmoothing] Stage {stage+1}/{len(extended_sequence)}: "
-                      f"delta = {delta:.3g}")
+                print(
+                    f"[ProgressiveSmoothing] Stage {stage + 1}/"
+                    f"{len(extended_sequence)}: "
+                    f"delta = {delta:.3g}"
+                )

             # Always use QuantileHuber for all quantile values
-            datafit = self._quantile_huber_cls(delta=delta, quantile=self.quantile)
+            datafit = self._quantile_huber_cls(delta=delta,
+                                               quantile=self.quantile)

             solver = copy.deepcopy(self.smooth_solver)

@@ -182,7 +184,8 @@ def fit(self, X, y):

             if self.verbose:
                 print(
-                    f" Actual quantile: {actual_quantile:.3f}, Error: {quantile_error:.3f}")
+                    f" Actual quantile: {actual_quantile:.3f},"
+                    f" Error: {quantile_error:.3f}")

             if quantile_error < best_quantile_error:
                 best_quantile_error = quantile_error
@@ -192,7 +195,8 @@ def fit(self, X, y):

             if self.verbose:
                 print(
-                    f" New best quantile at delta={delta:.3g}, error={quantile_error:.3f}")
+                    f" New best quantile at delta={delta:.3g},"
+                    f" error={quantile_error:.3f}")

             # Record stage information
             obj_value = datafit.value(
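The loop above anneals delta over a decreasing sequence, rebuilding the QuantileHuber datafit at each stage so that each smoothed solve warm-starts the next, sharper one. A toy sketch of that continuation pattern follows; it is an illustration only, not the estimator's actual code, and it reuses the quantile_huber_loss_grad helper sketched earlier with SciPy's L-BFGS in place of skglm's solvers:

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 5))
y = X @ rng.standard_normal(5) + rng.standard_normal(200)

tau = 0.8
w = np.zeros(X.shape[1])                    # warm start carried across stages
for delta in [0.5, 0.25, 0.1, 0.05, 0.01]:  # coarse-to-fine smoothing
    def objective(w, delta=delta):
        loss, grad = quantile_huber_loss_grad(y - X @ w, tau, delta)
        # d(residual)/dw = -X, so the chain rule gives gradient -X.T @ grad / n
        return loss.mean(), -X.T @ grad / len(y)
    w = minimize(objective, w, jac=True, method="L-BFGS-B").x

Early stages with large delta are well conditioned and cheap to solve; later stages refine the solution toward the nonsmooth pinball objective without restarting from scratch.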

skglm/experimental/tests/test_smooth_quantile_regressor.py (10 additions, 5 deletions)
@@ -107,16 +107,20 @@ def test_issue276_regression():
     rel_gap_sqr_large = (sqr_large_loss - ref_large_loss) / ref_large_loss

     assert rel_gap_pdcd_small < 0.05, \
-        f"PDCD_WS should work well on small dataset (rel_gap={rel_gap_pdcd_small:.4f})"
+        f"PDCD_WS should work well on small dataset " \
+        f"(rel_gap={rel_gap_pdcd_small:.4f})"

     assert rel_gap_sqr_large < 0.05, \
-        f"SmoothQuantileRegressor failed to fix issue #276 (rel_gap={rel_gap_sqr_large:.4f})"
+        f"SmoothQuantileRegressor failed to fix issue #276 " \
+        f"(rel_gap={rel_gap_sqr_large:.4f})"

     if rel_gap_pdcd_large > 0.05:
         assert sqr_large_loss < pdcd_large_loss, \
-            "SmoothQuantileRegressor should outperform direct PDCD_WS on large dataset"
+            "SmoothQuantileRegressor should outperform direct " \
+            "PDCD_WS on large dataset"

-    assert len(sqr.stage_results_) > 0, "Missing stage results in SmoothQuantileRegressor"
+    assert len(sqr.stage_results_) > 0, "Missing stage results " \
+        "in SmoothQuantileRegressor"

     return rel_gap_pdcd_small, rel_gap_pdcd_large, rel_gap_sqr_large

@@ -158,7 +162,8 @@ def test_smooth_quantile_regressor_non_median():
     rel_gap = (sqr_loss - ref_loss) / ref_loss

     assert rel_gap < 0.05, \
-        f"SmoothQuantileRegressor should work for non-median quantiles (rel_gap={rel_gap:.4f})"
+        f"SmoothQuantileRegressor should work for non-median quantiles " \
+        f"(rel_gap={rel_gap:.4f})"

     residuals = y - y_pred_sqr
     n_pos = np.sum(residuals > 0)
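The residual sign count at the end of this test is the standard calibration check for a quantile fit: with the tau-weighted pinball loss (or its smoothed version), roughly a tau fraction of residuals end up non-positive, so about 1 - tau of them are positive. A small standalone version of the check; the helper name and the tolerance are illustrative, not the test's:

import numpy as np


def empirical_quantile_level(y_true, y_pred):
    """Fraction of observations at or below the prediction."""
    residuals = np.asarray(y_true) - np.asarray(y_pred)
    return float(np.mean(residuals <= 0))

# For a calibrated tau = 0.8 fit one would expect, up to sampling noise:
# abs(empirical_quantile_level(y, y_pred_sqr) - 0.8) < 0.05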
