|
6 | 6 |
|
7 | 7 |
|
8 | 8 | class QuantileHuber(BaseDatafit): |
9 | | - r"""Huber-smoothed Pinball loss for quantile regression. |
10 | | -
|
11 | | - This implements a smoothed approximation of the Pinball (quantile) loss |
12 | | - by applying Huber-style smoothing at the non-differentiable point. |
13 | | - This formulation improves numerical stability and convergence |
14 | | - for gradient-based solvers, particularly on larger datasets. |
15 | | -
|
16 | | - Parameters |
17 | | - ---------- |
18 | | - delta : float, positive |
19 | | - Width of the quadratic region around the origin. Larger values |
20 | | - create more smoothing. As delta approaches 0, this approaches the |
21 | | - standard Pinball loss. |
22 | | -
|
23 | | - quantile : float, between 0 and 1 |
24 | | - The desired quantile level. For example, 0.5 corresponds to the |
25 | | - median. |
26 | | -
|
27 | | - Notes |
28 | | - ----- |
29 | | - The loss function is defined as: |
30 | | -
|
31 | | - .. math:: |
32 | | - L(r) = \begin{cases} |
33 | | - \tau \frac{r^2}{2\delta} & \text{if } 0 < r \leq \delta \\ |
34 | | - (1-\tau) \frac{r^2}{2\delta} & \text{if } -\delta \leq r < 0 \\ |
35 | | - \tau (r - \frac{\delta}{2}) & \text{if } r > \delta \\ |
36 | | - (1-\tau) (-r - \frac{\delta}{2}) & \text{if } r < -\delta |
37 | | - \end{cases} |
38 | | -
|
39 | | - where :math:`r = y - Xw` is the residual, :math:`\tau` is the target |
40 | | - quantile, and :math:`\delta` controls the smoothing region width. |
41 | | -
|
42 | | - The gradient is given by: |
43 | | -
|
44 | | - .. math:: |
45 | | - \nabla L(r) = \begin{cases} |
46 | | - \tau \frac{r}{\delta} & \text{if } 0 < r \leq \delta \\ |
47 | | - (1-\tau) \frac{r}{\delta} & \text{if } -\delta \leq r < 0 \\ |
48 | | - \tau & \text{if } r > \delta \\ |
49 | | - -(1-\tau) & \text{if } r < -\delta |
50 | | - \end{cases} |
51 | | -
|
52 | | - This formulation provides twice-differentiable smoothing while |
53 | | - maintaining quantile estimation properties. The approach is similar to |
54 | | - convolution smoothing with a uniform kernel. |
55 | | -
|
56 | | - Special cases: |
57 | | - - When :math:`\\tau = 0.5`, this reduces to the symmetric Huber |
58 | | - loss used for median regression. |
59 | | - - As :math:`\\delta \\to 0`, it converges to the standard |
60 | | - Pinball loss. |
61 | | -
|
62 | | - References |
63 | | - ---------- |
64 | | - He, X., Pan, X., Tan, K. M., & Zhou, W. X. (2021). |
65 | | - "Smoothed Quantile Regression with Large-Scale Inference |
66 | | - """ |
| 9 | + r"""Huber-smoothed pinball loss for quantile regression. |
| 10 | +
|
| 11 | + This class implements a smoothed approximation of the pinball (quantile) |
| 12 | + loss by applying Huber-style smoothing at the non-differentiable point. |
| 13 | + The formulation improves numerical stability and convergence for |
| 14 | + gradient-based solvers, particularly on large data sets. |
| 15 | +
|
| 16 | + Parameters |
| 17 | + ---------- |
| 18 | + delta : float, positive |
| 19 | + Width of the quadratic region around the origin. Larger values create |
| 20 | + stronger smoothing. As ``delta`` -> 0, the loss approaches the |
| 21 | + standard pinball loss. |
| 22 | +
|
| 23 | + quantile : float in (0, 1) |
| 24 | + Target quantile level (e.g. ``0.5`` corresponds to the median). |
| 25 | +
|
| 26 | + Notes |
| 27 | + ----- |
| 28 | + The loss function is defined as |
| 29 | +
|
| 30 | + .. math:: |
| 31 | +
|
| 32 | + L(r) = |
| 33 | + \begin{cases} |
| 34 | + \tau \dfrac{r^{2}}{2\delta}, & 0 < r \le \delta \\ |
| 35 | + (1-\tau) \dfrac{r^{2}}{2\delta}, & -\delta \le r < 0 \\ |
| 36 | + \tau \left(r - \dfrac{\delta}{2}\right), & r > \delta \\ |
| 37 | + (1-\tau) \left(-r - \dfrac{\delta}{2}\right), & r < -\delta |
| 38 | + \end{cases} |
| 39 | +
|
| 40 | + where :math:`r = y - Xw` is the residual, :math:`\tau` is the target |
| 41 | + quantile, and :math:`\delta` controls the smoothing width. |
| 42 | +
|
| 43 | + References |
| 44 | + ---------- |
| 45 | + He, X., Pan, X., Tan, K. M., & Zhou, W. X. (2021). |
| 46 | + *Smoothed Quantile Regression with Large-Scale Inference*. |
| 47 | + """ |
67 | 48 |
|
68 | 49 | def __init__(self, delta, quantile): |
69 | 50 | if not 0 < quantile < 1: |
@@ -145,9 +126,7 @@ def value(self, y, w, Xw): |
145 | 126 | return res / n_samples |
146 | 127 |
|
147 | 128 | def _dr(self, residual): |
148 | | - """ |
149 | | - Return the derivative dl/dr for every residual in `residual`. |
150 | | - """ |
| 129 | + """Compute dl/dr for each residual.""" |
151 | 130 | tau = self.quantile |
152 | 131 | delt = self.delta |
153 | 132 |
|
|
0 commit comments