Skip to content

Commit bada266

Browse files
[DOC] migrate distributions.py docstrings to numpydoc style (#2118)
Addresses #2066.

## Summary
Migrates all Google-style docstrings in `pytorch_forecasting/metrics/distributions.py` to numpydoc style, as requested in #2066.

## Changes Made
Converted the following to numpydoc style:
- MultivariateNormalDistributionLoss.__init__
- NegativeBinomialDistributionLoss.to_prediction
- BetaDistributionLoss.loss
- MQF2DistributionLoss.__init__
- MQF2DistributionLoss.loss
- MQF2DistributionLoss.to_quantiles
- ImplicitQuantileNetworkDistributionLoss.__init__
- ImplicitQuantileNetworkDistributionLoss.loss
- ImplicitQuantileNetworkDistributionLoss.to_quantiles

## Type of Change
- [x] Documentation (docstring update, no logic changes)

## Notes
No code logic was changed. Only docstring formatting was updated.

Co-authored-by: Aryan Saini <starktony.2032@gmail.com>
1 parent 6b27898 commit bada266

File tree

1 file changed

+131
-88
lines changed

1 file changed

+131
-88
lines changed

pytorch_forecasting/metrics/distributions.py

Lines changed: 131 additions & 88 deletions
Original file line number | Diff line number | Diff line change
@@ -76,17 +76,23 @@ def __init__(
7676
sigma_init: float = 1.0,
7777
sigma_minimum: float = 1e-3,
7878
):
79-
"""
80-
Initialize metric
81-
82-
Args:
83-
name (str): metric name. Defaults to class name.
84-
quantiles (List[float], optional): quantiles for probability range.
85-
Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
86-
reduction (str, optional): Reduction, "none", "mean" or "sqrt-mean". Defaults to "mean".
87-
rank (int): rank of low-rank approximation for covariance matrix. Defaults to 10.
88-
sigma_init (float, optional): default value for diagonal covariance. Defaults to 1.0.
89-
sigma_minimum (float, optional): minimum value for diagonal covariance. Defaults to 1e-3.
79+
"""Initialize metric.
80+
81+
Parameters
82+
----------
83+
name : str, optional
84+
Metric name. Defaults to class name.
85+
quantiles : list[float], optional
86+
Quantiles for probability range.
87+
Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
88+
reduction : str, optional
89+
Reduction, "none", "mean" or "sqrt-mean". Defaults to "mean".
90+
rank : int, optional
91+
Rank of low-rank approximation for covariance matrix. Defaults to 10.
92+
sigma_init : float, optional
93+
Default value for diagonal covariance. Defaults to 1.0.
94+
sigma_minimum : float, optional
95+
Minimum value for diagonal covariance. Defaults to 1e-3.
9096
""" # noqa: E501
9197
if quantiles is None:
9298
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
@@ -208,16 +214,20 @@ def rescale_parameters(
208214
return torch.stack([mean, shape], dim=-1)
209215

210216
def to_prediction(self, y_pred: torch.Tensor) -> torch.Tensor:
211-
"""
212-
Convert network prediction into a point prediction. In the case of this distribution prediction we
213-
need to derive the mean (as a point prediction) from the distribution parameters
217+
"""Convert network prediction into a point prediction.
218+
219+
Derives the mean as a point prediction from the distribution parameters.
214220
215-
Args:
216-
y_pred: prediction output of network
217-
in this case the two parameters for the negative binomial
221+
Parameters
222+
----------
223+
y_pred : torch.Tensor
224+
Prediction output of network — the two parameters
225+
for the negative binomial distribution.
218226
219-
Returns:
220-
torch.Tensor: mean prediction
227+
Returns
228+
-------
229+
torch.Tensor
230+
Mean prediction.
221231
""" # noqa: E501
222232
return y_pred[..., 0]
223233

@@ -282,15 +292,19 @@ def map_x_to_distribution(self, x: torch.Tensor) -> distributions.Beta:
282292
)
283293

284294
def loss(self, y_pred: torch.Tensor, y_actual: torch.Tensor) -> torch.Tensor:
285-
"""
286-
Calculate negative likelihood
287-
288-
Args:
289-
y_pred: network output
290-
y_actual: actual values
291-
292-
Returns:
293-
torch.Tensor: metric value on which backpropagation can be applied
295+
"""Calculate negative likelihood.
296+
297+
Parameters
298+
----------
299+
y_pred : torch.Tensor
300+
Network output.
301+
y_actual : torch.Tensor
302+
Actual values.
303+
304+
Returns
305+
-------
306+
torch.Tensor
307+
Metric value on which backpropagation can be applied.
294308
"""
295309
distribution = self.map_x_to_distribution(y_pred)
296310
# clip y_actual to avoid infinite losses
@@ -359,20 +373,28 @@ def __init__(
359373
icnn_num_layers: int = 2,
360374
estimate_logdet: bool = False,
361375
) -> None:
362-
"""
363-
Args:
364-
prediction_length (int): maximum prediction length.
365-
quantiles (List[float], optional): default quantiles to output.
366-
Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
367-
hidden_size (int, optional): hidden size per prediction length. Defaults to 4.
368-
es_num_samples (int, optional): Number of samples to calculate energy score.
369-
If None, maximum likelihood is used as opposed to energy score for optimization.
370-
Defaults to 50.
371-
beta (float, optional): between 0 and 1.0 to control how scale sensitive metric is (1=fully sensitive).
372-
Defaults to 1.0.
373-
icnn_hidden_size (int, optional): hidden size of distribution estimating network. Defaults to 20.
374-
icnn_num_layers (int, optional): number of hidden layers in distribution estimating network. Defaults to 2.
375-
estimate_logdet (bool, optional): if to estimate log determinant. Defaults to False.
376+
"""Initialize MQF2 distribution loss.
377+
378+
Parameters
379+
----------
380+
prediction_length : int
381+
Maximum prediction length.
382+
quantiles : list[float], optional
383+
Default quantiles to output.
384+
Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
385+
hidden_size : int, optional
386+
Hidden size per prediction length. Defaults to 4.
387+
es_num_samples : int, optional
388+
Number of samples to calculate energy score.
389+
If None, maximum likelihood is used. Defaults to 50.
390+
beta : float, optional
391+
Controls scale sensitivity (1.0 = fully sensitive). Defaults to 1.0.
392+
icnn_hidden_size : int, optional
393+
Hidden size of distribution estimating network. Defaults to 20.
394+
icnn_num_layers : int, optional
395+
Number of hidden layers in distribution estimating network. Defaults to 2.
396+
estimate_logdet : bool, optional
397+
Whether to estimate log determinant. Defaults to False.
376398
""" # noqa: E501
377399
if quantiles is None:
378400
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
@@ -461,15 +483,19 @@ def map_x_to_distribution(self, x: torch.Tensor) -> distributions.Distribution:
461483
)
462484

463485
def loss(self, y_pred: torch.Tensor, y_actual: torch.Tensor) -> torch.Tensor:
464-
"""
465-
Calculate negative likelihood
466-
467-
Args:
468-
y_pred: network output
469-
y_actual: actual values
470-
471-
Returns:
472-
torch.Tensor: metric value on which backpropagation can be applied
486+
"""Calculate negative likelihood.
487+
488+
Parameters
489+
----------
490+
y_pred : torch.Tensor
491+
Network output.
492+
y_actual : torch.Tensor
493+
Actual values.
494+
495+
Returns
496+
-------
497+
torch.Tensor
498+
Metric value on which backpropagation can be applied.
473499
"""
474500
distribution = self.map_x_to_distribution(y_pred)
475501
if self.is_energy_score:
@@ -492,16 +518,20 @@ def rescale_parameters(
492518
def to_quantiles(
493519
self, y_pred: torch.Tensor, quantiles: list[float] = None
494520
) -> torch.Tensor:
495-
"""
496-
Convert network prediction into a quantile prediction.
497-
498-
Args:
499-
y_pred: prediction output of network
500-
quantiles (List[float], optional): quantiles for probability range. Defaults to quantiles as
501-
as defined in the class initialization.
502-
503-
Returns:
504-
torch.Tensor: prediction quantiles (last dimension)
521+
"""Convert network prediction into a quantile prediction.
522+
523+
Parameters
524+
----------
525+
y_pred : torch.Tensor
526+
Prediction output of network.
527+
quantiles : list[float], optional
528+
Quantiles for probability range. Defaults to quantiles as
529+
defined in the class initialization.
530+
531+
Returns
532+
-------
533+
torch.Tensor
534+
Prediction quantiles (last dimension).
505535
""" # noqa: E501
506536
if quantiles is None:
507537
quantiles = self.quantiles
@@ -571,14 +601,19 @@ def __init__(
571601
hidden_size: int | None = 32,
572602
n_loss_samples: int | None = 64,
573603
) -> None:
574-
"""
575-
Args:
576-
prediction_length (int): maximum prediction length.
577-
quantiles (List[float], optional): default quantiles to output.
578-
Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
579-
input_size (int, optional): input size per prediction length. Defaults to 16.
580-
hidden_size (int, optional): hidden size per prediction length. Defaults to 64.
581-
n_loss_samples (int, optional): number of quantiles to sample to calculate loss.
604+
"""Initialize implicit quantile network distribution loss.
605+
606+
Parameters
607+
----------
608+
quantiles : list[float], optional
609+
Default quantiles to output.
610+
Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
611+
input_size : int, optional
612+
Input size per prediction length. Defaults to 16.
613+
hidden_size : int, optional
614+
Hidden size per prediction length. Defaults to 32.
615+
n_loss_samples : int, optional
616+
Number of quantiles to sample to calculate loss.
582617
""" # noqa: E501
583618
if quantiles is None:
584619
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
@@ -601,15 +636,19 @@ def sample(self, y_pred, n_samples: int) -> torch.Tensor:
601636
return samples
602637

603638
def loss(self, y_pred: torch.Tensor, y_actual: torch.Tensor) -> torch.Tensor:
604-
"""
605-
Calculate negative likelihood
606-
607-
Args:
608-
y_pred: network output
609-
y_actual: actual values
610-
611-
Returns:
612-
torch.Tensor: metric value on which backpropagation can be applied
639+
"""Calculate negative likelihood.
640+
641+
Parameters
642+
----------
643+
y_pred : torch.Tensor
644+
Network output.
645+
y_actual : torch.Tensor
646+
Actual values.
647+
648+
Returns
649+
-------
650+
torch.Tensor
651+
Metric value on which backpropagation can be applied.
613652
"""
614653
eps = 1e-3
615654
# for a couple of random quantiles
@@ -649,16 +688,20 @@ def to_prediction(self, y_pred: torch.Tensor, n_samples: int = 100) -> torch.Ten
649688
def to_quantiles(
650689
self, y_pred: torch.Tensor, quantiles: list[float] = None
651690
) -> torch.Tensor:
652-
"""
653-
Convert network prediction into a quantile prediction.
654-
655-
Args:
656-
y_pred: prediction output of network
657-
quantiles (List[float], optional): quantiles for probability range. Defaults to quantiles as
658-
as defined in the class initialization.
659-
660-
Returns:
661-
torch.Tensor: prediction quantiles (last dimension)
691+
"""Convert network prediction into a quantile prediction.
692+
693+
Parameters
694+
----------
695+
y_pred : torch.Tensor
696+
Prediction output of network.
697+
quantiles : list[float], optional
698+
Quantiles for probability range. Defaults to quantiles
699+
defined in the class initialization.
700+
701+
Returns
702+
-------
703+
torch.Tensor
704+
Prediction quantiles (last dimension).
662705
""" # noqa: E501
663706
if quantiles is None:
664707
quantiles = self.quantiles

0 commit comments

Comments (0)