Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions neuralforecast/common/_base_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -991,6 +991,8 @@ def _get_loc_scale(self, y_idx, add_channel_dim=False):
def _compute_valid_loss(
self, insample_y, outsample_y, output, outsample_mask, y_idx
):
output_from_scaled_distribution = False

if self.loss.is_distribution_output:
y_loc, y_scale = self._get_loc_scale(y_idx)
distr_args = self.loss.scale_decouple(
Expand All @@ -1001,17 +1003,20 @@ def _compute_valid_loss(
):
_, _, quants = self.loss.sample(distr_args=distr_args)
output = quants
output_from_scaled_distribution = True
elif isinstance(self.valid_loss, losses.BasePointLoss):
distr = self.loss.get_distribution(distr_args=distr_args)
output = distr.mean
output_from_scaled_distribution = True

# Validation Loss evaluation
# Validation loss evaluation
if self.valid_loss.is_distribution_output:
valid_loss = self.valid_loss(
y=outsample_y, distr_args=distr_args, mask=outsample_mask
)
else:
output = self._inv_normalization(y_hat=output, y_idx=y_idx)
if not output_from_scaled_distribution:
output = self._inv_normalization(y_hat=output, y_idx=y_idx)
valid_loss = self.valid_loss(
y=outsample_y, y_hat=output, y_insample=insample_y, mask=outsample_mask
)
Expand Down
37 changes: 37 additions & 0 deletions tests/test_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1969,3 +1969,40 @@ def _test_model_additivity(preds_df, expl, model_name, use_polars, n_series, h,
rtol=1e-3,
err_msg="Attribution predictions do not match model predictions"
)

def test_compute_valid_loss_distribution_to_quantile_scale():
    """
    Verify that quantiles sampled from a scale-decoupled DistributionLoss
    land on the original data scale, so that training with a distribution
    loss and validating with a quantile-based loss compares like with like.
    """
    loss = DistributionLoss(distribution='StudentT', level=[80, 90])

    # Shape of the simulated (normalized) network output.
    n_batch, n_horizon, n_out = 2, 12, 1
    shape = (n_batch, n_horizon, n_out)

    # Normalized distribution parameters as the network would emit them:
    # (df, loc, scale) for StudentT, with loc ~ 0 and scale ~ 0 pre-softplus.
    raw_output = (
        torch.full(shape, 5.0),   # degrees of freedom
        torch.zeros(shape),       # mean (normalized)
        torch.zeros(shape),       # scale (normalized)
    )

    # Statistics of the un-normalized series (original data scale).
    loc = torch.full(shape, 400.0)
    scale = torch.full(shape, 100.0)

    # scale_decouple maps the distribution parameters back to the data scale.
    distr_args = loss.scale_decouple(raw_output, loc=loc, scale=scale)

    # Draw quantiles from the decoupled distribution.
    _, _, quants = loss.sample(distr_args)

    # Compare magnitudes: both should sit near the original-scale location.
    target_mean = loc.mean().item()
    quants_mean = quants.mean().item()
    ratio = quants_mean / target_mean

    # Same scale implies a ratio close to 1.
    assert 0.8 < ratio < 1.2, (
        f"Quantiles mean ({quants_mean:.2f}) and target mean ({target_mean:.2f}) "
        f"are not on the same scale. Ratio: {ratio:.2f}"
    )