@@ -1969,3 +1969,40 @@ def _test_model_additivity(preds_df, expl, model_name, use_polars, n_series, h,
19691969 rtol = 1e-3 ,
19701970 err_msg = "Attribution predictions do not match model predictions"
19711971 )
1972+
def test_compute_valid_loss_distribution_to_quantile_scale():
    """
    Test that when training with DistributionLoss and validating with
    quantile-based losses, the validation loss is computed on the original scale.

    Builds a StudentT DistributionLoss, feeds it normalized raw model outputs,
    decouples them back to the original data scale via ``scale_decouple``, and
    checks that the sampled quantiles land on the same scale as the target.
    """
    loss = DistributionLoss(distribution='StudentT', level=[80, 90])

    # Simulate normalized model output (mean ~0, scale ~1).
    # First tuple element is presumably the StudentT degrees-of-freedom
    # parameter — TODO confirm against DistributionLoss's output layout.
    batch_size, horizon, n_series = 2, 12, 1
    raw_output = (
        torch.ones(batch_size, horizon, n_series) * 5,
        torch.zeros(batch_size, horizon, n_series),  # mean (normalized)
        torch.zeros(batch_size, horizon, n_series),  # scale (normalized)
    )

    # Simulate real data statistics
    loc = torch.ones(batch_size, horizon, n_series) * 400
    scale = torch.ones(batch_size, horizon, n_series) * 100

    # Apply scale_decouple (transforms distribution params to original scale)
    distr_args = loss.scale_decouple(raw_output, loc=loc, scale=scale)

    # Sample quantiles
    _, _, quants = loss.sample(distr_args)

    # Target would be in original scale (around loc)
    target_mean = loc.mean().item()
    quants_mean = quants.mean().item()

    ratio = quants_mean / target_mean

    # Ratio should be close to 1 - quantiles and target on same scale
    assert 0.8 < ratio < 1.2, (
        f"Quantiles mean ({quants_mean:.2f}) and target mean ({target_mean:.2f}) "
        f"are not on the same scale. Ratio: {ratio:.2f}"
    )
0 commit comments