
Commit 17f9a3c

shchur and AzulGarza authored
Update model wrappers for v0.6.0 (#44)
Co-authored-by: azul <[email protected]>
1 parent f362bae commit 17f9a3c

20 files changed: +1966 −195 lines changed
Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
model_name,dataset_path,dataset_config,horizon,num_windows,initial_cutoff,window_step_size,min_context_length,max_context_length,seasonality,eval_metric,extra_metrics,quantile_levels,id_column,timestamp_column,target,generate_univariate_targets_from,known_dynamic_columns,past_dynamic_columns,static_columns,task_name,test_error,training_time_s,inference_time_s,dataset_fingerprint,trained_on_this_dataset,fev_version,MASE,WQL
timecopilot,autogluon/chronos_datasets,monash_traffic,24,1,-24,24,-23,,24,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_traffic,0.7739645971400719,,101.55883938,a395745939b7b0f3,False,0.6.0,0.7739645971400719,0.2281103168740663
timecopilot,autogluon/chronos_datasets,monash_australian_electricity,48,1,-48,48,-47,,48,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_australian_electricity,0.761278878663326,,87.18234509,c3fa76db06a6f37a,False,0.6.0,0.761278878663326,0.0354336959497435
timecopilot,autogluon/chronos_datasets,ercot,24,1,-24,24,-23,,24,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],ercot,0.6833983448950258,,81.13162435999999,95b91121d95f89c8,False,0.6.0,0.6833983448950258,0.0206758714793285
timecopilot,autogluon/chronos_datasets_extra,ETTm,24,1,-96,24,-23,,96,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,__ALL__,[],[],[],ETTm,0.6237841762093976,,92.28126142000002,8b02054712ef1d41,False,0.6.0,0.6237841762093976,0.0516134532630169
timecopilot,autogluon/chronos_datasets_extra,ETTh,24,1,-24,24,-23,,24,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,__ALL__,[],[],[],ETTh,0.7625638462320636,,85.33888267200001,d61f7a559d253cb4,False,0.6.0,0.7625638462320636,0.0740316799327656
timecopilot,autogluon/chronos_datasets,exchange_rate,30,1,-30,30,-29,,5,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],exchange_rate,1.981209557451495,,95.825562333,a8476b6fa723f5ed,False,0.6.0,1.981209557451495,0.0134032471152773
timecopilot,autogluon/chronos_datasets,nn5,56,1,-56,56,-55,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],nn5,0.5556876939254136,,96.910774641,91434e4cabb44eab,False,0.6.0,0.5556876939254136,0.1453783915571264
timecopilot,autogluon/chronos_datasets,monash_nn5_weekly,8,1,-8,8,-7,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_nn5_weekly,0.9072348440238922,,91.79065736799998,480088d6b89d7100,False,0.6.0,0.9072348440238922,0.0828638298840608
timecopilot,autogluon/chronos_datasets,monash_weather,30,1,-30,30,-29,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_weather,0.7919336327531099,,147.496713613,980478e7349a0e49,False,0.6.0,0.7919336327531099,0.1284542879603267
timecopilot,autogluon/chronos_datasets,monash_covid_deaths,30,1,-30,30,-29,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_covid_deaths,38.40480428649353,,99.978413443,6cdf94c2ac2fae09,False,0.6.0,38.40480428649353,0.0402622984596447
timecopilot,autogluon/chronos_datasets,monash_fred_md,12,1,-12,12,-11,,12,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_fred_md,0.5513709624273953,,68.48606713199999,a508fc699f3f3683,False,0.6.0,0.5513709624273953,0.0371942971225067
timecopilot,autogluon/chronos_datasets,m4_quarterly,8,1,-8,8,-7,,4,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],m4_quarterly,1.1700822028966442,,204.034223728,97b166b18d38e710,False,0.6.0,1.1700822028966442,0.0741636274514156
timecopilot,autogluon/chronos_datasets,m4_yearly,6,1,-6,6,-5,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],m4_yearly,3.4115641258577467,,88.85265404000003,020f5777849c9c62,False,0.6.0,3.4115641258577467,0.1169761730880261
timecopilot,autogluon/chronos_datasets,dominick,8,1,-8,8,-7,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],dominick,0.8171339001717135,,582.5803540879999,ab1a8018332665ab,False,0.6.0,0.8171339001717135,0.3295555818386664
timecopilot,autogluon/chronos_datasets,m5,28,1,-28,28,-27,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],m5,0.906674248517517,,584.7930052490001,78a5b1db66dfb76c,False,0.6.0,0.906674248517517,0.5536143922620141
timecopilot,autogluon/chronos_datasets,monash_tourism_monthly,24,1,-24,24,-23,,12,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_tourism_monthly,1.4292292732537968,,107.435675182,3ce75ace84bef572,False,0.6.0,1.4292292732537968,0.0790971941665618
timecopilot,autogluon/chronos_datasets,monash_tourism_quarterly,8,1,-8,8,-7,,4,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_tourism_quarterly,1.6223408706515454,,90.725377077,f34c4184775eda7e,False,0.6.0,1.6223408706515454,0.0597801393650899
timecopilot,autogluon/chronos_datasets,monash_tourism_yearly,4,1,-4,4,-3,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_tourism_yearly,3.5747123618200907,,74.45144080200001,7c477aee96f482c4,False,0.6.0,3.5747123618200907,0.1580192799957833
timecopilot,autogluon/chronos_datasets,monash_car_parts,12,1,-12,12,-11,,12,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_car_parts,0.8438107216883975,,116.798960247,1ba8c4719a51b23c,False,0.6.0,0.8438107216883975,0.977330257613002
timecopilot,autogluon/chronos_datasets,monash_hospital,12,1,-12,12,-11,,12,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_hospital,0.7715656244296779,,104.193838361,e364316542f2f5c7,False,0.6.0,0.7715656244296779,0.0536278408642718
timecopilot,autogluon/chronos_datasets,monash_cif_2016,12,1,-12,12,-11,,12,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_cif_2016,0.937178219129742,,70.38486716899999,89203c02fafca301,False,0.6.0,0.937178219129742,0.0128493017821839
timecopilot,autogluon/chronos_datasets,monash_m1_yearly,6,1,-6,6,-5,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_m1_yearly,4.028149050098825,,88.92860413,bd7efdf4c711a830,False,0.6.0,4.028149050098825,0.1400529738252418
timecopilot,autogluon/chronos_datasets,monash_m1_quarterly,8,1,-8,8,-7,,4,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_m1_quarterly,1.695922770506937,,78.39436832999999,5dd7170c16393209,False,0.6.0,1.695922770506937,0.0991189288385913
timecopilot,autogluon/chronos_datasets,monash_m1_monthly,18,1,-18,18,-17,,12,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_m1_monthly,1.053624977666057,,82.058715941,88d42195e0f8e7cc,False,0.6.0,1.053624977666057,0.1332771768039729
timecopilot,autogluon/chronos_datasets,monash_m3_monthly,18,1,-18,18,-17,,12,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_m3_monthly,0.8286361654975168,,115.440668056,372532bc65a1f203,False,0.6.0,0.8286361654975168,0.0900856487113432
timecopilot,autogluon/chronos_datasets,monash_m3_yearly,6,1,-6,6,-5,,1,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_m3_yearly,2.759102155120125,,98.25139432900002,d607d91b11bbe4a1,False,0.6.0,2.759102155120125,0.1250363194133956
timecopilot,autogluon/chronos_datasets,monash_m3_quarterly,8,1,-8,8,-7,,4,MASE,['WQL'],"[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]",id,timestamp,target,,[],[],[],monash_m3_quarterly,1.1839319005752151,,89.49035918899999,6401670c504aa82b,False,0.6.0,1.1839319005752151,0.0711619074323248
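Note: the results file above has one row per task, with the error metrics (MASE, WQL) and inference time recorded per task. Below is a minimal sketch of how such a results file could be inspected with pandas; the local filename timecopilot.csv is an assumption for illustration, since the path of the added file is not shown in this rendering.

import numpy as np
import pandas as pd

# Load the per-task benchmark results (hypothetical local filename).
results = pd.read_csv("timecopilot.csv")

# One row per task; MASE and WQL are the reported error metrics.
print(results[["task_name", "MASE", "WQL", "inference_time_s"]])

# Aggregate across tasks with a geometric mean, which is less sensitive to the
# very different error scales of the individual datasets.
print("geometric mean MASE:", np.exp(np.log(results["MASE"]).mean()))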

docs/tutorials/01-quickstart.ipynb

Lines changed: 1 addition & 3 deletions
@@ -9,12 +9,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "%load_ext autoreload\n",
-    "%autoreload 2\n",
     "import fev"
    ]
   },

examples/chronos/evaluate_model.py

Lines changed: 45 additions & 46 deletions
@@ -26,56 +26,55 @@ def predict_with_model(
     torch_dtype: str = torch.bfloat16,
     num_samples: int = 20,
     seed: int = 123,
-) -> tuple[datasets.Dataset, float, dict]:
+) -> tuple[list[datasets.DatasetDict], float, dict]:
     pipeline = BaseChronosPipeline.from_pretrained(model_name, device_map=device_map, torch_dtype=torch_dtype)
-
-    past_data, future_data = task.get_input_data(trust_remote_code=True)
-    target = past_data.with_format("torch").cast_column(
-        task.target_column, datasets.Sequence(datasets.Value("float32"))
-    )[task.target_column]
-
-    quantile_levels = task.quantile_levels if task.quantile_levels is not None else []
-    quantiles_all = []
-    mean_all = []
-
     torch.manual_seed(seed)
-    start_time = time.monotonic()
-    for batch in tqdm(batchify(target, batch_size=batch_size), total=len(target) // batch_size):
-        kwargs = dict(
-            context=batch,
-            prediction_length=task.horizon,
-            limit_prediction_length=False,
-        )
-
-        if pipeline.forecast_type == ForecastType.SAMPLES:
-            kwargs.update(dict(num_samples=num_samples))
 
-        quantiles, mean = pipeline.predict_quantiles(
-            **kwargs,
-            # make sure to always compute the median prediction last
-            quantile_levels=quantile_levels + [0.5],
+    inference_time = 0.0
+    quantile_levels = task.quantile_levels.copy()
+    if 0.5 not in quantile_levels:
+        quantile_levels.append(0.5)
+
+    predictions_per_window = []
+    for window in task.iter_windows():
+        past_data, _ = fev.convert_input_data(window, adapter="datasets", as_univariate=True)
+        past_data = past_data.with_format("torch").cast_column("target", datasets.Sequence(datasets.Value("float32")))
+
+        quantiles_all = []
+        mean_all = []
+
+        start_time = time.monotonic()
+        for batch in batchify(past_data["target"], batch_size=batch_size):
+            quantiles, mean = pipeline.predict_quantiles(
+                context=batch,
+                prediction_length=task.horizon,
+                limit_prediction_length=False,
+                quantile_levels=quantile_levels,
+            )
+
+            quantiles_all.append(quantiles.numpy())
+            mean_all.append(mean.numpy())
+        inference_time += time.monotonic() - start_time
+
+        quantiles_np = np.concatenate(quantiles_all, axis=0)  # [num_items, horizon, num_quantiles]
+        mean_np = np.concatenate(mean_all, axis=0)  # [num_items, horizon]
+
+        if task.eval_metric in ["MSE", "RMSE", "RMSSE"]:
+            point_forecast = mean_np  # [num_items, horizon]
+        else:
+            # use median as the point forecast
+            point_forecast = quantiles_np[:, :, quantile_levels.index(0.5)]  # [num_items, horizon]
+        predictions_dict = {"predictions": point_forecast}
+
+        for idx, level in enumerate(task.quantile_levels):
+            predictions_dict[str(level)] = quantiles_np[:, :, idx]
+
+        predictions_per_window.append(
+            fev.utils.combine_univariate_predictions_to_multivariate(
+                datasets.Dataset.from_dict(predictions_dict), target_columns=task.target_columns
+            )
         )
 
-        quantiles_all.append(quantiles.numpy())
-        mean_all.append(mean.numpy())
-
-    inference_time = time.monotonic() - start_time
-
-    quantiles_np = np.concatenate(quantiles_all, axis=0)  # [num_items, horizon, num_quantiles]
-    mean_np = np.concatenate(mean_all, axis=0)  # [num_items, horizon]
-
-    if task.eval_metric in ["MSE", "RMSE", "RMSSE"]:
-        point_forecast = mean_np  # [num_items, horizon]
-    else:
-        # take the median from the last computed quantile
-        point_forecast = quantiles_np[:, :, -1]  # [num_items, horizon]
-
-    predictions_dict = {"predictions": point_forecast}
-
-    for idx, level in enumerate(quantile_levels):
-        predictions_dict[str(level)] = quantiles_np[:, :, idx]  # [num_items, horizon]
-
-    predictions = datasets.Dataset.from_dict(predictions_dict)
     extra_info = {
         "model_config": {
             "model_name": model_name,
@@ -86,7 +85,7 @@ def predict_with_model(
             "seed": seed,
         }
     }
-    return predictions, inference_time, extra_info
+    return predictions_per_window, inference_time, extra_info
 
 
 if __name__ == "__main__":
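Note: one subtle fix in this wrapper concerns the median forecast. The old code appended 0.5 to the requested quantile levels and then took the last quantile slice (quantiles_np[:, :, -1]), which silently picks the wrong quantile whenever 0.5 is already among the requested levels. The new code looks up the position of 0.5 explicitly. A small self-contained illustration with dummy arrays (not the Chronos pipeline):

import numpy as np

quantile_levels = [0.1, 0.5, 0.9]  # 0.5 is already requested by the task
if 0.5 not in quantile_levels:
    quantile_levels.append(0.5)

# Fake quantile forecasts with shape [num_items, horizon, num_quantiles]
quantiles_np = np.random.rand(2, 3, len(quantile_levels))

median_old = quantiles_np[:, :, -1]                          # wrong here: picks the 0.9 quantile
median_new = quantiles_np[:, :, quantile_levels.index(0.5)]  # always the 0.5 quantile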

examples/chronos/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
-chronos-forecasting==1.4.1
+chronos-forecasting==1.5.3
 torch==2.6.0

examples/moirai/evaluate_model.py

Lines changed: 32 additions & 27 deletions
@@ -1,10 +1,12 @@
+import logging
 import time
+import warnings
 
 import datasets
 import numpy as np
 import pandas as pd
 import torch
-from uni2ts.model.moirai import MoiraiForecast, MoiraiModule
+from uni2ts.model.moirai2 import Moirai2Forecast, Moirai2Module
 
 import fev
 
@@ -13,58 +15,61 @@
 
 def predict_with_model(
     task: fev.Task,
-    model_name: str = "Salesforce/moirai-1.1-R-large",
-    context_length: int = 1024,
+    model_name: str = "Salesforce/moirai-2.0-R-small",
+    context_length: int = 500,
     batch_size: int = 128,
-    num_samples: int = 100,
-    device: str = "cuda",
+    device: str = "cpu",
     seed: int = 123,
-) -> tuple[datasets.Dataset, float, dict]:
-    _, prediction_dataset = fev.convert_input_data(task, "gluonts", trust_remote_code=True)
-
+) -> tuple[list[datasets.DatasetDict], float, dict]:
     torch.manual_seed(seed)
-    model = MoiraiForecast(
-        module=MoiraiModule.from_pretrained(model_name).to(device),
+    # Disable GluonTS warnings when accessing forecast.mean
+    gts_logger = logging.getLogger("gluonts")
+    gts_logger.setLevel(100)
+
+    model = Moirai2Forecast(
+        module=Moirai2Module.from_pretrained(model_name).to(device),
         prediction_length=task.horizon,
         context_length=context_length,
-        num_samples=num_samples,
         target_dim=1,
         feat_dynamic_real_dim=0,
         past_feat_dynamic_real_dim=0,
     )
     predictor = model.create_predictor(batch_size=batch_size)
 
-    start_time = time.monotonic()
-    samples = np.stack([f.samples for f in predictor.predict(prediction_dataset)])
-    inference_time = time.monotonic() - start_time
+    inference_time = 0.0
+    predictions_per_window = []
+    for window in task.iter_windows():
+        _, prediction_dataset = fev.convert_input_data(window, adapter="gluonts", as_univariate=True)
+        start_time = time.monotonic()
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", RuntimeWarning)
+            forecasts = list(predictor.predict(prediction_dataset))
+        inference_time += time.monotonic() - start_time
 
-    if task.eval_metric in ["MSE", "RMSE", "RMSSE"]:
-        point_forecast = np.mean(samples, axis=1)  # [num_items, horizon]
-    else:
-        point_forecast = np.median(samples, axis=1)  # [num_items, horizon]
-
-    predictions_dict = {"predictions": point_forecast}
-    if task.quantile_levels is not None:
+        predictions_dict = {"predictions": np.stack([f.mean for f in forecasts])}
         for q in task.quantile_levels:
-            predictions_dict[str(q)] = np.quantile(samples, q=q, axis=1)  # [num_items, horizon]
+            predictions_dict[str(q)] = np.stack([f.quantile(q) for f in forecasts])
+        predictions_per_window.append(
+            fev.utils.combine_univariate_predictions_to_multivariate(
+                datasets.Dataset.from_dict(predictions_dict), target_columns=task.target_columns
+            )
+        )
 
-    predictions = datasets.Dataset.from_dict(predictions_dict)
     extra_info = {
         "model_config": {
             "context_length": context_length,
             "model_name": model_name,
             "batch_size": batch_size,
-            "num_samples": num_samples,
             "device": device,
             "seed": seed,
         }
     }
 
-    return predictions, inference_time, extra_info
+    return predictions_per_window, inference_time, extra_info
 
 
 if __name__ == "__main__":
-    model_name = "Salesforce/moirai-1.1-R-base"
+    model_name = "Salesforce/moirai-2.0-R-small"
     num_tasks = 2  # replace with `num_tasks = None` to run on all tasks
 
     benchmark = fev.Benchmark.from_yaml(
@@ -85,4 +90,4 @@ def predict_with_model(
     # Show and save the results
     summary_df = pd.DataFrame(summaries)
     print(summary_df)
-    summary_df.to_csv(f"{model_name}.csv", index=False)
+    summary_df.to_csv("moirai-2.0.csv", index=False)
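Note: each wrapper now builds, for every evaluation window, a univariate predictions table with a "predictions" column (the point forecast) plus one column per quantile level, and then regroups it by target column via fev.utils.combine_univariate_predictions_to_multivariate. A rough sketch of that intermediate per-window format, using zero-filled numpy arrays in place of real model output:

import datasets
import numpy as np

num_items, horizon = 4, 24
quantile_levels = [0.1, 0.5, 0.9]

# Point forecast plus one column per quantile level, each of shape [num_items, horizon].
predictions_dict = {"predictions": np.zeros((num_items, horizon))}
for q in quantile_levels:
    predictions_dict[str(q)] = np.zeros((num_items, horizon))

# One row per item; every column holds a length-`horizon` sequence.
predictions = datasets.Dataset.from_dict(predictions_dict)
print(predictions)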

examples/moirai/requirements.txt

Lines changed: 2 additions & 2 deletions
@@ -1,2 +1,2 @@
-uni2ts==1.2.0
-torch==2.6.0
+uni2ts @ git+https://github.com/SalesforceAIResearch/uni2ts@b472ef6dcf770b96a1b04bd98cd11e893058573a
+torch<2.5.0

examples/seasonal_naive/evaluate_model.py

Lines changed: 18 additions & 11 deletions
@@ -8,23 +8,30 @@
 import fev
 
 
-def predict_with_model(task: fev.Task) -> tuple[datasets.Dataset, float, dict]:
-    _, prediction_dataset = fev.convert_input_data(task, "gluonts", trust_remote_code=True)
-
+def predict_with_model(task: fev.Task) -> tuple[list[datasets.DatasetDict], float, dict]:
     predictor = SeasonalNaivePredictor(prediction_length=task.horizon, season_length=task.seasonality)
 
-    start_time = time.monotonic()
-    forecast = np.stack([f.samples for f in predictor.predict(prediction_dataset)]).squeeze(1)  # [num_items, horizon]
-    inference_time = time.monotonic() - start_time
-
-    predictions_dict = {"predictions": forecast}
-    if task.quantile_levels is not None:
+    inference_time = 0.0
+    predictions_per_window = []
+    for window in task.iter_windows(trust_remote_code=True):
+        _, prediction_dataset = fev.convert_input_data(window, adapter="gluonts", as_univariate=True)
+        start_time = time.monotonic()
+        forecast = np.stack([f.samples for f in predictor.predict(prediction_dataset)]).squeeze(
+            1
+        )  # [num_items, horizon]
+        inference_time += time.monotonic() - start_time
+
+        predictions_dict = {"predictions": forecast}
         for q in task.quantile_levels:
             predictions_dict[str(q)] = forecast
 
-    predictions = datasets.Dataset.from_dict(predictions_dict)
+        predictions_per_window.append(
+            fev.combine_univariate_predictions_to_multivariate(
+                datasets.Dataset.from_dict(predictions_dict), target_columns=task.target_columns
+            )
+        )
 
-    return predictions, inference_time, {}
+    return predictions_per_window, inference_time, {}
 
 
 if __name__ == "__main__":
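Note: the seasonal naive baseline simply repeats the last observed season, so it produces a single deterministic path; that is why every quantile column in the wrapper above is just a copy of the point forecast. A minimal numpy sketch of the forecast rule itself, independent of the GluonTS SeasonalNaivePredictor used here:

import numpy as np

def seasonal_naive_forecast(history: np.ndarray, horizon: int, season_length: int) -> np.ndarray:
    """Repeat the last observed season until the forecast horizon is covered."""
    last_season = history[-season_length:]
    reps = int(np.ceil(horizon / season_length))
    return np.tile(last_season, reps)[:horizon]

y = np.array([1.0, 2.0, 3.0, 4.0, 1.5, 2.5, 3.5, 4.5])
print(seasonal_naive_forecast(y, horizon=6, season_length=4))
# -> [1.5 2.5 3.5 4.5 1.5 2.5]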

0 commit comments
