diff --git a/scripts/multivariate_forecast/Covid-19_script/OLinear.sh b/scripts/multivariate_forecast/Covid-19_script/OLinear.sh
new file mode 100644
index 00000000..2157dbdf
--- /dev/null
+++ b/scripts/multivariate_forecast/Covid-19_script/OLinear.sh
@@ -0,0 +1,7 @@
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Covid-19.csv" --strategy-args '{"horizon": 24}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 24, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/covid/covid_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/covid/covid_24_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Covid-19/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Covid-19.csv" --strategy-args '{"horizon": 36}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 36, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/covid/covid_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/covid/covid_36_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Covid-19/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Covid-19.csv" --strategy-args '{"horizon": 48}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 48, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/covid/covid_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/covid/covid_48_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Covid-19/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Covid-19.csv" --strategy-args '{"horizon": 60}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 60, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/covid/covid_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/covid/covid_60_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Covid-19/OLinear"
diff --git a/scripts/multivariate_forecast/ILI_script/OLinear.sh b/scripts/multivariate_forecast/ILI_script/OLinear.sh
new file mode 100644
index 00000000..2edf2964
--- /dev/null
+++ b/scripts/multivariate_forecast/ILI_script/OLinear.sh
@@ -0,0 +1,7 @@
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "ILI.csv" --strategy-args '{"horizon": 24}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 24, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/ILI/ILI_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/ILI/ILI_24_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "ILI/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "ILI.csv" --strategy-args '{"horizon": 36}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 36, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/ILI/ILI_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/ILI/ILI_36_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "ILI/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "ILI.csv" --strategy-args '{"horizon": 48}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 48, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/ILI/ILI_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/ILI/ILI_48_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "ILI/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "ILI.csv" --strategy-args '{"horizon": 60}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 60, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/ILI/ILI_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/ILI/ILI_60_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "ILI/OLinear"
diff --git a/scripts/multivariate_forecast/NASDAQ_script/OLinear.sh b/scripts/multivariate_forecast/NASDAQ_script/OLinear.sh
new file mode 100644
index 00000000..eb9eaddc
--- /dev/null
+++ b/scripts/multivariate_forecast/NASDAQ_script/OLinear.sh
@@ -0,0 +1,7 @@
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "nasdaq.csv" --strategy-args '{"horizon": 24}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 24, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_24_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "NASDAQ/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "nasdaq.csv" --strategy-args '{"horizon": 36}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 36, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_36_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "NASDAQ/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "nasdaq.csv" --strategy-args '{"horizon": 48}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 48, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_48_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "NASDAQ/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "nasdaq.csv" --strategy-args '{"horizon": 60}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 16, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 60, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_60_ratio0.70.npy", "q_out_mat_file": "dataset/Q_matrices/nasdaq/nasdaq_60_ratio0.70.npy", "seq_len": 60, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "NASDAQ/OLinear"
diff --git a/scripts/multivariate_forecast/Weather_script/OLinear.sh b/scripts/multivariate_forecast/Weather_script/OLinear.sh
new file mode 100644
index 00000000..f2c4753f
--- /dev/null
+++ b/scripts/multivariate_forecast/Weather_script/OLinear.sh
@@ -0,0 +1,7 @@
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Weather.csv" --strategy-args '{"horizon": 96}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 32, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 96, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/weather/weather_336_ratio0.7.npy", "q_out_mat_file": "dataset/Q_matrices/weather/weather_96_ratio0.7.npy", "seq_len": 336, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Weather/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Weather.csv" --strategy-args '{"horizon": 192}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 32, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 192, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/weather/weather_336_ratio0.7.npy", "q_out_mat_file": "dataset/Q_matrices/weather/weather_192_ratio0.7.npy", "seq_len": 336, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Weather/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Weather.csv" --strategy-args '{"horizon": 336}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 32, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 336, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/weather/weather_336_ratio0.7.npy", "q_out_mat_file": "dataset/Q_matrices/weather/weather_336_ratio0.7.npy", "seq_len": 336, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Weather/OLinear"
+
+python ./scripts/run_benchmark.py --config-path "rolling_forecast_config.json" --data-name-list "Weather.csv" --strategy-args '{"horizon": 720}' --model-name "olinear.OLinear" --model-hyper-params '{"batch_size": 32, "d_ff": 512, "d_model": 256, "dropout": 0.1, "e_layers": 2, "embed_size": 1, "horizon": 720, "lr": 0.0001, "num_epochs": 10, "q_mat_file": "dataset/Q_matrices/weather/weather_336_ratio0.7.npy", "q_out_mat_file": "dataset/Q_matrices/weather/weather_720_ratio0.7.npy", "seq_len": 336, "temp_patch_len": 16, "temp_stride": 8, "use_amp": 0}' --gpus 0 --num-workers 1 --timeout 60000 --save-path "Weather/OLinear"
diff --git a/ts_benchmark/baselines/olinear/__init__.py b/ts_benchmark/baselines/olinear/__init__.py
new file mode 100644
index 00000000..bcb664b0
--- /dev/null
+++ b/ts_benchmark/baselines/olinear/__init__.py
@@ -0,0 +1,3 @@
+__all__ = ["OLinear"]
+
+from ts_benchmark.baselines.olinear.olinear import OLinear
diff --git a/ts_benchmark/baselines/olinear/layers/RevIN.py b/ts_benchmark/baselines/olinear/layers/RevIN.py
new file mode 100644
index 00000000..65ea2439
--- /dev/null
+++ b/ts_benchmark/baselines/olinear/layers/RevIN.py
@@ -0,0 +1,94 @@
+# code from https://github.com/ts-kim/RevIN, with minor modifications
+
+import torch
+import torch.nn as nn
+
+
+class RevIN(nn.Module):
+    def __init__(self, num_features: int, eps=1e-5, affine=True, subtract_last=False):
+        """
+        :param num_features: number of features or channels
+        :param eps: a value added for numerical stability
+        :param affine: if True, RevIN has learnable affine parameters
+        """
+        super(RevIN, self).__init__()
+        self.num_features = num_features
+        self.eps = eps
+        self.affine = affine
+        self.subtract_last = subtract_last
+        self.mask = None
+        if self.affine:
+            self._init_params()
+
+    def forward(self, x, mode: str, mask=None):
+        # x [b,l,n]
+        if mode == 'norm':
+            self._get_statistics(x, mask)
+            x = self._normalize(x, mask)
+        elif mode == 'denorm':
+            x = self._denormalize(x)
+        else:
+            raise NotImplementedError
+        return x
+
+    def _init_params(self):
+        # initialize RevIN params: (C,)
+        self.affine_weight = nn.Parameter(torch.ones(self.num_features))
+        self.affine_bias = nn.Parameter(torch.zeros(self.num_features))
+
+    def _get_statistics(self, x, mask=None):
+        self.mask = mask
+        dim2reduce = tuple(range(1, x.ndim - 1))
+        if self.subtract_last:
+            self.last = x[:, -1, :].unsqueeze(1)
+        else:
+            if mask is None:
+                self.mean = torch.mean(x, dim=dim2reduce, keepdim=True).detach()
+            else:
+                assert isinstance(mask, torch.Tensor)
+                # print(type(mask))
+                x = x.masked_fill(mask, 0)  # in case other values are filled
+                self.mean = (torch.sum(x, dim=1) / torch.sum(~mask, dim=1)).unsqueeze(1).detach()
+                # self.mean could be nan or inf
+                self.mean = torch.nan_to_num(self.mean, nan=0.0, posinf=0.0, neginf=0.0)
+
+        if mask is None:
+            self.stdev = torch.sqrt(torch.var(x, dim=dim2reduce, keepdim=True, unbiased=False) + self.eps).detach()
+        else:
+            self.stdev = (torch.sqrt(torch.sum((x - self.mean) ** 2, dim=1) / torch.sum(~mask, dim=1) + self.eps)
+                          .unsqueeze(1).detach())
+            self.stdev = torch.nan_to_num(self.stdev, nan=0.0, posinf=None, neginf=None)
+
+    def _normalize(self, x, mask=None):
+        self.mask = mask
+        if self.subtract_last:
+            x = x - self.last
+        else:
+            x = x - self.mean
+
+        x = x / self.stdev
+
+        # x should be zero, if values are masked
+        if mask is not None:
+            # forward fill
+            # x, mask2 = forward_fill(x, mask)
+            # x = x.masked_fill(mask2, 0)
+
+            # mean imputation
+            x = x.masked_fill(mask, 0)
+
+        if self.affine:
+            x = x * self.affine_weight
+            x = x + self.affine_bias
+        return x
+
+    def _denormalize(self, x):
+        if self.affine:
+            x = x - self.affine_bias
+            x = x / (self.affine_weight + self.eps * self.eps)
+        x = x * self.stdev
+        if self.subtract_last:
+            x = x + self.last
+        else:
+            x = x + self.mean
+        return x
diff --git a/ts_benchmark/baselines/olinear/layers/Transformer_EncDec.py b/ts_benchmark/baselines/olinear/layers/Transformer_EncDec.py
new file mode 100644
index 00000000..1400f524
--- /dev/null
+++ b/ts_benchmark/baselines/olinear/layers/Transformer_EncDec.py
@@ -0,0 +1,102 @@
+import random
+from typing import List
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class LinearEncoder(nn.Module):
+    def __init__(
+        self,
+        d_model,
+        d_ff=None,
+        CovMat=None,
+        dropout=0.1,
+        activation="relu",
+        token_num=None,
+        **kwargs,
+    ):
+        super(LinearEncoder, self).__init__()
+
+        d_ff = d_ff or 4 * d_model
+        self.d_model = d_model
+        self.d_ff = d_ff
+        self.CovMat = CovMat.unsqueeze(0) if CovMat is not None else None
+        self.token_num = token_num
+
+        self.norm1 = nn.LayerNorm(d_model)
+        self.dropout = nn.Dropout(dropout)
+
+        self.v_proj = nn.Linear(d_model, d_model)
+        self.out_proj = nn.Linear(d_model, d_model)
+
+        init_weight_mat = torch.eye(self.token_num) * 1.0 + torch.randn(
+            self.token_num, self.token_num
+        ) * 1.0
+        self.weight_mat = nn.Parameter(init_weight_mat[None, :, :])
+
+        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
+        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
+        self.activation = F.relu if activation == "relu" else F.gelu
+        self.norm2 = nn.LayerNorm(d_model)
+
+    def forward(self, x, **kwargs):
+        values = self.v_proj(x)
+
+        if self.CovMat is not None:
+            A = F.softmax(self.CovMat, dim=-1) + F.softplus(self.weight_mat)
+        else:
+            A = F.softplus(self.weight_mat)
+
+        A = F.normalize(A, p=1, dim=-1)
+        A = self.dropout(A)
+
+        new_x = A @ values
+
+        x = x + self.dropout(self.out_proj(new_x))
+        x = self.norm1(x)
+
+        y = self.dropout(self.activation(self.conv1(x.transpose(-1, 1))))
+        y = self.dropout(self.conv2(y).transpose(-1, 1))
+        output = self.norm2(x + y)
+
+        return output, None
+
+
+class Encoder_ori(nn.Module):
+    def __init__(
+        self,
+        attn_layers,
+        conv_layers=None,
+        norm_layer=None,
+        one_output=False,
+        CKA_flag=False,
+    ):
+        super(Encoder_ori, self).__init__()
+        self.attn_layers = nn.ModuleList(attn_layers)
+        self.norm = norm_layer
+        self.one_output = one_output
+        self.CKA_flag = CKA_flag
+
+    def forward(self, x, attn_mask=None, tau=None, delta=None):
+        attns = []
+        layer_len = len(self.attn_layers)
+        for i, attn_layer in enumerate(self.attn_layers):
+            x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
+            attns.append(attn)
+
+            if not self.training and self.CKA_flag and layer_len > 1:
+                if i == layer_len - 1 and random.uniform(0, 1) < 0.0:
+                    pass
+
+        if isinstance(x, tuple) or isinstance(x, List):
+            x = x[0]
+
+        if self.norm is not None:
+            x = self.norm(x)
+
+        if self.one_output:
+            return x
+        else:
+            return x, attns
diff --git a/ts_benchmark/baselines/olinear/layers/__init__.py b/ts_benchmark/baselines/olinear/layers/__init__.py
new file mode 100644
index 00000000..a6d52b51
--- /dev/null
+++ b/ts_benchmark/baselines/olinear/layers/__init__.py
@@ -0,0 +1,3 @@
+__all__ = ["Encoder_ori", "LinearEncoder"]
+
+from ts_benchmark.baselines.olinear.layers.Transformer_EncDec import Encoder_ori, LinearEncoder
diff --git a/ts_benchmark/baselines/olinear/model/__init__.py b/ts_benchmark/baselines/olinear/model/__init__.py
new file mode 100644
index 00000000..720bafd9
--- /dev/null
+++ b/ts_benchmark/baselines/olinear/model/__init__.py
@@ -0,0 +1,3 @@
+__all__ = ["OLinear"]
+
+from ts_benchmark.baselines.olinear.model.olinear_model import OLinear
diff --git a/ts_benchmark/baselines/olinear/model/olinear_model.py b/ts_benchmark/baselines/olinear/model/olinear_model.py
new file mode 100644
index 00000000..c81e03cb
--- /dev/null
+++ b/ts_benchmark/baselines/olinear/model/olinear_model.py
@@ -0,0 +1,124 @@
+import os
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from ts_benchmark.baselines.olinear.layers.RevIN import RevIN
+from ts_benchmark.baselines.olinear.layers.Transformer_EncDec import Encoder_ori, LinearEncoder
+
+
+class OLinear(nn.Module):
+    def __init__(self, configs):
+        super(OLinear, self).__init__()
+        self.pred_len = configs.pred_len
+        self.enc_in = configs.enc_in
+        self.seq_len = configs.seq_len
+        self.d_model = configs.d_model
+        self.d_ff = configs.d_ff
+        self.Q_chan_indep = getattr(configs, "Q_chan_indep", False)
+
+        q_mat_dir = configs.Q_MAT_file if self.Q_chan_indep else configs.q_mat_file
+        q_mat_dir = self._resolve_path(configs, q_mat_dir)
+        self.register_buffer("Q_mat", torch.from_numpy(np.load(q_mat_dir)).to(torch.float32))
+
+        q_out_mat_dir = configs.Q_OUT_MAT_file if self.Q_chan_indep else configs.q_out_mat_file
+        q_out_mat_dir = self._resolve_path(configs, q_out_mat_dir)
+        self.register_buffer(
+            "Q_out_mat", torch.from_numpy(np.load(q_out_mat_dir)).to(torch.float32)
+        )
+
+        self.embed_size = configs.embed_size
+        self.embeddings = nn.Parameter(torch.randn(1, self.embed_size))
+
+        self.fc = nn.Sequential(
+            nn.Linear(self.pred_len * self.embed_size, self.d_ff),
+            nn.GELU(),
+            nn.Linear(self.d_ff, self.pred_len),
+        )
+
+        self.revin_layer = RevIN(self.enc_in, affine=True)
+        self.dropout = nn.Dropout(configs.dropout)
+
+        self.encoder = Encoder_ori(
+            [
+                LinearEncoder(
+                    d_model=configs.d_model,
+                    d_ff=configs.d_ff,
+                    CovMat=None,
+                    dropout=configs.dropout,
+                    activation=configs.activation,
+                    token_num=self.enc_in,
+                )
+                for _ in range(configs.e_layers)
+            ],
+            norm_layer=nn.LayerNorm(configs.d_model),
+            one_output=True,
+            CKA_flag=getattr(configs, "CKA_flag", False),
+        )
+        self.ortho_trans = nn.Sequential(
+            nn.Linear(self.seq_len * self.embed_size, self.d_model),
+            self.encoder,
+            nn.Linear(self.d_model, self.pred_len * self.embed_size),
+        )
+
+        self.delta1 = nn.Parameter(torch.zeros(1, self.enc_in, 1, self.seq_len))
+        self.delta2 = nn.Parameter(torch.zeros(1, self.enc_in, 1, self.pred_len))
+
+    def _resolve_path(self, configs, path: str) -> str:
+        if path is None:
+            raise ValueError("Q matrix file path is None")
+        if os.path.isfile(path):
+            return path
+        root = getattr(configs, "root_path", None)
+        if root is None:
+            raise ValueError(f"Q matrix file not found: {path}")
+        new_path = os.path.join(root, path)
+        if not os.path.isfile(new_path):
+            raise ValueError(f"Q matrix file not found: {path} (resolved: {new_path})")
+        return new_path
+
+    def tokenEmb(self, x, embeddings):
+        if self.embed_size <= 1:
+            return x.transpose(-1, -2).unsqueeze(-1)
+        x = x.transpose(-1, -2)
+        x = x.unsqueeze(-1)
+        return x * embeddings
+
+    def Fre_Trans(self, x):
+        B, N, T, D = x.shape
+        x = x.transpose(-1, -2)
+
+        if self.Q_chan_indep:
+            x_trans = torch.einsum(
+                "bndt,ntv->bndv", x, self.Q_mat.transpose(-1, -2)
+            )
+        else:
+            x_trans = (
+                torch.einsum("bndt,tv->bndv", x, self.Q_mat.transpose(-1, -2))
+                + self.delta1
+            )
+
+        x_trans = self.ortho_trans(x_trans.flatten(-2)).reshape(B, N, D, self.pred_len)
+
+        if self.Q_chan_indep:
+            x = torch.einsum("bndt,ntv->bndv", x_trans, self.Q_out_mat)
+        else:
+            x = (
+                torch.einsum("bndt,tv->bndv", x_trans, self.Q_out_mat) + self.delta2
+            )
+
+        x = x.transpose(-1, -2)
+        return x
+
+    def forward(self, x, x_mark_enc=None, x_dec=None, x_mark_dec=None, mask=None):
+        x = self.revin_layer(x, mode="norm")
+        x_ori = x
+
+        x = self.tokenEmb(x_ori, self.embeddings)
+        x = self.Fre_Trans(x)
+
+        out = self.fc(x.flatten(-2)).transpose(-1, -2)
+        out = self.dropout(out)
+        out = self.revin_layer(out, mode="denorm")
+        return out
diff --git a/ts_benchmark/baselines/olinear/olinear.py b/ts_benchmark/baselines/olinear/olinear.py
new file mode 100644
index 00000000..478646a2
--- /dev/null
+++ b/ts_benchmark/baselines/olinear/olinear.py
@@ -0,0 +1,73 @@
+import os
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torch import optim
+
+from ts_benchmark.baselines.deep_forecasting_model_base import DeepForecastingModelBase
+from ts_benchmark.common.constant import ROOT_PATH
+from ts_benchmark.baselines.olinear.model.olinear_model import OLinear as OLinearNet
+
+
+MODEL_HYPER_PARAMS = {
+    "Q_chan_indep": False,
+    "q_mat_file": None,
+    "q_out_mat_file": None,
+    "Q_MAT_file": None,
+    "Q_OUT_MAT_file": None,
+    "root_path": ROOT_PATH,
+    "embed_size": 1,
+    "d_model": 256,
+    "d_ff": 512,
+    "e_layers": 2,
+    "dropout": 0.1,
+    "activation": "gelu",
+    "temp_patch_len": 16,
+    "temp_stride": 8,
+    "use_amp": 0,
+    "batch_size": 32,
+    "lradj": "type1",
+    "lr": 0.0001,
+    "num_epochs": 10,
+    "num_workers": 0,
+    "loss": "MSE",
+    "patience": 3,
+    "parallel_strategy": "DP",
+}
+
+
+class OLinear(DeepForecastingModelBase):
+    def __init__(self, **kwargs):
+        super(OLinear, self).__init__(MODEL_HYPER_PARAMS, **kwargs)
+
+        # Fail fast when the Q-matrix pair that will actually be loaded is incomplete.
+        if self.config.Q_chan_indep:
+            missing = self.config.Q_MAT_file is None or self.config.Q_OUT_MAT_file is None
+        else:
+            missing = self.config.q_mat_file is None or self.config.q_out_mat_file is None
+        if missing:
+            raise ValueError(
+                "Missing Q matrix file paths. Please set q_mat_file and q_out_mat_file (or Q_MAT_file and Q_OUT_MAT_file) in model-hyper-params."
+            )
+
+    @property
+    def model_name(self):
+        return "OLinear"
+
+    def _init_criterion_and_optimizer(self):
+        criterion = nn.MSELoss()
+        optimizer = optim.Adam(self.model.parameters(), lr=self.config.lr)
+        return criterion, optimizer
+
+    def _init_model(self):
+        return OLinearNet(self.config)
+
+    def _process(self, input, target, input_mark, target_mark):
+        if self.config.use_amp == 1:
+            with torch.autocast("cuda"):
+                outputs = self.model(input)
+        else:
+            outputs = self.model(input)
+
+        return {"output": outputs}