8 changes: 4 additions & 4 deletions qlib/contrib/model/pytorch_adarnn.py
@@ -12,7 +12,7 @@
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
-from qlib.contrib.model.pytorch_utils import count_parameters
+from qlib.contrib.model.pytorch_utils import count_parameters, get_device
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
@@ -81,7 +81,7 @@ def __init__(
self.optimizer = optimizer.lower()
self.loss = loss
self.n_splits = n_splits
-self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
@@ -396,7 +396,7 @@ def __init__(
self.model_type = model_type
self.trans_loss = trans_loss
self.len_seq = len_seq
-self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
in_size = self.n_input

features = nn.ModuleList()
@@ -558,7 +558,7 @@ def __init__(self, loss_type="cosine", input_dim=512, GPU=0):
"""
self.loss_type = loss_type
self.input_dim = input_dim
-self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)

def compute(self, X, Y):
"""Compute adaptation loss
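Every hunk in this change set replaces the inline `torch.device("cuda:%d" % GPU ...)` expression with a shared `get_device` helper imported from `qlib/contrib/model/pytorch_utils.py`. The helper itself is not shown in the diff; a minimal sketch of what it presumably does, inferred from the inline expressions it replaces and from the `isinstance(GPU, str)` branch dropped in `pytorch_nn.py` further down, is:

```python
# Hypothetical sketch only -- the real helper lives in qlib/contrib/model/pytorch_utils.py,
# is not part of this diff, and may differ in signature and details.
from typing import Union

import torch


def get_device(GPU: Union[int, str]) -> torch.device:
    """Resolve a torch.device from an int GPU index or an explicit device string."""
    if isinstance(GPU, str):
        # pass device strings such as "cuda:1" or "cpu" straight through
        return torch.device(GPU)
    # fall back to CPU when CUDA is unavailable or the index is negative
    return torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
```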
4 changes: 2 additions & 2 deletions qlib/contrib/model/pytorch_add.py
@@ -17,7 +17,7 @@
import torch.optim as optim
from qlib.contrib.model.pytorch_gru import GRUModel
from qlib.contrib.model.pytorch_lstm import LSTMModel
-from qlib.contrib.model.pytorch_utils import count_parameters
+from qlib.contrib.model.pytorch_utils import count_parameters, get_device
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
from qlib.log import get_module_logger
@@ -83,7 +83,7 @@ def __init__(
self.optimizer = optimizer.lower()
self.base_model = base_model
self.model_path = model_path
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.gamma = gamma
4 changes: 2 additions & 2 deletions qlib/contrib/model/pytorch_alstm.py
@@ -16,7 +16,7 @@
import torch.nn as nn
import torch.optim as optim

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
@@ -70,7 +70,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
8 changes: 4 additions & 4 deletions qlib/contrib/model/pytorch_alstm_ts.py
@@ -17,7 +17,7 @@
import torch.optim as optim
from torch.utils.data import DataLoader

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
@@ -74,7 +74,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.n_jobs = n_jobs
self.seed = seed

@@ -219,8 +219,8 @@ def fit(
dl_valid.config(fillna_type="ffill+bfill") # process nan brought by dataloader

if reweighter is None:
-wl_train = np.ones(len(dl_train))
-wl_valid = np.ones(len(dl_valid))
+wl_train = np.ones(len(dl_train), dtype=np.float32)
+wl_valid = np.ones(len(dl_valid), dtype=np.float32)
elif isinstance(reweighter, Reweighter):
wl_train = reweighter.reweight(dl_train)
wl_valid = reweighter.reweight(dl_valid)
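The `dtype=np.float32` added above (and in the matching `fit` hunks of the other `*_ts` models and `pytorch_general_nn.py`) keeps the default sample weights in the same precision as the model tensors: `np.ones` returns float64 by default, and a float64 weight tensor can cause dtype mismatches or silent up-casts when combined with float32 losses. A small illustration of the assumed rationale (not taken from the PR itself):

```python
# Demonstrates the dtype difference the added argument avoids.
import numpy as np
import torch

w_default = np.ones(4)                    # NumPy defaults to float64
w_float32 = np.ones(4, dtype=np.float32)  # matches PyTorch's default float32

print(torch.from_numpy(w_default).dtype)  # torch.float64
print(torch.from_numpy(w_float32).dtype)  # torch.float32, consistent with model outputs
```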
4 changes: 2 additions & 2 deletions qlib/contrib/model/pytorch_gats.py
@@ -15,7 +15,7 @@
import torch.nn as nn
import torch.optim as optim

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
@@ -75,7 +75,7 @@ def __init__(
self.loss = loss
self.base_model = base_model
self.model_path = model_path
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
4 changes: 2 additions & 2 deletions qlib/contrib/model/pytorch_gats_ts.py
@@ -16,7 +16,7 @@
from torch.utils.data import DataLoader
from torch.utils.data import Sampler

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset.handler import DataHandlerLP
from ...contrib.model.pytorch_lstm import LSTMModel
@@ -94,7 +94,7 @@ def __init__(
self.loss = loss
self.base_model = base_model
self.model_path = model_path
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.n_jobs = n_jobs
self.seed = seed

8 changes: 4 additions & 4 deletions qlib/contrib/model/pytorch_general_nn.py
@@ -17,7 +17,7 @@

from qlib.data.dataset.weight import Reweighter

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset import DatasetH, TSDatasetH
from ...data.dataset.handler import DataHandlerLP
@@ -83,7 +83,7 @@ def __init__(
self.optimizer = optimizer.lower()
self.loss = loss
self.weight_decay = weight_decay
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.n_jobs = n_jobs
self.seed = seed

@@ -249,8 +249,8 @@ def fit(
raise ValueError("Empty data from dataset, please check your dataset config.")

if reweighter is None:
-wl_train = np.ones(len(dl_train))
-wl_valid = np.ones(len(dl_valid))
+wl_train = np.ones(len(dl_train), dtype=np.float32)
+wl_valid = np.ones(len(dl_valid), dtype=np.float32)
elif isinstance(reweighter, Reweighter):
wl_train = reweighter.reweight(dl_train)
wl_valid = reweighter.reweight(dl_valid)
4 changes: 2 additions & 2 deletions qlib/contrib/model/pytorch_gru.py
@@ -19,7 +19,7 @@
from ...log import get_module_logger
from ...model.base import Model
from ...utils import get_or_create_path
-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device


class GRU(Model):
@@ -70,7 +70,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
8 changes: 4 additions & 4 deletions qlib/contrib/model/pytorch_gru_ts.py
@@ -16,7 +16,7 @@
import torch.optim as optim
from torch.utils.data import DataLoader

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset.handler import DataHandlerLP
from ...model.utils import ConcatDataset
@@ -72,7 +72,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.n_jobs = n_jobs
self.seed = seed

@@ -213,8 +213,8 @@ def fit(
dl_valid.config(fillna_type="ffill+bfill") # process nan brought by dataloader

if reweighter is None:
-wl_train = np.ones(len(dl_train))
-wl_valid = np.ones(len(dl_valid))
+wl_train = np.ones(len(dl_train), dtype=np.float32)
+wl_valid = np.ones(len(dl_valid), dtype=np.float32)
elif isinstance(reweighter, Reweighter):
wl_train = reweighter.reweight(dl_train)
wl_valid = reweighter.reweight(dl_valid)
4 changes: 2 additions & 2 deletions qlib/contrib/model/pytorch_hist.py
@@ -16,7 +16,7 @@
import torch
import torch.nn as nn
import torch.optim as optim
-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
@@ -80,7 +80,7 @@ def __init__(
self.model_path = model_path
self.stock2concept = stock2concept
self.stock_index = stock_index
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
4 changes: 2 additions & 2 deletions qlib/contrib/model/pytorch_igmtf.py
@@ -16,7 +16,7 @@
import torch.nn as nn
import torch.optim as optim

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
@@ -74,7 +74,7 @@ def __init__(
self.loss = loss
self.base_model = base_model
self.model_path = model_path
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
3 changes: 2 additions & 1 deletion qlib/contrib/model/pytorch_krnn.py
@@ -19,6 +19,7 @@
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
+from .pytorch_utils import get_device

########################################################################
########################################################################
@@ -276,7 +277,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
3 changes: 2 additions & 1 deletion qlib/contrib/model/pytorch_localformer.py
@@ -21,6 +21,7 @@
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from torch.nn.modules.container import ModuleList
+from .pytorch_utils import get_device

# qrun examples/benchmarks/Localformer/workflow_config_localformer_Alpha360.yaml ”

@@ -58,7 +59,7 @@ def __init__(
self.optimizer = optimizer.lower()
self.loss = loss
self.n_jobs = n_jobs
-self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed
self.logger = get_module_logger("TransformerModel")
self.logger.info("Naive Transformer:" "\nbatch_size : {}" "\ndevice : {}".format(self.batch_size, self.device))
3 changes: 2 additions & 1 deletion qlib/contrib/model/pytorch_localformer_ts.py
@@ -21,6 +21,7 @@
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from torch.nn.modules.container import ModuleList
+from .pytorch_utils import get_device


class LocalformerModel(Model):
@@ -56,7 +57,7 @@ def __init__(
self.optimizer = optimizer.lower()
self.loss = loss
self.n_jobs = n_jobs
-self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed
self.logger = get_module_logger("TransformerModel")
self.logger.info(
3 changes: 2 additions & 1 deletion qlib/contrib/model/pytorch_lstm.py
@@ -19,6 +19,7 @@
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
+from .pytorch_utils import get_device


class LSTM(Model):
@@ -69,7 +70,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(
7 changes: 4 additions & 3 deletions qlib/contrib/model/pytorch_lstm_ts.py
@@ -20,6 +20,7 @@
from ...data.dataset.handler import DataHandlerLP
from ...model.utils import ConcatDataset
from ...data.dataset.weight import Reweighter
+from .pytorch_utils import get_device


class LSTM(Model):
@@ -71,7 +72,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.n_jobs = n_jobs
self.seed = seed

@@ -208,8 +209,8 @@ def fit(
dl_valid.config(fillna_type="ffill+bfill") # process nan brought by dataloader

if reweighter is None:
-wl_train = np.ones(len(dl_train))
-wl_valid = np.ones(len(dl_valid))
+wl_train = np.ones(len(dl_train), dtype=np.float32)
+wl_valid = np.ones(len(dl_valid), dtype=np.float32)
elif isinstance(reweighter, Reweighter):
wl_train = reweighter.reweight(dl_train)
wl_valid = reweighter.reweight(dl_valid)
9 changes: 3 additions & 6 deletions qlib/contrib/model/pytorch_nn.py
@@ -17,7 +17,7 @@
import torch.nn as nn
import torch.optim as optim

-from .pytorch_utils import count_parameters
+from .pytorch_utils import count_parameters, get_device
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
@@ -89,10 +89,7 @@ def __init__(
self.eval_steps = eval_steps
self.optimizer = optimizer.lower()
self.loss_type = loss
-if isinstance(GPU, str):
-    self.device = torch.device(GPU)
-else:
-    self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed
self.weight_decay = weight_decay
self.data_parall = data_parall
@@ -208,7 +205,7 @@ def fit(
all_df["x"][seg] = df["feature"]
all_df["y"][seg] = df["label"].copy() # We have to use copy to remove the reference to release mem
if reweighter is None:
all_df["w"][seg] = pd.DataFrame(np.ones_like(all_df["y"][seg].values), index=df.index)
all_df["w"][seg] = pd.DataFrame(np.ones_like(all_df["y"][seg].values, dtype=np.float32), index=df.index)
elif isinstance(reweighter, Reweighter):
all_df["w"][seg] = pd.DataFrame(reweighter.reweight(df))
else:
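The `pytorch_nn.py` hunk above was the one place that previously accepted a device string as well as an integer index; dropping that branch suggests `get_device` now covers both forms. A hedged usage sketch under that assumption:

```python
# Assumes get_device accepts both int indices and device strings, as implied by the
# removal of the isinstance(GPU, str) branch above; verify against pytorch_utils.py.
from qlib.contrib.model.pytorch_utils import get_device

dev_from_index = get_device(0)          # cuda:0 if available, otherwise cpu
dev_from_string = get_device("cuda:1")  # explicit device string
dev_cpu = get_device(-1)                # negative index -> cpu
```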
3 changes: 2 additions & 1 deletion qlib/contrib/model/pytorch_sandwich.py
@@ -20,6 +20,7 @@
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from .pytorch_krnn import CNNKRNNEncoder
+from .pytorch_utils import get_device


class SandwichModel(nn.Module):
@@ -152,7 +153,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
-self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
+self.device = get_device(GPU)
self.seed = seed

self.logger.info(