@@ -12,7 +12,7 @@
 import torch.nn.functional as F
 import torch.optim as optim
 from torch.autograd import Function
-from qlib.contrib.model.pytorch_utils import count_parameters
+from qlib.contrib.model.pytorch_utils import count_parameters, get_device
 from qlib.data.dataset import DatasetH
 from qlib.data.dataset.handler import DataHandlerLP
 from qlib.log import get_module_logger
@@ -81,7 +81,7 @@ def __init__(
         self.optimizer = optimizer.lower()
         self.loss = loss
         self.n_splits = n_splits
-        self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+        self.device = get_device(GPU)
         self.seed = seed

         self.logger.info(
@@ -396,7 +396,7 @@ def __init__(
         self.model_type = model_type
         self.trans_loss = trans_loss
         self.len_seq = len_seq
-        self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+        self.device = get_device(GPU)
         in_size = self.n_input

         features = nn.ModuleList()
@@ -558,7 +558,7 @@ def __init__(self, loss_type="cosine", input_dim=512, GPU=0):
         """
         self.loss_type = loss_type
         self.input_dim = input_dim
-        self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
+        self.device = get_device(GPU)

     def compute(self, X, Y):
         """Compute adaptation loss
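
For context: the three removed lines all duplicated the same device-selection expression, and this commit replaces them with a single get_device(GPU) helper imported from qlib.contrib.model.pytorch_utils. The helper's actual implementation is not shown in this diff; the following is a minimal sketch that assumes it simply wraps the inline logic it replaces.

import torch

def get_device(GPU):
    # Sketch only: mirrors the inline expression this diff removes.
    # Selects cuda:<GPU> when CUDA is available and GPU >= 0, else CPU.
    if torch.cuda.is_available() and GPU >= 0:
        return torch.device("cuda:%d" % GPU)
    return torch.device("cpu")

Centralizing the expression in one helper means a future change to device handling (e.g. MPS support or different fallback behavior) touches one function instead of three constructors.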
|