 import torch
 from linear_operator.operators import DiagLinearOperator
+from torch import nn
 from torch.distributions import ComposeTransform, Transform, TransformedDistribution
 from torch.func import jacrev

-from autoemulate.core.device import TorchDeviceMixin
+from autoemulate.core.device import TorchDeviceMixin, get_torch_device
 from autoemulate.core.types import (
     DeviceLike,
     DistributionLike,
@@ -203,6 +204,59 @@ def _fit_transforms(self, x: TensorLike, y: TensorLike):
             all(self._y_transforms_affine) if self._y_transforms_affine else False
         )

+    def to(self, device: DeviceLike) -> "TransformedEmulator":
+        """
+        Move the emulator and all its state to the given device.
+
+        Moves the underlying model, transforms, and cached tensors to ``device``.
+
+        Parameters
+        ----------
+        device: DeviceLike
+            The target device (e.g. ``"cpu"``, ``"mps"``, ``"cuda"``).
+
+        Returns
+        -------
+        TransformedEmulator
+            ``self``, for method chaining.
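+
+        Examples
+        --------
+        A minimal, illustrative sketch (assumes a fitted emulator ``em``
+        that exposes ``predict``, inputs ``x``, and an available CUDA
+        device)::
+
+            em = em.to("cuda")      # model, transforms, and caches move together
+            preds = em.predict(x.to("cuda"))
+            em.to("cpu")            # returns self, so calls can be chained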
+        """
+        device = get_torch_device(device)
+        self.device = device
+
+        # Move the underlying model (Emulator.to handles nn.Module delegation)
+        self.model.to(device)
+        # Clear cached prediction strategies that hold stale device refs
+        if hasattr(self.model, "_clear_cache"):
+            self.model._clear_cache()  # type: ignore[attr-defined]
+
+        # Move the inner model's own transforms (e.g. StandardizeTransform)
+        for attr in ("x_transform", "y_transform"):
+            transform = getattr(self.model, attr, None)
+            if transform is not None:
+                self._move_transform_to_device(transform, device)
+
+        # Move transform state tensors
+        for transform in self.x_transforms + self.y_transforms:
+            self._move_transform_to_device(transform, device)
+
+        # Move cached Jacobian
+        if self._fixed_jacobian_y_inv is not None:
+            self._fixed_jacobian_y_inv = self._fixed_jacobian_y_inv.to(device)
+
+        return self
+
+    @staticmethod
+    def _move_transform_to_device(transform: Transform, device: torch.device) -> None:
+        """Move a transform's tensor attributes to the given device."""
+        if isinstance(transform, nn.Module):
+            transform.to(device)
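+        # ``object.__setattr__`` writes directly to the instance dict, bypassing
+        # any custom ``__setattr__`` (e.g. nn.Module's attribute interception).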
+        if hasattr(transform, "device"):
+            object.__setattr__(transform, "device", device)
+        for attr_name in list(vars(transform)):
+            val = getattr(transform, attr_name)
+            if isinstance(val, torch.Tensor):
+                setattr(transform, attr_name, val.to(device))
+
     def refit(self, x: TensorLike, y: TensorLike, retrain_transforms: bool = False):
         """
         Refit the emulator with new data and optionally retrain transforms.