diff --git a/README.md b/README.md
index 64a2ed0d5..c8a7caf86 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,8 @@ SPDX-License-Identifier: Apache-2.0
-…
-…
+<img alt="PINA logo" …>
diff --git a/docs/source/index_files/PINA_logo.png b/docs/source/index_files/PINA_logo.png
index bac7ee375..5ee864fd7 100644
Binary files a/docs/source/index_files/PINA_logo.png and b/docs/source/index_files/PINA_logo.png differ
diff --git a/pina/equation/equation.py b/pina/equation/equation.py
index 1e4622db6..057c6bcf5 100644
--- a/pina/equation/equation.py
+++ b/pina/equation/equation.py
@@ -1,7 +1,6 @@
 """Module for the Equation."""
 
 import inspect
-
 from .equation_interface import EquationInterface
 
 
@@ -49,6 +48,10 @@ def residual(self, input_, output_, params_=None):
         :raises RuntimeError: If the underlying equation signature length is
             not 2 (direct problem) or 3 (inverse problem).
         """
+        # Move the equation to the input_ device
+        self.to(input_.device)
+
+        # Call the underlying equation based on its signature length
         if self.__len_sig == 2:
             return self.__equation(input_, output_)
         if self.__len_sig == 3:
diff --git a/pina/equation/equation_factory.py b/pina/equation/equation_factory.py
index 057ea65d4..da9c55647 100644
--- a/pina/equation/equation_factory.py
+++ b/pina/equation/equation_factory.py
@@ -239,19 +239,19 @@ def equation(input_, output_):
             )
 
             # Ensure consistency of c length
-            if len(self.c) != (len(input_lbl) - 1) and len(self.c) > 1:
+            if self.c.shape[-1] != len(input_lbl) - 1 and self.c.shape[-1] > 1:
                 raise ValueError(
                     "If 'c' is passed as a list, its length must be equal to "
                     "the number of spatial dimensions."
                 )
 
             # Repeat c to ensure consistent shape for advection
-            self.c = self.c.repeat(output_.shape[0], 1)
-            if self.c.shape[1] != (len(input_lbl) - 1):
-                self.c = self.c.repeat(1, len(input_lbl) - 1)
+            c = self.c.repeat(output_.shape[0], 1)
+            if c.shape[1] != (len(input_lbl) - 1):
+                c = c.repeat(1, len(input_lbl) - 1)
 
             # Add a dimension to c for the following operations
-            self.c = self.c.unsqueeze(-1)
+            c = c.unsqueeze(-1)
 
             # Compute the time derivative and the spatial gradient
             time_der = grad(output_, input_, components=None, d="t")
@@ -262,7 +262,7 @@ def equation(input_, output_):
             tmp = tmp.transpose(-1, -2)
 
             # Compute advection term
-            adv = (tmp * self.c).sum(dim=tmp.tensor.ndim - 2)
+            adv = (tmp * c).sum(dim=tmp.tensor.ndim - 2)
 
             return time_der + adv
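The `equation_factory.py` change above deserves a note: the old code rebound `self.c` on every call to `equation`, so the `repeat`/`unsqueeze` reshaping accumulated across evaluations and a second `residual` call saw a coefficient tensor of the wrong shape. A minimal sketch of the difference, using an illustrative stand-in class rather than PINA code:

```python
import torch


class CoefficientHolder:
    """Illustrative stand-in for Advection's coefficient handling."""

    def __init__(self):
        self.c = torch.tensor([[1.0, 2.0]])  # one row, two spatial dims

    def old_reshape(self, n_pts):
        # Old behavior: rebinding self.c leaks the reshaping across calls
        self.c = self.c.repeat(n_pts, 1)
        return self.c.unsqueeze(-1)

    def new_reshape(self, n_pts):
        # New behavior: a local c leaves self.c untouched between calls
        c = self.c.repeat(n_pts, 1)
        return c.unsqueeze(-1)


holder = CoefficientHolder()
print(holder.new_reshape(4).shape)  # torch.Size([4, 2, 1])
print(holder.new_reshape(4).shape)  # torch.Size([4, 2, 1]) again: no leak

print(holder.old_reshape(4).shape)  # torch.Size([4, 2, 1])
print(holder.old_reshape(4).shape)  # torch.Size([16, 2, 1]): c has grown
```

Working on a local copy keeps the factory's `equation` closure idempotent, which matters once `residual` is called repeatedly during training.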
diff --git a/pina/equation/equation_interface.py b/pina/equation/equation_interface.py
index f1cc74754..82b86dbd0 100644
--- a/pina/equation/equation_interface.py
+++ b/pina/equation/equation_interface.py
@@ -1,6 +1,7 @@
 """Module for the Equation Interface."""
 
 from abc import ABCMeta, abstractmethod
+import torch
 
 
 class EquationInterface(metaclass=ABCMeta):
@@ -33,3 +34,33 @@ def residual(self, input_, output_, params_):
         :return: The computed residual of the equation.
         :rtype: LabelTensor
         """
+
+    def to(self, device):
+        """
+        Move all tensor attributes to the specified device.
+
+        :param torch.device device: The target device to move the tensors to.
+        :return: The instance moved to the specified device.
+        :rtype: EquationInterface
+        """
+        # Iterate over all attributes of the Equation
+        for key, val in self.__dict__.items():
+
+            # Move tensors in dictionaries to the specified device
+            if isinstance(val, dict):
+                self.__dict__[key] = {
+                    k: v.to(device) if torch.is_tensor(v) else v
+                    for k, v in val.items()
+                }
+
+            # Move tensors in lists to the specified device
+            elif isinstance(val, list):
+                self.__dict__[key] = [
+                    v.to(device) if torch.is_tensor(v) else v for v in val
+                ]
+
+            # Move tensor attributes to the specified device
+            elif torch.is_tensor(val):
+                self.__dict__[key] = val.to(device)
+
+        return self
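A rough sketch of what the new `EquationInterface.to` does, reproduced on a plain object so it can be run standalone; `Holder` and its attributes are illustrative, only the traversal mirrors the method added above:

```python
import torch


class Holder:
    """Illustrative object with the attribute layouts to() handles."""

    def __init__(self):
        self.c = torch.tensor([1.0, 2.0])              # bare tensor
        self.params = {"mu": torch.tensor(0.5)}        # tensors inside a dict
        self.items = [torch.zeros(3), "not-a-tensor"]  # mixed list

    def to(self, device):
        # Same traversal as the new EquationInterface.to: walk __dict__,
        # moving bare tensors and tensors nested one level in dicts/lists
        for key, val in self.__dict__.items():
            if isinstance(val, dict):
                self.__dict__[key] = {
                    k: v.to(device) if torch.is_tensor(v) else v
                    for k, v in val.items()
                }
            elif isinstance(val, list):
                self.__dict__[key] = [
                    v.to(device) if torch.is_tensor(v) else v for v in val
                ]
            elif torch.is_tensor(val):
                self.__dict__[key] = val.to(device)
        return self


h = Holder().to("cpu")  # swap in "cuda" on a GPU machine
print(h.c.device, h.params["mu"].device, h.items[0].device)
```

Note the traversal is one level deep: a tensor nested inside, say, a list inside a dict would not be moved. That appears sufficient for what the equations in this diff store (e.g. `Advection`'s `c`).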
diff --git a/pina/equation/system_equation.py b/pina/equation/system_equation.py
index 21cb27160..3e8550d9b 100644
--- a/pina/equation/system_equation.py
+++ b/pina/equation/system_equation.py
@@ -101,6 +101,10 @@ def residual(self, input_, output_, params_=None):
         :return: The aggregated residuals of the system of equations.
         :rtype: LabelTensor
         """
+        # Move the equation to the input_ device
+        self.to(input_.device)
+
+        # Compute the residual for each equation
         residual = torch.hstack(
             [
                 equation.residual(input_, output_, params_)
@@ -108,6 +112,7 @@ def residual(self, input_, output_, params_=None):
             ]
         )
 
+        # Skip reduction if not specified
         if self.reduction is None:
             return residual
diff --git a/pina/problem/zoo/helmholtz.py b/pina/problem/zoo/helmholtz.py
index 5f3f956af..0f38780c7 100644
--- a/pina/problem/zoo/helmholtz.py
+++ b/pina/problem/zoo/helmholtz.py
@@ -48,11 +48,10 @@ def __init__(self, alpha=3.0):
         :type alpha: float | int
         """
         super().__init__()
-
-        self.alpha = alpha
         check_consistency(alpha, (int, float))
+        self.alpha = alpha
 
-    def forcing_term(self, input_):
+    def forcing_term(input_):
         """
         Implementation of the forcing term.
         """
diff --git a/pina/solver/physics_informed_solver/pinn_interface.py b/pina/solver/physics_informed_solver/pinn_interface.py
index 9155e19ec..65a0dd78f 100644
--- a/pina/solver/physics_informed_solver/pinn_interface.py
+++ b/pina/solver/physics_informed_solver/pinn_interface.py
@@ -71,9 +71,7 @@ def setup(self, stage):
         """
         # Override the compilation, compiling only for torch < 2.8, see
         # related issue at https://github.com/mathLab/PINA/issues/621
-        if torch.__version__ < "2.8":
-            self.trainer.compile = True
-        else:
+        if torch.__version__ >= "2.8":
             self.trainer.compile = False
             warnings.warn(
                 "Compilation is disabled for torch >= 2.8. "
diff --git a/pina/solver/solver.py b/pina/solver/solver.py
index 6948ec664..442574224 100644
--- a/pina/solver/solver.py
+++ b/pina/solver/solver.py
@@ -174,11 +174,7 @@ def setup(self, stage):
         :return: The result of the parent class ``setup`` method.
         :rtype: Any
         """
-        if stage == "fit" and self.trainer.compile:
-            self._setup_compile()
-        if stage == "test" and (
-            self.trainer.compile and not self._is_compiled()
-        ):
+        if self.trainer.compile and not self._is_compiled():
             self._setup_compile()
         return super().setup(stage)
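Two details of the compile gating are easy to miss. First, in recent torch releases `torch.__version__` is a `TorchVersion` (a `str` subclass with version-aware comparisons), so `torch.__version__ >= "2.8"` compares release numbers rather than characters. Second, the simplified `setup` in `solver.py` now compiles at most once for any stage, instead of special-casing `"fit"` and `"test"`. A self-contained sketch of that flow, with stub classes rather than the real PINA trainer and solver:

```python
class StubTrainer:
    """Illustrative stand-in for pina.Trainer."""

    compile = True


class StubSolver:
    """Illustrative stand-in; mirrors only the setup flow from the diff."""

    def __init__(self):
        self.trainer = StubTrainer()
        self._compiled = False

    def _is_compiled(self):
        return self._compiled

    def _setup_compile(self):
        print("compiling model")
        self._compiled = True

    def setup(self, stage):
        # Compile once, whatever the stage, if requested and not done yet
        if self.trainer.compile and not self._is_compiled():
            self._setup_compile()


solver = StubSolver()
solver.setup("fit")   # prints "compiling model"
solver.setup("test")  # no-op: already compiled
```

Guarding on `_is_compiled()` rather than on the stage name makes the behavior identical for fit, test, or any other Lightning stage.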
diff --git a/pina/trainer.py b/pina/trainer.py
index 78dd77adf..8e1d95110 100644
--- a/pina/trainer.py
+++ b/pina/trainer.py
@@ -1,12 +1,17 @@
 """Module for the Trainer."""
 
 import sys
+import warnings
 import torch
 import lightning
-from .utils import check_consistency
+from .utils import check_consistency, custom_warning_format
 from .data import PinaDataModule
 from .solver import SolverInterface, PINNInterface
 
+# set the warning for compile options
+warnings.formatwarning = custom_warning_format
+warnings.filterwarnings("always", category=UserWarning)
+
 
 class Trainer(lightning.pytorch.Trainer):
     """
@@ -49,7 +54,8 @@ def __init__(
         :param float val_size: The percentage of elements to include in the
             validation dataset. Default is ``0.0``.
         :param bool compile: If ``True``, the model is compiled before training.
-            Default is ``False``. For Windows users, it is always disabled.
+            Default is ``False``. For Windows users, it is always disabled.
+            Not supported for Python versions greater than or equal to 3.14.
         :param bool repeat: Whether to repeat the dataset data in each condition
             during training. For further details, see the
             :class:`~pina.data.data_module.PinaDataModule` class. Default is
@@ -104,8 +110,17 @@ def __init__(
         super().__init__(**kwargs)
 
         # checking compilation and automatic batching
-        if compile is None or sys.platform == "win32":
+        # compilation disabled for Windows and for Python 3.14+
+        if (
+            compile is None
+            or sys.platform == "win32"
+            or sys.version_info >= (3, 14)
+        ):
             compile = False
+            warnings.warn(
+                "Compilation is disabled for Python 3.14+ and for Windows.",
+                UserWarning,
+            )
 
         repeat = repeat if repeat is not None else False
@@ -325,3 +340,23 @@ def _check_consistency_and_set_defaults(
         if batch_size is not None:
             check_consistency(batch_size, int)
         return pin_memory, num_workers, shuffle, batch_size
+
+    @property
+    def compile(self):
+        """
+        Whether compilation is required or not.
+
+        :return: ``True`` if compilation is required, ``False`` otherwise.
+        :rtype: bool
+        """
+        return self._compile
+
+    @compile.setter
+    def compile(self, value):
+        """
+        Set the value of compile.
+
+        :param bool value: Whether compilation is required or not.
+        """
+        check_consistency(value, bool)
+        self._compile = value
diff --git a/pyproject.toml b/pyproject.toml
index aa4a1947a..ac09d7e21 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "pina-mathlab"
-version = "0.2.4"
+version = "0.2.5"
 description = "Physic Informed Neural networks for Advance modeling."
 readme = "README.md"
 authors = [
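The new `compile` property pairs a validated setter with the constructor guard, which is what lets `pinn_interface.py` above flip `self.trainer.compile` safely. A standalone sketch of the same pattern; `TrainerStub` is illustrative and uses a plain `isinstance` check where PINA uses `check_consistency`:

```python
import sys
import warnings


class TrainerStub:
    """Illustrative stand-in for the compile handling in pina.Trainer."""

    def __init__(self, compile=None):
        # Same guard as the diff: Windows and Python 3.14+ never compile
        if (
            compile is None
            or sys.platform == "win32"
            or sys.version_info >= (3, 14)
        ):
            compile = False
            warnings.warn(
                "Compilation is disabled for Python 3.14+ and for Windows.",
                UserWarning,
            )
        self.compile = compile  # goes through the validated setter

    @property
    def compile(self):
        return self._compile

    @compile.setter
    def compile(self, value):
        if not isinstance(value, bool):  # PINA calls check_consistency here
            raise ValueError("compile must be a bool")
        self._compile = value


trainer = TrainerStub(compile=True)
print(trainer.compile)   # True (on supported platforms)
trainer.compile = False  # fine
# trainer.compile = "yes"  # would raise: not a bool
```

Note that, as in the diff, the guard also warns when `compile` is simply left at its default `None`, not only when a requested compilation is refused.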
diff --git a/readme/pina_logo.png b/readme/pina_logo.png
index 53bef16d9..5ee864fd7 100644
Binary files a/readme/pina_logo.png and b/readme/pina_logo.png differ
diff --git a/tests/test_equation/test_equation_factory.py b/tests/test_equation/test_equation_factory.py
index 4a9875115..be01427cb 100644
--- a/tests/test_equation/test_equation_factory.py
+++ b/tests/test_equation/test_equation_factory.py
@@ -104,7 +104,7 @@ def test_advection_equation(c):
 
     # Should fail if c is a list and its length != spatial dimension
     with pytest.raises(ValueError):
-        Advection([1, 2, 3])
+        equation = Advection([1, 2, 3])
 
     residual = equation.residual(pts, u)
diff --git a/tutorials/static/pina_logo.png b/tutorials/static/pina_logo.png
index 53bef16d9..5ee864fd7 100644
Binary files a/tutorials/static/pina_logo.png and b/tutorials/static/pina_logo.png differ
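For reference, the length check that test exercises can be reproduced in isolation; `validate_c` below is a hypothetical stand-in for the guard inside `Advection`'s `equation` (the real check compares `self.c.shape[-1]` against the number of spatial input labels):

```python
import pytest
import torch


def validate_c(c, n_spatial_dims):
    """Hypothetical stand-in for the shape guard in Advection's equation."""
    c = torch.as_tensor(c, dtype=torch.float32)
    if c.shape[-1] != n_spatial_dims and c.shape[-1] > 1:
        raise ValueError(
            "If 'c' is passed as a list, its length must be equal to "
            "the number of spatial dimensions."
        )
    return c


def test_c_length_mismatch():
    # Three advection coefficients for a two-dimensional spatial domain
    with pytest.raises(ValueError):
        validate_c([1, 2, 3], n_spatial_dims=2)


test_c_length_mismatch()  # passes silently when run as a script
```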