diff --git a/docs/source/conf.py b/docs/source/conf.py index 3e630f983..d0ddc09a5 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,30 +15,21 @@ import sys import os import sphinx_rtd_theme -import pina +import importlib.metadata -# -- Project information ----------------------------------------------------- -project = pina.__project__ -copyright = pina.__copyright__ -author = pina.__author__ -version = pina.__version__ +# -- Project information ----------------------------------------------------- +_DISTRIBUTION_METADATA = importlib.metadata.metadata('pina-mathlab') +project = _DISTRIBUTION_METADATA['Name'] +copyright = _DISTRIBUTION_METADATA['License-File'] +author = "PINA contributors" +version = _DISTRIBUTION_METADATA['Version'] sys.path.insert(0, os.path.abspath('../sphinx_extensions')) # extension to remove paramref link from lightinig # -- General configuration ------------------------------------------------ -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.4' -# if needs_sphinx > sphinx.__display_version__: -# message = 'This project needs at least Sphinx -# v{0!s}'.format(needs_sphinx) -# raise VersionRequirementError(message) - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', @@ -79,16 +70,16 @@ nitpicky = True nitpick_ignore = [ - ('py:meth', 'lightning.pytorch.core.module.LightningModule.log'), - ('py:meth', 'lightning.pytorch.core.module.LightningModule.log_dict'), - ('py:exc', 'MisconfigurationException'), - ('py:func', 'torch.inference_mode'), - ('py:func', 'torch.no_grad'), - ('py:class', 'torch.utils.data.DistributedSampler'), - ('py:class', 'pina.model.layers.convolution.BaseContinuousConv'), - ('py:class', 'Module'), - ('py:class', 'torch.nn.modules.loss._Loss'), # TO FIX - ('py:class', 'torch.optim.LRScheduler'), # TO FIX + # ('py:meth', 'lightning.pytorch.core.module.LightningModule.log'), + # ('py:meth', 'lightning.pytorch.core.module.LightningModule.log_dict'), + # ('py:exc', 'MisconfigurationException'), + # ('py:func', 'torch.inference_mode'), + # ('py:func', 'torch.no_grad'), + # ('py:class', 'torch.utils.data.DistributedSampler'), + # ('py:class', 'pina.model.layers.convolution.BaseContinuousConv'), + # ('py:class', 'Module'), + # ('py:class', 'torch.nn.modules.loss._Loss'), # TO FIX + # ('py:class', 'torch.optim.LRScheduler'), # TO FIX ] @@ -101,27 +92,15 @@ # source_suffix = ['.rst', '.md'] source_suffix = '.rst' -# The encoding of source files. -# source_encoding = 'utf-8-sig' - # The master toctree document. master_doc = 'index' -# General information about the project. -project = pina.__project__ -copyright = pina.__copyright__ -author = pina.__author__ - # autoclass autoclass_content = 'both' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. -# -# The short X.Y version. -version = pina.__version__ -# The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation @@ -131,20 +110,10 @@ # Usually you set "language" from the command line for these cases. 
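A note on the metadata-based project information introduced above: importlib.metadata.metadata() returns an email.message.Message-style object, and its 'License-File' field normally carries the license file name (for example LICENSE.rst) rather than a copyright string, so the value assigned to copyright is worth double-checking. The sketch below is not part of the patch; it only assumes that pina-mathlab is installed in the docs build environment, and prints what each field actually resolves to:

    import importlib.metadata

    md = importlib.metadata.metadata("pina-mathlab")
    for field in ("Name", "Version", "License-File"):
        # Message-style access returns None when a field is absent.
        print(field, "->", md[field])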
language = 'en' -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True @@ -152,10 +121,6 @@ # unit titles (such as .. function::). add_module_names = False -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' @@ -188,13 +153,6 @@ # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "index_files/PINA_logo.png" @@ -225,77 +183,19 @@ "header_links_before_dropdown": 8, } -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files,# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] -html_css_files = [ - '/css/custom.css', -] -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = ['_tutorial'] - # If not ''i, a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - # If false, no index is generated. html_use_index = True -# If true, the index is split into individual pages for each letter. -# html_split_index = False - # If true, links to the reST sources are added to the pages. html_show_sourcelink = True -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). 
-# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - # Output file base name for HTML help builder. htmlhelp_basename = 'pinadoc' @@ -323,27 +223,6 @@ u'PINA contributors', 'manual'), ] -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples @@ -353,10 +232,6 @@ [author], 1) ] -# If true, show URL addresses after external links. -# man_show_urls = False - - # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples @@ -368,15 +243,6 @@ 'Miscellaneous'), ] -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False autodoc_member_order = 'bysource' diff --git a/examples/problems/burgers.py b/examples/problems/burgers.py deleted file mode 100644 index 33c1b5db2..000000000 --- a/examples/problems/burgers.py +++ /dev/null @@ -1,53 +0,0 @@ -""" Burgers' problem. """ - - -# ===================================================== # -# # -# This script implements the one dimensional Burger # -# problem. 
The Burgers1D class is defined inheriting # -# from TimeDependentProblem, SpatialProblem and we # -# denote: # -# u --> field variable # -# x --> spatial variable # -# t --> temporal variable # -# # -# ===================================================== # - - -import torch -from pina.domain import CartesianDomain -from pina import Condition -from pina.problem import TimeDependentProblem, SpatialProblem -from pina.operators import grad -from pina.equation import FixedValue, Equation - - -class Burgers1D(TimeDependentProblem, SpatialProblem): - - # define the burger equation - def burger_equation(input_, output_): - du = grad(output_, input_) - ddu = grad(du, input_, components=['dudx']) - return ( - du.extract(['dudt']) + - output_.extract(['u'])*du.extract(['dudx']) - - (0.01/torch.pi)*ddu.extract(['ddudxdx']) - ) - - # define initial condition - def initial_condition(input_, output_): - u_expected = -torch.sin(torch.pi*input_.extract(['x'])) - return output_.extract(['u']) - u_expected - - # assign output/ spatial and temporal variables - output_variables = ['u'] - spatial_domain = CartesianDomain({'x': [-1, 1]}) - temporal_domain = CartesianDomain({'t': [0, 1]}) - - # problem condition statement - conditions = { - 'gamma1': Condition(location=CartesianDomain({'x': -1, 't': [0, 1]}), equation=FixedValue(0.)), - 'gamma2': Condition(location=CartesianDomain({'x': 1, 't': [0, 1]}), equation=FixedValue(0.)), - 't0': Condition(location=CartesianDomain({'x': [-1, 1], 't': 0}), equation=Equation(initial_condition)), - 'D': Condition(location=CartesianDomain({'x': [-1, 1], 't': [0, 1]}), equation=Equation(burger_equation)), - } \ No newline at end of file diff --git a/examples/problems/first_order_ode.py b/examples/problems/first_order_ode.py deleted file mode 100644 index be1d88c42..000000000 --- a/examples/problems/first_order_ode.py +++ /dev/null @@ -1,52 +0,0 @@ -""" Simple ODE problem. """ - - -# ===================================================== # -# # -# This script implements a simple first order ode. # -# The FirstOrderODE class is defined inheriting from # -# SpatialProblem. We denote: # -# y --> field variable # -# x --> spatial variable # -# # -# The equation is: # -# dy(x)/dx + y(x) = x # -# # -# ===================================================== # - - -from pina.problem import SpatialProblem -from pina import Condition -from pina.domain import CartesianDomain -from pina.operators import grad -from pina.equation import Equation, FixedValue -import torch - - -class FirstOrderODE(SpatialProblem): - - # variable domain range - x_rng = [0., 5.] - # field variable - output_variables = ['y'] - # create domain - spatial_domain = CartesianDomain({'x': x_rng}) - - # define the ode - def ode(input_, output_): - y = output_ - x = input_ - return grad(y, x) + y - x - - # define real solution - def solution(self, input_): - x = input_ - return x - 1.0 + 2*torch.exp(-x) - - # define problem conditions - conditions = { - 'BC': Condition(location=CartesianDomain({'x': x_rng[0]}), equation=FixedValue(1.)), - 'D': Condition(location=CartesianDomain({'x': x_rng}), equation=Equation(ode)), - } - - truth_solution = solution \ No newline at end of file diff --git a/examples/problems/parametric_elliptic_optimal_control.py b/examples/problems/parametric_elliptic_optimal_control.py deleted file mode 100644 index 9ecd85154..000000000 --- a/examples/problems/parametric_elliptic_optimal_control.py +++ /dev/null @@ -1,75 +0,0 @@ -""" Poisson OCP problem. 
""" - - -from pina import Condition -from pina.domain import CartesianDomain -from pina.equation import SystemEquation, FixedValue -from pina.problem import SpatialProblem, ParametricProblem -from pina.operators import laplacian - -# ===================================================== # -# # -# This script implements the two dimensional # -# Parametric Elliptic Optimal Control problem. # -# The ParametricEllipticOptimalControl class is # -# inherited from TimeDependentProblem, SpatialProblem # -# and we denote: # -# u --> field variable # -# p --> field variable # -# y --> field variable # -# x1, x2 --> spatial variables # -# mu, alpha --> problem parameters # -# # -# More info in https://arxiv.org/pdf/2110.13530.pdf # -# Section 4.2 of the article # -# ===================================================== # - - -class ParametricEllipticOptimalControl(SpatialProblem, ParametricProblem): - - # setting spatial variables ranges - xmin, xmax, ymin, ymax = -1, 1, -1, 1 - x_range = [xmin, xmax] - y_range = [ymin, ymax] - # setting parameters range - amin, amax = 0.01, 1 - mumin, mumax = 0.5, 3 - mu_range = [mumin, mumax] - a_range = [amin, amax] - # setting field variables - output_variables = ['u', 'y', 'z'] - # setting spatial and parameter domain - spatial_domain = CartesianDomain({'x1': x_range, 'x2': y_range}) - parameter_domain = CartesianDomain({'mu': mu_range, 'alpha': a_range}) - - # equation terms as in https://arxiv.org/pdf/2110.13530.pdf - def term1(input_, output_): - laplace_z = laplacian(output_, input_, components=['z'], d=['x1', 'x2']) - return output_.extract(['y']) - input_.extract(['mu']) - laplace_z - - def term2(input_, output_): - laplace_y = laplacian(output_, input_, components=['y'], d=['x1', 'x2']) - return - laplace_y - output_.extract(['u']) - - - # setting problem condition formulation - conditions = { - 'gamma1': Condition( - location=CartesianDomain({'x1': x_range, 'x2': 1, 'mu': mu_range, 'alpha': a_range}), - equation=FixedValue(0, ['y',])), - 'gamma2': Condition( - location=CartesianDomain({'x1': x_range, 'x2': -1, 'mu': mu_range, 'alpha': a_range}), - equation=FixedValue(0, ['y', 'z'])), - 'gamma3': Condition( - location=CartesianDomain({'x1': 1, 'x2': y_range, 'mu': mu_range, 'alpha': a_range}), - equation=FixedValue(0, ['y', 'z'])), - 'gamma4': Condition( - location=CartesianDomain({'x1': -1, 'x2': y_range, 'mu': mu_range, 'alpha': a_range}), - equation=FixedValue(0, ['y', 'z'])), - 'D': Condition( - location=CartesianDomain( - {'x1': x_range, 'x2': y_range, - 'mu': mu_range, 'alpha': a_range - }), - equation=SystemEquation([term1, term2])), - } \ No newline at end of file diff --git a/examples/problems/parametric_poisson.py b/examples/problems/parametric_poisson.py deleted file mode 100644 index 64dfdaaee..000000000 --- a/examples/problems/parametric_poisson.py +++ /dev/null @@ -1,55 +0,0 @@ -""" Parametric Poisson problem. """ - - -# ===================================================== # -# # -# This script implements the two dimensional # -# Parametric Poisson problem. The ParametricPoisson # -# class is defined inheriting from SpatialProblem and # -# ParametricProblem. 
We denote: # -# u --> field variable # -# x,y --> spatial variables # -# mu1, mu2 --> parameter variables # -# # -# ===================================================== # - - -from pina.domain import CartesianDomain -from pina.problem import SpatialProblem, ParametricProblem -from pina.operators import laplacian -from pina.equation import FixedValue, Equation -from pina import Condition -import torch - -class ParametricPoisson(SpatialProblem, ParametricProblem): - - # assign output/ spatial and parameter variables - output_variables = ['u'] - spatial_domain = CartesianDomain({'x': [-1, 1], 'y': [-1, 1]}) - parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]}) - - # define the laplace equation - def laplace_equation(input_, output_): - force_term = torch.exp( - - 2*(input_.extract(['x']) - input_.extract(['mu1']))**2 - - 2*(input_.extract(['y']) - input_.extract(['mu2']))**2) - return laplacian(output_.extract(['u']), input_, d=['x','y']) - force_term - - # problem condition statement - conditions = { - 'gamma1': Condition( - location=CartesianDomain({'x': [-1, 1], 'y': 1, 'mu1': [-1, 1], 'mu2': [-1, 1]}), - equation=FixedValue(0.)), - 'gamma2': Condition( - location=CartesianDomain({'x': [-1, 1], 'y': -1, 'mu1': [-1, 1], 'mu2': [-1, 1]}), - equation=FixedValue(0.)), - 'gamma3': Condition( - location=CartesianDomain({'x': 1, 'y': [-1, 1], 'mu1': [-1, 1], 'mu2': [-1, 1]}), - equation=FixedValue(0.)), - 'gamma4': Condition( - location=CartesianDomain({'x': -1, 'y': [-1, 1], 'mu1': [-1, 1], 'mu2': [-1, 1]}), - equation=FixedValue(0.)), - 'D': Condition( - location=CartesianDomain({'x': [-1, 1], 'y': [-1, 1], 'mu1': [-1, 1], 'mu2': [-1, 1]}), - equation=Equation(laplace_equation)), - } diff --git a/examples/problems/poisson.py b/examples/problems/poisson.py deleted file mode 100644 index e4a6cf98e..000000000 --- a/examples/problems/poisson.py +++ /dev/null @@ -1,57 +0,0 @@ -""" Poisson problem. """ - - -# ===================================================== # -# # -# This script implements the two dimensional # -# Poisson problem. The Poisson class is defined # -# inheriting from SpatialProblem. 
We denote: # -# u --> field variable # -# x,y --> spatial variables # -# # -# ===================================================== # - - -import torch -from pina.domain import CartesianDomain -from pina import Condition -from pina.problem import SpatialProblem -from pina.operators import laplacian -from pina.equation import FixedValue, Equation - - -class Poisson(SpatialProblem): - output_variables = ['u'] - spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) - - def laplace_equation(input_, output_): - force_term = (torch.sin(input_.extract(['x'])*torch.pi) * - torch.sin(input_.extract(['y'])*torch.pi)) - nabla_u = laplacian(output_.extract(['u']), input_) - return nabla_u - force_term - - conditions = { - 'gamma1': Condition( - location=CartesianDomain({'x': [0, 1], 'y': 1}), - equation=FixedValue(0.0)), - 'gamma2': Condition( - location=CartesianDomain({'x': [0, 1], 'y': 0}), - equation=FixedValue(0.0)), - 'gamma3': Condition( - location=CartesianDomain({'x': 1, 'y': [0, 1]}), - equation=FixedValue(0.0)), - 'gamma4': Condition( - location=CartesianDomain({'x': 0, 'y': [0, 1]}), - equation=FixedValue(0.0)), - 'D': Condition( - location=CartesianDomain({'x': [0, 1], 'y': [0, 1]}), - equation=Equation(laplace_equation)), - } - - def poisson_sol(self, pts): - return -( - torch.sin(pts.extract(['x'])*torch.pi) * - torch.sin(pts.extract(['y'])*torch.pi) - )/(2*torch.pi**2) - - truth_solution = poisson_sol diff --git a/examples/problems/stokes.py b/examples/problems/stokes.py deleted file mode 100644 index 5f14f4e8f..000000000 --- a/examples/problems/stokes.py +++ /dev/null @@ -1,67 +0,0 @@ -""" Steady Stokes Problem """ - -import torch -from pina.problem import SpatialProblem -from pina.operators import laplacian, grad, div -from pina import Condition, LabelTensor -from pina.domain import CartesianDomain -from pina.equation import SystemEquation, Equation - -# ===================================================== # -# # -# This script implements the two dimensional # -# Stokes problem. The Stokes class is defined # -# inheriting from SpatialProblem. 
We denote: # -# ux --> field variable velocity along x # -# uy --> field variable velocity along y # -# p --> field variable pressure # -# x,y --> spatial variables # -# # -# ===================================================== # - -class Stokes(SpatialProblem): - - # assign output/ spatial variables - output_variables = ['ux', 'uy', 'p'] - spatial_domain = CartesianDomain({'x': [-2, 2], 'y': [-1, 1]}) - - # define the momentum equation - def momentum(input_, output_): - delta_ = torch.hstack((LabelTensor(laplacian(output_.extract(['ux']), input_), ['x']), - LabelTensor(laplacian(output_.extract(['uy']), input_), ['y']))) - return - delta_ + grad(output_.extract(['p']), input_) - - def continuity(input_, output_): - return div(output_.extract(['ux', 'uy']), input_) - - # define the inlet velocity - def inlet(input_, output_): - value = 2 * (1 - input_.extract(['y'])**2) - return output_.extract(['ux']) - value - - # define the outlet pressure - def outlet(input_, output_): - value = 0.0 - return output_.extract(['p']) - value - - # define the wall condition - def wall(input_, output_): - value = 0.0 - return output_.extract(['ux', 'uy']) - value - - domains = { - 'gamma_top': CartesianDomain({'x': [-2, 2], 'y': 1}), - 'gamma_bot': CartesianDomain({'x': [-2, 2], 'y': -1}), - 'gamma_out': CartesianDomain({'x': 2, 'y': [-1, 1]}), - 'gamma_in': CartesianDomain({'x': -2, 'y': [-1, 1]}), - 'D': CartesianDomain({'x': [-2, 2], 'y': [-1, 1]}) - } - - # problem condition statement - conditions = { - 'gamma_top': Condition(domain='gamma_top', equation=Equation(wall)), - 'gamma_bot': Condition(domain='gamma_bot', equation=Equation(wall)), - 'gamma_out': Condition(domain='gamma_out', equation=Equation(outlet)), - 'gamma_in': Condition(domain='gamma_in', equation=Equation(inlet)), - 'D': Condition(domain='D', equation=SystemEquation([momentum, continuity])) - } diff --git a/examples/problems/wave.py b/examples/problems/wave.py deleted file mode 100644 index 124a62d4b..000000000 --- a/examples/problems/wave.py +++ /dev/null @@ -1,57 +0,0 @@ -""" Wave equation Problem """ - - -import torch -from pina.domain import CartesianDomain -from pina import Condition -from pina.problem import SpatialProblem, TimeDependentProblem -from pina.operators import laplacian, grad -from pina.equation import FixedValue, Equation - - -# ===================================================== # -# # -# This script implements the two dimensional # -# Wave equation. The Wave class is defined inheriting # -# from SpatialProblem and TimeDependentProblem. Let # -# u --> field variable # -# x,y --> spatial variables # -# t --> temporal variables # -# the velocity coefficient is set to one. 
# -# # -# ===================================================== # - - - -class Wave(TimeDependentProblem, SpatialProblem): - output_variables = ['u'] - spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) - temporal_domain = CartesianDomain({'t': [0, 1]}) - - def wave_equation(input_, output_): - u_t = grad(output_, input_, components=['u'], d=['t']) - u_tt = grad(u_t, input_, components=['dudt'], d=['t']) - nabla_u = laplacian(output_, input_, components=['u'], d=['x', 'y']) - return nabla_u - u_tt - - def initial_condition(input_, output_): - u_expected = (torch.sin(torch.pi*input_.extract(['x'])) * - torch.sin(torch.pi*input_.extract(['y']))) - return output_.extract(['u']) - u_expected - - conditions = { - 'gamma1': Condition(location=CartesianDomain({'x': [0, 1], 'y': 1, 't': [0, 1]}), equation=FixedValue(0.)), - 'gamma2': Condition(location=CartesianDomain({'x': [0, 1], 'y': 0, 't': [0, 1]}), equation=FixedValue(0.)), - 'gamma3': Condition(location=CartesianDomain({'x': 1, 'y': [0, 1], 't': [0, 1]}), equation=FixedValue(0.)), - 'gamma4': Condition(location=CartesianDomain({'x': 0, 'y': [0, 1], 't': [0, 1]}), equation=FixedValue(0.)), - 't0': Condition(location=CartesianDomain({'x': [0, 1], 'y': [0, 1], 't': 0}), equation=Equation(initial_condition)), - 'D': Condition(location=CartesianDomain({'x': [0, 1], 'y': [0, 1], 't': [0, 1]}), equation=Equation(wave_equation)), - } - - def wave_sol(self, pts): - sqrt_2 = torch.sqrt(torch.tensor(2.)) - return (torch.sin(torch.pi*pts.extract(['x'])) * - torch.sin(torch.pi*pts.extract(['y'])) * - torch.cos(sqrt_2*torch.pi*pts.extract(['t']))) - - truth_solution = wave_sol \ No newline at end of file diff --git a/examples/run_burgers.py b/examples/run_burgers.py deleted file mode 100644 index 10f217a29..000000000 --- a/examples/run_burgers.py +++ /dev/null @@ -1,73 +0,0 @@ -""" Run PINA on Burgers equation. 
""" - -import argparse -import torch -from torch.nn import Softplus - -from pina import LabelTensor -from pina.model import FeedForward -from pina.solvers import PINN -from pina.plotter import Plotter -from pina.trainer import Trainer -from problems.burgers import Burgers1D - - -class myFeature(torch.nn.Module): - """ - Feature: sin(pi*x) - """ - - def __init__(self): - super(myFeature, self).__init__() - - def forward(self, x): - return LabelTensor(torch.sin(torch.pi * x.extract(['x'])), ['sin(x)']) - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--features", help="extra features", type=int) - parser.add_argument("--epochs", help="extra features", type=int, default=1000) - args = parser.parse_args() - - if args.features is None: - args.features = 0 - - # extra features - feat = [myFeature()] if args.features else [] - - # create problem and discretise domain - burgers_problem = Burgers1D() - burgers_problem.discretise_domain(n=200, mode='grid', variables = 't', locations=['D']) - burgers_problem.discretise_domain(n=20, mode='grid', variables = 'x', locations=['D']) - burgers_problem.discretise_domain(n=150, mode='random', locations=['gamma1', 'gamma2', 't0']) - - # create model - model = FeedForward( - layers=[30, 20, 10, 5], - output_dimensions=len(burgers_problem.output_variables), - input_dimensions=len(burgers_problem.input_variables) + len(feat), - func=Softplus - ) - - # create solver - pinn = PINN( - problem=burgers_problem, - model=model, - extra_features=feat, - optimizer_kwargs={'lr' : 0.006} - ) - - # create trainer - directory = 'pina.burger_extrafeats_{}'.format(bool(args.features)) - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=burgers_problem, model=model) - plotter = Plotter() - plotter.plot(pinn) - else: - trainer.train() diff --git a/examples/run_first_order_ode.py b/examples/run_first_order_ode.py deleted file mode 100644 index b41b47062..000000000 --- a/examples/run_first_order_ode.py +++ /dev/null @@ -1,53 +0,0 @@ -""" Run PINA on ODE equation. 
""" -import argparse -import torch -from torch.nn import Softplus - -from pina.model import FeedForward -from pina.solvers import PINN -from pina.plotter import Plotter -from pina.trainer import Trainer -from problems.first_order_ode import FirstOrderODE - - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--epochs", help="extra features", type=int, default=3000) - args = parser.parse_args() - - - # create problem and discretise domain - problem = FirstOrderODE() - problem.discretise_domain(n=500, mode='grid', variables = 'x', locations=['D']) - problem.discretise_domain(n=1, mode='grid', variables = 'x', locations=['BC']) - - # create model - model = FeedForward( - layers=[10, 10], - output_dimensions=len(problem.output_variables), - input_dimensions=len(problem.input_variables), - func=Softplus - ) - - # create solver - pinn = PINN( - problem=problem, - model=model, - extra_features=None, - optimizer_kwargs={'lr' : 0.001} - ) - - # create trainer - directory = 'pina.ode' - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=problem, model=model) - plotter = Plotter() - plotter.plot(pinn) - else: - trainer.train() \ No newline at end of file diff --git a/examples/run_parametric_elliptic_optimal.py b/examples/run_parametric_elliptic_optimal.py deleted file mode 100644 index 564fc5833..000000000 --- a/examples/run_parametric_elliptic_optimal.py +++ /dev/null @@ -1,89 +0,0 @@ -import argparse -import numpy as np -import torch -from torch.nn import Softplus - -from pina import LabelTensor -from pina.solvers import PINN -from pina.model import MultiFeedForward, FeedForward -from pina.plotter import Plotter -from pina.trainer import Trainer -from problems.parametric_elliptic_optimal_control import ( - ParametricEllipticOptimalControl) - - -class myFeature(torch.nn.Module): - """ - Feature: sin(x) - """ - - def __init__(self): - super(myFeature, self).__init__() - - def forward(self, x): - t = (-x.extract(['x1'])**2+1) * (-x.extract(['x2'])**2+1) - return LabelTensor(t, ['k0']) - - -class PIArch(MultiFeedForward): - - def __init__(self, dff_dict): - super().__init__(dff_dict) - - def forward(self, x): - out = self.uy(x) - out.labels = ['u', 'y'] - z = LabelTensor( - (out.extract(['u']) * x.extract(['alpha'])), ['z']) - return out.append(z) - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--features", help="extra features", type=int) - parser.add_argument("--epochs", help="extra features", type=int, default=1000) - args = parser.parse_args() - - if args.features is None: - args.features = 0 - - # extra features - feat = [myFeature()] if args.features else [] - args = parser.parse_args() - - # create problem and discretise domain - opc = ParametricEllipticOptimalControl() - opc.discretise_domain(n= 900, mode='random', variables=['x1', 'x2'], locations=['D']) - opc.discretise_domain(n= 5, mode='random', variables=['mu', 'alpha'], locations=['D']) - opc.discretise_domain(n= 200, mode='random', variables=['x1', 'x2'], locations=['gamma1', 'gamma2', 'gamma3', 'gamma4']) - opc.discretise_domain(n= 5, mode='random', variables=['mu', 'alpha'], locations=['gamma1', 'gamma2', 
'gamma3', 'gamma4']) - - # create model - model = PIArch( - { - 'uy': { - 'input_dimensions': 4 + len(feat), - 'output_dimensions': 2, - 'layers': [40, 40, 20], - 'func': Softplus, - }, - } - ) - - # create PINN - pinn = PINN(problem=opc, model=model, optimizer_kwargs={'lr' : 0.002}, extra_features=feat) - - # create trainer - directory = 'pina.parametric_optimal_control_{}'.format(bool(args.features)) - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=opc, model=model, extra_features=feat) - plotter = Plotter() - plotter.plot(pinn, fixed_variables={'mu' : 3 , 'alpha' : 1}, components='u') - plotter.plot(pinn, fixed_variables={'mu' : 3 , 'alpha' : 1}, components='z') - plotter.plot(pinn, fixed_variables={'mu' : 3 , 'alpha' : 1}, components='y') - else: - trainer.train() diff --git a/examples/run_parametric_poisson.py b/examples/run_parametric_poisson.py deleted file mode 100644 index 1c713666d..000000000 --- a/examples/run_parametric_poisson.py +++ /dev/null @@ -1,73 +0,0 @@ -import argparse -import torch -from torch.nn import Softplus -from pina import Plotter, LabelTensor, Trainer -from pina.solvers import PINN -from pina.model import FeedForward -from problems.parametric_poisson import ParametricPoisson - - -class myFeature(torch.nn.Module): - """ - """ - def __init__(self): - super(myFeature, self).__init__() - - def forward(self, x): - t = ( - torch.exp( - - 2*(x.extract(['x']) - x.extract(['mu1']))**2 - - 2*(x.extract(['y']) - x.extract(['mu2']))**2 - ) - ) - return LabelTensor(t, ['k0']) - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--features", help="extra features", type=int) - parser.add_argument("--epochs", help="extra features", type=int, default=1000) - args = parser.parse_args() - - if args.features is None: - args.features = 0 - - # extra features - feat = [myFeature()] if args.features else [] - - # create problem and discretise domain - ppoisson_problem = ParametricPoisson() - ppoisson_problem.discretise_domain(n=100, mode='random', variables = ['x', 'y'], locations=['D']) - ppoisson_problem.discretise_domain(n=100, mode='random', variables = ['mu1', 'mu2'], locations=['D']) - ppoisson_problem.discretise_domain(n=20, mode='random', variables = ['x', 'y'], locations=['gamma1', 'gamma2', 'gamma3', 'gamma4']) - ppoisson_problem.discretise_domain(n=5, mode='random', variables = ['mu1', 'mu2'], locations=['gamma1', 'gamma2', 'gamma3', 'gamma4']) - - # create model - model = FeedForward( - layers=[10, 10, 10], - output_dimensions=len(ppoisson_problem.output_variables), - input_dimensions=len(ppoisson_problem.input_variables) + len(feat), - func=Softplus - ) - - # create solver - pinn = PINN( - problem=ppoisson_problem, - model=model, - extra_features=feat, - optimizer_kwargs={'lr' : 0.006} - ) - - # create trainer - directory = 'pina.parametric_poisson_extrafeats_{}'.format(bool(args.features)) - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=ppoisson_problem, model=model, extra_features=feat) - plotter = Plotter() - plotter.plot(pinn, fixed_variables={'mu1': 1, 'mu2': -1}) - else: - trainer.train() diff --git a/examples/run_poisson.py 
b/examples/run_poisson.py deleted file mode 100644 index 390e042ca..000000000 --- a/examples/run_poisson.py +++ /dev/null @@ -1,73 +0,0 @@ -""" Run PINA on ODE equation. """ -import argparse -import torch -from torch.nn import Softplus - -from pina import LabelTensor -from pina.model import FeedForward -from pina.solvers import PINN -from pina.plotter import Plotter -from pina.trainer import Trainer -from problems.poisson import Poisson - - -class myFeature(torch.nn.Module): - """ - Feature: sin(x) - """ - - def __init__(self): - super(myFeature, self).__init__() - - def forward(self, x): - t = (torch.sin(x.extract(['x'])*torch.pi) * - torch.sin(x.extract(['y'])*torch.pi)) - return LabelTensor(t, ['sin(x)sin(y)']) - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--features", help="extra features", type=int) - parser.add_argument("--epochs", help="extra features", type=int, default=1000) - args = parser.parse_args() - - if args.features is None: - args.features = 0 - - # extra features - feat = [myFeature()] if args.features else [] - args = parser.parse_args() - - # create problem and discretise domain - problem = Poisson() - problem.discretise_domain(n=20, mode='grid', locations=['D']) - problem.discretise_domain(n=100, mode='random', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4']) - - # create model - model = FeedForward( - layers=[10, 10], - output_dimensions=len(problem.output_variables), - input_dimensions=len(problem.input_variables) + len(feat), - func=Softplus - ) - - # create solver - pinn = PINN( - problem=problem, - model=model, - extra_features=feat, - optimizer_kwargs={'lr' : 0.001} - ) - - # create trainer - directory = 'pina.parametric_poisson_extrafeats_{}'.format(bool(args.features)) - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=problem, model=model, extra_features=feat) - plotter = Plotter() - plotter.plot(pinn) - else: - trainer.train() \ No newline at end of file diff --git a/examples/run_poisson_deeponet.py b/examples/run_poisson_deeponet.py deleted file mode 100644 index 3e577a612..000000000 --- a/examples/run_poisson_deeponet.py +++ /dev/null @@ -1,75 +0,0 @@ -import argparse -import torch -from pina import Plotter, LabelTensor, Trainer -from pina.solvers import PINN -from pina.model import DeepONet, FeedForward -from problems.parametric_poisson import ParametricPoisson - - -class myFeature(torch.nn.Module): - """ - """ - def __init__(self): - super(myFeature, self).__init__() - - def forward(self, x): - t = ( - torch.exp( - - 2*(x.extract(['x']) - x.extract(['mu1']))**2 - - 2*(x.extract(['y']) - x.extract(['mu2']))**2 - ) - ) - return LabelTensor(t, ['k0']) - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--epochs", help="extra features", type=int, default=1000) - args = parser.parse_args() - - - # create problem and discretise domain - ppoisson_problem = ParametricPoisson() - ppoisson_problem.discretise_domain(n=100, mode='random', variables = ['x', 'y'], locations=['D']) - ppoisson_problem.discretise_domain(n=100, mode='random', variables = ['mu1', 'mu2'], locations=['D']) - ppoisson_problem.discretise_domain(n=20, 
mode='random', variables = ['x', 'y'], locations=['gamma1', 'gamma2', 'gamma3', 'gamma4']) - ppoisson_problem.discretise_domain(n=5, mode='random', variables = ['mu1', 'mu2'], locations=['gamma1', 'gamma2', 'gamma3', 'gamma4']) - - # create model - trunck = FeedForward( - layers=[40, 40], - output_dimensions=1, - input_dimensions=2, - func=torch.nn.ReLU - ) - branch = FeedForward( - layers=[40, 40], - output_dimensions=1, - input_dimensions=2, - func=torch.nn.ReLU - ) - model = DeepONet(branch_net=branch, - trunk_net=trunck, - input_indeces_branch_net=['x', 'y'], - input_indeces_trunk_net=['mu1', 'mu2']) - - # create solver - pinn = PINN( - problem=ppoisson_problem, - model=model, - optimizer_kwargs={'lr' : 0.006} - ) - - # create trainer - directory = 'pina.parametric_poisson_deeponet' - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=ppoisson_problem, model=model) - plotter = Plotter() - plotter.plot(pinn, fixed_variables={'mu1': 1, 'mu2': -1}) - else: - trainer.train() diff --git a/examples/run_stokes.py b/examples/run_stokes.py deleted file mode 100644 index 04f652bd3..000000000 --- a/examples/run_stokes.py +++ /dev/null @@ -1,52 +0,0 @@ -import argparse -from torch.nn import Softplus - -from pina import Plotter, Trainer -from pina.model import FeedForward -from pina.solvers import PINN -from problems.stokes import Stokes - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--epochs", help="extra features", type=int, default=1000) - args = parser.parse_args() - - - # create problem and discretise domain - stokes_problem = Stokes() - stokes_problem.discretise_domain(n=1000, domains=['gamma_top', 'gamma_bot', 'gamma_in', 'gamma_out']) - stokes_problem.discretise_domain(n=2000, domains=['D']) - - # make the model - model = FeedForward( - layers=[10, 10, 10, 10], - output_dimensions=len(stokes_problem.output_variables), - input_dimensions=len(stokes_problem.input_variables), - func=Softplus, - ) - - # make the pinn - pinn = PINN( - stokes_problem, - model, - optimizer_kwargs={'lr' : 0.001} - ) - - # create trainer - directory = 'pina.navier_stokes' - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=stokes_problem, model=model) - plotter = Plotter() - plotter.plot(pinn, components='ux') - plotter.plot(pinn, components='uy') - plotter.plot(pinn, components='p') - else: - trainer.train() - - diff --git a/examples/run_wave.py b/examples/run_wave.py deleted file mode 100644 index 2d4b4e6e4..000000000 --- a/examples/run_wave.py +++ /dev/null @@ -1,64 +0,0 @@ -""" Run PINA on Burgers equation. 
""" - -import argparse -import torch -from torch.nn import Softplus - -from pina import LabelTensor -from pina.model import FeedForward -from pina.solvers import PINN -from pina.plotter import Plotter -from pina.trainer import Trainer -from problems.wave import Wave - -class HardMLP(torch.nn.Module): - - def __init__(self, **kwargs): - super().__init__() - - self.layers = FeedForward(**kwargs) - - # here in the foward we implement the hard constraints - def forward(self, x): - hard_space = x.extract(['x'])*(1-x.extract(['x']))*x.extract(['y'])*(1-x.extract(['y'])) - hard_t = torch.sin(torch.pi*x.extract(['x'])) * torch.sin(torch.pi*x.extract(['y'])) * torch.cos(torch.sqrt(torch.tensor(2.))*torch.pi*x.extract(['t'])) - return hard_space * self.layers(x) * x.extract(['t']) + hard_t - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="Run PINA") - parser.add_argument("--load", help="directory to save or load file", type=str) - parser.add_argument("--epochs", help="extra features", type=int, default=1000) - args = parser.parse_args() - - - # create problem and discretise domain - wave_problem = Wave() - wave_problem.discretise_domain(1000, 'random', locations=['D', 't0', 'gamma1', 'gamma2', 'gamma3', 'gamma4']) - - # create model - model = HardMLP( - layers=[40, 40, 40], - output_dimensions=len(wave_problem.output_variables), - input_dimensions=len(wave_problem.input_variables), - func=Softplus - ) - - # create solver - pinn = PINN( - problem=wave_problem, - model=model, - optimizer_kwargs={'lr' : 0.006} - ) - - # create trainer - directory = 'pina.wave' - trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=args.epochs, default_root_dir=directory) - - - if args.load: - pinn = PINN.load_from_checkpoint(checkpoint_path=args.load, problem=wave_problem, model=model) - plotter = Plotter() - plotter.plot(pinn) - else: - trainer.train() diff --git a/pina/__init__.py b/pina/__init__.py index f6e73592f..f8624144a 100644 --- a/pina/__init__.py +++ b/pina/__init__.py @@ -1,3 +1,7 @@ +""" +Module for the Pina library. +""" + __all__ = [ "Trainer", "LabelTensor", diff --git a/pina/adaptive_function/__init__.py b/pina/adaptive_function/__init__.py index 911cf2998..093739247 100644 --- a/pina/adaptive_function/__init__.py +++ b/pina/adaptive_function/__init__.py @@ -1,3 +1,7 @@ +""" +Adaptive Activation Functions Module. +""" + __all__ = [ "AdaptiveActivationFunctionInterface", "AdaptiveReLU", diff --git a/pina/adaptive_function/adaptive_function.py b/pina/adaptive_function/adaptive_function.py index 9bf5fba97..a88fe804e 100644 --- a/pina/adaptive_function/adaptive_function.py +++ b/pina/adaptive_function/adaptive_function.py @@ -15,7 +15,7 @@ class AdaptiveReLU(AdaptiveActivationFunctionInterface): is defined as: .. math:: - \text{ReLU}_{\text{adaptive}}({x}) = \alpha\,\text{ReLU}(\beta{x}+\gamma), + \text{ReLU}_{\text{adaptive}}({x})=\alpha\,\text{ReLU}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the ReLU function is defined as: @@ -50,13 +50,15 @@ class AdaptiveSigmoid(AdaptiveActivationFunctionInterface): r""" Adaptive trainable :class:`~torch.nn.Sigmoid` activation function. - Given the function :math:`\text{Sigmoid}:\mathbb{R}^n\rightarrow\mathbb{R}^n`, + Given the function + :math:`\text{Sigmoid}:\mathbb{R}^n\rightarrow\mathbb{R}^n`, the adaptive function :math:`\text{Sigmoid}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n` is defined as: .. 
math:: - \text{Sigmoid}_{\text{adaptive}}({x}) = \alpha\,\text{Sigmoid}(\beta{x}+\gamma), + \text{Sigmoid}_{\text{adaptive}}({x})= + \alpha\,\text{Sigmoid}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Sigmoid function is defined as: @@ -97,7 +99,7 @@ class AdaptiveTanh(AdaptiveActivationFunctionInterface): is defined as: .. math:: - \text{Tanh}_{\text{adaptive}}({x}) = \alpha\,\text{Tanh}(\beta{x}+\gamma), + \text{Tanh}_{\text{adaptive}}({x})=\alpha\,\text{Tanh}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Tanh function is defined as: @@ -138,7 +140,7 @@ class AdaptiveSiLU(AdaptiveActivationFunctionInterface): is defined as: .. math:: - \text{SiLU}_{\text{adaptive}}({x}) = \alpha\,\text{SiLU}(\beta{x}+\gamma), + \text{SiLU}_{\text{adaptive}}({x})=\alpha\,\text{SiLU}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the SiLU function is defined as: @@ -180,7 +182,7 @@ class AdaptiveMish(AdaptiveActivationFunctionInterface): is defined as: .. math:: - \text{Mish}_{\text{adaptive}}({x}) = \alpha\,\text{Mish}(\beta{x}+\gamma), + \text{Mish}_{\text{adaptive}}({x})=\alpha\,\text{Mish}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Mish function is defined as: @@ -265,7 +267,7 @@ class AdaptiveCELU(AdaptiveActivationFunctionInterface): is defined as: .. math:: - \text{CELU}_{\text{adaptive}}({x}) = \alpha\,\text{CELU}(\beta{x}+\gamma), + \text{CELU}_{\text{adaptive}}({x})=\alpha\,\text{CELU}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the CELU function is defined as: @@ -306,13 +308,13 @@ class AdaptiveGELU(AdaptiveActivationFunctionInterface): is defined as: .. math:: - \text{GELU}_{\text{adaptive}}({x}) = \alpha\,\text{GELU}(\beta{x}+\gamma), + \text{GELU}_{\text{adaptive}}({x})=\alpha\,\text{GELU}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the GELU function is defined as: .. math:: - \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3))) + \text{GELU}(x)=0.5*x*(1+\text{Tanh}(\sqrt{2 / \pi}*(x+0.044715*x^3))) .. seealso:: @@ -342,13 +344,15 @@ class AdaptiveSoftmin(AdaptiveActivationFunctionInterface): r""" Adaptive trainable :class:`~torch.nn.Softmin` activation function. - Given the function :math:`\text{Softmin}:\mathbb{R}^n\rightarrow\mathbb{R}^n`, + Given the function + :math:`\text{Softmin}:\mathbb{R}^n\rightarrow\mathbb{R}^n`, the adaptive function :math:`\text{Softmin}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n` is defined as: .. math:: - \text{Softmin}_{\text{adaptive}}({x}) = \alpha\,\text{Softmin}(\beta{x}+\gamma), + \text{Softmin}_{\text{adaptive}}({x})=\alpha\, + \text{Softmin}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Softmin function is defined as: @@ -383,13 +387,15 @@ class AdaptiveSoftmax(AdaptiveActivationFunctionInterface): r""" Adaptive trainable :class:`~torch.nn.Softmax` activation function. - Given the function :math:`\text{Softmax}:\mathbb{R}^n\rightarrow\mathbb{R}^n`, + Given the function + :math:`\text{Softmax}:\mathbb{R}^n\rightarrow\mathbb{R}^n`, the adaptive function :math:`\text{Softmax}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n` is defined as: .. 
math:: - \text{Softmax}_{\text{adaptive}}({x}) = \alpha\,\text{Softmax}(\beta{x}+\gamma), + \text{Softmax}_{\text{adaptive}}({x})=\alpha\, + \text{Softmax}(\beta{x}+\gamma), where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Softmax function is defined as: diff --git a/pina/adaptive_function/adaptive_function_interface.py b/pina/adaptive_function/adaptive_function_interface.py index 20fae5105..365caf6f3 100644 --- a/pina/adaptive_function/adaptive_function_interface.py +++ b/pina/adaptive_function/adaptive_function_interface.py @@ -1,15 +1,15 @@ """Module for adaptive functions.""" -import torch - -from pina.utils import check_consistency from abc import ABCMeta +import torch +from ..utils import check_consistency, is_function class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta): r""" The - :class:`~pina.adaptive_function.adaptive_func_interface.AdaptiveActivationFunctionInterface` + :class:`~pina.adaptive_function.adaptive_func_interface.\ + AdaptiveActivationFunctionInterface` class makes a :class:`torch.nn.Module` activation function into an adaptive trainable activation function. If one wants to create an adaptive activation function, this class must be used as a base class. @@ -104,9 +104,6 @@ def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): else: self.register_buffer("gamma", gamma) - # storing the activation - self._func = None - def forward(self, x): """ Define the computation performed at every call. @@ -144,3 +141,12 @@ def func(self): The callable activation function. """ return self._func + + @func.setter + def func(self, value): + """ + Set the activation function. + """ + if not is_function(value): + raise TypeError("The function must be callable.") + self._func = value diff --git a/pina/adaptive_functions/__init__.py b/pina/adaptive_functions/__init__.py index 9af99d2b0..ce00ac998 100644 --- a/pina/adaptive_functions/__init__.py +++ b/pina/adaptive_functions/__init__.py @@ -1,3 +1,7 @@ +""" +Old module for adaptive functions. Deprecated in 0.2.0. +""" + import warnings from ..adaptive_function import * @@ -8,7 +12,7 @@ warnings.formatwarning = custom_warning_format warnings.filterwarnings("always", category=DeprecationWarning) warnings.warn( - f"'pina.adaptive_functions' is deprecated and will be removed " - f"in future versions. Please use 'pina.adaptive_function' instead.", + "'pina.adaptive_functions' is deprecated and will be removed " + "in future versions. Please use 'pina.adaptive_function' instead.", DeprecationWarning, ) diff --git a/pina/callbacks/__init__.py b/pina/callbacks/__init__.py index 8c2c71d02..76c5021ff 100644 --- a/pina/callbacks/__init__.py +++ b/pina/callbacks/__init__.py @@ -1,3 +1,7 @@ +""" +Old module for callbacks. Deprecated in 0.2.0. +""" + import warnings from ..callback import * @@ -8,7 +12,7 @@ warnings.formatwarning = custom_warning_format warnings.filterwarnings("always", category=DeprecationWarning) warnings.warn( - f"'pina.callbacks' is deprecated and will be removed " - f"in future versions. Please use 'pina.callback' instead.", + "'pina.callbacks' is deprecated and will be removed " + "in future versions. Please use 'pina.callback' instead.", DeprecationWarning, ) diff --git a/pina/collector.py b/pina/collector.py index b78442197..ab42111c1 100644 --- a/pina/collector.py +++ b/pina/collector.py @@ -1,5 +1,5 @@ """ -# TODO +Module for the Collector class. 
""" from .graph import Graph @@ -7,16 +7,23 @@ class Collector: + """ + Collector class for collecting data from the problem. + """ def __init__(self, problem): + """ " + Initialize the Collector class, by creating a hook between the collector + and the problem and initializing the data collections. + + :param AbstractProblem problem: The problem to collect data from. + """ # creating a hook between collector and problem self.problem = problem # those variables are used for the dataloading self._data_collections = {name: {} for name in self.problem.conditions} - self.conditions_name = { - i: name for i, name in enumerate(self.problem.conditions) - } + self.conditions_name = dict(enumerate(self.problem.conditions)) # variables used to check that all conditions are sampled self._is_conditions_ready = { @@ -26,26 +33,61 @@ def __init__(self, problem): @property def full(self): + """ + Return True if all conditions are ready. + """ + return all(self._is_conditions_ready.values()) @full.setter def full(self, value): + """ + Set the full property of the collector. Admit only boolean values. + + :param bool value: The value to set the full property to. + """ check_consistency(value, bool) self._full = value @property def data_collections(self): + """ + Return the data collections, created by combining together all condition + in the problem. + + :return: The data collections. + :rtype: dict + """ + return self._data_collections @property def problem(self): + """ + Property that return the problem connected to the collector. + + :return: The problem connected to the collector. + :rtype: AbstractProblem + """ return self._problem @problem.setter def problem(self, value): + """ + Return the problem connected to the collector. + + return: The problem connected to the collector. + rtype: AbstractProblem + """ + self._problem = value def store_fixed_data(self): + """ + Store inside data collections the fixed data of the problem. These comes + from the conditions that do not require sampling. + """ + # loop over all conditions for condition_name, condition in self.problem.conditions.items(): # if the condition is not ready and domain is not attribute @@ -66,7 +108,8 @@ def store_fixed_data(self): def store_sample_domains(self): """ - # TODO: Add docstring + Store inside data collections the sampled data of the problem. These + comes from the conditions that require sampling. """ for condition_name in self.problem.conditions: condition = self.problem.conditions[condition_name] diff --git a/pina/data/data_module.py b/pina/data/data_module.py index 8157ea489..f68bbc70f 100644 --- a/pina/data/data_module.py +++ b/pina/data/data_module.py @@ -1,4 +1,9 @@ -import logging +""" +This module contains the PinaDataModule class, which extends the +LightningDataModule class to allow proper creation and management of +different types of Datasets defined in PINA. +""" + import warnings from lightning.pytorch import LightningDataModule import torch @@ -58,6 +63,10 @@ def __next__(self): class Collator: + """ + Class used to collate the batch + """ + def __init__(self, max_conditions_lengths, dataset=None): self.max_conditions_lengths = max_conditions_lengths self.callable_function = ( @@ -123,6 +132,10 @@ def __call__(self, batch): class PinaSampler: + """ + Class used to create the sampler instance. 
+ """ + def __new__(cls, dataset, shuffle): if ( @@ -150,7 +163,6 @@ def __init__( train_size=0.7, test_size=0.2, val_size=0.1, - predict_size=0.0, batch_size=None, shuffle=True, repeat=False, @@ -169,9 +181,8 @@ def __init__( :type test_size: float :param val_size: Fraction or number of elements in the validation split. :type val_size: float - :param predict_size: Fraction or number of elements in the prediction split. - :type predict_size: float - :param batch_size: Batch size used for training. If None, the entire dataset is used per batch. + :param batch_size: Batch size used for training. If None, the entire + dataset is used per batch. :type batch_size: int or None :param shuffle: Whether to shuffle the dataset before splitting. :type shuffle: bool @@ -179,13 +190,13 @@ def __init__( :type repeat: bool :param automatic_batching: Whether to enable automatic batching. :type automatic_batching: bool - :param num_workers: Number of worker threads for data loading. Default 0 (serial loading) + :param num_workers: Number of worker threads for data loading. + Default 0 (serial loading) :type num_workers: int - :param pin_memory: Whether to use pinned memory for faster data transfer to GPU. (Default False) + :param pin_memory: Whether to use pinned memory for faster data + transfer to GPU. (Default False) :type pin_memory: bool """ - logging.debug("Start initialization of Pina DataModule") - logging.info("Start initialization of Pina DataModule") super().__init__() # Store fixed attributes @@ -216,7 +227,7 @@ def __init__( collector.store_sample_domains() # Check if the splits are correct - self._check_slit_sizes(train_size, test_size, val_size, predict_size) + self._check_slit_sizes(train_size, test_size, val_size) # Split input data into subsets splits_dict = {} @@ -235,11 +246,7 @@ def __init__( self.val_dataset = None else: self.val_dataloader = super().val_dataloader - if predict_size > 0: - splits_dict["predict"] = predict_size - self.predict_dataset = None - else: - self.predict_dataloader = super().predict_dataloader + self.collector_splits = self._create_splits(collector, splits_dict) self.transfer_batch_to_device = self._transfer_batch_to_device @@ -247,7 +254,6 @@ def setup(self, stage=None): """ Perform the splitting of the dataset """ - logging.debug("Start setup of Pina DataModule obj") if stage == "fit" or stage is None: self.train_dataset = PinaDatasetFactory( self.collector_splits["train"], @@ -270,18 +276,8 @@ def setup(self, stage=None): max_conditions_lengths=self.find_max_conditions_lengths("test"), automatic_batching=self.automatic_batching, ) - elif stage == "predict": - self.predict_dataset = PinaDatasetFactory( - self.collector_splits["predict"], - max_conditions_lengths=self.find_max_conditions_lengths( - "predict" - ), - automatic_batching=self.automatic_batching, - ) else: - raise ValueError( - "stage must be either 'fit' or 'test' or 'predict'." - ) + raise ValueError("stage must be either 'fit' or 'test'.") @staticmethod def _split_condition(condition_dict, splits_dict): @@ -336,7 +332,6 @@ def _apply_shuffle(condition_dict, len_data): # ----------- End auxiliary function ------------ - logging.debug("Dataset creation in PinaDataModule obj") split_names = list(splits_dict.keys()) dataset_dict = {name: {} for name in split_names} for ( @@ -355,11 +350,13 @@ def _apply_shuffle(condition_dict, len_data): def _create_dataloader(self, split, dataset): shuffle = self.shuffle if split == "train" else False # Suppress the warning about num_workers. 
-        # In many cases, especially for PINNs, serial data loading can outperform parallel data loading.
+        # In many cases, especially for PINNs,
+        # serial data loading can outperform parallel data loading.
         warnings.filterwarnings(
             "ignore",
             message=(
-                r"The '(train|val|test)_dataloader' does not have many workers which may be a bottleneck."
+                "The '(train|val|test)_dataloader' does not have many workers "
+                "which may be a bottleneck."
             ),
             module="lightning.pytorch.trainer.connectors.data_connector",
         )
@@ -387,6 +384,14 @@ def _create_dataloader(self, split, dataset):
         return dataloader

     def find_max_conditions_lengths(self, split):
+        """
+        Define the maximum length of the conditions.
+
+        :param split: The name of the dataset split.
+        :type split: str
+        :return: The maximum length of the conditions.
+        :rtype: dict
+        """
         max_conditions_lengths = {}
         for k, v in self.collector_splits[split].items():
             if self.batch_size is None:
@@ -417,12 +422,6 @@ def test_dataloader(self):
         """
         return self._create_dataloader("test", self.test_dataset)

-    def predict_dataloader(self):
-        """
-        Create the prediction dataloader
-        """
-        raise NotImplementedError("Predict dataloader not implemented")
-
     @staticmethod
     def _transfer_batch_to_device_dummy(batch, device, dataloader_idx):
         return batch
@@ -445,13 +444,13 @@ def _transfer_batch_to_device(self, batch, device, dataloader_idx):
         return batch

     @staticmethod
-    def _check_slit_sizes(train_size, test_size, val_size, predict_size):
+    def _check_slit_sizes(train_size, test_size, val_size):
         """
         Check if the splits are correct
         """
-        if train_size < 0 or test_size < 0 or val_size < 0 or predict_size < 0:
+        if train_size < 0 or test_size < 0 or val_size < 0:
             raise ValueError("The splits must be positive")
-        if abs(train_size + test_size + val_size + predict_size - 1) > 1e-6:
+        if abs(train_size + test_size + val_size - 1) > 1e-6:
             raise ValueError("The sum of the splits must be 1")

     @property
diff --git a/pina/domain/__init__.py b/pina/domain/__init__.py
index e5a327b4c..45aade718 100644
--- a/pina/domain/__init__.py
+++ b/pina/domain/__init__.py
@@ -1,3 +1,7 @@
+"""
+This module contains the domain classes.
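+
+A typical import, shown as a minimal sketch:
+
+>>> from pina.domain import CartesianDomain
+>>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})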
+""" + __all__ = [ "DomainInterface", "CartesianDomain", diff --git a/pina/domain/cartesian.py b/pina/domain/cartesian.py index 9c312ea8b..0d96080b6 100644 --- a/pina/domain/cartesian.py +++ b/pina/domain/cartesian.py @@ -1,4 +1,5 @@ -import torch +"""Module for CartesianDomain class.""" + import torch from .domain_interface import DomainInterface @@ -46,7 +47,8 @@ def variables(self): def update(self, new_domain): """Adding new dimensions on the ``CartesianDomain`` - :param CartesianDomain new_domain: A new ``CartesianDomain`` object to merge + :param CartesianDomain new_domain: A new ``CartesianDomain`` object + to merge :Example: >>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) @@ -78,7 +80,7 @@ def _sample_range(self, n, mode, bounds): """ dim = bounds.shape[0] if mode in ["chebyshev", "grid"] and dim != 1: - raise RuntimeError("Something wrong in Span...") + raise RuntimeError("Something wrong in Cartesian...") if mode == "random": pts = torch.rand(size=(n, dim)) @@ -89,11 +91,10 @@ def _sample_range(self, n, mode, bounds): # elif mode == 'lh' or mode == 'latin': elif mode in ["lh", "latin"]: pts = torch_lhs(n, dim) + else: + raise ValueError("Invalid mode") - pts *= bounds[:, 1] - bounds[:, 0] - pts += bounds[:, 0] - - return pts + return pts * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0] def sample(self, n, mode="random", variables="all"): """Sample routine. @@ -121,7 +122,8 @@ def sample(self, n, mode="random", variables="all"): are sampled all together, and the final number of points .. warning:: - The extrema values of Span are always sampled only for ``grid`` mode. + The extrema values of Span are always sampled only for ``grid`` + mode. :Example: >>> spatial_domain = Span({'x': [0, 1], 'y': [0, 1]}) @@ -153,7 +155,7 @@ def _1d_sampler(n, mode, variables): """Sample independentely the variables and cross the results""" tmp = [] for variable in variables: - if variable in self.range_.keys(): + if variable in self.range_: bound = torch.tensor([self.range_[variable]]) pts_variable = self._sample_range(n, mode, bound) pts_variable = pts_variable.as_subclass(LabelTensor) @@ -166,7 +168,7 @@ def _1d_sampler(n, mode, variables): result = result.append(i, mode="cross") for variable in variables: - if variable in self.fixed_.keys(): + if variable in self.fixed_: value = self.fixed_[variable] pts_variable = torch.tensor([[value]]).repeat( result.shape[0], 1 @@ -201,7 +203,7 @@ def _Nd_sampler(n, mode, variables): result.labels = keys for variable in variables: - if variable in self.fixed_.keys(): + if variable in self.fixed_: value = self.fixed_[variable] pts_variable = torch.tensor([[value]]).repeat( result.shape[0], 1 @@ -224,7 +226,7 @@ def _single_points_sample(n, variables): """ tmp = [] for variable in variables: - if variable in self.fixed_.keys(): + if variable in self.fixed_: value = self.fixed_[variable] pts_variable = torch.tensor([[value]]).repeat(n, 1) pts_variable = pts_variable.as_subclass(LabelTensor) @@ -244,15 +246,14 @@ def _single_points_sample(n, variables): if self.fixed_ and (not self.range_): return _single_points_sample(n, variables) - if isinstance(variables, str) and variables in self.fixed_.keys(): + if isinstance(variables, str) and variables in self.fixed_: return _single_points_sample(n, variables) if mode in ["grid", "chebyshev"]: return _1d_sampler(n, mode, variables).extract(variables) - elif mode in ["random", "lh", "latin"]: + if mode in ["random", "lh", "latin"]: return _Nd_sampler(n, mode, variables).extract(variables) - else: - raise 
ValueError(f"mode={mode} is not valid.") + raise ValueError(f"mode={mode} is not valid.") def is_inside(self, point, check_border=False): """Check if a point is inside the ellipsoid. diff --git a/pina/domain/difference_domain.py b/pina/domain/difference_domain.py index 4015a3860..fc5056396 100644 --- a/pina/domain/difference_domain.py +++ b/pina/domain/difference_domain.py @@ -6,10 +6,12 @@ class Difference(OperationInterface): + """ + PINA implementation of Difference of Domains. + """ def __init__(self, geometries): r""" - PINA implementation of Difference of Domains. Given two sets :math:`A` and :math:`B` then the domain difference is defined as: @@ -41,7 +43,8 @@ def is_inside(self, point, check_border=False): :param point: Point to be checked. :type point: torch.Tensor :param bool check_border: If ``True``, the border is considered inside. - :return: ``True`` if the point is inside the Exclusion domain, ``False`` otherwise. + :return: ``True`` if the point is inside the Exclusion domain, + ``False`` otherwise. :rtype: bool """ for geometry in self.geometries[1:]: @@ -54,7 +57,8 @@ def sample(self, n, mode="random", variables="all"): Sample routine for ``Difference`` domain. :param int n: Number of points to sample in the shape. - :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``. + :param str mode: Mode for sampling, defaults to ``random``. Available + modes include: ``random``. :param variables: Variables to be sampled, defaults to ``all``. :type variables: str | list[str] :return: Returns ``LabelTensor`` of n sampled points. diff --git a/pina/domain/domain_interface.py b/pina/domain/domain_interface.py index 265b64fa9..2db6fe5d3 100644 --- a/pina/domain/domain_interface.py +++ b/pina/domain/domain_interface.py @@ -17,7 +17,6 @@ def sample_modes(self): """ Abstract method returing available samples modes for the Domain. """ - pass @property @abstractmethod @@ -25,7 +24,6 @@ def variables(self): """ Abstract method returing Domain variables. """ - pass @sample_modes.setter def sample_modes(self, values): @@ -48,7 +46,6 @@ def sample(self): Abstract method for sampling a point from the location. To be implemented in the child class. """ - pass @abstractmethod def is_inside(self, point, check_border=False): @@ -61,4 +58,3 @@ def is_inside(self, point, check_border=False): of the location is considered checked to be considered inside or not. Defaults to ``False``. """ - pass diff --git a/pina/domain/ellipsoid.py b/pina/domain/ellipsoid.py index 5d466743b..120cbf6b1 100644 --- a/pina/domain/ellipsoid.py +++ b/pina/domain/ellipsoid.py @@ -1,5 +1,8 @@ -import torch +""" +Module for the Ellipsoid domain. +""" +import torch from .domain_interface import DomainInterface from ..label_tensor import LabelTensor from ..utils import check_consistency @@ -113,7 +116,7 @@ def is_inside(self, point, check_border=False): tmp = torch.tensor(list_dict_vals, dtype=torch.float) centers = LabelTensor(tmp.reshape(1, -1), self.variables) - if not all([i in ax_sq.labels for i in point.labels]): + if not all(i in ax_sq.labels for i in point.labels): raise ValueError( "point labels different from constructor" f" dictionary labels. Got {point.labels}," @@ -202,7 +205,8 @@ def sample(self, n, mode="random", variables="all"): """Sample routine. :param int n: Number of points to sample in the shape. - :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``. + :param str mode: Mode for sampling, defaults to ``random``. 
+            Available modes include: ``random``.
         :param variables: Variables to be sampled, defaults to ``all``.
         :type variables: str | list[str]
         :return: Returns ``LabelTensor`` of n sampled points.
@@ -242,7 +246,7 @@ def _Nd_sampler(n, mode, variables):
             result.labels = keys

             for variable in variables:
-                if variable in self.fixed_.keys():
+                if variable in self.fixed_:
                     value = self.fixed_[variable]
                     pts_variable = torch.tensor([[value]]).repeat(
                         result.shape[0], 1
@@ -265,7 +269,7 @@ def _single_points_sample(n, variables):
             """
             tmp = []
             for variable in variables:
-                if variable in self.fixed_.keys():
+                if variable in self.fixed_:
                     value = self.fixed_[variable]
                     pts_variable = torch.tensor([[value]]).repeat(n, 1)
                     pts_variable = pts_variable.as_subclass(LabelTensor)
@@ -288,5 +292,5 @@ def _single_points_sample(n, variables):

         if mode in self.sample_modes:
             return _Nd_sampler(n, mode, variables).extract(variables)
-        else:
-            raise NotImplementedError(f"mode={mode} is not implemented.")
+
+        raise NotImplementedError(f"mode={mode} is not implemented.")
diff --git a/pina/domain/exclusion_domain.py b/pina/domain/exclusion_domain.py
index 6b04b0cf0..0d25d7378 100644
--- a/pina/domain/exclusion_domain.py
+++ b/pina/domain/exclusion_domain.py
@@ -1,21 +1,24 @@
 """Module for Exclusion class."""

+import random
 import torch
 from ..label_tensor import LabelTensor
-import random
 from .operation_interface import OperationInterface


 class Exclusion(OperationInterface):
+    """
+    PINA implementation of Exclusion of Domains.
+    """

     def __init__(self, geometries):
         r"""
-        PINA implementation of Exclusion of Domains.
         Given two sets :math:`A` and :math:`B` then the domain difference
         is defined as:

         .. math::
-            A \setminus B = \{x \mid x \in A \land x \in B \land x \not\in (A \lor B)\},
+            A \triangle B = \{x \mid x \in (A \cup B) \land
+            x \not\in (A \cap B)\},

         with :math:`x` a point in :math:`\mathbb{R}^N` and :math:`N`
         the dimension of the geometry space.

@@ -39,7 +42,8 @@ def is_inside(self, point, check_border=False):
         :param point: Point to be checked.
         :type point: torch.Tensor
         :param bool check_border: If ``True``, the border is considered inside.
-        :return: ``True`` if the point is inside the Exclusion domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the Exclusion domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0
@@ -53,7 +57,8 @@ def sample(self, n, mode="random", variables="all"):
         Sample routine for ``Exclusion`` domain.

         :param int n: Number of points to sample in the shape.
-        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param str mode: Mode for sampling, defaults to ``random``. Available
+            modes include: ``random``.
         :param variables: Variables to be sampled, defaults to ``all``.
         :type variables: str | list[str]
         :return: Returns ``LabelTensor`` of n sampled points.
@@ -83,7 +88,8 @@ def sample(self, n, mode="random", variables="all"):

         sampled = []

-        # calculate the number of points to sample for each geometry and the remainder.
+        # calculate the number of points to sample for each geometry and the
+        # remainder.
         remainder = n % len(self.geometries)
         num_points = n // len(self.geometries)
diff --git a/pina/domain/intersection_domain.py b/pina/domain/intersection_domain.py
index 906595f54..69388b002 100644
--- a/pina/domain/intersection_domain.py
+++ b/pina/domain/intersection_domain.py
@@ -1,16 +1,18 @@
 """Module for Intersection class."""

+import random
 import torch
 from ..label_tensor import LabelTensor
 from .operation_interface import OperationInterface
-import random


 class Intersection(OperationInterface):
+    """
+    PINA implementation of Intersection of Domains.
+    """

     def __init__(self, geometries):
         r"""
-        PINA implementation of Intersection of Domains.
         Given two sets :math:`A` and :math:`B` then the domain difference
         is defined as:

@@ -41,7 +43,8 @@ def is_inside(self, point, check_border=False):
         :param point: Point to be checked.
         :type point: torch.Tensor
         :param bool check_border: If ``True``, the border is considered inside.
-        :return: ``True`` if the point is inside the Intersection domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the Intersection domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0
@@ -55,7 +58,8 @@ def sample(self, n, mode="random", variables="all"):
         Sample routine for ``Intersection`` domain.

         :param int n: Number of points to sample in the shape.
-        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param str mode: Mode for sampling, defaults to ``random``. Available
+            modes include: ``random``.
         :param variables: Variables to be sampled, defaults to ``all``.
         :type variables: str | list[str]
         :return: Returns ``LabelTensor`` of n sampled points.
@@ -85,7 +89,8 @@ def sample(self, n, mode="random", variables="all"):

         sampled = []

-        # calculate the number of points to sample for each geometry and the remainder.
+        # calculate the number of points to sample for each geometry and the
+        # remainder.
         remainder = n % len(self.geometries)
         num_points = n // len(self.geometries)
diff --git a/pina/domain/operation_interface.py b/pina/domain/operation_interface.py
index 7023eb9b9..5a5c3c169 100644
--- a/pina/domain/operation_interface.py
+++ b/pina/domain/operation_interface.py
@@ -1,15 +1,18 @@
 """Module for OperationInterface class."""

+from abc import ABCMeta, abstractmethod
 from .domain_interface import DomainInterface
 from ..utils import check_consistency
-from abc import ABCMeta, abstractmethod


 class OperationInterface(DomainInterface, metaclass=ABCMeta):
+    """
+    Abstract class for set operations on domains.
+    """

     def __init__(self, geometries):
         """
-        Abstract set operation class. Any geometry operation entity must inherit from this class.
+        Any geometry operation entity must inherit from this class.

         :param list geometries: A list of geometries from ``pina.geometry``
             such as ``EllipsoidDomain`` or ``CartesianDomain``.
@@ -57,10 +60,10 @@ def is_inside(self, point, check_border=False):
         :param point: Point to be checked.
         :type point: torch.Tensor
         :param bool check_border: If ``True``, the border is considered inside.
-        :return: ``True`` if the point is inside the Intersection domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the resulting domain,
+            ``False`` otherwise.
         :rtype: bool
         """
-        pass

     def _check_dimensions(self, geometries):
         """Check if the dimensions of the geometries are consistent.
@@ -71,5 +74,5 @@ def _check_dimensions(self, geometries):
         for geometry in geometries:
             if geometry.variables != geometries[0].variables:
                 raise NotImplementedError(
-                    f"The geometries need to have same dimensions and labels."
+                    "The geometries need to have same dimensions and labels."
                 )
diff --git a/pina/domain/simplex.py b/pina/domain/simplex.py
index 1e706c603..7c1deee6f 100644
--- a/pina/domain/simplex.py
+++ b/pina/domain/simplex.py
@@ -1,6 +1,10 @@
+"""
+Module for Simplex Domain.
+"""
+
 import torch
 from .domain_interface import DomainInterface
-from pina.domain import CartesianDomain
+from .cartesian import CartesianDomain
 from ..label_tensor import LabelTensor
 from ..utils import check_consistency

@@ -51,23 +55,20 @@ def __init__(self, simplex_matrix, sample_surface=False):
         # check consistency of labels
         matrix_labels = simplex_matrix[0].labels
         if not all(vertex.labels == matrix_labels for vertex in simplex_matrix):
-            raise ValueError(f"Labels don't match.")
+            raise ValueError("Labels don't match.")

         # check consistency dimensions
         dim_simplex = len(matrix_labels)
         if len(simplex_matrix) != dim_simplex + 1:
             raise ValueError(
-                "An n-dimensional simplex is composed by n + 1 tensors of dimension n."
+                "An n-dimensional simplex is composed of n + 1 tensors of "
+                "dimension n."
             )

         # creating vertices matrix
         self._vertices_matrix = LabelTensor.vstack(simplex_matrix)

         # creating basis vectors for simplex
-        # self._vectors_shifted = (
-        #     (self._vertices_matrix.T)[:, :-1] - (self._vertices_matrix.T)[:, None, -1]
-        # ) ### TODO: Remove after checking
-
         vert = self._vertices_matrix
         self._vectors_shifted = (vert[:-1] - vert[-1]).T

@@ -92,7 +93,7 @@ def _build_cartesian(self, vertices):
         """

         span_dict = {}
-        for i, coord in enumerate(self.variables):
+        for coord in self.variables:
             sorted_vertices = torch.sort(vertices[coord].tensor.squeeze())
             # respective coord bounded by the lowest and highest values
             span_dict[coord] = [
@@ -133,6 +134,7 @@ def is_inside(self, point, check_border=False):
         point_shift = point_shift.tensor.reshape(-1, 1)

         # compute barycentric coordinates
+
         lambda_ = torch.linalg.solve(
             self._vectors_shifted * 1.0, point_shift * 1.0
         )
@@ -222,7 +224,8 @@ def sample(self, n, mode="random", variables="all"):
         Sample n points from Simplex domain.

         :param int n: Number of points to sample in the shape.
-        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param str mode: Mode for sampling, defaults to ``random``. Available
+            modes include: ``random``.
         :param variables: Variables to be sampled, defaults to ``all``.
         :type variables: str | list[str]
         :return: Returns ``LabelTensor`` of n sampled points.
diff --git a/pina/domain/union_domain.py b/pina/domain/union_domain.py
index 813cc74be..ecf6c63c2 100644
--- a/pina/domain/union_domain.py
+++ b/pina/domain/union_domain.py
@@ -1,12 +1,15 @@
 """Module for Union class."""

+import random
 import torch
 from .operation_interface import OperationInterface
 from ..label_tensor import LabelTensor
-import random


 class Union(OperationInterface):
+    """
+    PINA implementation of Union of Domains.
+    """

     def __init__(self, geometries):
         r"""

@@ -36,7 +39,7 @@ def __init__(self, geometries):
     @property
     def sample_modes(self):
         self.sample_modes = list(
-            set([geom.sample_modes for geom in self.geometries])
+            set(geom.sample_modes for geom in self.geometries)
         )

     def is_inside(self, point, check_border=False):
@@ -61,7 +64,8 @@ def sample(self, n, mode="random", variables="all"):
         Sample routine for ``Union`` domain.
         :param int n: Number of points to sample in the shape.
-        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param str mode: Mode for sampling, defaults to ``random``. Available
+            modes include: ``random``.
         :param variables: Variables to be sampled, defaults to ``all``.
         :type variables: str | list[str]
         :return: Returns ``LabelTensor`` of n sampled points.
@@ -85,7 +89,8 @@ def sample(self, n, mode="random", variables="all"):
         """
         sampled_points = []

-        # calculate the number of points to sample for each geometry and the remainder
+        # calculate the number of points to sample for each geometry and the
+        # remainder
         remainder = n % len(self.geometries)
         num_points = n // len(self.geometries)

@@ -103,7 +108,8 @@ def sample(self, n, mode="random", variables="all"):
                     num_points + int(i < remainder), mode, variables
                 )
             )
-            # in case number of sampled points is smaller than the number of geometries
+            # in case the number of sampled points is smaller than the number
+            # of geometries
             if len(sampled_points) >= n:
                 break
diff --git a/pina/equation/__init__.py b/pina/equation/__init__.py
index d9961b486..87168146b 100644
--- a/pina/equation/__init__.py
+++ b/pina/equation/__init__.py
@@ -1,3 +1,7 @@
+"""
+Module for defining equations and systems of equations.
+"""
+
 __all__ = [
     "SystemEquation",
     "Equation",
diff --git a/pina/equation/equation.py b/pina/equation/equation.py
index 6ab28cbd0..7306ea8ca 100644
--- a/pina/equation/equation.py
+++ b/pina/equation/equation.py
@@ -4,10 +4,12 @@


 class Equation(EquationInterface):
+    """
+    Equation class for specifying any equation in PINA.
+    """

     def __init__(self, equation):
         """
-        Equation class for specifing any equation in PINA.
         Each ``equation`` passed to a ``Condition``
         object must be an ``Equation`` or ``SystemEquation``.
diff --git a/pina/equation/equation_factory.py b/pina/equation/equation_factory.py
index 689465932..cc271092f 100644
--- a/pina/equation/equation_factory.py
+++ b/pina/equation/equation_factory.py
@@ -1,14 +1,17 @@
-"""Module"""
+"""Module for defining different equations."""

 from .equation import Equation
 from ..operator import grad, div, laplacian


 class FixedValue(Equation):
+    """
+    Fixed Value Equation class.
+    """

     def __init__(self, value, components=None):
         """
-        Fixed Value Equation class. This class can be
+        This class can be
         used to enforced a fixed value for a specific
         condition, e.g. Dirichlet Boundary conditions.

@@ -29,11 +32,13 @@ def equation(input_, output_):


 class FixedGradient(Equation):
+    """
+    Fixed Gradient Equation class.
+    """

     def __init__(self, value, components=None, d=None):
         """
-        Fixed Gradient Equation class. This class can be
-        used to enforced a fixed gradient for a specific
+        This class can be used to enforce a fixed gradient for a specific
         condition.

         :param float value: Value to be mantained fixed.
@@ -55,11 +60,13 @@ def equation(input_, output_):


 class FixedFlux(Equation):
+    """
+    Fixed Flux Equation class.
+    """

     def __init__(self, value, components=None, d=None):
         """
-        Fixed Flux Equation class. This class can be
-        used to enforced a fixed flux for a specific
+        This class can be used to enforce a fixed flux for a specific
         condition.

         :param float value: Value to be mantained fixed.
@@ -81,10 +88,13 @@ def equation(input_, output_):


 class Laplace(Equation):
+    """
+    Laplace Equation class.
+    """

     def __init__(self, components=None, d=None):
         """
-        Laplace Equation class. This class can be
+        This class can be
         used to enforced a Laplace equation for a specific
         condition (force term set to zero).
diff --git a/pina/equation/equation_interface.py b/pina/equation/equation_interface.py
index 982b431e6..6c25418b1 100644
--- a/pina/equation/equation_interface.py
+++ b/pina/equation/equation_interface.py
@@ -19,10 +19,10 @@ def residual(self, input_, output_, params_):
         Residual computation of the equation.

         :param LabelTensor input_: Input points to evaluate the equation.
-        :param LabelTensor output_: Output vectors given by my model (e.g., a ``FeedForward`` model).
+        :param LabelTensor output_: Output vectors given by my model (e.g.,
+            a ``FeedForward`` model).
         :param dict params_: Dictionary of unknown parameters, eventually
             related to an ``InverseProblem``.
         :return: The residual evaluation of the specified equation.
         :rtype: LabelTensor
         """
-        pass
diff --git a/pina/equation/system_equation.py b/pina/equation/system_equation.py
index 2ed54ae24..4200199b9 100644
--- a/pina/equation/system_equation.py
+++ b/pina/equation/system_equation.py
@@ -1,16 +1,19 @@
 """Module for SystemEquation."""

 import torch
+from .equation_interface import EquationInterface
 from .equation import Equation
 from ..utils import check_consistency


-class SystemEquation(Equation):
+class SystemEquation(EquationInterface):
+    """
+    System of equations class for specifying any system
+    of equations in PINA.
+    """

     def __init__(self, list_equation, reduction=None):
         """
-        System of Equation class for specifing any system
-        of equations in PINA.
         Each ``equation`` passed to a ``Condition``
         object must be an ``Equation`` or ``SystemEquation``.
         A ``SystemEquation`` is specified by a list of
@@ -37,7 +40,7 @@ def __init__(self, list_equation, reduction=None):
             self.reduction = torch.mean
         elif reduction == "sum":
             self.reduction = torch.sum
-        elif (reduction == None) or callable(reduction):
+        elif (reduction is None) or callable(reduction):
             self.reduction = reduction
         else:
             raise NotImplementedError(
diff --git a/pina/geometry/__init__.py b/pina/geometry/__init__.py
index e627b2972..9ed4c0145 100644
--- a/pina/geometry/__init__.py
+++ b/pina/geometry/__init__.py
@@ -1,3 +1,8 @@
+"""
+Old module for geometry-related classes and functions.
+Deprecated in 0.2.0.
+"""
+
 import warnings

 from ..domain import *
diff --git a/pina/graph.py b/pina/graph.py
index 7d1576902..39d4bcfd5 100644
--- a/pina/graph.py
+++ b/pina/graph.py
@@ -5,7 +5,7 @@
 import torch
 from torch_geometric.data import Data, Batch
 from torch_geometric.utils import to_undirected
-from . import LabelTensor
+from .label_tensor import LabelTensor
 from .utils import check_consistency, is_function
diff --git a/pina/label_tensor.py b/pina/label_tensor.py
index 79313de51..cce141c12 100644
--- a/pina/label_tensor.py
+++ b/pina/label_tensor.py
@@ -17,9 +17,15 @@ def __new__(cls, x, labels, *args, **kwargs):

     @property
     def tensor(self):
+        """
+        Give the tensor part of the LabelTensor.
+ + :return: tensor part of the LabelTensor + :rtype: torch.Tensor + """ return self.as_subclass(Tensor) - def __init__(self, x, labels, **kwargs): + def __init__(self, x, labels): """ Construct a `LabelTensor` by passing a dict of the labels @@ -43,8 +49,9 @@ def labels(self): :return: labels of self :rtype: list """ - if self.ndim - 1 in self._labels.keys(): + if self.ndim - 1 in self._labels: return self._labels[self.ndim - 1]["dof"] + return None @property def full_labels(self): @@ -55,11 +62,11 @@ def full_labels(self): """ to_return_dict = {} shape_tensor = self.shape - for i in range(len(shape_tensor)): - if i in self._labels.keys(): + for i, value in enumerate(shape_tensor): + if i in self._labels: to_return_dict[i] = self._labels[i] else: - to_return_dict[i] = {"dof": range(shape_tensor[i]), "name": i} + to_return_dict[i] = {"dof": range(value), "name": i} return to_return_dict @property @@ -186,7 +193,7 @@ def get_label_indices(dim_labels, labels_te): labels = copy(self._labels) # Get the dimension names and the respective dimension index - dim_names = {labels[dim]["name"]: dim for dim in labels.keys()} + dim_names = {labels[dim]["name"]: dim for dim in labels} ndim = super().ndim tensor = self.tensor.as_subclass(torch.Tensor) @@ -259,7 +266,7 @@ def cat(tensors, dim=0): # Check label consistency across tensors, excluding the # concatenation dimension - for key in tensors_labels[0].keys(): + for key in tensors_labels[0]: if key != dim: if any( tensors_labels[i][key] != tensors_labels[0][key] @@ -325,6 +332,12 @@ def requires_grad_(self, mode=True): @property def dtype(self): + """ + Give the dtype of the tensor. + + :return: dtype of the tensor + :rtype: torch.dtype + """ return super().dtype def to(self, *args, **kwargs): @@ -350,12 +363,31 @@ def clone(self, *args, **kwargs): return out def append(self, tensor, mode="std"): + """ + Appends a given tensor to the current tensor along the last dimension. + + This method allows for two types of appending operations: + 1. **Standard append** ("std"): Concatenates the tensors along the + last dimension. + 2. **Cross append** ("cross"): Repeats the current tensor and the new + tensor in a cross-product manner, then concatenates them. + + :param LabelTensor tensor: The tensor to append. + :param mode: The append mode to use. Defaults to "std". + :type mode: str, optional + :return: The new tensor obtained by appending the input tensor + (either 'std' or 'cross'). + :rtype: LabelTensor + + :raises ValueError: If the mode is not "std" or "cross". 
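+
+        :Example:
+            >>> # a minimal sketch with illustrative labels and shapes
+            >>> t1 = LabelTensor(torch.rand(4, 2), ['a', 'b'])
+            >>> t2 = LabelTensor(torch.rand(4, 1), ['c'])
+            >>> t1.append(t2).labels
+            ['a', 'b', 'c']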
+ """ if mode == "std": # Call cat on last dimension new_label_tensor = LabelTensor.cat( [self, tensor], dim=self.ndim - 1 ) - elif mode == "cross": + return new_label_tensor + if mode == "cross": # Crete tensor and call cat on last dimension tensor1 = self tensor2 = tensor @@ -368,9 +400,8 @@ def append(self, tensor, mode="std"): new_label_tensor = LabelTensor.cat( [tensor1, tensor2], dim=self.ndim - 1 ) - else: - raise ValueError('mode must be either "std" or "cross"') - return new_label_tensor + return new_label_tensor + raise ValueError('mode must be either "std" or "cross"') @staticmethod def vstack(label_tensors): @@ -461,7 +492,7 @@ def __getitem__(self, index): # Update labels based on the index offset = 0 for dim, idx in enumerate(index): - if dim in self.stored_labels.keys(): + if dim in self.stored_labels: if isinstance(idx, int): selected_tensor = selected_tensor.unsqueeze(dim) if idx != slice(None): @@ -508,7 +539,7 @@ def arg_sort(lst): indexer = [slice(None)] * self.ndim # Assigned the sorted index to the specified dimension indexer[dim] = sorted_index - return self.__getitem__(tuple(indexer)) + return self[tuple(indexer)] def __deepcopy__(self, memo): """ @@ -539,7 +570,7 @@ def permute(self, *dims): # Update lables labels = self._labels keys_list = list(*dims) - labels = {keys_list.index(k): labels[k] for k in labels.keys()} + labels = {keys_list.index(k): v for k, v in labels.items()} # Assign labels to the new tensor tensor._labels = labels diff --git a/pina/loss/__init__.py b/pina/loss/__init__.py index a4d7f69d5..178b84782 100644 --- a/pina/loss/__init__.py +++ b/pina/loss/__init__.py @@ -1,3 +1,7 @@ +""" +Module for loss functions and weighting functions. +""" + __all__ = [ "LossInterface", "LpLoss", diff --git a/pina/loss/loss_interface.py b/pina/loss/loss_interface.py index b6b4dc1cf..227e2a6f6 100644 --- a/pina/loss/loss_interface.py +++ b/pina/loss/loss_interface.py @@ -18,8 +18,8 @@ def __init__(self, reduction="mean"): will be applied, ``mean``: the sum of the output will be divided by the number of elements in the output, ``sum``: the output will be summed. Note: ``size_average`` and ``reduce`` are in the - process of being deprecated, and in the meantime, specifying either of - those two args will override ``reduction``. Default: ``mean``. + process of being deprecated, and in the meantime, specifying either + of those two args will override ``reduction``. Default: ``mean``. """ super().__init__(reduction=reduction, size_average=None, reduce=None) @@ -32,7 +32,6 @@ def forward(self, input, target): :return: Loss evaluation. :rtype: torch.Tensor """ - pass def _reduction(self, loss): """Simple helper function to check reduction @@ -42,8 +41,8 @@ def _reduction(self, loss): will be applied, ``mean``: the sum of the output will be divided by the number of elements in the output, ``sum``: the output will be summed. Note: ``size_average`` and ``reduce`` are in the - process of being deprecated, and in the meantime, specifying either of - those two args will override ``reduction``. Default: ``mean``. + process of being deprecated, and in the meantime, specifying either + of those two args will override ``reduction``. Default: ``mean``. :type reduction: str :param loss: Loss tensor for each element. :type loss: torch.Tensor diff --git a/pina/loss/lp_loss.py b/pina/loss/lp_loss.py index b39b16e5f..03f447350 100644 --- a/pina/loss/lp_loss.py +++ b/pina/loss/lp_loss.py @@ -23,7 +23,8 @@ class LpLoss(LossInterface): .. 
math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }{[\sum_{i=1}^{D}|y_n^i|^p]},
+        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }
+        {[\sum_{i=1}^{D}|y_n^i|^p]},

     where :math:`N` is the batch size. If ``reduction`` is not ``none``
     (default ``mean``), then:
@@ -38,16 +39,19 @@ class LpLoss(LossInterface):
     :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
     of :math:`n` elements each.

-    The sum operation still operates over all the elements, and divides by :math:`n`.
+    The sum operation still operates over all the elements, and divides by
+    :math:`n`.

-    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
+    The division by :math:`n` can be avoided if one sets ``reduction`` to
+    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
         :param int p: Degree of Lp norm. It specifies the type of norm to be
-            calculated. See `list of possible orders in torch linalg
-            `_ to
+            calculated. See ``torch.linalg.norm`` for
             possible degrees. Default 2 (euclidean norm).
         :param str reduction: Specifies the reduction to apply to the output:
             ``none`` | ``mean`` | ``sum``. ``none``: no reduction
diff --git a/pina/loss/power_loss.py b/pina/loss/power_loss.py
index 09bf94a09..695ef4d32 100644
--- a/pina/loss/power_loss.py
+++ b/pina/loss/power_loss.py
@@ -17,13 +17,15 @@ class PowerLoss(LossInterface):

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{1}{D}\left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],
+        l_n = \frac{1}{D}\left[\sum_{i=1}^{D}
+        \left| x_n^i - y_n^i \right|^p\right],

     If ``'relative'`` is set to true:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }{\sum_{i=1}^{D}|y_n^i|^p},
+        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
+        {\sum_{i=1}^{D}|y_n^i|^p},

     where :math:`N` is the batch size. If ``reduction`` is not ``none``
     (default ``mean``), then:
@@ -38,16 +40,19 @@ class PowerLoss(LossInterface):
     :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
     of :math:`n` elements each.

-    The sum operation still operates over all the elements, and divides by :math:`n`.
+    The sum operation still operates over all the elements, and divides by
+    :math:`n`.

-    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
+    The division by :math:`n` can be avoided if one sets ``reduction`` to
+    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
         :param int p: Degree of Lp norm. It specifies the type of norm to be
-            calculated. See `list of possible orders in torch linalg
-            `_ to
+            calculated. See ``torch.linalg.norm`` to
             see the possible degrees. Default 2 (euclidean norm).
         :param str reduction: Specifies the reduction to apply to the output:
             ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
diff --git a/pina/loss/weighting_interface.py b/pina/loss/weighting_interface.py
index d8ce4b525..56a17b8ef 100644
--- a/pina/loss/weighting_interface.py
+++ b/pina/loss/weighting_interface.py
@@ -20,4 +20,3 @@ def aggregate(self, losses):
         :return: The losses aggregation. It should be a scalar Tensor.
         :rtype: torch.Tensor
         """
-        pass
diff --git a/pina/model/__init__.py b/pina/model/__init__.py
index 502e15d81..ac36fcb16 100644
--- a/pina/model/__init__.py
+++ b/pina/model/__init__.py
@@ -1,3 +1,7 @@
+"""
+Module containing the neural network models.
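+
+A typical import, shown as a minimal sketch (layer sizes are illustrative):
+
+>>> from pina.model import FeedForward
+>>> net = FeedForward(input_dimensions=2, output_dimensions=1)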
+""" + __all__ = [ "FeedForward", "ResidualFeedForward", diff --git a/pina/model/average_neural_operator.py b/pina/model/average_neural_operator.py index c0ddb12a2..1b1bcfe02 100644 --- a/pina/model/average_neural_operator.py +++ b/pina/model/average_neural_operator.py @@ -1,8 +1,8 @@ """Module Averaging Neural Operator.""" import torch -from torch import nn, cat -from .block import AVNOBlock +from torch import nn +from .block.average_neural_operator_block import AVNOBlock from .kernel_neural_operator import KernelNeuralOperator from ..utils import check_consistency @@ -110,9 +110,9 @@ def forward(self, x): """ points_tmp = x.extract(self.coordinates_indices) new_batch = x.extract(self.field_indices) - new_batch = cat((new_batch, points_tmp), dim=-1) + new_batch = torch.cat((new_batch, points_tmp), dim=-1) new_batch = self._lifting_operator(new_batch) new_batch = self._integral_kernels(new_batch) - new_batch = cat((new_batch, points_tmp), dim=-1) + new_batch = torch.cat((new_batch, points_tmp), dim=-1) new_batch = self._projection_operator(new_batch) return new_batch diff --git a/pina/model/block/__init__.py b/pina/model/block/__init__.py index 64fb150ad..9b6bac309 100644 --- a/pina/model/block/__init__.py +++ b/pina/model/block/__init__.py @@ -1,3 +1,7 @@ +""" +Module containing the building blocks for models. +""" + __all__ = [ "ContinuousConvBlock", "ResidualBlock", diff --git a/pina/model/block/average_neural_operator_block.py b/pina/model/block/average_neural_operator_block.py index fd682a5ce..010e80bc7 100644 --- a/pina/model/block/average_neural_operator_block.py +++ b/pina/model/block/average_neural_operator_block.py @@ -1,6 +1,7 @@ """Module for Averaging Neural Operator Layer class.""" -from torch import nn, mean +import torch +from torch import nn from ...utils import check_consistency @@ -64,4 +65,4 @@ def forward(self, x): :return: The output tensor obtained from Average Neural Operator Block. 
         :rtype: torch.Tensor
         """
-        return self._func(self._nn(x) + mean(x, dim=1, keepdim=True))
+        return self._func(self._nn(x) + torch.mean(x, dim=1, keepdim=True))
diff --git a/pina/model/block/convolution.py b/pina/model/block/convolution.py
index c6ae4e240..1849399fe 100644
--- a/pina/model/block/convolution.py
+++ b/pina/model/block/convolution.py
@@ -75,34 +75,29 @@ def __init__(
         """
         super().__init__()

-        if isinstance(input_numb_field, int):
-            self._input_numb_field = input_numb_field
-        else:
+        if not isinstance(input_numb_field, int):
             raise ValueError("input_numb_field must be int.")
+        self._input_numb_field = input_numb_field

-        if isinstance(output_numb_field, int):
-            self._output_numb_field = output_numb_field
-        else:
+        if not isinstance(output_numb_field, int):
             raise ValueError("input_numb_field must be int.")
+        self._output_numb_field = output_numb_field

-        if isinstance(filter_dim, (tuple, list)):
-            vect = filter_dim
-        else:
+        if not isinstance(filter_dim, (tuple, list)):
             raise ValueError("filter_dim must be tuple or list.")
+        vect = filter_dim
         vect = torch.tensor(vect)
         self.register_buffer("_dim", vect, persistent=False)

-        if isinstance(stride, dict):
-            self._stride = Stride(stride)
-        else:
+        if not isinstance(stride, dict):
             raise ValueError("stride must be dictionary.")
+        self._stride = Stride(stride)

         self._net = model

-        if isinstance(optimize, bool):
-            self._optimize = optimize
-        else:
+        if not isinstance(optimize, bool):
             raise ValueError("optimize must be bool.")
+        self._optimize = optimize

         # choosing how to initialize based on optimization
         if self._optimize:
@@ -119,13 +114,18 @@ def __init__(
         if no_overlap:
             raise NotImplementedError
-            self.transpose = self.transpose_no_overlap
-        else:
-            self.transpose = self.transpose_overlap
+
+        self.transpose = self.transpose_overlap

     class DefaultKernel(torch.nn.Module):
+        """
+        Default kernel network, used when no custom model is passed.
+        """

        def __init__(self, input_dim, output_dim):
+            """
+            Initialize the default kernel.
+
+            :param int input_dim: Input dimension of the kernel network.
+            :param int output_dim: Output dimension of the kernel network.
+            """
             super().__init__()
             assert isinstance(input_dim, int)
             assert isinstance(output_dim, int)
@@ -138,44 +138,66 @@ def __init__(self, input_dim, output_dim):
             )

         def forward(self, x):
+            """
+            Forward pass of the default kernel.
+            """
             return self._model(x)

     @property
     def net(self):
+        """
+        Return the kernel model.
+        """
         return self._net

     @property
     def stride(self):
+        """
+        Return the stride of the convolution.
+        """
         return self._stride

     @property
     def filter_dim(self):
+        """
+        Return the dimension of the filter.
+        """
         return self._dim

     @property
     def input_numb_field(self):
+        """
+        Return the number of input fields.
+        """
         return self._input_numb_field

     @property
     def output_numb_field(self):
+        """
+        Return the number of output fields.
+        """
         return self._output_numb_field

-    @property
     @abstractmethod
     def forward(self, X):
-        pass
+        """
+        Abstract forward pass of the convolution.
+        """

-    @property
     @abstractmethod
     def transpose_overlap(self, X):
-        pass
+        """
+        Abstract transpose convolution for overlapping filters.
+        """

-    @property
     @abstractmethod
     def transpose_no_overlap(self, X):
-        pass
+        """
+        Abstract transpose convolution for non-overlapping filters.
+        """

-    @property
     @abstractmethod
-    def _initialize_convolution(self, X, type):
-        pass
+    def _initialize_convolution(self, X, type_):
+        """
+        Abstract initialization of the convolution grid and indices.
+        """
diff --git a/pina/model/block/convolution_2d.py b/pina/model/block/convolution_2d.py
index 4c085338e..68df175d3 100644
--- a/pina/model/block/convolution_2d.py
+++ b/pina/model/block/convolution_2d.py
@@ -1,9 +1,9 @@
 """Module for Continuous Convolution class"""

+import torch
 from .convolution import BaseContinuousConv
 from .utils_convolution import check_point, map_points_
 from .integral import Integral
-import torch


 class ContinuousConvBlock(BaseContinuousConv):
@@ -27,8 +27,9 @@ class ContinuousConvBlock(BaseContinuousConv):
     .. seealso::
         **Original reference**: Coscia, D., Meneghetti, L., Demo, N. et al.
-        *A continuous convolutional trainable filter for modelling unstructured data*.
-        Comput Mech 72, 253–265 (2023). DOI ``_
+        *A continuous convolutional trainable filter for modelling
+        unstructured data*. Comput Mech 72, 253–265 (2023).
+        DOI ``_

     """
@@ -45,7 +46,8 @@ def __init__(
         """
         :param input_numb_field: Number of fields :math:`N_{in}` in the input.
         :type input_numb_field: int
-        :param output_numb_field: Number of fields :math:`N_{out}` in the output.
+        :param output_numb_field: Number of fields :math:`N_{out}` in the
+            output.
         :type output_numb_field: int
         :param filter_dim: Dimension of the filter.
         :type filter_dim: tuple(int) | list(int)
@@ -134,6 +136,11 @@ def forward(self, x):
         # stride for continuous convolution overridden
         self._stride = self._stride._stride_discrete

+        # Define variables
+        self._index = None
+        self._grid = None
+        self._grid_transpose = None
+
     def _spawn_networks(self, model):
         """
         Private method to create a collection of kernels
@@ -152,7 +159,7 @@ def _spawn_networks(self, model):
         else:
             if not isinstance(model, object):
                 raise ValueError(
-                    "Expected a python class inheriting" " from torch.nn.Module"
+                    "Expected a python class inheriting from torch.nn.Module"
                 )

         for _ in range(self._input_numb_field * self._output_numb_field):
@@ -271,7 +278,7 @@ def _make_grid_transpose(self, X):
         # save on tmp
         self._grid_transpose = tmp

-    def _make_grid(self, X, type):
+    def _make_grid(self, X, type_):
         """
         Private method to create convolution grid.

@@ -283,14 +290,15 @@ def _make_grid(self, X, type):
         """

         # choose the type of convolution
-        if type == "forward":
-            return self._make_grid_forward(X)
-        elif type == "inverse":
+        if type_ == "forward":
+            self._make_grid_forward(X)
+            return
+        if type_ == "inverse":
             self._make_grid_transpose(X)
-        else:
-            raise TypeError
+            return
+        raise TypeError

-    def _initialize_convolution(self, X, type="forward"):
+    def _initialize_convolution(self, X, type_="forward"):
         """
         Private method to intialize the convolution.
         The convolution is initialized by setting a grid and
@@ -304,7 +312,7 @@ def _initialize_convolution(self, X, type="forward"):
         """

         # variable for the convolution
-        self._make_grid(X, type)
+        self._make_grid(X, type_)

         # calculate the index
         self._find_index(X)
@@ -321,7 +329,7 @@ def forward(self, X):
         # initialize convolution
         if self.training:
             # we choose what to do based on optimization
-            self._choose_initialization(X, type="forward")
+            self._choose_initialization(X, type_="forward")
         else:
             # we always initialize on testing
             self._initialize_convolution(X, "forward")
@@ -383,12 +391,14 @@ def transpose_no_overlap(self, integrals, X):
         :type integral: torch.tensor
         :param X: Input data. Expect tensor of shape
             :math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{in}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{in}` is the number of input fields, :math:`M` the number
+            of points
             in the mesh, :math:`D` the dimension of the problem.
         :type X: torch.Tensor
         :return: Feed forward transpose convolution. Tensor of shape
             :math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{out}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{out}` is the number of output fields, :math:`M` the
+            number of points
             in the mesh, :math:`D` the dimension of the problem.
         :rtype: torch.Tensor

@@ -399,7 +409,7 @@ def transpose_no_overlap(self, integrals, X):
         # initialize convolution
         if self.training:
             # we choose what to do based on optimization
-            self._choose_initialization(X, type="inverse")
+            self._choose_initialization(X, type_="inverse")
         else:
             # we always initialize on testing
             self._initialize_convolution(X, "inverse")
@@ -466,12 +476,14 @@ def transpose_overlap(self, integrals, X):
         :type integral: torch.tensor
         :param X: Input data. Expect tensor of shape
             :math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{in}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{in}` is the number of input fields, :math:`M` the number
+            of points
             in the mesh, :math:`D` the dimension of the problem.
         :type X: torch.Tensor
         :return: Feed forward transpose convolution. Tensor of shape
             :math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{out}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{out}` is the number of output fields, :math:`M` the
+            number of points
             in the mesh, :math:`D` the dimension of the problem.

         :rtype: torch.Tensor
@@ -481,7 +493,7 @@ def transpose_overlap(self, integrals, X):
         # initialize convolution
         if self.training:
             # we choose what to do based on optimization
-            self._choose_initialization(X, type="inverse")
+            self._choose_initialization(X, type_="inverse")
         else:
             # we always initialize on testing
             self._initialize_convolution(X, "inverse")
@@ -491,7 +503,7 @@ def transpose_overlap(self, integrals, X):
         conv_transposed = self._grid_transpose.clone().detach()

         # list to iterate for calculating nn output
-        tmp = [i for i in range(self._output_numb_field)]
+        tmp = list(range(self._output_numb_field))
         iterate_conv = [
             item for item in tmp for _ in range(self._input_numb_field)
         ]
diff --git a/pina/model/block/embedding.py b/pina/model/block/embedding.py
index 77e340d32..270ca1d05 100644
--- a/pina/model/block/embedding.py
+++ b/pina/model/block/embedding.py
@@ -2,7 +2,6 @@

 import torch
 from pina.utils import check_consistency
-from typing import Union, Sequence


 class PeriodicBoundaryEmbedding(torch.nn.Module):
@@ -18,8 +17,9 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
         u(\mathbf{x}) = u(\mathbf{x} + n \mathbf{L})\;\;
         \forall n\in\mathbb{N}.

-    The :meth:`PeriodicBoundaryEmbedding` augments the input such that the periodic conditons
-    is guarantee. The input is augmented by the following formula:
+    The :class:`PeriodicBoundaryEmbedding` augments the input such that the
+    periodic conditions are guaranteed. The input is augmented by the
+    following formula:

     .. math::
         \mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[1,
@@ -135,13 +135,13 @@ def _get_vars(self, x, indeces):
         if isinstance(indeces[0], str):
             try:
                 return x.extract(indeces)
-            except AttributeError:
+            except AttributeError as e:
                 raise RuntimeError(
                     "Not possible to extract input variables from tensor."
                     " Ensure that the passed tensor is a LabelTensor or"
                     " pass list of integers to extract variables. For"
                     " more information refer to warning in the documentation."
-                )
+                ) from e
         elif isinstance(indeces[0], int):
             return x[..., indeces]
         else:
@@ -159,11 +159,14 @@ def period(self):


 class FourierFeatureEmbedding(torch.nn.Module):
+    """
+    Fourier Feature Embedding class for encoding input features
+    using random Fourier features.
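+
+    :Example:
+        >>> # a minimal sketch; dimensions and sigma are illustrative
+        >>> embedding = FourierFeatureEmbedding(
+        ...     input_dimension=2, output_dimension=64, sigma=1.0
+        ... )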
+ """ + def __init__(self, input_dimension, output_dimension, sigma): r""" - Fourier Feature Embedding class for encoding input features - using random Fourier features.This class applies a Fourier - transformation to the input features, + This class applies a Fourier transformation to the input features, which can help in learning high-frequency variations in data. If multiple sigma are provided, the class supports multiscale feature embedding, creating embeddings for diff --git a/pina/model/block/fourier_block.py b/pina/model/block/fourier_block.py index 22bebe17b..06f7efb16 100644 --- a/pina/model/block/fourier_block.py +++ b/pina/model/block/fourier_block.py @@ -1,8 +1,12 @@ +""" +Module for Fourier Block implementation. +""" + import torch -import torch.nn as nn +from torch import nn from ...utils import check_consistency -from . import ( +from .spectral import ( SpectralConvBlock1D, SpectralConvBlock2D, SpectralConvBlock3D, @@ -17,9 +21,9 @@ class FourierBlock1D(nn.Module): .. seealso:: - **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K., Liu, B., - Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier neural operator for - parametric partial differential equations*. + **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K., + Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier + neural operator for parametric partial differential equations*. DOI: `arXiv preprint arXiv:2010.08895. `_ @@ -32,24 +36,26 @@ def __init__( n_modes, activation=torch.nn.Tanh, ): - super().__init__() """ PINA implementation of Fourier block one dimension. The module computes the spectral convolution of the input with a linear kernel in the fourier space, and then it maps the input back to the physical space. The output is then added to a Linear tranformation of the input in the physical space. Finally an activation function is - applied to the output. + applied to the output. The block expects an input of size ``[batch, input_numb_fields, N]`` and returns an output of size ``[batch, output_numb_fields, N]``. :param int input_numb_fields: The number of channels for the input. :param int output_numb_fields: The number of channels for the output. - :param list | tuple n_modes: Number of modes to select for each dimension. - It must be at most equal to the ``floor(N/2)+1``. + :param list | tuple n_modes: Number of modes to select for each + dimension. It must be at most equal to the ``floor(N/2)+1``. :param torch.nn.Module activation: The activation function. """ + + super().__init__() + # check type consistency check_consistency(activation(), nn.Module) @@ -109,13 +115,15 @@ def __init__( input in the physical space. Finally an activation function is applied to the output. - The block expects an input of size ``[batch, input_numb_fields, Nx, Ny]`` - and returns an output of size ``[batch, output_numb_fields, Nx, Ny]``. + The block expects an input of size + ``[batch, input_numb_fields, Nx, Ny]`` and returns an output of size + ``[batch, output_numb_fields, Nx, Ny]``. :param int input_numb_fields: The number of channels for the input. :param int output_numb_fields: The number of channels for the output. - :param list | tuple n_modes: Number of modes to select for each dimension. - It must be at most equal to the ``floor(Nx/2)+1`` and ``floor(Ny/2)+1``. + :param list | tuple n_modes: Number of modes to select for each + dimension. It must be at most equal to the ``floor(Nx/2)+1`` + and ``floor(Ny/2)+1``. 
:param torch.nn.Module activation: The activation function. """ super().__init__() @@ -172,21 +180,22 @@ def __init__( activation=torch.nn.Tanh, ): """ - PINA implementation of Fourier block three dimensions. The module computes - the spectral convolution of the input with a linear kernel in the - fourier space, and then it maps the input back to the physical + PINA implementation of Fourier block three dimensions. The module + computes the spectral convolution of the input with a linear kernel in + the fourier space, and then it maps the input back to the physical space. The output is then added to a Linear tranformation of the input in the physical space. Finally an activation function is applied to the output. - The block expects an input of size ``[batch, input_numb_fields, Nx, Ny, Nz]`` - and returns an output of size ``[batch, output_numb_fields, Nx, Ny, Nz]``. + The block expects an input of size + ``[batch, input_numb_fields, Nx, Ny, Nz]`` and returns an output of size + ``[batch, output_numb_fields, Nx, Ny, Nz]``. :param int input_numb_fields: The number of channels for the input. :param int output_numb_fields: The number of channels for the output. - :param list | tuple n_modes: Number of modes to select for each dimension. - It must be at most equal to the ``floor(Nx/2)+1``, ``floor(Ny/2)+1`` - and ``floor(Nz/2)+1``. + :param list | tuple n_modes: Number of modes to select for each + dimension. It must be at most equal to the ``floor(Nx/2)+1``, + ``floor(Ny/2)+1`` and ``floor(Nz/2)+1``. :param torch.nn.Module activation: The activation function. """ super().__init__() diff --git a/pina/model/block/gno_block.py b/pina/model/block/gno_block.py index f3913245c..c1d470dfa 100644 --- a/pina/model/block/gno_block.py +++ b/pina/model/block/gno_block.py @@ -1,10 +1,14 @@ +""" +Module containing the Graph Integral Layer class. +""" + import torch from torch_geometric.nn import MessagePassing class GNOBlock(MessagePassing): """ - TODO: Add documentation + Graph Neural Operator (GNO) Block using PyG MessagePassing. """ def __init__( @@ -18,21 +22,21 @@ def __init__( external_func=None, ): """ - Initialize the Graph Integral Layer, inheriting from the MessagePassing class of PyTorch Geometric. - - :param width: The width of the hidden representation of the nodes features - :type width: int - :param edges_features: The number of edge features. - :type edges_features: int - :param n_layers: The number of layers in the Feed Forward Neural Network used to compute the representation of the edges features. - :type n_layers: int + Initialize the GNOBlock. + + :param width: Hidden dimension of node features. + :param edges_features: Number of edge features. + :param n_layers: Number of layers in edge transformation MLP. """ - from pina.model import FeedForward - super(GNOBlock, self).__init__(aggr="mean") + from ...model.feed_forward import FeedForward + + super().__init__(aggr="mean") # Uses PyG's default aggregation self.width = width + if layers is None and inner_size is None: inner_size = width + self.dense = FeedForward( input_dimensions=edges_features, output_dimensions=width**2, @@ -41,48 +45,50 @@ def __init__( inner_size=inner_size, func=internal_func, ) + self.W = torch.nn.Linear(width, width) self.func = external_func() - def message(self, x_j, edge_attr): + def message_and_aggregate(self, edge_index, x, edge_attr): """ - This function computes the message passed between the nodes of the graph. Overwrite the default message function defined in the MessagePassing class. 
-
-        :param x_j: The node features of the neighboring.
-        :type x_j: torch.Tensor
-        :param edge_attr: The edge features.
-        :type edge_attr: torch.Tensor
-        :return: The message passed between the nodes of the graph.
-        :rtype: torch.Tensor
+        Combines message and aggregation.
+
+        :param edge_index: COO format edge indices.
+        :param x: Node feature matrix [num_nodes, width].
+        :param edge_attr: Edge features [num_edges, edge_dim].
+        :return: Aggregated messages.
         """
-        x = self.dense(edge_attr).view(-1, self.width, self.width)
-        return torch.einsum("bij,bj->bi", x, x_j)
+        # Edge features are transformed into a matrix of shape
+        # [num_edges, width, width]
+        x_ = self.dense(edge_attr).view(-1, self.width, self.width)
+        # Messages: apply each edge matrix to its source node features
+        messages = torch.einsum("bij,bj->bi", x_, x[edge_index[0]])
+        # Aggregation is performed using the mean (set in the constructor)
+        return self.aggregate(messages, edge_index[1])
+
+    def edge_update(self, edge_attr):
+        """
+        Updates edge features.
+        """
+        return edge_attr

     def update(self, aggr_out, x):
         """
-        This function updates the node features of the graph. Overwrite the default update function defined in the MessagePassing class.
-
-        :param aggr_out: The aggregated messages.
-        :type aggr_out: torch.Tensor
-        :param x: The node features.
-        :type x: torch.Tensor
-        :return: The updated node features.
-        :rtype: torch.Tensor
+        Updates node features.
+
+        :param aggr_out: Aggregated messages.
+        :param x: Node feature matrix.
+        :return: Updated node features.
         """
-        aggr_out = aggr_out + self.W(x)
-        return aggr_out
+        return aggr_out + self.W(x)

     def forward(self, x, edge_index, edge_attr):
         """
-        The forward pass of the Graph Integral Layer.
+        Forward pass of the GNOBlock.

         :param x: Node features.
-        :type x: torch.Tensor
-        :param edge_index: Edge index.
-        :type edge_index: torch.Tensor
+        :param edge_index: Edge indices.
         :param edge_attr: Edge features.
-        :type edge_attr: torch.Tensor
-        :return: Output of a single iteration over the Graph Integral Layer.
-        :rtype: torch.Tensor
+        :return: Updated node features.
         """
         return self.func(self.propagate(edge_index, x=x, edge_attr=edge_attr))
diff --git a/pina/model/block/integral.py b/pina/model/block/integral.py
index 565aec3cf..5b54bb76c 100644
--- a/pina/model/block/integral.py
+++ b/pina/model/block/integral.py
@@ -1,10 +1,18 @@
+"""
+Module for performing integrals for continuous convolution.
+"""
+
 import torch


-class Integral(object):
+class Integral:
+    """
+    Integral class for continuous convolution.
+    """

     def __init__(self, param):
-        """Integral class for continous convolution
+        """
+        Initialize the integral class.

         :param param: type of continuous convolution
         :type param: string
diff --git a/pina/model/block/low_rank_block.py b/pina/model/block/low_rank_block.py
index dfb6864d5..06b59d7dc 100644
--- a/pina/model/block/low_rank_block.py
+++ b/pina/model/block/low_rank_block.py
@@ -2,8 +2,7 @@

 import torch

-from pina.utils import check_consistency
-import pina.model as pm  # avoid circular import
+from ...utils import check_consistency


 class LowRankBlock(torch.nn.Module):
@@ -78,9 +77,10 @@ def __init__(
             basis function network.
""" super().__init__() + from ..feed_forward import FeedForward # Assignment (check consistency inside FeedForward) - self._basis = pm.FeedForward( + self._basis = FeedForward( input_dimensions=input_dimensions, output_dimensions=2 * rank * embedding_dimenion, inner_size=inner_size, diff --git a/pina/model/block/pod_block.py b/pina/model/block/pod_block.py index 3e5627487..aff359ffe 100644 --- a/pina/model/block/pod_block.py +++ b/pina/model/block/pod_block.py @@ -1,9 +1,6 @@ """Module for Base Continuous Convolution class.""" -from abc import ABCMeta, abstractmethod import torch -from .stride import Stride -from .utils_convolution import optimizing class PODBlock(torch.nn.Module): @@ -14,7 +11,8 @@ class PODBlock(torch.nn.Module): The layer is not trainable. .. note:: - All the POD modes are stored in memory, avoiding to recompute them when the rank changes but increasing the memory usage. + All the POD modes are stored in memory, avoiding to recompute them when + the rank changes but increasing the memory usage. """ def __init__(self, rank, scale_coefficients=True): @@ -50,7 +48,8 @@ def rank(self, value): @property def basis(self): """ - The POD basis. It is a matrix whose columns are the first `self.rank` POD modes. + The POD basis. It is a matrix whose columns are the first `self.rank` + POD modes. :rtype: torch.Tensor """ @@ -68,7 +67,7 @@ def scaler(self): :rtype: dict """ if self._scaler is None: - return + return None return { "mean": self._scaler["mean"][: self.rank], @@ -114,7 +113,8 @@ def _fit_scaler(self, coeffs): def _fit_pod(self, X): """ - Private method that computes the POD basis of the given tensor and stores it in the private member `_basis`. + Private method that computes the POD basis of the given tensor and + stores it in the private member `_basis`. :param torch.Tensor X: The tensor to be reduced. """ diff --git a/pina/model/block/residual.py b/pina/model/block/residual.py index edd9b07c0..db9f4f2b6 100644 --- a/pina/model/block/residual.py +++ b/pina/model/block/residual.py @@ -1,5 +1,9 @@ +""" +TODO: Add title. +""" + import torch -import torch.nn as nn +from torch import nn from ...utils import check_consistency @@ -35,7 +39,8 @@ def __init__( (first block). :param bool spectral_norm: Apply spectral normalization to feedforward layers, defaults to False. - :param torch.nn.Module activation: Cctivation function after first block. + :param torch.nn.Module activation: Cctivation function after first + block. """ super().__init__() @@ -81,19 +86,17 @@ def _spect_norm(self, x): return nn.utils.spectral_norm(x) if self._spectral_norm else x -import torch -import torch.nn as nn - - class EnhancedLinear(torch.nn.Module): """ A wrapper class for enhancing a linear layer with activation and/or dropout. :param layer: The linear layer to be enhanced. :type layer: torch.nn.Module - :param activation: The activation function to be applied after the linear layer. + :param activation: The activation function to be applied after the linear + layer. :type activation: torch.nn.Module - :param dropout: The dropout probability to be applied after the activation (if provided). + :param dropout: The dropout probability to be applied after the activation + (if provided). :type dropout: float :Example: @@ -110,9 +113,11 @@ def __init__(self, layer, activation=None, dropout=None): :param layer: The linear layer to be enhanced. :type layer: torch.nn.Module - :param activation: The activation function to be applied after the linear layer. 
+        :param activation: The activation function to be applied after the
+            linear layer.
         :type activation: torch.nn.Module
-        :param dropout: The dropout probability to be applied after the activation (if provided).
+        :param dropout: The dropout probability to be applied after the
+            activation (if provided).
         :type dropout: float
         """
         super().__init__()
diff --git a/pina/model/block/spectral.py b/pina/model/block/spectral.py
index 674f3e095..ba581a982 100644
--- a/pina/model/block/spectral.py
+++ b/pina/model/block/spectral.py
@@ -1,7 +1,10 @@
+"""
+Module for spectral convolution blocks.
+"""
+
 import torch
-import torch.nn as nn
+from torch import nn
 from ...utils import check_consistency
-import warnings


 ######## 1D Spectral Convolution ###########
@@ -13,7 +16,8 @@ class SpectralConvBlock1D(nn.Module):

     def __init__(self, input_numb_fields, output_numb_fields, n_modes):
         """
-        The module computes the spectral convolution of the input with a linear kernel in the
+        The module computes the spectral convolution of the input with a linear
+        kernel in the
         fourier space, and then it maps the input back to the physical
         space.

@@ -106,17 +110,20 @@ class SpectralConvBlock2D(nn.Module):

     def __init__(self, input_numb_fields, output_numb_fields, n_modes):
         """
-        The module computes the spectral convolution of the input with a linear kernel in the
+        The module computes the spectral convolution of the input with a linear
+        kernel in the
         fourier space, and then it maps the input back to the physical
         space.

-        The block expects an input of size ``[batch, input_numb_fields, Nx, Ny]``
+        The block expects an input of size
+        ``[batch, input_numb_fields, Nx, Ny]``
         and returns an output of size ``[batch, output_numb_fields, Nx, Ny]``.

         :param int input_numb_fields: The number of channels for the input.
         :param int output_numb_fields: The number of channels for the output.
-        :param list | tuple n_modes: Number of modes to select for each dimension.
-            It must be at most equal to the ``floor(Nx/2)+1`` and ``floor(Ny/2)+1``.
+        :param list | tuple n_modes: Number of modes to select for each
+            dimension. It must be at most equal to the ``floor(Nx/2)+1`` and
+            ``floor(Ny/2)+1``.
         """
         super().__init__()
@@ -234,18 +241,21 @@ class SpectralConvBlock3D(nn.Module):

     def __init__(self, input_numb_fields, output_numb_fields, n_modes):
         """
-        The module computes the spectral convolution of the input with a linear kernel in the
+        The module computes the spectral convolution of the input with a
+        linear kernel in the
         fourier space, and then it maps the input back to the physical
         space.

-        The block expects an input of size ``[batch, input_numb_fields, Nx, Ny, Nz]``
-        and returns an output of size ``[batch, output_numb_fields, Nx, Ny, Nz]``.
+        The block expects an input of size
+        ``[batch, input_numb_fields, Nx, Ny, Nz]``
+        and returns an output of size
+        ``[batch, output_numb_fields, Nx, Ny, Nz]``.

         :param int input_numb_fields: The number of channels for the input.
         :param int output_numb_fields: The number of channels for the output.
-        :param list | tuple n_modes: Number of modes to select for each dimension.
-            It must be at most equal to the ``floor(Nx/2)+1``, ``floor(Ny/2)+1``
-            and ``floor(Nz/2)+1``.
+        :param list | tuple n_modes: Number of modes to select for each
+            dimension. It must be at most equal to the ``floor(Nx/2)+1``,
+            ``floor(Ny/2)+1`` and ``floor(Nz/2)+1``.
         """
         super().__init__()
@@ -347,7 +357,8 @@ def forward(self, x):
             ``[batch, input_numb_fields, x, y, z]``.
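The ``floor(N/2)+1`` bound on ``n_modes`` documented in these blocks comes from the one-sided real FFT; a small standalone check (not library code):

import torch

Nx = 16
signal = torch.rand(1, 1, Nx)             # [batch, channels, Nx]
spectrum = torch.fft.rfft(signal, dim=-1)
assert spectrum.shape[-1] == Nx // 2 + 1  # hence n_modes <= floor(Nx/2)+1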
        :type x: torch.Tensor
         :return: The output tensor obtained from the
-            spectral convolution of size ``[batch, output_numb_fields, x, y, z]``.
+            spectral convolution of size
+            ``[batch, output_numb_fields, x, y, z]``.
         :rtype: torch.Tensor
         """
diff --git a/pina/model/block/stride.py b/pina/model/block/stride.py
index 7832ac4e1..34f433109 100644
--- a/pina/model/block/stride.py
+++ b/pina/model/block/stride.py
@@ -1,18 +1,25 @@
+"""
+Module for the Stride class used by continuous convolution.
+"""
+
 import torch


-class Stride(object):
+class Stride:
+    """
+    Stride class for continuous convolution.
+    """

-    def __init__(self, dict):
+    def __init__(self, dict_):
         """Stride class for continous convolution

         :param param: type of continuous convolution
         :type param: string
         """
-        self._dict_stride = dict
+        self._dict_stride = dict_
         self._stride_continuous = None
-        self._stride_discrete = self._create_stride_discrete(dict)
+        self._stride_discrete = self._create_stride_discrete(dict_)

     def _create_stride_discrete(self, my_dict):
         """Creating the list for applying the filter
@@ -46,13 +53,13 @@ def _create_stride_discrete(self, my_dict):

         # checking

-        if not all([len(s) == len(domain) for s in my_dict.values()]):
+        if not all(len(s) == len(domain) for s in my_dict.values()):
             raise IndexError("values in the dict must have all same length")

         if not all(v >= 0 for v in domain):
             raise ValueError("domain values must be greater than 0")

-        if not all(v == 1 or v == -1 or v == 0 for v in direction):
+        if not all(v in (0, -1, 1) for v in direction):
             raise ValueError("direction must be either equal to 1, -1 or 0")

         seq_jumps = [i for i, e in enumerate(jumps) if e == 0]
diff --git a/pina/model/block/utils_convolution.py b/pina/model/block/utils_convolution.py
index 5442ff48d..d8e30fed9 100644
--- a/pina/model/block/utils_convolution.py
+++ b/pina/model/block/utils_convolution.py
@@ -1,7 +1,14 @@
+"""
+Utility functions for continuous convolution.
+"""
+
 import torch


 def check_point(x, current_stride, dim):
+    """
+    Check which points fall within the current stride window.
+    """
     max_stride = current_stride + dim
     indeces = torch.logical_and(
         x[..., :-1] < max_stride, x[..., :-1] >= current_stride
     )
@@ -33,16 +40,18 @@ def optimizing(f):

     def wrapper(*args, **kwargs):

-        if kwargs["type"] == "forward":
+        if kwargs["type_"] == "forward":
             if not wrapper.has_run_inverse:
                 wrapper.has_run_inverse = True
                 return f(*args, **kwargs)

-        if kwargs["type"] == "inverse":
+        if kwargs["type_"] == "inverse":
             if not wrapper.has_run:
                 wrapper.has_run = True
                 return f(*args, **kwargs)

+        return f(*args, **kwargs)
+
     wrapper.has_run_inverse = False
     wrapper.has_run = False
diff --git a/pina/model/deeponet.py b/pina/model/deeponet.py
index 6dda342f3..29891fad9 100644
--- a/pina/model/deeponet.py
+++ b/pina/model/deeponet.py
@@ -1,9 +1,9 @@
 """Module for DeepONet model"""

+from functools import partial
 import torch
-import torch.nn as nn
+from torch import nn
 from ..utils import check_consistency, is_function
-from functools import partial


 class MIONet(torch.nn.Module):
@@ -12,8 +12,9 @@ class MIONet(torch.nn.Module):

     MIONet is a general architecture for learning Operators defined on
     the tensor product of Banach spaces. Unlike traditional machine
-    learning methods MIONet is designed to map entire functions to other functions.
-    It can be trained both with Physics Informed or Supervised learning strategies.
+    learning methods, MIONet is designed to map entire functions to other
+    functions. It can be trained with either Physics-Informed or Supervised
+    learning strategies.

     .. seealso::

@@ -37,37 +38,45 @@ def __init__(

         :param dict networks: The neural networks to use as models.
            The ``dict`` takes as key a neural network, and as value the list
             of indeces to extract from the input variable
-            in the forward pass of the neural network. If a list of ``int`` is passed,
-            the corresponding columns of the inner most entries are extracted.
-            If a list of ``str`` is passed the variables of the corresponding :py:obj:`pina.label_tensor.LabelTensor`
-            are extracted. The ``torch.nn.Module`` model has to take as input a
+            in the forward pass of the neural network. If a list of ``int``
+            is passed, the corresponding columns of the inner most entries are
+            extracted.
+            If a list of ``str`` is passed the variables of the corresponding
+            :py:obj:`pina.label_tensor.LabelTensor` are extracted. The
+            ``torch.nn.Module`` model has to take as input a
             :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`.
-            Default implementation consist of different branch nets and one trunk nets.
+            Default implementation consists of different branch nets and one
+            trunk net.
         :param str or Callable aggregator: Aggregator to be used to aggregate
             partial results from the modules in `nets`. Partial results are
             aggregated component-wise. Available aggregators include
-            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``.
+            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max:
+            ``max``.
         :param str or Callable reduction: Reduction to be used to reduce the
             aggregated result of the modules in `nets` to the desired output
             dimension. Available reductions include
-            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``.
-        :param bool or Callable scale: Scaling the final output before returning the
-            forward pass, default ``True``.
+            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max:
+            ``max``.
+        :param bool or Callable scale: Scaling the final output before returning
+            the forward pass, default ``True``.
         :param bool or Callable translation: Translating the final output before
             returning the forward pass, default ``True``.

         .. warning::
             In the forward pass we do not check if the input is instance of
-            :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. A general rule is
-            that for a :py:obj:`pina.label_tensor.LabelTensor` input both list of integers and
-            list of strings can be passed for ``input_indeces_branch_net``
-            and ``input_indeces_trunk_net``. Differently, for a :class:`torch.Tensor`
-            only a list of integers can be passed for ``input_indeces_branch_net``
-            and ``input_indeces_trunk_net``.
+            :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`.
+            A general rule is that for a :py:obj:`pina.label_tensor.LabelTensor`
+            input both list of integers and list of strings can be passed for
+            ``input_indeces_branch_net`` and ``input_indeces_trunk_net``.
+            Differently, for a :class:`torch.Tensor` only a list of integers
+            can be passed for ``input_indeces_branch_net`` and
+            ``input_indeces_trunk_net``.

         :Example:
-            >>> branch_net1 = FeedForward(input_dimensons=1, output_dimensions=10)
-            >>> branch_net2 = FeedForward(input_dimensons=2, output_dimensions=10)
+            >>> branch_net1 = FeedForward(input_dimensions=1,
+            ...                           output_dimensions=10)
+            >>> branch_net2 = FeedForward(input_dimensions=2,
+            ...                           output_dimensions=10)
             >>> trunk_net = FeedForward(input_dimensons=1, output_dimensions=10)
             >>> networks = {branch_net1 : ['x'],
                             branch_net2 : ['x', 'y'],
@@ -125,7 +134,7 @@ def __init__(

         if not all(map(lambda x: x == shapes[0], shapes)):
             raise ValueError(
-                "The passed networks have not the same " "output dimension."
+ "The passed networks have not the same output dimension." ) # assign trunk and branch net with their input indeces @@ -163,7 +172,7 @@ def _symbol_functions(**kwargs): } def _init_aggregator(self, aggregator): - aggregator_funcs = DeepONet._symbol_functions(dim=2) + aggregator_funcs = self._symbol_functions(dim=2) if aggregator in aggregator_funcs: aggregator_func = aggregator_funcs[aggregator] elif isinstance(aggregator, nn.Module) or is_function(aggregator): @@ -175,7 +184,7 @@ def _init_aggregator(self, aggregator): self._aggregator_type = aggregator def _init_reduction(self, reduction): - reduction_funcs = DeepONet._symbol_functions(dim=-1) + reduction_funcs = self._symbol_functions(dim=-1) if reduction in reduction_funcs: reduction_func = reduction_funcs[reduction] elif isinstance(reduction, nn.Module) or is_function(reduction): @@ -190,13 +199,13 @@ def _get_vars(self, x, indeces): if isinstance(indeces[0], str): try: return x.extract(indeces) - except AttributeError: + except AttributeError as e: raise RuntimeError( "Not possible to extract input variables from tensor." " Ensure that the passed tensor is a LabelTensor or" " pass list of integers to extract variables. For" " more information refer to warning in the documentation." - ) + ) from e elif isinstance(indeces[0], int): return x[..., indeces] else: @@ -209,7 +218,8 @@ def forward(self, x): """ Defines the computation performed at every call. - :param LabelTensor or torch.Tensor x: The input tensor for the forward call. + :param LabelTensor or torch.Tensor x: The input tensor for the forward + call. :return: The output computed by the DeepONet model. :rtype: LabelTensor or torch.Tensor """ @@ -225,7 +235,7 @@ def forward(self, x): # reduce output_ = self._reduction(aggregated) - if self._reduction_type in DeepONet._symbol_functions(dim=-1): + if self._reduction_type in self._symbol_functions(dim=-1): output_ = output_.reshape(-1, 1) # scale and translate @@ -309,47 +319,55 @@ def __init__( ): """ :param torch.nn.Module branch_net: The neural network to use as branch - model. It has to take as input a :py:obj:`pina.label_tensor.LabelTensor` - or :class:`torch.Tensor`. The number of dimensions of the output has - to be the same of the ``trunk_net``. + model. It has to take as input a + :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. + The number of dimensions of the output has to be the same of the + ``trunk_net``. :param torch.nn.Module trunk_net: The neural network to use as trunk - model. It has to take as input a :py:obj:`pina.label_tensor.LabelTensor` - or :class:`torch.Tensor`. The number of dimensions of the output - has to be the same of the ``branch_net``. + model. It has to take as input a + :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. + The number of dimensions of the output has to be the same of the + ``branch_net``. :param list(int) or list(str) input_indeces_branch_net: List of indeces to extract from the input variable in the forward pass for the - branch net. If a list of ``int`` is passed, the corresponding columns - of the inner most entries are extracted. If a list of ``str`` is passed - the variables of the corresponding :py:obj:`pina.label_tensor.LabelTensor` are extracted. + branch net. If a list of ``int`` is passed, the corresponding + columns of the inner most entries are extracted. If a list of + ``str`` is passed the variables of the corresponding + :py:obj:`pina.label_tensor.LabelTensor` are extracted. 
        :param list(int) or list(str) input_indeces_trunk_net: List of indeces
             to extract from the input variable in the forward pass for the
             trunk net. If a list of ``int`` is passed, the corresponding columns
-            of the inner most entries are extracted. If a list of ``str`` is passed
-            the variables of the corresponding :py:obj:`pina.label_tensor.LabelTensor` are extracted.
+            of the inner most entries are extracted. If a list of ``str`` is
+            passed the variables of the corresponding
+            :py:obj:`pina.label_tensor.LabelTensor` are extracted.
         :param str or Callable aggregator: Aggregator to be used to aggregate
             partial results from the modules in `nets`. Partial results are
             aggregated component-wise. Available aggregators include
-            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``.
+            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``,
+            max: ``max``.
         :param str or Callable reduction: Reduction to be used to reduce the
             aggregated result of the modules in `nets` to the desired output
             dimension. Available reductions include
-            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``.
-        :param bool or Callable scale: Scaling the final output before returning the
-            forward pass, default True.
+            sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``,
+            max: ``max``.
+        :param bool or Callable scale: Scaling the final output before returning
+            the forward pass, default True.
         :param bool or Callable translation: Translating the final output before
             returning the forward pass, default True.

         .. warning::
             In the forward pass we do not check if the input is instance of
-            :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. A general rule is
-            that for a :py:obj:`pina.label_tensor.LabelTensor` input both list of integers and
-            list of strings can be passed for ``input_indeces_branch_net``
-            and ``input_indeces_trunk_net``. Differently, for a :class:`torch.Tensor`
-            only a list of integers can be passed for ``input_indeces_branch_net``
-            and ``input_indeces_trunk_net``.
+            :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`.
+            A general rule is that for a :py:obj:`pina.label_tensor.LabelTensor`
+            input both list of integers and list of strings can be passed for
+            ``input_indeces_branch_net`` and ``input_indeces_trunk_net``.
+            Differently, for a :class:`torch.Tensor` only a list of integers can
+            be passed for ``input_indeces_branch_net`` and
+            ``input_indeces_trunk_net``.

         :Example:
-            >>> branch_net = FeedForward(input_dimensons=1, output_dimensions=10)
+            >>> branch_net = FeedForward(input_dimensions=1,
+            ...                          output_dimensions=10)
             >>> trunk_net = FeedForward(input_dimensons=1, output_dimensions=10)
             >>> model = DeepONet(branch_net=branch_net,
             ...                  trunk_net=trunk_net,
@@ -395,7 +413,8 @@ def forward(self, x):
         """
         Defines the computation performed at every call.

-        :param LabelTensor or torch.Tensor x: The input tensor for the forward call.
+        :param LabelTensor or torch.Tensor x: The input tensor for the forward
+            call.
         :return: The output computed by the DeepONet model.
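For intuition, the symbol aggregator/reduction pairs documented above amount to component-wise operations on stacked net outputs; a hedged sketch (not the library's exact internals — classic DeepONet corresponds to aggregator ``*`` followed by reduction ``+``):

import torch

branch_out = torch.randn(10, 8)  # [batch, hidden]
trunk_out = torch.randn(10, 8)   # [batch, hidden]
stacked = torch.stack([branch_out, trunk_out], dim=2)  # [batch, hidden, nets]
aggregated = stacked.prod(dim=2)                # '*' aggregator, component-wise
output = aggregated.sum(dim=-1).reshape(-1, 1)  # '+' reduction -> [batch, 1]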
:rtype: LabelTensor or torch.Tensor """ diff --git a/pina/model/feed_forward.py b/pina/model/feed_forward.py index 5e2566cf8..a10652d49 100644 --- a/pina/model/feed_forward.py +++ b/pina/model/feed_forward.py @@ -1,7 +1,7 @@ """Module for FeedForward model""" import torch -import torch.nn as nn +from torch import nn from ..utils import check_consistency from .block.residual import EnhancedLinear @@ -13,10 +13,12 @@ class FeedForward(torch.nn.Module): :param int input_dimensions: The number of input components of the model. Expected tensor shape of the form :math:`(*, d)`, where * - means any number of dimensions including none, and :math:`d` the ``input_dimensions``. + means any number of dimensions including none, and :math:`d` the + ``input_dimensions``. :param int output_dimensions: The number of output components of the model. Expected tensor shape of the form :math:`(*, d)`, where * - means any number of dimensions including none, and :math:`d` the ``output_dimensions``. + means any number of dimensions including none, and :math:`d` the + ``output_dimensions``. :param int inner_size: number of neurons in the hidden layer(s). Default is 20. :param int n_layers: number of hidden layers. Default is 2. @@ -24,9 +26,9 @@ class FeedForward(torch.nn.Module): :class:`torch.nn.Module` is passed, this is used as activation function after any layers, except the last one. If a list of Modules is passed, they are used as activation functions at any layers, in order. - :param list(int) | tuple(int) layers: a list containing the number of neurons for - any hidden layers. If specified, the parameters ``n_layers`` e - ``inner_size`` are not considered. + :param list(int) | tuple(int) layers: a list containing the number of + neurons for any hidden layers. If specified, the parameters ``n_layers`` + and ``inner_size`` are not considered. :param bool bias: If ``True`` the MLP will consider some bias. """ @@ -72,10 +74,10 @@ def __init__( raise RuntimeError("uncosistent number of layers and functions") unique_list = [] - for layer, func in zip(self.layers[:-1], self.functions): + for layer, func_ in zip(self.layers[:-1], self.functions): unique_list.append(layer) - if func is not None: - unique_list.append(func()) + if func_ is not None: + unique_list.append(func_()) unique_list.append(self.layers[-1]) self.model = nn.Sequential(*unique_list) @@ -95,24 +97,27 @@ def forward(self, x): class ResidualFeedForward(torch.nn.Module): """ The PINA implementation of feedforward network, also with skipped connection - and transformer network, as presented in **Understanding and mitigating gradient - pathologies in physics-informed neural networks** + and transformer network, as presented in **Understanding and mitigating + gradient pathologies in physics-informed neural networks** .. seealso:: **Original reference**: Wang, Sifan, Yujun Teng, and Paris Perdikaris. - *Understanding and mitigating gradient flow pathologies in physics-informed - neural networks*. SIAM Journal on Scientific Computing 43.5 (2021): A3055-A3081. + *Understanding and mitigating gradient flow pathologies in + physics-informed neural networks*. SIAM Journal on Scientific Computing + 43.5 (2021): A3055-A3081. DOI: `10.1137/20M1318043 `_ :param int input_dimensions: The number of input components of the model. Expected tensor shape of the form :math:`(*, d)`, where * - means any number of dimensions including none, and :math:`d` the ``input_dimensions``. + means any number of dimensions including none, and :math:`d` the + ``input_dimensions``. 
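A minimal usage sketch for the ``FeedForward`` interface documented just above, using only constructor arguments listed in its docstring (the values are arbitrary):

import torch
from pina.model import FeedForward

net = FeedForward(
    input_dimensions=2,   # tensors of shape (*, 2) in
    output_dimensions=1,  # tensors of shape (*, 1) out
    inner_size=20,
    n_layers=2,
)
y = net(torch.rand(16, 2))  # y.shape == torch.Size([16, 1])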
:param int output_dimensions: The number of output components of the model. Expected tensor shape of the form :math:`(*, d)`, where * - means any number of dimensions including none, and :math:`d` the ``output_dimensions``. + means any number of dimensions including none, and :math:`d` the + ``output_dimensions``. :param int inner_size: number of neurons in the hidden layer(s). Default is 20. :param int n_layers: number of hidden layers. Default is 2. @@ -148,66 +153,24 @@ def __init__( check_consistency(func, torch.nn.Module, subclass=True) check_consistency(bias, bool) - # check transformer nets - if transformer_nets is None: - transformer_nets = [ - EnhancedLinear( - nn.Linear( - in_features=input_dimensions, out_features=inner_size - ), - nn.Tanh(), - ), - EnhancedLinear( - nn.Linear( - in_features=input_dimensions, out_features=inner_size - ), - nn.Tanh(), - ), - ] - elif isinstance(transformer_nets, (list, tuple)): - if len(transformer_nets) != 2: - raise ValueError( - "transformer_nets needs to be a list of len two." - ) - for net in transformer_nets: - if not isinstance(net, nn.Module): - raise ValueError( - "transformer_nets needs to be a list of torch.nn.Module." - ) - x = torch.rand(10, input_dimensions) - try: - out = net(x) - except RuntimeError: - raise ValueError( - "transformer network input incompatible with input_dimensions." - ) - if out.shape[-1] != inner_size: - raise ValueError( - "transformer network output incompatible with inner_size." - ) - else: - RuntimeError( - "Runtime error for transformer nets, check official documentation." - ) + transformer_nets = self._check_transformer_nets( + transformer_nets, input_dimensions, inner_size + ) # assign variables - self.input_dimension = input_dimensions - self.output_dimension = output_dimensions self.transformer_nets = nn.ModuleList(transformer_nets) # build layers layers = [inner_size] * n_layers - tmp_layers = layers.copy() - tmp_layers.insert(0, self.input_dimension) + layers = layers.copy() + layers.insert(0, input_dimensions) self.layers = [] - for i in range(len(tmp_layers) - 1): - self.layers.append( - nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias) - ) + for i in range(len(layers) - 1): + self.layers.append(nn.Linear(layers[i], layers[i + 1], bias=bias)) self.last_layer = nn.Linear( - tmp_layers[len(tmp_layers) - 1], output_dimensions, bias=bias + layers[len(layers) - 1], output_dimensions, bias=bias ) if isinstance(func, list): @@ -219,8 +182,8 @@ def __init__( raise RuntimeError("uncosistent number of layers and functions") unique_list = [] - for layer, func in zip(self.layers, self.functions): - unique_list.append(EnhancedLinear(layer=layer, activation=func)) + for layer, func_ in zip(self.layers, self.functions): + unique_list.append(EnhancedLinear(layer=layer, activation=func_)) self.inner_layers = torch.nn.Sequential(*unique_list) def forward(self, x): @@ -244,3 +207,52 @@ def forward(self, x): # last layer return self.last_layer(x) + + @staticmethod + def _check_transformer_nets(transformer_nets, input_dimensions, inner_size): + # check transformer nets + if transformer_nets is None: + transformer_nets = [ + EnhancedLinear( + nn.Linear( + in_features=input_dimensions, out_features=inner_size + ), + nn.Tanh(), + ), + EnhancedLinear( + nn.Linear( + in_features=input_dimensions, out_features=inner_size + ), + nn.Tanh(), + ), + ] + elif isinstance(transformer_nets, (list, tuple)): + if len(transformer_nets) != 2: + raise ValueError( + "transformer_nets needs to be a list of len two." 
+ ) + for net in transformer_nets: + if not isinstance(net, nn.Module): + raise ValueError( + "transformer_nets needs to be a list of " + "torch.nn.Module." + ) + x = torch.rand(10, input_dimensions) + try: + out = net(x) + except RuntimeError as e: + raise ValueError( + "transformer network input incompatible with " + "input_dimensions." + ) from e + if out.shape[-1] != inner_size: + raise ValueError( + "transformer network output incompatible with " + "inner_size." + ) + else: + raise RuntimeError( + "Runtime error for transformer nets, check official " + "documentation." + ) + return transformer_nets diff --git a/pina/model/fourier_neural_operator.py b/pina/model/fourier_neural_operator.py index 0fa2d3e94..59578aee6 100644 --- a/pina/model/fourier_neural_operator.py +++ b/pina/model/fourier_neural_operator.py @@ -2,10 +2,10 @@ Fourier Neural Operator Module. """ +import warnings import torch -import torch.nn as nn +from torch import nn from ..label_tensor import LabelTensor -import warnings from ..utils import check_consistency from .block.fourier_block import FourierBlock1D, FourierBlock2D, FourierBlock3D from .kernel_neural_operator import KernelNeuralOperator @@ -57,36 +57,22 @@ def __init__( super().__init__() # check type consistency - check_consistency(dimensions, int) - check_consistency(padding, int) - check_consistency(padding_type, str) - check_consistency(inner_size, int) - check_consistency(n_layers, int) - check_consistency(func, nn.Module, subclass=True) - - if layers is not None: - if isinstance(layers, (tuple, list)): - check_consistency(layers, int) - else: - raise ValueError("layers must be tuple or list of int.") - if not isinstance(n_modes, (list, tuple, int)): - raise ValueError( - "n_modes must be a int or list or tuple of valid modes." - " More information on the official documentation." - ) + self._check_consistency( + dimensions, + padding, + padding_type, + inner_size, + n_layers, + func, + layers, + n_modes, + ) # assign padding self._padding = padding # initialize fourier layer for each dimension - if dimensions == 1: - fourier_layer = FourierBlock1D - elif dimensions == 2: - fourier_layer = FourierBlock2D - elif dimensions == 3: - fourier_layer = FourierBlock3D - else: - raise NotImplementedError("FNO implemented only for 1D/2D/3D data.") + fourier_layer = self._get_fourier_block(dimensions) # Here we build the FNO kernels by stacking Fourier Blocks @@ -113,24 +99,24 @@ def __init__( raise RuntimeError( "Uncosistent number of layers and functions." ) - elif all(isinstance(i, int) for i in n_modes): + if all(isinstance(i, int) for i in n_modes): n_modes = [n_modes] * len(layers) else: n_modes = [n_modes] * len(layers) # 4. Build the FNO network - _layers = [] tmp_layers = [input_numb_fields] + layers + [output_numb_fields] - for i in range(len(layers)): - _layers.append( + self._layers = nn.Sequential( + *[ fourier_layer( input_numb_fields=tmp_layers[i], output_numb_fields=tmp_layers[i + 1], n_modes=n_modes[i], activation=_functions[i], ) - ) - self._layers = nn.Sequential(*_layers) + for i in range(len(layers)) + ] + ) # 5. Padding values for spectral conv if isinstance(padding, int): @@ -158,14 +144,14 @@ def forward(self, x): :return: The output tensor obtained from the kernels convolution. 
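The validation moved into ``_check_transformer_nets`` boils down to a probe pattern: run a dummy batch through the user-supplied module and check the shapes. A standalone re-statement under made-up sizes (not the library code itself):

import torch
from torch import nn

input_dimensions, inner_size = 3, 20
net = nn.Linear(input_dimensions, inner_size)  # stand-in transformer net
x = torch.rand(10, input_dimensions)
try:
    out = net(x)
except RuntimeError as e:
    raise ValueError("input incompatible with input_dimensions.") from e
if out.shape[-1] != inner_size:
    raise ValueError("output incompatible with inner_size.")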
        :rtype: torch.Tensor
         """
-        if isinstance(x, LabelTensor):  # TODO remove when Network is fixed
+        if isinstance(x, LabelTensor):
             warnings.warn(
                 "LabelTensor passed as input is not allowed,"
                 " casting LabelTensor to Torch.Tensor"
             )
             x = x.as_subclass(torch.Tensor)

         # permuting the input [batch, channels, x, y, ...]
-        permutation_idx = [0, x.ndim - 1, *[i for i in range(1, x.ndim - 1)]]
+        permutation_idx = [0, x.ndim - 1, *list(range(1, x.ndim - 1))]
         x = x.permute(permutation_idx)

         # padding the input
@@ -179,11 +165,50 @@ def forward(self, x):
         x = x[idxs]

         # permuting back [batch, x, y, ..., channels]
-        permutation_idx = [0, *[i for i in range(2, x.ndim)], 1]
+        permutation_idx = [0, *list(range(2, x.ndim)), 1]
         x = x.permute(permutation_idx)

         return x

+    @staticmethod
+    def _check_consistency(
+        dimensions,
+        padding,
+        padding_type,
+        inner_size,
+        n_layers,
+        func,
+        layers,
+        n_modes,
+    ):
+        """Check the consistency of the constructor arguments."""
+        check_consistency(dimensions, int)
+        check_consistency(padding, int)
+        check_consistency(padding_type, str)
+        check_consistency(inner_size, int)
+        check_consistency(n_layers, int)
+        check_consistency(func, nn.Module, subclass=True)
+
+        if layers is not None:
+            if isinstance(layers, (tuple, list)):
+                check_consistency(layers, int)
+            else:
+                raise ValueError("layers must be tuple or list of int.")
+        if not isinstance(n_modes, (list, tuple, int)):
+            raise ValueError(
+                "n_modes must be an int or a list or tuple of valid modes."
+                " More information in the official documentation."
+            )
+
+    @staticmethod
+    def _get_fourier_block(dimensions):
+        """Return the Fourier block class matching the number of dimensions."""
+        if dimensions == 1:
+            return FourierBlock1D
+        if dimensions == 2:
+            return FourierBlock2D
+        if dimensions == 3:
+            return FourierBlock3D
+        raise NotImplementedError("FNO implemented only for 1D/2D/3D data.")


 class FNO(KernelNeuralOperator):
     """
diff --git a/pina/model/graph_neural_operator.py b/pina/model/graph_neural_operator.py
index 0e3a6d8ef..b0233f1a6 100644
--- a/pina/model/graph_neural_operator.py
+++ b/pina/model/graph_neural_operator.py
@@ -1,6 +1,10 @@
+"""
+Module for the Graph Neural Operator and Graph Neural Kernel.
+"""
+
 import torch
 from torch.nn import Tanh
-from .block import GNOBlock
+from .block.gno_block import GNOBlock
 from .kernel_neural_operator import KernelNeuralOperator


@@ -30,14 +34,20 @@ def __init__(
         :type edge_features: int
         :param n_layers: The number of kernel layers.
         :type n_layers: int
-        :param internal_n_layers: The number of layers the FF Neural Network internal to each Kernel Layer.
+        :param internal_n_layers: The number of layers of the FF Neural Network
+            internal to each Kernel Layer.
         :type internal_n_layers: int
-        :param internal_layers: Number of neurons of hidden layers(s) in the FF Neural Network inside for each Kernel Layer.
+        :param internal_layers: Number of neurons of the hidden layer(s) in the
+            FF Neural Network inside each Kernel Layer.
         :type internal_layers: list | tuple
-        :param internal_func: The activation function used inside the computation of the representation of the edge features in the Graph Integral Layer.
-        :param external_func: The activation function applied to the output of the Graph Integral Layer.
+        :param internal_func: The activation function used inside the
+            computation of the representation of the edge features in the
+            Graph Integral Layer.
+        :param external_func: The activation function applied to the output of
+            the Graph Integral Layer.
         :type external_func: torch.nn.Module
-        :param shared_weights: If ``True`` the weights of the Graph Integral Layers are shared.
+        :param shared_weights: If ``True`` the weights of the Graph Integral
+            Layers are shared.
         """
         super().__init__()
         if external_func is None:
@@ -56,7 +66,7 @@ def __init__(
                 external_func=external_func,
             )
             self.n_layers = n_layers
-            self.forward = self.forward_shared
+            self._forward_func = self._forward_shared
         else:
             self.layers = torch.nn.ModuleList(
                 [
@@ -72,25 +82,21 @@ def __init__(
                     for _ in range(n_layers)
                 ]
             )
+            self._forward_func = self._forward_unshared

-    def forward(self, x, edge_index, edge_attr):
-        """
-        The forward pass of the Graph Neural Kernel used when the weights are not shared.
-
-        :param x: The input batch.
-        :type x: torch.Tensor
-        :param edge_index: The edge index.
-        :type edge_index: torch.Tensor
-        :param edge_attr: The edge attributes.
-        :type edge_attr: torch.Tensor
-        """
+    def _forward_unshared(self, x, edge_index, edge_attr):
         for layer in self.layers:
             x = layer(x, edge_index, edge_attr)
         return x

-    def forward_shared(self, x, edge_index, edge_attr):
+    def _forward_shared(self, x, edge_index, edge_attr):
+        for _ in range(self.n_layers):
+            x = self.layers(x, edge_index, edge_attr)
+        return x
+
+    def forward(self, x, edge_index, edge_attr):
         """
-        The forward pass of the Graph Neural Kernel used when the weights are shared.
+        The forward pass of the Graph Neural Kernel.

         :param x: The input batch.
         :type x: torch.Tensor
@@ -99,9 +105,7 @@ def forward_shared(self, x, edge_index, edge_attr):
         :param edge_attr: The edge attributes.
         :type edge_attr: torch.Tensor
         """
-        for _ in range(self.n_layers):
-            x = self.layers(x, edge_index, edge_attr)
-        return x
+        return self._forward_func(x, edge_index, edge_attr)


 class GraphNeuralOperator(KernelNeuralOperator):
@@ -125,23 +129,31 @@ def __init__(
         """
         The Graph Neural Operator constructor.

-        :param lifting_operator: The lifting operator mapping the node features to its hidden dimension.
+        :param lifting_operator: The lifting operator mapping the node features
+            to their hidden dimension.
         :type lifting_operator: torch.nn.Module
-        :param projection_operator: The projection operator mapping the hidden representation of the nodes features to the output function.
+        :param projection_operator: The projection operator mapping the hidden
+            representation of the node features to the output function.
         :type projection_operator: torch.nn.Module
         :param edge_features: Number of edge features.
         :type edge_features: int
         :param n_layers: The number of kernel layers.
         :type n_layers: int
-        :param internal_n_layers: The number of layers the Feed Forward Neural Network internal to each Kernel Layer.
+        :param internal_n_layers: The number of layers of the Feed Forward
+            Neural Network internal to each Kernel Layer.
         :type internal_n_layers: int
-        :param internal_layers: Number of neurons of hidden layers(s) in the FF Neural Network inside for each Kernel Layer.
+        :param internal_layers: Number of neurons of the hidden layer(s) in the
+            FF Neural Network inside each Kernel Layer.
         :type internal_layers: list | tuple
-        :param internal_func: The activation function used inside the computation of the representation of the edge features in the Graph Integral Layer.
+        :param internal_func: The activation function used inside the
+            computation of the representation of the edge features in the
+            Graph Integral Layer.
         :type internal_func: torch.nn.Module
-        :param external_func: The activation function applied to the output of the Graph Integral Kernel.
+        :param external_func: The activation function applied to the output of
+            the Graph Integral Kernel.
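The shared/unshared refactor above follows a constructor-time dispatch pattern; a standalone sketch with a made-up module (not PINA code):

import torch

class Dispatcher(torch.nn.Module):
    def __init__(self, shared_weights=True):
        super().__init__()
        # pick the implementation once; forward() stays a single entry point
        self._forward_func = (
            self._forward_shared if shared_weights else self._forward_unshared
        )

    def _forward_shared(self, x):
        return 2 * x

    def _forward_unshared(self, x):
        return x + 1

    def forward(self, x):
        return self._forward_func(x)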
        :type external_func: torch.nn.Module
-        :param shared_weights: If ``True`` the weights of the Graph Integral Layers are shared.
+        :param shared_weights: If ``True`` the weights of the Graph Integral
+            Layers are shared.
         :type shared_weights: bool
         """
diff --git a/pina/model/layers/__init__.py b/pina/model/layers/__init__.py
index dcf63dc3a..2faa66ad5 100644
--- a/pina/model/layers/__init__.py
+++ b/pina/model/layers/__init__.py
@@ -1,3 +1,7 @@
+"""
+Old layers module, deprecated in 0.2.0.
+"""
+
 import warnings
 from ..block import *

@@ -8,7 +12,7 @@
 warnings.formatwarning = custom_warning_format
 warnings.filterwarnings("always", category=DeprecationWarning)
 warnings.warn(
-    f"'pina.model.layers' is deprecated and will be removed "
-    f"in future versions. Please use 'pina.model.block' instead.",
+    "'pina.model.layers' is deprecated and will be removed "
+    "in future versions. Please use 'pina.model.block' instead.",
     DeprecationWarning,
 )
diff --git a/pina/model/low_rank_neural_operator.py b/pina/model/low_rank_neural_operator.py
index 733dc7bb7..376b8a907 100644
--- a/pina/model/low_rank_neural_operator.py
+++ b/pina/model/low_rank_neural_operator.py
@@ -1,7 +1,7 @@
 """Module LowRank Neural Operator."""

 import torch
-from torch import nn, cat
+from torch import nn

 from ..utils import check_consistency

@@ -145,4 +145,4 @@ def forward(self, x):
         for module in self._integral_kernels:
             x = module(x, coords)
         # projecting
-        return self._projection_operator(cat((x, coords), dim=-1))
+        return self._projection_operator(torch.cat((x, coords), dim=-1))
diff --git a/pina/model/multi_feed_forward.py b/pina/model/multi_feed_forward.py
index b04708db2..7fdbf31c4 100644
--- a/pina/model/multi_feed_forward.py
+++ b/pina/model/multi_feed_forward.py
@@ -1,11 +1,11 @@
 """Module for Multi FeedForward model"""

+from abc import ABC, abstractmethod
 import torch
-
 from .feed_forward import FeedForward


-class MultiFeedForward(torch.nn.Module):
+class MultiFeedForward(torch.nn.Module, ABC):
     """
     The PINA implementation of MultiFeedForward network.

@@ -24,3 +24,9 @@ def __init__(self, ffn_dict):

         for name, constructor_args in ffn_dict.items():
             setattr(self, name, FeedForward(**constructor_args))
+
+    @abstractmethod
+    def forward(self, *args, **kwargs):
+        """
+        The forward pass; must be implemented by the subclasses.
+        """
diff --git a/pina/model/spline.py b/pina/model/spline.py
index 0aeef8e32..36596901f 100644
--- a/pina/model/spline.py
+++ b/pina/model/spline.py
@@ -5,6 +5,7 @@


 class Spline(torch.nn.Module):
+    """Spline model, built from the given order, knots and control points."""

     def __init__(self, order=4, knots=None, control_points=None) -> None:
         """
@@ -99,6 +100,7 @@ def basis(self, x, k, i, t):

     @property
     def control_points(self):
+        """The control points of the spline."""
         return self._control_points

     @control_points.setter
@@ -116,6 +118,7 @@ def control_points(self, value):

     @property
     def knots(self):
+        """The knots of the spline."""
         return self._knots

     @knots.setter
diff --git a/pina/operator.py b/pina/operator.py
index 85ebf9d8e..32c565851 100644
--- a/pina/operator.py
+++ b/pina/operator.py
@@ -1,9 +1,11 @@
 """
-Module for operator vectorize implementation. Differential operator are used to write any differential problem.
-These operator are implemented to work on different accellerators: CPU, GPU, TPU or MPS.
-All operator take as input a tensor onto which computing the operator, a tensor with respect
-to which computing the operator, the name of the output variables to calculate the operator
-for (in case of multidimensional functions), and the variables name on which the operator is calculated.
+Module for vectorized operator implementations. Differential operators are used
+to write any differential problem. These operators are implemented to work on
+different accelerators: CPU, GPU, TPU or MPS. All operators take as input a
+tensor on which to compute the operator, a tensor with respect to which the
+operator is computed, the names of the output variables for which to calculate
+the operator (in case of multidimensional functions), and the names of the
+variables on which the operator is calculated.
 """

 import torch
@@ -50,7 +52,7 @@ def grad_scalar_output(output_, input_, d):

         if len(output_.labels) != 1:
             raise RuntimeError("only scalar function can be differentiated")
-        if not all([di in input_.labels for di in d]):
+        if not all(di in input_.labels for di in d):
             raise RuntimeError("derivative labels missing from input tensor")

         output_fieldname = output_.labels[0]
@@ -139,8 +141,8 @@ def div(output_, input_, components=None, d=None):
     grad_output = grad(output_, input_, components, d)
     labels = [None] * len(components)
     tensors_to_sum = []
-    for i, (c, d) in enumerate(zip(components, d)):
-        c_fields = f"d{c}d{d}"
+    for i, (c, d_) in enumerate(zip(components, d)):
+        c_fields = f"d{c}d{d_}"
         tensors_to_sum.append(grad_output.extract(c_fields))
         labels[i] = c_fields
     div_result = LabelTensor.summation(tensors_to_sum)
@@ -205,11 +207,8 @@ def scalar_laplace(output_, input_, components, d):

     if method == "divgrad":
         raise NotImplementedError("divgrad not implemented as method")
-        # TODO fix
-        # grad_output = grad(output_, input_, components, d)
-        # result = div(grad_output, input_, d=d)

-    elif method == "std":
+    if method == "std":
         if len(components) == 1:
             result = scalar_laplace(output_, input_, components, d)
             labels = [f"dd{components[0]}"]
diff --git a/pina/operators.py b/pina/operators.py
index 5e3e838f3..a995d4436 100644
--- a/pina/operators.py
+++ b/pina/operators.py
@@ -1,3 +1,7 @@
+"""
+Old module for operators. Deprecated in 0.2.0.
+"""
+
 import warnings
 from .operator import *

@@ -8,7 +12,7 @@
 warnings.formatwarning = custom_warning_format
 warnings.filterwarnings("always", category=DeprecationWarning)
 warnings.warn(
-    f"'pina.operators' is deprecated and will be removed "
-    f"in future versions. Please use 'pina.operator' instead.",
+    "'pina.operators' is deprecated and will be removed "
+    "in future versions. Please use 'pina.operator' instead.",
    DeprecationWarning,
 )
diff --git a/pina/optim/__init__.py b/pina/optim/__init__.py
index 631134a0e..38301bb60 100644
--- a/pina/optim/__init__.py
+++ b/pina/optim/__init__.py
@@ -1,3 +1,5 @@
+"""Module for Optimizer class."""
+
 __all__ = [
     "Optimizer",
     "TorchOptimizer",
diff --git a/pina/optim/optimizer_interface.py b/pina/optim/optimizer_interface.py
index 0d197ea8d..d61ef4b59 100644
--- a/pina/optim/optimizer_interface.py
+++ b/pina/optim/optimizer_interface.py
@@ -1,15 +1,24 @@
-"""Module for PINA Optimizer"""
+"""Module for PINA Optimizer."""

 from abc import ABCMeta, abstractmethod


-class Optimizer(metaclass=ABCMeta):  # TODO improve interface
+class Optimizer(metaclass=ABCMeta):
+    """
+    Abstract base class for PINA optimizers.
+    """

     @property
     @abstractmethod
     def instance(self):
-        pass
+        """
+        The underlying optimizer instance.
+        """

     @abstractmethod
     def hook(self):
-        pass
+        """
+        Hook to initialize the optimizer instance.
+        """
diff --git a/pina/optim/scheduler_interface.py b/pina/optim/scheduler_interface.py
index 1cae521fc..ddb515cd0 100644
--- a/pina/optim/scheduler_interface.py
+++ b/pina/optim/scheduler_interface.py
@@ -1,15 +1,25 @@
-"""Module for PINA Optimizer"""
+"""Module for PINA Scheduler."""

 from abc import ABCMeta, abstractmethod


-class Scheduler(metaclass=ABCMeta):  # TODO improve interface
+class Scheduler(metaclass=ABCMeta):
+    """
+    Abstract base class for PINA schedulers.
+    """

     @property
     @abstractmethod
     def instance(self):
-        pass
+        """
+        The underlying scheduler instance.
+        """

     @abstractmethod
     def hook(self):
-        pass
+        """
+        Hook to initialize the scheduler instance.
+        """
diff --git a/pina/optim/torch_optimizer.py b/pina/optim/torch_optimizer.py
index 02b892086..74b53379b 100644
--- a/pina/optim/torch_optimizer.py
+++ b/pina/optim/torch_optimizer.py
@@ -7,8 +7,20 @@


 class TorchOptimizer(Optimizer):
+    """
+    A wrapper for PyTorch optimizers.
+    """

     def __init__(self, optimizer_class, **kwargs):
+        """
+        Initialize the wrapper.
+
+        :param optimizer_class: A subclass of :class:`torch.optim.Optimizer`.
+        :type optimizer_class: type
+        """
         check_consistency(optimizer_class, torch.optim.Optimizer, subclass=True)

         self.optimizer_class = optimizer_class
@@ -16,6 +28,12 @@ def __init__(self, optimizer_class, **kwargs):
         self._optimizer_instance = None

     def hook(self, parameters):
+        """
+        Initialize the optimizer instance.
+
+        :param parameters: The model parameters to be optimized.
+        :type parameters: iterable
+        """
         self._optimizer_instance = self.optimizer_class(
             parameters, **self.kwargs
         )
diff --git a/pina/optim/torch_scheduler.py b/pina/optim/torch_scheduler.py
index bf8daec8c..41c589c32 100644
--- a/pina/optim/torch_scheduler.py
+++ b/pina/optim/torch_scheduler.py
@@ -1,7 +1,5 @@
 """Module for PINA Torch Optimizer"""

-import torch
-
 try:
     from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
 except ImportError:
@@ -15,8 +13,20 @@


 class TorchScheduler(Scheduler):
+    """
+    A wrapper for PyTorch learning rate schedulers.
+    """

     def __init__(self, scheduler_class, **kwargs):
+        """
+        Initialize the wrapper.
+
+        :param scheduler_class: A subclass of the torch ``LRScheduler``.
+        :type scheduler_class: type
+        """
         check_consistency(scheduler_class, LRScheduler, subclass=True)

         self.scheduler_class = scheduler_class
@@ -24,6 +34,12 @@ def __init__(self, scheduler_class, **kwargs):
         self._scheduler_instance = None

     def hook(self, optimizer):
+        """
+        Initialize the scheduler instance.
+
+        :param optimizer: The wrapped optimizer the scheduler is attached to.
+        :type optimizer: Optimizer
+        """
         check_consistency(optimizer, Optimizer)
         self._scheduler_instance = self.scheduler_class(
             optimizer.instance, **self.kwargs
diff --git a/pina/problem/__init__.py b/pina/problem/__init__.py
index 3f77cb087..3174082d6 100644
--- a/pina/problem/__init__.py
+++ b/pina/problem/__init__.py
@@ -1,3 +1,5 @@
+"""Module for Problems."""
+
 __all__ = [
     "AbstractProblem",
     "SpatialProblem",
diff --git a/pina/problem/abstract_problem.py b/pina/problem/abstract_problem.py
index ddc98af8c..43058a6e2 100644
--- a/pina/problem/abstract_problem.py
+++ b/pina/problem/abstract_problem.py
@@ -1,11 +1,11 @@
 """Module for AbstractProblem class"""

 from abc import ABCMeta, abstractmethod
+from copy import deepcopy
 from ..utils import check_consistency
 from ..domain import DomainInterface, CartesianDomain
 from ..condition.domain_equation_condition import DomainEquationCondition
-from copy import deepcopy
-from .. import LabelTensor
+from ..label_tensor import LabelTensor
 from ..utils import merge_tensors

@@ -20,7 +20,12 @@ class AbstractProblem(metaclass=ABCMeta):
     """

     def __init__(self):
+        """
+        Initialize the problem, creating the storage for the discretised
+        domains and the collector for the problem data.
+        """
         self._discretised_domains = {}
         # create collector to manage problem data

@@ -42,16 +47,33 @@ def __init__(self):

     @property
     def batching_dimension(self):
+        """
+        The batching dimension of the problem data.
+        """
         return self._batching_dimension

     @batching_dimension.setter
     def batching_dimension(self, value):
+        """
+        Set the batching dimension of the problem data.
+        """
         self._batching_dimension = value

-    # TODO this should be erase when dataloading will interface collector,
-    # kept only for back compatibility
+    # back compatibility 0.1
     @property
     def input_pts(self):
+        """
+        The input points of each condition, kept for back compatibility
+        with PINA 0.1.
+        """
         to_return = {}
         for cond_name, cond in self.conditions.items():
             if hasattr(cond, "input"):
@@ -62,6 +84,12 @@ def input_pts(self):

     @property
     def discretised_domains(self):
+        """
+        The discretised domains of the problem.
+        """
         return self._discretised_domains

     def __deepcopy__(self, memo):
@@ -90,10 +118,7 @@ def are_all_domains_discretised(self):
         :rtype: bool
         """
         return all(
-            [
-                domain in self.discretised_domains
-                for domain in self.domains.keys()
-            ]
+            domain in self.discretised_domains for domain in self.domains
         )

     @property
@@ -127,7 +152,6 @@ def output_variables(self):
         """
         The output variables of the problem.
         """
-        pass

     @property
     @abstractmethod
@@ -153,7 +177,7 @@ def discretise_domain(
             chebyshev sampling, ``chebyshev``; grid sampling ``grid``.
         :param variables: variable(s) to sample, defaults to 'all'.
         :type variables: str | list[str]
-        :param domains: problem's domain from where to sample, defaults to 'all'.
+        :param domains: Domain from where to sample, defaults to 'all'.
         :type domains: str | list[str]

         :Example:
@@ -162,11 +186,12 @@ def discretise_domain(
             >>> pinn.discretise_domain(n=10, mode='grid', variables=['x'])

         .. warning::
-            ``random`` is currently the only implemented ``mode`` for all geometries, i.e.
-            ``EllipsoidDomain``, ``CartesianDomain``, ``SimplexDomain`` and the geometries
-            compositions ``Union``, ``Difference``, ``Exclusion``, ``Intersection``. The
-            modes ``latin`` or ``lh``, ``chebyshev``, ``grid`` are only implemented for
-            ``CartesianDomain``.
+            ``random`` is currently the only implemented ``mode`` for all
+            geometries, i.e. ``EllipsoidDomain``, ``CartesianDomain``,
+            ``SimplexDomain`` and the geometries compositions ``Union``,
+            ``Difference``, ``Exclusion``, ``Intersection``. The
+            modes ``latin`` or ``lh``, ``chebyshev``, ``grid`` are only
+            implemented for ``CartesianDomain``.
""" # check consistecy n, mode, variables, locations diff --git a/pina/problem/inverse_problem.py b/pina/problem/inverse_problem.py index 7451e2b9b..bd7570112 100644 --- a/pina/problem/inverse_problem.py +++ b/pina/problem/inverse_problem.py @@ -1,7 +1,7 @@ """Module for the ParametricProblem class""" -import torch from abc import abstractmethod +import torch from .abstract_problem import AbstractProblem @@ -16,40 +16,14 @@ class InverseProblem(AbstractProblem): derivative term. :Example: - >>> from pina.problem import SpatialProblem, InverseProblem - >>> from pina.operator import grad - >>> from pina.equation import ParametricEquation, FixedValue - >>> from pina import Condition - >>> from pina.geometry import CartesianDomain - >>> import torch - >>> - >>> class InverseODE(SpatialProblem, InverseProblem): - >>> - >>> output_variables = ['u'] - >>> spatial_domain = CartesianDomain({'x': [0, 1]}) - >>> unknown_parameter_domain = CartesianDomain({'alpha': [1, 10]}) - >>> - >>> def ode_equation(input_, output_, params_): - >>> u_x = grad(output_, input_, components=['u'], d=['x']) - >>> u = output_.extract(['u']) - >>> return params_.extract(['alpha']) * u_x - u - >>> - >>> def solution_data(input_, output_): - >>> x = input_.extract(['x']) - >>> solution = torch.exp(x) - >>> return output_ - solution - >>> - >>> conditions = { - >>> 'x0': Condition(CartesianDomain({'x': 0}), FixedValue(1.0)), - >>> 'D': Condition(CartesianDomain({'x': [0, 1]}), ParametricEquation(ode_equation)), - >>> 'data': Condition(CartesianDomain({'x': [0, 1]}), Equation(solution_data)) + TODO """ def __init__(self): super().__init__() # storing unknown_parameters for optimization self.unknown_parameters = {} - for i, var in enumerate(self.unknown_variables): + for var in self.unknown_variables: range_var = self.unknown_parameter_domain.range_[var] tensor_var = ( torch.rand(1, requires_grad=True) * range_var[1] + range_var[0] @@ -61,7 +35,6 @@ def unknown_parameter_domain(self): """ The parameters' domain of the problem. """ - pass @property def unknown_variables(self): diff --git a/pina/problem/parametric_problem.py b/pina/problem/parametric_problem.py index 3710175c7..e12c42ef1 100644 --- a/pina/problem/parametric_problem.py +++ b/pina/problem/parametric_problem.py @@ -15,29 +15,7 @@ class ParametricProblem(AbstractProblem): derivative term. :Example: - >>> from pina.problem import SpatialProblem, ParametricProblem - >>> from pina.operator import grad - >>> from pina.equations import Equation, FixedValue - >>> from pina import Condition - >>> from pina.geometry import CartesianDomain - >>> import torch - >>> - >>> - >>> class ParametricODE(SpatialProblem, ParametricProblem): - >>> - >>> output_variables = ['u'] - >>> spatial_domain = CartesianDomain({'x': [0, 1]}) - >>> parameter_domain = CartesianDomain({'alpha': [1, 10]}) - >>> - >>> def ode_equation(input_, output_): - >>> u_x = grad(output_, input_, components=['u'], d=['x']) - >>> u = output_.extract(['u']) - >>> alpha = input_.extract(['alpha']) - >>> return alpha * u_x - u - >>> - >>> conditions = { - >>> 'x0': Condition(CartesianDomain({'x': 0, 'alpha':[1, 10]}), FixedValue(1.)), - >>> 'D': Condition(CartesianDomain({'x': [0, 1], 'alpha':[1, 10]}), Equation(ode_equation))} + TODO """ @abstractmethod @@ -45,7 +23,6 @@ def parameter_domain(self): """ The parameters' domain of the problem. 
""" - pass @property def parameters(self): diff --git a/pina/problem/spatial_problem.py b/pina/problem/spatial_problem.py index 61962667c..1e5434e65 100644 --- a/pina/problem/spatial_problem.py +++ b/pina/problem/spatial_problem.py @@ -13,27 +13,7 @@ class SpatialProblem(AbstractProblem): Here's an example of a spatial 1-dimensional ODE problem. :Example: - >>> from pina.problem import SpatialProblem - >>> from pina.operator import grad - >>> from pina.equation import Equation, FixedValue - >>> from pina import Condition - >>> from pina.geometry import CartesianDomain - >>> import torch - >>> - >>> - >>> class SpatialODE(SpatialProblem: - >>> - >>> output_variables = ['u'] - >>> spatial_domain = CartesianDomain({'x': [0, 1]}) - >>> - >>> def ode_equation(input_, output_): - >>> u_x = grad(output_, input_, components=['u'], d=['x']) - >>> u = output_.extract(['u']) - >>> return u_x - u - >>> - >>> conditions = { - >>> 'x0': Condition(CartesianDomain({'x': 0, 'alpha':[1, 10]}), FixedValue(1.)), - >>> 'D': Condition(CartesianDomain({'x': [0, 1], 'alpha':[1, 10]}), Equation(ode_equation))} + TODO """ @abstractmethod @@ -41,7 +21,6 @@ def spatial_domain(self): """ The spatial domain of the problem. """ - pass @property def spatial_variables(self): diff --git a/pina/problem/time_dependent_problem.py b/pina/problem/time_dependent_problem.py index 3aaa945ab..3d06b689b 100644 --- a/pina/problem/time_dependent_problem.py +++ b/pina/problem/time_dependent_problem.py @@ -13,37 +13,7 @@ class TimeDependentProblem(AbstractProblem): Here's an example of a 1D wave problem. :Example: - >>> from pina.problem import SpatialProblem, TimeDependentProblem - >>> from pina.operator import grad, laplacian - >>> from pina.equation import Equation, FixedValue - >>> from pina import Condition - >>> from pina.geometry import CartesianDomain - >>> import torch - >>> - >>> - >>> class Wave(TimeDependentSpatialProblem): - >>> - >>> output_variables = ['u'] - >>> spatial_domain = CartesianDomain({'x': [0, 3]}) - >>> temporal_domain = CartesianDomain({'t': [0, 1]}) - >>> - >>> def wave_equation(input_, output_): - >>> u_t = grad(output_, input_, components=['u'], d=['t']) - >>> u_tt = grad(u_t, input_, components=['dudt'], d=['t']) - >>> delta_u = laplacian(output_, input_, components=['u'], d=['x']) - >>> return delta_u - u_tt - >>> - >>> def initial_condition(input_, output_): - >>> u_expected = (-3*torch.sin(2*torch.pi*input_.extract(['x'])) - >>> + 5*torch.sin(8/3*torch.pi*input_.extract(['x']))) - >>> u = output_.extract(['u']) - >>> return u - u_expected - >>> - >>> conditions = { - >>> 't0': Condition(CartesianDomain({'x': [0, 3], 't':0}), Equation(initial_condition)), - >>> 'gamma1': Condition(CartesianDomain({'x':0, 't':[0, 1]}), FixedValue(0.)), - >>> 'gamma2': Condition(CartesianDomain({'x':3, 't':[0, 1]}), FixedValue(0.)), - >>> 'D': Condition(CartesianDomain({'x': [0, 3], 't':[0, 1]}), Equation(wave_equation))} + TODO """ @abstractmethod @@ -51,7 +21,6 @@ def temporal_domain(self): """ The temporal domain of the problem. 
""" - pass @property def temporal_variable(self): diff --git a/pina/problem/zoo/__init__.py b/pina/problem/zoo/__init__.py index b10c0fbe0..c18d649d7 100644 --- a/pina/problem/zoo/__init__.py +++ b/pina/problem/zoo/__init__.py @@ -1,3 +1,5 @@ +"""TODO""" + __all__ = [ "Poisson2DSquareProblem", "SupervisedProblem", diff --git a/pina/problem/zoo/poisson_2d_square.py b/pina/problem/zoo/poisson_2d_square.py index 89d9ee391..e65beb5bd 100644 --- a/pina/problem/zoo/poisson_2d_square.py +++ b/pina/problem/zoo/poisson_2d_square.py @@ -1,12 +1,12 @@ """Definition of the Poisson problem on a square domain.""" -from pina.problem import SpatialProblem -from pina.operator import laplacian -from pina import Condition -from pina.domain import CartesianDomain -from pina.equation.equation import Equation -from pina.equation.equation_factory import FixedValue import torch +from ..spatial_problem import SpatialProblem +from ...operator import laplacian +from ... import Condition +from ...domain import CartesianDomain +from ...equation.equation import Equation +from ...equation.equation_factory import FixedValue def laplace_equation(input_, output_): @@ -48,6 +48,8 @@ class Poisson2DSquareProblem(SpatialProblem): } def poisson_sol(self, pts): + """TODO""" + return -( torch.sin(pts.extract(["x"]) * torch.pi) * torch.sin(pts.extract(["y"]) * torch.pi) diff --git a/pina/problem/zoo/supervised_problem.py b/pina/problem/zoo/supervised_problem.py index b45bc91c5..1d4654945 100644 --- a/pina/problem/zoo/supervised_problem.py +++ b/pina/problem/zoo/supervised_problem.py @@ -1,14 +1,17 @@ -from pina.problem import AbstractProblem -from pina import Condition -from pina import Graph +"""TODO""" + +from ..abstract_problem import AbstractProblem +from ... import Condition +from ... import Graph class SupervisedProblem(AbstractProblem): """ A problem definition for supervised learning in PINA. - This class allows an easy and straightforward definition of a Supervised problem, - based on a single condition of type `InputTargetCondition` + This class allows an easy and straightforward definition of a + Supervised problem, based on a single condition of type + `InputTargetCondition` :Example: >>> import torch @@ -17,7 +20,7 @@ class SupervisedProblem(AbstractProblem): >>> problem = SupervisedProblem(input_data, output_data) """ - conditions = dict() + conditions = {} output_variables = None def __init__(self, input_, output_): diff --git a/pina/solver/__init__.py b/pina/solver/__init__.py index d3f515e3c..7a10cf9fa 100644 --- a/pina/solver/__init__.py +++ b/pina/solver/__init__.py @@ -1,3 +1,7 @@ +""" +TODO +""" + __all__ = [ "SolverInterface", "SingleSolverInterface", diff --git a/pina/solver/garom.py b/pina/solver/garom.py index 8a6f5ff46..d023cf890 100644 --- a/pina/solver/garom.py +++ b/pina/solver/garom.py @@ -1,14 +1,11 @@ """Module for GAROM""" import torch - +from torch.nn.modules.loss import _Loss from .solver import MultiSolverInterface -from ..utils import check_consistency -from ..loss.loss_interface import LossInterface from ..condition import InputTargetCondition from ..utils import check_consistency from ..loss import LossInterface, PowerLoss -from torch.nn.modules.loss import _Loss class GAROM(MultiSolverInterface): @@ -60,18 +57,22 @@ def __init__( rate scheduler for the generator. :param Scheduler scheduler_discriminator: Learning rate scheduler for the discriminator. - :param dict scheduler_discriminator_kwargs: LR scheduler constructor keyword args. 
- :param gamma: Ratio of expected loss for generator and discriminator, defaults to 0.3. + :param dict scheduler_discriminator_kwargs: LR scheduler constructor + keyword args. + :param gamma: Ratio of expected loss for generator and discriminator, + defaults to 0.3. :type gamma: float - :param lambda_k: Learning rate for control theory optimization, defaults to 0.001. + :param lambda_k: Learning rate for control theory optimization, + defaults to 0.001. :type lambda_k: float - :param regularizer: Regularization term in the GAROM loss, defaults to False. + :param regularizer: Regularization term in the GAROM loss, + defaults to False. :type regularizer: bool .. warning:: - The algorithm works only for data-driven model. Hence in the ``problem`` definition - the codition must only contain ``input`` (e.g. coefficient parameters, time - parameters), and ``target``. + The algorithm works only for data-driven model. Hence in the + ``problem`` definition the codition must only contain ``input`` + (e.g. coefficient parameters, time parameters), and ``target``. """ # set loss @@ -118,9 +119,11 @@ def forward(self, x, mc_steps=20, variance=False): :param mc_steps: Number of montecarlo samples to approximate the expected value, defaults to 20. :type mc_steps: int - :param variance: Returining also the sample variance of the solution, defaults to False. + :param variance: Returining also the sample variance of the solution, + defaults to False. :type variance: bool - :return: The expected value of the generator distribution. If ``variance=True`` also the + :return: The expected value of the generator distribution. If + ``variance=True`` also the sample variance is returned. :rtype: torch.Tensor | tuple(torch.Tensor, torch.Tensor) """ @@ -139,6 +142,7 @@ def forward(self, x, mc_steps=20, variance=False): return mean def sample(self, x): + """TODO""" # sampling return self.generator(x) @@ -285,24 +289,30 @@ def test_step(self, batch): @property def generator(self): + """TODO""" return self.models[0] @property def discriminator(self): + """TODO""" return self.models[1] @property def optimizer_generator(self): + """TODO""" return self.optimizers[0].instance @property def optimizer_discriminator(self): + """TODO""" return self.optimizers[1].instance @property def scheduler_generator(self): + """TODO""" return self.schedulers[0].instance @property def scheduler_discriminator(self): + """TODO""" return self.schedulers[1].instance diff --git a/pina/solver/physic_informed_solver/__init__.py b/pina/solver/physic_informed_solver/__init__.py index f17d5ad78..ce14f85fa 100644 --- a/pina/solver/physic_informed_solver/__init__.py +++ b/pina/solver/physic_informed_solver/__init__.py @@ -1,3 +1,5 @@ +"""TODO""" + __all__ = [ "PINNInterface", "PINN", diff --git a/pina/solver/physic_informed_solver/causal_pinn.py b/pina/solver/physic_informed_solver/causal_pinn.py index 498472215..36e24a06d 100644 --- a/pina/solver/physic_informed_solver/causal_pinn.py +++ b/pina/solver/physic_informed_solver/causal_pinn.py @@ -2,9 +2,9 @@ import torch -from pina.problem import TimeDependentProblem +from ...problem import TimeDependentProblem from .pinn import PINN -from pina.utils import check_consistency +from ...utils import check_consistency class CausalPINN(PINN): diff --git a/pina/solver/physic_informed_solver/competitive_pinn.py b/pina/solver/physic_informed_solver/competitive_pinn.py index 8eddef8ca..0073ad905 100644 --- a/pina/solver/physic_informed_solver/competitive_pinn.py +++ 
b/pina/solver/physic_informed_solver/competitive_pinn.py @@ -1,7 +1,7 @@ """Module for Competitive PINN.""" -import torch import copy +import torch from ...problem import InverseProblem from .pinn_interface import PINNInterface diff --git a/pina/solver/physic_informed_solver/gradient_pinn.py b/pina/solver/physic_informed_solver/gradient_pinn.py index cad5bced1..22ebb2f17 100644 --- a/pina/solver/physic_informed_solver/gradient_pinn.py +++ b/pina/solver/physic_informed_solver/gradient_pinn.py @@ -3,8 +3,8 @@ import torch from .pinn import PINN -from pina.operator import grad -from pina.problem import SpatialProblem +from ...operator import grad +from ...problem import SpatialProblem class GradientPINN(PINN): @@ -32,7 +32,7 @@ class GradientPINN(PINN): \mathcal{L}_{\rm{problem}} =& \frac{1}{N}\sum_{i=1}^N \mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) + \frac{1}{N}\sum_{i=1}^N - \mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i)) + \\ + \mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i)) + &\frac{1}{N}\sum_{i=1}^N \nabla_{\mathbf{x}}\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) + \frac{1}{N}\sum_{i=1}^N diff --git a/pina/solver/physic_informed_solver/pinn_interface.py b/pina/solver/physic_informed_solver/pinn_interface.py index d478c6340..f31e80c00 100644 --- a/pina/solver/physic_informed_solver/pinn_interface.py +++ b/pina/solver/physic_informed_solver/pinn_interface.py @@ -106,7 +106,6 @@ def loss_phys(self, samples, equation): samples and equation. :rtype: LabelTensor """ - pass def compute_residual(self, samples, equation): """ diff --git a/pina/solver/physic_informed_solver/self_adaptive_pinn.py b/pina/solver/physic_informed_solver/self_adaptive_pinn.py index 4e919b558..2a0208e9c 100644 --- a/pina/solver/physic_informed_solver/self_adaptive_pinn.py +++ b/pina/solver/physic_informed_solver/self_adaptive_pinn.py @@ -1,10 +1,10 @@ """Module for Self-Adaptive PINN.""" -import torch from copy import deepcopy +import torch -from pina.utils import check_consistency -from pina.problem import InverseProblem +from ...utils import check_consistency +from ...problem import InverseProblem from ..solver import MultiSolverInterface from .pinn_interface import PINNInterface @@ -155,7 +155,7 @@ def __init__( self._vectorial_loss.reduction = "none" def forward(self, x): - """ + r""" Forward pass implementation for the PINN solver. It returns the function evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points diff --git a/pina/solver/reduced_order_model.py b/pina/solver/reduced_order_model.py index 1cbce1ae7..c80556b31 100644 --- a/pina/solver/reduced_order_model.py +++ b/pina/solver/reduced_order_model.py @@ -2,7 +2,7 @@ import torch -from . import SupervisedSolver +from .supervised import SupervisedSolver class ReducedOrderModelSolver(SupervisedSolver): @@ -39,7 +39,8 @@ class ReducedOrderModelSolver(SupervisedSolver): \mathcal{D}_{\rm{net}}[\mathcal{E}_{\rm{net}}[\mathbf{u}(\mu_i)]] - \mathbf{u}(\mu_i)) - where :math:`\mathcal{L}` is a specific loss function, default Mean Square Error: + where :math:`\mathcal{L}` is a specific loss function, default + Mean Square Error: .. math:: \mathcal{L}(v) = \| v \|^2_2. 
diff --git a/pina/solver/solver.py b/pina/solver/solver.py index 3509b3412..f671a7ebc 100644 --- a/pina/solver/solver.py +++ b/pina/solver/solver.py @@ -1,16 +1,15 @@ """Solver module.""" +from abc import ABCMeta, abstractmethod import lightning import torch -import sys -from abc import ABCMeta, abstractmethod +from torch._dynamo.eval_frame import OptimizedModule from ..problem import AbstractProblem from ..optim import Optimizer, Scheduler, TorchOptimizer, TorchScheduler from ..loss import WeightingInterface from ..loss.scalar_weighting import _NoWeighting from ..utils import check_consistency, labelize_forward -from torch._dynamo.eval_frame import OptimizedModule class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta): @@ -119,6 +118,10 @@ def test_step(self, batch): self.store_log("test_loss", loss, self.get_batch_size(batch)) def store_log(self, name, value, batch_size): + """ + TODO + """ + self.log( name=name, value=value, @@ -128,7 +131,9 @@ def store_log(self, name, value, batch_size): @abstractmethod def forward(self, *args, **kwargs): - pass + """ + TODO + """ @abstractmethod def optimization_cycle(self, batch): @@ -144,7 +149,6 @@ def optimization_cycle(self, batch): containing the condition name and the associated scalar loss. :rtype: dict(torch.Tensor) """ - pass @property def problem(self): @@ -169,7 +173,10 @@ def weighting(self): @staticmethod def get_batch_size(batch): - # assuming batch is a custom Batch object + """ + TODO + """ + batch_size = 0 for data in batch: batch_size += len(data[1]["input"]) @@ -177,10 +184,18 @@ def get_batch_size(batch): @staticmethod def default_torch_optimizer(): + """ + TODO + """ + return TorchOptimizer(torch.optim.Adam, lr=0.001) @staticmethod def default_torch_scheduler(): + """ + TODO + """ + return TorchScheduler(torch.optim.lr_scheduler.ConstantLR) def on_train_start(self): @@ -202,6 +217,10 @@ def on_test_start(self): self._compile_model() def _check_already_compiled(self): + """ + TODO + """ + models = self._pina_models if len(models) == 1 and isinstance( self._pina_models[0], torch.nn.ModuleDict @@ -214,6 +233,10 @@ def _check_already_compiled(self): @staticmethod def _perform_compilation(model): + """ + TODO + """ + model_device = next(model.parameters()).device try: if model_device == torch.device("mps:0"): @@ -225,7 +248,9 @@ def _perform_compilation(model): return model -class SingleSolverInterface(SolverInterface): +class SingleSolverInterface(SolverInterface, metaclass=ABCMeta): + """TODO""" + def __init__( self, problem, @@ -322,7 +347,7 @@ def optimizer(self): return self._pina_optimizers[0] -class MultiSolverInterface(SolverInterface): +class MultiSolverInterface(SolverInterface, metaclass=ABCMeta): """ Multiple Solver base class. This class inherits is a wrapper of SolverInterface class diff --git a/pina/solvers/__init__.py b/pina/solvers/__init__.py index b7373a39f..aaa44b9b0 100644 --- a/pina/solvers/__init__.py +++ b/pina/solvers/__init__.py @@ -1,3 +1,7 @@ +""" +Old module for solvers. Deprecated in 0.2.0 . +""" + import warnings from ..solver import * @@ -8,7 +12,7 @@ warnings.formatwarning = custom_warning_format warnings.filterwarnings("always", category=DeprecationWarning) warnings.warn( - f"'pina.solvers' is deprecated and will be removed " - f"in future versions. Please use 'pina.solver' instead.", + "'pina.solvers' is deprecated and will be removed " + "in future versions. 
Please use 'pina.solver' instead.", DeprecationWarning, ) diff --git a/pina/solvers/pinns/__init__.py b/pina/solvers/pinns/__init__.py index 78184b022..c9012bbc2 100644 --- a/pina/solvers/pinns/__init__.py +++ b/pina/solvers/pinns/__init__.py @@ -1,3 +1,7 @@ +""" +Old module for the PINNs solver. Deprecated in 0.2.0. +""" + import warnings from ...solver.physic_informed_solver import * diff --git a/pina/trainer.py b/pina/trainer.py index 41c41937c..81abfbd17 100644 --- a/pina/trainer.py +++ b/pina/trainer.py @@ -9,15 +9,18 @@ class Trainer(lightning.pytorch.Trainer): + """ + PINA custom Trainer class which allows to customize standard Lightning + Trainer class for PINNs training. + """ def __init__( self, solver, batch_size=None, - train_size=0.7, - test_size=0.2, - val_size=0.1, - predict_size=0.0, + train_size=1.0, + test_size=0.0, + val_size=0.0, compile=None, automatic_batching=None, num_workers=None, @@ -26,7 +29,8 @@ def __init__( **kwargs, ): """ - PINA Trainer class for costumizing every aspect of training via flags. + Initialize the Trainer class for by calling Lightning costructor and + adding many other functionalities. :param solver: A pina:class:`SolverInterface` solver for the differential problem. @@ -41,8 +45,6 @@ def __init__( :type test_size: float :param val_size: Percentage of elements in the val dataset. :type val_size: float - :param predict_size: Percentage of elements in the predict dataset. - :type predict_size: float :param compile: if True model is compiled before training, default False. For Windows users compilation is always disabled. :type compile: bool @@ -62,43 +64,23 @@ def __init__( :Keyword Arguments: The additional keyword arguments specify the training setup and can be choosen from the `pytorch-lightning - Trainer API `_ + Trainer API `_ """ # check consistency for init types - check_consistency(solver, SolverInterface) - check_consistency(train_size, float) - check_consistency(test_size, float) - check_consistency(val_size, float) - check_consistency(predict_size, float) - if automatic_batching is not None: - check_consistency(automatic_batching, bool) - if compile is not None: - check_consistency(compile, bool) - if pin_memory is not None: - check_consistency(pin_memory, bool) - else: - pin_memory = False - if num_workers is not None: - check_consistency(pin_memory, int) - else: - num_workers = 0 - if shuffle is not None: - check_consistency(shuffle, bool) - else: - shuffle = True - if train_size + test_size + val_size + predict_size > 1: - raise ValueError( - "train_size, test_size, val_size and predict_size " - "must sum up to 1." + self._check_input_consistency( + solver, + train_size, + test_size, + val_size, + automatic_batching, + compile, + ) + pin_memory, num_workers, shuffle, batch_size = ( + self._check_consistency_and_set_defaults( + pin_memory, num_workers, shuffle, batch_size ) - for size in [train_size, test_size, val_size, predict_size]: - if size < 0 or size > 1: - raise ValueError( - "splitting sizes for train, validation, test " - "and prediction must be between [0, 1]." 
- ) - if batch_size is not None: - check_consistency(batch_size, int) + ) # inference mode set to false when validating/testing PINNs otherwise # gradient is not tracked and optimization_cycle fails @@ -125,6 +107,7 @@ def __init__( self.automatic_batching = ( automatic_batching if automatic_batching is not None else False ) + # set attributes self.compile = compile self.solver = solver @@ -135,7 +118,6 @@ def __init__( train_size, test_size, val_size, - predict_size, batch_size, automatic_batching, pin_memory, @@ -171,7 +153,6 @@ def _create_datamodule( train_size, test_size, val_size, - predict_size, batch_size, automatic_batching, pin_memory, @@ -187,7 +168,8 @@ def _create_datamodule( error_message = "\n".join( [ f"""{" " * 13} ---> Domain {key} { - "sampled" if key in self.solver.problem.discretised_domains else + "sampled" if key in self.solver.problem.discretised_domains + else "not sampled"}""" for key in self.solver.problem.domains.keys() ] @@ -202,7 +184,6 @@ def _create_datamodule( train_size=train_size, test_size=test_size, val_size=val_size, - predict_size=predict_size, batch_size=batch_size, automatic_batching=automatic_batching, num_workers=num_workers, @@ -232,3 +213,44 @@ def solver(self): @solver.setter def solver(self, solver): self._solver = solver + + @staticmethod + def _check_input_consistency( + solver, train_size, test_size, val_size, automatic_batching, compile + ): + """ + Check the consistency of the input parameters." + """ + + check_consistency(solver, SolverInterface) + check_consistency(train_size, float) + check_consistency(test_size, float) + check_consistency(val_size, float) + if automatic_batching is not None: + check_consistency(automatic_batching, bool) + if compile is not None: + check_consistency(compile, bool) + + @staticmethod + def _check_consistency_and_set_defaults( + pin_memory, num_workers, shuffle, batch_size + ): + """ + Check the consistency of the input parameters and set the default + values. + """ + if pin_memory is not None: + check_consistency(pin_memory, bool) + else: + pin_memory = False + if num_workers is not None: + check_consistency(pin_memory, int) + else: + num_workers = 0 + if shuffle is not None: + check_consistency(shuffle, bool) + else: + shuffle = True + if batch_size is not None: + check_consistency(batch_size, int) + return pin_memory, num_workers, shuffle, batch_size diff --git a/pina/utils.py b/pina/utils.py index c4cb8760b..529c98b67 100644 --- a/pina/utils.py +++ b/pina/utils.py @@ -1,26 +1,40 @@ """Utils module.""" import types +from functools import reduce import torch -from functools import reduce from .label_tensor import LabelTensor +# Codacy error unused parameters def custom_warning_format( message, category, filename, lineno, file=None, line=None ): + """ + Depewarning custom format. + + :param str message: The warning message. + :param class category: The warning category. + :param str filename: The filename where the warning was raised. + :param int lineno: The line number where the warning was raised. + :param str file: The file object where the warning was raised. + :param inr line: The line where the warning was raised. + :return: The formatted warning message. + :rtype: str + """ return f"{filename}: {category.__name__}: {message}\n" -def check_consistency(object, object_instance, subclass=False): +def check_consistency(object_, object_instance, subclass=False): """Helper function to check object inheritance consistency. 
Given a specific ``'object'`` we check if the object is instance of a specific ``'object_instance'``, or in case ``'subclass=True'`` we check if the object is subclass if the ``'object_instance'``. - :param (iterable or class object) object: The object to check the inheritance + :param (iterable or class object) object: The object to check the + inheritance :param Object object_instance: The parent class from where the object is expected to inherit :param str object_name: The name of the object @@ -28,17 +42,19 @@ def check_consistency(object, object_instance, subclass=False): :raises ValueError: If the object does not inherit from the specified class """ - if not isinstance(object, (list, set, tuple)): - object = [object] + if not isinstance(object_, (list, set, tuple)): + object_ = [object_] - for obj in object: + for obj in object_: try: if not subclass: assert isinstance(obj, object_instance) else: assert issubclass(obj, object_instance) - except AssertionError: - raise ValueError(f"{type(obj).__name__} must be {object_instance}.") + except AssertionError as e: + raise ValueError( + f"{type(obj).__name__} must be {object_instance}." + ) from e def labelize_forward(forward, input_variables, output_variables): @@ -67,12 +83,14 @@ def wrapper(x): def merge_tensors(tensors): # name to be changed + """TODO""" if tensors: return reduce(merge_two_tensors, tensors[1:], tensors[0]) raise ValueError("Expected at least one tensor") def merge_two_tensors(tensor1, tensor2): + """TODO""" n1 = tensor1.shape[0] n2 = tensor2.shape[0] @@ -125,7 +143,7 @@ def is_function(f): :return: `True` if `f` is a function, `False` otherwise. :rtype: bool """ - return type(f) == types.FunctionType or type(f) == types.LambdaType + return isinstance(f, (types.FunctionType, types.LambdaType)) def chebyshev_roots(n): diff --git a/tests/test_adaptive_function.py b/tests/test_adaptive_function.py index ba18d2e92..bce5059d7 100644 --- a/tests/test_adaptive_function.py +++ b/tests/test_adaptive_function.py @@ -1,62 +1,85 @@ import torch import pytest -from pina.adaptive_function import (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh, - AdaptiveSiLU, AdaptiveMish, AdaptiveELU, - AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin, - AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp) +from pina.adaptive_function import ( + AdaptiveReLU, + AdaptiveSigmoid, + AdaptiveTanh, + AdaptiveSiLU, + AdaptiveMish, + AdaptiveELU, + AdaptiveCELU, + AdaptiveGELU, + AdaptiveSoftmin, + AdaptiveSoftmax, + AdaptiveSIREN, + AdaptiveExp, +) -adaptive_function = (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh, - AdaptiveSiLU, AdaptiveMish, AdaptiveELU, - AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin, - AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp) +adaptive_function = ( + AdaptiveReLU, + AdaptiveSigmoid, + AdaptiveTanh, + AdaptiveSiLU, + AdaptiveMish, + AdaptiveELU, + AdaptiveCELU, + AdaptiveGELU, + AdaptiveSoftmin, + AdaptiveSoftmax, + AdaptiveSIREN, + AdaptiveExp, +) x = torch.rand(10, requires_grad=True) + @pytest.mark.parametrize("Func", adaptive_function) def test_constructor(Func): - if Func.__name__ == 'AdaptiveExp': + if Func.__name__ == "AdaptiveExp": # simple Func() # setting values - af = Func(alpha=1., beta=2.) + af = Func(alpha=1.0, beta=2.0) assert af.alpha.requires_grad assert af.beta.requires_grad - assert af.alpha == 1. - assert af.beta == 2. + assert af.alpha == 1.0 + assert af.beta == 2.0 else: # simple Func() # setting values - af = Func(alpha=1., beta=2., gamma=3.) 
+ af = Func(alpha=1.0, beta=2.0, gamma=3.0) assert af.alpha.requires_grad assert af.beta.requires_grad assert af.gamma.requires_grad - assert af.alpha == 1. - assert af.beta == 2. - assert af.gamma == 3. + assert af.alpha == 1.0 + assert af.beta == 2.0 + assert af.gamma == 3.0 # fixed variables - af = Func(alpha=1., beta=2., fixed=['alpha']) + af = Func(alpha=1.0, beta=2.0, fixed=["alpha"]) assert af.alpha.requires_grad is False assert af.beta.requires_grad - assert af.alpha == 1. - assert af.beta == 2. + assert af.alpha == 1.0 + assert af.beta == 2.0 with pytest.raises(TypeError): - Func(alpha=1., beta=2., fixed=['delta']) + Func(alpha=1.0, beta=2.0, fixed=["delta"]) with pytest.raises(ValueError): - Func(alpha='s') + Func(alpha="s") Func(alpha=1) -@pytest.mark.parametrize("Func", adaptive_function) + +@pytest.mark.parametrize("Func", adaptive_function) def test_forward(Func): af = Func() af(x) + @pytest.mark.parametrize("Func", adaptive_function) def test_backward(Func): af = Func() y = af(x) - y.mean().backward() \ No newline at end of file + y.mean().backward() diff --git a/tests/test_blocks/test_convolution.py b/tests/test_blocks/test_convolution.py index fd8a70aa3..f8206196f 100644 --- a/tests/test_blocks/test_convolution.py +++ b/tests/test_blocks/test_convolution.py @@ -18,8 +18,8 @@ def _transform_image(image): # initializing transfomed image coordinates = torch.zeros( - [channels, prod(dimension), - len(dimension) + 1]).to(image.device) + [channels, prod(dimension), len(dimension) + 1] + ).to(image.device) # creating the n dimensional mesh grid values_mesh = [ @@ -43,9 +43,13 @@ class MLP(torch.nn.Module): def __init__(self) -> None: super().__init__() - self.model = torch.nn.Sequential(torch.nn.Linear(2, 8), torch.nn.ReLU(), - torch.nn.Linear(8, 8), torch.nn.ReLU(), - torch.nn.Linear(8, 1)) + self.model = torch.nn.Sequential( + torch.nn.Linear(2, 8), + torch.nn.ReLU(), + torch.nn.Linear(8, 8), + torch.nn.ReLU(), + torch.nn.Linear(8, 1), + ) def forward(self, x): return self.model(x) @@ -61,7 +65,7 @@ def forward(self, x): "domain": [10, 10], "start": [0, 0], "jumps": [3, 3], - "direction": [1, 1.] 
+ "direction": [1, 1.0], } dim_filter = len(dim) dim_input = (batch, channel_input, 10, dim_filter) @@ -73,53 +77,42 @@ def forward(self, x): def test_constructor(): model = MLP - conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model) - conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=None) + conv = ContinuousConvBlock( + channel_input, channel_output, dim, stride, model=model + ) + conv = ContinuousConvBlock( + channel_input, channel_output, dim, stride, model=None + ) def test_forward(): model = MLP # simple forward - conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model) + conv = ContinuousConvBlock( + channel_input, channel_output, dim, stride, model=model + ) conv(x) # simple forward with optimization - conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model, - optimize=True) + conv = ContinuousConvBlock( + channel_input, channel_output, dim, stride, model=model, optimize=True + ) conv(x) def test_backward(): model = MLP - + x = torch.rand(dim_input) x = make_grid(x) x.requires_grad = True # simple backward - conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model) + conv = ContinuousConvBlock( + channel_input, channel_output, dim, stride, model=model + ) conv(x) - l=torch.mean(conv(x)) + l = torch.mean(conv(x)) l.backward() assert x._grad.shape == torch.Size([2, 2, 20, 3]) x = torch.rand(dim_input) @@ -127,14 +120,11 @@ def test_backward(): x.requires_grad = True # simple backward with optimization - conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model, - optimize=True) + conv = ContinuousConvBlock( + channel_input, channel_output, dim, stride, model=model, optimize=True + ) conv(x) - l=torch.mean(conv(x)) + l = torch.mean(conv(x)) l.backward() assert x._grad.shape == torch.Size([2, 2, 20, 3]) @@ -143,17 +133,13 @@ def test_transpose(): model = MLP # simple transpose - conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model) - - conv2 = ContinuousConvBlock(channel_output, - channel_input, - dim, - stride, - model=model) + conv = ContinuousConvBlock( + channel_input, channel_output, dim, stride, model=model + ) + + conv2 = ContinuousConvBlock( + channel_output, channel_input, dim, stride, model=model + ) integrals = conv(x) conv2.transpose(integrals[..., -1], x) diff --git a/tests/test_blocks/test_embedding.py b/tests/test_blocks/test_embedding.py index db10695a3..e8fa6ebce 100644 --- a/tests/test_blocks/test_embedding.py +++ b/tests/test_blocks/test_embedding.py @@ -6,55 +6,66 @@ # test tolerance tol = 1e-6 + def check_same_columns(tensor): # Get the first column and compute residual residual = tensor - tensor[0] zeros = torch.zeros_like(residual) # Compare each column with the first column - all_same = torch.allclose(input=residual,other=zeros,atol=tol) + all_same = torch.allclose(input=residual, other=zeros, atol=tol) return all_same + def grad(u, x): """ Compute the first derivative of u with respect to x. 
""" - return torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u), - create_graph=True, allow_unused=True, - retain_graph=True)[0] + return torch.autograd.grad( + u, + x, + grad_outputs=torch.ones_like(u), + create_graph=True, + allow_unused=True, + retain_graph=True, + )[0] + def test_constructor_PeriodicBoundaryEmbedding(): PeriodicBoundaryEmbedding(input_dimension=1, periods=2) - PeriodicBoundaryEmbedding(input_dimension=1, periods={'x': 3, 'y' : 4}) - PeriodicBoundaryEmbedding(input_dimension=1, periods={0: 3, 1 : 4}) + PeriodicBoundaryEmbedding(input_dimension=1, periods={"x": 3, "y": 4}) + PeriodicBoundaryEmbedding(input_dimension=1, periods={0: 3, 1: 4}) PeriodicBoundaryEmbedding(input_dimension=1, periods=2, output_dimension=10) with pytest.raises(TypeError): PeriodicBoundaryEmbedding() with pytest.raises(ValueError): - PeriodicBoundaryEmbedding(input_dimension=1., periods=1) - PeriodicBoundaryEmbedding(input_dimension=1, periods=1, - output_dimension=1.) - PeriodicBoundaryEmbedding(input_dimension=1, periods={'x':'x'}) - PeriodicBoundaryEmbedding(input_dimension=1, periods={0:'x'}) + PeriodicBoundaryEmbedding(input_dimension=1.0, periods=1) + PeriodicBoundaryEmbedding( + input_dimension=1, periods=1, output_dimension=1.0 + ) + PeriodicBoundaryEmbedding(input_dimension=1, periods={"x": "x"}) + PeriodicBoundaryEmbedding(input_dimension=1, periods={0: "x"}) @pytest.mark.parametrize("period", [1, 4, 10]) @pytest.mark.parametrize("input_dimension", [1, 2, 3]) -def test_forward_backward_same_period_PeriodicBoundaryEmbedding(input_dimension, - period): +def test_forward_backward_same_period_PeriodicBoundaryEmbedding( + input_dimension, period +): func = torch.nn.Sequential( - PeriodicBoundaryEmbedding(input_dimension=input_dimension, - output_dimension=60, periods=period), + PeriodicBoundaryEmbedding( + input_dimension=input_dimension, output_dimension=60, periods=period + ), torch.nn.Tanh(), torch.nn.Linear(60, 60), torch.nn.Tanh(), - torch.nn.Linear(60, 1) + torch.nn.Linear(60, 1), ) # coordinates - x = period * torch.tensor([[0.],[1.]]) + x = period * torch.tensor([[0.0], [1.0]]) if input_dimension == 2: - x = torch.cartesian_prod(x.flatten(),x.flatten()) + x = torch.cartesian_prod(x.flatten(), x.flatten()) elif input_dimension == 3: - x = torch.cartesian_prod(x.flatten(),x.flatten(),x.flatten()) + x = torch.cartesian_prod(x.flatten(), x.flatten(), x.flatten()) x.requires_grad = True # output f = func(x) @@ -63,29 +74,32 @@ def test_forward_backward_same_period_PeriodicBoundaryEmbedding(input_dimension, loss = f.mean() loss.backward() + def test_constructor_FourierFeatureEmbedding(): - FourierFeatureEmbedding(input_dimension=1, output_dimension=20, - sigma=1) - with pytest.raises(TypeError): + FourierFeatureEmbedding(input_dimension=1, output_dimension=20, sigma=1) + with pytest.raises(TypeError): FourierFeatureEmbedding() - with pytest.raises(RuntimeError): + with pytest.raises(RuntimeError): FourierFeatureEmbedding(input_dimension=1, output_dimension=3, sigma=1) with pytest.raises(ValueError): - FourierFeatureEmbedding(input_dimension='x', output_dimension=20, - sigma=1) - FourierFeatureEmbedding(input_dimension=1, output_dimension='x', - sigma=1) - FourierFeatureEmbedding(input_dimension=1, output_dimension=20, - sigma='x') + FourierFeatureEmbedding( + input_dimension="x", output_dimension=20, sigma=1 + ) + FourierFeatureEmbedding( + input_dimension=1, output_dimension="x", sigma=1 + ) + FourierFeatureEmbedding( + input_dimension=1, output_dimension=20, sigma="x" + ) + 
@pytest.mark.parametrize("output_dimension", [2, 4, 6]) @pytest.mark.parametrize("input_dimension", [1, 2, 3]) @pytest.mark.parametrize("sigma", [10, 1, 0.1]) -def test_forward_backward_FourierFeatureEmbedding(input_dimension, - output_dimension, - sigma): - func = FourierFeatureEmbedding(input_dimension, output_dimension, - sigma) +def test_forward_backward_FourierFeatureEmbedding( + input_dimension, output_dimension, sigma +): + func = FourierFeatureEmbedding(input_dimension, output_dimension, sigma) # coordinates x = torch.rand((10, input_dimension), requires_grad=True) # output @@ -93,4 +107,4 @@ def test_forward_backward_FourierFeatureEmbedding(input_dimension, assert f.shape[-1] == output_dimension # compute backward loss = f.mean() - loss.backward() \ No newline at end of file + loss.backward() diff --git a/tests/test_blocks/test_fourier.py b/tests/test_blocks/test_fourier.py index 2ac41cd73..75265fe33 100644 --- a/tests/test_blocks/test_fourier.py +++ b/tests/test_blocks/test_fourier.py @@ -7,23 +7,29 @@ def test_constructor_1d(): - FourierBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=5) + FourierBlock1D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=5, + ) def test_forward_1d(): - sconv = FourierBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=4) + sconv = FourierBlock1D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=4, + ) x = torch.rand(batch, input_numb_fields, 10) sconv(x) def test_backward_1d(): - sconv = FourierBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=4) + sconv = FourierBlock1D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=4, + ) x = torch.rand(batch, input_numb_fields, 10) x.requires_grad = True sconv(x) @@ -33,23 +39,29 @@ def test_backward_1d(): def test_constructor_2d(): - FourierBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) + FourierBlock2D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4], + ) def test_forward_2d(): - sconv = FourierBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) + sconv = FourierBlock2D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 10) sconv(x) def test_backward_2d(): - sconv = FourierBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) + sconv = FourierBlock2D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 10) x.requires_grad = True sconv(x) @@ -59,23 +71,29 @@ def test_backward_2d(): def test_constructor_3d(): - FourierBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) + FourierBlock3D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4], + ) def test_forward_3d(): - sconv = FourierBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) + sconv = FourierBlock3D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 
10, 10) sconv(x) def test_backward_3d(): - sconv = FourierBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) + sconv = FourierBlock3D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 10, 10) x.requires_grad = True sconv(x) diff --git a/tests/test_blocks/test_low_rank_block.py b/tests/test_blocks/test_low_rank_block.py index 70ceed057..0e6ddcb89 100644 --- a/tests/test_blocks/test_low_rank_block.py +++ b/tests/test_blocks/test_low_rank_block.py @@ -5,54 +5,66 @@ from pina import LabelTensor -input_dimensions=2 -embedding_dimenion=1 -rank=4 -inner_size=20 -n_layers=2 -func=torch.nn.Tanh -bias=True +input_dimensions = 2 +embedding_dimenion = 1 +rank = 4 +inner_size = 20 +n_layers = 2 +func = torch.nn.Tanh +bias = True + def test_constructor(): - LowRankBlock(input_dimensions=input_dimensions, - embedding_dimenion=embedding_dimenion, - rank=rank, - inner_size=inner_size, - n_layers=n_layers, - func=func, - bias=bias) - + LowRankBlock( + input_dimensions=input_dimensions, + embedding_dimenion=embedding_dimenion, + rank=rank, + inner_size=inner_size, + n_layers=n_layers, + func=func, + bias=bias, + ) + + def test_constructor_wrong(): with pytest.raises(ValueError): - LowRankBlock(input_dimensions=input_dimensions, - embedding_dimenion=embedding_dimenion, - rank=0.5, - inner_size=inner_size, - n_layers=n_layers, - func=func, - bias=bias) - + LowRankBlock( + input_dimensions=input_dimensions, + embedding_dimenion=embedding_dimenion, + rank=0.5, + inner_size=inner_size, + n_layers=n_layers, + func=func, + bias=bias, + ) + + def test_forward(): - block = LowRankBlock(input_dimensions=input_dimensions, - embedding_dimenion=embedding_dimenion, - rank=rank, - inner_size=inner_size, - n_layers=n_layers, - func=func, - bias=bias) - data = LabelTensor(torch.rand(10, 30, 3), labels=['x', 'y', 'u']) - block(data.extract('u'), data.extract(['x', 'y'])) + block = LowRankBlock( + input_dimensions=input_dimensions, + embedding_dimenion=embedding_dimenion, + rank=rank, + inner_size=inner_size, + n_layers=n_layers, + func=func, + bias=bias, + ) + data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"]) + block(data.extract("u"), data.extract(["x", "y"])) + def test_backward(): - block = LowRankBlock(input_dimensions=input_dimensions, - embedding_dimenion=embedding_dimenion, - rank=rank, - inner_size=inner_size, - n_layers=n_layers, - func=func, - bias=bias) - data = LabelTensor(torch.rand(10, 30, 3), labels=['x', 'y', 'u']) + block = LowRankBlock( + input_dimensions=input_dimensions, + embedding_dimenion=embedding_dimenion, + rank=rank, + inner_size=inner_size, + n_layers=n_layers, + func=func, + bias=bias, + ) + data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"]) data.requires_grad_(True) - out = block(data.extract('u'), data.extract(['x', 'y'])) + out = block(data.extract("u"), data.extract(["x", "y"])) loss = out.mean() - loss.backward() \ No newline at end of file + loss.backward() diff --git a/tests/test_blocks/test_orthogonal.py b/tests/test_blocks/test_orthogonal.py index ee107fecb..e222c6bb5 100644 --- a/tests/test_blocks/test_orthogonal.py +++ b/tests/test_blocks/test_orthogonal.py @@ -8,10 +8,11 @@ torch.randn(10, 3), torch.rand(100, 5), torch.randn(5, 5), - ] +] list_prohibited_matrices_dim0 = list_matrices[:-1] + @pytest.mark.parametrize("dim", [-1, 0, 1, None]) @pytest.mark.parametrize("requires_grad", [True, False, None]) 
def test_constructor(dim, requires_grad): @@ -29,11 +30,13 @@ def test_constructor(dim, requires_grad): if requires_grad is not None: assert block.requires_grad == requires_grad + def test_wrong_constructor(): with pytest.raises(IndexError): - OrthogonalBlock(2) + OrthogonalBlock(2) with pytest.raises(ValueError): - OrthogonalBlock('a') + OrthogonalBlock("a") + @pytest.mark.parametrize("V", list_matrices) def test_forward(V): @@ -42,7 +45,10 @@ def test_forward(V): V_orth = orth(V) V_orth_row = orth_row(V.T) assert torch.allclose(V_orth.T @ V_orth, torch.eye(V.shape[1]), atol=1e-6) - assert torch.allclose(V_orth_row @ V_orth_row.T, torch.eye(V.shape[1]), atol=1e-6) + assert torch.allclose( + V_orth_row @ V_orth_row.T, torch.eye(V.shape[1]), atol=1e-6 + ) + @pytest.mark.parametrize("V", list_matrices) def test_backward(V): @@ -51,6 +57,7 @@ def test_backward(V): loss = V_orth.mean() loss.backward() + @pytest.mark.parametrize("V", list_matrices) def test_wrong_backward(V): orth = OrthogonalBlock(requires_grad=False) @@ -59,10 +66,10 @@ def test_wrong_backward(V): with pytest.raises(RuntimeError): loss.backward() + @pytest.mark.parametrize("V", list_prohibited_matrices_dim0) def test_forward_prohibited(V): orth = OrthogonalBlock(0) with pytest.raises(Warning): V_orth = orth(V) assert V.shape[0] > V.shape[1] - diff --git a/tests/test_blocks/test_pod.py b/tests/test_blocks/test_pod.py index 7a222b99d..66a31d558 100644 --- a/tests/test_blocks/test_pod.py +++ b/tests/test_blocks/test_pod.py @@ -4,7 +4,10 @@ from pina.model.block.pod_block import PODBlock x = torch.linspace(-1, 1, 100) -toy_snapshots = torch.vstack([torch.exp(-x**2)*c for c in torch.linspace(0, 1, 10)]) +toy_snapshots = torch.vstack( + [torch.exp(-(x**2)) * c for c in torch.linspace(0, 1, 10)] +) + def test_constructor(): pod = PODBlock(2) @@ -23,6 +26,7 @@ def test_fit(rank, scale): assert pod.rank == rank assert pod.scale_coefficients == scale + @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("rank", [1, 2, 10]) def test_fit(rank, scale): @@ -33,15 +37,16 @@ def test_fit(rank, scale): assert pod.basis.shape == (rank, dof) assert pod._basis.shape == (n_snap, dof) if scale is True: - assert pod._scaler['mean'].shape == (n_snap,) - assert pod._scaler['std'].shape == (n_snap,) - assert pod.scaler['mean'].shape == (rank,) - assert pod.scaler['std'].shape == (rank,) - assert pod.scaler['mean'].shape[0] == pod.basis.shape[0] + assert pod._scaler["mean"].shape == (n_snap,) + assert pod._scaler["std"].shape == (n_snap,) + assert pod.scaler["mean"].shape == (rank,) + assert pod.scaler["std"].shape == (rank,) + assert pod.scaler["mean"].shape[0] == pod.basis.shape[0] else: assert pod._scaler == None assert pod.scaler == None + def test_forward(): pod = PODBlock(1) pod.fit(toy_snapshots) @@ -63,6 +68,7 @@ def test_forward(): torch.testing.assert_close(c.mean(dim=0), torch.zeros(pod.rank)) torch.testing.assert_close(c.std(dim=0), torch.ones(pod.rank)) + @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("rank", [1, 2, 10]) def test_expand(rank, scale): @@ -72,15 +78,16 @@ def test_expand(rank, scale): torch.testing.assert_close(pod.expand(c), toy_snapshots) torch.testing.assert_close(pod.expand(c[0]), toy_snapshots[0].unsqueeze(0)) + @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("rank", [1, 2, 10]) def test_reduce_expand(rank, scale): pod = PODBlock(rank, scale) pod.fit(toy_snapshots) torch.testing.assert_close( - pod.expand(pod.reduce(toy_snapshots)), - 
toy_snapshots) + pod.expand(pod.reduce(toy_snapshots)), toy_snapshots + ) torch.testing.assert_close( - pod.expand(pod.reduce(toy_snapshots[0])), - toy_snapshots[0].unsqueeze(0)) - # torch.testing.assert_close(pod.expand(pod.reduce(c[0])), c[0]) \ No newline at end of file + pod.expand(pod.reduce(toy_snapshots[0])), toy_snapshots[0].unsqueeze(0) + ) + # torch.testing.assert_close(pod.expand(pod.reduce(c[0])), c[0]) diff --git a/tests/test_blocks/test_rbf.py b/tests/test_blocks/test_rbf.py index ff33d692c..65912fb76 100644 --- a/tests/test_blocks/test_rbf.py +++ b/tests/test_blocks/test_rbf.py @@ -6,26 +6,42 @@ x = torch.linspace(-1, 1, 100) toy_params = torch.linspace(0, 1, 10).unsqueeze(1) -toy_snapshots = torch.vstack([torch.exp(-x**2)*c for c in toy_params]) +toy_snapshots = torch.vstack([torch.exp(-(x**2)) * c for c in toy_params]) toy_params_test = torch.linspace(0, 1, 3).unsqueeze(1) -toy_snapshots_test = torch.vstack([torch.exp(-x**2)*c for c in toy_params_test]) +toy_snapshots_test = torch.vstack( + [torch.exp(-(x**2)) * c for c in toy_params_test] +) -kernels = ["linear", "thin_plate_spline", "cubic", "quintic", - "multiquadric", "inverse_multiquadric", "inverse_quadratic", "gaussian"] +kernels = [ + "linear", + "thin_plate_spline", + "cubic", + "quintic", + "multiquadric", + "inverse_multiquadric", + "inverse_quadratic", + "gaussian", +] -noscale_invariant_kernels = ["multiquadric", "inverse_multiquadric", - "inverse_quadratic", "gaussian"] +noscale_invariant_kernels = [ + "multiquadric", + "inverse_multiquadric", + "inverse_quadratic", + "gaussian", +] scale_invariant_kernels = ["linear", "thin_plate_spline", "cubic", "quintic"] + def test_constructor_default(): rbf = RBFBlock() assert rbf.kernel == "thin_plate_spline" assert rbf.epsilon == 1 - assert rbf.smoothing == 0. + assert rbf.smoothing == 0.0 + @pytest.mark.parametrize("kernel", kernels) -@pytest.mark.parametrize("epsilon", [0.1, 1., 10.]) +@pytest.mark.parametrize("epsilon", [0.1, 1.0, 10.0]) def test_constructor_epsilon(kernel, epsilon): if kernel in scale_invariant_kernels: rbf = RBFBlock(kernel=kernel) @@ -38,15 +54,17 @@ def test_constructor_epsilon(kernel, epsilon): assert rbf.kernel == kernel assert rbf.epsilon == epsilon - assert rbf.smoothing == 0. 
+ assert rbf.smoothing == 0.0 + @pytest.mark.parametrize("kernel", kernels) -@pytest.mark.parametrize("epsilon", [0.1, 1., 10.]) +@pytest.mark.parametrize("epsilon", [0.1, 1.0, 10.0]) @pytest.mark.parametrize("degree", [2, 3, 4]) @pytest.mark.parametrize("smoothing", [1e-5, 1e-3, 1e-1]) def test_constructor_all(kernel, epsilon, degree, smoothing): - rbf = RBFBlock(kernel=kernel, epsilon=epsilon, degree=degree, - smoothing=smoothing) + rbf = RBFBlock( + kernel=kernel, epsilon=epsilon, degree=degree, smoothing=smoothing + ) assert rbf.kernel == kernel assert rbf.epsilon == epsilon assert rbf.degree == degree @@ -58,16 +76,21 @@ def test_constructor_all(kernel, epsilon, degree, smoothing): assert rbf._scale == None assert rbf._coeffs == None + def test_fit(): rbf = RBFBlock() rbf.fit(toy_params, toy_snapshots) ndim = toy_params.shape[1] torch.testing.assert_close(rbf.y, toy_params) torch.testing.assert_close(rbf.d, toy_snapshots) - assert rbf.powers.shape == (math.comb(rbf.degree+ndim, ndim), ndim) + assert rbf.powers.shape == (math.comb(rbf.degree + ndim, ndim), ndim) assert rbf._shift.shape == (ndim,) assert rbf._scale.shape == (ndim,) - assert rbf._coeffs.shape == (rbf.powers.shape[0]+toy_snapshots.shape[0], toy_snapshots.shape[1]) + assert rbf._coeffs.shape == ( + rbf.powers.shape[0] + toy_snapshots.shape[0], + toy_snapshots.shape[1], + ) + def test_forward(): rbf = RBFBlock() @@ -76,10 +99,10 @@ def test_forward(): assert c.shape == toy_snapshots.shape torch.testing.assert_close(c, toy_snapshots) + def test_forward_unseen_parameters(): rbf = RBFBlock() rbf.fit(toy_params, toy_snapshots) c = rbf(toy_params_test) assert c.shape == toy_snapshots_test.shape torch.testing.assert_close(c, toy_snapshots_test) - diff --git a/tests/test_blocks/test_residual.py b/tests/test_blocks/test_residual.py index dea6eb030..37f54f27d 100644 --- a/tests/test_blocks/test_residual.py +++ b/tests/test_blocks/test_residual.py @@ -7,10 +7,9 @@ def test_constructor_residual_block(): res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4) - res_block = ResidualBlock(input_dim=10, - output_dim=3, - hidden_dim=4, - spectral_norm=True) + res_block = ResidualBlock( + input_dim=10, output_dim=3, hidden_dim=4, spectral_norm=True + ) def test_forward_residual_block(): @@ -22,8 +21,9 @@ def test_forward_residual_block(): assert y.shape[1] == 3 assert y.shape[0] == x.shape[0] + def test_backward_residual_block(): - + res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4) x = torch.rand(size=(80, 10)) @@ -31,27 +31,37 @@ def test_backward_residual_block(): y = res_block(x) l = torch.mean(y) l.backward() - assert x._grad.shape == torch.Size([80,10]) + assert x._grad.shape == torch.Size([80, 10]) + def test_constructor_no_activation_no_dropout(): linear_layer = nn.Linear(10, 20) enhanced_linear = EnhancedLinear(linear_layer) - assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + assert len(list(enhanced_linear.parameters())) == len( + list(linear_layer.parameters()) + ) + def test_constructor_with_activation_no_dropout(): linear_layer = nn.Linear(10, 20) activation = nn.ReLU() enhanced_linear = EnhancedLinear(linear_layer, activation) - assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters())) + assert len(list(enhanced_linear.parameters())) == len( + list(linear_layer.parameters()) + ) + len(list(activation.parameters())) + def test_constructor_no_activation_with_dropout(): linear_layer = 
nn.Linear(10, 20) dropout_prob = 0.5 enhanced_linear = EnhancedLinear(linear_layer, dropout=dropout_prob) - assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + assert len(list(enhanced_linear.parameters())) == len( + list(linear_layer.parameters()) + ) + def test_constructor_with_activation_with_dropout(): linear_layer = nn.Linear(10, 20) @@ -59,7 +69,10 @@ def test_constructor_with_activation_with_dropout(): dropout_prob = 0.5 enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob) - assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters())) + assert len(list(enhanced_linear.parameters())) == len( + list(linear_layer.parameters()) + ) + len(list(activation.parameters())) + def test_forward_enhanced_linear_no_dropout(): @@ -70,8 +83,9 @@ def test_forward_enhanced_linear_no_dropout(): assert y.shape[1] == 3 assert y.shape[0] == x.shape[0] + def test_backward_enhanced_linear_no_dropout(): - + enhanced_linear = EnhancedLinear(nn.Linear(10, 3)) x = torch.rand(size=(80, 10)) @@ -81,6 +95,7 @@ def test_backward_enhanced_linear_no_dropout(): l.backward() assert x._grad.shape == torch.Size([80, 10]) + def test_forward_enhanced_linear_dropout(): enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5) @@ -90,8 +105,9 @@ def test_forward_enhanced_linear_dropout(): assert y.shape[1] == 3 assert y.shape[0] == x.shape[0] + def test_backward_enhanced_linear_dropout(): - + enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5) x = torch.rand(size=(80, 10)) diff --git a/tests/test_blocks/test_spectral_convolution.py b/tests/test_blocks/test_spectral_convolution.py index 26447c4aa..ba4b4a8c5 100644 --- a/tests/test_blocks/test_spectral_convolution.py +++ b/tests/test_blocks/test_spectral_convolution.py @@ -1,4 +1,8 @@ -from pina.model.block import SpectralConvBlock1D, SpectralConvBlock2D, SpectralConvBlock3D +from pina.model.block import ( + SpectralConvBlock1D, + SpectralConvBlock2D, + SpectralConvBlock3D, +) import torch input_numb_fields = 3 @@ -7,78 +11,96 @@ def test_constructor_1d(): - SpectralConvBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=5) + SpectralConvBlock1D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=5, + ) def test_forward_1d(): - sconv = SpectralConvBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=4) + sconv = SpectralConvBlock1D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=4, + ) x = torch.rand(batch, input_numb_fields, 10) sconv(x) def test_backward_1d(): - sconv = SpectralConvBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=4) + sconv = SpectralConvBlock1D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=4, + ) x = torch.rand(batch, input_numb_fields, 10) x.requires_grad = True sconv(x) - l=torch.mean(sconv(x)) + l = torch.mean(sconv(x)) l.backward() - assert x._grad.shape == torch.Size([5,3,10]) + assert x._grad.shape == torch.Size([5, 3, 10]) def test_constructor_2d(): - SpectralConvBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) + SpectralConvBlock2D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4], + ) def test_forward_2d(): - sconv = 
SpectralConvBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) + sconv = SpectralConvBlock2D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 10) sconv(x) def test_backward_2d(): - sconv = SpectralConvBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) + sconv = SpectralConvBlock2D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 10) x.requires_grad = True sconv(x) - l=torch.mean(sconv(x)) + l = torch.mean(sconv(x)) l.backward() - assert x._grad.shape == torch.Size([5,3,10,10]) + assert x._grad.shape == torch.Size([5, 3, 10, 10]) def test_constructor_3d(): - SpectralConvBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) + SpectralConvBlock3D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4], + ) def test_forward_3d(): - sconv = SpectralConvBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) + sconv = SpectralConvBlock3D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 10, 10) sconv(x) def test_backward_3d(): - sconv = SpectralConvBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) + sconv = SpectralConvBlock3D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4], + ) x = torch.rand(batch, input_numb_fields, 10, 10, 10) x.requires_grad = True sconv(x) - l=torch.mean(sconv(x)) + l = torch.mean(sconv(x)) l.backward() - assert x._grad.shape == torch.Size([5,3,10,10,10]) + assert x._grad.shape == torch.Size([5, 3, 10, 10, 10]) diff --git a/tests/test_callback/test_adaptive_refinement_callback.py b/tests/test_callback/test_adaptive_refinement_callback.py index dacd077bb..dcabef13a 100644 --- a/tests/test_callback/test_adaptive_refinement_callback.py +++ b/tests/test_callback/test_adaptive_refinement_callback.py @@ -7,12 +7,13 @@ # make the problem poisson_problem = Poisson() -boundaries = ['g1', 'g2', 'g3', 'g4'] +boundaries = ["g1", "g2", "g3", "g4"] n = 10 -poisson_problem.discretise_domain(n, 'grid', domains=boundaries) -poisson_problem.discretise_domain(n, 'grid', domains='D') -model = FeedForward(len(poisson_problem.input_variables), - len(poisson_problem.output_variables)) +poisson_problem.discretise_domain(n, "grid", domains=boundaries) +poisson_problem.discretise_domain(n, "grid", domains="D") +model = FeedForward( + len(poisson_problem.input_variables), len(poisson_problem.output_variables) +) # make the solver solver = PINN(problem=poisson_problem, model=model) diff --git a/tests/test_callback/test_metric_tracker.py b/tests/test_callback/test_metric_tracker.py index 16c685318..de14694e5 100644 --- a/tests/test_callback/test_metric_tracker.py +++ b/tests/test_callback/test_metric_tracker.py @@ -7,12 +7,13 @@ # make the problem poisson_problem = Poisson() -boundaries = ['g1', 'g2', 'g3', 'g4'] +boundaries = ["g1", "g2", "g3", "g4"] n = 10 -poisson_problem.discretise_domain(n, 'grid', domains=boundaries) -poisson_problem.discretise_domain(n, 'grid', domains='D') -model = FeedForward(len(poisson_problem.input_variables), - 
len(poisson_problem.output_variables)) +poisson_problem.discretise_domain(n, "grid", domains=boundaries) +poisson_problem.discretise_domain(n, "grid", domains="D") +model = FeedForward( + len(poisson_problem.input_variables), len(poisson_problem.output_variables) +) # make the solver solver = PINN(problem=poisson_problem, model=model) @@ -21,6 +22,7 @@ def test_metric_tracker_constructor(): MetricTracker() + # def test_metric_tracker_routine(): #TODO revert # # make the trainer # trainer = Trainer(solver=solver, @@ -35,5 +37,3 @@ def test_metric_tracker_constructor(): # # assert the logged metrics are correct # logged_metrics = sorted(list(metrics.keys())) # assert logged_metrics == ['train_loss_epoch', 'train_loss_step', 'val_loss'] - - diff --git a/tests/test_callback/test_optimizer_callback.py b/tests/test_callback/test_optimizer_callback.py index 8fc297bb6..6250c7ace 100644 --- a/tests/test_callback/test_optimizer_callback.py +++ b/tests/test_callback/test_optimizer_callback.py @@ -10,18 +10,20 @@ # make the problem poisson_problem = Poisson() -boundaries = ['g1', 'g2', 'g3', 'g4'] +boundaries = ["g1", "g2", "g3", "g4"] n = 10 -poisson_problem.discretise_domain(n, 'grid', domains=boundaries) -poisson_problem.discretise_domain(n, 'grid', domains='D') -model = FeedForward(len(poisson_problem.input_variables), - len(poisson_problem.output_variables)) +poisson_problem.discretise_domain(n, "grid", domains=boundaries) +poisson_problem.discretise_domain(n, "grid", domains="D") +model = FeedForward( + len(poisson_problem.input_variables), len(poisson_problem.output_variables) +) # make the solver solver = PINN(problem=poisson_problem, model=model) adam_optimizer = TorchOptimizer(torch.optim.Adam, lr=0.01) -lbfgs_optimizer = TorchOptimizer(torch.optim.LBFGS, lr= 0.001) +lbfgs_optimizer = TorchOptimizer(torch.optim.LBFGS, lr=0.001) + def test_switch_optimizer_constructor(): SwitchOptimizer(adam_optimizer, epoch_switch=10) diff --git a/tests/test_callback/test_progress_bar.py b/tests/test_callback/test_progress_bar.py index 3a2f4fc47..cba623780 100644 --- a/tests/test_callback/test_progress_bar.py +++ b/tests/test_callback/test_progress_bar.py @@ -30,4 +30,4 @@ # accelerator='cpu', # max_epochs=5) # trainer.train() -# # TODO there should be a check that the correct metrics are displayed \ No newline at end of file +# # TODO there should be a check that the correct metrics are displayed diff --git a/tests/test_condition.py b/tests/test_condition.py index 2596e5f78..9199f2bd9 100644 --- a/tests/test_condition.py +++ b/tests/test_condition.py @@ -23,10 +23,10 @@ example_domain = CartesianDomain({"x": [0, 1], "y": [0, 1]}) -input_tensor = torch.rand((10,3)) -target_tensor = torch.rand((10,2)) -input_lt = LabelTensor(torch.rand((10,3)), ["x", "y", "z"]) -target_lt = LabelTensor(torch.rand((10,2)), ["a", "b"]) +input_tensor = torch.rand((10, 3)) +target_tensor = torch.rand((10, 2)) +input_lt = LabelTensor(torch.rand((10, 3)), ["x", "y", "z"]) +target_lt = LabelTensor(torch.rand((10, 2)), ["a", "b"]) x = torch.rand(10, 20, 2) pos = torch.rand(10, 20, 2) @@ -105,12 +105,12 @@ def test_init_input_target(): target = [target_graph[0], target_graph_lt[0]] with pytest.raises(ValueError): Condition(input=input, target=target) - + input_graph_lt[0].x.labels = ["a", "b"] with pytest.raises(ValueError): Condition(input=input_graph_lt, target=target_graph_lt) input_graph_lt[0].x.labels = ["u", "v"] - + def test_init_domain_equation(): cond = Condition(domain=example_domain, equation=FixedValue(0.0)) @@ -136,8 
+136,10 @@ def test_init_input_equation(): Condition(input=3.0, equation="example") with pytest.raises(ValueError): Condition(input=example_domain, equation=input_graph) + + def test_init_data_condition(): cond = Condition(input=input_lt) assert isinstance(cond, TensorDataCondition) @@ -149,4 +151,3 @@ def test_init_data_condition(): assert isinstance(cond, GraphDataCondition) cond = Condition(input=input_graph, conditional_variables=torch.tensor(1)) assert isinstance(cond, GraphDataCondition) - diff --git a/tests/test_data/test_graph_dataset.py b/tests/test_data/test_graph_dataset.py index e50e2c4fa..1fe0c890d 100644 --- a/tests/test_data/test_graph_dataset.py +++ b/tests/test_data/test_graph_dataset.py @@ -78,20 +78,12 @@ def test_getitem(conditions_dict, max_conditions_lengths): data = dataset[50] assert isinstance(data, dict) assert all([isinstance(d["input"], Data) for d in data.values()]) + assert all([isinstance(d["target"], torch.Tensor) for d in data.values()]) assert all( - [isinstance(d["target"], torch.Tensor) for d in data.values()] + [d["input"].x.shape == torch.Size((20, 10)) for d in data.values()] ) assert all( - [ - d["input"].x.shape == torch.Size((20, 10)) - for d in data.values() - ] - ) - assert all( - [ - d["target"].shape == torch.Size((20, 10)) - for d in data.values() - ] + [d["target"].shape == torch.Size((20, 10)) for d in data.values()] ) assert all( [ @@ -99,27 +91,17 @@ def test_getitem(conditions_dict, max_conditions_lengths): for d in data.values() ] ) - assert all( - [d["input"].edge_attr.shape[0] == 60 for d in data.values()] - ) + assert all([d["input"].edge_attr.shape[0] == 60 for d in data.values()]) data = dataset.fetch_from_idx_list([i for i in range(20)]) assert isinstance(data, dict) assert all([isinstance(d["input"], Data) for d in data.values()]) + assert all([isinstance(d["target"], torch.Tensor) for d in data.values()]) assert all( - [isinstance(d["target"], torch.Tensor) for d in data.values()] + [d["input"].x.shape == torch.Size((400, 10)) for d in data.values()] ) assert all( - [ - d["input"].x.shape == torch.Size((400, 10)) - for d in data.values() - ] - ) - assert all( - [ - d["target"].shape == torch.Size((400, 10)) - for d in data.values() - ] + [d["target"].shape == torch.Size((400, 10)) for d in data.values()] ) assert all( [ @@ -127,6 +109,4 @@ def test_getitem(conditions_dict, max_conditions_lengths): for d in data.values() ] ) - assert all( - [d["input"].edge_attr.shape[0] == 1200 for d in data.values()] - ) + assert all([d["input"].edge_attr.shape[0] == 1200 for d in data.values()]) diff --git a/tests/test_data/test_tensor_dataset.py b/tests/test_data/test_tensor_dataset.py index a340576ea..81a122f2f 100644 --- a/tests/test_data/test_tensor_dataset.py +++ b/tests/test_data/test_tensor_dataset.py @@ -9,80 +9,78 @@ output_tensor_2 = torch.rand((50, 2)) conditions_dict_single = { - 'data': { - 'input': input_tensor, - 'target': output_tensor, + "data": { + "input": input_tensor, + "target": output_tensor, } } conditions_dict_single_multi = { - 'data_1': { - 'input': input_tensor, - 'target': output_tensor, + "data_1": { + "input": input_tensor, + "target": output_tensor, + }, + "data_2": { + "input": input_tensor_2, + "target": output_tensor_2, + }, - 'data_2': { - 'input': input_tensor_2, - 'target': output_tensor_2, - } } -max_conditions_lengths_single = { - 'data': 100 -} +max_conditions_lengths_single = {"data": 100} -max_conditions_lengths_multi = { - 'data_1': 100, - 'data_2': 50 -}
+max_conditions_lengths_multi = {"data_1": 100, "data_2": 50} @pytest.mark.parametrize( "conditions_dict, max_conditions_lengths", [ (conditions_dict_single, max_conditions_lengths_single), - (conditions_dict_single_multi, max_conditions_lengths_multi) - ] + (conditions_dict_single_multi, max_conditions_lengths_multi), + ], ) def test_constructor_tensor(conditions_dict, max_conditions_lengths): - dataset = PinaDatasetFactory(conditions_dict, - max_conditions_lengths=max_conditions_lengths, - automatic_batching=True) + dataset = PinaDatasetFactory( + conditions_dict, + max_conditions_lengths=max_conditions_lengths, + automatic_batching=True, + ) assert isinstance(dataset, PinaTensorDataset) def test_getitem_single(): - dataset = PinaDatasetFactory(conditions_dict_single, - max_conditions_lengths=max_conditions_lengths_single, - automatic_batching=False) + dataset = PinaDatasetFactory( + conditions_dict_single, + max_conditions_lengths=max_conditions_lengths_single, + automatic_batching=False, + ) tensors = dataset.fetch_from_idx_list([i for i in range(70)]) assert isinstance(tensors, dict) - assert list(tensors.keys()) == ['data'] - assert sorted(list(tensors['data'].keys())) == [ - 'input', 'target'] - assert isinstance(tensors['data']['input'], torch.Tensor) - assert tensors['data']['input'].shape == torch.Size((70, 10)) - assert isinstance(tensors['data']['target'], torch.Tensor) - assert tensors['data']['target'].shape == torch.Size((70, 2)) + assert list(tensors.keys()) == ["data"] + assert sorted(list(tensors["data"].keys())) == ["input", "target"] + assert isinstance(tensors["data"]["input"], torch.Tensor) + assert tensors["data"]["input"].shape == torch.Size((70, 10)) + assert isinstance(tensors["data"]["target"], torch.Tensor) + assert tensors["data"]["target"].shape == torch.Size((70, 2)) def test_getitem_multi(): - dataset = PinaDatasetFactory(conditions_dict_single_multi, - max_conditions_lengths=max_conditions_lengths_multi, - automatic_batching=False) + dataset = PinaDatasetFactory( + conditions_dict_single_multi, + max_conditions_lengths=max_conditions_lengths_multi, + automatic_batching=False, + ) tensors = dataset.fetch_from_idx_list([i for i in range(70)]) assert isinstance(tensors, dict) - assert list(tensors.keys()) == ['data_1', 'data_2'] - assert sorted(list(tensors['data_1'].keys())) == [ - 'input', 'target'] - assert isinstance(tensors['data_1']['input'], torch.Tensor) - assert tensors['data_1']['input'].shape == torch.Size((70, 10)) - assert isinstance(tensors['data_1']['target'], torch.Tensor) - assert tensors['data_1']['target'].shape == torch.Size((70, 2)) + assert list(tensors.keys()) == ["data_1", "data_2"] + assert sorted(list(tensors["data_1"].keys())) == ["input", "target"] + assert isinstance(tensors["data_1"]["input"], torch.Tensor) + assert tensors["data_1"]["input"].shape == torch.Size((70, 10)) + assert isinstance(tensors["data_1"]["target"], torch.Tensor) + assert tensors["data_1"]["target"].shape == torch.Size((70, 2)) - assert sorted(list(tensors['data_2'].keys())) == [ - 'input', 'target'] - assert isinstance(tensors['data_2']['input'], torch.Tensor) - assert tensors['data_2']['input'].shape == torch.Size((50, 10)) - assert isinstance(tensors['data_2']['target'], torch.Tensor) - assert tensors['data_2']['target'].shape == torch.Size((50, 2)) + assert sorted(list(tensors["data_2"].keys())) == ["input", "target"] + assert isinstance(tensors["data_2"]["input"], torch.Tensor) + assert tensors["data_2"]["input"].shape == torch.Size((50, 10)) + assert 
isinstance(tensors["data_2"]["target"], torch.Tensor) + assert tensors["data_2"]["target"].shape == torch.Size((50, 2)) diff --git a/tests/test_equations/test_equation.py b/tests/test_equations/test_equation.py index 9d036d2e3..096b2d5e7 100644 --- a/tests/test_equations/test_equation.py +++ b/tests/test_equations/test_equation.py @@ -7,15 +7,16 @@ def eq1(input_, output_): u_grad = grad(output_, input_) - u1_xx = grad(u_grad, input_, components=['du1dx'], d=['x']) - u2_xy = grad(u_grad, input_, components=['du2dx'], d=['y']) + u1_xx = grad(u_grad, input_, components=["du1dx"], d=["x"]) + u2_xy = grad(u_grad, input_, components=["du2dx"], d=["y"]) return torch.hstack([u1_xx, u2_xy]) def eq2(input_, output_): - force_term = (torch.sin(input_.extract(['x']) * torch.pi) * - torch.sin(input_.extract(['y']) * torch.pi)) - delta_u = laplacian(output_.extract(['u1']), input_) + force_term = torch.sin(input_.extract(["x"]) * torch.pi) * torch.sin( + input_.extract(["y"]) * torch.pi + ) + delta_u = laplacian(output_.extract(["u1"]), input_) return delta_u - force_term @@ -36,10 +37,10 @@ def test_residual(): eq_1 = Equation(eq1) eq_2 = Equation(eq2) - pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y']) + pts = LabelTensor(torch.rand(10, 2), labels=["x", "y"]) pts.requires_grad = True u = torch.pow(pts, 2) - u.labels = ['u1', 'u2'] + u.labels = ["u1", "u2"] eq_1_res = eq_1.residual(pts, u) eq_2_res = eq_2.residual(pts, u) diff --git a/tests/test_equations/test_system_equation.py b/tests/test_equations/test_system_equation.py index 2d62c347f..4a0a1163e 100644 --- a/tests/test_equations/test_system_equation.py +++ b/tests/test_equations/test_system_equation.py @@ -7,15 +7,16 @@ def eq1(input_, output_): u_grad = grad(output_, input_) - u1_xx = grad(u_grad, input_, components=['du1dx'], d=['x']) - u2_xy = grad(u_grad, input_, components=['du2dx'], d=['y']) + u1_xx = grad(u_grad, input_, components=["du1dx"], d=["x"]) + u2_xy = grad(u_grad, input_, components=["du2dx"], d=["y"]) return torch.hstack([u1_xx, u2_xy]) def eq2(input_, output_): - force_term = (torch.sin(input_.extract(['x']) * torch.pi) * - torch.sin(input_.extract(['y']) * torch.pi)) - delta_u = laplacian(output_.extract(['u1']), input_) + force_term = torch.sin(input_.extract(["x"]) * torch.pi) * torch.sin( + input_.extract(["y"]) * torch.pi + ) + delta_u = laplacian(output_.extract(["u1"]), input_) return delta_u - force_term @@ -25,25 +26,25 @@ def foo(): def test_constructor(): SystemEquation([eq1, eq2]) - SystemEquation([eq1, eq2], reduction='sum') + SystemEquation([eq1, eq2], reduction="sum") with pytest.raises(NotImplementedError): - SystemEquation([eq1, eq2], reduction='foo') + SystemEquation([eq1, eq2], reduction="foo") with pytest.raises(ValueError): SystemEquation(foo) def test_residual(): - pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y']) + pts = LabelTensor(torch.rand(10, 2), labels=["x", "y"]) pts.requires_grad = True u = torch.pow(pts, 2) - u.labels = ['u1', 'u2'] + u.labels = ["u1", "u2"] - eq_1 = SystemEquation([eq1, eq2], reduction='mean') + eq_1 = SystemEquation([eq1, eq2], reduction="mean") res = eq_1.residual(pts, u) assert res.shape == torch.Size([10]) - eq_1 = SystemEquation([eq1, eq2], reduction='sum') + eq_1 = SystemEquation([eq1, eq2], reduction="sum") res = eq_1.residual(pts, u) assert res.shape == torch.Size([10]) diff --git a/tests/test_geometry/test_cartesian.py b/tests/test_geometry/test_cartesian.py index fc30757b6..1de06431c 100644 --- a/tests/test_geometry/test_cartesian.py +++ 
b/tests/test_geometry/test_cartesian.py @@ -5,31 +5,31 @@ def test_constructor(): - CartesianDomain({'x': [0, 1], 'y': [0, 1]}) + CartesianDomain({"x": [0, 1], "y": [0, 1]}) def test_is_inside_check_border(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) - pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ['x', 'y']) - domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"]) + pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"]) + domain = CartesianDomain({"x": [0, 1], "y": [0, 1]}) for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, True, False]): assert domain.is_inside(pt, check_border=True) == exp_result def test_is_inside_not_check_border(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) - pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ['x', 'y']) - domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"]) + pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"]) + domain = CartesianDomain({"x": [0, 1], "y": [0, 1]}) for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, False, False]): assert domain.is_inside(pt, check_border=False) == exp_result def test_is_inside_fixed_variables(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) - pt_3 = LabelTensor(torch.tensor([[1.0, 1.5]]), ['x', 'y']) - domain = CartesianDomain({'x': 1, 'y': [0, 1]}) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"]) + pt_3 = LabelTensor(torch.tensor([[1.0, 1.5]]), ["x", "y"]) + domain = CartesianDomain({"x": 1, "y": [0, 1]}) for pt, exp_result in zip([pt_1, pt_2, pt_3], [False, True, False]): assert domain.is_inside(pt, check_border=False) == exp_result diff --git a/tests/test_geometry/test_difference.py b/tests/test_geometry/test_difference.py index c5300aae0..5e45836db 100644 --- a/tests/test_geometry/test_difference.py +++ b/tests/test_geometry/test_difference.py @@ -5,98 +5,67 @@ def test_constructor_two_CartesianDomains(): - Difference([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }) - ]) + Difference( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + ] + ) def test_constructor_two_3DCartesianDomain(): - Difference([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2], - 'z': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3], - 'z': [1, 3] - }) - ]) + Difference( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}), + ] + ) def test_constructor_three_CartesianDomains(): - Difference([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }), - CartesianDomain({ - 'x': [2, 4], - 'y': [2, 4] - }) - ]) + Difference( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + CartesianDomain({"x": [2, 4], "y": [2, 4]}), + ] + ) def test_is_inside_two_CartesianDomains(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ['x', 'y']) - domain = 
Difference([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ["x", "y"]) + domain = Difference( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + ] + ) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_is_inside_two_3DCartesianDomain(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) - pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ['x', 'y', 'z']) - domain = Difference([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2], - 'z': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3], - 'z': [1, 3] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"]) + pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ["x", "y", "z"]) + domain = Difference( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}), + ] + ) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_sample(): n = 100 - domain = Difference([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1] - }), - CartesianDomain({ - 'x': [-0.5, 0.5], - 'y': [-0.5, 0.5] - }) - ]) + domain = Difference( + [ + EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}), + CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}), + ] + ) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git a/tests/test_geometry/test_ellipsoid.py b/tests/test_geometry/test_ellipsoid.py index fa776f9fc..203010799 100644 --- a/tests/test_geometry/test_ellipsoid.py +++ b/tests/test_geometry/test_ellipsoid.py @@ -6,15 +6,15 @@ def test_constructor(): - EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}) - EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}, sample_surface=True) + EllipsoidDomain({"x": [0, 1], "y": [0, 1]}) + EllipsoidDomain({"x": [0, 1], "y": [0, 1]}, sample_surface=True) def test_is_inside_sample_surface_false(): - domain = EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}, sample_surface=False) - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) - pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ['x', 'y']) + domain = EllipsoidDomain({"x": [0, 1], "y": [0, 1]}, sample_surface=False) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"]) + pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"]) for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, False, False]): assert domain.is_inside(pt) == exp_result for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, True, False]): @@ -22,9 +22,9 @@ def test_is_inside_sample_surface_false(): def test_is_inside_sample_surface_true(): - domain = EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}, sample_surface=True) - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) - pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ['x', 'y']) + domain = EllipsoidDomain({"x": [0, 1], "y": [0, 1]}, sample_surface=True) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"]) + pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"]) for pt, exp_result in zip([pt_1, pt_2, pt_3], [False, True, False]): assert domain.is_inside(pt) == exp_result diff --git 
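a/tests/test_geometry/example_boolean_domains.py b/tests/test_geometry/example_boolean_domains.py
new file mode 100644
index 000000000..000000000
--- /dev/null
+++ b/tests/test_geometry/example_boolean_domains.py
@@ -0,0 +1,23 @@
+# Editorial sketch, not part of the original changeset: a minimal,
+# hypothetical usage example for the boolean domain operations the
+# surrounding tests exercise. The file name and the `pina.geometry`
+# import path are assumptions inferred from the test layout.
+import torch
+
+from pina import LabelTensor
+from pina.geometry import CartesianDomain, Difference
+
+# A square with a smaller square cut out of its centre.
+outer = CartesianDomain({"x": [0, 2], "y": [0, 2]})
+inner = CartesianDomain({"x": [0.5, 1.5], "y": [0.5, 1.5]})
+ring = Difference([outer, inner])
+
+# Points carry coordinate labels, so membership is checked by name.
+pt_in = LabelTensor(torch.tensor([[0.25, 0.25]]), ["x", "y"])
+pt_out = LabelTensor(torch.tensor([[1.0, 1.0]]), ["x", "y"])
+assert ring.is_inside(pt_in) == True
+assert ring.is_inside(pt_out) == False
+
+# Sampling returns a LabelTensor with one row per sampled point.
+pts = ring.sample(100)
+assert pts.shape[0] == 100
diff --git 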
a/tests/test_geometry/test_exclusion.py b/tests/test_geometry/test_exclusion.py index f11fa7f06..95ada2c9d 100644 --- a/tests/test_geometry/test_exclusion.py +++ b/tests/test_geometry/test_exclusion.py @@ -5,98 +5,67 @@ def test_constructor_two_CartesianDomains(): - Exclusion([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }) - ]) + Exclusion( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + ] + ) def test_constructor_two_3DCartesianDomain(): - Exclusion([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2], - 'z': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3], - 'z': [1, 3] - }) - ]) + Exclusion( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}), + ] + ) def test_constructor_three_CartesianDomains(): - Exclusion([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }), - CartesianDomain({ - 'x': [2, 4], - 'y': [2, 4] - }) - ]) + Exclusion( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + CartesianDomain({"x": [2, 4], "y": [2, 4]}), + ] + ) def test_is_inside_two_CartesianDomains(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ['x', 'y']) - domain = Exclusion([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ["x", "y"]) + domain = Exclusion( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + ] + ) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_is_inside_two_3DCartesianDomain(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) - pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ['x', 'y', 'z']) - domain = Exclusion([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2], - 'z': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3], - 'z': [1, 3] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"]) + pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ["x", "y", "z"]) + domain = Exclusion( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}), + ] + ) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_sample(): n = 100 - domain = Exclusion([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1] - }), - CartesianDomain({ - 'x': [0.3, 1.5], - 'y': [0.3, 1.5] - }) - ]) + domain = Exclusion( + [ + EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}), + CartesianDomain({"x": [0.3, 1.5], "y": [0.3, 1.5]}), + ] + ) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git a/tests/test_geometry/test_intersection.py b/tests/test_geometry/test_intersection.py index 4929cacad..fe6921f16 100644 --- a/tests/test_geometry/test_intersection.py +++ b/tests/test_geometry/test_intersection.py @@ -5,86 +5,59 @@ def test_constructor_two_CartesianDomains(): - Intersection([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }) - ]) + Intersection( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + ] + ) def 
test_constructor_two_3DCartesianDomain(): - Intersection([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2], - 'z': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3], - 'z': [1, 3] - }) - ]) + Intersection( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}), + ] + ) def test_constructor_three_CartesianDomains(): - Intersection([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }), - CartesianDomain({ - 'x': [2, 4], - 'y': [2, 4] - }) - ]) + Intersection( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + CartesianDomain({"x": [2, 4], "y": [2, 4]}), + ] + ) def test_is_inside_two_CartesianDomains(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ['x', 'y']) - pt_3 = LabelTensor(torch.tensor([[1.5, 1.5]]), ['x', 'y']) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ["x", "y"]) + pt_3 = LabelTensor(torch.tensor([[1.5, 1.5]]), ["x", "y"]) - domain = Intersection([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3] - }) - ]) + domain = Intersection( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3]}), + ] + ) assert domain.is_inside(pt_1) == False assert domain.is_inside(pt_2) == False assert domain.is_inside(pt_3) == True def test_is_inside_two_3DCartesianDomain(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) - pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ['x', 'y', 'z']) - pt_3 = LabelTensor(torch.tensor([[1.5, 1.5, 1.5]]), ['x', 'y', 'z']) - domain = Intersection([ - CartesianDomain({ - 'x': [0, 2], - 'y': [0, 2], - 'z': [0, 2] - }), - CartesianDomain({ - 'x': [1, 3], - 'y': [1, 3], - 'z': [1, 3] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"]) + pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ["x", "y", "z"]) + pt_3 = LabelTensor(torch.tensor([[1.5, 1.5, 1.5]]), ["x", "y", "z"]) + domain = Intersection( + [ + CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}), + CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}), + ] + ) assert domain.is_inside(pt_1) == False assert domain.is_inside(pt_2) == False assert domain.is_inside(pt_3) == True @@ -92,16 +65,12 @@ def test_is_inside_two_3DCartesianDomain(): def test_sample(): n = 100 - domain = Intersection([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1] - }), - CartesianDomain({ - 'x': [-0.5, 0.5], - 'y': [-0.5, 0.5] - }) - ]) + domain = Intersection( + [ + EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}), + CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}), + ] + ) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git a/tests/test_geometry/test_simplex.py b/tests/test_geometry/test_simplex.py index 25224aae3..c03e1504e 100644 --- a/tests/test_geometry/test_simplex.py +++ b/tests/test_geometry/test_simplex.py @@ -6,11 +6,13 @@ def test_constructor(): - SimplexDomain([ - LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), - ]) + SimplexDomain( + [ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[0, 2]]), 
labels=["x", "y"]), + ] + ) SimplexDomain( [ LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), @@ -21,32 +23,41 @@ def test_constructor(): ) with pytest.raises(ValueError): # different labels - SimplexDomain([ - LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[1, 1]]), labels=["x", "z"]), - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "a"]), - ]) + SimplexDomain( + [ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[1, 1]]), labels=["x", "z"]), + LabelTensor(torch.tensor([[0, 2]]), labels=["x", "a"]), + ] + ) # not LabelTensor - SimplexDomain([ - LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), - [1, 1], - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), - ]) + SimplexDomain( + [ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + [1, 1], + LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), + ] + ) # different number of vertices - SimplexDomain([ - LabelTensor(torch.tensor([[0., -2.]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[-.5, -.5]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[-2., 0.]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[-.5, .5]]), labels=["x", "y"]), - ]) + SimplexDomain( + [ + LabelTensor(torch.tensor([[0.0, -2.0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[-0.5, -0.5]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[-2.0, 0.0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[-0.5, 0.5]]), labels=["x", "y"]), + ] + ) + def test_sample(): # sampling inside - simplex = SimplexDomain([ - LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), - ]) + simplex = SimplexDomain( + [ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), + ] + ) pts = simplex.sample(10) assert isinstance(pts, LabelTensor) assert pts.size() == torch.Size([10, 2]) @@ -117,8 +128,9 @@ def test_is_inside_2D_check_border_false(): pt6 = LabelTensor(torch.tensor([[2.5, 1]]), ["x", "y"]) pt7 = LabelTensor(torch.tensor([[100, 100]]), ["x", "y"]) pts = [pt1, pt2, pt3, pt4, pt5, pt6, pt7] - for pt, exp_result in zip(pts, - [False, False, False, False, True, True, False]): + for pt, exp_result in zip( + pts, [False, False, False, False, True, True, False] + ): assert domain.is_inside(point=pt, check_border=False) == exp_result @@ -143,7 +155,8 @@ def test_is_inside_3D_check_border_true(): pt9 = LabelTensor(torch.tensor([[2, 1, 1]]), ["x", "y", "z"]) pts = [pt1, pt2, pt3, pt4, pt5, pt6, pt7, pt8, pt9] for pt, exp_result in zip( - pts, [True, True, True, True, True, False, True, True, False]): + pts, [True, True, True, True, True, False, True, True, False] + ): assert domain.is_inside(point=pt, check_border=True) == exp_result @@ -165,6 +178,7 @@ def test_is_inside_3D_check_border_false(): pt6 = LabelTensor(torch.tensor([[0, 0, 20]]), ["x", "y", "z"]) pt7 = LabelTensor(torch.tensor([[2, 1, 1]]), ["x", "y", "z"]) pts = [pt1, pt2, pt3, pt4, pt5, pt6, pt7] - for pt, exp_result in zip(pts, - [False, False, False, False, False, False, True]): + for pt, exp_result in zip( + pts, [False, False, False, False, False, False, True] + ): assert domain.is_inside(point=pt, check_border=False) == exp_result diff --git a/tests/test_geometry/test_union.py b/tests/test_geometry/test_union.py index acde89542..a2fd05f86 100644 --- 
a/tests/test_geometry/test_union.py +++ b/tests/test_geometry/test_union.py @@ -5,111 +5,88 @@ def test_constructor_two_CartesianDomains(): - Union([ - CartesianDomain({ - 'x': [0, 1], - 'y': [0, 1] - }), - CartesianDomain({ - 'x': [0.5, 2], - 'y': [-1, 0.1] - }) - ]) + Union( + [ + CartesianDomain({"x": [0, 1], "y": [0, 1]}), + CartesianDomain({"x": [0.5, 2], "y": [-1, 0.1]}), + ] + ) def test_constructor_two_EllipsoidDomains(): - Union([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1], - 'z': [-1, 1] - }), - EllipsoidDomain({ - 'x': [-0.5, 0.5], - 'y': [-0.5, 0.5], - 'z': [-0.5, 0.5] - }) - ]) + Union( + [ + EllipsoidDomain({"x": [-1, 1], "y": [-1, 1], "z": [-1, 1]}), + EllipsoidDomain( + {"x": [-0.5, 0.5], "y": [-0.5, 0.5], "z": [-0.5, 0.5]} + ), + ] + ) def test_constructor_EllipsoidDomain_CartesianDomain(): - Union([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1] - }), - CartesianDomain({ - 'x': [-0.5, 0.5], - 'y': [-0.5, 0.5] - }) - ]) + Union( + [ + EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}), + CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}), + ] + ) def test_is_inside_two_CartesianDomains(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ['x', 'y']) - domain = Union([ - CartesianDomain({ - 'x': [0, 1], - 'y': [0, 1] - }), - CartesianDomain({ - 'x': [0.5, 2], - 'y': [-1, 0.1] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ["x", "y"]) + domain = Union( + [ + CartesianDomain({"x": [0, 1], "y": [0, 1]}), + CartesianDomain({"x": [0.5, 2], "y": [-1, 0.1]}), + ] + ) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_is_inside_two_EllipsoidDomains(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) - pt_2 = LabelTensor(torch.tensor([[-1, -1, -1]]), ['x', 'y', 'z']) - domain = Union([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1], - 'z': [-1, 1] - }), - EllipsoidDomain({ - 'x': [-0.5, 0.5], - 'y': [-0.5, 0.5], - 'z': [-0.5, 0.5] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"]) + pt_2 = LabelTensor(torch.tensor([[-1, -1, -1]]), ["x", "y", "z"]) + domain = Union( + [ + EllipsoidDomain({"x": [-1, 1], "y": [-1, 1], "z": [-1, 1]}), + EllipsoidDomain( + {"x": [-0.5, 0.5], "y": [-0.5, 0.5], "z": [-0.5, 0.5]} + ), + ] + ) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_is_inside_EllipsoidDomain_CartesianDomain(): - pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) - pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ['x', 'y']) - domain = Union([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1], - }), - CartesianDomain({ - 'x': [0.6, 1.5], - 'y': [-2, 0] - }) - ]) + pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"]) + pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ["x", "y"]) + domain = Union( + [ + EllipsoidDomain( + { + "x": [-1, 1], + "y": [-1, 1], + } + ), + CartesianDomain({"x": [0.6, 1.5], "y": [-2, 0]}), + ] + ) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_sample(): n = 100 - domain = Union([ - EllipsoidDomain({ - 'x': [-1, 1], - 'y': [-1, 1] - }), - CartesianDomain({ - 'x': [-0.5, 0.5], - 'y': [-0.5, 0.5] - }) - ]) + domain = Union( + [ + EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}), + CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}), + ] + ) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git 
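a/tests/test_label_tensor/example_label_tensor_usage.py b/tests/test_label_tensor/example_label_tensor_usage.py
new file mode 100644
index 000000000..000000000
--- /dev/null
+++ b/tests/test_label_tensor/example_label_tensor_usage.py
@@ -0,0 +1,20 @@
+# Editorial sketch, not part of the original changeset: a minimal,
+# hypothetical example of the LabelTensor operations covered by the
+# tests that follow. The file name is illustrative only.
+import torch
+
+from pina.label_tensor import LabelTensor
+
+# Wrap a plain tensor and name its columns.
+data = torch.rand((20, 3))
+lt = LabelTensor(data, ["x", "y", "z"])
+
+# Columns are addressed by label; extraction follows the requested order.
+zx = lt.extract(["z", "x"])
+assert zx.labels == ["z", "x"]
+assert zx.shape == (20, 2)
+
+# Appending another labelled tensor merges the column labels.
+extra = LabelTensor(torch.rand((20, 1)), ["w"])
+merged = lt.append(extra)
+assert merged.labels == ["x", "y", "z", "w"]
diff --git 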
a/tests/test_label_tensor/test_label_tensor.py b/tests/test_label_tensor/test_label_tensor.py index 2c5d15ea9..556957b9d 100644 --- a/tests/test_label_tensor/test_label_tensor.py +++ b/tests/test_label_tensor/test_label_tensor.py @@ -4,26 +4,27 @@ from pina.label_tensor import LabelTensor data = torch.rand((20, 3)) -labels_column = {1: {"name": "space", "dof": ['x', 'y', 'z']}} +labels_column = {1: {"name": "space", "dof": ["x", "y", "z"]}} labels_row = {0: {"name": "samples", "dof": range(20)}} -labels_list = ['x', 'y', 'z'] +labels_list = ["x", "y", "z"] labels_all = labels_column.copy() labels_all.update(labels_row) -@pytest.mark.parametrize("labels", - [labels_column, labels_row, labels_all, labels_list]) +@pytest.mark.parametrize( + "labels", [labels_column, labels_row, labels_all, labels_list] +) def test_constructor(labels): print(LabelTensor(data, labels)) def test_wrong_constructor(): with pytest.raises(ValueError): - LabelTensor(data, ['a', 'b']) + LabelTensor(data, ["a", "b"]) @pytest.mark.parametrize("labels", [labels_column, labels_all]) -@pytest.mark.parametrize("labels_te", ['z', ['z'], {'space': ['z']}]) +@pytest.mark.parametrize("labels_te", ["z", ["z"], {"space": ["z"]}]) def test_extract_column(labels, labels_te): tensor = LabelTensor(data, labels) new = tensor.extract(labels_te) @@ -34,7 +35,7 @@ def test_extract_column(labels, labels_te): @pytest.mark.parametrize("labels", [labels_row, labels_all]) -@pytest.mark.parametrize("labels_te", [{'samples': [2]}]) +@pytest.mark.parametrize("labels_te", [{"samples": [2]}]) def test_extract_row(labels, labels_te): tensor = LabelTensor(data, labels) new = tensor.extract(labels_te) @@ -44,13 +45,10 @@ def test_extract_row(labels, labels_te): assert torch.all(torch.isclose(data[2].reshape(1, -1), new)) -@pytest.mark.parametrize("labels_te", [{ - 'samples': [2], - 'space': ['z'] -}, { - 'space': 'z', - 'samples': 2 -}]) +@pytest.mark.parametrize( + "labels_te", + [{"samples": [2], "space": ["z"]}, {"space": "z", "samples": 2}], +) def test_extract_2D(labels_te): labels = labels_all tensor = LabelTensor(data, labels) @@ -64,16 +62,10 @@ def test_extract_2D(labels_te): def test_extract_3D(): data = torch.rand(20, 3, 4) labels = { - 1: { - "name": "space", - "dof": ['x', 'y', 'z'] - }, - 2: { - "name": "time", - "dof": range(4) - }, + 1: {"name": "space", "dof": ["x", "y", "z"]}, + 2: {"name": "time", "dof": range(4)}, } - labels_te = {'space': ['x', 'z'], 'time': range(1, 4)} + labels_te = {"space": ["x", "z"], "time": range(1, 4)} tensor = LabelTensor(data, labels) new = tensor.extract(labels_te) @@ -91,65 +83,65 @@ def test_extract_3D(): def test_concatenation_3D(): data_1 = torch.rand(20, 3, 4) - labels_1 = ['x', 'y', 'z', 'w'] + labels_1 = ["x", "y", "z", "w"] lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(50, 3, 4) - labels_2 = ['x', 'y', 'z', 'w'] + labels_2 = ["x", "y", "z", "w"] lt2 = LabelTensor(data_2, labels_2) lt_cat = LabelTensor.cat([lt1, lt2]) assert lt_cat.shape == (70, 3, 4) - assert lt_cat.full_labels[0]['dof'] == range(70) - assert lt_cat.full_labels[1]['dof'] == range(3) - assert lt_cat.full_labels[2]['dof'] == ['x', 'y', 'z', 'w'] + assert lt_cat.full_labels[0]["dof"] == range(70) + assert lt_cat.full_labels[1]["dof"] == range(3) + assert lt_cat.full_labels[2]["dof"] == ["x", "y", "z", "w"] data_1 = torch.rand(20, 3, 4) - labels_1 = ['x', 'y', 'z', 'w'] + labels_1 = ["x", "y", "z", "w"] lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 2, 4) - labels_2 = ['x', 'y', 'z', 'w'] + labels_2 = ["x", 
"y", "z", "w"] lt2 = LabelTensor(data_2, labels_2) lt_cat = LabelTensor.cat([lt1, lt2], dim=1) assert lt_cat.shape == (20, 5, 4) - assert lt_cat.full_labels[0]['dof'] == range(20) - assert lt_cat.full_labels[1]['dof'] == range(5) - assert lt_cat.full_labels[2]['dof'] == ['x', 'y', 'z', 'w'] + assert lt_cat.full_labels[0]["dof"] == range(20) + assert lt_cat.full_labels[1]["dof"] == range(5) + assert lt_cat.full_labels[2]["dof"] == ["x", "y", "z", "w"] data_1 = torch.rand(20, 3, 2) - labels_1 = ['x', 'y'] + labels_1 = ["x", "y"] lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 3, 3) - labels_2 = ['z', 'w', 'a'] + labels_2 = ["z", "w", "a"] lt2 = LabelTensor(data_2, labels_2) lt_cat = LabelTensor.cat([lt1, lt2], dim=2) assert lt_cat.shape == (20, 3, 5) - assert lt_cat.full_labels[2]['dof'] == ['x', 'y', 'z', 'w', 'a'] - assert lt_cat.full_labels[0]['dof'] == range(20) - assert lt_cat.full_labels[1]['dof'] == range(3) + assert lt_cat.full_labels[2]["dof"] == ["x", "y", "z", "w", "a"] + assert lt_cat.full_labels[0]["dof"] == range(20) + assert lt_cat.full_labels[1]["dof"] == range(3) data_1 = torch.rand(20, 2, 4) - labels_1 = ['x', 'y', 'z', 'w'] + labels_1 = ["x", "y", "z", "w"] lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 3, 4) - labels_2 = ['x', 'y', 'z', 'w'] + labels_2 = ["x", "y", "z", "w"] lt2 = LabelTensor(data_2, labels_2) with pytest.raises(RuntimeError): LabelTensor.cat([lt1, lt2], dim=2) data_1 = torch.rand(20, 3, 2) - labels_1 = ['x', 'y'] + labels_1 = ["x", "y"] lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 3, 3) - labels_2 = ['z', 'w', 'a'] + labels_2 = ["z", "w", "a"] lt2 = LabelTensor(data_2, labels_2) lt_cat = LabelTensor.cat([lt1, lt2], dim=2) assert lt_cat.shape == (20, 3, 5) - assert lt_cat.full_labels[2]['dof'] == ['x', 'y', 'z', 'w', 'a'] - assert lt_cat.full_labels[0]['dof'] == range(20) - assert lt_cat.full_labels[1]['dof'] == range(3) + assert lt_cat.full_labels[2]["dof"] == ["x", "y", "z", "w", "a"] + assert lt_cat.full_labels[0]["dof"] == range(20) + assert lt_cat.full_labels[1]["dof"] == range(3) def test_summation(): lt1 = LabelTensor(torch.ones(20, 3), labels_all) - lt2 = LabelTensor(torch.ones(30, 3), ['x', 'y', 'z']) + lt2 = LabelTensor(torch.ones(30, 3), ["x", "y", "z"]) with pytest.raises(RuntimeError): LabelTensor.summation([lt1, lt2]) lt1 = LabelTensor(torch.ones(20, 3), labels_all) @@ -159,7 +151,7 @@ def test_summation(): assert lt_sum.shape[0] == 20 assert lt_sum.shape[1] == 3 assert lt_sum.full_labels[0] == labels_all[0] - assert lt_sum.labels == ['x+x', 'y+y', 'z+z'] + assert lt_sum.labels == ["x+x", "y+y", "z+z"] assert torch.eq(lt_sum.tensor, torch.ones(20, 3) * 2).all() lt1 = LabelTensor(torch.ones(20, 3), labels_all) lt2 = LabelTensor(torch.ones(20, 3), labels_all) @@ -169,84 +161,72 @@ def test_summation(): assert lt_sum.shape[0] == 20 assert lt_sum.shape[1] == 3 assert lt_sum.full_labels[0] == labels_all[0] - assert lt_sum.labels == ['x+x+x', 'y+y+y', 'z+z+z'] + assert lt_sum.labels == ["x+x+x", "y+y+y", "z+z+z"] assert torch.eq(lt_sum.tensor, torch.ones(20, 3) * 2).all() def test_append_3D(): data_1 = torch.rand(20, 3, 2) - labels_1 = ['x', 'y'] + labels_1 = ["x", "y"] lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 3, 2) - labels_2 = ['z', 'w'] + labels_2 = ["z", "w"] lt2 = LabelTensor(data_2, labels_2) lt1 = lt1.append(lt2) assert lt1.shape == (20, 3, 4) - assert lt1.full_labels[0]['dof'] == range(20) - assert lt1.full_labels[1]['dof'] == range(3) - assert lt1.full_labels[2]['dof'] == ['x', 
'y', 'z', 'w'] + assert lt1.full_labels[0]["dof"] == range(20) + assert lt1.full_labels[1]["dof"] == range(3) + assert lt1.full_labels[2]["dof"] == ["x", "y", "z", "w"] def test_append_2D(): data_1 = torch.rand(20, 2) - labels_1 = ['x', 'y'] + labels_1 = ["x", "y"] lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 2) - labels_2 = ['z', 'w'] + labels_2 = ["z", "w"] lt2 = LabelTensor(data_2, labels_2) - lt1 = lt1.append(lt2, mode='cross') + lt1 = lt1.append(lt2, mode="cross") assert lt1.shape == (400, 4) - assert lt1.full_labels[0]['dof'] == range(400) - assert lt1.full_labels[1]['dof'] == ['x', 'y', 'z', 'w'] + assert lt1.full_labels[0]["dof"] == range(400) + assert lt1.full_labels[1]["dof"] == ["x", "y", "z", "w"] def test_vstack_3D(): data_1 = torch.rand(20, 3, 2) labels_1 = { - 1: { - 'dof': ['a', 'b', 'c'], - 'name': 'first' - }, - 2: { - 'dof': ['x', 'y'], - 'name': 'second' - } + 1: {"dof": ["a", "b", "c"], "name": "first"}, + 2: {"dof": ["x", "y"], "name": "second"}, } lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 3, 2) labels_1 = { - 1: { - 'dof': ['a', 'b', 'c'], - 'name': 'first' - }, - 2: { - 'dof': ['x', 'y'], - 'name': 'second' - } + 1: {"dof": ["a", "b", "c"], "name": "first"}, + 2: {"dof": ["x", "y"], "name": "second"}, } lt2 = LabelTensor(data_2, labels_1) lt_stacked = LabelTensor.vstack([lt1, lt2]) assert lt_stacked.shape == (40, 3, 2) - assert lt_stacked.full_labels[0]['dof'] == range(40) - assert lt_stacked.full_labels[1]['dof'] == ['a', 'b', 'c'] - assert lt_stacked.full_labels[2]['dof'] == ['x', 'y'] - assert lt_stacked.full_labels[1]['name'] == 'first' - assert lt_stacked.full_labels[2]['name'] == 'second' + assert lt_stacked.full_labels[0]["dof"] == range(40) + assert lt_stacked.full_labels[1]["dof"] == ["a", "b", "c"] + assert lt_stacked.full_labels[2]["dof"] == ["x", "y"] + assert lt_stacked.full_labels[1]["name"] == "first" + assert lt_stacked.full_labels[2]["name"] == "second" def test_vstack_2D(): data_1 = torch.rand(20, 2) - labels_1 = {1: {'dof': ['x', 'y'], 'name': 'second'}} + labels_1 = {1: {"dof": ["x", "y"], "name": "second"}} lt1 = LabelTensor(data_1, labels_1) data_2 = torch.rand(20, 2) - labels_1 = {1: {'dof': ['x', 'y'], 'name': 'second'}} + labels_1 = {1: {"dof": ["x", "y"], "name": "second"}} lt2 = LabelTensor(data_2, labels_1) lt_stacked = LabelTensor.vstack([lt1, lt2]) assert lt_stacked.shape == (40, 2) - assert lt_stacked.full_labels[0]['dof'] == range(40) - assert lt_stacked.full_labels[1]['dof'] == ['x', 'y'] - assert lt_stacked.full_labels[0]['name'] == 0 - assert lt_stacked.full_labels[1]['name'] == 'second' + assert lt_stacked.full_labels[0]["dof"] == range(40) + assert lt_stacked.full_labels[1]["dof"] == ["x", "y"] + assert lt_stacked.full_labels[0]["name"] == 0 + assert lt_stacked.full_labels[1]["name"] == "second" def test_sorting(): @@ -256,11 +236,11 @@ def test_sorting(): data[:, 2] = data[:, 2] data[:, 3] = data[:, 3] * 5 data[:, 4] = data[:, 4] * 3 - labels = ['d', 'b', 'a', 'e', 'c'] + labels = ["d", "b", "a", "e", "c"] lt_data = LabelTensor(data, labels) lt_sorted = LabelTensor.sort_labels(lt_data) assert lt_sorted.shape == (20, 5) - assert lt_sorted.labels == ['a', 'b', 'c', 'd', 'e'] + assert lt_sorted.labels == ["a", "b", "c", "d", "e"] assert torch.eq(lt_sorted.tensor[:, 0], torch.ones(20) * 1).all() assert torch.eq(lt_sorted.tensor[:, 1], torch.ones(20) * 2).all() assert torch.eq(lt_sorted.tensor[:, 2], torch.ones(20) * 3).all() @@ -272,26 +252,29 @@ def test_sorting(): data[:, 1, :] = data[:, 1] * 2 
data[:, 2, :] = data[:, 2] data[:, 3, :] = data[:, 3] * 3 - labels = {1: {'dof': ['d', 'b', 'a', 'c'], 'name': 1}} + labels = {1: {"dof": ["d", "b", "a", "c"], "name": 1}} lt_data = LabelTensor(data, labels) lt_sorted = LabelTensor.sort_labels(lt_data, dim=1) assert lt_sorted.shape == (20, 4, 5) - assert lt_sorted.full_labels[1]['dof'] == ['a', 'b', 'c', 'd'] + assert lt_sorted.full_labels[1]["dof"] == ["a", "b", "c", "d"] assert torch.eq(lt_sorted.tensor[:, 0, :], torch.ones(20, 5) * 1).all() assert torch.eq(lt_sorted.tensor[:, 1, :], torch.ones(20, 5) * 2).all() assert torch.eq(lt_sorted.tensor[:, 2, :], torch.ones(20, 5) * 3).all() assert torch.eq(lt_sorted.tensor[:, 3, :], torch.ones(20, 5) * 4).all() -@pytest.mark.parametrize("labels", - [[f's{i}' for i in range(10)], - {0: {'dof': ['a', 'b', 'c']}, - 1: {'dof': [f's{i}' for i in range(10)]}}]) +@pytest.mark.parametrize( + "labels", + [ + [f"s{i}" for i in range(10)], + {0: {"dof": ["a", "b", "c"]}, 1: {"dof": [f"s{i}" for i in range(10)]}}, + ], +) def test_cat_bool(labels): out = torch.randn((3, 10)) out = LabelTensor(out, labels) selected = out[torch.tensor([True, True, False])] assert selected.shape == (2, 10) - assert selected.stored_labels[1]['dof'] == [f's{i}' for i in range(10)] + assert selected.stored_labels[1]["dof"] == [f"s{i}" for i in range(10)] if isinstance(labels, dict): - assert selected.stored_labels[0]['dof'] == ['a', 'b'] + assert selected.stored_labels[0]["dof"] == ["a", "b"] diff --git a/tests/test_label_tensor/test_label_tensor_01.py b/tests/test_label_tensor/test_label_tensor_01.py index ea43307cb..6806dd9e4 100644 --- a/tests/test_label_tensor/test_label_tensor_01.py +++ b/tests/test_label_tensor/test_label_tensor_01.py @@ -4,7 +4,7 @@ from pina import LabelTensor data = torch.rand((20, 3)) -labels = ['a', 'b', 'c'] +labels = ["a", "b", "c"] def test_constructor(): @@ -13,7 +13,7 @@ def test_constructor(): def test_wrong_constructor(): with pytest.raises(ValueError): - LabelTensor(data, ['a', 'b']) + LabelTensor(data, ["a", "b"]) def test_labels(): @@ -25,7 +25,7 @@ def test_labels(): def test_extract(): - label_to_extract = ['a', 'c'] + label_to_extract = ["a", "c"] tensor = LabelTensor(data, labels) new = tensor.extract(label_to_extract) assert new.labels == label_to_extract @@ -34,7 +34,7 @@ def test_extract(): def test_extract_onelabel(): - label_to_extract = ['a'] + label_to_extract = ["a"] tensor = LabelTensor(data, labels) new = tensor.extract(label_to_extract) assert new.ndim == 2 @@ -44,18 +44,19 @@ def test_extract_onelabel(): def test_wrong_extract(): - label_to_extract = ['a', 'cc'] + label_to_extract = ["a", "cc"] tensor = LabelTensor(data, labels) with pytest.raises(ValueError): tensor.extract(label_to_extract) def test_extract_order(): - label_to_extract = ['c', 'a'] + label_to_extract = ["c", "a"] tensor = LabelTensor(data, labels) new = tensor.extract(label_to_extract) - expected = torch.cat((data[:, 2].reshape(-1, 1), data[:, 0].reshape(-1, 1)), - dim=1) + expected = torch.cat( + (data[:, 2].reshape(-1, 1), data[:, 0].reshape(-1, 1)), dim=1 + ) assert new.labels == label_to_extract assert new.shape[1] == len(label_to_extract) assert torch.all(torch.isclose(expected, new)) @@ -63,31 +64,31 @@ def test_extract_order(): def test_merge(): tensor = LabelTensor(data, labels) - tensor_a = tensor.extract('a') - tensor_b = tensor.extract('b') - tensor_c = tensor.extract('c') + tensor_a = tensor.extract("a") + tensor_b = tensor.extract("b") + tensor_c = tensor.extract("c") tensor_bc = 
tensor_b.append(tensor_c) - assert torch.allclose(tensor_bc, tensor.extract(['b', 'c'])) + assert torch.allclose(tensor_bc, tensor.extract(["b", "c"])) def test_merge2(): tensor = LabelTensor(data, labels) - tensor_b = tensor.extract('b') - tensor_c = tensor.extract('c') + tensor_b = tensor.extract("b") + tensor_c = tensor.extract("c") tensor_bc = tensor_b.append(tensor_c) - assert torch.allclose(tensor_bc, tensor.extract(['b', 'c'])) + assert torch.allclose(tensor_bc, tensor.extract(["b", "c"])) def test_getitem(): tensor = LabelTensor(data, labels) - tensor_view = tensor['a'] - assert tensor_view.labels == ['a'] + tensor_view = tensor["a"] + assert tensor_view.labels == ["a"] assert torch.allclose(tensor_view.flatten(), data[:, 0]) - tensor_view = tensor['a', 'c'] - assert tensor_view.labels == ['a', 'c'] + tensor_view = tensor["a", "c"] + assert tensor_view.labels == ["a", "c"] assert torch.allclose(tensor_view, data[:, 0::2]) diff --git a/tests/test_loss/test_lp_loss.py b/tests/test_loss/test_lp_loss.py index 2f073b049..8f1f48d58 100644 --- a/tests/test_loss/test_lp_loss.py +++ b/tests/test_loss/test_lp_loss.py @@ -2,9 +2,9 @@ from pina.loss import LpLoss -input = torch.tensor([[3.], [1.], [-8.]]) -target = torch.tensor([[6.], [4.], [2.]]) -available_reductions = ['str', 'mean', 'none'] +input = torch.tensor([[3.0], [1.0], [-8.0]]) +target = torch.tensor([[6.0], [4.0], [2.0]]) +available_reductions = ["sum", "mean", "none"] def test_LpLoss_constructor(): @@ -12,17 +12,17 @@ for reduction in available_reductions: LpLoss(reduction=reduction) # test p - for p in [float('inf'), -float('inf'), 1, 10, -8]: + for p in [float("inf"), -float("inf"), 1, 10, -8]: LpLoss(p=p) def test_LpLoss_forward(): # l2 loss - loss = LpLoss(p=2, reduction='mean') + loss = LpLoss(p=2, reduction="mean") l2_loss = torch.mean(torch.sqrt((input - target).pow(2))) assert loss(input, target) == l2_loss # l1 loss - loss = LpLoss(p=1, reduction='sum') + loss = LpLoss(p=1, reduction="sum") l1_loss = torch.sum(torch.abs(input - target)) assert loss(input, target) == l1_loss @@ -32,16 +32,16 @@ def test_LpRelativeLoss_constructor(): for reduction in available_reductions: LpLoss(reduction=reduction, relative=True) # test p - for p in [float('inf'), -float('inf'), 1, 10, -8]: + for p in [float("inf"), -float("inf"), 1, 10, -8]: LpLoss(p=p, relative=True) def test_LpRelativeLoss_forward(): # l2 relative loss - loss = LpLoss(p=2, reduction='mean', relative=True) + loss = LpLoss(p=2, reduction="mean", relative=True) l2_loss = torch.sqrt((input - target).pow(2)) / torch.sqrt(input.pow(2)) assert loss(input, target) == torch.mean(l2_loss) # l1 relative loss - loss = LpLoss(p=1, reduction='sum', relative=True) + loss = LpLoss(p=1, reduction="sum", relative=True) l1_loss = torch.abs(input - target) / torch.abs(input) assert loss(input, target) == torch.sum(l1_loss) diff --git a/tests/test_loss/test_power_loss.py b/tests/test_loss/test_power_loss.py index 7ea26755d..4ea90282b 100644 --- a/tests/test_loss/test_power_loss.py +++ b/tests/test_loss/test_power_loss.py @@ -3,9 +3,9 @@ from pina.loss import PowerLoss -input = torch.tensor([[3.], [1.], [-8.]]) -target = torch.tensor([[6.], [4.], [2.]]) -available_reductions = ['str', 'mean', 'none'] +input = torch.tensor([[3.0], [1.0], [-8.0]]) +target = torch.tensor([[6.0], [4.0], [2.0]]) +available_reductions = ["sum", "mean", "none"] def test_PowerLoss_constructor(): @@ -13,17 +13,17 @@ for reduction in 
available_reductions: PowerLoss(reduction=reduction) # test p - for p in [float('inf'), -float('inf'), 1, 10, -8]: + for p in [float("inf"), -float("inf"), 1, 10, -8]: PowerLoss(p=p) def test_PowerLoss_forward(): # l2 loss - loss = PowerLoss(p=2, reduction='mean') + loss = PowerLoss(p=2, reduction="mean") l2_loss = torch.mean((input - target).pow(2)) assert loss(input, target) == l2_loss # l1 loss - loss = PowerLoss(p=1, reduction='sum') + loss = PowerLoss(p=1, reduction="sum") l1_loss = torch.sum(torch.abs(input - target)) assert loss(input, target) == l1_loss @@ -33,16 +33,16 @@ def test_LpRelativeLoss_constructor(): for reduction in available_reductions: PowerLoss(reduction=reduction, relative=True) # test p - for p in [float('inf'), -float('inf'), 1, 10, -8]: + for p in [float("inf"), -float("inf"), 1, 10, -8]: PowerLoss(p=p, relative=True) def test_LpRelativeLoss_forward(): # l2 relative loss - loss = PowerLoss(p=2, reduction='mean', relative=True) + loss = PowerLoss(p=2, reduction="mean", relative=True) l2_loss = (input - target).pow(2) / input.pow(2) assert loss(input, target) == torch.mean(l2_loss) # l1 relative loss - loss = PowerLoss(p=1, reduction='sum', relative=True) + loss = PowerLoss(p=1, reduction="sum", relative=True) l1_loss = torch.abs(input - target) / torch.abs(input) assert loss(input, target) == torch.sum(l1_loss) diff --git a/tests/test_model/test_average_neural_operator.py b/tests/test_model/test_average_neural_operator.py index 1988bde2f..ded81c43d 100644 --- a/tests/test_model/test_average_neural_operator.py +++ b/tests/test_model/test_average_neural_operator.py @@ -8,139 +8,166 @@ n_layers = 4 embedding_dim = 24 func = torch.nn.Tanh -coordinates_indices = ['p'] -field_indices = ['v'] +coordinates_indices = ["p"] +field_indices = ["v"] def test_constructor(): # working constructor - lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices), - embedding_dim) - projecting_net = torch.nn.Linear(embedding_dim + len(field_indices), - len(field_indices)) + lifting_net = torch.nn.Linear( + len(coordinates_indices) + len(field_indices), embedding_dim + ) + projecting_net = torch.nn.Linear( + embedding_dim + len(field_indices), len(field_indices) + ) AveragingNeuralOperator( lifting_net=lifting_net, projecting_net=projecting_net, coordinates_indices=coordinates_indices, field_indices=field_indices, n_layers=n_layers, - func=func) + func=func, + ) # not working constructor with pytest.raises(ValueError): AveragingNeuralOperator( - lifting_net=lifting_net, - projecting_net=projecting_net, - coordinates_indices=coordinates_indices, - field_indices=field_indices, - n_layers=3.2, # wrong - func=func) + lifting_net=lifting_net, + projecting_net=projecting_net, + coordinates_indices=coordinates_indices, + field_indices=field_indices, + n_layers=3.2, # wrong + func=func, + ) AveragingNeuralOperator( - lifting_net=lifting_net, - projecting_net=projecting_net, - coordinates_indices=coordinates_indices, - field_indices=field_indices, - n_layers=n_layers, - func=1) # wrong + lifting_net=lifting_net, + projecting_net=projecting_net, + coordinates_indices=coordinates_indices, + field_indices=field_indices, + n_layers=n_layers, + func=1, + ) # wrong AveragingNeuralOperator( - lifting_net=[0], # wrong - projecting_net=projecting_net, - coordinates_indices=coordinates_indices, - field_indices=field_indices, - n_layers=n_layers, - func=func) + lifting_net=[0], # wrong + projecting_net=projecting_net, + coordinates_indices=coordinates_indices, + 
field_indices=field_indices, + n_layers=n_layers, + func=func, + ) AveragingNeuralOperator( - lifting_net=lifting_net, - projecting_net=[0], # wront - coordinates_indices=coordinates_indices, - field_indices=field_indices, - n_layers=n_layers, - func=func) + lifting_net=lifting_net, + projecting_net=[0], # wrong + coordinates_indices=coordinates_indices, + field_indices=field_indices, + n_layers=n_layers, + func=func, + ) AveragingNeuralOperator( - lifting_net=lifting_net, - projecting_net=projecting_net, - coordinates_indices=[0], #wrong - field_indices=field_indices, - n_layers=n_layers, - func=func) + lifting_net=lifting_net, + projecting_net=projecting_net, + coordinates_indices=[0], # wrong + field_indices=field_indices, + n_layers=n_layers, + func=func, + ) AveragingNeuralOperator( - lifting_net=lifting_net, - projecting_net=projecting_net, - coordinates_indices=coordinates_indices, - field_indices=[0], #wrong - n_layers=n_layers, - func=func) + lifting_net=lifting_net, + projecting_net=projecting_net, + coordinates_indices=coordinates_indices, + field_indices=[0], # wrong + n_layers=n_layers, + func=func, + ) - lifting_net = torch.nn.Linear(len(coordinates_indices), - embedding_dim) + lifting_net = torch.nn.Linear(len(coordinates_indices), embedding_dim) AveragingNeuralOperator( lifting_net=lifting_net, projecting_net=projecting_net, coordinates_indices=coordinates_indices, field_indices=field_indices, n_layers=n_layers, - func=func) - - lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices), - embedding_dim) - projecting_net = torch.nn.Linear(embedding_dim, - len(field_indices)) + func=func, + ) + + lifting_net = torch.nn.Linear( + len(coordinates_indices) + len(field_indices), embedding_dim + ) + projecting_net = torch.nn.Linear(embedding_dim, len(field_indices)) AveragingNeuralOperator( lifting_net=lifting_net, projecting_net=projecting_net, coordinates_indices=coordinates_indices, field_indices=field_indices, n_layers=n_layers, - func=func) - + func=func, + ) + def test_forward(): - lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices), - embedding_dim) - projecting_net = torch.nn.Linear(embedding_dim + len(field_indices), - len(field_indices)) - avno=AveragingNeuralOperator( + lifting_net = torch.nn.Linear( + len(coordinates_indices) + len(field_indices), embedding_dim + ) + projecting_net = torch.nn.Linear( + embedding_dim + len(field_indices), len(field_indices) + ) + avno = AveragingNeuralOperator( lifting_net=lifting_net, projecting_net=projecting_net, coordinates_indices=coordinates_indices, field_indices=field_indices, n_layers=n_layers, - func=func) - + func=func, + ) + input_ = LabelTensor( - torch.rand(batch_size, 100, - len(coordinates_indices) + len(field_indices)), ['p', 'v']) + torch.rand( + batch_size, 100, len(coordinates_indices) + len(field_indices) + ), + ["p", "v"], + ) out = avno(input_) assert out.shape == torch.Size( - [batch_size, input_.shape[1], len(field_indices)]) + [batch_size, input_.shape[1], len(field_indices)] + ) def test_backward(): - lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices), - embedding_dim) - projecting_net = torch.nn.Linear(embedding_dim + len(field_indices), - len(field_indices)) - avno=AveragingNeuralOperator( + lifting_net = torch.nn.Linear( + len(coordinates_indices) + len(field_indices), embedding_dim + ) + projecting_net = torch.nn.Linear( + embedding_dim + len(field_indices), len(field_indices) + ) + avno = AveragingNeuralOperator( lifting_net=lifting_net, 
projecting_net=projecting_net, coordinates_indices=coordinates_indices, field_indices=field_indices, n_layers=n_layers, - func=func) + func=func, + ) input_ = LabelTensor( - torch.rand(batch_size, 100, - len(coordinates_indices) + len(field_indices)), ['p', 'v']) + torch.rand( + batch_size, 100, len(coordinates_indices) + len(field_indices) + ), + ["p", "v"], + ) input_ = input_.requires_grad_() out = avno(input_) tmp = torch.linalg.norm(out) tmp.backward() grad = input_.grad assert grad.shape == torch.Size( - [batch_size, input_.shape[1], - len(coordinates_indices) + len(field_indices)]) \ No newline at end of file + [ + batch_size, + input_.shape[1], + len(coordinates_indices) + len(field_indices), + ] + ) diff --git a/tests/test_model/test_deeponet.py b/tests/test_model/test_deeponet.py index 9670424c7..8917811c5 100644 --- a/tests/test_model/test_deeponet.py +++ b/tests/test_model/test_deeponet.py @@ -7,42 +7,50 @@ from pina.model import FeedForward data = torch.rand((20, 3)) -input_vars = ['a', 'b', 'c'] +input_vars = ["a", "b", "c"] input_ = LabelTensor(data, input_vars) symbol_funcs_red = DeepONet._symbol_functions(dim=-1) output_dims = [1, 5, 10, 20] + def test_constructor(): branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, output_dimensions=10) - DeepONet(branch_net=branch_net, - trunk_net=trunk_net, - input_indeces_branch_net=['a'], - input_indeces_trunk_net=['b', 'c'], - reduction='+', - aggregator='*') + DeepONet( + branch_net=branch_net, + trunk_net=trunk_net, + input_indeces_branch_net=["a"], + input_indeces_trunk_net=["b", "c"], + reduction="+", + aggregator="*", + ) def test_constructor_fails_when_invalid_inner_layer_size(): branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, output_dimensions=8) with pytest.raises(ValueError): - DeepONet(branch_net=branch_net, - trunk_net=trunk_net, - input_indeces_branch_net=['a'], - input_indeces_trunk_net=['b', 'c'], - reduction='+', - aggregator='*') + DeepONet( + branch_net=branch_net, + trunk_net=trunk_net, + input_indeces_branch_net=["a"], + input_indeces_trunk_net=["b", "c"], + reduction="+", + aggregator="*", + ) + def test_forward_extract_str(): branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, output_dimensions=10) - model = DeepONet(branch_net=branch_net, - trunk_net=trunk_net, - input_indeces_branch_net=['a'], - input_indeces_trunk_net=['b', 'c'], - reduction='+', - aggregator='*') + model = DeepONet( + branch_net=branch_net, + trunk_net=trunk_net, + input_indeces_branch_net=["a"], + input_indeces_trunk_net=["b", "c"], + reduction="+", + aggregator="*", + ) model(input_) assert model(input_).shape[-1] == 1 @@ -50,82 +58,99 @@ def test_forward_extract_str(): def test_forward_extract_int(): branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, output_dimensions=10) - model = DeepONet(branch_net=branch_net, - trunk_net=trunk_net, - input_indeces_branch_net=[0], - input_indeces_trunk_net=[1, 2], - reduction='+', - aggregator='*') + model = DeepONet( + branch_net=branch_net, + trunk_net=trunk_net, + input_indeces_branch_net=[0], + input_indeces_trunk_net=[1, 2], + reduction="+", + aggregator="*", + ) model(data) + def test_backward_extract_int(): data = torch.rand((20, 3)) branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, 
-    model = DeepONet(branch_net=branch_net,
-                     trunk_net=trunk_net,
-                     input_indeces_branch_net=[0],
-                     input_indeces_trunk_net=[1, 2],
-                     reduction='+',
-                     aggregator='*')
+    model = DeepONet(
+        branch_net=branch_net,
+        trunk_net=trunk_net,
+        input_indeces_branch_net=[0],
+        input_indeces_trunk_net=[1, 2],
+        reduction="+",
+        aggregator="*",
+    )
     data.requires_grad = True
     model(data)
-    l=torch.mean(model(data))
+    l = torch.mean(model(data))
     l.backward()
-    assert data._grad.shape == torch.Size([20,3])
+    assert data._grad.shape == torch.Size([20, 3])
+
 
 def test_forward_extract_str_wrong():
     branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
-    model = DeepONet(branch_net=branch_net,
-                     trunk_net=trunk_net,
-                     input_indeces_branch_net=['a'],
-                     input_indeces_trunk_net=['b', 'c'],
-                     reduction='+',
-                     aggregator='*')
+    model = DeepONet(
+        branch_net=branch_net,
+        trunk_net=trunk_net,
+        input_indeces_branch_net=["a"],
+        input_indeces_trunk_net=["b", "c"],
+        reduction="+",
+        aggregator="*",
+    )
     with pytest.raises(RuntimeError):
         model(data)
 
+
 def test_backward_extract_str_wrong():
     data = torch.rand((20, 3))
     branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
-    model = DeepONet(branch_net=branch_net,
-                     trunk_net=trunk_net,
-                     input_indeces_branch_net=['a'],
-                     input_indeces_trunk_net=['b', 'c'],
-                     reduction='+',
-                     aggregator='*')
+    model = DeepONet(
+        branch_net=branch_net,
+        trunk_net=trunk_net,
+        input_indeces_branch_net=["a"],
+        input_indeces_trunk_net=["b", "c"],
+        reduction="+",
+        aggregator="*",
+    )
     data.requires_grad = True
     with pytest.raises(RuntimeError):
         model(data)
-    l=torch.mean(model(data))
+    l = torch.mean(model(data))
     l.backward()
-    assert data._grad.shape == torch.Size([20,3])
+    assert data._grad.shape == torch.Size([20, 3])
+
 
-@pytest.mark.parametrize('red', symbol_funcs_red)
+@pytest.mark.parametrize("red", symbol_funcs_red)
 def test_forward_symbol_funcs(red):
     branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
-    model = DeepONet(branch_net=branch_net,
-                     trunk_net=trunk_net,
-                     input_indeces_branch_net=['a'],
-                     input_indeces_trunk_net=['b', 'c'],
-                     reduction=red,
-                     aggregator='*')
+    model = DeepONet(
+        branch_net=branch_net,
+        trunk_net=trunk_net,
+        input_indeces_branch_net=["a"],
+        input_indeces_trunk_net=["b", "c"],
+        reduction=red,
+        aggregator="*",
+    )
     model(input_)
     assert model(input_).shape[-1] == 1
 
-@pytest.mark.parametrize('out_dim', output_dims)
+
+@pytest.mark.parametrize("out_dim", output_dims)
 def test_forward_callable_reduction(out_dim):
     branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
     reduction_layer = Linear(10, out_dim)
-    model = DeepONet(branch_net=branch_net,
-                     trunk_net=trunk_net,
-                     input_indeces_branch_net=['a'],
-                     input_indeces_trunk_net=['b', 'c'],
-                     reduction=reduction_layer,
-                     aggregator='*')
+    model = DeepONet(
+        branch_net=branch_net,
+        trunk_net=trunk_net,
+        input_indeces_branch_net=["a"],
+        input_indeces_trunk_net=["b", "c"],
+        reduction=reduction_layer,
+        aggregator="*",
+    )
     model(input_)
     assert model(input_).shape[-1] == out_dim
diff --git a/tests/test_model/test_feed_forward.py b/tests/test_model/test_feed_forward.py
index d02dcb820..3664130b8 100644
--- a/tests/test_model/test_feed_forward.py
+++ b/tests/test_model/test_feed_forward.py
@@ -12,22 +12,25 @@ def test_constructor():
     FeedForward(input_vars, output_vars)
     FeedForward(input_vars, output_vars, inner_size=10, n_layers=20)
     FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2])
-    FeedForward(input_vars,
-                output_vars,
-                layers=[10, 20, 5, 2],
-                func=torch.nn.ReLU)
-    FeedForward(input_vars,
-                output_vars,
-                layers=[10, 20, 5, 2],
-                func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh])
+    FeedForward(
+        input_vars, output_vars, layers=[10, 20, 5, 2], func=torch.nn.ReLU
+    )
+    FeedForward(
+        input_vars,
+        output_vars,
+        layers=[10, 20, 5, 2],
+        func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh],
+    )
 
 
 def test_constructor_wrong():
     with pytest.raises(RuntimeError):
-        FeedForward(input_vars,
-                    output_vars,
-                    layers=[10, 20, 5, 2],
-                    func=[torch.nn.ReLU, torch.nn.ReLU])
+        FeedForward(
+            input_vars,
+            output_vars,
+            layers=[10, 20, 5, 2],
+            func=[torch.nn.ReLU, torch.nn.ReLU],
+        )
 
 
 def test_forward():
@@ -36,11 +39,12 @@ def test_forward():
     output_ = fnn(data)
     assert output_.shape == (data.shape[0], dim_out)
 
+
 def test_backward():
     dim_in, dim_out = 3, 2
     fnn = FeedForward(dim_in, dim_out)
     data.requires_grad = True
     output_ = fnn(data)
-    l=torch.mean(output_)
+    l = torch.mean(output_)
     l.backward()
-    assert data._grad.shape == torch.Size([20,3])
+    assert data._grad.shape == torch.Size([20, 3])
diff --git a/tests/test_model/test_fourier_neural_operator.py b/tests/test_model/test_fourier_neural_operator.py
index 52b003e49..f9082d24c 100644
--- a/tests/test_model/test_fourier_neural_operator.py
+++ b/tests/test_model/test_fourier_neural_operator.py
@@ -13,36 +13,44 @@ def test_constructor():
     projecting_net = torch.nn.Linear(60, output_channels)
 
     # simple constructor
-    FNO(lifting_net=lifting_net,
+    FNO(
+        lifting_net=lifting_net,
         projecting_net=projecting_net,
         n_modes=5,
         dimensions=3,
         inner_size=60,
-        n_layers=5)
+        n_layers=5,
+    )
 
     # simple constructor with n_modes list
-    FNO(lifting_net=lifting_net,
+    FNO(
+        lifting_net=lifting_net,
         projecting_net=projecting_net,
         n_modes=[5, 3, 2],
         dimensions=3,
         inner_size=60,
-        n_layers=5)
+        n_layers=5,
+    )
 
     # simple constructor with n_modes list of list
-    FNO(lifting_net=lifting_net,
+    FNO(
+        lifting_net=lifting_net,
         projecting_net=projecting_net,
         n_modes=[[5, 3, 2], [5, 3, 2]],
         dimensions=3,
         inner_size=60,
-        n_layers=2)
+        n_layers=2,
+    )
 
     # simple constructor with n_modes list of list
     projecting_net = torch.nn.Linear(50, output_channels)
-    FNO(lifting_net=lifting_net,
+    FNO(
+        lifting_net=lifting_net,
         projecting_net=projecting_net,
         n_modes=5,
         dimensions=3,
-        layers=[50, 50])
+        layers=[50, 50],
+    )
 
 
 def test_1d_forward():
@@ -50,12 +58,14 @@ def test_1d_forward():
     input_ = torch.rand(batch_size, resolution[0], input_channels)
     lifting_net = torch.nn.Linear(input_channels, lifting_dim)
     projecting_net = torch.nn.Linear(60, output_channels)
-    fno = FNO(lifting_net=lifting_net,
-              projecting_net=projecting_net,
-              n_modes=5,
-              dimensions=1,
-              inner_size=60,
-              n_layers=2)
+    fno = FNO(
+        lifting_net=lifting_net,
+        projecting_net=projecting_net,
+        n_modes=5,
+        dimensions=1,
+        inner_size=60,
+        n_layers=2,
+    )
     out = fno(input_)
     assert out.shape == torch.Size([batch_size, resolution[0], output_channels])
 
@@ -65,91 +75,120 @@ def test_1d_backward():
     input_ = torch.rand(batch_size, resolution[0], input_channels)
     lifting_net = torch.nn.Linear(input_channels, lifting_dim)
     projecting_net = torch.nn.Linear(60, output_channels)
-    fno = FNO(lifting_net=lifting_net,
-              projecting_net=projecting_net,
-              n_modes=5,
-              dimensions=1,
-              inner_size=60,
-              n_layers=2)
+    fno = FNO(
+        lifting_net=lifting_net,
+        projecting_net=projecting_net,
+        n_modes=5,
+        dimensions=1,
+        inner_size=60,
+        n_layers=2,
+    )
     input_.requires_grad = True
     out = fno(input_)
     l = torch.mean(out)
     l.backward()
-    assert input_.grad.shape == torch.Size([batch_size, resolution[0], input_channels])
+    assert input_.grad.shape == torch.Size(
+        [batch_size, resolution[0], input_channels]
+    )
 
 
 def test_2d_forward():
     input_channels = 2
-    input_ = torch.rand(batch_size, resolution[0], resolution[1],
-                        input_channels)
+    input_ = torch.rand(
+        batch_size, resolution[0], resolution[1], input_channels
+    )
     lifting_net = torch.nn.Linear(input_channels, lifting_dim)
     projecting_net = torch.nn.Linear(60, output_channels)
-    fno = FNO(lifting_net=lifting_net,
-              projecting_net=projecting_net,
-              n_modes=5,
-              dimensions=2,
-              inner_size=60,
-              n_layers=2)
+    fno = FNO(
+        lifting_net=lifting_net,
+        projecting_net=projecting_net,
+        n_modes=5,
+        dimensions=2,
+        inner_size=60,
+        n_layers=2,
+    )
     out = fno(input_)
     assert out.shape == torch.Size(
-        [batch_size, resolution[0], resolution[1], output_channels])
+        [batch_size, resolution[0], resolution[1], output_channels]
+    )
 
 
 def test_2d_backward():
     input_channels = 2
-    input_ = torch.rand(batch_size, resolution[0], resolution[1],
-                        input_channels)
+    input_ = torch.rand(
+        batch_size, resolution[0], resolution[1], input_channels
+    )
     lifting_net = torch.nn.Linear(input_channels, lifting_dim)
     projecting_net = torch.nn.Linear(60, output_channels)
-    fno = FNO(lifting_net=lifting_net,
-              projecting_net=projecting_net,
-              n_modes=5,
-              dimensions=2,
-              inner_size=60,
-              n_layers=2)
+    fno = FNO(
+        lifting_net=lifting_net,
+        projecting_net=projecting_net,
+        n_modes=5,
+        dimensions=2,
+        inner_size=60,
+        n_layers=2,
+    )
     input_.requires_grad = True
     out = fno(input_)
     l = torch.mean(out)
     l.backward()
-    assert input_.grad.shape == torch.Size([
-        batch_size, resolution[0], resolution[1], input_channels
-    ])
+    assert input_.grad.shape == torch.Size(
+        [batch_size, resolution[0], resolution[1], input_channels]
+    )
 
 
 def test_3d_forward():
     input_channels = 3
-    input_ = torch.rand(batch_size, resolution[0], resolution[1], resolution[2],
-                        input_channels)
+    input_ = torch.rand(
+        batch_size, resolution[0], resolution[1], resolution[2], input_channels
+    )
     lifting_net = torch.nn.Linear(input_channels, lifting_dim)
     projecting_net = torch.nn.Linear(60, output_channels)
-    fno = FNO(lifting_net=lifting_net,
-              projecting_net=projecting_net,
-              n_modes=5,
-              dimensions=3,
-              inner_size=60,
-              n_layers=2)
+    fno = FNO(
+        lifting_net=lifting_net,
+        projecting_net=projecting_net,
+        n_modes=5,
+        dimensions=3,
+        inner_size=60,
+        n_layers=2,
+    )
     out = fno(input_)
-    assert out.shape == torch.Size([
-        batch_size, resolution[0], resolution[1], resolution[2], output_channels
-    ])
+    assert out.shape == torch.Size(
+        [
+            batch_size,
+            resolution[0],
+            resolution[1],
+            resolution[2],
+            output_channels,
+        ]
+    )
 
 
 def test_3d_backward():
     input_channels = 3
-    input_ = torch.rand(batch_size, resolution[0], resolution[1], resolution[2],
-                        input_channels)
+    input_ = torch.rand(
+        batch_size, resolution[0], resolution[1], resolution[2], input_channels
+    )
     lifting_net = torch.nn.Linear(input_channels, lifting_dim)
     projecting_net = torch.nn.Linear(60, output_channels)
-    fno = FNO(lifting_net=lifting_net,
-              projecting_net=projecting_net,
-              n_modes=5,
-              dimensions=3,
-              inner_size=60,
-              n_layers=2)
+    fno = FNO(
+        lifting_net=lifting_net,
+        projecting_net=projecting_net,
+        n_modes=5,
+        dimensions=3,
+        inner_size=60,
+        n_layers=2,
+    )
     input_.requires_grad = True
     out = fno(input_)
     l = torch.mean(out)
     l.backward()
-    assert input_.grad.shape == torch.Size([
-        batch_size, resolution[0], resolution[1], resolution[2], input_channels
-    ])
+    assert input_.grad.shape == torch.Size(
+        [
+            batch_size,
+            resolution[0],
+            resolution[1],
+            resolution[2],
+            input_channels,
+        ]
+    )
diff --git a/tests/test_model/test_kernel_neural_operator.py b/tests/test_model/test_kernel_neural_operator.py
index 4a14fd1e4..d36f0aa8a 100644
--- a/tests/test_model/test_kernel_neural_operator.py
+++ b/tests/test_model/test_kernel_neural_operator.py
@@ -10,29 +10,46 @@
 output_shape = torch.Size([batch_size, numb, output_dim])
 
-lifting_operator = FeedForward(input_dimensions=input_dim, output_dimensions=embedding_dim)
-projection_operator = FeedForward(input_dimensions=embedding_dim, output_dimensions=output_dim)
-integral_kernels = torch.nn.Sequential(FeedForward(input_dimensions=embedding_dim,
-                                                   output_dimensions=embedding_dim),
-                                       FeedForward(input_dimensions=embedding_dim,
-                                                   output_dimensions=embedding_dim),)
+lifting_operator = FeedForward(
+    input_dimensions=input_dim, output_dimensions=embedding_dim
+)
+projection_operator = FeedForward(
+    input_dimensions=embedding_dim, output_dimensions=output_dim
+)
+integral_kernels = torch.nn.Sequential(
+    FeedForward(
+        input_dimensions=embedding_dim, output_dimensions=embedding_dim
+    ),
+    FeedForward(
+        input_dimensions=embedding_dim, output_dimensions=embedding_dim
+    ),
+)
 
+
 def test_constructor():
-    KernelNeuralOperator(lifting_operator=lifting_operator,
-                         integral_kernels=integral_kernels,
-                         projection_operator=projection_operator)
-
+    KernelNeuralOperator(
+        lifting_operator=lifting_operator,
+        integral_kernels=integral_kernels,
+        projection_operator=projection_operator,
+    )
+
+
 def test_forward():
-    operator = KernelNeuralOperator(lifting_operator=lifting_operator,
-                                    integral_kernels=integral_kernels,
-                                    projection_operator=projection_operator)
+    operator = KernelNeuralOperator(
+        lifting_operator=lifting_operator,
+        integral_kernels=integral_kernels,
+        projection_operator=projection_operator,
+    )
     out = operator(data)
     assert out.shape == output_shape
 
+
 def test_backward():
-    operator = KernelNeuralOperator(lifting_operator=lifting_operator,
-                                    integral_kernels=integral_kernels,
-                                    projection_operator=projection_operator)
+    operator = KernelNeuralOperator(
+        lifting_operator=lifting_operator,
+        integral_kernels=integral_kernels,
+        projection_operator=projection_operator,
+    )
     out = operator(data)
     loss = torch.nn.functional.mse_loss(out, torch.zeros_like(out))
     loss.backward()
diff --git a/tests/test_model/test_low_rank_neural_operator.py b/tests/test_model/test_low_rank_neural_operator.py
index 1cd09a77f..3702df91b 100644
--- a/tests/test_model/test_low_rank_neural_operator.py
+++ b/tests/test_model/test_low_rank_neural_operator.py
@@ -10,132 +10,157 @@
 func = torch.nn.Tanh
 rank = 4
 n_kernel_layers = 3
-field_indices = ['u']
-coordinates_indices = ['x', 'y']
+field_indices = ["u"]
+coordinates_indices = ["x", "y"]
+
 
 def test_constructor():
     # working constructor
-    lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
-                                  embedding_dim)
-    projecting_net = torch.nn.Linear(embedding_dim + len(coordinates_indices),
-                                     len(field_indices))
+    lifting_net = torch.nn.Linear(
+        len(coordinates_indices) + len(field_indices), embedding_dim
+    )
+    projecting_net = torch.nn.Linear(
+        embedding_dim + len(coordinates_indices), len(field_indices)
+    )
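+    # shapes implied by these nets: lifting maps coords + fields into the
+    # embedding; projection maps embedding + coords back to the fields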
     LowRankNeuralOperator(
         lifting_net=lifting_net,
         projecting_net=projecting_net,
         coordinates_indices=coordinates_indices,
         field_indices=field_indices,
         n_kernel_layers=n_kernel_layers,
-        rank=rank)
+        rank=rank,
+    )
 
     # not working constructor
     with pytest.raises(ValueError):
         LowRankNeuralOperator(
-            lifting_net=lifting_net,
-            projecting_net=projecting_net,
-            coordinates_indices=coordinates_indices,
-            field_indices=field_indices,
-            n_kernel_layers=3.2, # wrong
-            rank=rank)
+            lifting_net=lifting_net,
+            projecting_net=projecting_net,
+            coordinates_indices=coordinates_indices,
+            field_indices=field_indices,
+            n_kernel_layers=3.2,  # wrong
+            rank=rank,
+        )
         LowRankNeuralOperator(
-            lifting_net=[0], # wrong
-            projecting_net=projecting_net,
-            coordinates_indices=coordinates_indices,
-            field_indices=field_indices,
-            n_kernel_layers=n_kernel_layers,
-            rank=rank)
+            lifting_net=[0],  # wrong
+            projecting_net=projecting_net,
+            coordinates_indices=coordinates_indices,
+            field_indices=field_indices,
+            n_kernel_layers=n_kernel_layers,
+            rank=rank,
+        )
         LowRankNeuralOperator(
-            lifting_net=lifting_net,
-            projecting_net=[0], # wront
-            coordinates_indices=coordinates_indices,
-            field_indices=field_indices,
-            n_kernel_layers=n_kernel_layers,
-            rank=rank)
+            lifting_net=lifting_net,
+            projecting_net=[0],  # wrong
+            coordinates_indices=coordinates_indices,
+            field_indices=field_indices,
+            n_kernel_layers=n_kernel_layers,
+            rank=rank,
+        )
         LowRankNeuralOperator(
-            lifting_net=lifting_net,
-            projecting_net=projecting_net,
-            coordinates_indices=[0], #wrong
-            field_indices=field_indices,
-            n_kernel_layers=n_kernel_layers,
-            rank=rank)
+            lifting_net=lifting_net,
+            projecting_net=projecting_net,
+            coordinates_indices=[0],  # wrong
+            field_indices=field_indices,
+            n_kernel_layers=n_kernel_layers,
+            rank=rank,
+        )
         LowRankNeuralOperator(
-            lifting_net=lifting_net,
-            projecting_net=projecting_net,
-            coordinates_indices=coordinates_indices,
-            field_indices=[0], #wrong
-            n_kernel_layers=n_kernel_layers,
-            rank=rank)
+            lifting_net=lifting_net,
+            projecting_net=projecting_net,
+            coordinates_indices=coordinates_indices,
+            field_indices=[0],  # wrong
+            n_kernel_layers=n_kernel_layers,
+            rank=rank,
+        )
 
-    lifting_net = torch.nn.Linear(len(coordinates_indices),
-                                  embedding_dim)
+    lifting_net = torch.nn.Linear(len(coordinates_indices), embedding_dim)
     LowRankNeuralOperator(
         lifting_net=lifting_net,
         projecting_net=projecting_net,
         coordinates_indices=coordinates_indices,
         field_indices=field_indices,
         n_kernel_layers=n_kernel_layers,
-        rank=rank)
-
-    lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
-                                  embedding_dim)
-    projecting_net = torch.nn.Linear(embedding_dim,
-                                     len(field_indices))
+        rank=rank,
+    )
+
+    lifting_net = torch.nn.Linear(
+        len(coordinates_indices) + len(field_indices), embedding_dim
+    )
+    projecting_net = torch.nn.Linear(embedding_dim, len(field_indices))
     LowRankNeuralOperator(
         lifting_net=lifting_net,
         projecting_net=projecting_net,
         coordinates_indices=coordinates_indices,
         field_indices=field_indices,
         n_kernel_layers=n_kernel_layers,
-        rank=rank)
-
+        rank=rank,
+    )
+
 def test_forward():
-    lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
-                                  embedding_dim)
-    projecting_net = torch.nn.Linear(embedding_dim + len(coordinates_indices),
-                                     len(field_indices))
+    lifting_net = torch.nn.Linear(
+        len(coordinates_indices) + len(field_indices), embedding_dim
+    )
+    projecting_net = torch.nn.Linear(
+        embedding_dim + len(coordinates_indices), len(field_indices)
+    )
     lno = LowRankNeuralOperator(
         lifting_net=lifting_net,
         projecting_net=projecting_net,
         coordinates_indices=coordinates_indices,
         field_indices=field_indices,
         n_kernel_layers=n_kernel_layers,
-        rank=rank)
-
+        rank=rank,
+    )
+
     input_ = LabelTensor(
-        torch.rand(batch_size, 100,
-                   len(coordinates_indices) + len(field_indices)),
-        coordinates_indices + field_indices)
+        torch.rand(
+            batch_size, 100, len(coordinates_indices) + len(field_indices)
+        ),
+        coordinates_indices + field_indices,
+    )
     out = lno(input_)
     assert out.shape == torch.Size(
-        [batch_size, input_.shape[1], len(field_indices)])
+        [batch_size, input_.shape[1], len(field_indices)]
+    )
 
 
 def test_backward():
-    lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
-                                  embedding_dim)
-    projecting_net = torch.nn.Linear(embedding_dim + len(coordinates_indices),
-                                     len(field_indices))
-    lno=LowRankNeuralOperator(
+    lifting_net = torch.nn.Linear(
+        len(coordinates_indices) + len(field_indices), embedding_dim
+    )
+    projecting_net = torch.nn.Linear(
+        embedding_dim + len(coordinates_indices), len(field_indices)
+    )
+    lno = LowRankNeuralOperator(
         lifting_net=lifting_net,
         projecting_net=projecting_net,
         coordinates_indices=coordinates_indices,
         field_indices=field_indices,
         n_kernel_layers=n_kernel_layers,
-        rank=rank)
+        rank=rank,
+    )
     input_ = LabelTensor(
-        torch.rand(batch_size, 100,
-                   len(coordinates_indices) + len(field_indices)),
-        coordinates_indices + field_indices)
+        torch.rand(
+            batch_size, 100, len(coordinates_indices) + len(field_indices)
+        ),
+        coordinates_indices + field_indices,
+    )
     input_ = input_.requires_grad_()
     out = lno(input_)
     tmp = torch.linalg.norm(out)
     tmp.backward()
     grad = input_.grad
     assert grad.shape == torch.Size(
-        [batch_size, input_.shape[1],
-         len(coordinates_indices) + len(field_indices)])
\ No newline at end of file
+        [
+            batch_size,
+            input_.shape[1],
+            len(coordinates_indices) + len(field_indices),
+        ]
+    )
diff --git a/tests/test_model/test_mionet.py b/tests/test_model/test_mionet.py
index 174251eed..4d59433bf 100644
--- a/tests/test_model/test_mionet.py
+++ b/tests/test_model/test_mionet.py
@@ -6,7 +6,7 @@ from pina.model import FeedForward
 
 data = torch.rand((20, 3))
-input_vars = ['a', 'b', 'c']
+input_vars = ["a", "b", "c"]
 input_ = LabelTensor(data, input_vars)
 
 
@@ -14,42 +14,42 @@ def test_constructor():
     branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
     branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
-    networks = {branch_net1: ['x'], branch_net2: ['x', 'y'], trunk_net: ['z']}
-    MIONet(networks=networks, reduction='+', aggregator='*')
+    networks = {branch_net1: ["x"], branch_net2: ["x", "y"], trunk_net: ["z"]}
+    MIONet(networks=networks, reduction="+", aggregator="*")
 
 
 def test_constructor_fails_when_invalid_inner_layer_size():
     branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
     branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=12)
-    networks = {branch_net1: ['x'], branch_net2: ['x', 'y'], trunk_net: ['z']}
+    networks = {branch_net1: ["x"], branch_net2: ["x", "y"], trunk_net: ["z"]}
     with pytest.raises(ValueError):
-        MIONet(networks=networks, reduction='+', aggregator='*')
+        MIONet(networks=networks, reduction="+", aggregator="*")
 
 
 def test_forward_extract_str():
     branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
     branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
-    networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
-    model = MIONet(networks=networks, reduction='+', aggregator='*')
+    networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
+    model = MIONet(networks=networks, reduction="+", aggregator="*")
     model(input_)
 
 
 def test_backward_extract_str():
     data = torch.rand((20, 3))
     data.requires_grad = True
-    input_vars = ['a', 'b', 'c']
+    input_vars = ["a", "b", "c"]
     input_ = LabelTensor(data, input_vars)
     branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
     branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
-    networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
-    model = MIONet(networks=networks, reduction='+', aggregator='*')
+    networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
+    model = MIONet(networks=networks, reduction="+", aggregator="*")
     model(input_)
     l = torch.mean(model(input_))
     l.backward()
-    assert data._grad.shape == torch.Size([20,3])
+    assert data._grad.shape == torch.Size([20, 3])
 
 
 def test_forward_extract_int():
@@ -57,7 +57,7 @@ def test_forward_extract_int():
     branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
     networks = {branch_net1: [0], branch_net2: [1], trunk_net: [2]}
-    model = MIONet(networks=networks, reduction='+', aggregator='*')
+    model = MIONet(networks=networks, reduction="+", aggregator="*")
     model(data)
 
 
@@ -68,19 +68,19 @@ def test_backward_extract_int():
     branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
     networks = {branch_net1: [0], branch_net2: [1], trunk_net: [2]}
-    model = MIONet(networks=networks, reduction='+', aggregator='*')
+    model = MIONet(networks=networks, reduction="+", aggregator="*")
     model(data)
     l = torch.mean(model(data))
     l.backward()
-    assert data._grad.shape == torch.Size([20,3])
+    assert data._grad.shape == torch.Size([20, 3])
 
 
 def test_forward_extract_str_wrong():
     branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
     branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
-    networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
-    model = MIONet(networks=networks, reduction='+', aggregator='*')
+    networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
+    model = MIONet(networks=networks, reduction="+", aggregator="*")
     with pytest.raises(RuntimeError):
         model(data)
 
@@ -91,10 +91,10 @@ def test_backward_extract_str_wrong():
     branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
     branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
     trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
-    networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
-    model = MIONet(networks=networks, reduction='+', aggregator='*')
+    networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
+    model = MIONet(networks=networks, reduction="+", aggregator="*")
     with pytest.raises(RuntimeError):
         model(data)
     l = torch.mean(model(data))
     l.backward()
-    assert data._grad.shape == torch.Size([20,3])
+    assert data._grad.shape == torch.Size([20, 3])
diff --git a/tests/test_model/test_residual_feed_forward.py b/tests/test_model/test_residual_feed_forward.py
index 1c0cbf8cf..8cad1c63c 100644
--- a/tests/test_model/test_residual_feed_forward.py
+++ b/tests/test_model/test_residual_feed_forward.py
@@ -9,15 +9,17 @@ def test_constructor():
     # wrong transformer nets (not 2)
     with pytest.raises(ValueError):
-        ResidualFeedForward(input_dimensions=2,
-                            output_dimensions=1,
-                            transformer_nets=[torch.nn.Linear(2, 20)])
+        ResidualFeedForward(
+            input_dimensions=2,
+            output_dimensions=1,
+            transformer_nets=[torch.nn.Linear(2, 20)],
+        )
 
     # wrong transformer nets (not nn.Module)
     with pytest.raises(ValueError):
-        ResidualFeedForward(input_dimensions=2,
-                            output_dimensions=1,
-                            transformer_nets=[2, 2])
+        ResidualFeedForward(
+            input_dimensions=2, output_dimensions=1, transformer_nets=[2, 2]
+        )
 
 
 def test_forward():
@@ -34,4 +36,3 @@ def test_backward():
     l = torch.mean(model(x))
     l.backward()
     assert x.grad.shape == torch.Size([10, 2])
-
\ No newline at end of file
diff --git a/tests/test_model/test_spline.py b/tests/test_model/test_spline.py
index 4bb9a8035..d38b1610b 100644
--- a/tests/test_model/test_spline.py
+++ b/tests/test_model/test_spline.py
@@ -9,54 +9,61 @@
 valid_args = [
     {
-        'knots': torch.tensor([0., 0., 0., 1., 2., 3., 3., 3.]),
-        'control_points': torch.tensor([0., 0., 1., 0., 0.]),
-        'order': 3
+        "knots": torch.tensor([0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0]),
+        "control_points": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
+        "order": 3,
     },
     {
-        'knots': torch.tensor([-2., -2., -2., -2., -1., 0., 1., 2., 2., 2., 2.]),
-        'control_points': torch.tensor([0., 0., 0., 6., 0., 0., 0.]),
-        'order': 4
+        "knots": torch.tensor(
+            [-2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0, 2.0]
+        ),
+        "control_points": torch.tensor([0.0, 0.0, 0.0, 6.0, 0.0, 0.0, 0.0]),
+        "order": 4,
     },
     # {'control_points': {'n': 5, 'dim': 1}, 'order': 2},
     # {'control_points': {'n': 7, 'dim': 1}, 'order': 3}
 ]
-
+
+
 def scipy_check(model, x, y):
     from scipy.interpolate._bsplines import BSpline
     import numpy as np
+
     spline = BSpline(
         t=model.knots.detach().numpy(),
         c=model.control_points.detach().numpy(),
-        k=model.order-1
+        k=model.order - 1,
     )
     y_scipy = spline(x).flatten()
     y = y.detach().numpy()
     np.testing.assert_allclose(y, y_scipy, atol=1e-5)
 
+
 @pytest.mark.parametrize("args", valid_args)
 def test_constructor(args):
     Spline(**args)
 
+
 def test_constructor_wrong():
     with pytest.raises(ValueError):
         Spline()
 
+
 @pytest.mark.parametrize("args", valid_args)
 def test_forward(args):
-    min_x = args['knots'][0]
-    max_x = args['knots'][-1]
+    min_x = args["knots"][0]
+    max_x = args["knots"][-1]
     xi = torch.linspace(min_x, max_x, 1000)
     model = Spline(**args)
     yi = model(xi).squeeze()
     scipy_check(model, xi, yi)
-    return
-
+    return
+
+
 @pytest.mark.parametrize("args", valid_args)
 def test_backward(args):
-    min_x = args['knots'][0]
-    max_x = args['knots'][-1]
+    min_x = args["knots"][0]
+    max_x = args["knots"][-1]
     xi = torch.linspace(min_x, max_x, 100)
     model = Spline(**args)
     yi = model(xi)
diff --git a/tests/test_operator.py b/tests/test_operator.py
index fe6d06fe2..e274fda65 100644
--- a/tests/test_operator.py
+++ b/tests/test_operator.py
@@ -10,97 +10,101 @@ def func_vector(x):
     return x**2
 
 
 def func_scalar(x):
-    x_ = x.extract(['x'])
-    y_ = x.extract(['y'])
-    z_ = x.extract(['z'])
+    x_ = x.extract(["x"])
+    y_ = x.extract(["y"])
+    z_ = x.extract(["z"])
     return x_**2 + y_**2 + z_**2
 
 
 data = torch.rand((20, 3))
-inp = LabelTensor(data, ['x', 'y', 'z']).requires_grad_(True)
-labels = ['a', 'b', 'c']
+inp = LabelTensor(data, ["x", "y", "z"]).requires_grad_(True)
+labels = ["a", "b", "c"]
 tensor_v = LabelTensor(func_vector(inp), labels)
 tensor_s = LabelTensor(func_scalar(inp).reshape(-1, 1), labels[0])
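+# analytic references for the checks below: with s = x^2 + y^2 + z^2 and
+# v = (x^2, y^2, z^2), grad(s) = 2(x, y, z), div(v) = 2(x + y + z),
+# lap(s) = 6 and lap(v) = (2, 2, 2)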
 
 
 def test_grad_scalar_output():
     grad_tensor_s = grad(tensor_s, inp)
-    true_val = 2*inp
+    true_val = 2 * inp
     true_val.labels = inp.labels
     assert grad_tensor_s.shape == inp.shape
     assert grad_tensor_s.labels == [
-        f'd{tensor_s.labels[0]}d{i}' for i in inp.labels
+        f"d{tensor_s.labels[0]}d{i}" for i in inp.labels
     ]
     assert torch.allclose(grad_tensor_s, true_val)
 
-    grad_tensor_s = grad(tensor_s, inp, d=['x', 'y'])
+    grad_tensor_s = grad(tensor_s, inp, d=["x", "y"])
     assert grad_tensor_s.shape == (20, 2)
     assert grad_tensor_s.labels == [
-        f'd{tensor_s.labels[0]}d{i}' for i in ['x', 'y']
+        f"d{tensor_s.labels[0]}d{i}" for i in ["x", "y"]
     ]
-    assert torch.allclose(grad_tensor_s, true_val.extract(['x', 'y']))
+    assert torch.allclose(grad_tensor_s, true_val.extract(["x", "y"]))
 
 
 def test_grad_vector_output():
     grad_tensor_v = grad(tensor_v, inp)
     true_val = torch.cat(
-        (2*inp.extract(['x']),
-         torch.zeros_like(inp.extract(['y'])),
-         torch.zeros_like(inp.extract(['z'])),
-         torch.zeros_like(inp.extract(['x'])),
-         2*inp.extract(['y']),
-         torch.zeros_like(inp.extract(['z'])),
-         torch.zeros_like(inp.extract(['x'])),
-         torch.zeros_like(inp.extract(['y'])),
-         2*inp.extract(['z'])
-         ), dim=1
+        (
+            2 * inp.extract(["x"]),
+            torch.zeros_like(inp.extract(["y"])),
+            torch.zeros_like(inp.extract(["z"])),
+            torch.zeros_like(inp.extract(["x"])),
+            2 * inp.extract(["y"]),
+            torch.zeros_like(inp.extract(["z"])),
+            torch.zeros_like(inp.extract(["x"])),
+            torch.zeros_like(inp.extract(["y"])),
+            2 * inp.extract(["z"]),
+        ),
+        dim=1,
     )
     assert grad_tensor_v.shape == (20, 9)
     assert grad_tensor_v.labels == [
-        f'd{j}d{i}' for j in tensor_v.labels for i in inp.labels
+        f"d{j}d{i}" for j in tensor_v.labels for i in inp.labels
     ]
     assert torch.allclose(grad_tensor_v, true_val)
 
-    grad_tensor_v = grad(tensor_v, inp, d=['x', 'y'])
+    grad_tensor_v = grad(tensor_v, inp, d=["x", "y"])
     true_val = torch.cat(
-        (2*inp.extract(['x']),
-         torch.zeros_like(inp.extract(['y'])),
-         torch.zeros_like(inp.extract(['x'])),
-         2*inp.extract(['y']),
-         torch.zeros_like(inp.extract(['x'])),
-         torch.zeros_like(inp.extract(['y']))
-         ), dim=1
+        (
+            2 * inp.extract(["x"]),
+            torch.zeros_like(inp.extract(["y"])),
+            torch.zeros_like(inp.extract(["x"])),
+            2 * inp.extract(["y"]),
+            torch.zeros_like(inp.extract(["x"])),
+            torch.zeros_like(inp.extract(["y"])),
+        ),
+        dim=1,
    )
     assert grad_tensor_v.shape == (inp.shape[0], 6)
     assert grad_tensor_v.labels == [
-        f'd{j}d{i}' for j in tensor_v.labels for i in ['x', 'y']
+        f"d{j}d{i}" for j in tensor_v.labels for i in ["x", "y"]
     ]
     assert torch.allclose(grad_tensor_v, true_val)
 
 
 def test_div_vector_output():
     div_tensor_v = div(tensor_v, inp)
-    true_val = 2*torch.sum(inp, dim=1).reshape(-1,1)
+    true_val = 2 * torch.sum(inp, dim=1).reshape(-1, 1)
     assert div_tensor_v.shape == (20, 1)
-    assert div_tensor_v.labels == [f'dadx+dbdy+dcdz']
+    assert div_tensor_v.labels == [f"dadx+dbdy+dcdz"]
     assert torch.allclose(div_tensor_v, true_val)
 
-    div_tensor_v = div(tensor_v, inp, components=['a', 'b'], d=['x', 'y'])
-    true_val = 2*torch.sum(inp.extract(['x', 'y']), dim=1).reshape(-1,1)
+    div_tensor_v = div(tensor_v, inp, components=["a", "b"], d=["x", "y"])
+    true_val = 2 * torch.sum(inp.extract(["x", "y"]), dim=1).reshape(-1, 1)
     assert div_tensor_v.shape == (inp.shape[0], 1)
-    assert div_tensor_v.labels == [f'dadx+dbdy']
+    assert div_tensor_v.labels == [f"dadx+dbdy"]
     assert torch.allclose(div_tensor_v, true_val)
 
 
 def test_laplacian_scalar_output():
     laplace_tensor_s = laplacian(tensor_s, inp)
-    true_val = 6*torch.ones_like(laplace_tensor_s)
+    true_val = 6 * torch.ones_like(laplace_tensor_s)
     assert laplace_tensor_s.shape == tensor_s.shape
     assert laplace_tensor_s.labels == [f"dd{tensor_s.labels[0]}"]
     assert torch.allclose(laplace_tensor_s, true_val)
 
-    laplace_tensor_s = laplacian(tensor_s, inp, components=['a'], d=['x', 'y'])
-    true_val = 4*torch.ones_like(laplace_tensor_s)
+    laplace_tensor_s = laplacian(tensor_s, inp, components=["a"], d=["x", "y"])
+    true_val = 4 * torch.ones_like(laplace_tensor_s)
     assert laplace_tensor_s.shape == tensor_s.shape
     assert laplace_tensor_s.labels == [f"dd{tensor_s.labels[0]}"]
     assert torch.allclose(laplace_tensor_s, true_val)
 
@@ -110,47 +114,53 @@ def test_laplacian_vector_output():
     laplace_tensor_v = laplacian(tensor_v, inp)
     print(laplace_tensor_v.labels)
     print(tensor_v.labels)
-    true_val = 2*torch.ones_like(tensor_v)
+    true_val = 2 * torch.ones_like(tensor_v)
     assert laplace_tensor_v.shape == tensor_v.shape
-    assert laplace_tensor_v.labels == [
-        f'dd{i}' for i in tensor_v.labels
-    ]
+    assert laplace_tensor_v.labels == [f"dd{i}" for i in tensor_v.labels]
     assert torch.allclose(laplace_tensor_v, true_val)
 
-    laplace_tensor_v = laplacian(tensor_v,
-                                 inp,
-                                 components=['a', 'b'],
-                                 d=['x', 'y'])
-    true_val = 2*torch.ones_like(tensor_v.extract(['a', 'b']))
-    assert laplace_tensor_v.shape == tensor_v.extract(['a', 'b']).shape
-    assert laplace_tensor_v.labels == [
-        f'dd{i}' for i in ['a', 'b']
-    ]
+    laplace_tensor_v = laplacian(
+        tensor_v, inp, components=["a", "b"], d=["x", "y"]
+    )
+    true_val = 2 * torch.ones_like(tensor_v.extract(["a", "b"]))
+    assert laplace_tensor_v.shape == tensor_v.extract(["a", "b"]).shape
+    assert laplace_tensor_v.labels == [f"dd{i}" for i in ["a", "b"]]
     assert torch.allclose(laplace_tensor_v, true_val)
 
+
 def test_laplacian_vector_output2():
-    x = LabelTensor(torch.linspace(0,1,10, requires_grad=True).reshape(-1,1), labels = ['x'])
-    y = LabelTensor(torch.linspace(3,4,10, requires_grad=True).reshape(-1,1), labels = ['y'])
-    input_ = LabelTensor(torch.cat((x,y), dim = 1), labels = ['x', 'y'])
+    x = LabelTensor(
+        torch.linspace(0, 1, 10, requires_grad=True).reshape(-1, 1),
+        labels=["x"],
+    )
+    y = LabelTensor(
+        torch.linspace(3, 4, 10, requires_grad=True).reshape(-1, 1),
+        labels=["y"],
+    )
+    input_ = LabelTensor(torch.cat((x, y), dim=1), labels=["x", "y"])
 
     # Construct two scalar functions:
     # u = x**2 + y**2
     # v = x**2 - y**2
-    u = LabelTensor(input_.extract('x')**2 + input_.extract('y')**2, labels='u')
-    v = LabelTensor(input_.extract('x')**2 - input_.extract('y')**2, labels='v')
+    u = LabelTensor(
+        input_.extract("x") ** 2 + input_.extract("y") ** 2, labels="u"
+    )
+    v = LabelTensor(
+        input_.extract("x") ** 2 - input_.extract("y") ** 2, labels="v"
+    )
 
     # Define a vector-valued function, whose components are u and v.
-    f = LabelTensor(torch.cat((u,v), dim = 1), labels = ['u', 'v'])
+    f = LabelTensor(torch.cat((u, v), dim=1), labels=["u", "v"])
 
     # Compute the scalar laplacian of both u and v:
     # Lap(u) = [4, 4, 4, ..., 4]
     # Lap(v) = [0, 0, 0, ..., 0]
-    lap_u = laplacian(u, input_, components=['u'])
-    lap_v = laplacian(v, input_, components=['v'])
+    lap_u = laplacian(u, input_, components=["u"])
+    lap_v = laplacian(v, input_, components=["v"])
 
     # Compute the laplacian of f: the two columns should correspond
     # to the laplacians of u and v, respectively...
-    lap_f = laplacian(f, input_, components=['u', 'v'])
+    lap_f = laplacian(f, input_, components=["u", "v"])
 
-    assert torch.allclose(lap_f.extract('ddu'), lap_u)
-    assert torch.allclose(lap_f.extract('ddv'), lap_v)
+    assert torch.allclose(lap_f.extract("ddu"), lap_u)
+    assert torch.allclose(lap_f.extract("ddv"), lap_v)
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
index 89b1293a9..037de9929 100644
--- a/tests/test_optimizer.py
+++ b/tests/test_optimizer.py
@@ -3,7 +3,10 @@ from pina.optim import TorchOptimizer
 
 opt_list = [
-    torch.optim.Adam, torch.optim.AdamW, torch.optim.SGD, torch.optim.RMSprop
+    torch.optim.Adam,
+    torch.optim.AdamW,
+    torch.optim.SGD,
+    torch.optim.RMSprop,
 ]
diff --git a/tests/test_package.py b/tests/test_package.py
index f85bed550..f59bd6c21 100644
--- a/tests/test_package.py
+++ b/tests/test_package.py
@@ -1,2 +1,2 @@
 def test_import():
-    import pina
\ No newline at end of file
+    import pina
diff --git a/tests/test_problem.py b/tests/test_problem.py
index 30122d48e..069dc0620 100644
--- a/tests/test_problem.py
+++ b/tests/test_problem.py
@@ -9,24 +9,24 @@ def test_discretise_domain():
     n = 10
     poisson_problem = Poisson()
-    boundaries = ['g1', 'g2', 'g3', 'g4']
-    poisson_problem.discretise_domain(n, 'grid', domains=boundaries)
+    boundaries = ["g1", "g2", "g3", "g4"]
+    poisson_problem.discretise_domain(n, "grid", domains=boundaries)
     for b in boundaries:
         assert poisson_problem.discretised_domains[b].shape[0] == n
-    poisson_problem.discretise_domain(n, 'random', domains=boundaries)
+    poisson_problem.discretise_domain(n, "random", domains=boundaries)
     for b in boundaries:
         assert poisson_problem.discretised_domains[b].shape[0] == n
 
-    poisson_problem.discretise_domain(n, 'grid', domains=['D'])
-    assert poisson_problem.discretised_domains['D'].shape[0] == n ** 2
-    poisson_problem.discretise_domain(n, 'random', domains=['D'])
-    assert poisson_problem.discretised_domains['D'].shape[0] == n
+    poisson_problem.discretise_domain(n, "grid", domains=["D"])
+    assert poisson_problem.discretised_domains["D"].shape[0] == n**2
+    poisson_problem.discretise_domain(n, "random", domains=["D"])
+    assert poisson_problem.discretised_domains["D"].shape[0] == n
 
-    poisson_problem.discretise_domain(n, 'latin', domains=['D'])
-    assert poisson_problem.discretised_domains['D'].shape[0] == n
+    poisson_problem.discretise_domain(n, "latin", domains=["D"])
+    assert poisson_problem.discretised_domains["D"].shape[0] == n
 
-    poisson_problem.discretise_domain(n, 'lh', domains=['D'])
-    assert poisson_problem.discretised_domains['D'].shape[0] == n
+    poisson_problem.discretise_domain(n, "lh", domains=["D"])
+    assert poisson_problem.discretised_domains["D"].shape[0] == n
 
     poisson_problem.discretise_domain(n)
 
@@ -34,61 +34,53 @@ def test_variables_correct_order_sampling():
     n = 10
     poisson_problem = Poisson()
-    poisson_problem.discretise_domain(n,
-                                      'grid',
-                                      domains=['D'])
-    assert poisson_problem.discretised_domains['D'].labels == sorted(
-        poisson_problem.input_variables)
+    poisson_problem.discretise_domain(n, "grid", domains=["D"])
+    assert poisson_problem.discretised_domains["D"].labels == sorted(
+        poisson_problem.input_variables
+    )
 
-    poisson_problem.discretise_domain(n, 'grid', domains=['D'])
-    assert poisson_problem.discretised_domains['D'].labels == sorted(
-        poisson_problem.input_variables)
+    poisson_problem.discretise_domain(n, "grid", domains=["D"])
+    assert poisson_problem.discretised_domains["D"].labels == sorted(
+        poisson_problem.input_variables
+    )
 
 
 def test_add_points():
     poisson_problem = Poisson()
-    poisson_problem.discretise_domain(0,
-                                      'random',
-                                      domains=['D'])
-    new_pts = LabelTensor(torch.tensor([[0.5, -0.5]]), labels=['x', 'y'])
-    poisson_problem.add_points({'D': new_pts})
-    assert torch.isclose(poisson_problem.discretised_domains['D'].extract('x'),
-                         new_pts.extract('x'))
-    assert torch.isclose(poisson_problem.discretised_domains['D'].extract('y'),
-                         new_pts.extract('y'))
+    poisson_problem.discretise_domain(0, "random", domains=["D"])
+    new_pts = LabelTensor(torch.tensor([[0.5, -0.5]]), labels=["x", "y"])
+    poisson_problem.add_points({"D": new_pts})
+    assert torch.isclose(
+        poisson_problem.discretised_domains["D"].extract("x"),
+        new_pts.extract("x"),
+    )
+    assert torch.isclose(
+        poisson_problem.discretised_domains["D"].extract("y"),
+        new_pts.extract("y"),
+    )
 
-@pytest.mark.parametrize(
-    "mode",
-    [
-        'random',
-        'grid'
-    ]
-)
+
+@pytest.mark.parametrize("mode", ["random", "grid"])
 def test_custom_sampling_logic(mode):
     poisson_problem = Poisson()
     sampling_rules = {
-        'x': {'n': 100, 'mode': mode},
-        'y': {'n': 50, 'mode': mode}
+        "x": {"n": 100, "mode": mode},
+        "y": {"n": 50, "mode": mode},
     }
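+    # the per-variable rules combine into a grid, so each domain ends up
+    # with 100 * 50 points labelled "x" and "y"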
     poisson_problem.discretise_domain(sample_rules=sampling_rules)
-    for domain in ['g1', 'g2', 'g3', 'g4']:
+    for domain in ["g1", "g2", "g3", "g4"]:
         assert poisson_problem.discretised_domains[domain].shape[0] == 100 * 50
-        assert poisson_problem.discretised_domains[domain].labels == ['x', 'y']
+        assert poisson_problem.discretised_domains[domain].labels == ["x", "y"]
+
 
-@pytest.mark.parametrize(
-    "mode",
-    [
-        'random',
-        'grid'
-    ]
-)
+@pytest.mark.parametrize("mode", ["random", "grid"])
 def test_wrong_custom_sampling_logic(mode):
-    d2 = CartesianDomain({'x': [1,2], 'y': [0,1] })
+    d2 = CartesianDomain({"x": [1, 2], "y": [0, 1]})
     poisson_problem = Poisson()
-    poisson_problem.domains['D'] = Union([poisson_problem.domains['D'], d2])
+    poisson_problem.domains["D"] = Union([poisson_problem.domains["D"], d2])
     sampling_rules = {
-        'x': {'n': 100, 'mode': mode},
-        'y': {'n': 50, 'mode': mode}
+        "x": {"n": 100, "mode": mode},
+        "y": {"n": 50, "mode": mode},
     }
     with pytest.raises(RuntimeError):
-        poisson_problem.discretise_domain(sample_rules=sampling_rules)
\ No newline at end of file
+        poisson_problem.discretise_domain(sample_rules=sampling_rules)
diff --git a/tests/test_problem_zoo/test_poisson_2d_square.py b/tests/test_problem_zoo/test_poisson_2d_square.py
index 6c4221cd3..272eb8c5a 100644
--- a/tests/test_problem_zoo/test_poisson_2d_square.py
+++ b/tests/test_problem_zoo/test_poisson_2d_square.py
@@ -1,4 +1,5 @@
 from pina.problem.zoo import Poisson2DSquareProblem
 
+
 def test_constructor():
-    Poisson2DSquareProblem()
\ No newline at end of file
+    Poisson2DSquareProblem()
diff --git a/tests/test_scheduler.py b/tests/test_scheduler.py
index 5f3e3e7e1..157a818d2 100644
--- a/tests/test_scheduler.py
+++ b/tests/test_scheduler.py
@@ -1,4 +1,3 @@
-
 import torch
 import pytest
 from pina.optim import TorchOptimizer, TorchScheduler
@@ -7,21 +6,21 @@
     torch.optim.Adam,
     torch.optim.AdamW,
     torch.optim.SGD,
-    torch.optim.RMSprop
+    torch.optim.RMSprop,
 ]
 
-sch_list = [
-    torch.optim.lr_scheduler.ConstantLR
-]
+sch_list = [torch.optim.lr_scheduler.ConstantLR]
+
 
 @pytest.mark.parametrize("scheduler_class", sch_list)
 def test_constructor(scheduler_class):
     TorchScheduler(scheduler_class)
 
+
 @pytest.mark.parametrize("optimizer_class", opt_list)
 @pytest.mark.parametrize("scheduler_class", sch_list)
 def test_hook(optimizer_class, scheduler_class):
     opt = TorchOptimizer(optimizer_class, lr=1e-3)
     opt.hook(torch.nn.Linear(10, 10).parameters())
     sch = TorchScheduler(scheduler_class)
-    sch.hook(opt)
\ No newline at end of file
+    sch.hook(opt)
diff --git a/tests/test_solver/test_causal_pinn.py b/tests/test_solver/test_causal_pinn.py
index c813b630c..107502f8a 100644
--- a/tests/test_solver/test_causal_pinn.py
+++ b/tests/test_solver/test_causal_pinn.py
@@ -8,21 +8,22 @@ from pina.model import FeedForward
 from pina.problem.zoo import (
     DiffusionReactionProblem,
-    InverseDiffusionReactionProblem
+    InverseDiffusionReactionProblem,
 )
 from pina.condition import (
     InputTargetCondition,
     InputEquationCondition,
-    DomainEquationCondition
+    DomainEquationCondition,
 )
 from torch._dynamo.eval_frame import OptimizedModule
 
 
 class DummySpatialProblem(SpatialProblem):
-    '''
+    """
     A mock spatial problem for testing purposes.
-    '''
-    output_variables = ['u']
+    """
+
+    output_variables = ["u"]
     conditions = {}
     spatial_domain = None
 
@@ -32,20 +33,14 @@ class DummySpatialProblem(SpatialProblem):
 problem.discretise_domain(50)
 inverse_problem = InverseDiffusionReactionProblem()
 inverse_problem.discretise_domain(50)
-model = FeedForward(
-    len(problem.input_variables),
-    len(problem.output_variables)
-)
+model = FeedForward(len(problem.input_variables), len(problem.output_variables))
 
 # add input-output condition to test supervised learning
 input_pts = torch.rand(50, len(problem.input_variables))
 input_pts = LabelTensor(input_pts, problem.input_variables)
 output_pts = torch.rand(50, len(problem.output_variables))
 output_pts = LabelTensor(output_pts, problem.output_variables)
-problem.conditions['data'] = Condition(
-    input=input_pts,
-    target=output_pts
-)
+problem.conditions["data"] = Condition(input=input_pts, target=output_pts)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -58,7 +53,7 @@ def test_constructor(problem, eps):
     assert solver.accepted_conditions_types == (
         InputTargetCondition,
         InputEquationCondition,
-        DomainEquationCondition
+        DomainEquationCondition,
     )
 
 
@@ -67,17 +62,19 @@ def test_constructor(problem, eps):
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_train(problem, batch_size, compile):
     solver = CausalPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=1.,
-                      val_size=0.,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=1.0,
+        val_size=0.0,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -85,17 +82,19 @@ def test_solver_train(problem, batch_size, compile):
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_validation(problem, batch_size, compile):
     solver = CausalPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -103,17 +102,19 @@ def test_solver_validation(problem, batch_size, compile):
 def test_solver_test(problem, batch_size, compile):
     solver = CausalPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        compile=compile,
+    )
     trainer.test()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -121,26 +122,31 @@ def test_train_load_restore(problem):
     dir = "tests/test_solver/tmp"
     problem = problem
     solver = CausalPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      default_root_dir=dir)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        default_root_dir=dir,
+    )
     trainer.train()
 
     # restore
-    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
+    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
     new_trainer.train(
-        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
-        'epoch=4-step=5.ckpt')
+        ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/"
+        + "epoch=4-step=5.ckpt"
+    )
 
     # loading
     new_solver = CausalPINN.load_from_checkpoint(
-        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
-        problem=problem, model=model)
+        f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt",
+        problem=problem,
+        model=model,
+    )
     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
     assert new_solver.forward(test_pts).shape == (20, 1)
@@ -148,9 +154,10 @@
         solver.forward(test_pts).shape
     )
     torch.testing.assert_close(
-        new_solver.forward(test_pts),
-        solver.forward(test_pts))
+        new_solver.forward(test_pts), solver.forward(test_pts)
+    )
 
     # rm directories
     import shutil
-    shutil.rmtree('tests/test_solver/tmp')
+
+    shutil.rmtree("tests/test_solver/tmp")
diff --git a/tests/test_solver/test_competitive_pinn.py b/tests/test_solver/test_competitive_pinn.py
index 4190edcc4..c5f8017a2 100644
--- a/tests/test_solver/test_competitive_pinn.py
+++ b/tests/test_solver/test_competitive_pinn.py
@@ -1,5 +1,5 @@
 import torch
-import pytest 
+import pytest
 from pina import LabelTensor, Condition
 from pina.solver import CompetitivePINN as CompPINN
 
@@ -7,12 +7,12 @@ from pina.model import FeedForward
 from pina.problem.zoo import (
     Poisson2DSquareProblem as Poisson,
-    InversePoisson2DSquareProblem as InversePoisson
+    InversePoisson2DSquareProblem as InversePoisson,
 )
 from pina.condition import (
     InputTargetCondition,
     InputEquationCondition,
-    DomainEquationCondition
+    DomainEquationCondition,
 )
 from torch._dynamo.eval_frame import OptimizedModule
 
@@ -22,20 +22,15 @@
 problem.discretise_domain(50)
 inverse_problem = InversePoisson()
 inverse_problem.discretise_domain(50)
-model = FeedForward(
-    len(problem.input_variables),
-    len(problem.output_variables)
-)
+model = FeedForward(len(problem.input_variables), len(problem.output_variables))
 
 # add input-output condition to test supervised learning
 input_pts = torch.rand(50, len(problem.input_variables))
 input_pts = LabelTensor(input_pts, problem.input_variables)
 output_pts = torch.rand(50, len(problem.output_variables))
 output_pts = LabelTensor(output_pts, problem.output_variables)
-problem.conditions['data'] = Condition(
-    input=input_pts,
-    target=output_pts
-)
+problem.conditions["data"] = Condition(input=input_pts, target=output_pts)
+
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("discr", [None, model])
@@ -46,27 +41,30 @@ def test_constructor(problem, discr):
     assert solver.accepted_conditions_types == (
         InputTargetCondition,
         InputEquationCondition,
-        DomainEquationCondition
+        DomainEquationCondition,
     )
 
+
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_train(problem, batch_size, compile):
     solver = CompPINN(problem=problem, model=model)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=1.,
-                      val_size=0.,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=1.0,
+        val_size=0.0,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (all([isinstance(model, OptimizedModule)
-                     for model in solver.models]))
-
+        assert all(
+            [isinstance(model, OptimizedModule) for model in solver.models]
+        )
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -74,62 +72,75 @@ def test_solver_train(problem, batch_size, compile):
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_validation(problem, batch_size, compile):
     solver = CompPINN(problem=problem, model=model)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (all([isinstance(model, OptimizedModule)
-                     for model in solver.models]))
+        assert all(
+            [isinstance(model, OptimizedModule) for model in solver.models]
+        )
+
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_test(problem, batch_size, compile):
     solver = CompPINN(problem=problem, model=model)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        compile=compile,
+    )
     trainer.test()
     if trainer.compile:
-        assert (all([isinstance(model, OptimizedModule)
-                     for model in solver.models]))
+        assert all(
+            [isinstance(model, OptimizedModule) for model in solver.models]
+        )
+
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 def test_train_load_restore(problem):
     dir = "tests/test_solver/tmp"
     problem = problem
     solver = CompPINN(problem=problem, model=model)
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      default_root_dir=dir)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        default_root_dir=dir,
+    )
     trainer.train()
 
     # restore
-    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
+    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
     new_trainer.train(
-        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
-        'epoch=4-step=5.ckpt')
+        ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/"
+        + "epoch=4-step=5.ckpt"
+    )
 
     # loading
     new_solver = CompPINN.load_from_checkpoint(
-        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
-        problem=problem, model=model)
+        f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt",
+        problem=problem,
+        model=model,
+    )
     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
     assert new_solver.forward(test_pts).shape == (20, 1)
@@ -137,9 +148,10 @@
         solver.forward(test_pts).shape
     )
     torch.testing.assert_close(
-        new_solver.forward(test_pts),
-        solver.forward(test_pts))
+        new_solver.forward(test_pts), solver.forward(test_pts)
+    )
 
     # rm directories
     import shutil
-    shutil.rmtree('tests/test_solver/tmp')
\ No newline at end of file
+
+    shutil.rmtree("tests/test_solver/tmp")
diff --git a/tests/test_solver/test_garom.py b/tests/test_solver/test_garom.py
index aef228660..ed147c809 100644
--- a/tests/test_solver/test_garom.py
+++ b/tests/test_solver/test_garom.py
@@ -12,53 +12,62 @@
 
 class TensorProblem(AbstractProblem):
-    input_variables = ['u_0', 'u_1']
-    output_variables = ['u']
+    input_variables = ["u_0", "u_1"]
+    output_variables = ["u"]
     conditions = {
-        'data': Condition(
-            target=torch.randn(50, 2),
-            input=torch.randn(50, 1))
+        "data": Condition(target=torch.randn(50, 2), input=torch.randn(50, 1))
     }
 
 
 # simple Generator Network
 class Generator(nn.Module):
 
-    def __init__(self,
-                 input_dimension=2,
-                 parameters_dimension=1,
-                 noise_dimension=2,
-                 activation=torch.nn.SiLU):
+    def __init__(
+        self,
+        input_dimension=2,
+        parameters_dimension=1,
+        noise_dimension=2,
+        activation=torch.nn.SiLU,
+    ):
         super().__init__()
 
         self._noise_dimension = noise_dimension
         self._activation = activation
-        self.model = FeedForward(6*noise_dimension, input_dimension)
+        self.model = FeedForward(6 * noise_dimension, input_dimension)
         self.condition = FeedForward(parameters_dimension, 5 * noise_dimension)
 
     def forward(self, param):
         # uniform sampling in [-1, 1]
-        z = 2 * torch.rand(size=(param.shape[0], self._noise_dimension),
-                           device=param.device,
-                           dtype=param.dtype,
-                           requires_grad=True) - 1
+        z = (
+            2
+            * torch.rand(
+                size=(param.shape[0], self._noise_dimension),
+                device=param.device,
+                dtype=param.dtype,
+                requires_grad=True,
+            )
+            - 1
+        )
         return self.model(torch.cat((z, self.condition(param)), dim=-1))
 
+
 # Simple Discriminator Network
 class Discriminator(nn.Module):
 
-    def __init__(self,
-                 input_dimension=2,
-                 parameter_dimension=1,
-                 hidden_dimension=2,
-                 activation=torch.nn.ReLU):
+    def __init__(
+        self,
+        input_dimension=2,
+        parameter_dimension=1,
+        hidden_dimension=2,
+        activation=torch.nn.ReLU,
+    ):
         super().__init__()
 
         self._activation = activation
         self.encoding = FeedForward(input_dimension, hidden_dimension)
-        self.decoding = FeedForward(2*hidden_dimension, input_dimension)
+        self.decoding = FeedForward(2 * hidden_dimension, input_dimension)
         self.condition = FeedForward(parameter_dimension, hidden_dimension)
 
     def forward(self, data):
@@ -70,103 +79,124 @@ def forward(self, data):
 
 
 def test_constructor():
-    GAROM(problem=TensorProblem(),
-          generator=Generator(),
-          discriminator=Discriminator())
-    assert GAROM.accepted_conditions_types == (
-        InputTargetCondition
+    GAROM(
+        problem=TensorProblem(),
+        generator=Generator(),
+        discriminator=Discriminator(),
     )
+    assert GAROM.accepted_conditions_types == (InputTargetCondition)
 
 
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_train(batch_size, compile):
-    solver = GAROM(problem=TensorProblem(),
-                   generator=Generator(),
-                   discriminator=Discriminator())
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=1.,
-                      test_size=0.,
-                      val_size=0.,
-                      compile=compile)
+    solver = GAROM(
+        problem=TensorProblem(),
+        generator=Generator(),
+        discriminator=Discriminator(),
+    )
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=1.0,
+        test_size=0.0,
+        val_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (all([isinstance(model, OptimizedModule)
-                     for model in solver.models]))
+        assert all(
+            [isinstance(model, OptimizedModule) for model in solver.models]
+        )
 
 
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_validation(batch_size, compile):
-    solver = GAROM(problem=TensorProblem(),
-                   generator=Generator(),
-                   discriminator=Discriminator())
-
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    solver = GAROM(
+        problem=TensorProblem(),
+        generator=Generator(),
+        discriminator=Discriminator(),
+    )
+
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (all([isinstance(model, OptimizedModule)
-                     for model in solver.models]))
+        assert all(
+            [isinstance(model, OptimizedModule) for model in solver.models]
+        )
 
 
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_test(batch_size, compile):
-    solver = GAROM(problem=TensorProblem(),
-                   generator=Generator(),
-                   discriminator=Discriminator(),
-                   )
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.8,
-                      val_size=0.1,
-                      test_size=0.1,
-                      compile=compile)
+    solver = GAROM(
+        problem=TensorProblem(),
+        generator=Generator(),
+        discriminator=Discriminator(),
+    )
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.8,
+        val_size=0.1,
+        test_size=0.1,
+        compile=compile,
+    )
     trainer.test()
     if trainer.compile:
-        assert (all([isinstance(model, OptimizedModule)
-                     for model in solver.models]))
+        assert all(
+            [isinstance(model, OptimizedModule) for model in solver.models]
+        )
 
 
 def test_train_load_restore():
     dir = "tests/test_solver/tmp/"
     problem = TensorProblem()
-    solver = GAROM(problem=TensorProblem(),
-                   generator=Generator(),
-                   discriminator=Discriminator(),
-                   )
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.9,
-                      test_size=0.1,
-                      val_size=0.,
-                      default_root_dir=dir)
+    solver = GAROM(
+        problem=TensorProblem(),
+        generator=Generator(),
+        discriminator=Discriminator(),
+    )
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
accelerator="cpu", + batch_size=None, + train_size=0.9, + test_size=0.1, + val_size=0.0, + default_root_dir=dir, + ) trainer.train() # restore - new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu') + new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu") new_trainer.train( - ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' + - 'epoch=4-step=5.ckpt') + ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/" + + "epoch=4-step=5.ckpt" + ) # loading new_solver = GAROM.load_from_checkpoint( - f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt', - problem=TensorProblem(), generator=Generator(), discriminator=Discriminator()) + f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt", + problem=TensorProblem(), + generator=Generator(), + discriminator=Discriminator(), + ) test_pts = torch.rand(20, 1) assert new_solver.forward(test_pts).shape == (20, 2) @@ -174,4 +204,5 @@ def test_train_load_restore(): # rm directories import shutil - shutil.rmtree('tests/test_solver/tmp') + + shutil.rmtree("tests/test_solver/tmp") diff --git a/tests/test_solver/test_gradient_pinn.py b/tests/test_solver/test_gradient_pinn.py index e7c5adb60..c572036ea 100644 --- a/tests/test_solver/test_gradient_pinn.py +++ b/tests/test_solver/test_gradient_pinn.py @@ -8,12 +8,12 @@ from pina.trainer import Trainer from pina.problem.zoo import ( Poisson2DSquareProblem as Poisson, - InversePoisson2DSquareProblem as InversePoisson + InversePoisson2DSquareProblem as InversePoisson, ) from pina.condition import ( InputTargetCondition, InputEquationCondition, - DomainEquationCondition + DomainEquationCondition, ) from torch._dynamo.eval_frame import OptimizedModule @@ -22,7 +22,8 @@ class DummyTimeProblem(TimeDependentProblem): """ A mock time-dependent problem for testing purposes. 
""" - output_variables = ['u'] + + output_variables = ["u"] temporal_domain = None conditions = {} @@ -32,20 +33,14 @@ class DummyTimeProblem(TimeDependentProblem): problem.discretise_domain(50) inverse_problem = InversePoisson() inverse_problem.discretise_domain(50) -model = FeedForward( - len(problem.input_variables), - len(problem.output_variables) -) +model = FeedForward(len(problem.input_variables), len(problem.output_variables)) # add input-output condition to test supervised learning input_pts = torch.rand(50, len(problem.input_variables)) input_pts = LabelTensor(input_pts, problem.input_variables) output_pts = torch.rand(50, len(problem.output_variables)) output_pts = LabelTensor(output_pts, problem.output_variables) -problem.conditions['data'] = Condition( - input=input_pts, - target=output_pts -) +problem.conditions["data"] = Condition(input=input_pts, target=output_pts) @pytest.mark.parametrize("problem", [problem, inverse_problem]) @@ -57,7 +52,7 @@ def test_constructor(problem): assert solver.accepted_conditions_types == ( InputTargetCondition, InputEquationCondition, - DomainEquationCondition + DomainEquationCondition, ) @@ -66,17 +61,19 @@ def test_constructor(problem): @pytest.mark.parametrize("compile", [True, False]) def test_solver_train(problem, batch_size, compile): solver = GradientPINN(model=model, problem=problem) - trainer = Trainer(solver=solver, - max_epochs=2, - accelerator='cpu', - batch_size=batch_size, - train_size=1., - val_size=0., - test_size=0., - compile=compile) + trainer = Trainer( + solver=solver, + max_epochs=2, + accelerator="cpu", + batch_size=batch_size, + train_size=1.0, + val_size=0.0, + test_size=0.0, + compile=compile, + ) trainer.train() if trainer.compile: - assert (isinstance(solver.model, OptimizedModule)) + assert isinstance(solver.model, OptimizedModule) @pytest.mark.parametrize("problem", [problem, inverse_problem]) @@ -84,17 +81,19 @@ def test_solver_train(problem, batch_size, compile): @pytest.mark.parametrize("compile", [True, False]) def test_solver_validation(problem, batch_size, compile): solver = GradientPINN(model=model, problem=problem) - trainer = Trainer(solver=solver, - max_epochs=2, - accelerator='cpu', - batch_size=batch_size, - train_size=0.9, - val_size=0.1, - test_size=0., - compile=compile) + trainer = Trainer( + solver=solver, + max_epochs=2, + accelerator="cpu", + batch_size=batch_size, + train_size=0.9, + val_size=0.1, + test_size=0.0, + compile=compile, + ) trainer.train() if trainer.compile: - assert (isinstance(solver.model, OptimizedModule)) + assert isinstance(solver.model, OptimizedModule) @pytest.mark.parametrize("problem", [problem, inverse_problem]) @@ -102,17 +101,19 @@ def test_solver_validation(problem, batch_size, compile): @pytest.mark.parametrize("compile", [True, False]) def test_solver_test(problem, batch_size, compile): solver = GradientPINN(model=model, problem=problem) - trainer = Trainer(solver=solver, - max_epochs=2, - accelerator='cpu', - batch_size=batch_size, - train_size=0.7, - val_size=0.2, - test_size=0.1, - compile=compile) + trainer = Trainer( + solver=solver, + max_epochs=2, + accelerator="cpu", + batch_size=batch_size, + train_size=0.7, + val_size=0.2, + test_size=0.1, + compile=compile, + ) trainer.test() if trainer.compile: - assert (isinstance(solver.model, OptimizedModule)) + assert isinstance(solver.model, OptimizedModule) @pytest.mark.parametrize("problem", [problem, inverse_problem]) @@ -120,26 +121,31 @@ def test_train_load_restore(problem): dir = "tests/test_solver/tmp" 
     problem = problem
     solver = GradientPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      default_root_dir=dir)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        default_root_dir=dir,
+    )
     trainer.train()
     # restore
-    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
+    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
     new_trainer.train(
-        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
-        'epoch=4-step=5.ckpt')
+        ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/"
+        + "epoch=4-step=5.ckpt"
+    )
     # loading
     new_solver = GradientPINN.load_from_checkpoint(
-        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
-        problem=problem, model=model)
+        f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt",
+        problem=problem,
+        model=model,
+    )
     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
     assert new_solver.forward(test_pts).shape == (20, 1)
@@ -147,9 +153,10 @@ def test_train_load_restore(problem):
         solver.forward(test_pts).shape
     )
     torch.testing.assert_close(
-        new_solver.forward(test_pts),
-        solver.forward(test_pts))
+        new_solver.forward(test_pts), solver.forward(test_pts)
+    )
 
     # rm directories
     import shutil
-    shutil.rmtree('tests/test_solver/tmp')
+
+    shutil.rmtree("tests/test_solver/tmp")
diff --git a/tests/test_solver/test_pinn.py b/tests/test_solver/test_pinn.py
index 88a1d0608..98d14389e 100644
--- a/tests/test_solver/test_pinn.py
+++ b/tests/test_solver/test_pinn.py
@@ -8,11 +8,11 @@
 from pina.condition import (
     InputTargetCondition,
     InputEquationCondition,
-    DomainEquationCondition
+    DomainEquationCondition,
 )
 from pina.problem.zoo import (
     Poisson2DSquareProblem as Poisson,
-    InversePoisson2DSquareProblem as InversePoisson
+    InversePoisson2DSquareProblem as InversePoisson,
 )
 from torch._dynamo.eval_frame import OptimizedModule
 
@@ -22,20 +22,15 @@
 problem.discretise_domain(50)
 inverse_problem = InversePoisson()
 inverse_problem.discretise_domain(50)
-model = FeedForward(
-    len(problem.input_variables),
-    len(problem.output_variables)
-)
+model = FeedForward(len(problem.input_variables), len(problem.output_variables))
 
 # add input-output condition to test supervised learning
 input_pts = torch.rand(50, len(problem.input_variables))
 input_pts = LabelTensor(input_pts, problem.input_variables)
 output_pts = torch.rand(50, len(problem.output_variables))
 output_pts = LabelTensor(output_pts, problem.output_variables)
-problem.conditions['data'] = Condition(
-    input=input_pts,
-    target=output_pts
-)
+problem.conditions["data"] = Condition(input=input_pts, target=output_pts)
+
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 def test_constructor(problem):
@@ -44,22 +39,25 @@
     assert solver.accepted_conditions_types == (
         InputTargetCondition,
         InputEquationCondition,
-        DomainEquationCondition
+        DomainEquationCondition,
     )
+
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_train(problem, batch_size, compile):
     solver = PINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=1.,
-                      val_size=0.,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=1.0,
+        val_size=0.0,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
 
@@ -68,31 +66,36 @@ def test_solver_train(problem, batch_size, compile):
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_validation(problem, batch_size, compile):
     solver = PINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert(isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
+
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_test(problem, batch_size, compile):
     solver = PINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        compile=compile,
+    )
     trainer.test()
 
@@ -101,34 +104,40 @@ def test_train_load_restore(problem):
     dir = "tests/test_solver/tmp"
     problem = problem
     solver = PINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      default_root_dir=dir)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        default_root_dir=dir,
+    )
     trainer.train()
     # restore
-    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
+    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
     new_trainer.train(
-        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
-        'epoch=4-step=5.ckpt')
+        ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/"
+        + "epoch=4-step=5.ckpt"
+    )
     # loading
     new_solver = PINN.load_from_checkpoint(
-        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
-        problem=problem, model=model)
+        f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt",
+        problem=problem,
+        model=model,
+    )
     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
     assert new_solver.forward(test_pts).shape == (20, 1)
     assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
     torch.testing.assert_close(
-        new_solver.forward(test_pts),
-        solver.forward(test_pts))
+        new_solver.forward(test_pts), solver.forward(test_pts)
+    )
 
     # rm directories
     import shutil
-    shutil.rmtree('tests/test_solver/tmp')
+
+    shutil.rmtree("tests/test_solver/tmp")
diff --git a/tests/test_solver/test_rba_pinn.py b/tests/test_solver/test_rba_pinn.py
index cb29084a2..ba74eba91 100644
--- a/tests/test_solver/test_rba_pinn.py
+++ b/tests/test_solver/test_rba_pinn.py
@@ -8,11 +8,11 @@
 from pina.condition import (
     InputTargetCondition,
     InputEquationCondition,
-    DomainEquationCondition
+    DomainEquationCondition,
 )
 from pina.problem.zoo import (
     Poisson2DSquareProblem as Poisson,
-    InversePoisson2DSquareProblem as InversePoisson
+    InversePoisson2DSquareProblem as InversePoisson,
 )
 from torch._dynamo.eval_frame import OptimizedModule
 
@@ -21,20 +21,14 @@
 problem.discretise_domain(50)
 inverse_problem = InversePoisson()
 inverse_problem.discretise_domain(50)
-model = FeedForward(
-    len(problem.input_variables),
-    len(problem.output_variables)
-)
+model = FeedForward(len(problem.input_variables), len(problem.output_variables))
 
 # add input-output condition to test supervised learning
 input_pts = torch.rand(50, len(problem.input_variables))
 input_pts = LabelTensor(input_pts, problem.input_variables)
 output_pts = torch.rand(50, len(problem.output_variables))
 output_pts = LabelTensor(output_pts, problem.output_variables)
-problem.conditions['data'] = Condition(
-    input=input_pts,
-    target=output_pts
-)
+problem.conditions["data"] = Condition(input=input_pts, target=output_pts)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -48,7 +42,7 @@ def test_constructor(problem, eta, gamma):
     assert solver.accepted_conditions_types == (
         InputTargetCondition,
         InputEquationCondition,
-        DomainEquationCondition
+        DomainEquationCondition,
     )
 
 
@@ -56,13 +50,15 @@ def test_constructor(problem, eta, gamma):
 def test_wrong_batch(problem):
     with pytest.raises(NotImplementedError):
         solver = RBAPINN(model=model, problem=problem)
-        trainer = Trainer(solver=solver,
-                          max_epochs=2,
-                          accelerator='cpu',
-                          batch_size=10,
-                          train_size=1.,
-                          val_size=0.,
-                          test_size=0.)
+        trainer = Trainer(
+            solver=solver,
+            max_epochs=2,
+            accelerator="cpu",
+            batch_size=10,
+            train_size=1.0,
+            val_size=0.0,
+            test_size=0.0,
+        )
         trainer.train()
 
@@ -70,51 +66,57 @@ def test_wrong_batch(problem):
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_train(problem, compile):
     solver = RBAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=1.,
-                      val_size=0.,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=1.0,
+        val_size=0.0,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_validation(problem, compile):
     solver = RBAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_test(problem, compile):
     solver = RBAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        compile=compile,
+    )
     trainer.test()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -122,26 +124,31 @@ def test_train_load_restore(problem):
     dir = "tests/test_solver/tmp"
     problem = problem
     solver = RBAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      default_root_dir=dir)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        default_root_dir=dir,
+    )
     trainer.train()
     # restore
-    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
+    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
     new_trainer.train(
-        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
-        'epoch=4-step=5.ckpt')
+        ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/"
+        + "epoch=4-step=5.ckpt"
+    )
     # loading
     new_solver = RBAPINN.load_from_checkpoint(
-        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
-        problem=problem, model=model)
+        f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt",
+        problem=problem,
+        model=model,
+    )
     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
     assert new_solver.forward(test_pts).shape == (20, 1)
@@ -149,9 +156,10 @@ def test_train_load_restore(problem):
         solver.forward(test_pts).shape
     )
     torch.testing.assert_close(
-        new_solver.forward(test_pts),
-        solver.forward(test_pts))
+        new_solver.forward(test_pts), solver.forward(test_pts)
+    )
 
     # rm directories
     import shutil
-    shutil.rmtree('tests/test_solver/tmp')
+
+    shutil.rmtree("tests/test_solver/tmp")
diff --git a/tests/test_solver/test_reduced_order_model_solver.py b/tests/test_solver/test_reduced_order_model_solver.py
index e00b58703..5427ec7a2 100644
--- a/tests/test_solver/test_reduced_order_model_solver.py
+++ b/tests/test_solver/test_reduced_order_model_solver.py
@@ -12,22 +12,21 @@
 
 
 class LabelTensorProblem(AbstractProblem):
-    input_variables = ['u_0', 'u_1']
-    output_variables = ['u']
+    input_variables = ["u_0", "u_1"]
+    output_variables = ["u"]
     conditions = {
-        'data': Condition(
-            input=LabelTensor(torch.randn(20, 2), ['u_0', 'u_1']),
-            target=LabelTensor(torch.randn(20, 1), ['u'])),
+        "data": Condition(
+            input=LabelTensor(torch.randn(20, 2), ["u_0", "u_1"]),
+            target=LabelTensor(torch.randn(20, 1), ["u"]),
+        ),
     }
 
 
 class TensorProblem(AbstractProblem):
-    input_variables = ['u_0', 'u_1']
-    output_variables = ['u']
+    input_variables = ["u_0", "u_1"]
+    output_variables = ["u"]
     conditions = {
-        'data': Condition(
-            input=torch.randn(20, 2),
-            target=torch.randn(20, 1))
+        "data": Condition(input=torch.randn(20, 2), target=torch.randn(20, 1))
     }
 
@@ -35,23 +34,27 @@ class AE(torch.nn.Module):
     def __init__(self, input_dimensions, rank):
         super().__init__()
         self.encode = FeedForward(
-            input_dimensions, rank, layers=[input_dimensions//4])
+            input_dimensions, rank, layers=[input_dimensions // 4]
+        )
         self.decode = FeedForward(
-            rank, input_dimensions, layers=[input_dimensions//4])
+            rank, input_dimensions, layers=[input_dimensions // 4]
+        )
 
 
 class AE_missing_encode(torch.nn.Module):
     def __init__(self, input_dimensions, rank):
         super().__init__()
         self.encode = FeedForward(
-            input_dimensions, rank, layers=[input_dimensions//4])
+            input_dimensions, rank, layers=[input_dimensions // 4]
+        )
 
 
 class AE_missing_decode(torch.nn.Module):
     def __init__(self, input_dimensions, rank):
         super().__init__()
         self.decode = FeedForward(
-            rank, input_dimensions, layers=[input_dimensions//4])
+            rank, input_dimensions, layers=[input_dimensions // 4]
+        )
 
 
 rank = 10
@@ -62,26 +65,41 @@ def __init__(self, input_dimensions, rank):
 
 def test_constructor():
     problem = TensorProblem()
-    ReducedOrderModelSolver(problem=problem,
-                            interpolation_network=interpolation_net,
-                            reduction_network=reduction_net)
-    ReducedOrderModelSolver(problem=LabelTensorProblem(),
-                            reduction_network=reduction_net,
-                            interpolation_network=interpolation_net)
-    assert ReducedOrderModelSolver.accepted_conditions_types == InputTargetCondition
+    ReducedOrderModelSolver(
+        problem=problem,
+        interpolation_network=interpolation_net,
+        reduction_network=reduction_net,
+    )
+    ReducedOrderModelSolver(
+        problem=LabelTensorProblem(),
+        reduction_network=reduction_net,
+        interpolation_network=interpolation_net,
+    )
+    assert (
+        ReducedOrderModelSolver.accepted_conditions_types
+        == InputTargetCondition
+    )
     with pytest.raises(SyntaxError):
-        ReducedOrderModelSolver(problem=problem,
-                                reduction_network=AE_missing_encode(
-                                    len(problem.output_variables), rank),
-                                interpolation_network=interpolation_net)
-        ReducedOrderModelSolver(problem=problem,
-                                reduction_network=AE_missing_decode(
-                                    len(problem.output_variables), rank),
-                                interpolation_network=interpolation_net)
+        ReducedOrderModelSolver(
+            problem=problem,
+            reduction_network=AE_missing_encode(
+                len(problem.output_variables), rank
+            ),
+            interpolation_network=interpolation_net,
+        )
+        ReducedOrderModelSolver(
+            problem=problem,
+            reduction_network=AE_missing_decode(
+                len(problem.output_variables), rank
+            ),
+            interpolation_network=interpolation_net,
+        )
     with pytest.raises(ValueError):
-        ReducedOrderModelSolver(problem=Poisson2DSquareProblem(),
-                                reduction_network=reduction_net,
-                                interpolation_network=interpolation_net)
+        ReducedOrderModelSolver(
+            problem=Poisson2DSquareProblem(),
+            reduction_network=reduction_net,
+            interpolation_network=interpolation_net,
+        )
 
 
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@@ -89,99 +107,122 @@ def test_constructor():
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_train(use_lt, batch_size, compile):
     problem = LabelTensorProblem() if use_lt else TensorProblem()
-    solver = ReducedOrderModelSolver(problem=problem,
-                                     reduction_network=reduction_net,
-                                     interpolation_network=interpolation_net, use_lt=use_lt)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=1.,
-                      test_size=0.,
-                      val_size=0.,
-                      compile=compile)
+    solver = ReducedOrderModelSolver(
+        problem=problem,
+        reduction_network=reduction_net,
+        interpolation_network=interpolation_net,
+        use_lt=use_lt,
+    )
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=1.0,
+        test_size=0.0,
+        val_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
         for v in solver.model.values():
-            assert (isinstance(v, OptimizedModule))
+            assert isinstance(v, OptimizedModule)
 
 
 @pytest.mark.parametrize("use_lt", [True, False])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_validation(use_lt, compile):
     problem = LabelTensorProblem() if use_lt else TensorProblem()
-    solver = ReducedOrderModelSolver(problem=problem,
-                                     reduction_network=reduction_net,
-                                     interpolation_network=interpolation_net, use_lt=use_lt)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    solver = ReducedOrderModelSolver(
+        problem=problem,
+        reduction_network=reduction_net,
+        interpolation_network=interpolation_net,
+        use_lt=use_lt,
+    )
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
         for v in solver.model.values():
-            assert (isinstance(v, OptimizedModule))
+            assert isinstance(v, OptimizedModule)
 
 
 @pytest.mark.parametrize("use_lt", [True, False])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_test(use_lt, compile):
     problem = LabelTensorProblem() if use_lt else TensorProblem()
-    solver = ReducedOrderModelSolver(problem=problem,
-                                     reduction_network=reduction_net,
-                                     interpolation_network=interpolation_net, use_lt=use_lt)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.8,
-                      val_size=0.1,
-                      test_size=0.1,
-                      compile=compile)
+    solver = ReducedOrderModelSolver(
+        problem=problem,
+        reduction_network=reduction_net,
+        interpolation_network=interpolation_net,
+        use_lt=use_lt,
+    )
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.8,
+        val_size=0.1,
+        test_size=0.1,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
         for v in solver.model.values():
-            assert (isinstance(v, OptimizedModule))
+            assert isinstance(v, OptimizedModule)
 
 
 def test_train_load_restore():
     dir = "tests/test_solver/tmp/"
     problem = LabelTensorProblem()
-    solver = ReducedOrderModelSolver(problem=problem,
-
-                                     reduction_network=reduction_net,
-                                     interpolation_network=interpolation_net)
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.9,
-                      test_size=0.1,
-                      val_size=0.,
-                      default_root_dir=dir)
+    solver = ReducedOrderModelSolver(
+        problem=problem,
+        reduction_network=reduction_net,
+        interpolation_network=interpolation_net,
+    )
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.9,
+        test_size=0.1,
+        val_size=0.0,
+        default_root_dir=dir,
+    )
     trainer.train()
     # restore
-    ntrainer = Trainer(solver=solver,
-                       max_epochs=5,
-                       accelerator='cpu',)
+    ntrainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+    )
     ntrainer.train(
-        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt')
+        ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt"
+    )
     # loading
     new_solver = ReducedOrderModelSolver.load_from_checkpoint(
-        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
+        f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt",
         problem=problem,
         reduction_network=reduction_net,
-        interpolation_network=interpolation_net)
+        interpolation_network=interpolation_net,
+    )
     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
     assert new_solver.forward(test_pts).shape == (20, 1)
     assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
     torch.testing.assert_close(
-        new_solver.forward(test_pts),
-        solver.forward(test_pts))
+        new_solver.forward(test_pts), solver.forward(test_pts)
+    )
 
     # rm directories
     import shutil
-    shutil.rmtree('tests/test_solver/tmp')
+
+    shutil.rmtree("tests/test_solver/tmp")
diff --git a/tests/test_solver/test_self_adaptive_pinn.py b/tests/test_solver/test_self_adaptive_pinn.py
index b2df9fa05..b42472df5 100644
--- a/tests/test_solver/test_self_adaptive_pinn.py
+++ b/tests/test_solver/test_self_adaptive_pinn.py
@@ -7,12 +7,12 @@
 from pina.model import FeedForward
 from pina.problem.zoo import (
     Poisson2DSquareProblem as Poisson,
-    InversePoisson2DSquareProblem as InversePoisson
+    InversePoisson2DSquareProblem as InversePoisson,
 )
 from pina.condition import (
     InputTargetCondition,
     InputEquationCondition,
-    DomainEquationCondition
+    DomainEquationCondition,
 )
 from torch._dynamo.eval_frame import OptimizedModule
 
@@ -22,20 +22,14 @@
 problem.discretise_domain(50)
 inverse_problem = InversePoisson()
 inverse_problem.discretise_domain(50)
-model = FeedForward(
-    len(problem.input_variables),
-    len(problem.output_variables)
-)
+model = FeedForward(len(problem.input_variables), len(problem.output_variables))
 
 # add input-output condition to test supervised learning
 input_pts = torch.rand(50, len(problem.input_variables))
 input_pts = LabelTensor(input_pts, problem.input_variables)
 output_pts = torch.rand(50, len(problem.output_variables))
 output_pts = LabelTensor(output_pts, problem.output_variables)
-problem.conditions['data'] = Condition(
-    input=input_pts,
-    target=output_pts
-)
+problem.conditions["data"] = Condition(input=input_pts, target=output_pts)
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -48,7 +42,7 @@ def test_constructor(problem, weight_fn):
     assert solver.accepted_conditions_types == (
         InputTargetCondition,
         InputEquationCondition,
-        DomainEquationCondition
+        DomainEquationCondition,
     )
 
 
@@ -56,13 +50,15 @@ def test_constructor(problem, weight_fn):
 def test_wrong_batch(problem):
     with pytest.raises(NotImplementedError):
         solver = SAPINN(model=model, problem=problem)
-        trainer = Trainer(solver=solver,
-                          max_epochs=2,
-                          accelerator='cpu',
-                          batch_size=10,
-                          train_size=1.,
-                          val_size=0.,
-                          test_size=0.)
+        trainer = Trainer(
+            solver=solver,
+            max_epochs=2,
+            accelerator="cpu",
+            batch_size=10,
+            train_size=1.0,
+            val_size=0.0,
+            test_size=0.0,
+        )
         trainer.train()
 
@@ -70,54 +66,72 @@ def test_wrong_batch(problem):
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_train(problem, compile):
     solver = SAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=1.,
-                      val_size=0.,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=1.0,
+        val_size=0.0,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (all([isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
-                for model in solver.models]))
+        assert all(
+            [
+                isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
+                for model in solver.models
+            ]
+        )
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_validation(problem, compile):
     solver = SAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (all([isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
-                for model in solver.models]))
+        assert all(
+            [
+                isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
+                for model in solver.models
+            ]
+        )
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
 @pytest.mark.parametrize("compile", [True, False])
 def test_solver_test(problem, compile):
     solver = SAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        compile=compile,
+    )
     trainer.test()
     if trainer.compile:
-        assert (all([isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
-                for model in solver.models]))
+        assert all(
+            [
+                isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
+                for model in solver.models
+            ]
+        )
 
 
 @pytest.mark.parametrize("problem", [problem, inverse_problem])
@@ -125,25 +139,30 @@ def test_train_load_restore(problem):
     dir = "tests/test_solver/tmp"
     problem = problem
     solver = SAPINN(model=model, problem=problem)
-    trainer = Trainer(solver=solver,
-                      max_epochs=5,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.7,
-                      val_size=0.2,
-                      test_size=0.1,
-                      default_root_dir=dir)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=5,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.7,
+        val_size=0.2,
+        test_size=0.1,
+        default_root_dir=dir,
+    )
     trainer.train()
     # restore
-    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
+    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
     new_trainer.train(
-        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
-        'epoch=4-step=5.ckpt')
+        ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/"
+        + "epoch=4-step=5.ckpt"
+    )
     # loading
     new_solver = SAPINN.load_from_checkpoint(
-        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
-        problem=problem, model=model)
+        f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt",
+        problem=problem,
+        model=model,
+    )
     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
     assert new_solver.forward(test_pts).shape == (20, 1)
@@ -151,9 +170,10 @@ def test_train_load_restore(problem):
         solver.forward(test_pts).shape
     )
     torch.testing.assert_close(
-        new_solver.forward(test_pts),
-        solver.forward(test_pts))
+        new_solver.forward(test_pts), solver.forward(test_pts)
+    )
 
     # rm directories
     import shutil
-    shutil.rmtree('tests/test_solver/tmp')
+
+    shutil.rmtree("tests/test_solver/tmp")
diff --git a/tests/test_solver/test_supervised_solver.py b/tests/test_solver/test_supervised_solver.py
index e8aad5bac..63f6b307e 100644
--- a/tests/test_solver/test_supervised_solver.py
+++ b/tests/test_solver/test_supervised_solver.py
@@ -10,22 +10,21 @@
 
 
 class LabelTensorProblem(AbstractProblem):
-    input_variables = ['u_0', 'u_1']
-    output_variables = ['u']
+    input_variables = ["u_0", "u_1"]
+    output_variables = ["u"]
     conditions = {
-        'data': Condition(
-            input=LabelTensor(torch.randn(20, 2), ['u_0', 'u_1']),
-            target=LabelTensor(torch.randn(20, 1), ['u'])),
+        "data": Condition(
+            input=LabelTensor(torch.randn(20, 2), ["u_0", "u_1"]),
+            target=LabelTensor(torch.randn(20, 1), ["u"]),
+        ),
     }
 
 
 class TensorProblem(AbstractProblem):
-    input_variables = ['u_0', 'u_1']
-    output_variables = ['u']
+    input_variables = ["u_0", "u_1"]
+    output_variables = ["u"]
     conditions = {
-        'data': Condition(
-            input=torch.randn(20, 2),
-            target=torch.randn(20, 1))
+        "data": Condition(input=torch.randn(20, 2), target=torch.randn(20, 1))
     }
 
@@ -35,9 +34,7 @@ class TensorProblem(AbstractProblem):
 def test_constructor():
     SupervisedSolver(problem=TensorProblem(), model=model)
     SupervisedSolver(problem=LabelTensorProblem(), model=model)
-    assert SupervisedSolver.accepted_conditions_types == (
-        InputTargetCondition
-    )
+    assert SupervisedSolver.accepted_conditions_types == (InputTargetCondition)
 
 
 @pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@@ -46,18 +43,20 @@ def test_constructor():
 def test_solver_train(use_lt, batch_size, compile):
     problem = LabelTensorProblem() if use_lt else TensorProblem()
     solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=batch_size,
-                      train_size=1.,
-                      test_size=0.,
-                      val_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=batch_size,
+        train_size=1.0,
+        test_size=0.0,
+        val_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
 
 
 @pytest.mark.parametrize("use_lt", [True, False])
@@ -65,17 +64,19 @@ def test_solver_train(use_lt, batch_size, compile):
 def test_solver_validation(use_lt, compile):
     problem = LabelTensorProblem() if use_lt else TensorProblem()
     solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
-    trainer = Trainer(solver=solver,
-                      max_epochs=2,
-                      accelerator='cpu',
-                      batch_size=None,
-                      train_size=0.9,
-                      val_size=0.1,
-                      test_size=0.,
-                      compile=compile)
+    trainer = Trainer(
+        solver=solver,
+        max_epochs=2,
+        accelerator="cpu",
+        batch_size=None,
+        train_size=0.9,
+        val_size=0.1,
+        test_size=0.0,
+        compile=compile,
+    )
     trainer.train()
     if trainer.compile:
-        assert (isinstance(solver.model, OptimizedModule))
+        assert isinstance(solver.model, OptimizedModule)
@pytest.mark.parametrize("use_lt", [True, False]) @@ -83,51 +84,59 @@ def test_solver_validation(use_lt, compile): def test_solver_test(use_lt, compile): problem = LabelTensorProblem() if use_lt else TensorProblem() solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt) - trainer = Trainer(solver=solver, - max_epochs=2, - accelerator='cpu', - batch_size=None, - train_size=0.8, - val_size=0.1, - test_size=0.1, - compile=compile) + trainer = Trainer( + solver=solver, + max_epochs=2, + accelerator="cpu", + batch_size=None, + train_size=0.8, + val_size=0.1, + test_size=0.1, + compile=compile, + ) trainer.test() if trainer.compile: - assert (isinstance(solver.model, OptimizedModule)) + assert isinstance(solver.model, OptimizedModule) def test_train_load_restore(): dir = "tests/test_solver/tmp/" problem = LabelTensorProblem() solver = SupervisedSolver(problem=problem, model=model) - trainer = Trainer(solver=solver, - max_epochs=5, - accelerator='cpu', - batch_size=None, - train_size=0.9, - test_size=0.1, - val_size=0., - default_root_dir=dir) + trainer = Trainer( + solver=solver, + max_epochs=5, + accelerator="cpu", + batch_size=None, + train_size=0.9, + test_size=0.1, + val_size=0.0, + default_root_dir=dir, + ) trainer.train() # restore - new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu') + new_trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu") new_trainer.train( - ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' + - 'epoch=4-step=5.ckpt') + ckpt_path=f"{dir}/lightning_logs/version_0/checkpoints/" + + "epoch=4-step=5.ckpt" + ) # loading new_solver = SupervisedSolver.load_from_checkpoint( - f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt', - problem=problem, model=model) + f"{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt", + problem=problem, + model=model, + ) test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables) assert new_solver.forward(test_pts).shape == (20, 1) assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape torch.testing.assert_close( - new_solver.forward(test_pts), - solver.forward(test_pts)) + new_solver.forward(test_pts), solver.forward(test_pts) + ) # rm directories import shutil - shutil.rmtree('tests/test_solver/tmp') + + shutil.rmtree("tests/test_solver/tmp") diff --git a/tests/test_utils.py b/tests/test_utils.py index 911aa4be1..a641c3838 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -10,21 +10,30 @@ def test_merge_tensors(): - tensor1 = LabelTensor(torch.rand((20, 3)), ['a', 'b', 'c']) - tensor2 = LabelTensor(torch.zeros((20, 3)), ['d', 'e', 'f']) - tensor3 = LabelTensor(torch.ones((30, 3)), ['g', 'h', 'i']) + tensor1 = LabelTensor(torch.rand((20, 3)), ["a", "b", "c"]) + tensor2 = LabelTensor(torch.zeros((20, 3)), ["d", "e", "f"]) + tensor3 = LabelTensor(torch.ones((30, 3)), ["g", "h", "i"]) merged_tensor = merge_tensors((tensor1, tensor2, tensor3)) - assert tuple(merged_tensor.labels) == ('a', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'i') + assert tuple(merged_tensor.labels) == ( + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + ) assert merged_tensor.shape == (20 * 20 * 30, 9) - assert torch.all(merged_tensor.extract(('d', 'e', 'f')) == 0) - assert torch.all(merged_tensor.extract(('g', 'h', 'i')) == 1) + assert torch.all(merged_tensor.extract(("d", "e", "f")) == 0) + assert torch.all(merged_tensor.extract(("g", "h", "i")) == 1) def test_check_consistency_correct(): - ellipsoid1 = EllipsoidDomain({'x': [1, 2], 'y': [-2, 1]}) 
-    example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ['x', 'y', 'z'])
+    ellipsoid1 = EllipsoidDomain({"x": [1, 2], "y": [-2, 1]})
+    example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ["x", "y", "z"])
 
     check_consistency(example_input_pts, torch.Tensor)
     check_consistency(CartesianDomain, DomainInterface, subclass=True)
@@ -32,8 +41,8 @@ def test_check_consistency_correct():
 
 def test_check_consistency_incorrect():
-    ellipsoid1 = EllipsoidDomain({'x': [1, 2], 'y': [-2, 1]})
-    example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ['x', 'y', 'z'])
+    ellipsoid1 = EllipsoidDomain({"x": [1, 2], "y": [-2, 1]})
+    example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ["x", "y", "z"])
 
     with pytest.raises(ValueError):
         check_consistency(example_input_pts, DomainInterface)
diff --git a/tests/test_weighting/test_standard_weighting.py b/tests/test_weighting/test_standard_weighting.py
index 2b32c6154..9caa89ae1 100644
--- a/tests/test_weighting/test_standard_weighting.py
+++ b/tests/test_weighting/test_standard_weighting.py
@@ -12,31 +12,40 @@
 condition_names = problem.conditions.keys()
 print(problem.conditions.keys())
 
-@pytest.mark.parametrize("weights",
-                         [1, 1., dict(zip(condition_names, [1]*len(condition_names)))])
+
+@pytest.mark.parametrize(
+    "weights", [1, 1.0, dict(zip(condition_names, [1] * len(condition_names)))]
+)
 def test_constructor(weights):
     ScalarWeighting(weights=weights)
 
-@pytest.mark.parametrize("weights", ['a', [1,2,3]])
+
+@pytest.mark.parametrize("weights", ["a", [1, 2, 3]])
 def test_wrong_constructor(weights):
     with pytest.raises(ValueError):
         ScalarWeighting(weights=weights)
 
-@pytest.mark.parametrize("weights",
-                         [1, 1., dict(zip(condition_names, [1]*len(condition_names)))])
+
+@pytest.mark.parametrize(
+    "weights", [1, 1.0, dict(zip(condition_names, [1] * len(condition_names)))]
+)
 def test_aggregate(weights):
     weighting = ScalarWeighting(weights=weights)
-    losses = dict(zip(condition_names, [torch.randn(1) for _ in range(len(condition_names))]))
+    losses = dict(
+        zip(
+            condition_names,
+            [torch.randn(1) for _ in range(len(condition_names))],
+        )
+    )
     weighting.aggregate(losses=losses)
 
-@pytest.mark.parametrize("weights",
-                         [1, 1., dict(zip(condition_names, [1]*len(condition_names)))])
+
+@pytest.mark.parametrize(
+    "weights", [1, 1.0, dict(zip(condition_names, [1] * len(condition_names)))]
+)
 def test_train_aggregation(weights):
     weighting = ScalarWeighting(weights=weights)
     problem.discretise_domain(50)
-    solver = PINN(
-        problem=problem,
-        model=model,
-        weighting=weighting)
-    trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
-    trainer.train()
\ No newline at end of file
+    solver = PINN(problem=problem, model=model, weighting=weighting)
+    trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
+    trainer.train()