11"""Module for defining various general equations."""
22
3+ from typing import Callable
4+ import torch
35from .equation import Equation
46from ..operator import grad , div , laplacian
7+ from ..utils import check_consistency
58
69
710class FixedValue (Equation ):
@@ -121,7 +124,7 @@ def __init__(self, value, components=None, d=None):

        :param float value: The fixed value to be enforced to the laplacian.
        :param list[str] components: The name of the output variables for which
            the fixed laplace condition is applied. It should be a subset of the
            output labels. If ``None``, all output variables are considered.
            Default is ``None``.
        :param list[str] d: The name of the input variables on which the
@@ -146,3 +149,297 @@ def equation(input_, output_):
            )

        super().__init__(equation)


class Laplace(FixedLaplacian):
    """
    Equation to enforce a null laplacian for a specific condition.
    """

    def __init__(self, components=None, d=None):
        """
        Initialization of the :class:`Laplace` class.

        :param list[str] components: The name of the output variables for which
            the null laplace condition is applied. It should be a subset of the
            output labels. If ``None``, all output variables are considered.
            Default is ``None``.
        :param list[str] d: The name of the input variables on which the
            laplacian is computed. It should be a subset of the input labels.
            If ``None``, all the input variables are considered.
            Default is ``None``.
        """
        super().__init__(0.0, components=components, d=d)
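

# A minimal usage sketch: ``Laplace`` is ``FixedLaplacian`` pinned to zero, so a
# harmonic constraint only needs the label names. The labels "u", "x" and "y"
# below are hypothetical placeholders for a problem's output/input labels.
def _example_laplace():
    # Enforce a null laplacian of the output "u" w.r.t. the inputs "x" and "y".
    return Laplace(components=["u"], d=["x", "y"])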


class AdvectionEquation(Equation):
    r"""
    Implementation of the N-dimensional advection equation with constant
    velocity parameter. The equation is defined as follows:

    .. math::

        \frac{\partial u}{\partial t} + c \cdot \nabla u = 0

    Here, :math:`c` is the advection velocity parameter.
    """

    def __init__(self, c):
        """
        Initialization of the :class:`AdvectionEquation`.

        :param c: The advection velocity. If a scalar is provided, the same
            velocity is applied to all spatial dimensions. If a list is
            provided, it must contain one value per spatial dimension.
        :type c: float | int | List[float] | List[int]
        :raises ValueError: If ``c`` is an empty list.
        """
        # Check consistency
        check_consistency(c, (float, int, list))
        if isinstance(c, list):
            if len(c) < 1:
                raise ValueError("'c' cannot be an empty list.")
            for ci in c:
                check_consistency(ci, (float, int))
        else:
            c = [c]

        # Store advection velocity parameter
        self.c = torch.tensor(c).unsqueeze(0)

        def equation(input_, output_):
            """
            Implementation of the advection equation.

            :param LabelTensor input_: The input data of the problem.
            :param LabelTensor output_: The output data of the problem.
            :return: The residual of the advection equation.
            :rtype: LabelTensor
            :raises ValueError: If the ``input_`` labels do not contain the time
                variable 't'.
            :raises ValueError: If ``c`` is a list and its length is not
                consistent with the number of spatial dimensions.
            """
            # Store labels
            input_lbl = input_.labels
            spatial_d = [di for di in input_lbl if di != "t"]

            # Ensure time is passed as input
            if "t" not in input_lbl:
                raise ValueError(
                    "The ``input_`` labels must contain the time 't' variable."
                )

            # Ensure consistency of c length
            if self.c.shape[1] != (len(input_lbl) - 1) and self.c.shape[1] > 1:
                raise ValueError(
                    "If 'c' is passed as a list, its length must be equal to "
                    "the number of spatial dimensions."
                )

            # Repeat c to ensure a consistent shape for the advection term,
            # using a local copy so repeated evaluations do not alter self.c
            c = self.c.repeat(output_.shape[0], 1)
            if c.shape[1] != (len(input_lbl) - 1):
                c = c.repeat(1, len(input_lbl) - 1)

            # Add a dimension to c for the following operations
            c = c.unsqueeze(-1)

            # Compute the time derivative and the spatial gradient
            time_der = grad(output_, input_, components=None, d="t")
            grads = grad(output_=output_, input_=input_, d=spatial_d)

            # Reshape and transpose
            tmp = grads.reshape(*output_.shape, len(spatial_d))
            tmp = tmp.transpose(-1, -2)

            # Compute advection term
            adv = (tmp * c).sum(dim=tmp.tensor.ndim - 2)

            return time_der + adv

        super().__init__(equation)
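

# A minimal usage sketch, assuming a 2D spatial problem: one constant velocity
# entry per spatial input label. The velocity values are placeholders.
def _example_advection():
    # Advect with velocity 1.0 along the first spatial axis, 0.5 along the second.
    return AdvectionEquation(c=[1.0, 0.5])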


class AllenCahnEquation(Equation):
    r"""
    Implementation of the N-dimensional Allen-Cahn equation, defined as follows:

    .. math::

        \frac{\partial u}{\partial t} - \alpha \Delta u + \beta(u^3 - u) = 0

    Here, :math:`\alpha` and :math:`\beta` are parameters of the equation.
    """

    def __init__(self, alpha, beta):
        """
        Initialization of the :class:`AllenCahnEquation`.

        :param alpha: The diffusion coefficient.
        :type alpha: float | int
        :param beta: The reaction coefficient.
        :type beta: float | int
        """
        check_consistency(alpha, (float, int))
        check_consistency(beta, (float, int))
        self.alpha = alpha
        self.beta = beta

        def equation(input_, output_):
            """
            Implementation of the Allen-Cahn equation.

            :param LabelTensor input_: The input data of the problem.
            :param LabelTensor output_: The output data of the problem.
            :return: The residual of the Allen-Cahn equation.
            :rtype: LabelTensor
            :raises ValueError: If the ``input_`` labels do not contain the time
                variable 't'.
            """
            # Ensure time is passed as input
            if "t" not in input_.labels:
                raise ValueError(
                    "The ``input_`` labels must contain the time 't' variable."
                )

            # Compute the time derivative and the spatial laplacian
            u_t = grad(output_, input_, d=["t"])
            u_xx = laplacian(
                output_, input_, d=[di for di in input_.labels if di != "t"]
            )

            return u_t - self.alpha * u_xx + self.beta * (output_**3 - output_)

        super().__init__(equation)
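

# A minimal usage sketch: the coefficients below are placeholder values often
# used in Allen-Cahn benchmarks, not values mandated by this class.
def _example_allen_cahn():
    # Residual: u_t - 0.0001 * laplacian(u) + 5 * (u^3 - u).
    return AllenCahnEquation(alpha=0.0001, beta=5.0)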


class DiffusionReactionEquation(Equation):
    r"""
    Implementation of the N-dimensional Diffusion-Reaction equation,
    defined as follows:

    .. math::

        \frac{\partial u}{\partial t} - \alpha \Delta u - f = 0

    Here, :math:`\alpha` is a parameter of the equation, while :math:`f` is the
    reaction term.
    """

    def __init__(self, alpha, forcing_term):
        """
        Initialization of the :class:`DiffusionReactionEquation`.

        :param alpha: The diffusion coefficient.
        :type alpha: float | int
        :param Callable forcing_term: The forcing field function, taking as
            input the points on which evaluation is required.
        """
        check_consistency(alpha, (float, int))
        check_consistency(forcing_term, Callable)
        self.alpha = alpha
        self.forcing_term = forcing_term

        def equation(input_, output_):
            """
            Implementation of the Diffusion-Reaction equation.

            :param LabelTensor input_: The input data of the problem.
            :param LabelTensor output_: The output data of the problem.
            :return: The residual of the Diffusion-Reaction equation.
            :rtype: LabelTensor
            :raises ValueError: If the ``input_`` labels do not contain the time
                variable 't'.
            """
            # Ensure time is passed as input
            if "t" not in input_.labels:
                raise ValueError(
                    "The ``input_`` labels must contain the time 't' variable."
                )

            # Compute the time derivative and the spatial laplacian
            u_t = grad(output_, input_, d=["t"])
            u_xx = laplacian(
                output_, input_, d=[di for di in input_.labels if di != "t"]
            )

            return u_t - self.alpha * u_xx - self.forcing_term(input_)

        super().__init__(equation)
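

# A minimal usage sketch: the forcing term is any callable evaluated on the
# input points; the zero forcing and alpha = 0.1 below are placeholder choices.
def _example_diffusion_reaction():
    # Residual: u_t - 0.1 * laplacian(u) - 0, i.e. a pure diffusion problem.
    return DiffusionReactionEquation(
        alpha=0.1, forcing_term=lambda pts: torch.zeros(pts.shape[0], 1)
    )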


class HelmholtzEquation(Equation):
    r"""
    Implementation of the Helmholtz equation, defined as follows:

    .. math::

        \Delta u + k u - f = 0

    Here, :math:`k` is a parameter of the equation, while :math:`f` is the
    forcing term.
    """

    def __init__(self, k, forcing_term):
        """
        Initialization of the :class:`HelmholtzEquation` class.

        :param k: The parameter of the equation.
        :type k: float | int
        :param Callable forcing_term: The forcing field function, taking as
            input the points on which evaluation is required.
        """
        check_consistency(k, (int, float))
        check_consistency(forcing_term, Callable)
        self.k = k
        self.forcing_term = forcing_term

        def equation(input_, output_):
            """
            Implementation of the Helmholtz equation.

            :param LabelTensor input_: The input data of the problem.
            :param LabelTensor output_: The output data of the problem.
            :return: The residual of the Helmholtz equation.
            :rtype: LabelTensor
            """
            lap = laplacian(output_, input_)
            return lap + self.k * output_ - self.forcing_term(input_)

        super().__init__(equation)
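

# A minimal usage sketch: k and the forcing are placeholder choices; the forcing
# below only assumes the input points behave like a 2D tensor of coordinates.
def _example_helmholtz():
    # Residual: laplacian(u) + 4 * u - sin(first input coordinate).
    return HelmholtzEquation(k=4.0, forcing_term=lambda pts: torch.sin(pts[:, :1]))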


class PoissonEquation(Equation):
    r"""
    Implementation of the Poisson equation, defined as follows:

    .. math::

        \Delta u - f = 0

    Here, :math:`f` is the forcing term.
    """

    def __init__(self, forcing_term):
        """
        Initialization of the :class:`PoissonEquation` class.

        :param Callable forcing_term: The forcing field function, taking as
            input the points on which evaluation is required.
        """
        check_consistency(forcing_term, Callable)
        self.forcing_term = forcing_term

        def equation(input_, output_):
            """
            Implementation of the Poisson equation.

            :param LabelTensor input_: The input data of the problem.
            :param LabelTensor output_: The output data of the problem.
            :return: The residual of the Poisson equation.
            :rtype: LabelTensor
            """
            lap = laplacian(output_, input_)
            return lap - self.forcing_term(input_)

        super().__init__(equation)
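

# A minimal usage sketch: a constant unit forcing term; the forcing choice is a
# placeholder.
def _example_poisson():
    # Residual: laplacian(u) - 1 on every evaluation point.
    return PoissonEquation(forcing_term=lambda pts: torch.ones(pts.shape[0], 1))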