Commit 7770570

Fix warnings in problem and loss
1 parent 3ca4f93 commit 7770570

File tree: 8 files changed (+47, -27 lines)

pina/loss/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -1,3 +1,7 @@
+"""
+Module for loss functions and weighting functions.
+"""
+
 __all__ = [
     "LossInterface",
     "LpLoss",

pina/loss/loss_interface.py

Lines changed: 4 additions & 5 deletions
@@ -18,8 +18,8 @@ def __init__(self, reduction="mean"):
             will be applied, ``mean``: the sum of the output will be divided
             by the number of elements in the output, ``sum``: the output will
             be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either of
-            those two args will override ``reduction``. Default: ``mean``.
+            process of being deprecated, and in the meantime, specifying either
+            of those two args will override ``reduction``. Default: ``mean``.
         """
         super().__init__(reduction=reduction, size_average=None, reduce=None)

@@ -32,7 +32,6 @@ def forward(self, input, target):
         :return: Loss evaluation.
         :rtype: torch.Tensor
         """
-        pass

     def _reduction(self, loss):
         """Simple helper function to check reduction

@@ -42,8 +41,8 @@ def _reduction(self, loss):
             will be applied, ``mean``: the sum of the output will be divided
             by the number of elements in the output, ``sum``: the output will
             be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either of
-            those two args will override ``reduction``. Default: ``mean``.
+            process of being deprecated, and in the meantime, specifying either
+            of those two args will override ``reduction``. Default: ``mean``.
         :type reduction: str
         :param loss: Loss tensor for each element.
         :type loss: torch.Tensor
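
With the redundant ``pass`` after the ``forward`` docstring removed, a concrete loss is still expected to supply its own ``forward``. Below is a minimal sketch of how a subclass could plug into this interface, assuming only what the hunks above show; ``MAELoss`` is a hypothetical name, not part of the commit.

import torch
from pina.loss import LossInterface  # exported from pina/loss/__init__.py (see above)


class MAELoss(LossInterface):
    """Hypothetical loss: element-wise absolute error, reduced by the base helper."""

    def forward(self, input, target):
        # Per-element absolute error; _reduction applies none/mean/sum as configured.
        return self._reduction(torch.abs(input - target))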

pina/loss/lp_loss.py

Lines changed: 10 additions & 4 deletions
@@ -6,6 +6,8 @@
 from .loss_interface import LossInterface


+# Avoid pylint warning for torch.linalg.norm (it is callable)
+# pylint: disable=not-callable
 class LpLoss(LossInterface):
     r"""
     The Lp loss implementation class. Creates a criterion that measures

@@ -23,7 +25,8 @@ class LpLoss(LossInterface):

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }{[\sum_{i=1}^{D}|y_n^i|^p]},
+        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] } \\
+        {[\sum_{i=1}^{D}|y_n^i|^p]},

     where :math:`N` is the batch size. If ``reduction`` is not ``none``
     (default ``mean``), then:

@@ -38,16 +41,19 @@ class LpLoss(LossInterface):
     :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
     of :math:`n` elements each.

-    The sum operation still operates over all the elements, and divides by :math:`n`.
+    The sum operation still operates over all the elements, and divides by
+    :math:`n`.

-    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
+    The division by :math:`n` can be avoided if one sets ``reduction`` to
+    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
         :param int p: Degree of Lp norm. It specifies the type of norm to
             be calculated. See `list of possible orders in torch linalg
-            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_ to
+            `torch.linalg.norm <https://pytorch.org/docs/stable/generated/
+            torch.linalg.norm.html#torch.linalg.norm>`_
             for possible degrees. Default 2 (euclidean norm).
         :param str reduction: Specifies the reduction to apply to the output:
             ``none`` | ``mean`` | ``sum``. ``none``: no reduction
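
For reference, a short usage sketch of ``LpLoss`` with the constructor arguments shown in this hunk. The tensor shapes and the use of plain ``torch`` tensors are illustrative assumptions, not taken from the commit.

import torch
from pina.loss import LpLoss  # exported from pina/loss/__init__.py (see above)

pred = torch.rand(10, 3)    # assumed (batch, features) shape, illustration only
target = torch.rand(10, 3)

absolute = LpLoss(p=2, reduction="mean", relative=False)  # defaults shown in the diff
relative = LpLoss(p=2, relative=True)  # per-sample error normalised by the target term

print(absolute(pred, target), relative(pred, target))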

pina/loss/power_loss.py

Lines changed: 10 additions & 5 deletions
@@ -17,13 +17,15 @@ class PowerLoss(LossInterface):

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{1}{D}\left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],
+        l_n = \frac{1}{D}\left[\sum_{i=1}^{D}
+        \left| x_n^i - y_n^i \right|^p\right],

     If ``'relative'`` is set to true:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }{\sum_{i=1}^{D}|y_n^i|^p},
+        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
+        {\sum_{i=1}^{D}|y_n^i|^p},

     where :math:`N` is the batch size. If ``reduction`` is not ``none``
     (default ``mean``), then:

@@ -38,16 +40,19 @@ class PowerLoss(LossInterface):
     :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
     of :math:`n` elements each.

-    The sum operation still operates over all the elements, and divides by :math:`n`.
+    The sum operation still operates over all the elements, and divides by
+    :math:`n`.

-    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
+    The division by :math:`n` can be avoided if one sets ``reduction`` to
+    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
         :param int p: Degree of Lp norm. It specifies the type of norm to
             be calculated. See `list of possible orders in torch linalg
-            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_ to
+            <https://pytorch.org/docs/stable/generated/
+            torch.linalg.norm.html#torch.linalg.norm>`_ to
             see the possible degrees. Default 2 (euclidean norm).
         :param str reduction: Specifies the reduction to apply to the output:
             ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
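
As a sanity check on the reflowed formulas, the two ``PowerLoss`` expressions above can be written out with plain ``torch``. This is a sketch of the docstring math only, not of the library's implementation.

import torch

x = torch.rand(4, 5)  # assumed (N, D) shapes, illustration only
y = torch.rand(4, 5)
p = 2

# l_n = (1/D) * sum_i |x_n^i - y_n^i|^p
power_per_sample = (x - y).abs().pow(p).mean(dim=-1)

# relative variant: l_n = sum_i |x_n^i - y_n^i|^p / sum_i |y_n^i|^p
relative_per_sample = (x - y).abs().pow(p).sum(dim=-1) / y.abs().pow(p).sum(dim=-1)

# reduction="mean" then averages the per-sample values over the batch
print(power_per_sample.mean(), relative_per_sample.mean())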

pina/loss/weighting_interface.py

Lines changed: 0 additions & 1 deletion
@@ -20,4 +20,3 @@ def aggregate(self, losses):
         :return: The losses aggregation. It should be a scalar Tensor.
         :rtype: torch.Tensor
         """
-        pass
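
``aggregate`` is the only hook a concrete weighting scheme must provide. The sketch below assumes that ``losses`` maps condition names to scalar loss tensors and that the class is named after its file; neither detail is visible in this hunk, so treat both as assumptions.

from pina.loss.weighting_interface import WeightingInterface  # assumed import path


class FixedWeighting(WeightingInterface):
    """Hypothetical weighting: fixed, user-supplied weight per condition."""

    def __init__(self, weights):
        super().__init__()  # assumes the base __init__ takes no arguments
        self.weights = weights  # e.g. {"D": 1.0, "g1": 10.0}

    def aggregate(self, losses):
        # Weighted sum; returns a scalar tensor, as the docstring above requires.
        return sum(self.weights.get(name, 1.0) * loss for name, loss in losses.items())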

pina/problem/zoo/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -1,3 +1,5 @@
+"""TODO"""
+
 __all__ = [
     "Poisson2DSquareProblem",
     "SupervisedProblem",

pina/problem/zoo/poisson_2d_square.py

Lines changed: 8 additions & 6 deletions
@@ -1,12 +1,12 @@
 """Definition of the Poisson problem on a square domain."""

-from pina.problem import SpatialProblem
-from pina.operator import laplacian
-from pina import Condition
-from pina.domain import CartesianDomain
-from pina.equation.equation import Equation
-from pina.equation.equation_factory import FixedValue
 import torch
+from ..spatial_problem import SpatialProblem
+from ...operator import laplacian
+from ... import Condition
+from ...domain import CartesianDomain
+from ...equation.equation import Equation
+from ...equation.equation_factory import FixedValue


 def laplace_equation(input_, output_):

@@ -48,6 +48,8 @@ class Poisson2DSquareProblem(SpatialProblem):
     }

     def poisson_sol(self, pts):
+        """TODO"""
+
         return -(
             torch.sin(pts.extract(["x"]) * torch.pi)
             * torch.sin(pts.extract(["y"]) * torch.pi)
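
A short usage sketch for the problem touched here. The ``LabelTensor`` construction is written from memory of PINA's public API and should be read as an assumption rather than as part of this commit.

import torch
from pina import LabelTensor
from pina.problem.zoo import Poisson2DSquareProblem  # exported in the zoo __init__ above

problem = Poisson2DSquareProblem()

# Evaluate the documented analytical solution on a few labelled points.
pts = LabelTensor(torch.rand(5, 2), labels=["x", "y"])
print(problem.poisson_sol(pts))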

pina/problem/zoo/supervised_problem.py

Lines changed: 9 additions & 6 deletions
@@ -1,14 +1,17 @@
-from pina.problem import AbstractProblem
-from pina import Condition
-from pina import Graph
+"""TODO"""
+
+from ..abstract_problem import AbstractProblem
+from ... import Condition
+from ... import Graph


 class SupervisedProblem(AbstractProblem):
     """
     A problem definition for supervised learning in PINA.

-    This class allows an easy and straightforward definition of a Supervised problem,
-    based on a single condition of type `InputTargetCondition`
+    This class allows an easy and straightforward definition of a
+    Supervised problem, based on a single condition of type
+    `InputTargetCondition`

     :Example:
     >>> import torch

@@ -17,7 +20,7 @@ class SupervisedProblem(AbstractProblem):
     >>> problem = SupervisedProblem(input_data, output_data)
     """

-    conditions = dict()
+    conditions = {}
     output_variables = None

     def __init__(self, input_, output_):
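
Finally, a usage sketch mirroring the ``:Example:`` block in the docstring above; the tensor shapes are illustrative assumptions.

import torch
from pina.problem.zoo import SupervisedProblem  # exported in the zoo __init__ above

input_data = torch.rand(100, 2)   # assumed shapes, illustration only
output_data = torch.rand(100, 1)

problem = SupervisedProblem(input_data, output_data)
print(problem.conditions)  # a single InputTargetCondition, per the class docstring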
