77 changes: 54 additions & 23 deletions pina/operator.py
@@ -27,12 +27,14 @@ def grad(output_, input_, components=None, d=None):
computed.
:param LabelTensor input_: The input tensor with respect to which the
gradient is computed.
:param list[str] components: The names of the output variables for which to
:param components: The names of the output variables for which to
compute the gradient. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which
:type components: str | list[str]
:param d: The names of the input variables with respect to which
the gradient is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:raises TypeError: If the input tensor is not a LabelTensor.
:raises RuntimeError: If the output is a scalar field and the components
are not equal to the output labels.
@@ -50,9 +52,10 @@ def grad_scalar_output(output_, input_, d):
computed. It must be a column tensor.
:param LabelTensor input_: The input tensor with respect to which the
gradient is computed.
:param list[str] d: The names of the input variables with respect to
:param d: The names of the input variables with respect to
which the gradient is computed. It must be a subset of the input
labels. If ``None``, all input variables are considered.
:type d: str | list[str]
:raises RuntimeError: If a vectorial function is passed.
:raises RuntimeError: If missing derivative labels.
:return: The computed gradient tensor.
@@ -89,6 +92,12 @@ def grad_scalar_output(output_, input_, d):
if components is None:
components = output_.labels

if not isinstance(components, list):
components = [components]

if not isinstance(d, list):
d = [d]

if output_.shape[1] == 1: # scalar output ################################

if components != output_.labels:
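
Usage sketch (editor's note, not part of the diff): with the normalization above, ``components`` and ``d`` can be passed to ``grad`` either as a single string or as a list. The snippet assumes ``LabelTensor`` and ``grad`` are importable as shown and uses illustrative labels and data.

import torch
from pina import LabelTensor
from pina.operator import grad

# Illustrative 2D inputs and a two-component output field (u = x**2, v = y**2).
pts = LabelTensor(torch.rand(10, 2, requires_grad=True), ["x", "y"])
field = LabelTensor(pts**2, ["u", "v"])

grad_u = grad(field, pts, components="u")        # same result as components=["u"]
assert grad_u.labels == ["dudx", "dudy"]
du_dx = grad(field, pts, components="u", d="x")  # `d` accepts a single string too
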
Expand Down Expand Up @@ -120,12 +129,14 @@ def div(output_, input_, components=None, d=None):
computed.
:param LabelTensor input_: The input tensor with respect to which the
divergence is computed.
:param list[str] components: The names of the output variables for which to
:param components: The names of the output variables for which to
compute the divergence. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which
:type components: str | list[str]
:param d: The names of the input variables with respect to which
the divergence is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:raises TypeError: If the input tensor is not a LabelTensor.
:raises ValueError: If the output is a scalar field.
:raises ValueError: If the number of components is not equal to the number
@@ -142,6 +153,12 @@ def div(output_, input_, components=None, d=None):
if components is None:
components = output_.labels

if not isinstance(components, list):
components = [components]

if not isinstance(d, list):
d = [d]

if output_.shape[1] < 2 or len(components) < 2:
raise ValueError("div supported only for vector fields")

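
Usage sketch (editor's note, not part of the diff): the divergence still requires a vector field, so a single-string ``components`` is rejected by the check above; lists are used here. Imports and labels are illustrative, as in the previous sketch.

import torch
from pina import LabelTensor
from pina.operator import div

pts = LabelTensor(torch.rand(10, 2, requires_grad=True), ["x", "y"])
field = LabelTensor(pts**2, ["u", "v"])                        # u = x**2, v = y**2

div_f = div(field, pts, components=["u", "v"], d=["x", "y"])   # du/dx + dv/dy
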
@@ -170,12 +187,14 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
computed.
:param LabelTensor input_: The input tensor with respect to which the
laplacian is computed.
:param list[str] components: The names of the output variables for which to
:param components: The names of the output variables for which to
compute the laplacian. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which
:type components: str | list[str]
:param d: The names of the input variables with respect to which
the laplacian is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:param str method: The method used to compute the Laplacian. Default is
``std``.
:raises NotImplementedError: If ``method`` is ``divgrad``.
@@ -191,12 +210,14 @@ def scalar_laplace(output_, input_, components, d):
computed. It must be a column tensor.
:param LabelTensor input_: The input tensor with respect to which the
laplacian is computed.
:param list[str] components: The names of the output variables for which
:param components: The names of the output variables for which
to compute the laplacian. It must be a subset of the output labels.
If ``None``, all output variables are considered.
:param list[str] d: The names of the input variables with respect to
:type components: str | list[str]
:param d: The names of the input variables with respect to
which the laplacian is computed. It must be a subset of the input
labels. If ``None``, all input variables are considered.
:type d: str | list[str]
:return: The computed laplacian tensor.
:rtype: LabelTensor
"""
@@ -216,22 +237,24 @@ def scalar_laplace(output_, input_, components, d):
if components is None:
components = output_.labels

if not isinstance(components, list):
components = [components]

if not isinstance(d, list):
d = [d]

if method == "divgrad":
raise NotImplementedError("divgrad not implemented as method")

if method == "std":
if len(components) == 1:
result = scalar_laplace(output_, input_, components, d)
labels = [f"dd{components[0]}"]

else:
result = torch.empty(
input_.shape[0], len(components), device=output_.device
)
labels = [None] * len(components)
for idx, c in enumerate(components):
result[:, idx] = scalar_laplace(output_, input_, c, d).flatten()
labels[idx] = f"dd{c}"

result = torch.empty(
input_.shape[0], len(components), device=output_.device
)
labels = [None] * len(components)
for idx, c in enumerate(components):
result[:, idx] = scalar_laplace(output_, input_, [c], d).flatten()
labels[idx] = f"dd{c}"

result = result.as_subclass(LabelTensor)
result.labels = labels
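
Usage sketch (editor's note, not part of the diff): with the unified loop above, a single-string component and the equivalent one-element list produce the same Laplacian, which is what the new test below checks. Imports and labels are illustrative.

import torch
from pina import LabelTensor
from pina.operator import laplacian

pts = LabelTensor(torch.rand(10, 2, requires_grad=True), ["x", "y"])
field = LabelTensor(pts**2, ["u", "v"])

lap_str = laplacian(field, pts, components="u")     # single string
lap_list = laplacian(field, pts, components=["u"])  # one-element list
assert lap_str.labels == lap_list.labels == ["ddu"]
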
@@ -251,12 +274,14 @@ def advection(output_, input_, velocity_field, components=None, d=None):
is computed.
:param str velocity_field: The name of the output variable used as velocity
field. It must be chosen among the output labels.
:param list[str] components: The names of the output variables for which
:param components: The names of the output variables for which
to compute the advection. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which
:type components: str | list[str]
:param d: The names of the input variables with respect to which
the advection is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:return: The computed advection tensor.
:rtype: LabelTensor
"""
@@ -266,6 +291,12 @@ def advection(output_, input_, velocity_field, components=None, d=None):
if components is None:
components = output_.labels

if not isinstance(components, list):
components = [components]

if not isinstance(d, list):
d = [d]

tmp = (
grad(output_, input_, components, d)
.reshape(-1, len(components), len(d))
39 changes: 39 additions & 0 deletions tests/test_operator.py
@@ -164,3 +164,42 @@ def test_laplacian_vector_output2():

assert torch.allclose(lap_f.extract("ddu"), lap_u)
assert torch.allclose(lap_f.extract("ddv"), lap_v)


def test_label_format():
# Test the handling of `components` or `d` passed as a single string of
# length greater than one, e.g. "aaa".
# Only gradient and laplacian are tested, since div is not implemented for
# single components.
inp.labels = ["xx", "yy", "zz"]
tensor_v = LabelTensor(func_vector(inp), ["aa", "bbb", "c"])
comp = tensor_v.labels[0]
single_d = inp.labels[0]

# Single component as string + list of d
grad_tensor_v = grad(tensor_v, inp, components=comp, d=None)
assert grad_tensor_v.labels == [f"d{comp}d{i}" for i in inp.labels]

lap_tensor_v = laplacian(tensor_v, inp, components=comp, d=None)
assert lap_tensor_v.labels == [f"dd{comp}"]

# Single component as list + list of d
grad_tensor_v = grad(tensor_v, inp, components=[comp], d=None)
assert grad_tensor_v.labels == [f"d{comp}d{i}" for i in inp.labels]

lap_tensor_v = laplacian(tensor_v, inp, components=[comp], d=None)
assert lap_tensor_v.labels == [f"dd{comp}"]

# List of components + single d as string
grad_tensor_v = grad(tensor_v, inp, components=None, d=single_d)
assert grad_tensor_v.labels == [f"d{i}d{single_d}" for i in tensor_v.labels]

lap_tensor_v = laplacian(tensor_v, inp, components=None, d=single_d)
assert lap_tensor_v.labels == [f"dd{i}" for i in tensor_v.labels]

# List of components + single d as list
grad_tensor_v = grad(tensor_v, inp, components=None, d=[single_d])
assert grad_tensor_v.labels == [f"d{i}d{single_d}" for i in tensor_v.labels]

lap_tensor_v = laplacian(tensor_v, inp, components=None, d=[single_d])
assert lap_tensor_v.labels == [f"dd{i}" for i in tensor_v.labels]
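
The new test can be run on its own with pytest's node-id selection (assuming the repository's usual test setup):

pytest tests/test_operator.py::test_label_format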