3 changes: 2 additions & 1 deletion docs/source/_tutorial.rst
@@ -33,6 +33,7 @@ Neural Operator Learning
- `Introductory Tutorial: Neural Operator Learning with PINA <tutorial21/tutorial.html>`_
- `Modeling 2D Darcy Flow with the Fourier Neural Operator <tutorial5/tutorial.html>`_
- `Solving the Kuramoto-Sivashinsky Equation with Averaging Neural Operator <tutorial10/tutorial.html>`_
- `Advection Equation with Data-Driven DeepONet <tutorial24/tutorial.html>`_

Supervised Learning
-------------------
@@ -42,4 +43,4 @@ Supervised Learning
- `Reduced Order Model with Graph Neural Networks for Unstructured Domains <tutorial22/tutorial.html>`_
- `Data-driven System Identification with SINDy <tutorial23/tutorial.html>`_
- `Unstructured Convolutional Autoencoders with Continuous Convolution <tutorial4/tutorial.html>`_
- `Reduced Order Modeling with POD-RBF and POD-NN Approaches for Fluid Dynamics <tutorial8/tutorial.html>`_
- `Reduced Order Modeling with POD-RBF and POD-NN Approaches for Fluid Dynamics <tutorial8/tutorial.html>`_
26 changes: 7 additions & 19 deletions pina/model/deeponet.py
@@ -52,7 +52,8 @@ def __init__(
:param reduction: The reduction to be used to reduce the aggregated
result of the modules in ``networks`` to the desired output
dimension. Available reductions include: sum: ``+``, product: ``*``,
mean: ``mean``, min: ``min``, max: ``max``. Default is ``+``.
mean: ``mean``, min: ``min``, max: ``max``, identity: ``id``.
Default is ``+``.
:type reduction: str or Callable
:param bool scale: If ``True``, the final output is scaled before being
returned in the forward pass. Default is ``True``.
@@ -122,18 +123,8 @@ def __init__(
check_consistency(scale, bool)
check_consistency(translation, bool)

# check trunk branch nets consistency
shapes = []
for key, value in networks.items():
for value in networks.values():
check_consistency(value, (str, int))
check_consistency(key, torch.nn.Module)
input_ = torch.rand(10, len(value))
shapes.append(key(input_).shape[-1])

if not all(map(lambda x: x == shapes[0], shapes)):
raise ValueError(
"The passed networks have not the same output dimension."
)

# assign trunk and branch nets with their input indices
self.models = torch.nn.ModuleList(networks.keys())
@@ -171,6 +162,7 @@ def _symbol_functions(**kwargs):
"mean": partial(torch.mean, **kwargs),
"min": lambda x: torch.min(x, **kwargs).values,
"max": lambda x: torch.max(x, **kwargs).values,
"id": lambda x: x,
}

def _init_aggregator(self, aggregator):
@@ -181,7 +173,7 @@ def _init_aggregator(self, aggregator):
:type aggregator: str or Callable
:raises ValueError: If the aggregator is not supported.
"""
aggregator_funcs = self._symbol_functions(dim=2)
aggregator_funcs = self._symbol_functions(dim=-1)
if aggregator in aggregator_funcs:
aggregator_func = aggregator_funcs[aggregator]
elif isinstance(aggregator, nn.Module) or is_function(aggregator):
@@ -264,13 +256,9 @@ def forward(self, x):
# reduce
output_ = self._reduction(aggregated)
if self._reduction_type in self._symbol_functions(dim=-1):
output_ = output_.reshape(-1, 1)

# scale and translate
output_ *= self._scale
output_ += self._trasl
output_ = output_.reshape(*output_.shape, 1)

return output_
return self._scale * output_ + self._trasl

@property
def aggregator(self):
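Taken together, the `pina/model/deeponet.py` changes add an identity reduction (`"id"`), move the symbol aggregators onto the last dimension, drop the constructor-level output-dimension check, and fold scaling and translation into a single expression. A minimal sketch of how the updated reductions might be exercised is shown below; the import paths, layer sizes, and constructor keywords follow the test modules touched in this pull request, and the shape comments are expectations rather than guarantees.

```python
import torch

from pina import LabelTensor
from pina.model import DeepONet, FeedForward

# Branch and trunk networks with matching output dimension (10). The
# constructor no longer verifies this, so keeping them equal is now the
# caller's responsibility.
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)

x = LabelTensor(torch.rand(20, 3), ["a", "b", "c"])

# Classic DeepONet: multiply branch and trunk features, then sum them.
model_sum = DeepONet(
    branch_net=branch_net,
    trunk_net=trunk_net,
    input_indeces_branch_net=["a"],
    input_indeces_trunk_net=["b", "c"],
    reduction="+",
    aggregator="*",
)
print(model_sum(x).shape)  # expected (20, 1); symbol reductions now gain a trailing singleton dim

# New identity reduction: the aggregated features are passed through unreduced,
# so the inner networks' feature dimension (10) survives in the output.
model_id = DeepONet(
    branch_net=branch_net,
    trunk_net=trunk_net,
    input_indeces_branch_net=["a"],
    input_indeces_trunk_net=["b", "c"],
    reduction="id",
    aggregator="*",
)
print(model_id(x).shape)
```

Switching the symbol table from `dim=2` to `dim=-1` presumably lets the same helpers serve both the aggregator, which sees the stacked 3D tensor, and the reduction, which sees the already-aggregated 2D tensor.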
16 changes: 1 addition & 15 deletions tests/test_model/test_deeponet.py
@@ -9,7 +9,7 @@
data = torch.rand((20, 3))
input_vars = ["a", "b", "c"]
input_ = LabelTensor(data, input_vars)
symbol_funcs_red = DeepONet._symbol_functions(dim=-1)
symbol_funcs_red = DeepONet._symbol_functions()
output_dims = [1, 5, 10, 20]


@@ -26,20 +26,6 @@ def test_constructor():
)


def test_constructor_fails_when_invalid_inner_layer_size():
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=8)
with pytest.raises(ValueError):
DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction="+",
aggregator="*",
)


def test_forward_extract_str():
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
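With the constructor-level consistency check gone, this test (and its MIONet counterpart below) no longer applies: `DeepONet` and `MIONet` now accept inner networks with mismatched output dimensions without raising. A caller who still wants that guarantee could run a manual check such as the hypothetical helper below, which mirrors the deleted code; the helper name and probe batch size are illustrative, not part of PINA.

```python
import torch

from pina.model import FeedForward

def check_same_output_dim(networks, probe_size=10):
    """`networks` maps each torch.nn.Module to its list of input labels,
    as in the MIONet constructor. Raises if the output widths differ."""
    dims = []
    for net, labels in networks.items():
        probe = torch.rand(probe_size, len(labels))
        dims.append(net(probe).shape[-1])
    if any(d != dims[0] for d in dims):
        raise ValueError("The passed networks do not share the same output dimension.")

branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=8)
# Raises ValueError (10 != 8), a mismatch the constructors now silently accept.
check_same_output_dim({branch_net: ["a"], trunk_net: ["b", "c"]})
```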
9 changes: 0 additions & 9 deletions tests/test_model/test_mionet.py
@@ -18,15 +18,6 @@ def test_constructor():
MIONet(networks=networks, reduction="+", aggregator="*")


def test_constructor_fails_when_invalid_inner_layer_size():
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=12)
networks = {branch_net1: ["x"], branch_net2: ["x", "y"], trunk_net: ["z"]}
with pytest.raises(ValueError):
MIONet(networks=networks, reduction="+", aggregator="*")


def test_forward_extract_str():
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
3 changes: 2 additions & 1 deletion tutorials/README.md
@@ -36,6 +36,8 @@ Learning Bifurcating PDE Solutions with Physics-Informed Deep Ensembles|[[.ipynb
Introductory Tutorial: Neural Operator Learning with PINA |[[.ipynb](tutorial21/tutorial.ipynb),[.py](tutorial21/tutorial.py),[.html](http://mathlab.github.io/PINA/tutorial21/tutorial.html)]|
Modeling 2D Darcy Flow with the Fourier Neural Operator |[[.ipynb](tutorial5/tutorial.ipynb),[.py](tutorial5/tutorial.py),[.html](http://mathlab.github.io/PINA/tutorial5/tutorial.html)]|
Solving the Kuramoto–Sivashinsky Equation with Averaging Neural Operator |[[.ipynb](tutorial10/tutorial.ipynb),[.py](tutorial10/tutorial.py),[.html](http://mathlab.github.io/PINA/tutorial10/tutorial.html)]|
Advection Equation with Data-Driven DeepONet| [[.ipynb](tutorial24/tutorial.ipynb),[.py](tutorial24/tutorial.py),[.html](http://mathlab.github.io/PINA/tutorial24/tutorial.html)]|


## Supervised Learning
| Description | Tutorial |
@@ -46,4 +48,3 @@ Reduced Order Model with Graph Neural Networks for Unstructured Domains| [[.ipyn
Data-driven System Identification with SINDy| [[.ipynb](tutorial23/tutorial.ipynb),[.py](tutorial23/tutorial.py),[.html](http://mathlab.github.io/PINA/tutorial23/tutorial.html)]|
Unstructured Convolutional Autoencoders with Continuous Convolution |[[.ipynb](tutorial4/tutorial.ipynb),[.py](tutorial4/tutorial.py),[.html](http://mathlab.github.io/PINA/tutorial4/tutorial.html)]|
Reduced Order Modeling with POD-RBF and POD-NN Approaches for Fluid Dynamics| [[.ipynb](tutorial8/tutorial.ipynb),[.py](tutorial8/tutorial.py),[.html](http://mathlab.github.io/PINA/tutorial8/tutorial.html)]|

Binary file added tutorials/static/deeponet.png
490 changes: 490 additions & 0 deletions tutorials/tutorial24/tutorial.ipynb

Large diffs are not rendered by default.
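The new `tutorials/tutorial24/tutorial.ipynb` is not rendered here. As rough orientation only, a data-driven DeepONet fit to advection data could look like the plain-PyTorch sketch below; the initial condition, grids, transport speed, and training loop are assumptions for illustration and are not taken from the notebook, which presumably uses PINA's problem, solver, and trainer machinery instead.

```python
import torch

from pina import LabelTensor
from pina.model import DeepONet, FeedForward

# Synthetic data for the 1D advection equation u_t + c * u_x = 0, whose exact
# solution transports the initial profile: u(x, t) = u0(x - c * t).
c = 1.0
x = torch.linspace(0, 2 * torch.pi, 64)
t = torch.linspace(0, 1, 32)
X, T = torch.meshgrid(x, t, indexing="ij")
U = torch.sin(X - c * T)

# Simplified encoding: the branch receives u0 evaluated at the query point
# (instead of the usual fixed sensor values); the trunk receives (x, t).
u0 = torch.sin(X)
inputs = LabelTensor(
    torch.stack([u0.flatten(), X.flatten(), T.flatten()], dim=-1),
    ["u0", "x", "t"],
)
targets = U.flatten().unsqueeze(-1)

model = DeepONet(
    branch_net=FeedForward(input_dimensions=1, output_dimensions=10),
    trunk_net=FeedForward(input_dimensions=2, output_dimensions=10),
    input_indeces_branch_net=["u0"],
    input_indeces_trunk_net=["x", "t"],
    reduction="+",
    aggregator="*",
)

# Plain supervised regression loop on (input, target) pairs.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for _ in range(200):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    loss.backward()
    optimizer.step()
```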