338 changes: 94 additions & 244 deletions tutorials/tutorial1/tutorial.ipynb

Large diffs are not rendered by default.

145 changes: 42 additions & 103 deletions tutorials/tutorial1/tutorial.py
@@ -38,7 +38,7 @@
#
# ```python
# from pina.problem import SpatialProblem
# from pina.domain import CartesianDomain
# from pina.geometry import CartesianDomain
#
# class SimpleODE(SpatialProblem):
#
@@ -53,7 +53,7 @@
# What if our equation is also time-dependent? In this case, our `class` will inherit from both `SpatialProblem` and `TimeDependentProblem`:
#

# In[1]:
# In[ ]:


## routine needed to run the notebook on Google Colab
@@ -65,13 +65,9 @@
if IN_COLAB:
get_ipython().system('pip install "pina-mathlab"')

import warnings

from pina.problem import SpatialProblem, TimeDependentProblem
from pina.domain import CartesianDomain

warnings.filterwarnings('ignore')

class TimeSpaceODE(SpatialProblem, TimeDependentProblem):

output_variables = ['u']
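The rest of this class body is collapsed in the diff. As a reading aid only, a plausible completion (domain bounds and the empty `conditions` dict are assumptions, not taken from the PR) could look like:

```python
# hypothetical completion of the collapsed class: a TimeDependentProblem
# additionally needs a temporal_domain (bounds assumed here)
class TimeSpaceODE(SpatialProblem, TimeDependentProblem):

    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1]})
    temporal_domain = CartesianDomain({'t': [0, 1]})

    # conditions would be declared exactly as for the purely spatial problem below
    conditions = {}
```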
@@ -91,30 +87,25 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):

# ### Write the problem class
#
# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from the `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**:
# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operators` module. Again, we'll consider Equation (1) and represent it in **PINA**:

# In[2]:


import torch
import matplotlib.pyplot as plt

from pina.problem import SpatialProblem
from pina.operator import grad
from pina.operators import grad
from pina import Condition
from pina.domain import CartesianDomain
from pina.equation import Equation, FixedValue

import torch


class SimpleODE(SpatialProblem):

output_variables = ['u']
spatial_domain = CartesianDomain({'x': [0, 1]})

domains ={
'x0': CartesianDomain({'x': 0.}),
'D': CartesianDomain({'x': [0, 1]})
}

# defining the ode equation
def ode_equation(input_, output_):

@@ -129,10 +120,13 @@ def ode_equation(input_, output_):

# conditions to hold
conditions = {
'bound_cond': Condition(domain='x0', equation=FixedValue(1.)),
'phys_cond': Condition(domain='D', equation=Equation(ode_equation))
'x0': Condition(location=CartesianDomain({'x': 0.}), equation=FixedValue(1)), # We fix initial condition to value 1
'D': Condition(location=CartesianDomain({'x': [0, 1]}), equation=Equation(ode_equation)), # We wrap the python equation using Equation
}

# sampled points (see below)
input_pts = None

# defining the true solution
def truth_solution(self, pts):
return torch.exp(pts.extract(['x']))
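The body of `ode_equation` is collapsed in this hunk. Since the fixed value at `x0` is 1 and the truth solution is `exp(x)`, Equation (1) is du/dx = u, so a plausible sketch of the residual (variable names assumed) is:

```python
# hypothetical sketch of the collapsed residual for du/dx = u
def ode_equation(input_, output_):
    # derivative of the network output u with respect to x
    u_x = grad(output_, input_, components=['u'], d=['x'])
    u = output_.extract(['u'])
    # residual that the PINN drives towards zero
    return u_x - u
```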
@@ -155,14 +149,14 @@ def truth_solution(self, pts):


# sampling 20 points in [0, 1] through discretization in all locations
problem.discretise_domain(n=20, mode='grid', domains='all')
problem.discretise_domain(n=20, mode='grid', variables=['x'], locations='all')

# sampling 20 points in (0, 1) through latin hypercube sampling in D, and 1 point in x0
problem.discretise_domain(n=20, mode='latin', domains=['D'])
problem.discretise_domain(n=1, mode='random', domains=['x0'])
problem.discretise_domain(n=20, mode='latin', variables=['x'], locations=['D'])
problem.discretise_domain(n=1, mode='random', variables=['x'], locations=['x0'])

# sampling 20 points in (0, 1) randomly
problem.discretise_domain(n=20, mode='random')
problem.discretise_domain(n=20, mode='random', variables=['x'])


# We are going to use Latin hypercube points for sampling. We need to sample in the domains of all the conditions. In our case we sample in `D` and `x0`.
@@ -171,45 +165,41 @@ def truth_solution(self, pts):


# sampling for training
problem.discretise_domain(1, 'random', domains=['x0']) # TODO check
problem.discretise_domain(20, 'lh', domains=['D'])
problem.discretise_domain(1, 'random', locations=['x0'])
problem.discretise_domain(20, 'lh', locations=['D'])


# The points are saved in a python `dict`, and can be accessed by calling the attribute `input_pts` of the problem

# In[5]:


print('Input points:', problem.discretised_domains)
print('Input points labels:', problem.discretised_domains['D'].labels)
print('Input points:', problem.input_pts)
print('Input points labels:', problem.input_pts['D'].labels)


# To visualize the sampled points we can use the `.plot_samples` method of the `Plotter` class

# To visualize the sampled points we can use `matplotlib.pyplot`:
# In[5]:

# In[6]:

from pina import Plotter

variables = problem.spatial_variables
fig = plt.figure()
proj = "3d" if len(variables) == 3 else None
ax = fig.add_subplot(projection=proj)
for location in problem.input_pts:
    coords = problem.input_pts[location].extract(variables).T.detach()
    ax.plot(coords.flatten(), torch.zeros(coords.flatten().shape), ".", label=location)
ax.legend()  # show which domain each set of points was sampled from
pl = Plotter()
pl.plot_samples(problem=problem)


# ## Perform a small training

# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solver`. We highlight that this training is fairly simple; for more advanced topics, consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some methods for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metrics yourself without a logger, use `pina.callback.MetricTracker`.
# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solvers`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some method for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightining` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callbacks.MetricTracker`.

# In[7]:
# In[ ]:


from pina import Trainer
from pina.solver import PINN
from pina.solvers import PINN
from pina.model import FeedForward
from lightning.pytorch.loggers import TensorBoardLogger
from pina.optim import TorchOptimizer
from pina.callbacks import MetricTracker


# build the model
@@ -221,93 +211,42 @@ def truth_solution(self, pts):
)

# create the PINN object
pinn = PINN(problem, model, TorchOptimizer(torch.optim.Adam, lr=0.005))
pinn = PINN(problem, model)

# create the trainer
trainer = Trainer(solver=pinn, max_epochs=1500, logger=TensorBoardLogger('tutorial_logs'),
accelerator='cpu',
train_size=1.0,
test_size=0.0,
val_size=0.0,
enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
trainer = Trainer(solver=pinn, max_epochs=1500, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)

# train
trainer.train()


# After the training we can inspect trainer logged metrics (by default **PINA** logs mean square error residual loss). The logged metrics can be accessed online using one of the `Lightning` loggers. The final loss can be accessed by `trainer.logged_metrics`
# After the training we can inspect trainer logged metrics (by default **PINA** logs mean square error residual loss). The logged metrics can be accessed online using one of the `Lightinig` loggers. The final loss can be accessed by `trainer.logged_metrics`

# In[8]:
# In[7]:


# inspecting final loss
trainer.logged_metrics
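`trainer.logged_metrics` is a plain dictionary of scalar tensors, so the tracked values can also be printed explicitly; a minimal sketch:

```python
# print every metric tracked by the logger at the end of training
for name, value in trainer.logged_metrics.items():
    print(f'{name}: {float(value):.6f}')
```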


# By using `matplotlib` we can also do some qualitative plots of the solution.

# In[9]:


pts = pinn.problem.spatial_domain.sample(256, 'grid', variables='x')
predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach()
true_output = pinn.problem.truth_solution(pts).cpu().detach()
pts = pts.cpu()
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
ax.plot(pts.extract(['x']), predicted_output, label='Neural Network solution')
ax.plot(pts.extract(['x']), true_output, label='True solution')
plt.legend()


# The solution is overlapped with the actual one, and they are barely indistinguishable. We can also take a look at the loss using `TensorBoard`:

# In[ ]:


print('\nTo load TensorBoard in a notebook run %load_ext tensorboard')
print("To visualize the loss you can run tensorboard --logdir 'tutorial_logs' on your terminal\n")
# By using the `Plotter` class from **PINA** we can also do some quatitative plots of the solution.

# In[8]:

# As we can see the loss has not reached a minimum, suggesting that we could train for longer! Alternatively, we can also take a look at the loss using callbacks. Here we use `MetricTracker` from `pina.callback`:

# In[11]:
# plotting the solution
pl.plot(solver=pinn)


from pina.callback import MetricTracker
# The solution is overlapped with the actual one, and they are barely indistinguishable. We can also plot easily the loss:

#create the model
newmodel = FeedForward(
layers=[10, 10],
func=torch.nn.Tanh,
output_dimensions=len(problem.output_variables),
input_dimensions=len(problem.input_variables)
)
# In[9]:

# create the PINN object
newpinn = PINN(problem, newmodel, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.005))

# create the trainer
newtrainer = Trainer(solver=newpinn, max_epochs=1500, logger=True, #enable parameter logging
callbacks=[MetricTracker()],
accelerator='cpu',
train_size=1.0,
test_size=0.0,
val_size=0.0,
enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)

# train
newtrainer.train()

#plot loss
trainer_metrics = newtrainer.callbacks[0].metrics
loss = trainer_metrics['train_loss']
epochs = range(len(loss))
plt.plot(epochs, loss.cpu())
# plotting
plt.xlabel('epoch')
plt.ylabel('loss')
plt.yscale('log')

# As we can see the loss has not reached a minimum, suggesting that we could train for longer.
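Pulling the added lines of this file together, the updated training pipeline reads roughly as follows. This is an editorial summary of the new-API calls shown in the diff (module paths `pina.solver`, `pina.optim`, `pina.callback`), not a verbatim excerpt:

```python
# condensed view of the training workflow introduced by this change
from pina import Trainer
from pina.solver import PINN
from pina.model import FeedForward
from pina.optim import TorchOptimizer
from pina.callback import MetricTracker

model = FeedForward(
    layers=[10, 10],
    func=torch.nn.Tanh,
    output_dimensions=len(problem.output_variables),
    input_dimensions=len(problem.input_variables),
)
pinn = PINN(problem, model, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.005))
trainer = Trainer(
    solver=pinn,
    max_epochs=1500,
    accelerator='cpu',
    callbacks=[MetricTracker()],
    train_size=1.0, test_size=0.0, val_size=0.0,
    enable_model_summary=False,
)
trainer.train()
```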

# ## What's next?
#
55 changes: 25 additions & 30 deletions tutorials/tutorial10/tutorial.ipynb

Large diffs are not rendered by default.

38 changes: 16 additions & 22 deletions tutorials/tutorial10/tutorial.py
@@ -32,17 +32,14 @@

import torch
import matplotlib.pyplot as plt
import warnings

plt.style.use('tableau-colorblind10')
from scipy import io
from pina import Condition, LabelTensor
from pina.problem import AbstractProblem
from pina.model import AveragingNeuralOperator
from pina.solver import SupervisedSolver
from pina.solvers import SupervisedSolver
from pina.trainer import Trainer

warnings.filterwarnings('ignore')


# ## Data Generation
#
@@ -84,7 +81,7 @@


# load data
data=io.loadmat("data/Data_KS.mat")
data=io.loadmat("dat/Data_KS.mat")

# converting to label tensor
initial_cond_train = LabelTensor(torch.tensor(data['initial_cond_train'], dtype=torch.float), ['t','x','u0'])
@@ -206,33 +203,30 @@ def forward(self, x):
# We will now focus on solving the KS equation using the `SupervisedSolver` class
# and the `AveragingNeuralOperator` model. As done in the [FNO tutorial](https://github.com/mathLab/PINA/blob/master/tutorials/tutorial5/tutorial.ipynb) we now create the `NeuralOperatorProblem` class by inheriting from `AbstractProblem`.

# In[5]:
# In[6]:


# expected running time ~ 1 minute

class NeuralOperatorProblem(AbstractProblem):
input_variables = initial_cond_train.labels
output_variables = sol_train.labels
conditions = {'data' : Condition(input=initial_cond_train,
target=sol_train)}
conditions = {'data' : Condition(input_points=initial_cond_train,
output_points=sol_train)}


# initialize problem
problem = NeuralOperatorProblem()
# initialize solver
solver = SupervisedSolver(problem=problem, model=model)
solver = SupervisedSolver(problem=problem, model=model,optimizer_kwargs={"lr":0.001})
# train, only CPU and avoid model summary at beginning of training (optional)
trainer = Trainer(solver=solver, max_epochs=40, accelerator='cpu', enable_model_summary=False, log_every_n_steps=-1, batch_size=5, # we train on CPU and avoid model summary at beginning of training (optional)
train_size=1.0,
val_size=0.0,
test_size=0.0)
trainer = Trainer(solver=solver, max_epochs=40, accelerator='cpu', enable_model_summary=False, log_every_n_steps=-1, batch_size=5) # we train on CPU and avoid model summary at beginning of training (optional)
trainer.train()


# We can now see some plots for the solutions

# In[6]:
# In[7]:


sample_number = 2
@@ -242,13 +236,13 @@
no_sol=no_sol[5])


# As we can see we can obtain nice results considering the small training time and the difficulty of the problem!
# Let's take a look at the training and testing error:
# As we can see we can obtain nice result considering the small trainint time and the difficulty of the problem!
# Let's see how the training and testing error:

# In[7]:
# In[8]:


from pina.loss import PowerLoss
from pina.loss.loss_interface import PowerLoss

error_metric = PowerLoss(p=2) # we use the MSE loss

@@ -261,14 +255,14 @@ class NeuralOperatorProblem(AbstractProblem):
print(f'Testing error: {float(err_test):.3f}')
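The lines that actually produce `err_train` and `err_test` are collapsed in this hunk. A hypothetical reconstruction, assuming test tensors named analogously to the training ones, could be:

```python
# hypothetical sketch of the collapsed error computation (tensor names assumed)
with torch.no_grad():
    err_train = error_metric(model(initial_cond_train), sol_train)
    err_test = error_metric(model(initial_cond_test), sol_test)
```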


# As we can see the error is pretty small, which agrees with what we can see from the previous plots.
# as we can see the error is pretty small, which agrees with what we can see from the previous plots.

# ## What's next?
#
# Now you know how to solve a time dependent neural operator problem in **PINA**! There are multiple directions you can go now:
#
# 1. Train the network for longer or with different layer sizes and assert the final accuracy
# 1. Train the network for longer or with different layer sizes and assert the finaly accuracy
#
# 2. We left a more challenging dataset [Data_KS2.mat](dat/Data_KS2.mat) where $A_k \in [-0.5, 0.5]$, $\ell_k \in [1, 2, 3]$, $\phi_k \in [0, 2\pi]$ for longer training
# 2. We left a more challenging dataset [Data_KS2.mat](dat/Data_KS2.mat) where $A_k \in [-0.5, 0.5]$, $\ell_k \in [1, 2, 3]$, $\phi_k \in [0, 2\pi]$ for loger training
#
# 3. Compare the performance between the different neural operators (you can even try to implement your favourite one!)