
Commit 610ff1b

MatteB03ndem0 authored and committed
Update tutorials 8 through 14
1 parent 385c1f0 commit 610ff1b

File tree

14 files changed (+264, -317 lines)
21.5 MB (binary file not shown)
21.5 MB (binary file not shown)

tutorials/tutorial10/tutorial.ipynb

Lines changed: 7 additions & 7 deletions
Large diffs are not rendered by default.

tutorials/tutorial10/tutorial.py

Lines changed: 3 additions & 3 deletions
@@ -32,7 +32,7 @@
 
 import torch
 import matplotlib.pyplot as plt
-plt.style.use('tableau-colorblind10')
+
 from scipy import io
 from pina import Condition, LabelTensor
 from pina.problem import AbstractProblem
@@ -211,8 +211,8 @@ def forward(self, x):
 class NeuralOperatorProblem(AbstractProblem):
     input_variables = initial_cond_train.labels
     output_variables = sol_train.labels
-    conditions = {'data' : Condition(input_points=initial_cond_train,
-                                     output_points=sol_train)}
+    conditions = {'data' : Condition(input=initial_cond_train,
+                                     target=sol_train)}
 
 
 # initialize problem
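
Note on the tutorial10 change above: the commit renames `Condition`'s data keywords, with `input_points`/`output_points` becoming `input`/`target`. A minimal sketch of the new call, using hypothetical toy tensors in place of the tutorial's `initial_cond_train`/`sol_train`:

import torch
from pina import Condition, LabelTensor

# hypothetical stand-ins for the tutorial's initial_cond_train / sol_train
initial_cond = LabelTensor(torch.rand(10, 3), labels=['i0', 'i1', 'i2'])
solution = LabelTensor(torch.rand(10, 3), labels=['s0', 's1', 's2'])

# new keyword names: 'input' / 'target' replace 'input_points' / 'output_points'
data_condition = Condition(input=initial_cond, target=solution)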

tutorials/tutorial11/logging.png

204 KB

tutorials/tutorial13/tutorial.ipynb

Lines changed: 57 additions & 52 deletions
Large diffs are not rendered by default.

tutorials/tutorial13/tutorial.py

Lines changed: 60 additions & 35 deletions
@@ -26,17 +26,21 @@
 get_ipython().system('pip install "pina-mathlab"')
 
 import torch
+import matplotlib.pyplot as plt
+import warnings
 
-from pina import Condition, Plotter, Trainer, Plotter
+from pina import Condition, Trainer
 from pina.problem import SpatialProblem
-from pina.operators import laplacian
-from pina.solvers import PINN, SAPINN
-from pina.model.layers import FourierFeatureEmbedding
+from pina.operator import laplacian
+from pina.solver import PINN, SelfAdaptivePINN as SAPINN
+from pina.model.block import FourierFeatureEmbedding
 from pina.loss import LpLoss
 from pina.domain import CartesianDomain
 from pina.equation import Equation, FixedValue
 from pina.model import FeedForward
 
+warnings.filterwarnings('ignore')
+
 
 # ## Multiscale Problem
 #
@@ -74,10 +78,10 @@ def poisson_equation(input_, output_):
 
 # here we write the problem conditions
 conditions = {
-    'bound_cond0' : Condition(domain=CartesianDomain({'x': 0}),
-                              equation=FixedValue(0)),
-    'bound_cond1' : Condition(domain=CartesianDomain({'x': 1}),
-                              equation=FixedValue(0)),
+    'bound_cond0' : Condition(domain=CartesianDomain({'x': 0.}),
+                              equation=FixedValue(0.)),
+    'bound_cond1' : Condition(domain=CartesianDomain({'x': 1.}),
+                              equation=FixedValue(0.)),
     'phys_cond': Condition(domain=spatial_domain,
                            equation=Equation(poisson_equation)),
 }
@@ -88,44 +92,66 @@ def truth_solution(self, x):
 problem = Poisson()
 
 # let's discretise the domain
-problem.discretise_domain(128, 'grid')
+problem.discretise_domain(128, 'grid', domains=['phys_cond'])
+problem.discretise_domain(1, 'grid', domains=['bound_cond0', 'bound_cond1'])
 
 
 # A standard PINN approach would be to fit this model with a FeedForward (fully connected) neural network. For a conventional fully-connected neural network it is easy to
 # approximate a function $u$, given sufficient data inside the computational domain. However, solving high-frequency or multi-scale problems presents great challenges to PINNs, especially when the collocation points cannot capture the different scales.
 #
 # Below we run a simulation using the `PINN` solver and the self-adaptive `SAPINN` solver, both with a [`FeedForward`](https://mathlab.github.io/PINA/_modules/pina/model/feed_forward.html#FeedForward) model. We use a `MultiStepLR` scheduler to slowly decrease the learning rate during training (it takes around 2 minutes to run on CPU).
 
-# In[19]:
+# In[3]:
+
 
+from pina.optim import TorchScheduler
 
 # training with PINN and visualize results
+model = FeedForward(input_dimensions=1, output_dimensions=1, layers=[100, 100, 100])
 pinn = PINN(problem=problem,
-            model=FeedForward(input_dimensions=1, output_dimensions=1, layers=[100, 100, 100]),
-            scheduler=torch.optim.lr_scheduler.MultiStepLR,
-            scheduler_kwargs={'milestones' : [1000, 2000, 3000, 4000], 'gamma': 0.9})
-trainer = Trainer(pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False)
+            model=model,
+            scheduler=TorchScheduler(torch.optim.lr_scheduler.MultiStepLR,  # pass the class directly, not an instance
+                                     milestones=[1000, 2000, 3000, 4000],
+                                     gamma=0.9))
+
+trainer = Trainer(pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False, val_size=0., train_size=1., test_size=0.)
 trainer.train()
 
 # training with SAPINN and visualize results
 sapinn = SAPINN(problem=problem,
-                model=FeedForward(input_dimensions=1, output_dimensions=1, layers=[100, 100, 100]),
-                scheduler_model=torch.optim.lr_scheduler.MultiStepLR,
-                scheduler_model_kwargs={'milestones' : [1000, 2000, 3000, 4000], 'gamma': 0.9})
-trainer_sapinn = Trainer(sapinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False)
+                model=model,
+                scheduler_model=TorchScheduler(torch.optim.lr_scheduler.MultiStepLR,
+                                               milestones=[1000, 2000, 3000, 4000],
+                                               gamma=0.9))
+trainer_sapinn = Trainer(sapinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False, val_size=0., train_size=1., test_size=0.)
 trainer_sapinn.train()
 
-# plot results
-pl = Plotter()
-pl.plot(pinn, title='PINN Solution')
-pl.plot(sapinn, title='Self Adaptive PINN Solution')
+
+# In[4]:
+
+
+# define the function that plots the obtained solution with matplotlib
+def plot_solution(pinn_to_use, title):
+    pts = pinn_to_use.problem.spatial_domain.sample(256, 'grid', variables='x')
+    predicted_output = pinn_to_use.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach()
+    true_output = pinn_to_use.problem.truth_solution(pts).cpu().detach()
+    pts = pts.cpu()
+    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
+    ax.plot(pts.extract(['x']), predicted_output, label='Neural Network solution')
+    ax.plot(pts.extract(['x']), true_output, label='True solution')
+    plt.title(title)
+    plt.legend()
+
+# plot the solution of the two PINNs
plot_solution(pinn, 'PINN solution')
+plot_solution(sapinn, 'Self Adaptive PINN solution')
 
 
 # We can clearly see that the solution has not been learned by either solver. The real problem is not the optimization strategy (i.e. the solver) but the model used to solve the problem: a simple `FeedForward` network can hardly handle multiple scales if not enough collocation points are used!
 #
 # We can also compute the $l_2$ relative error for the `PINN` and `SAPINN` solutions:
 
-# In[20]:
+# In[5]:
 
 
 # l2 loss from PINA losses
@@ -153,7 +179,7 @@ def truth_solution(self, x):
 # In PINA this feature is already implemented as a `layer` called [`FourierFeatureEmbedding`](https://mathlab.github.io/PINA/_rst/layers/fourier_embedding.html). Below we build the *Multi-scale Fourier Feature Architecture*, in which multiple Fourier feature embeddings (initialized with different $\sigma$)
 # are applied to the input coordinates and then passed through the same fully-connected neural network, before the outputs are finally concatenated with a linear layer.
 
-# In[21]:
+# In[6]:
 
 
 class MultiscaleFourierNet(torch.nn.Module):
@@ -173,36 +199,35 @@ def forward(self, x):
         e2 = self.layers(self.embedding2(x))
         return self.final_layer(torch.cat([e1, e2], dim=-1))
 
-MultiscaleFourierNet()
-
 
 # We will train the `MultiscaleFourierNet` with the `PINN` solver (feel free to also try our other PINN variants: `SAPINN`, `GPINN`, `CompetitivePINN`, ...).
 
-# In[22]:
+# In[7]:
 
 
 multiscale_pinn = PINN(problem=problem,
                        model=MultiscaleFourierNet(),
-                       scheduler=torch.optim.lr_scheduler.MultiStepLR,
-                       scheduler_kwargs={'milestones' : [1000, 2000, 3000, 4000], 'gamma': 0.9})
-trainer = Trainer(multiscale_pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False)  # we train on CPU and avoid model summary at beginning of training (optional)
+                       scheduler=TorchScheduler(torch.optim.lr_scheduler.MultiStepLR,
+                                                milestones=[1000, 2000, 3000, 4000],
+                                                gamma=0.9))
+trainer = Trainer(multiscale_pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False, val_size=0., train_size=1., test_size=0.)  # we train on CPU and avoid model summary at beginning of training (optional)
 trainer.train()
 
 
 # Let us now plot the solution and compute the relative $l_2$ error again!
 
-# In[24]:
+# In[8]:
 
 
-# plot the solution
-pl.plot(multiscale_pinn, title='Solution PINN with MultiscaleFourierNet')
+# plot the obtained solution
+plot_solution(multiscale_pinn, 'Multiscale PINN solution')
 
 # sample new test points
 pts = problem.spatial_domain.sample(100, 'grid')
-print(f'Relative l2 error PINN with MultiscaleFourierNet {l2_loss(multiscale_pinn(pts), problem.truth_solution(pts)).item():.2%}')
+print(f'Relative l2 error PINN with MultiscaleFourierNet: {l2_loss(multiscale_pinn(pts), problem.truth_solution(pts)).item():.2%}')
 
 
-# It is pretty clear that the network has learned the correct solution, with also a very law error. Obviously a longer training and a more expressive neural network could improve the results!
+# It is pretty clear that the network has learned the correct solution, with a very low error as well. Obviously, longer training and a more expressive neural network could improve the results!
 #
 # ## What's next?
 #
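
The recurring tutorial13 change is the scheduler API: instead of passing a scheduler class plus a `scheduler_kwargs` dict to the solver, the class and its keyword arguments are now wrapped in `pina.optim.TorchScheduler`. A minimal sketch of the new pattern as used in the diff above, where `problem` stands for the tutorial's Poisson problem:

import torch
from pina import Trainer
from pina.optim import TorchScheduler
from pina.solver import PINN
from pina.model import FeedForward

# wrap the torch scheduler class (not an instance) together with its keyword arguments
scheduler = TorchScheduler(torch.optim.lr_scheduler.MultiStepLR,
                           milestones=[1000, 2000, 3000, 4000],
                           gamma=0.9)

model = FeedForward(input_dimensions=1, output_dimensions=1, layers=[100, 100, 100])
pinn = PINN(problem=problem, model=model, scheduler=scheduler)  # 'problem' as defined in the tutorial
trainer = Trainer(pinn, max_epochs=5000, accelerator='cpu')
trainer.train()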

tutorials/tutorial14/tutorial.ipynb

Lines changed: 29 additions & 113 deletions
Large diffs are not rendered by default.

tutorials/tutorial14/tutorial.py

Lines changed: 10 additions & 8 deletions
@@ -26,12 +26,15 @@
 get_ipython().run_line_magic('matplotlib', 'inline')
 
 import matplotlib.pyplot as plt
-plt.style.use('tableau-colorblind10')
 import torch
 import pina
+import warnings
+
 from pina.model.layers import PODBlock, RBFBlock
 from pina import LabelTensor
 
+warnings.filterwarnings('ignore')
+
 
 # In this tutorial we're going to use the `LidCavity` class from the [Smithers](https://github.com/mathLab/Smithers) library, which contains a set of parametric solutions of the lid-driven cavity problem in a square domain. The dataset consists of 300 snapshots of the solution fields, in this case the velocity magnitude $u$ and the pressure $p$. Each snapshot corresponds to a different value of the tangential velocity $\mu$ of the lid, sampled uniformly between 0.01 m/s and 1 m/s.
 #
@@ -40,7 +43,6 @@
 # In[2]:
 
 
-get_ipython().system('pip install git+https://github.com/mathLab/Smithers.git #if required --break-system-packages')
 import smithers
 from smithers.dataset import LidCavity
 dataset = LidCavity()
@@ -165,7 +167,7 @@ def fit(self, p, x):
 
 # Finally we can calculate the relative error for our model:
 
-# In[ ]:
+# In[9]:
 
 
 relative_u_error_train = torch.norm(u_train_rbf - u_train)/torch.norm(u_train)
@@ -178,7 +180,7 @@ def fit(self, p, x):
 
 # The results are promising! Now let's visualise them, comparing four random predicted snapshots to the true ones:
 
-# In[ ]:
+# In[10]:
 
 
 import numpy as np
@@ -212,7 +214,7 @@ def fit(self, p, x):
 
 # Overall we have reached a good level of approximation while avoiding time-consuming training procedures. Let's try doing the same to predict the pressure snapshots:
 
-# In[ ]:
+# In[11]:
 
 
 '''create the model'''
@@ -235,7 +237,7 @@ def fit(self, p, x):
 
 # Unfortunately here we obtain a very high relative test error, although this is likely due to the nature of the available data. Looking at the plots we can see that the pressure field is subject to high variations between subsequent snapshots, especially here:
 
-# In[ ]:
+# In[12]:
 
 
 fig, axs = plt.subplots(2, 3, figsize=(14, 6))
@@ -250,7 +252,7 @@ def fit(self, p, x):
 
 # Or here:
 
-# In[ ]:
+# In[13]:
 
 
 fig, axs = plt.subplots(2, 3, figsize=(14, 6))
@@ -264,7 +266,7 @@ def fit(self, p, x):
 
 # Scrolling through the velocity snapshots we observe more regular behaviour, with no such variations between subsequent snapshots. Moreover, if we decide not to consider the above-mentioned "problematic" snapshots, we already observe a huge improvement:
 
-# In[ ]:
+# In[14]:
 
 
 '''excluding problematic snapshots'''
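
The error metric used throughout tutorial14 is the relative $l_2$ norm, computed directly with `torch.norm` as in the hunks above. A self-contained sketch on dummy data (shapes and names are illustrative, not taken from the tutorial):

import torch

def relative_l2_error(pred, true):
    # ||pred - true||_2 / ||true||_2
    return (torch.norm(pred - true) / torch.norm(true)).item()

u_true = torch.rand(300, 1024)                     # e.g. 300 snapshots of a discretised field
u_pred = u_true + 0.01 * torch.randn_like(u_true)  # a hypothetical model prediction
print(f'relative l2 error: {relative_l2_error(u_pred, u_true):.2%}')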

tutorials/tutorial2/tutorial.ipynb

Lines changed: 1 addition & 2 deletions
@@ -16,7 +16,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 4,
+    "execution_count": null,
     "id": "ad0b8dd7",
     "metadata": {},
     "outputs": [],
@@ -43,7 +43,6 @@
     "from pina.domain import CartesianDomain\n",
     "from pina.equation import Equation, FixedValue\n",
     "from pina import Condition, LabelTensor\n",
-    "from pina.callback import MetricTracker\n",
     "\n",
     "from lightning.pytorch.loggers import TensorBoardLogger\n",
     "\n",
