
Commit a765a9f

MatteB03ndem0 authored and committed
Add plot in tutorials 1,3,4,9
1 parent e75a8ec commit a765a9f

File tree: 8 files changed (+704, -217 lines)


tutorials/tutorial1/tutorial.ipynb

Lines changed: 122 additions & 83 deletions
Large diffs are not rendered by default.

tutorials/tutorial1/tutorial.py

Lines changed: 38 additions & 14 deletions
@@ -89,7 +89,7 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
 #
 # Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. To do this, we need to load the **PINA** operators from the `pina.operators` module. Again, we'll consider Equation (1) and represent it in **PINA**:

-# In[ ]:
+# In[2]:


 from pina.problem import SpatialProblem
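
The comment above references the PINA operators without showing them in use. For orientation, a residual assembled with those operators looks roughly like the sketch below; this is a hypothetical ODE u'(x) = u(x), not the tutorial's actual Equation (1), and the module is spelled `pina.operator` elsewhere in this commit:

from pina.operator import grad

def ode_equation(input_, output_):
    # residual of the hypothetical ODE u'(x) = u(x); the PINN solver
    # drives this residual to zero at the sampled points
    u = output_.extract(['u'])
    u_x = grad(output_, input_, components=['u'], d=['x'])
    return u_x - u
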
@@ -167,7 +167,7 @@ def truth_solution(self, pts):


 # sampling for training
-problem.discretise_domain(20, 'random', domains=['x0']) # TODO check
+problem.discretise_domain(1, 'random', domains=['x0']) # TODO check
 problem.discretise_domain(20, 'lh', domains=['D'])


@@ -180,28 +180,32 @@ def truth_solution(self, pts):
 print('Input points labels:', problem.discretised_domains['D'].labels)


-# To visualize the sampled points we can use the `.plot_samples` method of the `Plotter` class
+# To visualize the sampled points we can use `matplotlib.pyplot`:

 # In[6]:


-#from pina import Plotter
-
-#pl = Plotter()
-#pl.plot_samples(problem=problem)
+import matplotlib.pyplot as plt
+variables = problem.spatial_variables
+fig = plt.figure()
+proj = "3d" if len(variables) == 3 else None
+ax = fig.add_subplot(projection=proj)
+for location in problem.input_pts:
+    coords = problem.input_pts[location].extract(variables).T.detach()
+    ax.plot(coords.flatten(), torch.zeros(coords.flatten().shape), ".", label=location)


 # ## Perform a small training

-# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solvers`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some method for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightining` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callbacks.MetricTracker`.
+# Once we have defined the problem and generated the data, we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solver`. We highlight that this training is fairly simple; for more advanced topics, consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some methods for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metrics yourself without a logger, use `pina.callback.MetricTracker`.

 # In[7]:


 from pina import Trainer
-from pina.solvers import PINN
+from pina.solver import PINN
 from pina.model import FeedForward
-from pina.callbacks import MetricTracker
+from pina.callback import MetricTracker


 # build the model
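
The paragraph above names the full training pipeline that these renamed imports feed into. A minimal end-to-end sketch follows, with assumed constructor signatures; layer sizes, the epoch count and keyword names are illustrative, not taken from this diff, so check them against the installed PINA version:

from pina import Trainer
from pina.model import FeedForward
from pina.solver import PINN
from pina.callback import MetricTracker

# assumed signatures; `problem` is the problem instance defined earlier
model = FeedForward(
    input_dimensions=len(problem.input_variables),
    output_dimensions=len(problem.output_variables),
)
pinn = PINN(problem=problem, model=model)
trainer = Trainer(solver=pinn, max_epochs=1000, callbacks=[MetricTracker()])
trainer.train()
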
@@ -229,23 +233,43 @@ def truth_solution(self, pts):

 # inspecting final loss
 trainer.logged_metrics
+print(type(problem.truth_solution))


-# By using the `Plotter` class from **PINA** we can also do some quatitative plots of the solution.
+# By using `matplotlib` we can also do some qualitative plots of the solution.

 # In[9]:


-# plotting the solution
-#pl.plot(solver=pinn)
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables='x')
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach()
+true_output = pinn.problem.truth_solution(pts).cpu().detach()
+pts = pts.cpu()
+fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
+ax.plot(pts.extract(['x']), predicted_output, label='Neural Network solution')
+ax.plot(pts.extract(['x']), true_output, label='True solution')
+plt.legend()


 # The predicted solution overlaps the true one, and the two curves are barely distinguishable. We can also easily plot the loss:

 # In[10]:


-#pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)
+# locate the MetricTracker among the trainer callbacks
+list_ = [
+    idx for idx, s in enumerate(trainer.callbacks)
+    if isinstance(s, MetricTracker)
+]
+print(list_[0])
+trainer_metrics = trainer.callbacks[list_[0]].metrics
+
+# plotting the tracked validation loss on a log scale
+loss = trainer_metrics['val_loss']
+epochs = range(len(loss))
+plt.plot(epochs, loss.cpu())
+plt.xlabel('epoch')
+plt.ylabel('loss')
+plt.yscale('log')


 # As we can see, the loss has not reached a minimum, suggesting that we could train for longer.
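
The index-list lookup added in cell In[10] can be expressed more directly. An equivalent sketch, under the same assumption that a `MetricTracker` instance was passed to the `Trainer`:

# equivalent to the list-comprehension lookup above
tracker = next(c for c in trainer.callbacks if isinstance(c, MetricTracker))
loss = tracker.metrics['val_loss']
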

tutorials/tutorial3/tutorial.ipynb

Lines changed: 253 additions & 27 deletions
Large diffs are not rendered by default.

tutorials/tutorial3/tutorial.py

Lines changed: 152 additions & 19 deletions
@@ -9,7 +9,7 @@
 #
 # First of all, some useful imports.

-# In[1]:
+# In[12]:


 ## routine needed to run the notebook on Google Colab
@@ -23,14 +23,15 @@

 import torch

+import matplotlib.pyplot as plt
 from pina.problem import SpatialProblem, TimeDependentProblem
 from pina.operator import laplacian, grad
 from pina.domain import CartesianDomain
 from pina.solver import PINN
 from pina.trainer import Trainer
 from pina.equation import Equation
 from pina.equation.equation_factory import FixedValue
-from pina import Condition
+from pina import Condition, LabelTensor


 # ## The problem definition
# ## The problem definition
@@ -49,7 +50,7 @@

 # Now, the wave problem is written in PINA code as a class, inheriting from `SpatialProblem` and `TimeDependentProblem`, since we deal with spatial and time-dependent variables. The equations are written as `conditions` that should be satisfied in the corresponding domains. `truth_solution` is the exact solution, which will be compared with the predicted one.

-# In[2]:
+# In[13]:


 class Wave(TimeDependentProblem, SpatialProblem):
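
This hunk only renumbers the cell, so the class body is elided here; the structure the paragraph describes looks schematically like the skeleton below. The domains, the condition and the truth solution are placeholders, not the tutorial's actual wave problem, and the `Condition(domain=..., equation=...)` keywords are assumed from this commit's import style:

class WaveSkeleton(TimeDependentProblem, SpatialProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
    temporal_domain = CartesianDomain({'t': [0, 1]})

    # placeholder boundary condition: clamp u to zero on one edge
    conditions = {
        'gamma': Condition(
            domain=CartesianDomain({'x': [0, 1], 'y': 0.0, 't': [0, 1]}),
            equation=FixedValue(0.0),
        ),
    }

    def truth_solution(self, pts):
        # placeholder exact solution, only for comparison plots
        return torch.sin(torch.pi * pts.extract(['x']))
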
@@ -95,7 +96,7 @@ def wave_sol(self, pts):
 #
 # where $NN$ is the neural net output. This neural network takes as input the coordinates (in this case $x$, $y$ and $t$) and provides the unknown field $u$. By construction, it is zero on the boundaries. The residuals of the equations are evaluated at several sampling points (which the user can manipulate using the method `discretise_domain`) and the loss minimized by the neural network is the sum of the residuals.

-# In[3]:
+# In[14]:


 class HardMLP(torch.nn.Module):
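
The hard constraint described above, multiplying the raw network output by a function that vanishes on the boundary, can be sketched as a wrapper module. This is a hypothetical example; the actual `HardMLP` body is not shown in this hunk and may use a different ansatz:

class HardConstraintNet(torch.nn.Module):
    # wraps a network so its output is exactly zero on the boundary
    # of the unit square [0, 1] x [0, 1]
    def __init__(self, net):
        super().__init__()
        self.net = net

    def forward(self, pts):
        x = pts.extract(['x'])
        y = pts.extract(['y'])
        # x(1-x)y(1-y) vanishes on the boundary, so the product does too
        return x * (1 - x) * y * (1 - y) * self.net(pts)
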
@@ -119,7 +120,7 @@ def forward(self, x):

 # In this tutorial, the neural network is trained for 1000 epochs with a learning rate of 0.001 (the default in `PINN`). Training takes approximately 3 minutes.

-# In[4]:
+# In[15]:


 # generate the data
@@ -135,22 +136,88 @@ def forward(self, x):

 # Notice that the loss on the boundaries of the spatial domain is exactly zero, as expected! After the training is completed, one can now plot some results using `matplotlib`.

-# In[5]:
+# In[16]:


 #plotter = Plotter()

 # plotting at fixed time t = 0.0
-#print('Plotting at t=0')
+print('Plotting at t=0')
 #plotter.plot(pinn, fixed_variables={'t': 0.0})
-
+fixed_variables = {'t': 0.0}
+method = 'contourf'
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x', 'y'])
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256, 256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256, 256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2], method)(*grids, true_output - predicted_output)
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 0.5
-#print('Plotting at t=0.5')
+print('Plotting at t=0.5')
 #plotter.plot(pinn, fixed_variables={'t': 0.5})
-
+fixed_variables = {'t': 0.5}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x', 'y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256, 256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256, 256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2], method)(*grids, true_output - predicted_output)
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 1.
-#print('Plotting at t=1')
+print('Plotting at t=1')
 #plotter.plot(pinn, fixed_variables={'t': 1.0})
+fixed_variables = {'t': 1.0}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x', 'y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256, 256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256, 256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2], method)(*grids, true_output - predicted_output)
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')


 # The results are not so great, and we can clearly see that as time progresses the solution gets worse. Can we do better?
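
The three nearly identical blocks in cell In[16] (and again in In[19] below) differ only in the time slice, so they could be folded into a single helper. A sketch reusing the same calls as the diff:

def plot_at_time(pinn, t, method='contourf', res=256):
    # sample a spatial grid and append the fixed time coordinate
    pts = pinn.problem.spatial_domain.sample(res, 'grid', variables=['x', 'y'])
    fixed_pts = torch.full((pts.shape[0], 1), t).as_subclass(LabelTensor)
    fixed_pts.labels = ['t']
    pts = pts.append(fixed_pts).to(device=pinn.device)
    predicted = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(res, res)
    true = pinn.problem.truth_solution(pts).cpu().detach().reshape(res, res)
    grids = [p_.reshape(res, res) for p_ in pts.cpu().extract(['x', 'y']).T]
    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
    titles = ['Neural Network prediction', 'True solution', 'Residual']
    fields = [predicted, true, true - predicted]
    for a, field, title in zip(ax, fields, titles):
        cb = getattr(a, method)(*grids, field)
        fig.colorbar(cb, ax=a)
        a.title.set_text(title)

for t in (0.0, 0.5, 1.0):
    print(f'Plotting at t={t}')
    plot_at_time(pinn, t)
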
@@ -161,7 +228,7 @@ def forward(self, x):
 #
 # Let us build the network first.

-# In[6]:
+# In[17]:


 class HardMLPtime(torch.nn.Module):
@@ -184,7 +251,7 @@ def forward(self, x):

 # Now let's train with the same configuration as the previous test.

-# In[7]:
+# In[18]:


 # generate the data
@@ -200,22 +267,88 @@ def forward(self, x):

 # We can clearly see that the loss is way lower now. Let's plot the results.

-# In[8]:
+# In[19]:


 #plotter = Plotter()

 # plotting at fixed time t = 0.0
-#print('Plotting at t=0')
+print('Plotting at t=0')
 #plotter.plot(pinn, fixed_variables={'t': 0.0})
-
+fixed_variables = {'t': 0.0}
+method = 'contourf'
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x', 'y'])
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256, 256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256, 256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2], method)(*grids, true_output - predicted_output)
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 0.5
-#print('Plotting at t=0.5')
+print('Plotting at t=0.5')
 #plotter.plot(pinn, fixed_variables={'t': 0.5})
-
+fixed_variables = {'t': 0.5}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x', 'y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256, 256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256, 256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2], method)(*grids, true_output - predicted_output)
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 1.
-#print('Plotting at t=1')
+print('Plotting at t=1')
 #plotter.plot(pinn, fixed_variables={'t': 1.0})
+fixed_variables = {'t': 1.0}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x', 'y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256, 256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256, 256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x', 'y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2], method)(*grids, true_output - predicted_output)
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')


 # We can see now that the results are way better! This is because previously the network was not correctly learning the initial condition, leading to a poor solution as time evolved. By imposing the initial condition, the network is able to correctly solve the problem.

tutorials/tutorial4/tutorial.ipynb

Lines changed: 66 additions & 34 deletions
Large diffs are not rendered by default.
