
Commit 3d07a44

Github action: auto-update.
1 parent: ed928ac · commit: 3d07a44


88 files changed: +2390 −1362 lines changed


dev/_downloads/082e73328a5caf8c1fe9ad7fe05cf68f/plot_incremental_FNO_darcy.ipynb

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "\n# Training a neural operator on Darcy-Flow - Author Robert Joseph George\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package on Incremental FNO and Incremental Resolution\n"
+    "\n# Training an FNO with incremental meta-learning\nIn this example, we demonstrate how to use the small Darcy-Flow \nexample we ship with the package to demonstrate the Incremental FNO\nmeta-learning algorithm\n"
    ]
   },
   {

dev/_downloads/1a3050d57a180b92b424ce128dfe1d36/plot_FNO_darcy.py

Lines changed: 88 additions & 23 deletions
@@ -1,19 +1,21 @@
 """
-Training a TFNO on Darcy-Flow
+Training an FNO on Darcy-Flow
 =============================
 
-In this example, we demonstrate how to use the small Darcy-Flow example we ship with the package
-to train a Tensorized Fourier-Neural Operator
+In this example, we demonstrate how to use the small `Darcy-Flow example <../auto_examples/plot_darcy_flow.html>`_ we ship with the package
+to train a Fourier Neural Operator.
+
+Note that this dataset is much smaller than one we would use in practice. The small Darcy-flow is an example built to
+be trained on a CPU in a few seconds, whereas normally we would train on one or multiple GPUs.
 """
 
 # %%
 #
 
-
 import torch
 import matplotlib.pyplot as plt
 import sys
-from neuralop.models import TFNO
+from neuralop.models import FNO
 from neuralop import Trainer
 from neuralop.training import AdamW
 from neuralop.data.datasets import load_darcy_flow_small
@@ -24,7 +26,7 @@
 
 
 # %%
-# Loading the Navier-Stokes dataset in 128x128 resolution
+# Let's load the small Darcy-flow dataset.
 train_loader, test_loaders, data_processor = load_darcy_flow_small(
         n_train=1000, batch_size=32,
         test_resolutions=[16, 32], n_tests=[100, 50],
@@ -34,15 +36,22 @@
 
 
 # %%
-# We create a tensorized FNO model
+# We create a simple FNO model
 
-model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)
+model = FNO(n_modes=(16, 16),
+            in_channels=1,
+            out_channels=1,
+            hidden_channels=32,
+            projection_channel_ratio=2)
 model = model.to(device)
 
 n_params = count_model_params(model)
 print(f'\nOur model has {n_params} parameters.')
 sys.stdout.flush()
 
+# %%
+# Training setup
+# ----------------
 
 # %%
 #Create the optimizer
@@ -53,7 +62,7 @@
 
 
 # %%
-# Creating the losses
+# Then create the losses
 l2loss = LpLoss(d=2, p=2)
 h1loss = H1Loss(d=2)
 
@@ -62,7 +71,8 @@
 
 
 # %%
-
+# Training the model
+# ---------------------
 
 print('\n### MODEL ###\n', model)
 print('\n### OPTIMIZER ###\n', optimizer)
@@ -74,7 +84,7 @@
 
 
 # %%
-# Create the trainer
+# Create the trainer:
 trainer = Trainer(model=model, n_epochs=20,
                   device=device,
                   data_processor=data_processor,
@@ -85,7 +95,7 @@
 
 
 # %%
-# Actually train the model on our small Darcy-Flow dataset
+# Then train the model on our small Darcy-Flow dataset:
 
 trainer.train(train_loader=train_loader,
               test_loaders=test_loaders,
@@ -95,18 +105,61 @@
               training_loss=train_loss,
               eval_losses=eval_losses)
 
+# %%
+# .. plot_preds :
+# Visualizing predictions
+# ------------------------
+# Let's take a look at what our model's predicted outputs look like.
+# Again note that in this example, we train on a very small resolution for
+# a very small number of epochs.
+# In practice, we would train at a larger resolution, on many more samples.
+
+test_samples = test_loaders[16].dataset
+
+fig = plt.figure(figsize=(7, 7))
+for index in range(3):
+    data = test_samples[index]
+    data = data_processor.preprocess(data, batched=False)
+    # Input x
+    x = data['x']
+    # Ground-truth
+    y = data['y']
+    # Model prediction
+    out = model(x.unsqueeze(0))
+
+    ax = fig.add_subplot(3, 3, index*3 + 1)
+    ax.imshow(x[0], cmap='gray')
+    if index == 0:
+        ax.set_title('Input x')
+    plt.xticks([], [])
+    plt.yticks([], [])
+
+    ax = fig.add_subplot(3, 3, index*3 + 2)
+    ax.imshow(y.squeeze())
+    if index == 0:
+        ax.set_title('Ground-truth y')
+    plt.xticks([], [])
+    plt.yticks([], [])
+
+    ax = fig.add_subplot(3, 3, index*3 + 3)
+    ax.imshow(out.squeeze().detach().numpy())
+    if index == 0:
+        ax.set_title('Model prediction')
+    plt.xticks([], [])
+    plt.yticks([], [])
+
+fig.suptitle('Inputs, ground-truth output and prediction (16x16).', y=0.98)
+plt.tight_layout()
+fig.show()
+
 
 # %%
-# Plot the prediction, and compare with the ground-truth
-# Note that we trained on a very small resolution for
-# a very small number of epochs
-# In practice, we would train at larger resolution, on many more samples.
-#
-# However, for practicity, we created a minimal example that
-# i) fits in just a few Mb of memory
-# ii) can be trained quickly on CPU
-#
-# In practice we would train a Neural Operator on one or multiple GPUs
+# .. zero_shot :
+# Zero-shot super-evaluation
+# ---------------------------
+# In addition to training and making predictions on the same input size,
+# the FNO's invariance to the discretization of input data means we
+# can natively make predictions on higher-resolution inputs and get higher-resolution outputs.
 
 test_samples = test_loaders[32].dataset
 
@@ -142,6 +195,18 @@
     plt.xticks([], [])
     plt.yticks([], [])
 
-fig.suptitle('Inputs, ground-truth output and prediction.', y=0.98)
+fig.suptitle('Inputs, ground-truth output and prediction (32x32).', y=0.98)
 plt.tight_layout()
 fig.show()
+
+# %%
+# We only trained the model on data at a resolution of 16x16, and with no modifications
+# or special prompting, we were able to perform inference on higher-resolution input data
+# and get higher-resolution predictions! In practice, we often want to evaluate neural operators
+# at multiple resolutions to track a model's zero-shot super-evaluation performance throughout
+# training. That's why many of our datasets, including the small Darcy-flow we showcased,
+# are parameterized with a list of `test_resolutions` to choose from.
+#
+# However, as you can see, these predictions are noisier than we would expect for a model evaluated
+# at the same resolution at which it was trained. Leveraging the FNO's discretization-invariance, there
+# are other ways to scale the outputs of the FNO to train a true super-resolution capability.
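
The `test_resolutions` hook this file now describes also makes it easy to quantify the effect numerically rather than only visually. The following is a minimal illustrative sketch, not part of the diff above, that reports a mean relative L2 error at both the 16x16 training resolution and the 32x32 evaluation resolution. It reuses only objects and calls already shown in this example (`model`, `data_processor`, `test_loaders`, `torch`), and it assumes each test `.dataset` supports `len()` and integer indexing.

# Illustrative sketch only: per-resolution relative L2 error of the trained model.
# Assumes `model`, `data_processor`, and `test_loaders` are defined as in the
# example above, and that each `.dataset` supports len() and integer indexing.
model.eval()
with torch.no_grad():
    for res in [16, 32]:
        samples = test_loaders[res].dataset
        errors = []
        for index in range(len(samples)):
            data = data_processor.preprocess(samples[index], batched=False)
            x, y = data['x'], data['y']
            out = model(x.unsqueeze(0)).squeeze(0)
            # Relative L2 error of this single prediction
            errors.append((torch.norm(out - y) / torch.norm(y)).item())
        print(f"{res}x{res}: mean relative L2 error = {sum(errors) / len(errors):.4f}")

At the training resolution the error should be close to what the Trainer reported during evaluation; the gap at 32x32 is the zero-shot super-evaluation penalty discussed above.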

dev/_downloads/2a3ecbdce9fd535c53d44cc373f6a228/checkpoint_FNO_darcy.py

Lines changed: 14 additions & 7 deletions
@@ -1,17 +1,16 @@
 """
-Training a TFNO on Darcy-Flow
-=============================
+Checkpointing and loading training states
+=========================================
 
-In this example, we demonstrate how to use the small Darcy-Flow example we ship with the package
-to train a Tensorized Fourier-Neural Operator
+In this example, we demonstrate the Trainer's saving and loading functionality, which makes it easy to checkpoint and resume training states.
 """
 
 # %%
 #
 import torch
 import matplotlib.pyplot as plt
 import sys
-from neuralop.models import TFNO
+from neuralop.models import FNO
 from neuralop import Trainer
 from neuralop.training import AdamW
 from neuralop.data.datasets import load_darcy_flow_small
@@ -31,9 +30,16 @@
 
 
 # %%
-# We create a tensorized FNO model
+# We create an FNO model
+
+model = FNO(n_modes=(16, 16),
+            in_channels=1,
+            out_channels=1,
+            hidden_channels=32,
+            projection_channel_ratio=2,
+            factorization='tucker',
+            rank=0.42)
 
-model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)
 model = model.to(device)
 
 n_params = count_model_params(model)
@@ -94,6 +100,7 @@
               save_dir="./checkpoints")
 
 
+# .. resume_from_dir:
 # resume training from saved checkpoint at epoch 10
 
 trainer = Trainer(model=model, n_epochs=20,
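
Condensing the pattern this file demonstrates into one readable place: below is a hedged sketch of the full save-then-resume flow, using only the Trainer arguments visible in this diff (`save_every`, `save_dir`, `resume_from_dir`) and assuming `model`, `train_loader`, `optimizer`, `scheduler`, `train_loss`, `data_processor`, and `device` are defined as earlier in the example.

# Illustrative sketch of the checkpoint/resume flow shown in this file.
# First run: write a checkpoint to ./checkpoints after every epoch.
trainer = Trainer(model=model, n_epochs=20,
                  device=device,
                  data_processor=data_processor,
                  wandb_log=False,
                  eval_interval=3,
                  use_distributed=False,
                  verbose=True)
trainer.train(train_loader=train_loader,
              test_loaders={},
              optimizer=optimizer,
              scheduler=scheduler,
              regularizer=False,
              training_loss=train_loss,
              save_every=1,
              save_dir="./checkpoints")

# Second run (possibly a fresh process): pick training back up from the saved state.
trainer = Trainer(model=model, n_epochs=20,
                  device=device,
                  data_processor=data_processor,
                  wandb_log=False,
                  eval_interval=3,
                  use_distributed=False,
                  verbose=True)
trainer.train(train_loader=train_loader,
              test_loaders={},
              optimizer=optimizer,
              scheduler=scheduler,
              regularizer=False,
              training_loss=train_loss,
              resume_from_dir="./checkpoints")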

dev/_downloads/52640fe09fbb5b08e5a2370e57b3b066/checkpoint_FNO_darcy.ipynb

Lines changed: 5 additions & 5 deletions
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "\n# Training a TFNO on Darcy-Flow\n\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package\nto train a Tensorized Fourier-Neural Operator\n"
+    "\n# Checkpointing and loading training states\n\nIn this example, we demonstrate the Trainer's saving and loading functionality, which makes it easy to checkpoint and resume training states. \n"
    ]
   },
   {
@@ -15,7 +15,7 @@
    },
    "outputs": [],
    "source": [
-    "import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import TFNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'"
+    "import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import FNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'"
    ]
   },
   {
@@ -40,7 +40,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We create a tensorized FNO model\n\n"
+    "We create an FNO model\n\n"
    ]
   },
   {
@@ -51,7 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
+    "model = FNO(n_modes=(16, 16),\n in_channels=1, \n out_channels=1, \n hidden_channels=32, \n projection_channel_ratio=2, \n factorization='tucker', \n rank=0.42)\n\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
    ]
   },
   {
@@ -134,7 +134,7 @@
    },
    "outputs": [],
    "source": [
-    "trainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss, \n save_every=1,\n save_dir=\"./checkpoints\")\n\n\n# resume training from saved checkpoint at epoch 10\n\ntrainer = Trainer(model=model, n_epochs=20,\n device=device,\n data_processor=data_processor,\n wandb_log=False,\n eval_interval=3,\n use_distributed=False,\n verbose=True)\n\ntrainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss,\n resume_from_dir=\"./checkpoints\")"
+    "trainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss, \n save_every=1,\n save_dir=\"./checkpoints\")\n\n\n# .. resume_from_dir:\n# resume training from saved checkpoint at epoch 10\n\ntrainer = Trainer(model=model, n_epochs=20,\n device=device,\n data_processor=data_processor,\n wandb_log=False,\n eval_interval=3,\n use_distributed=False,\n verbose=True)\n\ntrainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss,\n resume_from_dir=\"./checkpoints\")"
    ]
   }
  ],
