Skip to content

Commit aa963b8

Browse files
[Hackathon 6th Code Camp No.15] support neuraloperator (#867)
* add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator * add-neuraloperator
1 parent ea69876 commit aa963b8

22 files changed

+6017
-0
lines changed

docs/zh/api/arch.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,5 +29,10 @@
2929
- ChipDeepONets
3030
- AutoEncoder
3131
- CuboidTransformer
32+
- SFNONet
33+
- UNONet
34+
- TFNO1dNet
35+
- TFNO2dNet
36+
- TFNO3dNet
3237
show_root_heading: true
3338
heading_level: 3

docs/zh/api/data/dataset.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,4 +25,6 @@
2525
- RadarDataset
2626
- build_dataset
2727
- DGMRDataset
28+
- DarcyFlowDataset
29+
- SphericalSWEDataset
2830
show_root_heading: true
Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
hydra:
2+
run:
3+
# dynamic output directory according to running time and override name
4+
dir: outputs_sfno_pretrain
5+
job:
6+
name: ${mode} # name of logfile
7+
chdir: false # keep current working directory unchanged
8+
config:
9+
override_dirname:
10+
exclude_keys:
11+
- TRAIN.checkpoint_path
12+
- TRAIN.pretrained_model_path
13+
- EVAL.pretrained_model_path
14+
- mode
15+
- output_dir
16+
- log_freq
17+
callbacks:
18+
init_callback:
19+
_target_: ppsci.utils.callbacks.InitCallback
20+
sweep:
21+
# output directory for multirun
22+
dir: ${hydra.run.dir}
23+
subdir: ./
24+
25+
# general settings
26+
mode: train # running mode: train/eval/export/infer
27+
seed: 666
28+
output_dir: ${hydra:run.dir}
29+
log_freq: 20
30+
31+
# set train and evaluate data path
32+
FILE_PATH: ./datasets/SWE/
33+
34+
# dataset setting
35+
DATASET:
36+
label_keys: ["y"]
37+
train_resolution: "32x64"
38+
test_resolutions: ["32x64","64x128"]
39+
40+
41+
# model settings
42+
MODEL:
43+
input_keys: ["x"]
44+
output_keys: ["y"]
45+
in_channels: 3
46+
out_channels: 3
47+
n_modes: [32, 32]
48+
hidden_channels: 32
49+
projection_channels: 64
50+
n_layers: 4
51+
52+
use_mlp: false
53+
mlp:
54+
expansion: 0.5
55+
dropout: 0.0
56+
norm: 'group_norm'
57+
fno_skip: "linear"
58+
mlp_skip: "soft-gating"
59+
separable: false
60+
preactivation: false
61+
factorization: null
62+
rank: 1.0
63+
joint_factorization: false
64+
fixed_rank_modes: null
65+
implementation: "factorized"
66+
domain_padding: null #0.078125
67+
domain_padding_mode: "one-sided" #symmetric
68+
fft_norm: 'forward'
69+
patching_levels: 0
70+
71+
72+
# training settings
73+
TRAIN:
74+
epochs: 300
75+
save_freq: 20
76+
eval_during_train: true
77+
eval_freq: 1
78+
lr_scheduler:
79+
epochs: ${TRAIN.epochs}
80+
learning_rate: 5e-3
81+
by_epoch: True
82+
type: "StepDecay"
83+
step_size: 60
84+
gamma: 0.5
85+
# ReduceOnPlateau only
86+
scheduler_patience: 5
87+
88+
# CosineAnnealingLR
89+
scheduler_T_max: 30
90+
wd: 1e-4
91+
batch_size: 4
92+
pretrained_model_path: null
93+
checkpoint_path: null
94+
95+
96+
# evaluation settings
97+
EVAL:
98+
pretrained_model_path: ./outputs_sfno_pretrain/checkpoints/best_model.pdparams
99+
compute_metric_by_batch: false
100+
eval_with_no_grad: true
101+
batch_size: 10
102+
103+
INFER:
104+
pretrained_model_path: ./outputs_sfno_pretrain/checkpoints/best_model.pdparams
105+
export_path: ./inference/sfno/sfno_darcyflow
106+
pdmodel_path: ${INFER.export_path}.pdmodel
107+
pdiparams_path: ${INFER.export_path}.pdiparams
108+
device: gpu
109+
engine: native
110+
precision: fp32
111+
onnx_path: ${INFER.export_path}.onnx
112+
ir_optim: true
113+
min_subgraph_size: 10
114+
gpu_mem: 4000
115+
gpu_id: 0
116+
max_batch_size: 16
117+
num_cpu_threads: 4
118+
batch_size: 1
119+
data_path: ./datasets/SWE/test_SWE_32x64.npy
Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
1+
hydra:
2+
run:
3+
# dynamic output directory according to running time and override name
4+
dir: outputs_tfno_pretrain
5+
job:
6+
name: ${mode} # name of logfile
7+
chdir: false # keep current working directory unchanged
8+
config:
9+
override_dirname:
10+
exclude_keys:
11+
- TRAIN.checkpoint_path
12+
- TRAIN.pretrained_model_path
13+
- EVAL.pretrained_model_path
14+
- mode
15+
- output_dir
16+
- log_freq
17+
callbacks:
18+
init_callback:
19+
_target_: ppsci.utils.callbacks.InitCallback
20+
sweep:
21+
# output directory for multirun
22+
dir: ${hydra.run.dir}
23+
subdir: ./
24+
25+
# general settings
26+
mode: train # running mode: train/eval/export/infer
27+
seed: 666
28+
output_dir: ${hydra:run.dir}
29+
log_freq: 20
30+
31+
# set train and evaluate data path
32+
FILE_PATH: ./datasets/darcyflow/
33+
34+
# dataset setting
35+
DATASET:
36+
label_keys: ["y"]
37+
train_resolution: 16
38+
test_resolutions: [16,32]
39+
grid_boundaries: [[0, 1], [0, 1]]
40+
positional_encoding: True
41+
encode_input: False
42+
encode_output: False
43+
encoding: "channel-wise"
44+
channel_dim: 1
45+
46+
# model settings
47+
MODEL:
48+
input_keys: ["x"]
49+
output_keys: ["y"]
50+
n_modes_height: 16
51+
n_modes_width: 16
52+
in_channels: 3
53+
out_channels: 1
54+
hidden_channels: 32
55+
projection_channels: 64
56+
n_layers: 4
57+
58+
use_mlp: False
59+
mlp:
60+
expansion: 0.5
61+
dropout: 0.0
62+
norm: "group_norm"
63+
fno_skip: "linear"
64+
mlp_skip: "soft-gating"
65+
separable: false
66+
preactivation: false
67+
factorization: 'dense'
68+
rank: 1.0
69+
joint_factorization: false
70+
fixed_rank_modes: null
71+
implementation: "factorized"
72+
domain_padding: null #0.078125
73+
domain_padding_mode: "one-sided" #symmetric
74+
fft_norm: 'forward'
75+
patching_levels: 0
76+
77+
78+
# training settings
79+
TRAIN:
80+
epochs: 300
81+
save_freq: 20
82+
eval_during_train: true
83+
eval_freq: 1
84+
training_loss: "h1"
85+
lr_scheduler:
86+
epochs: ${TRAIN.epochs}
87+
learning_rate: 5e-3
88+
by_epoch: True
89+
type: "StepDecay"
90+
step_size: 60
91+
gamma: 0.5
92+
# ReduceOnPlateau only
93+
scheduler_patience: 5
94+
95+
# CosineAnnealingLR
96+
scheduler_T_max: 500
97+
wd: 1.0e-4
98+
batch_size: 16
99+
pretrained_model_path: null
100+
checkpoint_path: null
101+
102+
103+
# evaluation settings
104+
EVAL:
105+
pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams
106+
compute_metric_by_batch: false
107+
eval_with_no_grad: true
108+
batch_size: 16
109+
110+
INFER:
111+
pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams
112+
export_path: ./inference/tfno/tfno_darcyflow
113+
pdmodel_path: ${INFER.export_path}.pdmodel
114+
pdiparams_path: ${INFER.export_path}.pdiparams
115+
device: gpu
116+
engine: native
117+
precision: fp32
118+
onnx_path: ${INFER.export_path}.onnx
119+
ir_optim: true
120+
min_subgraph_size: 10
121+
gpu_mem: 4000
122+
gpu_id: 0
123+
max_batch_size: 16
124+
num_cpu_threads: 1
125+
batch_size: 1
126+
data_path: ./datasets/darcyflow/darcy_test_16.npy
127+
grid_boundaries: [[0, 1], [0, 1]]
Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
hydra:
2+
run:
3+
# dynamic output directory according to running time and override name
4+
dir: outputs_uno_pretrain
5+
job:
6+
name: ${mode} # name of logfile
7+
chdir: false # keep current working directory unchanged
8+
config:
9+
override_dirname:
10+
exclude_keys:
11+
- TRAIN.checkpoint_path
12+
- TRAIN.pretrained_model_path
13+
- EVAL.pretrained_model_path
14+
- mode
15+
- output_dir
16+
- log_freq
17+
callbacks:
18+
init_callback:
19+
_target_: ppsci.utils.callbacks.InitCallback
20+
sweep:
21+
# output directory for multirun
22+
dir: ${hydra.run.dir}
23+
subdir: ./
24+
25+
# general settings
26+
mode: train # running mode: train/eval/export/infer
27+
seed: 666
28+
output_dir: ${hydra:run.dir}
29+
log_freq: 20
30+
31+
# set train and evaluate data path
32+
FILE_PATH: ./datasets/darcyflow/
33+
34+
# dataset setting
35+
DATASET:
36+
label_keys: ["y"]
37+
train_resolution: 16
38+
test_resolutions: [16,32]
39+
grid_boundaries: [[0, 1], [0, 1]]
40+
positional_encoding: True
41+
encode_input: False
42+
encode_output: False
43+
encoding: "channel-wise"
44+
channel_dim: 1
45+
46+
# model settings
47+
MODEL:
48+
input_keys: ["x"]
49+
output_keys: ["y"]
50+
in_channels: 3
51+
out_channels: 1
52+
hidden_channels: 64
53+
projection_channels: 64
54+
n_layers: 5
55+
uno_out_channels: [32,64,64,64,32]
56+
uno_n_modes: [[16,16],[8,8],[8,8],[8,8],[16,16]]
57+
uno_scalings: [[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]]
58+
horizontal_skips_map: null
59+
incremental_n_modes: null
60+
61+
use_mlp: false
62+
mlp:
63+
expansion: 0.5
64+
dropout: 0.0
65+
norm: "group_norm"
66+
fno_skip: "linear"
67+
horizontal_skip: "linear"
68+
mlp_skip: "soft-gating"
69+
separable: false
70+
preactivation: false
71+
factorization: null
72+
rank: 1.0
73+
joint_factorization: false
74+
fixed_rank_modes: null
75+
implementation: "factorized"
76+
domain_padding: 0.2 #0.078125
77+
domain_padding_mode: "one-sided" #symmetric
78+
fft_norm: 'forward'
79+
patching_levels: 0
80+
81+
82+
# training settings
83+
TRAIN:
84+
epochs: 300
85+
save_freq: 20
86+
eval_during_train: true
87+
eval_freq: 1
88+
training_loss: "h1"
89+
lr_scheduler:
90+
epochs: ${TRAIN.epochs}
91+
learning_rate: 5e-3
92+
by_epoch: True
93+
type: "StepDecay"
94+
step_size: 60
95+
gamma: 0.5
96+
# ReduceOnPlateau only
97+
scheduler_patience: 5
98+
99+
# CosineAnnealingLR
100+
scheduler_T_max: 30
101+
wd: 1.0e-4
102+
batch_size: 16
103+
# /home/aistudio/darcy_flow_small.pdparams
104+
pretrained_model_path: null
105+
checkpoint_path: null
106+
107+
108+
# evaluation settings
109+
EVAL:
110+
pretrained_model_path: ./outputs_uno_pretrain/checkpoints/best_model.pdparams
111+
compute_metric_by_batch: false
112+
eval_with_no_grad: true
113+
batch_size: 16
114+
115+
INFER:
116+
pretrained_model_path: ./outputs_uno_pretrain/checkpoints/best_model.pdparams
117+
export_path: ./inference/uno/uno_darcyflow
118+
pdmodel_path: ${INFER.export_path}.pdmodel
119+
pdiparams_path: ${INFER.export_path}.pdiparams
120+
device: gpu
121+
engine: native
122+
precision: fp32
123+
onnx_path: ${INFER.export_path}.onnx
124+
ir_optim: true
125+
min_subgraph_size: 10
126+
gpu_mem: 4000
127+
gpu_id: 0
128+
max_batch_size: 16
129+
num_cpu_threads: 4
130+
batch_size: 1
131+
data_path: ./datasets/darcyflow/darcy_test_16.npy
132+
grid_boundaries: [[0, 1], [0, 1]]

0 commit comments

Comments
 (0)