Commit ea7d108

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 6c9b422 commit ea7d108

38 files changed: +2234 additions, -1461 deletions
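
The changes are the mechanical output of formatting hooks: string quotes normalized to double quotes and long call sites reflowed (black's style), imports split one per line and re-sorted (reorder-python-imports style), and trailing blank lines at end-of-file removed (end-of-file-fixer). The repository's actual hook configuration is not part of this commit, so the following `.pre-commit-config.yaml` is only a sketch of hooks that would produce fixes like these; the repo choices and `rev` pins are assumptions:

```yaml
# Hypothetical .pre-commit-config.yaml -- the repo's real config is not shown in this diff.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: end-of-file-fixer       # strips trailing blank lines, keeps one final newline
      - id: trailing-whitespace
  - repo: https://github.com/asottile/reorder-python-imports
    rev: v3.9.0
    hooks:
      - id: reorder-python-imports  # one import per line, stdlib/third-party grouping
  - repo: https://github.com/psf/black
    rev: 23.3.0
    hooks:
      - id: black                   # double quotes, call-site reflow
```

Running `pre-commit run --all-files` with a config along these lines would reproduce the kind of diff recorded below.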

examples/CNN_UTS/readme.md

Lines changed: 0 additions & 3 deletions

```diff
@@ -88,6 +88,3 @@ python main.py mode=eval
 - Configurable hyperparameters and data paths
 - Automatic saving/loading of models and prediction results
 - Multiple visualizations and statistical metric outputs
-
-
-
```

jointContribution/PINO/PINO_paddle/configs/pretrain/Darcy-pretrain.yaml

Lines changed: 0 additions & 2 deletions

```diff
@@ -30,5 +30,3 @@ log:
   project: 'PINO-Darcy-pretrain'
   group: 'gelu-pino'
   entity: hzzheng-pino
-
-
```

jointContribution/PINO/PINO_paddle/configs/pretrain/burgers-pretrain.yaml

Lines changed: 0 additions & 1 deletion

```diff
@@ -33,4 +33,3 @@ log:
   project: PINO-burgers-pretrain
   group: gelu-eqn
   entity: hzzheng-pino
-
```

jointContribution/PINO/PINO_paddle/configs/test/burgers.yaml

Lines changed: 0 additions & 2 deletions

```diff
@@ -23,5 +23,3 @@ test:
 log:
   project: 'PINO-burgers-test'
   group: 'gelu-test'
-
-
```

jointContribution/PINO/PINO_paddle/configs/test/darcy.yaml

Lines changed: 0 additions & 2 deletions

```diff
@@ -22,5 +22,3 @@ test:
 log:
   project: 'PINO-Darcy'
   group: 'default'
-
-
```
Lines changed: 23 additions & 19 deletions

```diff
@@ -1,40 +1,44 @@
 import os
 from argparse import ArgumentParser
+
 import requests
 from tqdm import tqdm
 
 _url_dict = {
-    'NS-T4000': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fft_Re500_T4000.npy',
-    'NS-Re500Part0': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part0.npy',
-    'NS-Re500Part1': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part1.npy',
-    'NS-Re500Part2': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part2.npy',
-    'NS-Re100Part0': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re100_T128_part0.npy',
-    'burgers': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/burgers_pino.mat',
-    'NS-Re500_T300_id0': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/NS-Re500_T300_id0.npy',
-    'NS-Re500_T300_id0-shuffle': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS-Re500_T300_id0-shuffle.npy',
-    'darcy-train': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/piececonst_r421_N1024_smooth1.mat',
-    'Re500-1_8s-800-pino-140k': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/checkpoints/Re500-1_8s-800-PINO-140000.pt',
+    "NS-T4000": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fft_Re500_T4000.npy",
+    "NS-Re500Part0": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part0.npy",
+    "NS-Re500Part1": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part1.npy",
+    "NS-Re500Part2": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part2.npy",
+    "NS-Re100Part0": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re100_T128_part0.npy",
+    "burgers": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/burgers_pino.mat",
+    "NS-Re500_T300_id0": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/NS-Re500_T300_id0.npy",
+    "NS-Re500_T300_id0-shuffle": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS-Re500_T300_id0-shuffle.npy",
+    "darcy-train": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/piececonst_r421_N1024_smooth1.mat",
+    "Re500-1_8s-800-pino-140k": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/checkpoints/Re500-1_8s-800-PINO-140000.pt",
 }
 
+
 def download_file(url, file_path):
-    print('Start downloading...')
+    print("Start downloading...")
     with requests.get(url, stream=True) as r:
         r.raise_for_status()
-        with open(file_path, 'wb') as f:
+        with open(file_path, "wb") as f:
             for chunk in tqdm(r.iter_content(chunk_size=256 * 1024 * 1024)):
                 f.write(chunk)
-    print('Complete')
+    print("Complete")
+
 
 def main(args):
     url = _url_dict[args.name]
-    file_name = url.split('/')[-1]
+    file_name = url.split("/")[-1]
     os.makedirs(args.outdir, exist_ok=True)
     file_path = os.path.join(args.outdir, file_name)
     download_file(url, file_path)
 
-if __name__ == '__main__':
-    parser = ArgumentParser(description='Parser for downloading assets')
-    parser.add_argument('--name', type=str, default='NS-T4000')
-    parser.add_argument('--outdir', type=str, default='../data')
+
+if __name__ == "__main__":
+    parser = ArgumentParser(description="Parser for downloading assets")
+    parser.add_argument("--name", type=str, default="NS-T4000")
+    parser.add_argument("--outdir", type=str, default="../data")
     args = parser.parse_args()
-    main(args)
+    main(args)
```

jointContribution/PINO/PINO_paddle/eval_operator.py

Lines changed: 70 additions & 54 deletions

```diff
@@ -1,78 +1,94 @@
-import yaml
+from argparse import ArgumentParser
 
 import paddle
+import yaml
+from models import FNO2d
+from models import FNO3d
 from paddle.io import DataLoader
-from models import FNO3d, FNO2d
-from train_utils import NSLoader, get_forcing, DarcyFlow
-
-from train_utils.eval_3d import eval_ns
+from train_utils import DarcyFlow
+from train_utils import NSLoader
+from train_utils import get_forcing
 from train_utils.eval_2d import eval_darcy
+from train_utils.eval_3d import eval_ns
 
-from argparse import ArgumentParser
 
 def test_3d(config):
-    device = 0 if paddle.cuda.is_available() else 'cpu'
-    data_config = config['data']
-    loader = NSLoader(datapath1=data_config['datapath'],
-                      nx=data_config['nx'], nt=data_config['nt'],
-                      sub=data_config['sub'], sub_t=data_config['sub_t'],
-                      N=data_config['total_num'],
-                      t_interval=data_config['time_interval'])
+    device = 0 if paddle.cuda.is_available() else "cpu"
+    data_config = config["data"]
+    loader = NSLoader(
+        datapath1=data_config["datapath"],
+        nx=data_config["nx"],
+        nt=data_config["nt"],
+        sub=data_config["sub"],
+        sub_t=data_config["sub_t"],
+        N=data_config["total_num"],
+        t_interval=data_config["time_interval"],
+    )
 
-    eval_loader = loader.make_loader(n_sample=data_config['n_sample'],
-                                     batch_size=config['test']['batchsize'],
-                                     start=data_config['offset'],
-                                     train=data_config['shuffle'])
-    model = FNO3d(modes1=config['model']['modes1'],
-                  modes2=config['model']['modes2'],
-                  modes3=config['model']['modes3'],
-                  fc_dim=config['model']['fc_dim'],
-                  layers=config['model']['layers']).to(device)
+    eval_loader = loader.make_loader(
+        n_sample=data_config["n_sample"],
+        batch_size=config["test"]["batchsize"],
+        start=data_config["offset"],
+        train=data_config["shuffle"],
+    )
+    model = FNO3d(
+        modes1=config["model"]["modes1"],
+        modes2=config["model"]["modes2"],
+        modes3=config["model"]["modes3"],
+        fc_dim=config["model"]["fc_dim"],
+        layers=config["model"]["layers"],
+    ).to(device)
 
-    if 'ckpt' in config['test']:
-        ckpt_path = config['test']['ckpt']
+    if "ckpt" in config["test"]:
+        ckpt_path = config["test"]["ckpt"]
         ckpt = paddle.load(ckpt_path)
-        model.load_state_dict(ckpt['model'])
-        print('Weights loaded from %s' % ckpt_path)
-    print(f'Resolution : {loader.S}x{loader.S}x{loader.T}')
+        model.load_state_dict(ckpt["model"])
+        print("Weights loaded from %s" % ckpt_path)
+    print(f"Resolution : {loader.S}x{loader.S}x{loader.T}")
     forcing = get_forcing(loader.S).to(device)
-    eval_ns(model,
-            loader,
-            eval_loader,
-            forcing,
-            config,
-            device=device)
+    eval_ns(model, loader, eval_loader, forcing, config, device=device)
+
 
 def test_2d(config):
-    data_config = config['data']
-    dataset = DarcyFlow(data_config['datapath'],
-                        nx=data_config['nx'], sub=data_config['sub'],
-                        offset=data_config['offset'], num=data_config['n_sample'])
-    dataloader = DataLoader(dataset, batch_size=config['test']['batchsize'], shuffle=False)
-    model = FNO2d(modes1=config['model']['modes1'],
-                  modes2=config['model']['modes2'],
-                  fc_dim=config['model']['fc_dim'],
-                  layers=config['model']['layers'],
-                  act=config['model']['act'])
+    data_config = config["data"]
+    dataset = DarcyFlow(
+        data_config["datapath"],
+        nx=data_config["nx"],
+        sub=data_config["sub"],
+        offset=data_config["offset"],
+        num=data_config["n_sample"],
+    )
+    dataloader = DataLoader(
+        dataset, batch_size=config["test"]["batchsize"], shuffle=False
+    )
+    model = FNO2d(
+        modes1=config["model"]["modes1"],
+        modes2=config["model"]["modes2"],
+        fc_dim=config["model"]["fc_dim"],
+        layers=config["model"]["layers"],
+        act=config["model"]["act"],
+    )
     # Load from checkpoint
-    if 'ckpt' in config['test']:
-        ckpt_path = config['test']['ckpt']
+    if "ckpt" in config["test"]:
+        ckpt_path = config["test"]["ckpt"]
         ckpt = paddle.load(ckpt_path)
-        model.set_dict(ckpt['model'])
-        print('Weights loaded from %s' % ckpt_path)
+        model.set_dict(ckpt["model"])
+        print("Weights loaded from %s" % ckpt_path)
     eval_darcy(model, dataloader, config)
 
-if __name__ == '__main__':
-    parser = ArgumentParser(description='Basic paser')
-    parser.add_argument('--config_path', type=str, help='Path to the configuration file')
-    parser.add_argument('--log', action='store_true', help='Turn on the wandb')
+
+if __name__ == "__main__":
+    parser = ArgumentParser(description="Basic paser")
+    parser.add_argument(
+        "--config_path", type=str, help="Path to the configuration file"
+    )
+    parser.add_argument("--log", action="store_true", help="Turn on the wandb")
     options = parser.parse_args()
     config_file = options.config_path
-    with open(config_file, 'r') as stream:
+    with open(config_file, "r") as stream:
         config = yaml.load(stream, yaml.FullLoader)
 
-    if 'name' in config['data'] and config['data']['name'] == 'Darcy':
+    if "name" in config["data"] and config["data"]["name"] == "Darcy":
         test_2d(config)
     else:
         test_3d(config)
-
```
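
eval_operator.py reads all of its settings from the YAML file passed via --config_path: the data section selects and subsamples the dataset, model sizes the FNO, and test controls batching and the optional checkpoint. Below is a minimal sketch of a Darcy test config wired to exactly the keys test_2d reads; apart from the log values visible in the configs/test/darcy.yaml diff above, every value is an illustrative assumption, not the repository's actual file:

```yaml
# Hypothetical Darcy eval config for `python eval_operator.py --config_path <this file>`.
# Key names mirror what test_2d reads; all values are assumed placeholders.
data:
  name: 'Darcy'          # anything else routes to test_3d instead
  datapath: '../data/piececonst_r421_N1024_smooth1.mat'
  nx: 421
  sub: 7                 # subsampling factor: 421 -> 61 grid points
  offset: 0
  n_sample: 100
model:
  modes1: 12
  modes2: 12
  fc_dim: 128
  layers: [64, 64, 64, 64, 64]
  act: 'gelu'
test:
  batchsize: 4
  ckpt: 'checkpoints/darcy.pdparams'  # optional; loaded via model.set_dict
log:
  project: 'PINO-Darcy'
  group: 'default'
```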

jointContribution/PINO/PINO_paddle/models/FCN.py

Lines changed: 15 additions & 14 deletions

```diff
@@ -1,50 +1,52 @@
 import paddle.nn as nn
 
+
 def linear_block(in_channel, out_channel):
-    block = nn.Sequential(
-        nn.Linear(in_channel, out_channel),
-        nn.Tanh()
-    )
+    block = nn.Sequential(nn.Linear(in_channel, out_channel), nn.Tanh())
     return block
 
+
 class FCNet(nn.Layer):
-    '''
+    """
     Fully connected layers with Tanh as nonlinearity
     Reproduced from PINNs Burger equation
-    '''
+    """
 
     def __init__(self, layers=[2, 10, 1]):
         super(FCNet, self).__init__()
 
-        fc_list = [linear_block(in_size, out_size)
-                   for in_size, out_size in zip(layers, layers[1:-1])]
+        fc_list = [
+            linear_block(in_size, out_size)
+            for in_size, out_size in zip(layers, layers[1:-1])
+        ]
         fc_list.append(nn.Linear(layers[-2], layers[-1]))
         self.fc = nn.Sequential(*fc_list)
 
     def forward(self, x):
         return self.fc(x)
 
+
 class DenseNet(nn.Layer):
     def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
         super(DenseNet, self).__init__()
 
         self.n_layers = len(layers) - 1
         assert self.n_layers >= 1
         if isinstance(nonlinearity, str):
-            if nonlinearity == 'tanh':
+            if nonlinearity == "tanh":
                 nonlinearity = nn.Tanh
-            elif nonlinearity == 'relu':
+            elif nonlinearity == "relu":
                 nonlinearity == nn.ReLU
             else:
-                raise ValueError(f'{nonlinearity} is not supported')
+                raise ValueError(f"{nonlinearity} is not supported")
         self.layers = nn.ModuleList()
 
         for j in range(self.n_layers):
-            self.layers.append(nn.Linear(layers[j], layers[j+1]))
+            self.layers.append(nn.Linear(layers[j], layers[j + 1]))
 
             if j != self.n_layers - 1:
                 if normalize:
-                    self.layers.append(nn.BatchNorm1d(layers[j+1]))
+                    self.layers.append(nn.BatchNorm1d(layers[j + 1]))
 
                 self.layers.append(nonlinearity())
 
@@ -56,4 +58,3 @@ def forward(self, x):
         x = l(x)
 
         return x
-
```
