Skip to content

Commit 0962c2b

Browse files
committed
initial push
1 parent d11d8b9 commit 0962c2b

File tree

6 files changed

+769
-1
lines changed

6 files changed

+769
-1
lines changed

LICENSE

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
*** License Agreement ***
2+
3+
Spatio-temporal Fourier Transformer for Long-term Dynamics Prediction (StFT) Copyright (c) 2025, The Regents of the University of California,
4+
through Lawrence Berkeley National Laboratory (subject to receipt of any
5+
required approvals from the U.S. Dept. of Energy). All rights reserved.
6+
Redistribution and use in source and binary forms, with or without
7+
modification, are permitted provided that the following conditions are met:
8+
9+
(1) Redistributions of source code must retain the above copyright notice,
10+
this list of conditions and the following disclaimer.
11+
12+
(2) Redistributions in binary form must reproduce the above copyright
13+
notice, this list of conditions and the following disclaimer in the
14+
documentation and/or other materials provided with the distribution.
15+
16+
(3) Neither the name of the University of California, Lawrence Berkeley
17+
National Laboratory, U.S. Dept. of Energy nor the names of its contributors
18+
may be used to endorse or promote products derived from this software
19+
without specific prior written permission.
20+
21+
22+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24+
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25+
26+
You are under no obligation whatsoever to provide any bug fixes, patches,
27+
or upgrades to the features, functionality or performance of the source
28+
code ("Enhancements") to anyone; however, if you choose to make your
29+
Enhancements available either publicly, or directly to Lawrence Berkeley
30+
National Laboratory, without imposing a separate written license agreement
31+
for such Enhancements, then you hereby grant the following license: a
32+
non-exclusive, royalty-free perpetual license to install, use, modify,
33+
prepare derivative works, incorporate into other computer software,
34+
distribute, and sublicense such enhancements or derivative works thereof,
35+
in binary and source code form.

README.md

Lines changed: 31 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,31 @@
1-
# StFT
1+
# StFT: Spatio-temporal-Fourier-Transformer-for-Long-term-Dynamics-Prediction
2+
3+
4+
## Training StFT
5+
6+
To train StFT on the plasma MHD dataset:
7+
```bash
8+
python train.py
9+
```
10+
By default, the results will be saved to the `ray_results` directory under your home directory. To customize the save directory, change the `save_path` variable in `train.py`.
11+
12+
See the [LICENSE file](LICENSE) for copyright and licensing information.
13+
14+
## About
15+
*** Copyright Notice ***
16+
17+
Spatio-temporal Fourier Transformer for Long-term Dynamics Prediction (StFT) Copyright (c) 2025, The Regents of the University of California,
18+
through Lawrence Berkeley National Laboratory (subject to receipt of any
19+
required approvals from the U.S. Dept. of Energy). All rights reserved.
20+
21+
If you have questions about your rights to use or distribute this software,
22+
please contact Berkeley Lab's Intellectual Property Office at
23+
IPO@lbl.gov.
24+
25+
NOTICE. This Software was developed under funding from the U.S. Department
26+
of Energy and the U.S. Government consequently retains certain rights. As
27+
such, the U.S. Government has been granted for itself and others acting on
28+
its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the
29+
Software to reproduce, distribute copies to the public, prepare derivative
30+
works, and perform publicly and display publicly, and to permit others to do so.
31+

StFT_3D.py

Lines changed: 272 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,272 @@
1+
import torch
2+
import torch.nn as nn
3+
import torch.nn.functional as F
4+
from model_utils import TransformerLayer, get_2d_sincos_pos_embed
5+
6+
7+
class StFTBlcok(nn.Module):
    """One scale of the Spatio-temporal Fourier Transformer (StFT).

    NOTE(review): the class name looks like a typo for "StFTBlock", but it is
    kept as-is because ``StFT`` below instantiates it under this exact name.

    The block runs two parallel branches over a batch of spatial patches:

    1. A *Fourier* branch: lift channels, take an FFT over (time, patch-H,
       patch-W), keep the low ``modes``, run a transformer over the patch
       tokens in frequency space, project back to complex coefficients, and
       inverse-FFT to a per-patch spatial field.
    2. A *token* branch: flatten each patch to a token, add positional
       embeddings, run a transformer stack, and project to the output shape.

    The two branch outputs are summed and returned.
    """

    def __init__(
        self,
        cond_time,          # number of conditioning time steps packed into the input channels
        freq_in_channels,   # per-time-step variable count (last 2 assumed to be grid coords — TODO confirm)
        in_dim,             # flattened per-patch token size for the token branch
        out_dim,            # token-branch head output size (out_channel * patch_h * patch_w)
        out_channel,        # number of output channels per patch
        num_patches,        # number of patches per sample (grid_h * grid_w)
        modes,              # (modes_h, modes_w) Fourier modes kept after the FFT
        lift_channel=32,    # channel width the Fourier branch lifts to
        dim=256,            # transformer embedding dimension (both branches)
        depth=2,            # number of transformer layers per branch
        num_heads=1,
        mlp_dim=256,
        act="relu",
        grid_size=(4, 4),   # patch grid used to build the 2-D sin/cos positional embedding
        layer_indx=0,       # scale index; also the number of extra (previous-scale) time steps appended
    ):
        super(StFTBlcok, self).__init__()
        self.layer_indx = layer_indx
        self.cond_time = cond_time
        self.freq_in_channels = freq_in_channels
        self.modes = modes
        self.out_channel = out_channel
        self.lift_channel = lift_channel
        # Token branch: embed each flattened patch into the transformer dim.
        self.token_embed = nn.Linear(in_dim, dim)
        # Fixed (non-trainable) positional embeddings; the random init is
        # immediately overwritten with 2-D sin/cos embeddings below.
        self.pos_embed = nn.Parameter(
            torch.randn(1, num_patches, dim), requires_grad=False
        )
        self.pos_embed_f = nn.Parameter(
            torch.randn(1, num_patches, dim), requires_grad=False
        )
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], grid_size)
        pos_embed_f = get_2d_sincos_pos_embed(self.pos_embed_f.shape[-1], grid_size)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
        self.pos_embed_f.data.copy_(
            torch.from_numpy(pos_embed_f).float().unsqueeze(0)
        )
        # Separate transformer stacks for the token branch (encoder_layers)
        # and the Fourier branch (encoder_layers_f).
        self.encoder_layers = nn.ModuleList(
            [TransformerLayer(dim, num_heads, mlp_dim, act) for _ in range(depth)]
        )
        self.encoder_layers_f = nn.ModuleList(
            [TransformerLayer(dim, num_heads, mlp_dim, act) for _ in range(depth)]
        )
        self.head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, out_dim))
        # Fourier branch: `p` lifts variables -> lift_channel; `linear` maps the
        # flattened (real ++ imag) truncated spectrum into the transformer dim.
        # The time extent of the spectrum is cond_time + layer_indx (see forward),
        # and the trailing *2 accounts for the real/imag concatenation.
        self.p = nn.Linear(freq_in_channels, lift_channel)
        self.linear = nn.Linear(
            modes[0] * modes[1] * (self.cond_time + self.layer_indx) * lift_channel * 2,
            dim,
        )
        # `q` predicts the complex spectrum (real ++ imag) of a single output
        # time step; `down` projects lift_channel back to out_channel.
        self.q = nn.Linear(dim, modes[0] * modes[1] * 1 * lift_channel * 2)
        self.down = nn.Linear(lift_channel, out_channel)

    def forward(self, x):
        """Run both branches over a batch of patches.

        Args:
            x: tensor of shape (n, l, C, ph, pw) where n = batch, l = number of
               patches, ph/pw = patch height/width, and C packs
               cond_time * freq_in_channels conditioning channels followed by
               layer_indx * (freq_in_channels - 2) channels carried over from
               the previous scale — TODO confirm against StFT.forward.

        Returns:
            Tensor of shape (n, l, out_channel, ph, pw): token-branch output
            plus Fourier-branch output.
        """
        x_copy = x
        n, l, _, ph, pw = x.shape
        # Split the packed channels into the conditioning part and the part
        # appended by earlier scales.
        x_or = x[:, :, : self.cond_time * self.freq_in_channels]
        x_added = x[:, :, (self.cond_time * self.freq_in_channels) :]
        # (n, l, T*V, ph, pw) -> (n, l, ph, pw, T, V) with T = cond_time.
        x_or = x_or.permute(0, 1, 3, 4, 2).view(n, l, ph, pw, self.cond_time, self.freq_in_channels)
        # The last two variables of the first time step (presumably the x/y
        # grid coordinates — TODO confirm) are replicated onto the appended
        # steps, which lack them.
        grid_dup = x_or[:, :, :, :, :1, -2:].repeat(1, 1, 1, 1, self.layer_indx, 1)

        x_added = x_added.permute(0, 1, 3, 4, 2).view(n, l, ph, pw, self.layer_indx, self.freq_in_channels - 2)
        x_added = torch.cat((x_added, grid_dup), axis=-1)
        # Concatenate along the time axis: T becomes cond_time + layer_indx.
        x = torch.cat((x_or, x_added), axis=-2)
        # Lift variables to lift_channel.
        x = self.p(x)
        v, t = x.shape[-1], x.shape[-2]
        # -> (n*l, lift_channel, T, ph, pw) for a 3-D FFT over (T, ph, pw).
        x = x.permute(0, 1, 5, 4, 2, 3).view(n * l, v, t, ph, pw)
        # Real FFT over (time, ph, pw); keep only the leading spatial modes
        # (time axis is kept in full).
        x_ft = torch.fft.rfftn(x, dim=[2, 3, 4])[
            :, :, :, : self.modes[0], : self.modes[1]
        ]
        # Flatten the truncated spectrum and concatenate real/imag parts into
        # one token per patch.
        x_ft_real = (x_ft.real).flatten(1)
        x_ft_imag = (x_ft.imag).flatten(1)
        x_ft_real = x_ft_real.view(n, l, -1)
        x_ft_imag = x_ft_imag.view(n, l, -1)
        x_ft_real_imag = torch.cat((x_ft_real, x_ft_imag), axis=-1)
        x = self.linear(x_ft_real_imag)
        x = x + self.pos_embed_f
        for layer in self.encoder_layers_f:
            x = layer(x)
        # Predict the output spectrum (single time step) and split it back
        # into real and imaginary halves.
        x_real, x_imag = self.q(x).split(
            self.modes[0] * self.modes[1] * self.lift_channel, dim=-1
        )
        x_real = x_real.reshape(n * l, -1, 1, self.modes[0], self.modes[1])
        x_imag = x_imag.reshape(n * l, -1, 1, self.modes[0], self.modes[1])
        x_complex = torch.complex(x_real, x_imag)
        # Zero-padded full-size spectrum; only the low modes are filled.
        out_ft = torch.zeros(
            n * l,
            self.lift_channel,
            1,
            ph,
            pw // 2 + 1,
            dtype=torch.cfloat,
            device=x.device,
        )
        out_ft[:, :, :, : self.modes[0], : self.modes[1]] = x_complex
        # Back to the spatial domain: (n*l, lift_channel, 1, ph, pw).
        x = torch.fft.irfftn(out_ft, s=(1, ph, pw))
        x = x.permute(0, 3, 4, 1, 2).view(n * l, ph, pw, -1)
        # Project lift_channel -> out_channel.
        x = self.down(x)
        c = x.shape[-1]
        x_f = x.permute(0, 3, 1, 2).view(n, l, c, ph, pw)
        # ---- Token branch, starting again from the untouched input. ----
        x = x_copy
        _, _, _, ph, pw = x.shape
        x = x.flatten(2)
        x = self.token_embed(x) + self.pos_embed
        for layer in self.encoder_layers:
            x = layer(x)
        x = self.head(x)
        x = x.view(n, l, self.out_channel, ph, pw)
        # Sum the two branches.
        x = x + x_f
        return x
118+
119+
120+
class StFT(nn.Module):
    """Multi-scale Spatio-temporal Fourier Transformer.

    The input field is processed at a sequence of patch scales. At each
    scale the field is padded, cut into (optionally overlapping) patches,
    run through a ``StFTBlcok``, and the per-patch predictions are folded
    back into a full field (averaging overlap regions). Each scale's output
    is appended (detached) to the original input channels before the next,
    finer scale, so later scales refine the residual of earlier ones.
    """

    def __init__(
        self,
        cond_time,           # number of conditioning time steps in the input
        num_vars,            # variables per time step (including the 2 grid channels — TODO confirm)
        patch_sizes,         # list of (p1, p2) patch sizes, one per scale
        overlaps,            # list of (overlap_h, overlap_w), one per scale
        in_channels,         # total input channels after grid concat (cond_time * num_vars)
        out_channels,        # predicted channels per scale
        modes,               # list of (modes_h, modes_w) Fourier modes, one per scale
        img_size=(50, 50),   # spatial size of the full field
        lift_channel=32,
        dim=128,
        vit_depth=3,         # transformer depth: an int (same at every scale) or a per-scale sequence
        num_heads=1,
        mlp_dim=128,
        act="relu",
    ):
        super(StFT, self).__init__()

        blocks = []
        self.cond_time = cond_time
        self.num_vars = num_vars
        self.patch_sizes = patch_sizes
        self.overlaps = overlaps
        for depth, (p1, p2) in enumerate(patch_sizes):
            H, W = img_size
            cur_modes = modes[depth]
            # FIX: the original always indexed vit_depth[depth], which crashes
            # with the documented integer default (vit_depth=3). Accept both an
            # int (same depth everywhere) and a per-scale sequence.
            cur_depth = (
                vit_depth[depth]
                if isinstance(vit_depth, (list, tuple))
                else vit_depth
            )
            overlap_h, overlap_w = overlaps[depth]

            step_h = p1 - overlap_h
            step_w = p2 - overlap_w

            # Pad so the sliding window tiles the padded field exactly.
            pad_h = (step_h - (H - p1) % step_h) % step_h
            pad_w = (step_w - (W - p2) % step_w) % step_w
            H_pad = H + pad_h
            W_pad = W + pad_w

            num_patches_h = (H_pad - p1) // step_h + 1
            num_patches_w = (W_pad - p2) // step_w + 1
            num_patches = num_patches_h * num_patches_w

            # Scales after the first see the original input channels plus the
            # previous scale's out_channels, and carry layer_indx=1 (exactly
            # one appended prediction, since each scale re-concatenates onto
            # the ORIGINAL input — see forward()).
            extra_in = 0 if depth == 0 else out_channels
            blocks.append(
                StFTBlcok(
                    cond_time,
                    num_vars,
                    p1 * p2 * (in_channels + extra_in),
                    out_channels * p1 * p2,
                    out_channels,
                    num_patches,
                    cur_modes,
                    lift_channel=lift_channel,
                    dim=dim,
                    depth=cur_depth,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    act=act,
                    grid_size=(num_patches_h, num_patches_w),
                    layer_indx=0 if depth == 0 else 1,
                )
            )

        self.blocks = nn.ModuleList(blocks)

    def forward(self, x, grid):
        """Run every scale and return each scale's full-field prediction.

        Args:
            x:    (batch, time, vars, H, W) input sequence — TODO confirm
                  ordering against the training script.
            grid: (2, H, W) coordinate grid appended to every time step.

        Returns:
            List of tensors, one per scale, each (batch, out_channels, H, W).
        """
        # Append the grid to each time step, then fold (time, vars) into one
        # channel axis: (n, T*V, H, W).
        grid_dup = grid[None, :, :, :].repeat(x.shape[0], x.shape[1], 1, 1, 1)
        x = torch.cat((x, grid_dup), axis=2)
        x = x.view(x.shape[0], x.shape[1] * x.shape[2], x.shape[3], x.shape[4])

        layer_outputs = []
        restore_params = []
        patches = x
        or_patches = x

        for depth in range(len(self.patch_sizes)):
            p1, p2 = self.patch_sizes[depth]
            overlap_h, overlap_w = self.overlaps[depth]

            step_h = p1 - overlap_h
            step_w = p2 - overlap_w

            # Symmetric padding so the sliding window tiles exactly.
            pad_h = (step_h - (patches.shape[2] - p1) % step_h) % step_h
            pad_w = (step_w - (patches.shape[3] - p2) % step_w) % step_w
            padding = (
                pad_w // 2,
                pad_w - pad_w // 2,
                pad_h // 2,
                pad_h - pad_h // 2,
            )

            patches = F.pad(patches, padding, mode="constant", value=0)
            _, _, H_pad, W_pad = patches.shape

            h = (H_pad - p1) // step_h + 1
            w = (W_pad - p2) // step_w + 1

            restore_params.append(
                (p1, p2, step_h, step_w, padding, H_pad, W_pad, h, w)
            )

            # Cut overlapping patches: (n, c, h, w, p1, p2).
            patches = patches.unfold(2, p1, step_h).unfold(3, p2, step_w)
            # FIX: the original unpacked `x.shape` — a 4-D tensor — into six
            # names, which raises ValueError at runtime. The 6-D patch tensor
            # is the one whose shape is needed here.
            n, c, h, w, ph, pw = patches.shape
            patches = patches.permute(0, 2, 3, 1, 4, 5).view(n, h * w, c, ph, pw)
            processed_patches = self.blocks[depth](patches)

            # FIX: the block outputs `out_channel` channels (see
            # StFTBlcok.forward's final view), which generally differs from
            # the input channel count `c` the original reused below.
            c_out = processed_patches.shape[2]

            patches = processed_patches.permute(0, 2, 1, 3, 4).view(
                n, c_out, h, w, ph, pw
            )
            # Reassemble the full padded field from the patches.
            output = F.fold(
                torch.reshape(
                    patches.permute(0, 1, 4, 5, 2, 3), (n, c_out * ph * pw, h * w)
                ),
                output_size=(H_pad, W_pad),
                kernel_size=(p1, p2),
                stride=(step_h, step_w),
            )

            # Count how many patches cover each pixel, to average overlaps.
            overlap_count = F.fold(
                torch.reshape(
                    torch.ones_like(patches).permute(0, 1, 4, 5, 2, 3),
                    (n, c_out * ph * pw, h * w),
                ),
                output_size=(H_pad, W_pad),
                kernel_size=(p1, p2),
                stride=(step_h, step_w),
            )
            output = output / overlap_count

            # Crop the padding back off.
            output = output[
                :,
                :,
                padding[2] : H_pad - padding[3],
                padding[0] : W_pad - padding[1],
            ]
            layer_outputs.append(output)

            # Next scale sees the original input plus this scale's prediction,
            # detached so gradients do not flow across scales.
            patches = torch.cat((or_patches, output.detach().clone()), axis=1)

        return layer_outputs

0 commit comments

Comments
 (0)