|
| 1 | +import torch |
| 2 | +import torch.nn as nn |
| 3 | +import torch.nn.functional as F |
| 4 | +from model_utils import TransformerLayer, get_2d_sincos_pos_embed |
| 5 | + |
| 6 | + |
class StFTBlcok(nn.Module):
    """One StFT stage: a frequency-domain (FFT) branch fused with a spatial
    transformer branch over a sequence of image patches.

    NOTE(review): "Blcok" is a typo for "Block"; the spelling is kept because
    ``StFT`` below instantiates the class under this name.

    ``forward`` expects patch tokens of shape ``(n, l, C, ph, pw)`` where
    ``l`` is the patch count and the channel axis packs
    ``cond_time * freq_in_channels`` conditioning channels followed by
    ``layer_indx * (freq_in_channels - 2)`` channels appended by earlier
    stages (see ``StFT.forward``); it returns ``(n, l, out_channel, ph, pw)``.
    """

    def __init__(
        self,
        cond_time,          # conditioning time steps packed into the channel axis
        freq_in_channels,   # variables per time step (last 2 appear to be grid coords — see forward)
        in_dim,             # flattened patch size fed to token_embed (C * ph * pw)
        out_dim,            # head output size (out_channel * ph * pw)
        out_channel,        # channels of the per-patch output
        num_patches,        # token count l; must match forward's patch count
        modes,              # (modes_h, modes_w): retained low-frequency FFT modes
        lift_channel=32,    # width of the lifted variable space in the FFT branch
        dim=256,            # transformer embedding width
        depth=2,            # transformer layers per branch
        num_heads=1,
        mlp_dim=256,
        act="relu",
        grid_size=(4, 4),   # patch-grid shape for the 2-D sin/cos positional embedding
        layer_indx=0,       # 0 for the first stage; >0 when prior stage outputs are appended
    ):
        super(StFTBlcok, self).__init__()
        self.layer_indx = layer_indx
        self.cond_time = cond_time
        self.freq_in_channels = freq_in_channels
        self.modes = modes
        self.out_channel = out_channel
        self.lift_channel = lift_channel
        # Spatial branch: each whole (flattened) patch becomes one token.
        self.token_embed = nn.Linear(in_dim, dim)
        # Fixed (non-trainable) positional embeddings; the random init is
        # immediately overwritten with 2-D sin/cos values below.
        self.pos_embed = nn.Parameter(
            torch.randn(1, num_patches, dim), requires_grad=False
        )
        self.pos_embed_f = nn.Parameter(
            torch.randn(1, num_patches, dim), requires_grad=False
        )
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], grid_size)
        pos_embed_f = get_2d_sincos_pos_embed(self.pos_embed_f.shape[-1], grid_size)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
        self.pos_embed_f.data.copy_(
            torch.from_numpy(pos_embed_f).float().unsqueeze(0)
        )
        # Two independent transformer stacks: one per branch.
        self.encoder_layers = nn.ModuleList(
            [TransformerLayer(dim, num_heads, mlp_dim, act) for _ in range(depth)]
        )
        self.encoder_layers_f = nn.ModuleList(
            [TransformerLayer(dim, num_heads, mlp_dim, act) for _ in range(depth)]
        )
        self.head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, out_dim))
        # FFT branch: lift variables, embed retained complex modes (real+imag,
        # hence the factor 2), and project back to modes for one output step.
        self.p = nn.Linear(freq_in_channels, lift_channel)
        self.linear = nn.Linear(
            modes[0] * modes[1] * (self.cond_time + self.layer_indx) * lift_channel * 2,
            dim,
        )
        self.q = nn.Linear(dim, modes[0] * modes[1] * 1 * lift_channel * 2)
        self.down = nn.Linear(lift_channel, out_channel)

    def forward(self, x):
        """Run both branches on ``x`` of shape (n, l, C, ph, pw) and return
        their sum, shaped (n, l, out_channel, ph, pw)."""
        x_copy = x  # kept for the spatial (token) branch below
        n, l, _, ph, pw = x.shape
        # Split channels: the first cond_time*freq_in_channels are the original
        # conditioning frames; the remainder was appended by earlier stages.
        x_or = x[:, :, : self.cond_time * self.freq_in_channels]
        x_added = x[:, :, (self.cond_time * self.freq_in_channels) :]
        x_or = x_or.permute(0, 1, 3, 4, 2).view(n, l, ph, pw, self.cond_time, self.freq_in_channels)
        # The last 2 variables of the first time step are replicated onto the
        # appended frames so every frame carries freq_in_channels variables.
        # NOTE(review): assumes vars -2: are static grid coords — confirm
        # against StFT.forward, which concatenates `grid` last.
        grid_dup = x_or[:, :, :, :, :1, -2:].repeat(1, 1, 1, 1, self.layer_indx, 1)

        x_added = x_added.permute(0, 1, 3, 4, 2).view(n, l, ph, pw, self.layer_indx, self.freq_in_channels - 2)
        x_added = torch.cat((x_added, grid_dup), axis=-1)
        # Re-join along time: (n, l, ph, pw, cond_time + layer_indx, freq_in_channels).
        x = torch.cat((x_or, x_added), axis=-2)
        x = self.p(x)  # lift variables: freq_in_channels -> lift_channel
        v, t = x.shape[-1], x.shape[-2]
        x = x.permute(0, 1, 5, 4, 2, 3).view(n * l, v, t, ph, pw)
        # 3-D real FFT over (time, patch-h, patch-w); keep only the leading
        # modes[0] x modes[1] spatial frequencies (all time frequencies kept).
        x_ft = torch.fft.rfftn(x, dim=[2, 3, 4])[
            :, :, :, : self.modes[0], : self.modes[1]
        ]
        # Real/imag parts concatenated into one real-valued frequency token
        # per patch: (n, l, modes0*modes1*t*lift_channel*2).
        x_ft_real = (x_ft.real).flatten(1)
        x_ft_imag = (x_ft.imag).flatten(1)
        x_ft_real = x_ft_real.view(n, l, -1)
        x_ft_imag = x_ft_imag.view(n, l, -1)
        x_ft_real_imag = torch.cat((x_ft_real, x_ft_imag), axis=-1)
        x = self.linear(x_ft_real_imag)
        x = x + self.pos_embed_f
        for layer in self.encoder_layers_f:
            x = layer(x)
        # Back to complex spectral coefficients for a single output time step.
        x_real, x_imag = self.q(x).split(
            self.modes[0] * self.modes[1] * self.lift_channel, dim=-1
        )
        x_real = x_real.reshape(n * l, -1, 1, self.modes[0], self.modes[1])
        x_imag = x_imag.reshape(n * l, -1, 1, self.modes[0], self.modes[1])
        x_complex = torch.complex(x_real, x_imag)
        # Zero-pad retained modes into a full rfft spectrum, then invert.
        out_ft = torch.zeros(
            n * l,
            self.lift_channel,
            1,
            ph,
            pw // 2 + 1,
            dtype=torch.cfloat,
            device=x.device,
        )
        out_ft[:, :, :, : self.modes[0], : self.modes[1]] = x_complex
        x = torch.fft.irfftn(out_ft, s=(1, ph, pw))
        x = x.permute(0, 3, 4, 1, 2).view(n * l, ph, pw, -1)
        x = self.down(x)  # lift_channel -> out_channel
        c = x.shape[-1]
        x_f = x.permute(0, 3, 1, 2).view(n, l, c, ph, pw)  # FFT-branch output
        # Spatial branch: flatten each patch to one token and run the ViT.
        x = x_copy
        _, _, _, ph, pw = x.shape
        x = x.flatten(2)
        x = self.token_embed(x) + self.pos_embed
        for layer in self.encoder_layers:
            x = layer(x)
        x = self.head(x)
        x = x.view(n, l, self.out_channel, ph, pw)
        x = x + x_f  # fuse the two branches
        return x
| 118 | + |
| 119 | + |
class StFT(nn.Module):
    """Multi-scale StFT model: runs a stack of ``StFTBlcok`` stages over
    overlapping image patches and returns the per-stage reconstructions.

    Parameters
    ----------
    cond_time : int
        Conditioning time steps in the input.
    num_vars : int
        Variables per time step as seen by each block (after the grid
        channels are appended in ``forward``).
    patch_sizes : sequence of (p1, p2)
        Patch size per stage; one ``StFTBlcok`` is built per entry.
    overlaps : sequence of (overlap_h, overlap_w)
        Patch overlap per stage.
    in_channels, out_channels : int
        Channel counts entering stage 0 / produced by every stage.
    modes : sequence of (modes_h, modes_w)
        Retained FFT modes per stage.
    img_size : (H, W)
        Nominal input resolution, used to size the positional embeddings.
    vit_depth : sequence of int
        Transformer depth per stage. NOTE(review): the default ``3`` (an
        int) is not indexable and would fail in ``__init__``; callers must
        pass a sequence. Kept for interface compatibility.
    """

    def __init__(
        self,
        cond_time,
        num_vars,
        patch_sizes,
        overlaps,
        in_channels,
        out_channels,
        modes,
        img_size=(50, 50),
        lift_channel=32,
        dim=128,
        vit_depth=3,
        num_heads=1,
        mlp_dim=128,
        act="relu",
    ):
        super(StFT, self).__init__()

        blocks = []
        self.cond_time = cond_time
        self.num_vars = num_vars
        self.patch_sizes = patch_sizes
        self.overlaps = overlaps
        for depth, (p1, p2) in enumerate(patch_sizes):
            H, W = img_size
            cur_modes = modes[depth]
            cur_depth = vit_depth[depth]
            overlap_h, overlap_w = overlaps[depth]

            step_h = p1 - overlap_h
            step_w = p2 - overlap_w

            # Padding needed so the strided patch grid tiles the image exactly.
            pad_h = (step_h - (H - p1) % step_h) % step_h
            pad_w = (step_w - (W - p2) % step_w) % step_w
            H_pad = H + pad_h
            W_pad = W + pad_w

            num_patches_h = (H_pad - p1) // step_h + 1
            num_patches_w = (W_pad - p2) // step_w + 1
            num_patches = num_patches_h * num_patches_w

            # Stage 0 sees only the raw input channels; later stages also see
            # the previous stage's out_channels appended on the channel axis
            # (layer_indx=1 tells the block one extra frame is packed in).
            stage_in = in_channels if depth == 0 else in_channels + out_channels
            blocks.append(
                StFTBlcok(
                    cond_time,
                    num_vars,
                    p1 * p2 * stage_in,
                    out_channels * p1 * p2,
                    out_channels,
                    num_patches,
                    cur_modes,
                    lift_channel=lift_channel,
                    dim=dim,
                    depth=cur_depth,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    act=act,
                    grid_size=(num_patches_h, num_patches_w),
                    layer_indx=0 if depth == 0 else 1,
                )
            )

        self.blocks = nn.ModuleList(blocks)

    def forward(self, x, grid):
        """Process ``x`` (N, T, V, H, W) with coordinate channels ``grid``
        (2, H, W) and return a list with one (N, out_channels, H, W) tensor
        per stage."""
        # Append the static grid channels to every time step, then fold time
        # and variables into a single channel axis: (N, T*(V+2), H, W).
        grid_dup = grid[None, :, :, :].repeat(x.shape[0], x.shape[1], 1, 1, 1)
        x = torch.cat((x, grid_dup), axis=2)
        x = x.view(x.shape[0], x.shape[1] * x.shape[2], x.shape[3], x.shape[4])

        layer_outputs = []
        or_patches = x  # pristine input, reused as the base of every stage
        patches = x
        for depth in range(len(self.patch_sizes)):
            p1, p2 = self.patch_sizes[depth]
            overlap_h, overlap_w = self.overlaps[depth]

            step_h = p1 - overlap_h
            step_w = p2 - overlap_w

            # Symmetric zero-padding so the strided patch grid tiles exactly.
            pad_h = (step_h - (patches.shape[2] - p1) % step_h) % step_h
            pad_w = (step_w - (patches.shape[3] - p2) % step_w) % step_w
            padding = (
                pad_w // 2,
                pad_w - pad_w // 2,
                pad_h // 2,
                pad_h - pad_h // 2,
            )
            patches = F.pad(patches, padding, mode="constant", value=0)

            # BUG FIX: the original unpacked six dims from the 4-D input
            # tensor (`x.shape`), which raises ValueError at runtime; the
            # dims must come from the padded tensor being unfolded.
            n, c, H_pad, W_pad = patches.shape
            h = (H_pad - p1) // step_h + 1
            w = (W_pad - p2) // step_w + 1

            # (n, c, H, W) -> (n, c, h, w, p1, p2) -> (n, h*w, c, p1, p2).
            # `reshape` (not `view`) is required after `permute`: the
            # unfolded tensor is non-contiguous.
            patches = patches.unfold(2, p1, step_h).unfold(3, p2, step_w)
            patches = patches.permute(0, 2, 3, 1, 4, 5).reshape(n, h * w, c, p1, p2)

            processed = self.blocks[depth](patches)
            # BUG FIX: the block outputs `out_channels` channels, which in
            # general differs from the input channel count `c`; fold using
            # the processed tensor's own channel count.
            c_out = processed.shape[2]
            folded = processed.permute(0, 2, 1, 3, 4).reshape(n, c_out, h, w, p1, p2)

            # Overlap-add the patches back into an image, then divide by the
            # per-pixel patch count to average overlapping contributions.
            cols = folded.permute(0, 1, 4, 5, 2, 3).reshape(n, c_out * p1 * p2, h * w)
            output = F.fold(
                cols,
                output_size=(H_pad, W_pad),
                kernel_size=(p1, p2),
                stride=(step_h, step_w),
            )
            overlap_count = F.fold(
                torch.ones_like(cols),
                output_size=(H_pad, W_pad),
                kernel_size=(p1, p2),
                stride=(step_h, step_w),
            )
            output = output / overlap_count

            # Undo the symmetric padding.
            output = output[
                :,
                :,
                padding[2] : H_pad - padding[3],
                padding[0] : W_pad - padding[1],
            ]
            layer_outputs.append(output)

            # Next stage sees the original input plus this stage's (detached)
            # prediction appended on the channel axis; detach/clone keeps the
            # stages' gradients independent, as in the original.
            patches = torch.cat((or_patches, output.detach().clone()), axis=1)

        return layer_outputs