Skip to content

Commit a2ec5f8

Browse files
committed
update
1 parent bf6c211 commit a2ec5f8

File tree

2 files changed

+0
-91
lines changed

2 files changed

+0
-91
lines changed

src/diffusers/models/downsampling.py

Lines changed: 0 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -285,53 +285,6 @@ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
285285
return F.conv2d(inputs, weight, stride=2)
286286

287287

288-
class ConvPixelUnshuffleDownsample2D(nn.Module):
    r"""Downsample 2D feature maps with a convolution followed by pixel-unshuffle.

    A convolution first projects the input to ``out_channels // factor**2``
    channels; ``F.pixel_unshuffle`` then trades spatial resolution for channels,
    producing ``out_channels`` channels at ``1 / factor`` of the spatial size.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels. Must be divisible by
            ``factor**2``.
        kernel_size (int): Convolution kernel size; padding is
            ``kernel_size // 2`` ("same" output size for odd kernels).
        factor (int): Spatial downsampling factor.

    Raises:
        ValueError: If ``out_channels`` is not divisible by ``factor**2``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        factor: int,
    ):
        super().__init__()
        self.factor = factor
        out_ratio = factor**2
        # Raise instead of `assert`: asserts are stripped under `python -O`,
        # which would let a misconfiguration through silently.
        if out_channels % out_ratio != 0:
            raise ValueError(
                f"`out_channels` ({out_channels}) must be divisible by `factor**2` ({out_ratio})."
            )
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels // out_ratio,
            kernel_size=kernel_size,
            padding=kernel_size // 2,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.conv(x)
        # Fold each factor x factor spatial patch into the channel dimension.
        x = F.pixel_unshuffle(x, self.factor)
        return x
311-
312-
313-
class PixelUnshuffleChannelAveragingDownsample2D(nn.Module):
    r"""Parameter-free 2D downsampler: pixel-unshuffle, then channel-group averaging.

    ``F.pixel_unshuffle`` folds spatial patches into channels (giving
    ``in_channels * factor**2`` channels), and groups of ``group_size``
    consecutive channels are averaged down to ``out_channels``.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
            ``in_channels * factor**2`` must be divisible by it.
        factor (int): Spatial downsampling factor.

    Raises:
        ValueError: If ``in_channels * factor**2`` is not divisible by
            ``out_channels``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        factor: int,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.factor = factor
        # Raise instead of `assert`: asserts are stripped under `python -O`.
        if in_channels * factor**2 % out_channels != 0:
            raise ValueError(
                f"`in_channels * factor**2` ({in_channels * factor**2}) must be divisible by"
                f" `out_channels` ({out_channels})."
            )
        self.group_size = in_channels * factor**2 // out_channels

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = F.pixel_unshuffle(x, self.factor)
        B, C, H, W = x.shape
        # Average each contiguous group of `group_size` channels into one.
        x = x.view(B, self.out_channels, self.group_size, H, W)
        x = x.mean(dim=2)
        return x
333-
334-
335288
class CogVideoXDownsample3D(nn.Module):
336289
# TODO: Wait for paper release.
337290
r"""

src/diffusers/models/upsampling.py

Lines changed: 0 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -348,50 +348,6 @@ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
348348
return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1)
349349

350350

351-
class ConvPixelShuffleUpsample2D(nn.Module):
    r"""Upsample 2D feature maps with a convolution followed by pixel-shuffle.

    The convolution expands the channel dimension to
    ``out_channels * factor**2``; ``F.pixel_shuffle`` then redistributes that
    channel surplus into a ``factor``-times-larger spatial grid, yielding
    ``out_channels`` channels.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_size (int): Convolution kernel size; padded by
            ``kernel_size // 2`` so odd kernels keep the spatial size.
        factor (int): Spatial upsampling factor.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        factor: int,
    ):
        super().__init__()
        self.factor = factor
        same_padding = kernel_size // 2
        channel_multiplier = factor**2
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels * channel_multiplier,
            kernel_size=kernel_size,
            padding=same_padding,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Expand channels, then rearrange them into a finer spatial grid.
        expanded = self.conv(x)
        return F.pixel_shuffle(expanded, self.factor)
373-
374-
375-
class ChannelDuplicatingPixelUnshuffleUpsample2D(nn.Module):
    r"""Parameter-free 2D upsampler: duplicate channels, then pixel-shuffle.

    Each input channel is repeated ``repeats`` times so the tensor has
    ``out_channels * factor**2`` channels, and ``F.pixel_shuffle`` converts
    that channel surplus into a ``factor``-times-larger spatial grid with
    ``out_channels`` channels.

    Args:
        in_channels (int): Number of input channels. Must divide
            ``out_channels * factor**2``.
        out_channels (int): Number of output channels.
        factor (int): Spatial upsampling factor.

    Raises:
        ValueError: If ``out_channels * factor**2`` is not divisible by
            ``in_channels``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        factor: int,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.factor = factor
        # Raise instead of `assert`: asserts are stripped under `python -O`.
        if out_channels * factor**2 % in_channels != 0:
            raise ValueError(
                f"`out_channels * factor**2` ({out_channels * factor**2}) must be divisible by"
                f" `in_channels` ({in_channels})."
            )
        self.repeats = out_channels * factor**2 // in_channels

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Duplicate each channel `repeats` times, then fold the extra
        # channels into a larger spatial grid.
        x = x.repeat_interleave(self.repeats, dim=1)
        x = F.pixel_shuffle(x, self.factor)
        return x
393-
394-
395351
class CogVideoXUpsample3D(nn.Module):
396352
r"""
397353
A 3D Upsample layer used in CogVideoX by Tsinghua University & ZhipuAI  # TODO: Wait for paper release.

0 commit comments

Comments
 (0)