Skip to content

Commit 3a28e36

Browse files
authored
[Post release 0.28.0] remove deprecated blocks. (#8291)
* remove deprecated blocks.
* update the location paths.
1 parent 3393c01 commit 3a28e36

24 files changed: +69 additions, −879 deletions

src/diffusers/models/autoencoders/autoencoder_kl.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -245,11 +245,13 @@ def encode(
245245
Args:
246246
x (`torch.Tensor`): Input batch of images.
247247
return_dict (`bool`, *optional*, defaults to `True`):
248-
Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
248+
Whether to return a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] instead of a plain
249+
tuple.
249250
250251
Returns:
251252
The latent representations of the encoded images. If `return_dict` is True, a
252-
[`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
253+
[`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is
254+
returned.
253255
"""
254256
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
255257
return self.tiled_encode(x, return_dict=return_dict)
@@ -331,12 +333,13 @@ def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> Autoencoder
331333
Args:
332334
x (`torch.Tensor`): Input batch of images.
333335
return_dict (`bool`, *optional*, defaults to `True`):
334-
Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
336+
Whether or not to return a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] instead of a
337+
plain tuple.
335338
336339
Returns:
337-
[`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
338-
If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain
339-
`tuple` is returned.
340+
[`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
341+
If return_dict is True, a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] is returned,
342+
otherwise a plain `tuple` is returned.
340343
"""
341344
overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
342345
blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)

src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -323,11 +323,13 @@ def encode(
323323
Args:
324324
x (`torch.Tensor`): Input batch of images.
325325
return_dict (`bool`, *optional*, defaults to `True`):
326-
Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
326+
Whether to return a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] instead of a plain
327+
tuple.
327328
328329
Returns:
329330
The latent representations of the encoded images. If `return_dict` is True, a
330-
[`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
331+
[`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is
332+
returned.
331333
"""
332334
h = self.encoder(x)
333335
moments = self.quant_conv(h)

src/diffusers/models/autoencoders/consistency_decoder_vae.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -284,13 +284,13 @@ def encode(
284284
Args:
285285
x (`torch.Tensor`): Input batch of images.
286286
return_dict (`bool`, *optional*, defaults to `True`):
287-
Whether to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a plain
288-
tuple.
287+
Whether to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
288+
instead of a plain tuple.
289289
290290
Returns:
291291
The latent representations of the encoded images. If `return_dict` is True, a
292-
[`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a plain `tuple`
293-
is returned.
292+
[`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a
293+
plain `tuple` is returned.
294294
"""
295295
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
296296
return self.tiled_encode(x, return_dict=return_dict)
@@ -382,13 +382,13 @@ def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[Consi
382382
Args:
383383
x (`torch.Tensor`): Input batch of images.
384384
return_dict (`bool`, *optional*, defaults to `True`):
385-
Whether or not to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a
386-
plain tuple.
385+
Whether or not to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
386+
instead of a plain tuple.
387387
388388
Returns:
389-
[`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
390-
If return_dict is True, a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned,
391-
otherwise a plain `tuple` is returned.
389+
[`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
390+
If return_dict is True, a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
391+
is returned, otherwise a plain `tuple` is returned.
392392
"""
393393
overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
394394
blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)

src/diffusers/models/dual_transformer_2d.py

Lines changed: 0 additions & 20 deletions
This file was deleted.

src/diffusers/models/prior_transformer.py

Lines changed: 0 additions & 12 deletions
This file was deleted.

src/diffusers/models/t5_film_transformer.py

Lines changed: 0 additions & 70 deletions
This file was deleted.

src/diffusers/models/transformer_2d.py

Lines changed: 0 additions & 25 deletions
This file was deleted.

src/diffusers/models/transformer_temporal.py

Lines changed: 0 additions & 34 deletions
This file was deleted.

src/diffusers/models/transformers/dual_transformer_2d.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -123,9 +123,9 @@ def forward(
123123
tuple.
124124
125125
Returns:
126-
[`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
127-
[`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
128-
returning a tuple, the first element is the sample tensor.
126+
[`~models.transformers.transformer_2d.Transformer2DModelOutput`] or `tuple`:
127+
[`~models.transformers.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a
128+
`tuple`. When returning a tuple, the first element is the sample tensor.
129129
"""
130130
input_states = hidden_states
131131

src/diffusers/models/transformers/prior_transformer.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -266,13 +266,13 @@ def forward(
266266
attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`):
267267
Text mask for the text embeddings.
268268
return_dict (`bool`, *optional*, defaults to `True`):
269-
Whether or not to return a [`~models.prior_transformer.PriorTransformerOutput`] instead of a plain
270-
tuple.
269+
Whether or not to return a [`~models.transformers.prior_transformer.PriorTransformerOutput`] instead of
270+
a plain tuple.
271271
272272
Returns:
273-
[`~models.prior_transformer.PriorTransformerOutput`] or `tuple`:
274-
If return_dict is True, a [`~models.prior_transformer.PriorTransformerOutput`] is returned, otherwise a
275-
tuple is returned where the first element is the sample tensor.
273+
[`~models.transformers.prior_transformer.PriorTransformerOutput`] or `tuple`:
274+
If return_dict is True, a [`~models.transformers.prior_transformer.PriorTransformerOutput`] is
275+
returned, otherwise a tuple is returned where the first element is the sample tensor.
276276
"""
277277
batch_size = hidden_states.shape[0]
278278

0 commit comments

Comments (0)