diff --git a/src/diffusers/models/transformers/__init__.py b/src/diffusers/models/transformers/__init__.py
index a2c087d708a4..297ffb21b494 100644
--- a/src/diffusers/models/transformers/__init__.py
+++ b/src/diffusers/models/transformers/__init__.py
@@ -20,3 +20,4 @@
from .transformer_mochi import MochiTransformer3DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
+from .omnigen_transformer import OmniGenTransformer
diff --git a/src/diffusers/models/transformers/omnigen_transformer.py b/src/diffusers/models/transformers/omnigen_transformer.py
new file mode 100644
index 000000000000..419bb1718d79
--- /dev/null
+++ b/src/diffusers/models/transformers/omnigen_transformer.py
@@ -0,0 +1,380 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import math
+from typing import Dict
+
+from diffusers.configuration_utils import ConfigMixin
+from diffusers.loaders import PeftAdapterMixin
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers.models.transformers.phi3_transformer_config import Phi3Transformer
+
+from transformers import Phi3Config
+
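+# adaLN-style modulation: shift/scale are per-sample (B, D) vectors broadcast over the sequence (token) dimension.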
+def modulate(x, shift, scale):
+ return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
+
+class TimestepEmbedder(nn.Module):
+ """
+ Embeds scalar timesteps into vector representations.
+ """
+ def __init__(self, hidden_size, frequency_embedding_size=256):
+ super().__init__()
+ self.mlp = nn.Sequential(
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+ nn.SiLU(),
+ nn.Linear(hidden_size, hidden_size, bias=True),
+ )
+ self.frequency_embedding_size = frequency_embedding_size
+
+ @staticmethod
+ def timestep_embedding(t, dim, max_period=10000):
+ """
+ Create sinusoidal timestep embeddings.
+ :param t: a 1-D Tensor of N indices, one per batch element.
+ These may be fractional.
+ :param dim: the dimension of the output.
+ :param max_period: controls the minimum frequency of the embeddings.
+ :return: an (N, D) Tensor of positional embeddings.
+ """
+ # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
+ half = dim // 2
+ freqs = torch.exp(
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+ ).to(device=t.device)
+ args = t[:, None].float() * freqs[None]
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+ if dim % 2:
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+ return embedding
+
+ def forward(self, t, dtype=torch.float32):
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(dtype)
+ t_emb = self.mlp(t_freq)
+ return t_emb
+
+
+class FinalLayer(nn.Module):
+ """
+ The final layer of DiT.
+ """
+ def __init__(self, hidden_size, patch_size, out_channels):
+ super().__init__()
+ self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
+ self.adaLN_modulation = nn.Sequential(
+ nn.SiLU(),
+ nn.Linear(hidden_size, 2 * hidden_size, bias=True)
+ )
+
+ def forward(self, x, c):
+ shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
+ x = modulate(self.norm_final(x), shift, scale)
+ x = self.linear(x)
+ return x
+
+
+def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=1):
+ """
+ grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or
+ [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
+ """
+ if isinstance(grid_size, int):
+ grid_size = (grid_size, grid_size)
+
+ grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale
+ grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
+ grid = np.stack(grid, axis=0)
+
+ grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+ if cls_token and extra_tokens > 0:
+ pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
+ return pos_embed
+
+
+def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+ assert embed_dim % 2 == 0
+
+ # use half of dimensions to encode grid_h
+ emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
+ emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
+
+ emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
+ return emb
+
+
+def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+ """
+ embed_dim: output dimension for each position
+ pos: a list of positions to be encoded: size (M,)
+ out: (M, D)
+ """
+ assert embed_dim % 2 == 0
+ omega = np.arange(embed_dim // 2, dtype=np.float64)
+ omega /= embed_dim / 2.
+ omega = 1. / 10000**omega # (D/2,)
+
+ pos = pos.reshape(-1) # (M,)
+ out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
+
+ emb_sin = np.sin(out) # (M, D/2)
+ emb_cos = np.cos(out) # (M, D/2)
+
+ emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
+ return emb
+
+
+class PatchEmbedMR(nn.Module):
+ """ 2D Image to Patch Embedding
+ """
+ def __init__(
+ self,
+ patch_size: int = 2,
+ in_chans: int = 4,
+ embed_dim: int = 768,
+ bias: bool = True,
+ ):
+ super().__init__()
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
+
+ def forward(self, x):
+ x = self.proj(x)
+ x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
+ return x
+
+class OmniGenTransformer(ModelMixin, ConfigMixin, PeftAdapterMixin):
+ """
+ Diffusion model with a Transformer backbone.
+ """
+ def __init__(
+ self,
+ transformer_config: Phi3Config,
+ patch_size=2,
+ in_channels=4,
+ pe_interpolation: float = 1.0,
+ pos_embed_max_size: int = 192,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = in_channels
+ self.patch_size = patch_size
+ self.pos_embed_max_size = pos_embed_max_size
+
+ hidden_size = transformer_config.hidden_size
+
+ self.x_embedder = PatchEmbedMR(patch_size, in_channels, hidden_size, bias=True)
+ self.input_x_embedder = PatchEmbedMR(patch_size, in_channels, hidden_size, bias=True)
+
+ self.time_token = TimestepEmbedder(hidden_size)
+ self.t_embedder = TimestepEmbedder(hidden_size)
+
+ self.pe_interpolation = pe_interpolation
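+ # Pre-compute a (pos_embed_max_size x pos_embed_max_size) grid of 2D sin-cos embeddings;
+ # cropped_pos_embed center-crops this grid to the actual latent resolution at run time.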
+ pos_embed = get_2d_sincos_pos_embed(hidden_size, pos_embed_max_size, interpolation_scale=self.pe_interpolation, base_size=64)
+ self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=True)
+
+ self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels)
+
+ self.initialize_weights()
+
+ self.llm = Phi3Transformer(config=transformer_config)
+ self.llm.config.use_cache = False
+
+ def initialize_weights(self):
+ assert not hasattr(self, "llm"), "initialize_weights must run before the Phi-3 backbone is created, so its pretrained weights are not re-initialized"
+
+ # Initialize transformer layers:
+ def _basic_init(module):
+ if isinstance(module, nn.Linear):
+ torch.nn.init.xavier_uniform_(module.weight)
+ if module.bias is not None:
+ nn.init.constant_(module.bias, 0)
+ self.apply(_basic_init)
+
+ # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+ w = self.x_embedder.proj.weight.data
+ nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+ nn.init.constant_(self.x_embedder.proj.bias, 0)
+
+ w = self.input_x_embedder.proj.weight.data
+ nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+ nn.init.constant_(self.input_x_embedder.proj.bias, 0)
+
+
+ # Initialize timestep embedding MLP:
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+ nn.init.normal_(self.time_token.mlp[0].weight, std=0.02)
+ nn.init.normal_(self.time_token.mlp[2].weight, std=0.02)
+
+ # Zero-out output layers:
+ nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
+ nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
+ nn.init.constant_(self.final_layer.linear.weight, 0)
+ nn.init.constant_(self.final_layer.linear.bias, 0)
+
+ def unpatchify(self, x, h, w):
+ """
+ x: (N, T, patch_size**2 * C)
+ imgs: (N, C, H, W)
+ """
+ c = self.out_channels
+
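+ # (N, L, p*p*C) -> (N, H/p, W/p, p, p, C) -> (N, C, H, W)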
+ x = x.reshape(shape=(x.shape[0], h//self.patch_size, w//self.patch_size, self.patch_size, self.patch_size, c))
+ x = torch.einsum('nhwpqc->nchpwq', x)
+ imgs = x.reshape(shape=(x.shape[0], c, h, w))
+ return imgs
+
+
+ def cropped_pos_embed(self, height, width):
+ """Crops positional embeddings for SD3 compatibility."""
+ if self.pos_embed_max_size is None:
+ raise ValueError("`pos_embed_max_size` must be set for cropping.")
+
+ height = height // self.patch_size
+ width = width // self.patch_size
+ if height > self.pos_embed_max_size:
+ raise ValueError(
+ f"Height ({height}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}."
+ )
+ if width > self.pos_embed_max_size:
+ raise ValueError(
+ f"Width ({width}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}."
+ )
+
+ top = (self.pos_embed_max_size - height) // 2
+ left = (self.pos_embed_max_size - width) // 2
+ spatial_pos_embed = self.pos_embed.reshape(1, self.pos_embed_max_size, self.pos_embed_max_size, -1)
+ spatial_pos_embed = spatial_pos_embed[:, top : top + height, left : left + width, :]
+ spatial_pos_embed = spatial_pos_embed.reshape(1, -1, spatial_pos_embed.shape[-1])
+ return spatial_pos_embed
+
+
+ def patch_multiple_resolutions(self, latents, padding_latent=None, is_input_images:bool=False):
+ if isinstance(latents, list):
+ return_list = False
+ if padding_latent is None:
+ padding_latent = [None] * len(latents)
+ return_list = True
+ patched_latents, num_tokens, shapes = [], [], []
+ for latent, padding in zip(latents, padding_latent):
+ height, width = latent.shape[-2:]
+ if is_input_images:
+ latent = self.input_x_embedder(latent)
+ else:
+ latent = self.x_embedder(latent)
+ pos_embed = self.cropped_pos_embed(height, width)
+ latent = latent + pos_embed
+ if padding is not None:
+ latent = torch.cat([latent, padding], dim=-2)
+ patched_latents.append(latent)
+
+ num_tokens.append(pos_embed.size(1))
+ shapes.append([height, width])
+ if not return_list:
+ latents = torch.cat(patched_latents, dim=0)
+ else:
+ latents = patched_latents
+ else:
+ height, width = latents.shape[-2:]
+ if is_input_images:
+ latents = self.input_x_embedder(latents)
+ else:
+ latents = self.x_embedder(latents)
+ pos_embed = self.cropped_pos_embed(height, width)
+ latents = latents + pos_embed
+ num_tokens = latents.size(1)
+ shapes = [height, width]
+ return latents, num_tokens, shapes
+
+
+ def forward(self, x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, padding_latent=None, past_key_values=None, return_past_key_values=True, offload_model:bool=False):
+ """
+
+ """
+ input_is_list = isinstance(x, list)
+ x, num_tokens, shapes = self.patch_multiple_resolutions(x, padding_latent)
+ time_token = self.time_token(timestep, dtype=x[0].dtype).unsqueeze(1)
+
+ if input_img_latents is not None:
+ input_latents, _, _ = self.patch_multiple_resolutions(input_img_latents, is_input_images=True)
+ if input_ids is not None:
+ condition_embeds = self.llm.embed_tokens(input_ids).clone()
+ input_img_inx = 0
+ for b_inx in input_image_sizes.keys():
+ for start_inx, end_inx in input_image_sizes[b_inx]:
+ condition_embeds[b_inx, start_inx: end_inx] = input_latents[input_img_inx]
+ input_img_inx += 1
+ if input_img_latents is not None:
+ assert input_img_inx == len(input_latents)
+
+ input_emb = torch.cat([condition_embeds, time_token, x], dim=1)
+ else:
+ input_emb = torch.cat([time_token, x], dim=1)
+ output = self.llm(inputs_embeds=input_emb, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, offload_model=offload_model)
+ output, past_key_values = output.last_hidden_state, output.past_key_values
+ if input_is_list:
+ image_embedding = output[:, -max(num_tokens):]
+ time_emb = self.t_embedder(timestep, dtype=x.dtype)
+ x = self.final_layer(image_embedding, time_emb)
+ latents = []
+ for i in range(x.size(0)):
+ latent = x[i:i+1, :num_tokens[i]]
+ latent = self.unpatchify(latent, shapes[i][0], shapes[i][1])
+ latents.append(latent)
+ else:
+ image_embedding = output[:, -num_tokens:]
+ time_emb = self.t_embedder(timestep, dtype=x.dtype)
+ x = self.final_layer(image_embedding, time_emb)
+ latents = self.unpatchify(x, shapes[0], shapes[1])
+
+ if return_past_key_values:
+ return latents, past_key_values
+ return latents
+
+ @torch.no_grad()
+ def forward_with_cfg(self, x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, cfg_scale, use_img_cfg, img_cfg_scale, past_key_values=None, use_kv_cache=True, offload_model=False):
+ self.llm.config.use_cache = use_kv_cache
+ model_out, past_key_values = self.forward(x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, past_key_values=past_key_values, return_past_key_values=True, offload_model=offload_model)
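+ # model_out stacks the conditional, unconditional and (optionally) image-conditional batches along dim 0,
+ # matching the order produced by OmniGenCollator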
+ if use_img_cfg:
+ cond, uncond, img_cond = torch.split(model_out, len(model_out) // 3, dim=0)
+ cond = uncond + img_cfg_scale * (img_cond - uncond) + cfg_scale * (cond - img_cond)
+ model_out = [cond, cond, cond]
+ else:
+ cond, uncond = torch.split(model_out, len(model_out) // 2, dim=0)
+ cond = uncond + cfg_scale * (cond - uncond)
+ model_out = [cond, cond]
+
+ return torch.cat(model_out, dim=0), past_key_values
+
+
+ @torch.no_grad()
+ def forward_with_separate_cfg(self, x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, cfg_scale, use_img_cfg, img_cfg_scale, past_key_values=None, use_kv_cache=True, offload_model=False):
+ self.llm.config.use_cache = use_kv_cache
+ if past_key_values is None:
+ past_key_values = [None] * len(attention_mask)
+
+ x = torch.split(x, len(x) // len(attention_mask), dim=0)
+ timestep = timestep.to(x[0].dtype)
+ timestep = torch.split(timestep, len(timestep) // len(input_ids), dim=0)
+
+ model_out, new_past_key_values = [], []
+ for i in range(len(input_ids)):
+ temp_out, temp_past_key_values = self.forward(x[i], timestep[i], input_ids[i], input_img_latents[i], input_image_sizes[i], attention_mask[i], position_ids[i], past_key_values=past_key_values[i], return_past_key_values=True, offload_model=offload_model)
+ model_out.append(temp_out)
+ new_past_key_values.append(temp_past_key_values)
+
+ if len(model_out) == 3:
+ cond, uncond, img_cond = model_out
+ cond = uncond + img_cfg_scale * (img_cond - uncond) + cfg_scale * (cond - img_cond)
+ model_out = [cond, cond, cond]
+ elif len(model_out) == 2:
+ cond, uncond = model_out
+ cond = uncond + cfg_scale * (cond - uncond)
+ model_out = [cond, cond]
+ else:
+ return model_out[0], new_past_key_values
+
+ return torch.cat(model_out, dim=0), new_past_key_values
diff --git a/src/diffusers/models/transformers/phi3_transformer_config.py b/src/diffusers/models/transformers/phi3_transformer_config.py
new file mode 100644
index 000000000000..71ed88c630d8
--- /dev/null
+++ b/src/diffusers/models/transformers/phi3_transformer_config.py
@@ -0,0 +1,183 @@
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+
+from transformers.modeling_outputs import (
+ BaseModelOutputWithPast,
+)
+from transformers import Phi3Model
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+class Phi3Transformer(Phi3Model):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers, each a [`Phi3DecoderLayer`].
+ Compared to the stock `Phi3Model`, only the attention-mask handling is modified (OmniGen supplies a full
+ (batch, seq_len, seq_len) mask) and optional layer-by-layer CPU offloading is added.
+ Args:
+ config: Phi3Config
+ """
+ def prefetch_layer(self, layer_idx: int, device: torch.device):
+ "Starts prefetching the next layer cache"
+ with torch.cuda.stream(self.prefetch_stream):
+ # Prefetch next layer tensors to GPU
+ for name, param in self.layers[layer_idx].named_parameters():
+ param.data = param.data.to(device, non_blocking=True)
+
+ def evict_previous_layer(self, layer_idx: int):
+ "Moves the previous layer cache to the CPU"
+ prev_layer_idx = layer_idx - 1
+ for name, param in self.layers[prev_layer_idx].named_parameters():
+ param.data = param.data.to("cpu", non_blocking=True)
+
+ def get_offload_layer(self, layer_idx: int, device: torch.device):
+ # init stream
+ if not hasattr(self, "prefetch_stream"):
+ self.prefetch_stream = torch.cuda.Stream()
+
+ # delete previous layer
+ torch.cuda.current_stream().synchronize()
+ self.evict_previous_layer(layer_idx)
+
+ # make sure the current layer is ready
+ torch.cuda.synchronize(self.prefetch_stream)
+
+ # load next layer
+ self.prefetch_layer((layer_idx + 1) % len(self.layers), device)
+
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ offload_model: Optional[bool] = False,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # kept for BC (non `Cache` `past_key_values` inputs)
+ return_legacy_cache = False
+ if use_cache and not isinstance(past_key_values, Cache):
+ return_legacy_cache = True
+ if past_key_values is None:
+ past_key_values = DynamicCache()
+ else:
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ logger.warning_once(
+ "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
+ "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
+ "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
+ )
+
+ # if inputs_embeds is None:
+ # inputs_embeds = self.embed_tokens(input_ids)
+
+ # if cache_position is None:
+ # past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ # cache_position = torch.arange(
+ # past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ # )
+ # if position_ids is None:
+ # position_ids = cache_position.unsqueeze(0)
+
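+ # OmniGen supplies a full (batch, seq_len, seq_len) 0/1 mask built by its collator; convert it to an
+ # additive bias (0 = attend, large negative = masked) and add a broadcastable head dimension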
+ if attention_mask is not None and attention_mask.dim() == 3:
+ dtype = inputs_embeds.dtype
+ min_dtype = torch.finfo(dtype).min
+ attention_mask = (1 - attention_mask) * min_dtype
+ attention_mask = attention_mask.unsqueeze(1).to(inputs_embeds.dtype)
+ else:
+ raise ValueError("OmniGen expects a pre-built 3D attention mask of shape (batch_size, seq_len, seq_len).")
+ # causal_mask = self._update_causal_mask(
+ # attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ # )
+
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = None
+
+ layer_idx = -1
+ for decoder_layer in self.layers:
+ layer_idx += 1
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ cache_position,
+ )
+ else:
+ if offload_model and not self.training:
+ self.get_offload_layer(layer_idx, device=inputs_embeds.device)
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if return_legacy_cache:
+ next_cache = next_cache.to_legacy_cache()
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
\ No newline at end of file
diff --git a/src/diffusers/pipelines/omnigen/__init__.py b/src/diffusers/pipelines/omnigen/__init__.py
new file mode 100644
index 000000000000..ba454b731cdd
--- /dev/null
+++ b/src/diffusers/pipelines/omnigen/__init__.py
@@ -0,0 +1,21 @@
+from typing import TYPE_CHECKING
+
+from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
+
+
+_import_structure = {
+ "pipeline_omnigen": ["OmniGenPipeline"]
+}
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+ from .pipeline_omnigen import OmniGenPipeline
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(
+ __name__,
+ globals()["__file__"],
+ _import_structure,
+ module_spec=__spec__,
+ )
\ No newline at end of file
diff --git a/src/diffusers/pipelines/omnigen/pipeline_omnigen.py b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py
new file mode 100644
index 000000000000..5ad69b208929
--- /dev/null
+++ b/src/diffusers/pipelines/omnigen/pipeline_omnigen.py
@@ -0,0 +1,349 @@
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import PIL
+import numpy as np
+import torch
+from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ is_torch_xla_available,
+ logging,
+ replace_example_docstring,
+ scale_lora_layers,
+ unscale_lora_layers,
+)
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from diffusers.models import AutoencoderKL
+from diffusers.models.transformers import OmniGenTransformer
+from diffusers.utils.omnigen_processor import OmniGenProcessor
+from diffusers.loaders import TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
+
+logger = logging.get_logger(__name__)
+
+EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ >>> import torch
+ >>> from diffusers import OmniGenPipeline
+ >>> pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1")
+ >>> pipe.to("cuda")
+ >>> prompt = "A cat holding a sign that says hello world"
+ >>> image = pipe(prompt, num_inference_steps=50, guidance_scale=3.0).images[0]
+ >>> image.save("omnigen.png")
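+ >>> # Image-conditioned generation (the file path below is illustrative); input images are referenced
+ >>> # in the prompt via <|image_i|> tags and passed as a list of file paths
+ >>> prompt = "The person in <|image_1|> is reading a book in a park"
+ >>> image = pipe(prompt, input_images=["person.png"], guidance_scale=2.5, img_guidance_scale=1.6).images[0]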
+ ```
+"""
+
+class OmniGenPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin):
+ r"""
+ Pipeline for text-to-image generation using OmniGen.
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
+ implemented for all pipelines.
+
+ Args:
+ vae ([`AutoencoderKL`]):
+ Variational Auto-Encoder (VAE) model for encoding and decoding images
+ model ([`OmniGenTransformer`]):
+ OmniGen transformer model for generating images
+ processor ([`OmniGenProcessor`]):
+ Processor for handling text and image inputs
+ scheduler:
+ Scheduler used by the denoising loop; it must expose the standard diffusers `set_timesteps`,
+ `scale_model_input` and `step` methods that `__call__` relies on
+ """
+ model_cpu_offload_seq = "model->vae"
+ _optional_components = []
+ _callback_tensor_inputs = ["latents", "prompt_embeds"]
+
+ def __init__(
+ self,
+ vae: AutoencoderKL,
+ model: OmniGenTransformer,
+ processor: OmniGenProcessor,
+ scheduler,
+ ):
+ super().__init__()
+ self.register_modules(
+ vae=vae,
+ model=model,
+ processor=processor,
+ scheduler=scheduler,
+ )
+
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ self.model_cpu_offload = False
+
+ def enable_model_cpu_offload(self, gpu_id: Optional[int] = None):
+ """
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance.
+ """
+ self.model_cpu_offload = True
+ if gpu_id is None:
+ gpu_id = 0
+
+ self.model.to("cpu")
+ self.vae.to("cpu")
+ torch.cuda.empty_cache()
+
+ def disable_model_cpu_offload(self):
+ """
+ Disable CPU offloading.
+ """
+ self.model_cpu_offload = False
+ device = self._execution_device
+ self.model.to(device)
+ self.vae.to(device)
+
+ def enable_vae_slicing(self):
+ """
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+ """
+ self.vae.enable_slicing()
+
+ def disable_vae_slicing(self):
+ """
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_slicing()
+
+ def prepare_latents(
+ self,
+ batch_size: int,
+ height: int,
+ width: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ generator: Optional[torch.Generator] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ ) -> torch.FloatTensor:
+ """
+ Prepare latents for diffusion.
+ """
+ latent_height = height // self.vae_scale_factor
+ latent_width = width // self.vae_scale_factor
+
+ if latents is None:
+ shape = (batch_size, 4, latent_height, latent_width)
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ else:
+ latents = latents.to(device=device, dtype=dtype)
+
+ return latents
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]],
+ input_images: Union[List[str], List[List[str]]] = None,
+ height: Optional[int] = 1024,
+ width: Optional[int] = 1024,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 3.0,
+ use_img_guidance: bool = True,
+ img_guidance_scale: float = 1.6,
+ max_input_image_size: int = 1024,
+ separate_cfg_infer: bool = True,
+ use_kv_cache: bool = True,
+ offload_kv_cache: bool = True,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: Optional[int] = 1,
+ **kwargs,
+ ):
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide image generation.
+ input_images (`List[str]` or `List[List[str]]`, *optional*):
+ Optional input images to condition generation on.
+ height (`int`, *optional*, defaults to 1024):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 1024):
+ The width in pixels of the generated image.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps.
+ guidance_scale (`float`, *optional*, defaults to 3.0):
+ Guidance scale for text conditioning.
+ use_img_guidance (`bool`, *optional*, defaults to True):
+ Whether to use image guidance when input images are provided.
+ img_guidance_scale (`float`, *optional*, defaults to 1.6):
+ Guidance scale for image conditioning.
+ max_input_image_size (`int`, *optional*, defaults to 1024):
+ Maximum size for input images.
+ separate_cfg_infer (`bool`, *optional*, defaults to True):
+ Whether to separate classifier-free guidance inference.
+ use_kv_cache (`bool`, *optional*, defaults to True):
+ Whether to use key-value cache for transformer.
+ offload_kv_cache (`bool`, *optional*, defaults to True):
+ Whether to offload key-value cache to CPU.
+ generator (`torch.Generator`, *optional*):
+ A torch generator to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents.
+ output_type (`str`, *optional*, defaults to "pil"):
+ The output format of the generated image.
+ return_dict (`bool`, *optional*, defaults to True):
+ Whether or not to return an `OmniGenPipelineOutput` instead of a plain tuple.
+ callback (`Callable`, *optional*):
+ A function that is called every `callback_steps` steps during inference.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called.
+
+ Examples:
+
+ Returns:
+ `OmniGenPipelineOutput` or `tuple`:
+ `OmniGenPipelineOutput` if `return_dict` is True, otherwise a `tuple`.
+ When returning a tuple, the first element is a list with the generated images.
+ """
+ # 0. Default height and width to model dimensions if not specified
+ height = height or 1024
+ width = width or 1024
+
+ if isinstance(prompt, str):
+ batch_size = 1
+ prompt = [prompt]
+ if input_images is not None:
+ input_images = [input_images]
+ else:
+ batch_size = len(prompt)
+
+ device = self._execution_device
+
+ # 1. Process inputs
+ input_data = self.processor(
+ prompt,
+ input_images,
+ height=height,
+ width=width,
+ use_img_cfg=use_img_guidance,
+ separate_cfg_input=separate_cfg_infer
+ )
+
+ # 2. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+
+ # 3. Prepare latents
+ latents = self.prepare_latents(
+ batch_size=batch_size,
+ height=height,
+ width=width,
+ dtype=self.model.dtype,
+ device=device,
+ generator=generator,
+ latents=latents,
+ )
+
+ # 4. Prepare image latents if using image guidance
+ input_img_latents = []
+ if input_images is not None:
+ if self.model_cpu_offload:
+ self.vae.to(device)
+
+ if separate_cfg_infer:
+ for temp_pixel_values in input_data['input_pixel_values']:
+ temp_input_latents = []
+ for img in temp_pixel_values:
+ img = self.vae.encode(img.to(device)).latent_dist.sample() * self.vae.config.scaling_factor
+ temp_input_latents.append(img)
+ input_img_latents.append(temp_input_latents)
+ else:
+ for img in input_data['input_pixel_values']:
+ img = self.vae.encode(img.to(device)).latent_dist.sample() * self.vae.config.scaling_factor
+ input_img_latents.append(img)
+
+ if self.model_cpu_offload:
+ self.vae.to('cpu')
+ torch.cuda.empty_cache()
+
+ # 5. Set model kwargs for inference
+ model_kwargs = {
+ 'input_ids': input_data['input_ids'].to(device),
+ 'input_img_latents': input_img_latents,
+ 'input_image_sizes': input_data['input_image_sizes'],
+ 'attention_mask': input_data['attention_mask'].to(device),
+ 'position_ids': input_data['position_ids'].to(device),
+ 'cfg_scale': guidance_scale,
+ 'img_cfg_scale': img_guidance_scale,
+ 'use_img_cfg': use_img_guidance,
+ 'use_kv_cache': use_kv_cache,
+ 'offload_model': self.model_cpu_offload,
+ }
+
+ # 6. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ # Expand the latents for classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # Predict the noise residual
+ if separate_cfg_infer:
+ noise_pred, _ = self.model.forward_with_separate_cfg(
+ latent_model_input, t, **model_kwargs
+ )
+ else:
+ noise_pred, _ = self.model.forward_with_cfg(
+ latent_model_input, t, **model_kwargs
+ )
+
+ # Perform guidance
+ if guidance_scale > 1:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # Compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
+ if self.model_cpu_offload:
+ self.vae.to(device)
+
+ # 7. Post-processing
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
+
+ if self.model_cpu_offload:
+ self.vae.to('cpu')
+ torch.cuda.empty_cache()
+
+ # 8. Convert to PIL
+ if output_type == "pil":
+ image = (image / 2 + 0.5).clamp(0, 1)
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+ image = self.numpy_to_pil(image)
+ elif output_type == "pt":
+ image = (image / 2 + 0.5).clamp(0, 1)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return OmniGenPipelineOutput(images=image)
+
+class OmniGenPipelineOutput:
+ """
+ Output class for OmniGen pipelines.
+
+ Args:
+ images (`List[PIL.Image.Image]` or `np.ndarray`):
+ List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
+ num_channels)`.
+ """
+
+ images: Union[List[PIL.Image.Image], np.ndarray]
+
+ def __init__(self, images: Union[List[PIL.Image.Image], np.ndarray]):
+ self.images = images
\ No newline at end of file
diff --git a/src/diffusers/schedulers/__init__.py b/src/diffusers/schedulers/__init__.py
index bb9088538653..29c690592fe7 100644
--- a/src/diffusers/schedulers/__init__.py
+++ b/src/diffusers/schedulers/__init__.py
@@ -74,6 +74,7 @@
_import_structure["scheduling_unipc_multistep"] = ["UniPCMultistepScheduler"]
_import_structure["scheduling_utils"] = ["AysSchedules", "KarrasDiffusionSchedulers", "SchedulerMixin"]
_import_structure["scheduling_vq_diffusion"] = ["VQDiffusionScheduler"]
+ _import_structure["scheduling_omnigen"] = ["OmniGenScheduler"]
try:
if not is_flax_available():
@@ -174,6 +175,7 @@
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import AysSchedules, KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
+ from .scheduling_omnigen import OmniGenScheduler
try:
if not is_flax_available():
diff --git a/src/diffusers/schedulers/scheduling_omnigen.py b/src/diffusers/schedulers/scheduling_omnigen.py
new file mode 100644
index 000000000000..7f601373414f
--- /dev/null
+++ b/src/diffusers/schedulers/scheduling_omnigen.py
@@ -0,0 +1,178 @@
+from tqdm import tqdm
+from typing import Optional, Dict, Any, Tuple, List
+import gc
+
+import torch
+from transformers.cache_utils import Cache, DynamicCache, OffloadedCache
+
+
+
+class OmniGenCache(DynamicCache):
+ def __init__(self, num_tokens_for_img: int, offload_kv_cache: bool = False) -> None:
+ if not torch.cuda.is_available():
+ raise RuntimeError("OffloadedCache can only be used with a GPU")
+ super().__init__()
+ self.original_device = []
+ self.prefetch_stream = torch.cuda.Stream()
+ self.num_tokens_for_img = num_tokens_for_img
+ self.offload_kv_cache = offload_kv_cache
+
+ def prefetch_layer(self, layer_idx: int):
+ "Starts prefetching the next layer cache"
+ if layer_idx < len(self):
+ with torch.cuda.stream(self.prefetch_stream):
+ # Prefetch next layer tensors to GPU
+ device = self.original_device[layer_idx]
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device, non_blocking=True)
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device, non_blocking=True)
+
+
+ def evict_previous_layer(self, layer_idx: int):
+ "Moves the previous layer cache to the CPU"
+ if len(self) > 2:
+ # We do it on the default stream so it occurs after all earlier computations on these tensors are done
+ if layer_idx == 0:
+ prev_layer_idx = -1
+ else:
+ prev_layer_idx = (layer_idx - 1) % len(self)
+ self.key_cache[prev_layer_idx] = self.key_cache[prev_layer_idx].to("cpu", non_blocking=True)
+ self.value_cache[prev_layer_idx] = self.value_cache[prev_layer_idx].to("cpu", non_blocking=True)
+
+
+ def __getitem__(self, layer_idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
+ "Gets the cache for this layer to the device. Prefetches the next and evicts the previous layer."
+ if layer_idx < len(self):
+ if self.offload_kv_cache:
+ # Evict the previous layer if necessary
+ torch.cuda.current_stream().synchronize()
+ self.evict_previous_layer(layer_idx)
+ # Load current layer cache to its original device if not already there
+ original_device = self.original_device[layer_idx]
+ # self.prefetch_stream.synchronize(original_device)
+ torch.cuda.synchronize(self.prefetch_stream)
+ key_tensor = self.key_cache[layer_idx]
+ value_tensor = self.value_cache[layer_idx]
+
+ # Prefetch the next layer
+ self.prefetch_layer((layer_idx + 1) % len(self))
+ else:
+ key_tensor = self.key_cache[layer_idx]
+ value_tensor = self.value_cache[layer_idx]
+ return (key_tensor, value_tensor)
+ else:
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
+
+
+ def update(
+ self,
+ key_states: torch.Tensor,
+ value_states: torch.Tensor,
+ layer_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
+ Parameters:
+ key_states (`torch.Tensor`):
+ The new key states to cache.
+ value_states (`torch.Tensor`):
+ The new value states to cache.
+ layer_idx (`int`):
+ The index of the layer to cache the states for.
+ cache_kwargs (`Dict[str, Any]`, `optional`):
+ Additional arguments for the cache subclass. No additional arguments are used in `OffloadedCache`.
+ Return:
+ A tuple containing the updated key and value states.
+ """
+ # Update the cache
+ if len(self.key_cache) < layer_idx:
+ raise ValueError("OffloadedCache does not support model usage where layers are skipped. Use DynamicCache.")
+ elif len(self.key_cache) == layer_idx:
+ # only cache the states for condition tokens
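+ # (the trailing num_tokens_for_img image tokens plus the time token are recomputed at every step)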
+ key_states = key_states[..., :-(self.num_tokens_for_img+1), :]
+ value_states = value_states[..., :-(self.num_tokens_for_img+1), :]
+
+ # Update the number of seen tokens
+ if layer_idx == 0:
+ self._seen_tokens += key_states.shape[-2]
+
+ self.key_cache.append(key_states)
+ self.value_cache.append(value_states)
+ self.original_device.append(key_states.device)
+ if self.offload_kv_cache:
+ self.evict_previous_layer(layer_idx)
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
+ else:
+ # only cache the states for condition tokens
+ key_tensor, value_tensor = self[layer_idx]
+ k = torch.cat([key_tensor, key_states], dim=-2)
+ v = torch.cat([value_tensor, value_states], dim=-2)
+ return k, v
+
+
+
+class OmniGenScheduler:
+ def __init__(self, num_steps: int=50, time_shifting_factor: int=1):
+ self.num_steps = num_steps
+ self.time_shift = time_shifting_factor
+
+ t = torch.linspace(0, 1, num_steps+1)
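+ # shifted linear schedule: sigma = t / (t + s * (1 - t)); with time_shifting_factor s = 1 this is simply sigma = t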
+ t = t / (t + time_shifting_factor - time_shifting_factor * t)
+ self.sigma = t
+
+ def crop_kv_cache(self, past_key_values, num_tokens_for_img):
+ crop_past_key_values = ()
+ for layer_idx in range(len(past_key_values)):
+ key_states, value_states = past_key_values[layer_idx][:2]
+ crop_past_key_values += ((key_states[..., :-(num_tokens_for_img+1), :], value_states[..., :-(num_tokens_for_img+1), :], ),)
+ return DynamicCache.from_legacy_cache(crop_past_key_values)
+
+ def crop_position_ids_for_cache(self, position_ids, num_tokens_for_img):
+ if isinstance(position_ids, list):
+ for i in range(len(position_ids)):
+ position_ids[i] = position_ids[i][:, -(num_tokens_for_img+1):]
+ else:
+ position_ids = position_ids[:, -(num_tokens_for_img+1):]
+ return position_ids
+
+ def crop_attention_mask_for_cache(self, attention_mask, num_tokens_for_img):
+ if isinstance(attention_mask, list):
+ return [x[..., -(num_tokens_for_img+1):, :] for x in attention_mask]
+ return attention_mask[..., -(num_tokens_for_img+1):, :]
+
+ def crop_cache(self, cache, num_tokens_for_img):
+ for i in range(len(cache.key_cache)):
+ cache.key_cache[i] = cache.key_cache[i][..., :-(num_tokens_for_img+1), :]
+ cache.value_cache[i] = cache.value_cache[i][..., :-(num_tokens_for_img+1), :]
+
+ return cache
+
+ def __call__(self, z, func, model_kwargs, use_kv_cache: bool=True, offload_kv_cache: bool=True):
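+ # with patch size 2, the output image occupies (H/2) * (W/2) = H*W/4 tokens of the sequence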
+ num_tokens_for_img = z.size(-1)*z.size(-2) // 4
+ if isinstance(model_kwargs['input_ids'], list):
+ cache = [OmniGenCache(num_tokens_for_img, offload_kv_cache) for _ in range(len(model_kwargs['input_ids']))] if use_kv_cache else None
+ else:
+ cache = OmniGenCache(num_tokens_for_img, offload_kv_cache) if use_kv_cache else None
+ for i in tqdm(range(self.num_steps)):
+ timesteps = torch.zeros(size=(len(z), )).to(z.device) + self.sigma[i]
+ pred, cache = func(z, timesteps, past_key_values=cache, **model_kwargs)
+ sigma_next = self.sigma[i+1]
+ sigma = self.sigma[i]
+ z = z + (sigma_next - sigma) * pred
+ if i == 0 and use_kv_cache:
+ num_tokens_for_img = z.size(-1)*z.size(-2) // 4
+ if isinstance(cache, list):
+ model_kwargs['input_ids'] = [None] * len(cache)
+ else:
+ model_kwargs['input_ids'] = None
+
+ model_kwargs['position_ids'] = self.crop_position_ids_for_cache(model_kwargs['position_ids'], num_tokens_for_img)
+ model_kwargs['attention_mask'] = self.crop_attention_mask_for_cache(model_kwargs['attention_mask'], num_tokens_for_img)
+
+ del cache
+ torch.cuda.empty_cache()
+ gc.collect()
+ return z
\ No newline at end of file
diff --git a/src/diffusers/utils/omnigen_processor.py b/src/diffusers/utils/omnigen_processor.py
new file mode 100644
index 000000000000..7c519b69186b
--- /dev/null
+++ b/src/diffusers/utils/omnigen_processor.py
@@ -0,0 +1,338 @@
+import os
+import re
+from typing import Dict, List
+import json
+
+import torch
+import numpy as np
+import random
+from PIL import Image
+from torchvision import transforms
+from transformers import AutoTokenizer
+from huggingface_hub import snapshot_download
+
+from diffusers.utils.omnigen_utils import crop_arr
+
+
+
+
+class OmniGenProcessor:
+ def __init__(self,
+ text_tokenizer,
+ max_image_size: int=1024):
+ self.text_tokenizer = text_tokenizer
+ self.max_image_size = max_image_size
+
+ self.image_transform = transforms.Compose([
+ transforms.Lambda(lambda pil_image: crop_arr(pil_image, max_image_size)),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
+ ])
+
+ self.collator = OmniGenCollator()
+ self.separate_collator = OmniGenSeparateCollator()
+
+ @classmethod
+ def from_pretrained(cls, model_name):
+ if not os.path.exists(model_name):
+ cache_folder = os.getenv('HF_HUB_CACHE')
+ model_name = snapshot_download(repo_id=model_name,
+ cache_dir=cache_folder,
+ allow_patterns="*.json")
+ text_tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ return cls(text_tokenizer)
+
+
+ def process_image(self, image):
+ image = Image.open(image).convert('RGB')
+ return self.image_transform(image)
+
+ def process_multi_modal_prompt(self, text, input_images):
+ text = self.add_prefix_instruction(text)
+ if input_images is None or len(input_images) == 0:
+ model_inputs = self.text_tokenizer(text)
+ return {"input_ids": model_inputs.input_ids, "pixel_values": None, "image_sizes": None}
+
+ pattern = r"<\|image_\d+\|>"
+ prompt_chunks = [self.text_tokenizer(chunk).input_ids for chunk in re.split(pattern, text)]
+
+ for i in range(1, len(prompt_chunks)):
+ if prompt_chunks[i][0] == 1:
+ prompt_chunks[i] = prompt_chunks[i][1:]
+
+ image_tags = re.findall(pattern, text)
+ image_ids = [int(s.split("|")[1].split("_")[-1]) for s in image_tags]
+
+ unique_image_ids = sorted(list(set(image_ids)))
+ assert unique_image_ids == list(range(1, len(unique_image_ids)+1)), f"image_ids must be consecutive integers starting from 1, e.g. [1, 2, 3]; got {unique_image_ids}"
+ # total images must be the same as the number of image tags
+ assert len(unique_image_ids) == len(input_images), f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images"
+
+ input_images = [input_images[x-1] for x in image_ids]
+
+ all_input_ids = []
+ img_inx = []
+ idx = 0
+ for i in range(len(prompt_chunks)):
+ all_input_ids.extend(prompt_chunks[i])
+ if i != len(prompt_chunks) -1:
+ start_inx = len(all_input_ids)
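+ # reserve one placeholder token per 16x16 pixel patch of the input image (8x VAE downsampling times patch size 2)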
+ size = input_images[i].size(-2) * input_images[i].size(-1) // 16 // 16
+ img_inx.append([start_inx, start_inx+size])
+ all_input_ids.extend([0]*size)
+
+ return {"input_ids": all_input_ids, "pixel_values": input_images, "image_sizes": img_inx}
+
+
+ def add_prefix_instruction(self, prompt):
+ user_prompt = '<|user|>\n'
+ generation_prompt = 'Generate an image according to the following instructions\n'
+ assistant_prompt = '<|assistant|>\n<|diffusion|>'
+ prompt_suffix = "<|end|>\n"
+ prompt = f"{user_prompt}{generation_prompt}{prompt}{prompt_suffix}{assistant_prompt}"
+ return prompt
+
+
+ def __call__(self,
+ instructions: List[str],
+ input_images: List[List[str]] = None,
+ height: int = 1024,
+ width: int = 1024,
+ negative_prompt: str = "low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers.",
+ use_img_cfg: bool = True,
+ separate_cfg_input: bool = False,
+ use_input_image_size_as_output: bool=False,
+ ) -> Dict:
+
+ if input_images is None:
+ use_img_cfg = False
+ if isinstance(instructions, str):
+ instructions = [instructions]
+ input_images = [input_images]
+
+ input_data = []
+ for i in range(len(instructions)):
+ cur_instruction = instructions[i]
+ cur_input_images = None if input_images is None else input_images[i]
+ if cur_input_images is not None and len(cur_input_images) > 0:
+ cur_input_images = [self.process_image(x) for x in cur_input_images]
+ else:
+ cur_input_images = None
+ assert "
<|image_1|>" not in cur_instruction
+
+ mllm_input = self.process_multi_modal_prompt(cur_instruction, cur_input_images)
+
+
+ neg_mllm_input, img_cfg_mllm_input = None, None
+ neg_mllm_input = self.process_multi_modal_prompt(negative_prompt, None)
+ if use_img_cfg:
+ if cur_input_images is not None and len(cur_input_images) >= 1:
+ img_cfg_prompt = [f"
<|image_{i+1}|>" for i in range(len(cur_input_images))]
+ img_cfg_mllm_input = self.process_multi_modal_prompt(" ".join(img_cfg_prompt), cur_input_images)
+ else:
+ img_cfg_mllm_input = neg_mllm_input
+
+ if use_input_image_size_as_output:
+ input_data.append((mllm_input, neg_mllm_input, img_cfg_mllm_input, [mllm_input['pixel_values'][0].size(-2), mllm_input['pixel_values'][0].size(-1)]))
+ else:
+ input_data.append((mllm_input, neg_mllm_input, img_cfg_mllm_input, [height, width]))
+
+ if separate_cfg_input:
+ return self.separate_collator(input_data)
+ return self.collator(input_data)
+
+
+
+
+class OmniGenCollator:
+ def __init__(self, pad_token_id=2, hidden_size=3072):
+ self.pad_token_id = pad_token_id
+ self.hidden_size = hidden_size
+
+ def create_position(self, attention_mask, num_tokens_for_output_images):
+ position_ids = []
+ text_length = attention_mask.size(-1)
+ img_length = max(num_tokens_for_output_images)
+ for mask in attention_mask:
+ temp_l = torch.sum(mask)
+ temp_position = [0]*(text_length-temp_l) + [i for i in range(temp_l+img_length+1)] # we add a time embedding into the sequence, so add one more token
+ position_ids.append(temp_position)
+ return torch.LongTensor(position_ids)
+
+ def create_mask(self, attention_mask, num_tokens_for_output_images):
+ extended_mask = []
+ padding_images = []
+ text_length = attention_mask.size(-1)
+ img_length = max(num_tokens_for_output_images)
+ seq_len = text_length + img_length + 1 # we add a time embedding into the sequence, so add one more token
+ inx = 0
+ for mask in attention_mask:
+ temp_l = torch.sum(mask)
+ pad_l = text_length - temp_l
+
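+ # prompt tokens and the time token use a causal mask and cannot see the output-image tokens;
+ # output-image tokens attend to the full sequence (prompt, time token and each other)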
+ temp_mask = torch.tril(torch.ones(size=(temp_l+1, temp_l+1)))
+
+ image_mask = torch.zeros(size=(temp_l+1, img_length))
+ temp_mask = torch.cat([temp_mask, image_mask], dim=-1)
+
+ image_mask = torch.ones(size=(img_length, temp_l+img_length+1))
+ temp_mask = torch.cat([temp_mask, image_mask], dim=0)
+
+ if pad_l > 0:
+ pad_mask = torch.zeros(size=(temp_l+1+img_length, pad_l))
+ temp_mask = torch.cat([pad_mask, temp_mask], dim=-1)
+
+ pad_mask = torch.ones(size=(pad_l, seq_len))
+ temp_mask = torch.cat([pad_mask, temp_mask], dim=0)
+
+ true_img_length = num_tokens_for_output_images[inx]
+ pad_img_length = img_length - true_img_length
+ if pad_img_length > 0:
+ temp_mask[:, -pad_img_length:] = 0
+ temp_padding_imgs = torch.zeros(size=(1, pad_img_length, self.hidden_size))
+ else:
+ temp_padding_imgs = None
+
+ extended_mask.append(temp_mask.unsqueeze(0))
+ padding_images.append(temp_padding_imgs)
+ inx += 1
+ return torch.cat(extended_mask, dim=0), padding_images
+
+ def adjust_attention_for_input_images(self, attention_mask, image_sizes):
+ for b_inx in image_sizes.keys():
+ for start_inx, end_inx in image_sizes[b_inx]:
+ attention_mask[b_inx][start_inx:end_inx, start_inx:end_inx] = 1
+
+ return attention_mask
+
+ def pad_input_ids(self, input_ids, image_sizes):
+ max_l = max([len(x) for x in input_ids])
+ padded_ids = []
+ attention_mask = []
+ new_image_sizes = []
+
+ for i in range(len(input_ids)):
+ temp_ids = input_ids[i]
+ temp_l = len(temp_ids)
+ pad_l = max_l - temp_l
+ if pad_l == 0:
+ attention_mask.append([1]*max_l)
+ padded_ids.append(temp_ids)
+ else:
+ attention_mask.append([0]*pad_l+[1]*temp_l)
+ padded_ids.append([self.pad_token_id]*pad_l+temp_ids)
+
+ if i in image_sizes:
+ new_inx = []
+ for old_inx in image_sizes[i]:
+ new_inx.append([x+pad_l for x in old_inx])
+ image_sizes[i] = new_inx
+
+ return torch.LongTensor(padded_ids), torch.LongTensor(attention_mask), image_sizes
+
+
+ def process_mllm_input(self, mllm_inputs, target_img_size):
+ num_tokens_for_output_images = []
+ for img_size in target_img_size:
+ num_tokens_for_output_images.append(img_size[0]*img_size[1]//16//16)
+
+ pixel_values, image_sizes = [], {}
+ b_inx = 0
+ for x in mllm_inputs:
+ if x['pixel_values'] is not None:
+ pixel_values.extend(x['pixel_values'])
+ for size in x['image_sizes']:
+ if b_inx not in image_sizes:
+ image_sizes[b_inx] = [size]
+ else:
+ image_sizes[b_inx].append(size)
+ b_inx += 1
+ pixel_values = [x.unsqueeze(0) for x in pixel_values]
+
+
+ input_ids = [x['input_ids'] for x in mllm_inputs]
+ padded_input_ids, attention_mask, image_sizes = self.pad_input_ids(input_ids, image_sizes)
+ position_ids = self.create_position(attention_mask, num_tokens_for_output_images)
+ attention_mask, padding_images = self.create_mask(attention_mask, num_tokens_for_output_images)
+ attention_mask = self.adjust_attention_for_input_images(attention_mask, image_sizes)
+
+ return padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes
+
+
+ def __call__(self, features):
+ mllm_inputs = [f[0] for f in features]
+ cfg_mllm_inputs = [f[1] for f in features]
+ img_cfg_mllm_input = [f[2] for f in features]
+ target_img_size = [f[3] for f in features]
+
+
+ if img_cfg_mllm_input[0] is not None:
+ mllm_inputs = mllm_inputs + cfg_mllm_inputs + img_cfg_mllm_input
+ target_img_size = target_img_size + target_img_size + target_img_size
+ else:
+ mllm_inputs = mllm_inputs + cfg_mllm_inputs
+ target_img_size = target_img_size + target_img_size
+
+
+ all_padded_input_ids, all_position_ids, all_attention_mask, all_padding_images, all_pixel_values, all_image_sizes = self.process_mllm_input(mllm_inputs, target_img_size)
+
+ data = {"input_ids": all_padded_input_ids,
+ "attention_mask": all_attention_mask,
+ "position_ids": all_position_ids,
+ "input_pixel_values": all_pixel_values,
+ "input_image_sizes": all_image_sizes,
+ "padding_images": all_padding_images,
+ }
+ return data
+
+
+class OmniGenSeparateCollator(OmniGenCollator):
+ def __call__(self, features):
+ mllm_inputs = [f[0] for f in features]
+ cfg_mllm_inputs = [f[1] for f in features]
+ img_cfg_mllm_input = [f[2] for f in features]
+ target_img_size = [f[3] for f in features]
+
+ all_padded_input_ids, all_attention_mask, all_position_ids, all_pixel_values, all_image_sizes, all_padding_images = [], [], [], [], [], []
+
+
+ padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes = self.process_mllm_input(mllm_inputs, target_img_size)
+ all_padded_input_ids.append(padded_input_ids)
+ all_attention_mask.append(attention_mask)
+ all_position_ids.append(position_ids)
+ all_pixel_values.append(pixel_values)
+ all_image_sizes.append(image_sizes)
+ all_padding_images.append(padding_images)
+
+ if cfg_mllm_inputs[0] is not None:
+ padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes = self.process_mllm_input(cfg_mllm_inputs, target_img_size)
+ all_padded_input_ids.append(padded_input_ids)
+ all_attention_mask.append(attention_mask)
+ all_position_ids.append(position_ids)
+ all_pixel_values.append(pixel_values)
+ all_image_sizes.append(image_sizes)
+ all_padding_images.append(padding_images)
+ if img_cfg_mllm_input[0] is not None:
+ padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes = self.process_mllm_input(img_cfg_mllm_input, target_img_size)
+ all_padded_input_ids.append(padded_input_ids)
+ all_attention_mask.append(attention_mask)
+ all_position_ids.append(position_ids)
+ all_pixel_values.append(pixel_values)
+ all_image_sizes.append(image_sizes)
+ all_padding_images.append(padding_images)
+
+ data = {"input_ids": all_padded_input_ids,
+ "attention_mask": all_attention_mask,
+ "position_ids": all_position_ids,
+ "input_pixel_values": all_pixel_values,
+ "input_image_sizes": all_image_sizes,
+ "padding_images": all_padding_images,
+ }
+ return data
\ No newline at end of file
diff --git a/src/diffusers/utils/omnigen_utils.py b/src/diffusers/utils/omnigen_utils.py
new file mode 100644
index 000000000000..1cc9d2cc706e
--- /dev/null
+++ b/src/diffusers/utils/omnigen_utils.py
@@ -0,0 +1,109 @@
+import logging
+
+from PIL import Image
+import torch
+import numpy as np
+
+def create_logger(logging_dir):
+ """
+ Create a logger that writes to a log file and stdout.
+ """
+ logging.basicConfig(
+ level=logging.INFO,
+ format='[\033[34m%(asctime)s\033[0m] %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ handlers=[logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")]
+ )
+ logger = logging.getLogger(__name__)
+ return logger
+
+
+@torch.no_grad()
+def update_ema(ema_model, model, decay=0.9999):
+ """
+ Step the EMA model towards the current model.
+ """
+ ema_params = dict(ema_model.named_parameters())
+ for name, param in model.named_parameters():
+ # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed
+ ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
+
+
+
+
+def requires_grad(model, flag=True):
+ """
+ Set requires_grad flag for all parameters in a model.
+ """
+ for p in model.parameters():
+ p.requires_grad = flag
+
+
+def center_crop_arr(pil_image, image_size):
+ """
+ Center cropping implementation from ADM.
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
+ """
+ while min(*pil_image.size) >= 2 * image_size:
+ pil_image = pil_image.resize(
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
+ )
+
+ scale = image_size / min(*pil_image.size)
+ pil_image = pil_image.resize(
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
+ )
+
+ arr = np.array(pil_image)
+ crop_y = (arr.shape[0] - image_size) // 2
+ crop_x = (arr.shape[1] - image_size) // 2
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
+
+
+
+def crop_arr(pil_image, max_image_size):
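+ """Downscale so the longest side is at most max_image_size (and the shortest at least 16 px), then center-crop so both sides are multiples of 16."""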
+ while min(*pil_image.size) >= 2 * max_image_size:
+ pil_image = pil_image.resize(
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
+ )
+
+ if max(*pil_image.size) > max_image_size:
+ scale = max_image_size / max(*pil_image.size)
+ pil_image = pil_image.resize(
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
+ )
+
+ if min(*pil_image.size) < 16:
+ scale = 16 / min(*pil_image.size)
+ pil_image = pil_image.resize(
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
+ )
+
+ arr = np.array(pil_image)
+ crop_y1 = (arr.shape[0] % 16) // 2
+ crop_y2 = arr.shape[0] % 16 - crop_y1
+
+ crop_x1 = (arr.shape[1] % 16) // 2
+ crop_x2 = arr.shape[1] % 16 - crop_x1
+
+ arr = arr[crop_y1:arr.shape[0]-crop_y2, crop_x1:arr.shape[1]-crop_x2]
+ return Image.fromarray(arr)
+
+
+
+def vae_encode(vae, x, weight_dtype):
+ if x is not None:
+ if vae.config.shift_factor is not None:
+ x = vae.encode(x).latent_dist.sample()
+ x = (x - vae.config.shift_factor) * vae.config.scaling_factor
+ else:
+ x = vae.encode(x).latent_dist.sample().mul_(vae.config.scaling_factor)
+ x = x.to(weight_dtype)
+ return x
+
+def vae_encode_list(vae, x, weight_dtype):
+ latents = []
+ for img in x:
+ img = vae_encode(vae, img, weight_dtype)
+ latents.append(img)
+ return latents