Skip to content

Commit c4d1e69

Browse files
remove text_projector
1 parent b86bfd4 commit c4d1e69

File tree

2 files changed

+1
-5
lines changed

2 files changed

+1
-5
lines changed

src/diffusers/models/embeddings.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -859,7 +859,7 @@ def forward(
859859
prompt_embeds = self.text_proj(prompt_embeds) # embed_dim: 4096 -> 4096
860860
if negative_prompt_embeds is not None:
861861
negative_prompt_embeds = self.text_proj(negative_prompt_embeds) # embed_dim: 4096 -> 4096
862-
862+
breakpoint()
863863
return hidden_states, prompt_embeds, negative_prompt_embeds
864864

865865

src/diffusers/pipelines/cogview4/pipeline_cogview4.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,6 @@ def __init__(
174174
)
175175
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
176176
self.image_factor = 16
177-
self.text_projector = torch.nn.Linear(4096, 4096)
178177
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
179178

180179
def _get_glm_embeds(
@@ -218,9 +217,6 @@ def _get_glm_embeds(
218217
)
219218
text_input_ids = torch.cat([pad_ids, text_input_ids], dim=1)
220219
prompt_embeds = self.text_encoder(text_input_ids.to(self.text_encoder.model.device), output_hidden_states=True).hidden_states[-2]
221-
self.text_projector.to(dtype=dtype, device=device)
222-
prompt_embeds = self.text_projector(prompt_embeds)
223-
breakpoint()
224220
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
225221
_, seq_len, _= prompt_embeds.shape
226222
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)

0 commit comments

Comments (0)