Hunyuan Video adjustments #11140
In `retrieve_timesteps`, passing custom `sigmas` to a scheduler whose `set_timesteps` does not accept them now prints a warning and falls back to plain timestep scheduling instead of raising a `ValueError`:

```diff
@@ -189,11 +189,14 @@ def retrieve_timesteps(
     elif sigmas is not None:
         accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
         if not accept_sigmas:
-            raise ValueError(
+            print(
                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
-                f" sigmas schedules. Please check whether you are using the correct scheduler."
+                f" sigmas schedules. Please check whether you are using the correct scheduler. The pipeline"
+                f" will continue without setting sigma values"
             )
-        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+            scheduler.set_timesteps(num_inference_steps, device=device)
+        else:
+            scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
         timesteps = scheduler.timesteps
         num_inference_steps = len(timesteps)
     else:
```
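A minimal, self-contained sketch of the new control flow, not code from this PR: `ToyScheduler` is a hypothetical stand-in for a scheduler whose `set_timesteps` has no `sigmas` parameter, which now takes the warning-and-fallback path.

```python
import inspect

class ToyScheduler:
    """Hypothetical scheduler: `set_timesteps` accepts no `sigmas` argument."""
    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = list(range(num_inference_steps, 0, -1))

scheduler = ToyScheduler()
sigmas, num_inference_steps = [1.0, 0.66, 0.33], 3

# Same capability check as the diff: inspect the scheduler's signature.
accept_sigmas = "sigmas" in inspect.signature(scheduler.set_timesteps).parameters
if not accept_sigmas:
    # Post-PR behavior: warn and continue without the custom sigma schedule.
    print(f"{scheduler.__class__} does not support custom sigmas; continuing without them")
    scheduler.set_timesteps(num_inference_steps, device="cpu")
else:
    scheduler.set_timesteps(sigmas=sigmas, device="cpu")

print(scheduler.timesteps)  # [3, 2, 1]
```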
In `__call__`, the spatio-temporal guidance (STG) patching of the selected transformer blocks is hoisted out of the denoising loop and now runs once, up front:

```diff
@@ -651,6 +654,12 @@ def __call__(
         self._attention_kwargs = attention_kwargs
         self._current_timestep = None
         self._interrupt = False
 
+        if self.do_spatio_temporal_guidance:
+            for i in stg_applied_layers_idx:
+                self.transformer.transformer_blocks[i].forward = types.MethodType(
+                    forward_with_stg, self.transformer.transformer_blocks[i]
+                )
+
         device = self._execution_device
 
```
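For readers unfamiliar with the rebinding technique the diff relies on: `types.MethodType(func, obj)` binds a plain function to a single instance, so only the selected blocks get the STG forward while every other block keeps the class's `forward`. A self-contained sketch with toy classes (the identity-style perturbed forward is an assumption for illustration; it is not this PR's `forward_with_stg`):

```python
import types

class TransformerBlock:
    def forward(self, hidden_states):
        return hidden_states + 1  # stand-in for the block's real computation

def forward_with_stg(self, hidden_states):
    # Assumed perturbed behavior for this sketch: skip the block entirely.
    return hidden_states

block_a, block_b = TransformerBlock(), TransformerBlock()

# Rebind `forward` on block_a only; block_b is untouched.
block_a.forward = types.MethodType(forward_with_stg, block_a)

print(block_a.forward(2))  # 2 -> patched instance uses forward_with_stg
print(block_b.forward(2))  # 3 -> other instances keep the class method
```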
Inside the denoising loop, the per-step swap back to the unperturbed forward before the main transformer pass is removed:

```diff
@@ -722,12 +731,6 @@ def __call__(
                 # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                 timestep = t.expand(latents.shape[0]).to(latents.dtype)
 
-                if self.do_spatio_temporal_guidance:
-                    for i in stg_applied_layers_idx:
-                        self.transformer.transformer_blocks[i].forward = types.MethodType(
-                            forward_without_stg, self.transformer.transformer_blocks[i]
-                        )
-
                 noise_pred = self.transformer(
                     hidden_states=latent_model_input,
                     timestep=timestep,
```

Review thread on this hunk:

**Reviewer:**
> Note: however, any results you have to share using this PR would be interesting, as it is using

**Author:**
> Ah, thanks for the catch, I'll update. I found that this implementation became scheduler-agnostic (or STG became the "scheduler"?). I'll test that a bit more and see what the exact side effects are.
Likewise, the re-patch to the perturbed forward before the second (perturbed) transformer pass is removed, since the blocks are already patched before the loop:

```diff
@@ -740,11 +743,6 @@ def __call__(
                 )[0]
 
-                if self.do_spatio_temporal_guidance:
-                    for i in stg_applied_layers_idx:
-                        self.transformer.transformer_blocks[i].forward = types.MethodType(
-                            forward_with_stg, self.transformer.transformer_blocks[i]
-                        )
 
                 noise_pred_perturb = self.transformer(
                     hidden_states=latent_model_input,
                     timestep=timestep,
```
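The author's reply above mentions checking for side effects. One consequence of hoisting the patch out of the loop is that the blocks stay patched after the call returns. A minimal, self-contained sketch of the common save-and-restore pattern for this kind of instance-level monkey-patching, using toy classes rather than diffusers code, and offered as an illustration, not as this PR's fix:

```python
import types

class Block:
    def forward(self, x):
        return x  # original behavior

def forward_with_stg(self, x):
    # Assumed stand-in for the perturbed forward bound onto selected blocks.
    return x + 1

blocks = [Block() for _ in range(3)]
stg_applied_layers_idx = [0, 2]

# Save the original bound methods, patch, then restore in `finally` so the
# blocks do not remain patched once the guided pass is done.
originals = {i: blocks[i].forward for i in stg_applied_layers_idx}
try:
    for i in stg_applied_layers_idx:
        blocks[i].forward = types.MethodType(forward_with_stg, blocks[i])
    print([b.forward(0) for b in blocks])  # [1, 0, 1] while patched
finally:
    for i in stg_applied_layers_idx:
        blocks[i].forward = originals[i]
print([b.forward(0) for b in blocks])  # [0, 0, 0] restored
```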