Commit 61aea50

committed: style
1 parent 582755c commit 61aea50

File tree: 1 file changed, +39 -37 lines

examples/community/pipeline_flux_rf_inversion.py

Lines changed: 39 additions & 37 deletions
@@ -591,29 +591,29 @@ def interrupt(self):
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
-        self,
-        prompt: Union[str, List[str]] = None,
-        prompt_2: Optional[Union[str, List[str]]] = None,
-        height: Optional[int] = None,
-        width: Optional[int] = None,
-        eta: float = 1.0,
-        timestep_offset: float = 0.6,
-        start_timestep: float = 0.,
-        stop_timestep: float = 0.25,
-        num_inference_steps: int = 28,
-        timesteps: List[int] = None,
-        guidance_scale: float = 3.5,
-        num_images_per_prompt: Optional[int] = 1,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
-        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
-        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
-        max_sequence_length: int = 512,
+        self,
+        prompt: Union[str, List[str]] = None,
+        prompt_2: Optional[Union[str, List[str]]] = None,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        eta: float = 1.0,
+        timestep_offset: float = 0.6,
+        start_timestep: float = 0.0,
+        stop_timestep: float = 0.25,
+        num_inference_steps: int = 28,
+        timesteps: List[int] = None,
+        guidance_scale: float = 3.5,
+        num_images_per_prompt: Optional[int] = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 512,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -853,19 +853,19 @@ def __call__(

     @torch.no_grad()
     def invert(
-        self,
-        image: PipelineImageInput,
-        source_prompt: str = "",
-        source_guidance_scale=0.0,
-        num_inversion_steps: int = 28,
-        timestep_offset: float = 0.6,
-        gamma: float = 0.5,
-        height: Optional[int] = None,
-        width: Optional[int] = None,
-        timesteps: List[int] = None,
-        dtype: Optional[torch.dtype] = None,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+        self,
+        image: PipelineImageInput,
+        source_prompt: str = "",
+        source_guidance_scale=0.0,
+        num_inversion_steps: int = 28,
+        timestep_offset: float = 0.6,
+        gamma: float = 0.5,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        timesteps: List[int] = None,
+        dtype: Optional[torch.dtype] = None,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
     ):
         r"""
         Performs Algorithm 1: Controlled Forward ODE from https://arxiv.org/pdf/2410.10792
@@ -932,7 +932,9 @@ def invert(
             sigmas,
             mu=mu,
         )
-        timesteps, sigmas, num_inversion_steps = self.get_timesteps(num_inversion_steps, timestep_offset=timestep_offset)
+        timesteps, sigmas, num_inversion_steps = self.get_timesteps(
+            num_inversion_steps, timestep_offset=timestep_offset
+        )

         # 3. prepare text embeddings
         (
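
The two methods touched by this commit are the pipeline's public RF-Inversion entry points: `invert` runs Algorithm 1 (the controlled forward ODE) on a source image, and `__call__` then performs the edited generation. The snippet below is only a minimal usage sketch inferred from the signatures shown in the diff; the base checkpoint, the `custom_pipeline` loading path, the example image and prompt, and the assumption that `invert` stores its result on the pipeline (rather than returning latents that must be passed explicitly to `__call__`) are all assumptions, not taken from this commit.

# Minimal usage sketch -- assumptions flagged in comments, not part of this commit.
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Assumption: the community file is loaded via `custom_pipeline`; the base
# checkpoint "black-forest-labs/FLUX.1-dev" is an example, not from the diff.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    custom_pipeline="pipeline_flux_rf_inversion",
    torch_dtype=torch.bfloat16,
).to("cuda")

init_image = load_image("https://example.com/source.jpg")  # placeholder image URL

# Algorithm 1: controlled forward ODE -- invert the source image.
# Parameter names and defaults mirror the `invert` signature in this diff.
pipe.invert(
    image=init_image,
    source_prompt="",
    source_guidance_scale=0.0,
    num_inversion_steps=28,
    timestep_offset=0.6,
    gamma=0.5,
)

# Edited generation. eta, start_timestep and stop_timestep appear in the
# `__call__` signature above and control the controlled reverse pass.
result = pipe(
    prompt="an example edit prompt",
    eta=1.0,
    start_timestep=0.0,
    stop_timestep=0.25,
    num_inference_steps=28,
    timestep_offset=0.6,
    guidance_scale=3.5,
)
result.images[0].save("edited.png")

If the pipeline instead returns inverted latents from `invert` and expects them as explicit arguments to `__call__`, the hand-off between the two calls would need to change accordingly.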
