 
         >>> # nesting_level=0 -> 64x64; nesting_level=1 -> 256x256 - 64x64; nesting_level=2 -> 1024x1024 - 256x256 - 64x64
         >>> pipe = DiffusionPipeline.from_pretrained("tolgacangoz/matryoshka-diffusion-models",
-        >>> custom_pipeline="matryoshka").to("cuda")
+        ...                                          nesting_level=0,
+        ...                                          trust_remote_code=False,  # One needs to give permission for this code to run
+        ...                                          ).to("cuda")
 
         >>> prompt0 = "a blue jay stops on the top of a helmet of Japanese samurai, background with sakura tree"
         >>> prompt = f"breathtaking {prompt0}. award-winning, professional, highly detailed"
-        >>> negative_prompt = "deformed, mutated, ugly, disfigured, blur, blurry, noise, noisy"
-        >>> image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=50).images
+        >>> image = pipe(prompt, num_inference_steps=50).images
         >>> make_image_grid(image, rows=1, cols=len(image))
 
-        >>> pipe.change_nesting_level(<int>)  # 0, 1, or 2
+        >>> # pipe.change_nesting_level(<int>)  # 0, 1, or 2
         >>> # 50+, 100+, and 250+ num_inference_steps are recommended for nesting levels 0, 1, and 2 respectively.
         ```
 """
@@ -420,6 +421,7 @@ def __init__(
         self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
 
         self.scales = None
+        self.schedule_shifted_power = 1.0
 
     def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
         """
@@ -532,6 +534,7 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
 
     def get_schedule_shifted(self, alpha_prod, scale_factor=None):
         if (scale_factor is not None) and (scale_factor > 1):  # rescale noise schedule
+            scale_factor = scale_factor**self.schedule_shifted_power
             snr = alpha_prod / (1 - alpha_prod)
             scaled_snr = snr / scale_factor
             alpha_prod = 1 / (1 + 1 / scaled_snr)
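
For reference, a minimal standalone sketch (assuming nothing beyond the arithmetic in the hunk above, not the library code itself) of what the new `schedule_shifted_power` does: the base schedule's SNR is divided by `scale_factor ** power`, so a power of 2.0 (used for nesting level 2 below) shifts the noise schedule more strongly than the default 1.0.

```python
def shifted_alpha_prod(alpha_prod: float, scale_factor: float, power: float = 1.0) -> float:
    # Mirrors the hunk above: rescale the noise schedule for larger resolutions
    if scale_factor is not None and scale_factor > 1:
        scale_factor = scale_factor**power
        snr = alpha_prod / (1 - alpha_prod)   # signal-to-noise ratio of the base schedule
        scaled_snr = snr / scale_factor       # divide SNR by the (powered) scale factor
        alpha_prod = 1 / (1 + 1 / scaled_snr)  # convert back to alpha_prod
    return alpha_prod

# Example: alpha_prod = 0.9 at some timestep, resolution scale factor 4
print(shifted_alpha_prod(0.9, 4, power=1.0))  # ~0.692 (previous behavior)
print(shifted_alpha_prod(0.9, 4, power=2.0))  # 0.36   (nesting level 2 after this commit)
```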
@@ -639,17 +642,14 @@ def step(
         # 4. Clip or threshold "predicted x_0"
         if self.config.thresholding:
             if len(model_output) > 1:
-                pred_original_sample = [
-                    self._threshold_sample(p_o_s * scale) / scale
-                    for p_o_s, scale in zip(pred_original_sample, self.scales)
-                ]
+                pred_original_sample = [self._threshold_sample(p_o_s) for p_o_s in pred_original_sample]
             else:
                 pred_original_sample = self._threshold_sample(pred_original_sample)
         elif self.config.clip_sample:
             if len(model_output) > 1:
                 pred_original_sample = [
-                    (p_o_s * scale).clamp(-self.config.clip_sample_range, self.config.clip_sample_range) / scale
-                    for p_o_s, scale in zip(pred_original_sample, self.scales)
+                    p_o_s.clamp(-self.config.clip_sample_range, self.config.clip_sample_range)
+                    for p_o_s in pred_original_sample
                 ]
             else:
                 pred_original_sample = pred_original_sample.clamp(
@@ -3816,6 +3816,8 @@ def __init__(
 
         if hasattr(unet, "nest_ratio"):
             scheduler.scales = unet.nest_ratio + [1]
+            if nesting_level == 2:
+                scheduler.schedule_shifted_power = 2.0
 
         self.register_modules(
             text_encoder=text_encoder,
@@ -3842,12 +3844,14 @@ def change_nesting_level(self, nesting_level: int):
             ).to(self.device)
             self.config.nesting_level = 1
             self.scheduler.scales = self.unet.nest_ratio + [1]
+            self.scheduler.schedule_shifted_power = 1.0
         elif nesting_level == 2:
             self.unet = NestedUNet2DConditionModel.from_pretrained(
                 "tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_2"
             ).to(self.device)
             self.config.nesting_level = 2
             self.scheduler.scales = self.unet.nest_ratio + [1]
+            self.scheduler.schedule_shifted_power = 2.0
         else:
             raise ValueError("Currently, nesting levels 0, 1, and 2 are supported.")
 
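
A hedged usage sketch of the behavior this hunk wires up, continuing from the docstring example above (`pipe`, `prompt`, and `make_image_grid` are assumed to already be defined there): switching to nesting level 2 reloads the `unet/nesting_level_2` weights, refreshes `scheduler.scales`, and now also sets `schedule_shifted_power` to 2.0.

```python
# Sketch only; names come from the docstring example earlier in this file.
pipe.change_nesting_level(2)  # reloads unet/nesting_level_2 and sets schedule_shifted_power = 2.0
image = pipe(prompt, num_inference_steps=250).images  # 250+ steps recommended for nesting level 2
make_image_grid(image, rows=1, cols=len(image))  # one image per nested resolution (1024, 256, 64)
```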
@@ -4627,8 +4631,8 @@ def __call__(
             image = latents
 
         if self.scheduler.scales is not None:
-            for i, (img, scale) in enumerate(zip(image, self.scheduler.scales)):
-                image[i] = self.image_processor.postprocess(img * scale, output_type=output_type)[0]
+            for i, img in enumerate(image):
+                image[i] = self.image_processor.postprocess(img, output_type=output_type)[0]
         else:
             image = self.image_processor.postprocess(image, output_type=output_type)
 