         >>> image.save("qwenimage_edit.png")
         ```
 """
-PREFERRED_QWENIMAGE_RESOLUTIONS = [
-    (672, 1568),
-    (688, 1504),
-    (720, 1456),
-    (752, 1392),
-    (800, 1328),
-    (832, 1248),
-    (880, 1184),
-    (944, 1104),
-    (1024, 1024),
-    (1104, 944),
-    (1184, 880),
-    (1248, 832),
-    (1328, 800),
-    (1392, 752),
-    (1456, 720),
-    (1504, 688),
-    (1568, 672),
-]
 
 
 # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
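For reference, the removed `_auto_resize` path (deleted in the hunk below) picked the preferred bucket whose aspect ratio best matched the input image. A minimal standalone sketch of that selection logic, reproduced from the deleted lines and not part of this diff:

```python
# Sketch of the removed aspect-ratio bucketing, for reference only.
PREFERRED_QWENIMAGE_RESOLUTIONS = [
    (672, 1568), (688, 1504), (720, 1456), (752, 1392), (800, 1328),
    (832, 1248), (880, 1184), (944, 1104), (1024, 1024), (1104, 944),
    (1184, 880), (1248, 832), (1328, 800), (1392, 752), (1456, 720),
    (1504, 688), (1568, 672),
]

def nearest_preferred_resolution(image_width: int, image_height: int) -> tuple[int, int]:
    aspect_ratio = image_width / image_height
    # min over (|ratio difference|, w, h) tuples: the smallest ratio
    # difference wins, exactly as in the removed block.
    _, width, height = min(
        (abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED_QWENIMAGE_RESOLUTIONS
    )
    return width, height
```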
@@ -685,17 +666,9 @@ def __call__(
         # 3. Preprocess image
         if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels):
             img = image[0] if isinstance(image, list) else image
-            image_height, image_width = self.image_processor.get_default_height_width(img)
-            aspect_ratio = image_width / image_height
-            if _auto_resize:
-                _, image_width, image_height = min(
-                    (abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED_QWENIMAGE_RESOLUTIONS
-                )
-            image_width = image_width // multiple_of * multiple_of
-            image_height = image_height // multiple_of * multiple_of
-            image = self.image_processor.resize(image, image_height, image_width)
+            image = self.image_processor.resize(image, calculated_height, calculated_width)
             prompt_image = image
-            image = self.image_processor.preprocess(image, image_height, image_width)
+            image = self.image_processor.preprocess(image, calculated_height, calculated_width)
             image = image.unsqueeze(2)
 
         has_neg_prompt = negative_prompt is not None or (
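Note that `calculated_height` and `calculated_width` are not defined inside this hunk; presumably they are now computed once, earlier in `__call__`, before preprocessing. A hypothetical sketch of that kind of up-front computation; the helper name and `multiple_of` default are assumptions, not taken from the diff:

```python
# Hypothetical helper, not from this diff: derive target dimensions once so
# the preprocessing block can reuse them. The floor-to-multiple step mirrors
# the `// multiple_of * multiple_of` pattern the removed code used.
def compute_dimensions(image_height: int, image_width: int, multiple_of: int = 16) -> tuple[int, int]:
    calculated_height = image_height // multiple_of * multiple_of
    calculated_width = image_width // multiple_of * multiple_of
    return calculated_height, calculated_width
```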
@@ -741,7 +714,7 @@ def __call__(
         img_shapes = [
             [
                 (1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2),
-                (1, image_height // self.vae_scale_factor // 2, image_width // self.vae_scale_factor // 2),
+                (1, calculated_height // self.vae_scale_factor // 2, calculated_width // self.vae_scale_factor // 2),
             ]
         ] * batch_size
 
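As a sanity check on the updated `img_shapes` entry: each tuple reads as (frames, packed_height, packed_width), since the VAE downsamples each spatial dimension by `vae_scale_factor` and the transformer then packs 2x2 latent patches, halving each dimension again. A quick worked example, assuming the usual `vae_scale_factor` of 8 (check `pipe.vae_scale_factor` for the real value):

```python
# Worked example with assumed values; not part of the diff.
vae_scale_factor = 8
height = width = 1024                        # target output size
calculated_height = calculated_width = 1024  # preprocessed source image size

# VAE reduces each spatial dim by vae_scale_factor; 2x2 packing halves it again.
print((1, height // vae_scale_factor // 2, width // vae_scale_factor // 2))
# -> (1, 64, 64)
print((1, calculated_height // vae_scale_factor // 2, calculated_width // vae_scale_factor // 2))
# -> (1, 64, 64)
```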