@@ -48,28 +48,54 @@
         >>> from diffusers import QwenImageControlNetModel, QwenImageMultiControlNetModel, QwenImageControlNetPipeline
 
         >>> # QwenImageControlNetModel
-        >>> controlnet = QwenImageControlNetModel.from_pretrained("InstantX/Qwen-Image-ControlNet-Union", torch_dtype=torch.bfloat16)
-        >>> pipe = QwenImageControlNetPipeline.from_pretrained("Qwen/Qwen-Image", controlnet=controlnet, torch_dtype=torch.bfloat16)
+        >>> controlnet = QwenImageControlNetModel.from_pretrained(
+        ...     "InstantX/Qwen-Image-ControlNet-Union", torch_dtype=torch.bfloat16
+        ... )
+        >>> pipe = QwenImageControlNetPipeline.from_pretrained(
+        ...     "Qwen/Qwen-Image", controlnet=controlnet, torch_dtype=torch.bfloat16
+        ... )
         >>> pipe.to("cuda")
         >>> prompt = "Aesthetics art, traditional asian pagoda, elaborate golden accents, sky blue and white color palette, swirling cloud pattern, digital illustration, east asian architecture, ornamental rooftop, intricate detailing on building, cultural representation."
         >>> negative_prompt = " "
-        >>> control_image = load_image("https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png")
+        >>> control_image = load_image(
+        ...     "https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png"
+        ... )
         >>> # Depending on the variant being used, the pipeline call will slightly vary.
         >>> # Refer to the pipeline documentation for more details.
-        >>> image = pipe(prompt, negative_prompt=negative_prompt, control_image=control_image, controlnet_conditioning_scale=1.0, num_inference_steps=30, true_cfg_scale=4.0).images[0]
+        >>> image = pipe(
+        ...     prompt,
+        ...     negative_prompt=negative_prompt,
+        ...     control_image=control_image,
+        ...     controlnet_conditioning_scale=1.0,
+        ...     num_inference_steps=30,
+        ...     true_cfg_scale=4.0,
+        ... ).images[0]
         >>> image.save("qwenimage_cn_union.png")
 
         >>> # QwenImageMultiControlNetModel
-        >>> controlnet = QwenImageControlNetModel.from_pretrained("InstantX/Qwen-Image-ControlNet-Union", torch_dtype=torch.bfloat16)
+        >>> controlnet = QwenImageControlNetModel.from_pretrained(
+        ...     "InstantX/Qwen-Image-ControlNet-Union", torch_dtype=torch.bfloat16
+        ... )
         >>> controlnet = QwenImageMultiControlNetModel([controlnet])
-        >>> pipe = QwenImageControlNetPipeline.from_pretrained("Qwen/Qwen-Image", controlnet=controlnet, torch_dtype=torch.bfloat16)
+        >>> pipe = QwenImageControlNetPipeline.from_pretrained(
+        ...     "Qwen/Qwen-Image", controlnet=controlnet, torch_dtype=torch.bfloat16
+        ... )
         >>> pipe.to("cuda")
         >>> prompt = "Aesthetics art, traditional asian pagoda, elaborate golden accents, sky blue and white color palette, swirling cloud pattern, digital illustration, east asian architecture, ornamental rooftop, intricate detailing on building, cultural representation."
         >>> negative_prompt = " "
-        >>> control_image = load_image("https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png")
+        >>> control_image = load_image(
+        ...     "https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png"
+        ... )
         >>> # Depending on the variant being used, the pipeline call will slightly vary.
         >>> # Refer to the pipeline documentation for more details.
-        >>> image = pipe(prompt, negative_prompt=negative_prompt, control_image=[control_image, control_image], controlnet_conditioning_scale=[0.5, 0.5], num_inference_steps=30, true_cfg_scale=4.0).images[0]
+        >>> image = pipe(
+        ...     prompt,
+        ...     negative_prompt=negative_prompt,
+        ...     control_image=[control_image, control_image],
+        ...     controlnet_conditioning_scale=[0.5, 0.5],
+        ...     num_inference_steps=30,
+        ...     true_cfg_scale=4.0,
+        ... ).images[0]
         >>> image.save("qwenimage_cn_union_multi.png")
         ```
 """