forked from nunchaku-ai/nunchaku
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathflux.1-dev-IP-adapter.py
More file actions
43 lines (35 loc) · 1.43 KB
/
flux.1-dev-IP-adapter.py
File metadata and controls
43 lines (35 loc) · 1.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import torch
from diffusers import FluxPipeline
from diffusers.utils import load_image
from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.caching.diffusers_adapters import apply_cache_on_pipe
from nunchaku.models.ip_adapter.diffusers_adapters import apply_IPA_on_pipe
from nunchaku.utils import get_precision
# Pick the SVDQuant precision variant this GPU supports (per nunchaku's helper).
precision = get_precision()

# Load the SVDQuant-compressed FLUX.1-dev transformer from the nunchaku hub.
quantized_transformer = NunchakuFluxTransformer2dModel.from_pretrained(
    f"nunchaku-tech/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors"
)

# Assemble the diffusers pipeline around the quantized transformer and move it to GPU.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=quantized_transformer,
    torch_dtype=torch.bfloat16,
).to("cuda")

# Attach the XLabs IP-Adapter weights and its CLIP image encoder, then wire the
# adapter into the nunchaku transformer with the chosen conditioning strength.
pipe.load_ip_adapter(
    pretrained_model_name_or_path_or_dict="XLabs-AI/flux-ip-adapter-v2",
    weight_name="ip_adapter.safetensors",
    image_encoder_pretrained_model_name_or_path="openai/clip-vit-large-patch14",
)
apply_IPA_on_pipe(pipe, ip_adapter_scale=1.1, repo_id="XLabs-AI/flux-ip-adapter-v2")

# Enable first-block caching (separate thresholds for the multi- and single-stream
# transformer blocks) to skip redundant work across denoising steps.
apply_cache_on_pipe(
    pipe,
    use_double_fb_cache=True,
    residual_diff_threshold_multi=0.09,
    residual_diff_threshold_single=0.12,
)

# Reference image that drives the IP-Adapter conditioning; force RGB up front.
reference_image = load_image(
    "https://huggingface.co/datasets/nunchaku-tech/test-data/resolve/main/ComfyUI-nunchaku/inputs/monalisa.jpg"
).convert("RGB")

# Generate and save the result, tagging the filename with the precision used.
output = pipe(
    prompt="holding an sign saying 'SVDQuant is fast!'",
    ip_adapter_image=reference_image,
    num_inference_steps=50,
)
output.images[0].save(f"flux.1-dev-IP-adapter-{precision}.png")