You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
# Colab form cell: choose a Stable Diffusion model (or supply a custom
# checkpoint URL below). The #@param / #@markdown comments are read by
# Colab to render the form widgets — do not edit their syntax.
import requests
model = 'v2-1_768-ema-pruned' #@param ["v2-1_512-ema-pruned", "v2-1_768-ema-pruned", "v1.5"]
#@markdown Optionally, specify your own checkpoint below. Make sure to select the correct model above.
url_ckpt = "" #@param {type:"string"}
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
Uh oh!
There was an error while loading. Please reload this page.
Uh oh!
There was an error while loading. Please reload this page.
Could someone turn this notebook into a script or extension?
https://colab.research.google.com/drive/1I77--5PS6C-sAskl9OggS1zR0HLKdq1M?usp=sharing#scrollTo=jgZQj-tE6GWW
it works really well
!pip install wget
import wget
import os
import requests
model = 'v2-1_768-ema-pruned' #@param ["v2-1_512-ema-pruned", "v2-1_768-ema-pruned", "v1.5"]
#@markdown Optionally, specify your own checkpoint below. Make sure to select the correct model above.
url_ckpt = "" #@param {type:"string"}
if len(url_ckpt) < 1:
if model == "v2-1_512-ema-pruned":
url_ckpt = "https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.ckpt"
fp_config = 'latentblending/configs/v2-inference.yaml'
elif model == "v2-1_768-ema-pruned":
url_ckpt = "https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt"
fp_config = 'latentblending/configs/v2-inference-v.yaml'
Check that the supplied URLs exist.
response = requests.head(url_ckpt)
if response.status_code != 200 and response.status_code != 302:
raise ValueError(f"url_ckpt could not be downloaded: {url_ckpt} gives {response.status_code}")
fp_ckpt = 'model.ckpt'
wget.download(url_ckpt, fp_ckpt)
assert os.path.isfile(fp_ckpt), "model download has failed."
if model == "v2-1_512-ema-pruned":
fp_config = 'latentblending/configs/v2-inference.yaml'
elif model == "v2-1_768-ema-pruned":
fp_config = 'latentblending/configs/v2-inference-v.yaml'
elif model == 'v1.5':
fp_config = 'latentblending/configs/v1-inference.yaml'
print(f"url_ckpt: {url_ckpt} fp_config {fp_config}")
installs
!pip install open-clip-torch
!pip install omegaconf
!pip install fastcore -U
!pip install Pillow
!pip install ffmpeg-python
!pip install einops
!pip install gradio
import os, sys
from subprocess import getoutput
Xformers
os.system("pip install --extra-index-url https://download.pytorch.org/whl/cu113 torch torchvision==0.13.1+cu113")
os.system("pip install triton==2.0.0.dev20220701")
gpu_info = getoutput('nvidia-smi')
if("A10G" in gpu_info):
os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl")
elif("T4" in gpu_info):
os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl")
!pip install pytorch_lightning
!pip install transformers
Get Latent Blending from git / pull
!git clone https://github.com/lunarring/latentblending
!cd latentblending; git pull; cd ..
sys.path.append("/content/latentblending")
# Imports — deduplicated: the original repeated `import torch` three times,
# `import warnings` twice, the gradio/frontend imports twice, and listed
# `LatentBlending` twice in one import statement.
import warnings
from typing import Callable, List, Optional, Union

import numpy as np
import torch
from tqdm.auto import tqdm
from PIL import Image
import gradio as gr

from latent_blending import (
    LatentBlending,
    add_frames_linear_interp,
    get_time,
    yml_save,
    compare_dicts,
)
from stable_diffusion_holder import StableDiffusionHolder
from gradio_ui import BlendingFrontend

warnings.filterwarnings('ignore')

# Inference only: no gradients needed; disable cudnn autotuning benchmark.
torch.set_grad_enabled(False)
torch.backends.cudnn.benchmark = False

#%% First let us spawn a stable diffusion holder
# fp_ckpt / fp_config come from the download cell above.
device = "cuda"
sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)

if __name__ == "__main__":
    # Bug fix: the source had `if name == "main":` — the dunder underscores
    # were stripped when the snippet was pasted.
    # NOTE(review): the guard body was truncated in the source; presumably it
    # constructed BlendingFrontend(sdh) and launched the gradio UI — restore
    # before running as a standalone script.
    pass
Beta Was this translation helpful? Give feedback.
All reactions