Skip to content

Commit 8e7e272

Browse files
committed
chore: automatically install required dependencies, if not found
1 parent fe71ff0 commit 8e7e272

File tree

1 file changed

+106
-96
lines changed
  • python_coreml_stable_diffusion

1 file changed

+106
-96
lines changed

python_coreml_stable_diffusion/web.py

Lines changed: 106 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -1,97 +1,107 @@
1-
2-
import python_coreml_stable_diffusion.pipeline as pipeline
3-
4-
import gradio as gr
5-
from diffusers import StableDiffusionPipeline
6-
7-
def init(args):
8-
pipeline.logger.info("Initializing PyTorch pipe for reference configuration")
9-
pytorch_pipe = StableDiffusionPipeline.from_pretrained(args.model_version,
10-
use_auth_token=True)
11-
12-
user_specified_scheduler = None
13-
if args.scheduler is not None:
14-
user_specified_scheduler = pipeline.SCHEDULER_MAP[
15-
args.scheduler].from_config(pytorch_pipe.scheduler.config)
16-
17-
coreml_pipe = pipeline.get_coreml_pipe(pytorch_pipe=pytorch_pipe,
18-
mlpackages_dir=args.i,
19-
model_version=args.model_version,
20-
compute_unit=args.compute_unit,
21-
scheduler_override=user_specified_scheduler)
22-
23-
24-
def infer(prompt, steps):
25-
pipeline.logger.info("Beginning image generation.")
26-
image = coreml_pipe(
27-
prompt=prompt,
28-
height=coreml_pipe.height,
29-
width=coreml_pipe.width,
30-
num_inference_steps=steps,
31-
)
32-
images = []
33-
images.append(image["images"][0])
34-
return images
35-
36-
37-
demo = gr.Blocks()
38-
39-
with demo:
40-
gr.Markdown(
41-
"<center><h1>Core ML Stable Diffusion</h1>Run Stable Diffusion on Apple Silicon with Core ML</center>")
42-
with gr.Group():
43-
with gr.Box():
44-
with gr.Row():
45-
with gr.Column():
46-
with gr.Row():
47-
text = gr.Textbox(
48-
label="Prompt",
49-
lines=11,
50-
placeholder="Enter your prompt",
1+
try:
2+
import gradio as gr
3+
import python_coreml_stable_diffusion.pipeline as pipeline
4+
from diffusers import StableDiffusionPipeline
5+
6+
def init(args):
    """Build the Core ML Stable Diffusion pipeline and serve a Gradio web UI.

    Args:
        args: Parsed CLI namespace with attributes ``i`` (directory of
            .mlpackage files), ``model_version``, ``compute_unit`` and
            ``scheduler``.

    Does not return: blocks in ``demo.launch`` serving the interface.
    """
    pipeline.logger.info("Initializing PyTorch pipe for reference configuration")
    # The PyTorch pipeline is only loaded as a reference configuration
    # (tokenizer, scheduler config); actual inference runs on Core ML below.
    pytorch_pipe = StableDiffusionPipeline.from_pretrained(args.model_version,
                                                           use_auth_token=True)

    # Optional scheduler override; when None, the default scheduler from the
    # diffusers pipeline is used.
    user_specified_scheduler = None
    if args.scheduler is not None:
        user_specified_scheduler = pipeline.SCHEDULER_MAP[
            args.scheduler].from_config(pytorch_pipe.scheduler.config)

    coreml_pipe = pipeline.get_coreml_pipe(pytorch_pipe=pytorch_pipe,
                                           mlpackages_dir=args.i,
                                           model_version=args.model_version,
                                           compute_unit=args.compute_unit,
                                           scheduler_override=user_specified_scheduler)

    def infer(prompt, steps):
        """Generate one image for *prompt* using *steps* denoising steps.

        Returns a single-element list, as expected by ``gr.Gallery``.
        """
        pipeline.logger.info("Beginning image generation.")
        image = coreml_pipe(
            prompt=prompt,
            height=coreml_pipe.height,
            width=coreml_pipe.width,
            num_inference_steps=steps,
        )
        # The pipeline returns a dict with an "images" list; the gallery
        # output expects a list of images.
        return [image["images"][0]]

    demo = gr.Blocks()

    with demo:
        gr.Markdown(
            "<center><h1>Core ML Stable Diffusion</h1>Run Stable Diffusion on Apple Silicon with Core ML</center>")
        with gr.Group():
            with gr.Box():
                with gr.Row():
                    with gr.Column():
                        with gr.Row():
                            text = gr.Textbox(
                                label="Prompt",
                                lines=11,
                                placeholder="Enter your prompt",
                            )
                        with gr.Row():
                            btn = gr.Button("Generate image")
                        with gr.Row():
                            steps = gr.Slider(label="Steps", minimum=1,
                                              maximum=50, value=10, step=1)
                    with gr.Column():
                        gallery = gr.Gallery(
                            label="Generated image", elem_id="gallery"
                        )

        # Trigger generation both on Enter in the textbox and on button click.
        text.submit(infer, inputs=[text, steps], outputs=gallery)
        btn.click(infer, inputs=[text, steps], outputs=gallery)

    demo.launch(debug=True, server_name="0.0.0.0")
65+
66+
67+
if __name__ == "__main__":
    # Import argparse directly instead of reaching through another module's
    # namespace (the original used `pipeline.argparse.ArgumentParser()`).
    import argparse

    parser = argparse.ArgumentParser()

    # Directory of compiled Core ML model packages.
    parser.add_argument(
        "-i",
        required=True,
        help=("Path to input directory with the .mlpackage files generated by "
              "python_coreml_stable_diffusion.torch2coreml"))
    # Hugging Face model identifier used for the reference PyTorch pipeline.
    parser.add_argument(
        "--model-version",
        default="CompVis/stable-diffusion-v1-4",
        help=
        ("The pre-trained model checkpoint and configuration to restore. "
         "For available versions: https://huggingface.co/models?search=stable-diffusion"
         ))
    # Core ML compute units (e.g. CPU_ONLY, CPU_AND_GPU, ALL).
    parser.add_argument(
        "--compute-unit",
        choices=pipeline.get_available_compute_units(),
        default="ALL",
        help=("The compute units to be used when executing Core ML models. "
              f"Options: {pipeline.get_available_compute_units()}"))
    # Optional scheduler override; None keeps the diffusers default.
    parser.add_argument(
        "--scheduler",
        choices=tuple(pipeline.SCHEDULER_MAP.keys()),
        default=None,
        help=("The scheduler to use for running the reverse diffusion process. "
              "If not specified, the default scheduler from the diffusers pipeline is utilized"))

    args = parser.parse_args()
    init(args)
97+
98+
except ModuleNotFoundError as moduleNotFound:
99+
try:
100+
import subprocess
101+
import sys
102+
103+
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gradio'])
104+
print(f'Successfully installed package `gradio` automatically!')
105+
except subprocess.CalledProcessError:
106+
print(f'Automatic package installation failed, try manually executing `pip install gradio`')
107+
raise ModuleNotFoundError('package `gradio` not found, you can install via `pip install gradio`.') from moduleNotFound

0 commit comments

Comments
 (0)