|
| 1 | +import io |
| 2 | +import os |
| 3 | +import httpx |
| 4 | +import uuid |
| 5 | +import pathlib |
| 6 | +import yaml |
| 7 | + |
| 8 | +import gradio as gr |
| 9 | +from pydantic import BaseModel, HttpUrl |
| 10 | +from PIL import Image, ExifTags |
| 11 | +from typing import List |
| 12 | +from urllib.parse import urljoin |
| 13 | + |
| 14 | + |
class Model(BaseModel):
    """One inference backend: a display name and the base URL it serves on."""
    name: str  # shown in the model dropdown; also the key into MODELS
    address: HttpUrl  # base URL of the backend's HTTP API (validated by pydantic)
| 18 | + |
class AppSettings(BaseModel):
    """Top-level schema for gradio_config.yaml, validated at startup."""
    models: List[Model]  # at least one backend is expected; the first becomes the default
    example_prompt: str  # pre-filled value for the prompt textbox
| 22 | + |
| 23 | + |
# --- Application settings -------------------------------------------------
# Prefer the system-wide config; fall back to a local file for development.
settings_path = pathlib.Path("/etc/gradio-app/gradio_config.yaml")
if not settings_path.exists():
    print("No settings overrides found at", settings_path)
    # Fix: keep settings_path a pathlib.Path on both branches (the original
    # fallback assigned a plain str, making the variable's type inconsistent).
    settings_path = pathlib.Path("./gradio_config.yaml")
print("Using settings from", settings_path)
# Explicit encoding so the YAML parses identically regardless of locale.
with open(settings_path, "r", encoding="utf-8") as file:
    settings = AppSettings(**yaml.safe_load(file))
print("App config:", settings.model_dump())

# name -> backend base URL, used by generate_image(); order follows the config.
MODELS = {m.name: m.address for m in settings.models}
MODEL_NAMES = list(MODELS.keys())

# Disable analytics for GDPR compliance
# NOTE(review): gradio is imported above this assignment; gradio currently
# reads this env var at call time so this works, but setting it before
# `import gradio` is the safer documented pattern — confirm.
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
| 38 | + |
def save_image(model_name: str, prompt: str, seed: int, add_sampling_metadata: bool, image: Image.Image) -> str:
    """Save *image* as a high-quality JPEG under output/gradio/ with provenance EXIF tags.

    Args:
        model_name: Backend model that produced the image (stored in EXIF Model).
        prompt: Generation prompt; embedded only when add_sampling_metadata is True.
        seed: Sampling seed; embedded alongside the prompt.
        add_sampling_metadata: Whether to embed the sampling parameters in EXIF.
        image: The generated PIL image.

    Returns:
        Relative path of the saved JPEG file.
    """
    filename = f"output/gradio/{uuid.uuid4()}.jpg"
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    exif_data = Image.Exif()
    exif_data[ExifTags.Base.Software] = "AI generated;img2img;flux"
    exif_data[ExifTags.Base.Make] = "Black Forest Labs"
    exif_data[ExifTags.Base.Model] = model_name
    if add_sampling_metadata:
        # Fix: `seed` was accepted but never written anywhere; record it with
        # the prompt so the sampling parameters are actually reproducible.
        exif_data[ExifTags.Base.ImageDescription] = f"{prompt} (seed: {seed})"
    # subsampling=0 (4:4:4) + quality=95 keeps chroma detail in the output.
    image.save(filename, format="jpeg", exif=exif_data, quality=95, subsampling=0)
    return filename
| 50 | + |
| 51 | + |
async def generate_image(
    model_name: str,
    width: int,
    height: int,
    num_steps: int,
    guidance: float,
    seed: int,
    prompt: str,
    add_sampling_metadata: bool,
):
    """POST a generation request to the selected backend and save the result.

    Returns:
        (image, seed, filename, warning) — the decoded PIL image, the seed the
        backend reports in the x-flux-seed header ("unknown" if absent), the
        saved file path, and None for the warning output.

    Raises:
        gr.Error: Backend unreachable, backend-reported validation error, or
            any other non-2xx response (reported generically).
    """
    url = urljoin(str(MODELS[model_name]), "/generate")
    # Named `payload` (the original reused `data` for both the request body
    # and the parsed error response, shadowing one with the other).
    payload = {
        "width": width,
        "height": height,
        "num_steps": num_steps,
        "guidance": guidance,
        "seed": seed,
        "prompt": prompt,
        "add_sampling_metadata": add_sampling_metadata,
    }
    async with httpx.AsyncClient(timeout=60) as client:
        try:
            response = await client.post(url, json=payload)
        except httpx.ConnectError:
            raise gr.Error("Model backend unavailable")
        if response.status_code == 400:
            # Surface a structured backend validation error when one exists.
            # Fix: a 400 with a non-JSON body previously raised an unhandled
            # ValueError from response.json(); now it falls through to the
            # generic handler below.
            try:
                error_body = response.json()
            except ValueError:
                error_body = {}
            if "error" in error_body and "message" in error_body["error"]:
                message = error_body["error"]["message"]
                if "seed" in error_body["error"]:
                    message += f" (seed: {error_body['error']['seed']})"
                raise gr.Error(message)
        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as err:
            # Raise a generic error message to avoid leaking unwanted details
            # Admin should consult API logs for more info
            raise gr.Error(f"Backend error (HTTP {err.response.status_code})")
        image = Image.open(io.BytesIO(response.content))
        seed = response.headers.get("x-flux-seed", "unknown")
        filename = save_image(model_name, prompt, seed, add_sampling_metadata, image)

    return image, seed, filename, None
| 95 | + |
| 96 | + |
# Build the Gradio UI: inputs (model, prompt, advanced sampling options) on the
# left, outputs (image, seed, warning, download) on the right.
with gr.Blocks() as demo:
    gr.Markdown("# Flux Image Generation Demo")

    with gr.Row():
        with gr.Column():
            # Dropdown is read-only when the config lists a single model.
            model = gr.Dropdown(MODEL_NAMES, value=MODEL_NAMES[0], label="Model", interactive=len(MODEL_NAMES) > 1)
            prompt = gr.Textbox(label="Prompt", value=settings.example_prompt)

            with gr.Accordion("Advanced Options", open=False):
                # TODO: Make min/max slide values configurable
                width = gr.Slider(128, 8192, 1360, step=16, label="Width")
                height = gr.Slider(128, 8192, 768, step=16, label="Height")
                # NOTE(review): model.value is evaluated once at build time, so
                # these schnell-specific defaults do NOT update when the user
                # switches models in the dropdown — confirm whether a .change()
                # handler is wanted here.
                num_steps = gr.Slider(1, 50, 4 if model.value == "flux-schnell" else 50, step=1, label="Number of steps")
                guidance = gr.Slider(1.0, 10.0, 3.5, step=0.1, label="Guidance", interactive=not model.value == "flux-schnell")
                seed = gr.Textbox("-1", label="Seed (-1 for random)")
                add_sampling_metadata = gr.Checkbox(label="Add sampling parameters to metadata?", value=True)

            generate_btn = gr.Button("Generate")

        with gr.Column():
            output_image = gr.Image(label="Generated Image")
            seed_output = gr.Textbox(label="Used Seed")
            # Hidden by default; generate_image always returns None for it.
            warning_text = gr.Textbox(label="Warning", visible=False)
            download_btn = gr.File(label="Download full-resolution")

    # Wire the button to the async handler; output order must match
    # generate_image's 4-tuple return.
    generate_btn.click(
        fn=generate_image,
        inputs=[model, width, height, num_steps, guidance, seed, prompt, add_sampling_metadata],
        outputs=[output_image, seed_output, download_btn, warning_text],
    )
    # NOTE(review): launch() is called inside the Blocks context; the common
    # documented pattern launches after the `with` block exits — confirm this
    # placement is intentional.
    demo.launch(enable_monitoring=False)
0 commit comments