|
1 | 1 | # --- |
2 | 2 | # deploy: true |
3 | | -# cmd: ["modal", "serve", "06_gpu_and_ml/comfyui/comfyapp.py"] |
4 | 3 | # --- |
5 | 4 |
|
6 | 5 | # # Run Flux on ComfyUI as an API |
|
40 | 39 | import modal |
41 | 40 | import modal.experimental |
42 | 41 |
|
# aiohttp is used only by the local test client below (the `local_entrypoint`);
# tolerate its absence so the module still imports where it isn't installed —
# presumably inside the container image. TODO confirm against the image build.
try:
    import aiohttp
except ImportError:
    aiohttp = None

43 | 47 | image = ( # build up a Modal Image to run ComfyUI, step by step |
44 | 48 | modal.Image.debian_slim( # start from basic Linux with Python |
45 | 49 | python_version="3.11" |
@@ -103,7 +107,7 @@ def hf_download(): |
103 | 107 |
|
104 | 108 | image = ( |
105 | 109 | # install huggingface_hub with hf_xet support to speed up downloads |
106 | | - image.uv_pip_install("huggingface-hub==1.4.1") |
| 110 | + image.uv_pip_install("huggingface-hub==0.36.0") |
107 | 111 | .env({"HF_XET_HIGH_PERFORMANCE": "1"}) |
108 | 112 | .run_function( |
109 | 113 | hf_download, |
@@ -243,11 +247,48 @@ def poll_server_health(self) -> Dict: |
243 | 247 |
|
244 | 248 | #  |
245 | 249 |
|
246 | | -# ## More resources |
247 | | -# - Use [memory snapshots](https://modal.com/docs/guide/memory-snapshot) to speed up cold starts (check out the `memory_snapshot` directory on [Github](https://github.com/modal-labs/modal-examples/tree/main/06_gpu_and_ml/comfyui)) |
248 | | -# - Run a ComfyUI workflow as a [Python script](https://modal.com/blog/comfyui-prototype-to-production) |
| 250 | +# ## Testing the API |
| 251 | + |
| 252 | +# To test the API setup, we include a `local_entrypoint` that healthchecks the server |
| 253 | +# and then hits it with a test prompt. |
| 254 | + |
| 255 | +# If you execute the command |
| 256 | + |
| 257 | +# ```bash |
| 258 | +# modal run 06_gpu_and_ml/comfyui/comfyapp.py |
| 259 | +# ``` |
| 260 | + |
| 261 | +# a fresh replica of the ComfyUI server will be spun up on Modal while |
| 262 | +# the code below executes on your local machine. |
| 263 | + |
| 264 | + |
@app.local_entrypoint()
async def test():
    """Hit the deployed ComfyUI API with a test prompt and save the result.

    Executed locally via `modal run 06_gpu_and_ml/comfyui/comfyapp.py`: a fresh
    replica of the ComfyUI server is spun up on Modal while this code runs on
    your machine. The generated image is written under /tmp/comfyui.
    """
    import time

    url = await ComfyUI().api.get_web_url.aio()
    prompt = "Spider-Man visits Yosemite, rendered by Blender, trending on artstation"

    async with aiohttp.ClientSession(base_url=url) as session:
        print(f"Sending request to {url} with prompt: {prompt}")
        start_time = time.time()
        data = json.dumps({"prompt": prompt}).encode("utf-8")
        async with session.post(
            "/",
            data=data,
            headers={"Content-Type": "application/json"},
        ) as resp:
            resp.raise_for_status()
            img_bytes = await resp.read()
        elapsed = round(time.time() - start_time, 1)
        print(f"Image finished generating in {elapsed} seconds!")

    output_dir = Path("/tmp/comfyui")
    output_dir.mkdir(exist_ok=True, parents=True)
    filename = output_dir / f"{slugify(prompt)}.png"
    filename.write_bytes(img_bytes)
    # Bug fix: the original printed the literal placeholder "(unknown)" —
    # leaked file metadata — instead of the actual output path.
    print(f"Saved to '{filename}'")
249 | 291 |
|
250 | | -# - When to use [A1111 vs ComfyUI](https://modal.com/blog/a1111-vs-comfyui) |
251 | 292 |
|
252 | | -# - Understand tradeoffs of parallel processing strategies when |
253 | | -# [scaling ComfyUI](https://modal.com/blog/scaling-comfyui) |
def slugify(s: str) -> str:
    """Make *s* safe for use as a filename.

    Lowercases the string, swaps spaces, dots, and slashes for dashes,
    and truncates the result to 32 characters.
    """
    cleaned = s.lower()
    for forbidden in (" ", ".", "/"):
        cleaned = cleaned.replace(forbidden, "-")
    return cleaned[:32]
0 commit comments