Skip to content
This repository was archived by the owner on May 20, 2025. It is now read-only.

Commit 8f7a0aa

Browse files
committed
use lambda to download models
1 parent bd4de2e commit 8f7a0aa

File tree

1 file changed

+47
-11
lines changed

1 file changed

+47
-11
lines changed

docs/guides/python/ai-podcast-part-1.mdx

Lines changed: 47 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,10 @@ Before we can deploy our project to the cloud we need to make a few changes. Fir
308308
This is why we added the models bucket and download topic initially. It's time to use them. Let's add the download topic subscriber and API endpoint to `services/api.py`.
309309

310310
```python title: services/api.py
311-
from common.resources import main_api, model_dir, cache_dir, zip_path, gen_audio_job, download_audio_model_topic, models_bucket
311+
from common.resources import (
312+
main_api, model_dir, cache_dir, zip_path,
313+
gen_audio_job, download_audio_model_topic, models_bucket
314+
)
312315
from nitric.application import Nitric
313316
from nitric.context import HttpContext, MessageContext
314317
from huggingface_hub import snapshot_download
@@ -328,16 +331,20 @@ async def do_download_audio_model(ctx: MessageContext):
328331
model_id: str = ctx.req.data["model_id"]
329332

330333
print(f"Downloading model to {model_dir}")
331-
dir = snapshot_download(model_id, local_dir=model_dir, cache_dir=cache_dir, allow_patterns=[
332-
"config.json",
333-
"generation_config.json",
334-
"pytorch_model.bin",
335-
"speaker_embeddings_path.json",
336-
"special_tokens_map.json",
337-
"tokenizer.json",
338-
"tokenizer_config.json",
339-
"vocab.txt"
340-
])
334+
dir = snapshot_download(model_id,
335+
local_dir=model_dir,
336+
cache_dir=cache_dir,
337+
allow_patterns=[
338+
"config.json",
339+
"generation_config.json",
340+
"pytorch_model.bin",
341+
"speaker_embeddings_path.json",
342+
"special_tokens_map.json",
343+
"tokenizer.json",
344+
"tokenizer_config.json",
345+
"vocab.txt"
346+
]
347+
)
341348

342349
print(f"Downloaded model to {dir}")
343350

@@ -368,6 +375,35 @@ async def download_audio(ctx: HttpContext):
368375
model_id = ctx.req.query.get("model", audio_model_id)
369376
# asynchronously download the model
370377
await download_audio_model.publish({ "model_id": model_id })
378+
379+
@main_api.post("/audio/:filename")
380+
async def submit_auto(ctx: HttpContext):
381+
name = ctx.req.params["filename"]
382+
model_id = ctx.req.query.get("model", audio_model_id)
383+
preset = ctx.req.query.get("preset", default_voice_preset)
384+
385+
if isinstance(model_id, list):
386+
model_id = model_id[0]
387+
388+
model_downloaded = await models.exists(f"{model_id}.zip")
389+
if not model_downloaded:
390+
ctx.res.status = 404
391+
ctx.res.body = f'model \'{model_id}\' hasn\'t been downloaded yet, call POST: /download-audio-model to pre-download the model'
392+
return
393+
394+
if isinstance(preset, list):
395+
preset = preset[0]
396+
397+
body = ctx.req.data
398+
if body is None:
399+
ctx.res.status = 400
400+
return
401+
402+
print(f"using preset {preset}")
403+
404+
await generate_audio.submit({"file": name, "model_id": model_id, "text": body.decode('utf-8'), "preset": preset})
405+
406+
Nitric.run()
371407
```
372408

373409
We'll also update our audio generation job to download the model from the bucket before processing the audio.

0 commit comments

Comments
 (0)