Skip to content

Commit 1c60c9f

Browse files
Merge branch 'Samagra-Development:restructure' into restructure
2 parents c5fc7d7 + 85994e6 commit 1c60c9f

File tree

22 files changed

+338
-43
lines changed

22 files changed

+338
-43
lines changed

config.json

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,27 @@
11
{
22
"models": [
3+
{
4+
"serviceName": "asr_whisper_en",
5+
"modelBasePath": "src/asr/whisper_en/local.",
6+
"apiBasePath": "asr/whisper_en/local/",
7+
"containerPort": 8000,
8+
"environment": {},
9+
"nginx": [],
10+
"nginx": ["client_max_body_size 100M;", "proxy_read_timeout 600;", "proxy_connect_timeout 600;", "proxy_send_timeout 600;"],
11+
"constraints": ["node.labels.node_vm_type==gpu"],
12+
"build": true
13+
},
14+
{
15+
"serviceName": "asr_lang_detect",
16+
"modelBasePath": "src/asr/whisper_lang_rec/local.",
17+
"apiBasePath": "asr/whisper_lang_rec/local/",
18+
"containerPort": 8000,
19+
"environment": {},
20+
"nginx": [],
21+
"nginx": ["client_max_body_size 100M;", "proxy_read_timeout 600;", "proxy_connect_timeout 600;", "proxy_send_timeout 600;"],
22+
"constraints": ["node.labels.node_vm_type==gpu"],
23+
"build": true
24+
},
325
{
426
"serviceName": "ner",
527
"modelBasePath": "src/ner/agri_ner_akai/local/.",

src/asr/whisper_en/README.md

Whitespace-only changes.
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
# Base image: slim official Python runtime
FROM python:3.9-slim

# All application files live under /app
WORKDIR /app

# Install Python dependencies first so this layer is cached across code changes
COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt

# Bring in the application source
COPY . /app/

# The Quart app listens on port 8000
EXPOSE 8000

# Serve the app with hypercorn
CMD ["hypercorn", "--bind", "0.0.0.0:8000", "api:app"]

src/asr/whisper_en/local/README.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
### Testing the model deployment :
2+
To run for testing you can follow the following steps :
3+
4+
- Git clone the repo
5+
- Go to current folder location i.e. ``` cd src/asr/whisper_en/local ```
6+
- Create docker image file and test the api:
7+
```
8+
docker build -t testmodel .
9+
docker run -p 8000:8000 testmodel
10+
curl -X POST -F "[email protected]" http://localhost:8000/
11+
```
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
from .request import ModelRequest
# Fix: Model lives in model.py, not request.py — importing it from .request
# raises ImportError at package import time.
from .model import Model

src/asr/whisper_en/local/api.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
from model import Model
from request import ModelRequest
from quart import Quart, request
from quart_cors import cors  # Import the cors function
import aiohttp
import os
import tempfile

app = Quart(__name__)
app = cors(app)  # Enable CORS for all routes

model = None


@app.before_serving
async def startup():
    """Create the shared HTTP session and load the ASR model once, before serving."""
    app.client = aiohttp.ClientSession()
    global model
    model = Model(app)


@app.route('/', methods=['POST'])
async def embed():
    """Accept a multipart upload in the 'file' field and return its transcription.

    Returns the transcription string on success, or a 400 error when the
    'file' part is missing.
    """
    global model

    # Fix: the original also called request.get_json() here, which fails on a
    # multipart/form-data POST and whose result was never used.
    files = await request.files
    uploaded_file = files.get('file')
    if uploaded_file is None:
        return 'Missing "file" field in multipart form data.', 400

    # Save the upload to a temp dir so the model can read it from disk.
    temp_dir = tempfile.mkdtemp()
    # Fix: FileStorage.filename is the client-supplied file name; .name is only
    # the form field name ("file"). Fall back to a fixed name if absent.
    file_path = os.path.join(temp_dir, uploaded_file.filename or 'upload.wav')
    try:
        await uploaded_file.save(file_path)

        req = ModelRequest(wav_file=file_path)
        response = await model.inference(req)
    finally:
        # Fix: always clean up the temp file/dir, even when inference raises,
        # so failed requests do not leak temp directories.
        if os.path.exists(file_path):
            os.remove(file_path)
        os.rmdir(temp_dir)

    return response


if __name__ == "__main__":
    app.run()

src/asr/whisper_en/local/model.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
import torch
2+
import torchaudio
3+
from transformers import pipeline
4+
from request import ModelRequest
5+
6+
7+
class Model():
    """Singleton wrapper around a Whisper (tiny, English) ASR pipeline.

    The pipeline is constructed exactly once, on the first instantiation;
    subsequent calls return the same instance.
    """

    def __new__(cls, context):
        # The latest context is always recorded on the class, even when the
        # singleton instance already exists.
        cls.context = context
        if not hasattr(cls, 'instance'):
            cls.instance = super(Model, cls).__new__(cls)

            # Build the Whisper ASR pipeline once, on GPU when available.
            device = "cuda:0" if torch.cuda.is_available() else "cpu"
            cls.pipe = pipeline(
                "automatic-speech-recognition",
                model="openai/whisper-tiny.en",
                chunk_length_s=10,
                device=device,
            )
        return cls.instance

    def transcribe_audio(self, audio_path):
        """Load the audio file at *audio_path* and return Whisper's transcription."""
        waveform, rate = torchaudio.load(audio_path)
        sample = {
            "array": waveform.squeeze().numpy(),
            "sampling_rate": rate,
        }

        # Pass a copy because the pipeline may mutate its input dict in place.
        return self.pipe(sample.copy(), batch_size=8)["text"]

    async def inference(self, request: ModelRequest):
        """Transcribe request.wav_file; return a fixed message when output is empty."""
        text = self.transcribe_audio(request.wav_file)
        return text if text else 'Unable to transcribe the audio.'
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import requests
2+
import json
3+
4+
5+
class ModelRequest():
    """Carries the path of a WAV file through the inference pipeline."""

    def __init__(self, wav_file):
        # Path (absolute or relative) of the audio file to transcribe.
        self.wav_file = wav_file

    def to_json(self):
        """Serialize all public attributes as a pretty-printed JSON string."""
        return json.dumps(
            self, default=lambda obj: obj.__dict__, sort_keys=True, indent=4
        )
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
torch
2+
transformers
3+
quart
4+
aiohttp
5+
librosa
6+
quart-cors
7+
torchaudio
hypercorn
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
# Base image: slim official Python runtime
FROM python:3.9-slim

# All application files live under /app
WORKDIR /app

# Install Python dependencies first so this layer is cached across code changes
COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt

# FFmpeg is required for audio decoding; clean apt caches to keep the image small
RUN apt-get update \
    && apt-get install -y ffmpeg \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Bring in the application source
COPY . /app/

# The Quart app listens on port 8000
EXPOSE 8000

# Serve the app with hypercorn
CMD ["hypercorn", "--bind", "0.0.0.0:8000", "api:app"]

0 commit comments

Comments
 (0)