
Commit aacbb0c

NeMo SALM scripts
Signed-off-by: Piotr Żelasko <[email protected]>
1 parent 76e5444 commit aacbb0c

File tree

2 files changed: +353 −0 lines changed


nemo_asr/run_eval_salm.py

Lines changed: 258 additions & 0 deletions
@@ -0,0 +1,258 @@
import argparse

import io
import os
import torch
import evaluate
import soundfile
import lhotse

from tqdm import tqdm
from normalizer import data_utils
import numpy as np

from nemo.collections.asr.models import ASRModel
import time


from nemo.collections.speechlm2.models.salm import SALM
from omegaconf import OmegaConf
from pathlib import Path
from transformers import GenerationConfig



wer_metric = evaluate.load("wer")


class ToAudio(torch.utils.data.Dataset):
    def __getitem__(self, cuts):
        audios, audio_lens = cuts.load_audio(collate=True)
        return {"cuts": cuts, "audios": audios, "audio_lens": audio_lens}


def setup_dloader(audio_files, batch_size, num_workers):
    cuts = lhotse.CutSet([lhotse.Recording.from_file(p).to_cut() for p in audio_files])
    return torch.utils.data.DataLoader(
        dataset=ToAudio(),
        sampler=lhotse.dataset.DynamicCutSampler(cuts, max_cuts=batch_size),
        num_workers=num_workers,
        batch_size=None,
    )
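
# Note on the dataloader above: DynamicCutSampler groups up to `max_cuts` lhotse cuts per
# mini-batch, and ToAudio.__getitem__ receives the whole batch of cuts at once, loading the
# waveforms and collating them into a padded array plus per-cut lengths via
# `cuts.load_audio(collate=True)`. `batch_size=None` is passed to the DataLoader because
# batching is already done by the sampler.
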
def transcribe(model, dloader) -> list[str]:
    hyps = []
    eos_tokens = torch.tensor([model.text_eos_id])
    for batch_idx, batch in enumerate(dloader):
        answer_ids = model.generate(
            prompts=[
                [
                    {"role": "user", "slots": {"message": f"Transcribe the following: {model.audio_locator_tag}"}}
                ]
            ] * len(batch["cuts"]),
            audios=batch["audios"].to(model.device, non_blocking=True),
            audio_lens=batch["audio_lens"].to(model.device, non_blocking=True),
            generation_config=GenerationConfig(
                max_new_tokens=128,
                bos_token_id=model.text_bos_id,
                eos_token_id=eos_tokens,
                pad_token_id=model.text_pad_id,
            ),
        )
        answer_ids = [parse_hyp(ans, eos_tokens) for ans in answer_ids.cpu()]
        hyps.extend(model.tokenizer.ids_to_text(ans).strip() for ans in answer_ids)
    return hyps
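
# Each utterance gets a one-turn chat prompt: the user message "Transcribe the following:"
# followed by model.audio_locator_tag, the placeholder that marks where the corresponding
# audio is inserted. The same prompt is replicated len(batch["cuts"]) times so every cut in
# the batch has its own copy, and decoding stops at model.text_eos_id or after
# max_new_tokens=128, whichever comes first.
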
def parse_hyp(answer: torch.Tensor, eos_tokens):
    # positions of EOS tokens in the generated sequence; keep everything before the first one
    end = torch.isin(answer, eos_tokens).nonzero(as_tuple=True)[0]
    if end.numel() == 0:
        return answer
    end = end[0]
    return answer[:end]
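
# Example of what parse_hyp does: with eos_tokens = tensor([3]) and a generated sequence
# tensor([11, 42, 7, 3, 0, 0]), the first EOS sits at index 3, so the function returns
# tensor([11, 42, 7]); if no EOS is present, the full sequence is returned unchanged.
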
def main(args):

    DATA_CACHE_DIR = os.path.join(os.getcwd(), "audio_cache")
    DATASET_NAME = args.dataset
    SPLIT_NAME = args.split

    CACHE_DIR = os.path.join(DATA_CACHE_DIR, DATASET_NAME, SPLIT_NAME)
    if not os.path.exists(CACHE_DIR):
        os.makedirs(CACHE_DIR)

    torch.set_float32_matmul_precision("medium")

    device = torch.device(f"cuda:{args.device}")
    model = SALM.from_pretrained(args.model_id).eval().to(torch.bfloat16).to(device)

    dataset = data_utils.load_data(args)

    def download_audio_files(batch):

        # download audio files and collect the paths, transcriptions and durations
        audio_paths = []
        durations = []

        for id, sample in zip(batch["id"], batch["audio"]):

            # first step added here to make ID and wav filenames unique
            # several datasets like earnings22 have a hierarchical structure,
            # e.g. earnings22/test/4432298/281.wav, earnings22/test/4450488/281.wav
            # lhotse uses the filename (281.wav) here as unique ID to create and name cuts
            # ref: https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/collation.py#L186
            id = id.replace('/', '_').removesuffix('.wav')

            audio_path = os.path.join(CACHE_DIR, f"{id}.wav")

            if "array" in sample:
                audio_array = np.float32(sample["array"])
                sample_rate = 16000

            elif "bytes" in sample:  # added to be compatible with the latest datasets library (3.x.x) that produces a byte stream
                with io.BytesIO(sample["bytes"]) as audio_file:
                    audio_array, sample_rate = soundfile.read(audio_file, dtype="float32")

            else:
                raise ValueError("Sample must have either 'array' or 'bytes' key")

            if not os.path.exists(audio_path):
                os.makedirs(os.path.dirname(audio_path), exist_ok=True)
                soundfile.write(audio_path, audio_array, sample_rate)

            audio_paths.append(audio_path)
            durations.append(len(audio_array) / sample_rate)


        batch["references"] = batch["norm_text"]
        batch["audio_filepaths"] = audio_paths
        batch["durations"] = durations

        return batch
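
    # The dataset.map(...) call below materializes every sample as a wav file under
    # audio_cache/<dataset>/<split>, so lhotse can later build Recording/Cut objects from plain
    # paths. Both payload formats of the datasets library are handled: decoded "array" samples
    # (written at 16 kHz) and raw "bytes" streams decoded with soundfile. Nested ids such as
    # earnings22/test/4432298/281.wav are flattened so the wav filenames stay unique.
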
    if args.max_eval_samples is not None and args.max_eval_samples > 0:
        print(f"Subsampling dataset to first {args.max_eval_samples} samples!")
        dataset = dataset.take(args.max_eval_samples)

    dataset = data_utils.prepare_data(dataset)

    # preparing the offline dataset
    dataset = dataset.map(download_audio_files, batch_size=args.batch_size, batched=True, remove_columns=["audio"])

    # Collect audio_filepaths, durations and references from the mapped dataset
    all_data = {
        "audio_filepaths": [],
        "durations": [],
        "references": [],
    }

    data_itr = iter(dataset)
    for data in tqdm(data_itr, desc="Downloading Samples"):
        for key in all_data:
            all_data[key].append(data[key])

    # Sort audio_filepaths and references based on durations values
    sorted_indices = sorted(range(len(all_data["durations"])), key=lambda k: all_data["durations"][k], reverse=True)
    all_data["audio_filepaths"] = [all_data["audio_filepaths"][i] for i in sorted_indices]
    all_data["references"] = [all_data["references"][i] for i in sorted_indices]
    all_data["durations"] = [all_data["durations"][i] for i in sorted_indices]
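
    # Sorting longest-first groups recordings of similar duration into the same mini-batch,
    # which should keep padding low; it also means the warmup pass below (the first
    # batch_size * 4 files) runs on the longest recordings, so it doubles as a worst-case
    # memory check.
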
    total_time = 0
    for _ in range(2):  # warmup once and calculate rtf
        if _ == 0:
            audio_files = all_data["audio_filepaths"][:args.batch_size * 4]  # warmup with 4 batches
        else:
            audio_files = all_data["audio_filepaths"]
        dloader = setup_dloader(audio_files=audio_files, batch_size=args.batch_size, num_workers=1)
        with torch.inference_mode():
            start_time = time.time()
            transcriptions = transcribe(model, dloader)
            end_time = time.time()
        if _ == 1:
            total_time += end_time - start_time

    # normalize transcriptions with English normalizer
    if isinstance(transcriptions, tuple) and len(transcriptions) == 2:
        transcriptions = transcriptions[0]
    predictions = [data_utils.normalizer(pred) for pred in transcriptions]

    avg_time = total_time / len(all_data["audio_filepaths"])

    # Write manifest results (WER and RTFX)
    manifest_path = data_utils.write_manifest(
        all_data["references"],
        predictions,
        args.model_id,
        args.dataset_path,
        args.dataset,
        args.split,
        audio_length=all_data["durations"],
        transcription_time=[avg_time] * len(all_data["audio_filepaths"]),
    )

    print("Results saved at path:", os.path.abspath(manifest_path))

    wer = wer_metric.compute(references=all_data['references'], predictions=predictions)
    wer = round(100 * wer, 2)

    audio_length = sum(all_data["durations"])
    rtfx = audio_length / total_time
    rtfx = round(rtfx, 2)
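
    # RTFX is the inverse real-time factor: seconds of audio processed per second of
    # wall-clock transcription time. For example, 36000 s of audio transcribed in 900 s gives
    # RTFX = 40, i.e. the model runs 40x faster than real time. Only the second (timed) pass
    # contributes to total_time; the warmup pass is excluded.
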
    print("RTFX:", rtfx)
    print("WER:", wer, "%")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with NVIDIA NeMo.",
    )
    parser.add_argument(
        '--dataset_path', type=str, default='esb/datasets', help='Dataset path. By default, it is `esb/datasets`'
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name. E.g. `librispeech_asr` for the LibriSpeech ASR dataset, or `common_voice` for Common Voice. The full list of dataset names "
        "can be found at `https://huggingface.co/datasets/esb/datasets`",
    )
    parser.add_argument(
        "--split",
        type=str,
        default="test",
        help="Split of the dataset. E.g. `validation` for the dev split, or `test` for the test split.",
    )
    parser.add_argument(
        "--device",
        type=int,
        default=-1,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    parser.add_argument(
        "--batch_size", type=int, default=32, help="Number of samples to go through each streamed batch.",
    )
    parser.add_argument(
        "--max_eval_samples",
        type=int,
        default=None,
        help="Number of samples to be evaluated. Put a lower number e.g. 64 for testing this script.",
    )
    parser.add_argument(
        "--no-streaming",
        dest='streaming',
        action="store_false",
        help="Choose whether you'd like to download the entire dataset or stream it during the evaluation.",
    )
    args = parser.parse_args()
    parser.set_defaults(streaming=True)

    main(args)
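
For reference, a minimal sketch of how the helpers above could be reused to transcribe a single local file outside the evaluation loop. It assumes it is run from the nemo_asr/ directory with the repository's normalizer package importable (run_salm.sh arranges this via PYTHONPATH), uses the same model ID as run_salm.sh, and /path/to/sample.wav is only a placeholder:

    import torch
    from nemo.collections.speechlm2.models.salm import SALM
    from run_eval_salm import setup_dloader, transcribe  # helpers defined above

    # load the SALM checkpoint in bfloat16 on the first GPU
    model = SALM.from_pretrained("nvidia/canary-qwen-2.5b").eval().to(torch.bfloat16).to("cuda:0")
    # build a one-file dataloader and print the transcript
    dloader = setup_dloader(audio_files=["/path/to/sample.wav"], batch_size=1, num_workers=0)
    with torch.inference_mode():
        print(transcribe(model, dloader)[0])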

nemo_asr/run_salm.sh

Lines changed: 95 additions & 0 deletions
@@ -0,0 +1,95 @@
#!/bin/bash

export PYTHONPATH="..":$PYTHONPATH

MODEL_IDs=(
    nvidia/canary-qwen-2.5b
)
BATCH_SIZE=192
DEVICE_ID=0

num_models=${#MODEL_IDs[@]}

for (( i=0; i<${num_models}; i++ ));
do
    MODEL_ID=${MODEL_IDs[$i]}

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="ami" \
        --split="test" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="earnings22" \
        --split="test" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="gigaspeech" \
        --split="test" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="librispeech" \
        --split="test.clean" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="librispeech" \
        --split="test.other" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="spgispeech" \
        --split="test" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="tedlium" \
        --split="test" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    python run_eval_salm.py \
        --model_id=${MODEL_ID} \
        --dataset_path="hf-audio/esb-datasets-test-only-sorted" \
        --dataset="voxpopuli" \
        --split="test" \
        --device=${DEVICE_ID} \
        --batch_size=${BATCH_SIZE} \
        --max_eval_samples=-1

    # Evaluate results
    RUNDIR=`pwd` && \
    cd ../normalizer && \
    python -c "import eval_utils; eval_utils.score_results('${RUNDIR}/results', '${MODEL_ID}')" && \
    cd $RUNDIR

done
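
Each run_eval_salm.py invocation above evaluates one ESB test set and writes a manifest of references, predictions, durations and timing (the script prints its location); the final eval_utils.score_results call then aggregates the manifests it finds under ${RUNDIR}/results into per-dataset and overall scores for the model. For a quick end-to-end check, --max_eval_samples can be set to a small value such as 64 instead of -1, as suggested in the script's argument help.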
