2 files changed (+1, -16 lines)

run_eval.py

@@ -23,7 +23,7 @@ def main(args) -> None:
 
     def benchmark(batch):
         start_time = time.time()
-        segments, _ = asr_model.transcribe(batch["array"], language="en")
+        segments, _ = asr_model.transcribe(batch["audio"]["array"], language="en")
         outputs = [segment._asdict() for segment in segments]
         batch["transcription_time_s"] = time.time() - start_time
         batch["predictions"] = data_utils.normalizer("".join([segment["text"] for segment in outputs])).strip()
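
For context, a minimal sketch of why the indexing changes. The layout of `batch` below is an assumption about how the eval script feeds `benchmark` (a Hugging Face `datasets` Audio column decodes each example into a nested dict); it is not code from this PR.

import numpy as np
from faster_whisper import WhisperModel

asr_model = WhisperModel("tiny.en")  # any faster-whisper checkpoint works here

# One decoded example as `benchmark` receives it: the waveform sits under the
# nested "audio" dict, so batch["array"] does not exist at the top level.
batch = {
    "audio": {
        "path": None,
        "array": np.zeros(16_000, dtype=np.float32),  # 1 s of silence at 16 kHz
        "sampling_rate": 16_000,
    },
}

# faster-whisper accepts a NumPy waveform directly.
segments, _info = asr_model.transcribe(batch["audio"]["array"], language="en")
print([segment._asdict() for segment in segments])
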
@@ -117,12 +117,6 @@ def benchmark(batch):
         default=-1,
         help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
     )
-    parser.add_argument(
-        "--batch_size",
-        type=int,
-        default=16,
-        help="Number of samples to go through each streamed batch.",
-    )
     parser.add_argument(
         "--max_eval_samples",
         type=int,
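
A short sketch of why `--batch_size` can go (assumed wiring, not the repository's exact code): `asr_model.transcribe` handles one utterance per call, so `benchmark` is applied example by example with a plain, non-batched `datasets` map, leaving a batch-size knob with nothing to control.

import numpy as np
from datasets import Dataset

def benchmark(batch):
    # Stand-in for the real benchmark: with a non-batched map, `batch` is a
    # single example dict, exactly what a per-utterance transcribe call needs.
    batch["num_samples"] = len(batch["audio"]["array"])
    return batch

ds = Dataset.from_dict(
    {"audio": [{"array": np.zeros(16_000, dtype=np.float32).tolist(), "sampling_rate": 16_000}]}
)
ds = ds.map(benchmark)        # per example; no batched=True / batch_size argument
print(ds[0]["num_samples"])   # 16000
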

Shell script that runs run_eval.py across the benchmark datasets:

@@ -3,7 +3,6 @@
 export PYTHONPATH="..":$PYTHONPATH
 
 MODEL_IDs=("tiny.en" "small.en" "base.en" "medium.en" "large-v1" "large-v2" "large-v3")
-BATCH_SIZE=1
 DEVICE_INDEX=0
 
 num_models=${#MODEL_IDs[@]}
@@ -18,7 +17,6 @@
     --dataset="ami" \
     --split="test" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 python run_eval.py \
@@ -27,7 +25,6 @@
     --dataset="earnings22" \
     --split="test" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 python run_eval.py \
@@ -36,7 +33,6 @@
     --dataset="gigaspeech" \
     --split="test" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 python run_eval.py \
@@ -45,7 +41,6 @@
     --dataset="librispeech" \
     --split="test.clean" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 python run_eval.py \
@@ -54,7 +49,6 @@
     --dataset="librispeech" \
     --split="test.other" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 python run_eval.py \
@@ -63,7 +57,6 @@
     --dataset="spgispeech" \
     --split="test" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 python run_eval.py \
@@ -72,7 +65,6 @@
     --dataset="tedlium" \
     --split="test" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 python run_eval.py \
@@ -81,7 +73,6 @@
     --dataset="voxpopuli" \
     --split="test" \
     --device=${DEVICE_INDEX} \
-    --batch_size=${BATCH_SIZE} \
     --max_eval_samples=-1
 
 # Evaluate results