# Apache 2.0.
#
# This recipe performs diarization for the mix-headset data in the
-# AMI dataset. The x-vector extractor we use is trained on VoxCeleb v2
+# AMI dataset. The x-vector extractor we use is trained on VoxCeleb v2
# corpus with simulated RIRs. We use oracle SAD in this recipe.
# This recipe demonstrates the following:
# 1. Diarization using x-vector and clustering (AHC, VBx, spectral)
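
A typical invocation is sketched below. It assumes the script sources Kaldi's utils/parse_options.sh (as recipes normally do), so stage and diarizer_type can be overridden from the command line; the script name and flag values are illustrative, not taken from this commit.

    # Sketch: run the recipe end-to-end with spectral clustering, or rerun
    # from stage 3 with VBx clustering, skipping the download and
    # data-preparation stages shown below.
    ./run.sh --stage 0 --diarizer-type spectral
    ./run.sh --stage 3 --diarizer-type vbx
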
@@ -38,7 +38,7 @@ diarizer_type=spectral # must be one of (ahc, spectral, vbx)
# Path where AMI gets downloaded (or where locally available):
AMI_DIR=$PWD/wav_db # Default,
case $(hostname -d) in
-  fit.vutbr.cz) AMI_DIR=/mnt/matylda5/iveselyk/KALDI_AMI_WAV ;; # BUT,
+  fit.vutbr.cz) AMI_DIR=/mnt/matylda2/data/AMI_KALDI_DOWNLOAD ;; # BUT,
  clsp.jhu.edu) AMI_DIR=/export/corpora5/amicorpus ;; # JHU,
  cstr.ed.ac.uk) AMI_DIR= ;; # Edinburgh,
esac
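
The case statement only covers the BUT, JHU, and Edinburgh grids. As the comment above notes, AMI_DIR can also point to an already-available local copy; on another site this is just a matter of adding a branch (the domain and path below are placeholders):

    # Hypothetical branch for an unlisted site; substitute your own domain and path.
    my.site.edu) AMI_DIR=/data/corpora/amicorpus ;;
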
@@ -57,7 +57,7 @@ if [ $stage -le 1 ]; then
  local/ami_download.sh $mic $AMI_DIR
fi

-# Prepare data directories.
+# Prepare data directories.
if [ $stage -le 2 ]; then
  # Download the data split and references from BUT's AMI setup
  if ! [ -d AMI-diarization-setup ]; then
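
Because the clone is guarded by the directory check, re-running the recipe reuses the existing AMI-diarization-setup checkout instead of cloning again. If exact reproducibility matters, the checkout can be pinned to a fixed revision; the command below is a sketch that is not part of the recipe, and the revision is a placeholder:

    # Optional: pin the downloaded setup to a known revision.
    git -C AMI-diarization-setup checkout <revision>
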
@@ -120,7 +120,7 @@ if [ $stage -le 6 ]; then
      transform-vec $model_dir/xvectors_plda_train/transform.mat ark:- ark:- |\
      ivector-normalize-length ark:- ark:- |" \
    $model_dir/xvectors_plda_train/plda || exit 1;
-
+
  cp $model_dir/xvectors_plda_train/plda $model_dir/
  cp $model_dir/xvectors_plda_train/transform.mat $model_dir/
  cp $model_dir/xvectors_plda_train/mean.vec $model_dir/
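
The pipeline above applies transform.mat to the training x-vectors, length-normalizes them with ivector-normalize-length, and estimates the PLDA model, which is then copied into $model_dir alongside transform.mat and mean.vec for later use. A quick sanity check that the copy succeeded (a sketch, not part of the recipe):

    # Sketch: verify the scoring back-end files landed in $model_dir.
    for f in plda transform.mat mean.vec; do
      [ -f $model_dir/$f ] || echo "missing: $model_dir/$f"
    done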