From 2db3611d4fbcab6003a48964c3f4fca67cb5b237 Mon Sep 17 00:00:00 2001 From: Nikhilesh Bhatnagar Date: Mon, 26 Feb 2024 04:03:20 +0000 Subject: [PATCH] initial commit --- README.md | 25 +- aqc.patch | 36 + triton_inference.ipynb | 106 + triton_model/1/aqc/examples/.gitignore | 2 + triton_model/1/aqc/examples/__init__.py | 9 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 232 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 259 bytes .../1/aqc/examples/data2vec/README.md | 97 + .../1/aqc/examples/data2vec/__init__.py | 1 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 176 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 205 bytes .../audio/pretraining/base_librispeech.yaml | 89 + .../aqc/examples/data2vec/models/__init__.py | 8 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 417 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 442 bytes .../data2vec_audio.cpython-310.pyc | Bin 0 -> 17253 bytes .../__pycache__/data2vec_audio.cpython-39.pyc | Bin 0 -> 17284 bytes .../models/__pycache__/utils.cpython-310.pyc | Bin 0 -> 293 bytes .../data2vec/models/data2vec_audio.py | 795 ++++ .../1/aqc/examples/data2vec/models/utils.py | 3 + .../language_model/README.adaptive_inputs.md | 39 + .../examples/language_model/README.conv.md | 40 + .../1/aqc/examples/language_model/README.md | 123 + .../language_model/prepare-wikitext-103.sh | 33 + .../aqc/examples/speech_recognition/README.md | 83 + .../examples/speech_recognition/__init__.py | 1 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 223 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 252 bytes .../__pycache__/w2l_decoder.cpython-310.pyc | Bin 0 -> 14196 bytes .../speech_recognition/criterions/ASG_loss.py | 170 + .../speech_recognition/criterions/__init__.py | 17 + .../__pycache__/ASG_loss.cpython-310.pyc | Bin 0 -> 5163 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 619 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 642 bytes .../cross_entropy_acc.cpython-310.pyc | Bin 0 -> 4910 bytes .../cross_entropy_acc.cpython-39.pyc | Bin 0 -> 5081 bytes .../criterions/cross_entropy_acc.py | 130 + .../speech_recognition/data/__init__.py | 11 + .../data/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 230 bytes .../data/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 259 bytes .../__pycache__/asr_dataset.cpython-310.pyc | Bin 0 -> 4162 bytes .../__pycache__/asr_dataset.cpython-39.pyc | Bin 0 -> 4169 bytes .../__pycache__/collaters.cpython-310.pyc | Bin 0 -> 4326 bytes .../data/__pycache__/collaters.cpython-39.pyc | Bin 0 -> 4441 bytes .../__pycache__/data_utils.cpython-310.pyc | Bin 0 -> 2934 bytes .../__pycache__/data_utils.cpython-39.pyc | Bin 0 -> 2960 bytes .../__pycache__/replabels.cpython-310.pyc | Bin 0 -> 1545 bytes .../data/__pycache__/replabels.cpython-39.pyc | Bin 0 -> 1574 bytes .../speech_recognition/data/asr_dataset.py | 122 + .../speech_recognition/data/collaters.py | 131 + .../speech_recognition/data/data_utils.py | 100 + .../speech_recognition/data/replabels.py | 70 + .../datasets/asr_prep_json.py | 125 + .../datasets/prepare-librispeech.sh | 88 + .../aqc/examples/speech_recognition/infer.py | 436 ++ .../speech_recognition/kaldi/__init__.py | 0 .../kaldi/add-self-loop-simple.cc | 94 + .../kaldi/config/kaldi_initializer.yaml | 8 + .../speech_recognition/kaldi/kaldi_decoder.py | 244 ++ .../kaldi/kaldi_initializer.py | 698 ++++ .../speech_recognition/models/__init__.py | 8 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 437 bytes .../__pycache__/__init__.cpython-39.pyc | 
Bin 0 -> 462 bytes .../vggtransformer.cpython-310.pyc | Bin 0 -> 24115 bytes .../__pycache__/vggtransformer.cpython-39.pyc | Bin 0 -> 24280 bytes .../w2l_conv_glu_enc.cpython-310.pyc | Bin 0 -> 5241 bytes .../w2l_conv_glu_enc.cpython-39.pyc | Bin 0 -> 5260 bytes .../models/vggtransformer.py | 1020 +++++ .../models/w2l_conv_glu_enc.py | 177 + .../examples/speech_recognition/new/README.md | 43 + .../speech_recognition/new/__init__.py | 0 .../new/conf/hydra/sweeper/ax.yaml | 29 + .../speech_recognition/new/conf/infer.yaml | 25 + .../new/decoders/__init__.py | 0 .../new/decoders/base_decoder.py | 62 + .../new/decoders/decoder.py | 32 + .../new/decoders/decoder_config.py | 70 + .../new/decoders/flashlight_decoder.py | 431 ++ .../new/decoders/viterbi_decoder.py | 24 + .../examples/speech_recognition/new/infer.py | 475 +++ .../speech_recognition/tasks/__init__.py | 8 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 434 bytes .../tasks/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 459 bytes .../speech_recognition.cpython-310.pyc | Bin 0 -> 6081 bytes .../speech_recognition.cpython-39.pyc | Bin 0 -> 6132 bytes .../tasks/speech_recognition.py | 157 + .../speech_recognition/utils/wer_utils.py | 381 ++ .../speech_recognition/w2l_decoder.py | 486 +++ triton_model/1/aqc/examples/wav2vec/README.md | 426 ++ .../1/aqc/examples/wav2vec/__init__.py | 0 .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 173 bytes .../wav2vec/config/finetuning/base_100h.yaml | 58 + .../wav2vec/config/finetuning/base_10h.yaml | 63 + .../wav2vec/config/finetuning/base_10m.yaml | 63 + .../wav2vec/config/finetuning/base_1h.yaml | 63 + .../wav2vec/config/finetuning/base_960h.yaml | 57 + .../wav2vec/config/finetuning/vox_100h.yaml | 58 + .../wav2vec/config/finetuning/vox_10h.yaml | 63 + .../wav2vec/config/finetuning/vox_10m.yaml | 63 + .../wav2vec/config/finetuning/vox_1h.yaml | 63 + .../wav2vec/config/finetuning/vox_960h.yaml | 57 + .../wav2vec2_base_librispeech.yaml | 57 + .../wav2vec2_conformer_base_librispeech.yaml | 60 + .../wav2vec2_conformer_large_librivox.yaml | 72 + .../pretraining/wav2vec2_large_librivox.yaml | 70 + .../wav2vec2_large_librivox_tpu-pod.yaml | 72 + .../wav2vec2_large_librivox_tpu.yaml | 77 + .../1/aqc/examples/wav2vec/libri_labels.py | 56 + .../wav2vec/scripts/binarize_manifest.sh | 33 + .../examples/wav2vec/unsupervised/README.md | 119 + .../examples/wav2vec/unsupervised/__init__.py | 0 .../config/finetuning/w2v_finetune.yaml | 62 + .../wav2vec/unsupervised/config/gan/w2vu.yaml | 115 + .../unsupervised/config/gan/w2vu2.yaml | 154 + .../unsupervised/config/generate/viterbi.yaml | 21 + .../config/timit_matched/test.uid | 192 + .../config/timit_matched/train.uid | 3696 +++++++++++++++++ .../config/timit_matched/train_text.uid | 3696 +++++++++++++++++ .../config/timit_matched/valid.uid | 400 ++ .../config/timit_unmatched/test.uid | 1680 ++++++++ .../config/timit_unmatched/train.uid | 3000 +++++++++++++ .../config/timit_unmatched/train_text.uid | 1000 +++++ .../config/timit_unmatched/valid.uid | 620 +++ .../wav2vec/unsupervised/data/__init__.py | 13 + .../data/extracted_features_dataset.py | 167 + .../unsupervised/data/random_input_dataset.py | 62 + .../unsupervised/kaldi_self_train/README.md | 56 + .../unsupervised/kaldi_self_train/st/cmd.sh | 15 + .../kaldi_self_train/st/decode_phone.sh | 33 + .../kaldi_self_train/st/decode_word_step1.sh | 46 + .../kaldi_self_train/st/decode_word_step2.sh | 30 + .../st/local/copy_aligned_text.py | 4 + .../kaldi_self_train/st/local/decode.sh | 38 + 
.../st/local/prepare_data_from_w2v.py | 56 + .../kaldi_self_train/st/local/prepare_lang.sh | 37 + .../st/local/prepare_lang_word.sh | 35 + .../kaldi_self_train/st/local/prepare_lm.sh | 35 + .../kaldi_self_train/st/local/score.sh | 63 + .../kaldi_self_train/st/local/show_wer.sh | 52 + .../st/local/train_subset_lgbeam.sh | 129 + .../kaldi_self_train/st/local/unsup_select.py | 135 + .../st/local/unsup_select_decode.sh | 37 + .../st/local/unsup_select_decode_word.sh | 35 + .../unsupervised/kaldi_self_train/st/path.sh | 5 + .../unsupervised/kaldi_self_train/st/steps | 1 + .../st/steps_gan/train_deltas.sh | 175 + .../st/steps_gan/train_lda_mllt.sh | 239 ++ .../st/steps_gan/train_sat.sh | 281 ++ .../unsupervised/kaldi_self_train/st/train.sh | 43 + .../unsupervised/kaldi_self_train/st/utils | 1 + .../wav2vec/unsupervised/models/__init__.py | 11 + .../wav2vec/unsupervised/models/wav2vec_u.py | 687 +++ .../wav2vec/unsupervised/scripts/apply_pca.py | 76 + .../unsupervised/scripts/copy_labels.py | 10 + .../unsupervised/scripts/filter_lexicon.py | 40 + .../unsupervised/scripts/filter_tsv.py | 37 + .../unsupervised/scripts/g2p_wrd_to_phn.py | 45 + .../unsupervised/scripts/ltr_to_wrd.py | 16 + .../wav2vec/unsupervised/scripts/mean_pool.py | 99 + .../unsupervised/scripts/merge_clusters.py | 114 + .../scripts/normalize_and_filter_text.py | 72 + .../unsupervised/scripts/normalize_text.py | 22 + .../wav2vec/unsupervised/scripts/pca.py | 53 + .../scripts/phonemize_with_sil.py | 83 + .../unsupervised/scripts/prepare_audio.sh | 78 + .../unsupervised/scripts/prepare_audio_v2.sh | 68 + .../unsupervised/scripts/prepare_text.sh | 83 + .../unsupervised/scripts/prepare_timit.sh | 79 + .../unsupervised/scripts/remove_silence.py | 63 + .../wav2vec/unsupervised/scripts/vads.py | 98 + .../scripts/wav2vec_apply_cluster_faiss.py | 128 + .../scripts/wav2vec_cluster_faiss.py | 210 + .../scripts/wav2vec_extract_features.py | 119 + .../wav2vec/unsupervised/scripts/wer.py | 82 + .../unsupervised/scripts/wrd_to_ltr.py | 16 + .../wav2vec/unsupervised/tasks/__init__.py | 11 + .../unsupervised/tasks/unpaired_audio_text.py | 452 ++ .../wav2vec/unsupervised/w2vu_generate.py | 714 ++++ .../examples/wav2vec/vq-wav2vec_featurize.py | 250 ++ .../aqc/examples/wav2vec/wav2vec_featurize.py | 249 ++ .../aqc/examples/wav2vec/wav2vec_manifest.py | 87 + .../1/aqc/examples/wav2vec/xlsr/README.md | 68 + .../wav2vec/xlsr/config/finetune.yaml | 66 + triton_model/1/model.py | 224 + triton_model/config.pbtxt | 37 + 185 files changed, 30017 insertions(+), 1 deletion(-) create mode 100644 aqc.patch create mode 100644 triton_inference.ipynb create mode 100644 triton_model/1/aqc/examples/.gitignore create mode 100644 triton_model/1/aqc/examples/__init__.py create mode 100644 triton_model/1/aqc/examples/__pycache__/__init__.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/data2vec/README.md create mode 100644 triton_model/1/aqc/examples/data2vec/__init__.py create mode 100644 triton_model/1/aqc/examples/data2vec/__pycache__/__init__.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/data2vec/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/data2vec/config/audio/pretraining/base_librispeech.yaml create mode 100644 triton_model/1/aqc/examples/data2vec/models/__init__.py create mode 100644 triton_model/1/aqc/examples/data2vec/models/__pycache__/__init__.cpython-310.pyc create mode 100644 
triton_model/1/aqc/examples/data2vec/models/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/data2vec/models/__pycache__/data2vec_audio.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/data2vec/models/__pycache__/data2vec_audio.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/data2vec/models/__pycache__/utils.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/data2vec/models/data2vec_audio.py create mode 100644 triton_model/1/aqc/examples/data2vec/models/utils.py create mode 100644 triton_model/1/aqc/examples/language_model/README.adaptive_inputs.md create mode 100644 triton_model/1/aqc/examples/language_model/README.conv.md create mode 100644 triton_model/1/aqc/examples/language_model/README.md create mode 100644 triton_model/1/aqc/examples/language_model/prepare-wikitext-103.sh create mode 100644 triton_model/1/aqc/examples/speech_recognition/README.md create mode 100644 triton_model/1/aqc/examples/speech_recognition/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/__pycache__/__init__.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/__pycache__/w2l_decoder.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/ASG_loss.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/__pycache__/ASG_loss.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/__pycache__/__init__.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/__pycache__/cross_entropy_acc.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/__pycache__/cross_entropy_acc.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/criterions/cross_entropy_acc.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/__init__.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/asr_dataset.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/asr_dataset.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/collaters.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/collaters.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/data_utils.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/data_utils.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/replabels.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/__pycache__/replabels.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/asr_dataset.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/collaters.py create mode 100644 
triton_model/1/aqc/examples/speech_recognition/data/data_utils.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/data/replabels.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/datasets/asr_prep_json.py create mode 100755 triton_model/1/aqc/examples/speech_recognition/datasets/prepare-librispeech.sh create mode 100644 triton_model/1/aqc/examples/speech_recognition/infer.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/kaldi/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/kaldi/add-self-loop-simple.cc create mode 100644 triton_model/1/aqc/examples/speech_recognition/kaldi/config/kaldi_initializer.yaml create mode 100644 triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_decoder.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_initializer.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/__pycache__/__init__.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/__pycache__/w2l_conv_glu_enc.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/__pycache__/w2l_conv_glu_enc.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/vggtransformer.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/models/w2l_conv_glu_enc.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/README.md create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/conf/hydra/sweeper/ax.yaml create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/conf/infer.yaml create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/decoders/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/decoders/base_decoder.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder_config.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/decoders/flashlight_decoder.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/decoders/viterbi_decoder.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/new/infer.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/tasks/__init__.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/__init__.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/speech_recognition.cpython-310.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/speech_recognition.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/speech_recognition/tasks/speech_recognition.py create mode 100644 triton_model/1/aqc/examples/speech_recognition/utils/wer_utils.py create 
mode 100644 triton_model/1/aqc/examples/speech_recognition/w2l_decoder.py create mode 100644 triton_model/1/aqc/examples/wav2vec/README.md create mode 100644 triton_model/1/aqc/examples/wav2vec/__init__.py create mode 100644 triton_model/1/aqc/examples/wav2vec/__pycache__/__init__.cpython-39.pyc create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/base_100h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10m.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/base_1h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/base_960h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_100h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10m.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_1h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_960h.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_base_librispeech.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_large_librivox.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu-pod.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/libri_labels.py create mode 100644 triton_model/1/aqc/examples/wav2vec/scripts/binarize_manifest.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/README.md create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/__init__.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/finetuning/w2v_finetune.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu2.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/generate/viterbi.yaml create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/test.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train_text.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/valid.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/test.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train_text.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/valid.uid create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/data/__init__.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/data/extracted_features_dataset.py create mode 100644 
triton_model/1/aqc/examples/wav2vec/unsupervised/data/random_input_dataset.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/README.md create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_phone.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step1.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step2.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lm.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/score.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/show_wer.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/train_subset_lgbeam.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select.py create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode_word.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh create mode 120000 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_deltas.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_lda_mllt.sh create mode 100755 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh create mode 120000 triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/utils create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/models/__init__.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/models/wav2vec_u.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/apply_pca.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/copy_labels.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_lexicon.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_tsv.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/mean_pool.py create mode 100644 
triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/merge_clusters.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_text.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/pca.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio_v2.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_text.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_timit.sh create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/remove_silence.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/vads.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wer.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/__init__.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py create mode 100644 triton_model/1/aqc/examples/wav2vec/unsupervised/w2vu_generate.py create mode 100644 triton_model/1/aqc/examples/wav2vec/vq-wav2vec_featurize.py create mode 100644 triton_model/1/aqc/examples/wav2vec/wav2vec_featurize.py create mode 100644 triton_model/1/aqc/examples/wav2vec/wav2vec_manifest.py create mode 100644 triton_model/1/aqc/examples/wav2vec/xlsr/README.md create mode 100644 triton_model/1/aqc/examples/wav2vec/xlsr/config/finetune.yaml create mode 100644 triton_model/1/model.py create mode 100644 triton_model/config.pbtxt diff --git a/README.md b/README.md index 6bd503d..1db9099 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,25 @@ -# dhruva-iitm-asr +# Dhruva IITM ASR +## How to get the models + +### For Indian Languages +Store the models and the respective dictionary files in the folder `asr/1/models`. 
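+
+A minimal sketch of downloading one model and its dictionary straight into that folder (the `mkdir`/`wget -P` wrapping is an illustration, not from the original commands; the two URLs are simply the example files referenced below, so substitute the files for your language):
+```sh
+# Illustrative sketch: fetch one example model and one dictionary into asr/1/models.
+# Pick the files for your language from the site listed below.
+mkdir -p asr/1/models
+wget -P asr/1/models https://asr.iitm.ac.in/SPRING_INX/models/fine_tuned/SPRING_INX_data2vec_aqc_Bengali.pt
+wget -P asr/1/models https://asr.iitm.ac.in/SPRING_INX/models/dictionaries/SPRING_INX_Urdu_dict.txt
+```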
+The available languages are listed at `https://asr.iitm.ac.in/models`.
+A fine-tuned model is downloaded with, for example, `wget https://asr.iitm.ac.in/SPRING_INX/models/fine_tuned/SPRING_INX_data2vec_aqc_Bengali.pt`
+A dictionary is downloaded with, for example, `wget https://asr.iitm.ac.in/SPRING_INX/models/dictionaries/SPRING_INX_Urdu_dict.txt`
+
+
+### For English Whisper
+Place the model files in `asr/1/whisper_models`:
+* Install `faster_whisper`
+* In a Python interpreter, import `faster_whisper` and run `model = faster_whisper.WhisperModel('large-v3', device='cuda', compute_type="int8", device_index=1, download_root='/path/to/repo/asr/1/whisper_models')`
+* This downloads and stores the model files in that custom location
+
+## To create the environment
+* Install `conda-pack`
+* Create a new Python 3.10 environment
+* Clone `https://github.com/Speech-Lab-IITM/data2vec-aqc/tree/master` and apply the patch `aqc.patch` with `git apply`
+* Build the wheel with `python setup.py bdist_wheel`
+* Do the same for `https://github.com/Spijkervet/torchaudio-augmentations`
+* Do the same for `https://github.com/flashlight/sequence`
+* Install the built wheels along with the remaining dependencies: `pip install git+https://github.com/kpu/kenlm.git fast_pytorch_kmeans tensorboardX flashlight-text soundfile torchaudio data2vec-aqc/dist/fairseq-0.12.2-cp310-cp310-linux_x86_64.whl sequence/dist/flashlight_sequence-0.0.0+91e2b0f.d20240210-cp310-cp310-linux_x86_64.whl torchaudio-augmentations/dist/torchaudio_augmentations-0.2.4-py3-none-any.whl faster-whisper`
+* Finally, use `conda-pack` to save the environment.
diff --git a/aqc.patch b/aqc.patch new file mode 100644 index 0000000..3e2ef4c --- /dev/null +++ b/aqc.patch @@ -0,0 +1,36 @@ +diff --git a/examples/data2vec/models/data2vec_audio.py b/examples/data2vec/models/data2vec_audio.py +index 5cf5b53..ccaf4a4 100644 +--- a/examples/data2vec/models/data2vec_audio.py ++++ b/examples/data2vec/models/data2vec_audio.py +@@ -423,7 +423,7 @@ class Data2VecAudioModel(BaseFairseqModel): + + # extracting both the source and the augmented audios from source + source_audios = source[0] +- aug_audios = source[1] ++ aug_audios = source[0] + + if self.feature_grad_mult > 0: + features = self.feature_extractor(source_audios) #features +diff --git a/setup.py b/setup.py +index a7ce61a..3dc7b44 100644 +--- a/setup.py ++++ b/setup.py +@@ -105,12 +105,12 @@ try: + "fairseq/clib/libnat/edit_dist.cpp", + ], + ), +- cpp_extension.CppExtension( +- "alignment_train_cpu_binding", +- sources=[ +- "examples/operators/alignment_train_cpu.cpp", +- ], +- ), ++ # cpp_extension.CppExtension( ++ # "alignment_train_cpu_binding", ++ # sources=[ ++ # "examples/operators/alignment_train_cpu.cpp", ++ # ], ++ # ), + ] + ) + if "CUDA_HOME" in os.environ: diff --git a/triton_inference.ipynb b/triton_inference.ipynb new file mode 100644 index 0000000..3e3055d --- /dev/null +++ b/triton_inference.ipynb @@ -0,0 +1,106 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "ename": "ImportError", + "evalue": "/lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.28' not found (required by /home/nikhilesh/default_home/miniforge3/lib/python3.12/site-packages/gevent/libuv/_corecffi.abi3.so)", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[2], line 7\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m 
\u001b[38;5;21;01mtritonclient\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mhttp\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mhttpclient\u001b[39;00m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# triton_client = httpclient.InferenceServerClient(url=\"10.4.25.9:8011\")\u001b[39;00m\n\u001b[0;32m----> 7\u001b[0m triton_client \u001b[38;5;241m=\u001b[39m \u001b[43mhttpclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mInferenceServerClient\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlocalhost:8011\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/tritonclient/http/_client.py:182\u001b[0m, in \u001b[0;36mInferenceServerClient.__init__\u001b[0;34m(self, url, verbose, concurrency, connection_timeout, network_timeout, max_greenlets, ssl, ssl_options, ssl_context_factory, insecure)\u001b[0m\n\u001b[1;32m 180\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_parsed_url \u001b[38;5;241m=\u001b[39m URL(scheme \u001b[38;5;241m+\u001b[39m url)\n\u001b[1;32m 181\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_base_uri \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_parsed_url\u001b[38;5;241m.\u001b[39mrequest_uri\u001b[38;5;241m.\u001b[39mrstrip(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m/\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 182\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_client_stub \u001b[38;5;241m=\u001b[39m \u001b[43mHTTPClient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_url\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 183\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parsed_url\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 184\u001b[0m \u001b[43m \u001b[49m\u001b[43mconcurrency\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconcurrency\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 185\u001b[0m \u001b[43m \u001b[49m\u001b[43mconnection_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconnection_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 186\u001b[0m \u001b[43m \u001b[49m\u001b[43mnetwork_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnetwork_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 187\u001b[0m \u001b[43m \u001b[49m\u001b[43mssl_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mssl_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 188\u001b[0m \u001b[43m \u001b[49m\u001b[43mssl_context_factory\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mssl_context_factory\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 189\u001b[0m \u001b[43m \u001b[49m\u001b[43minsecure\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minsecure\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 190\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 191\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pool \u001b[38;5;241m=\u001b[39m gevent\u001b[38;5;241m.\u001b[39mpool\u001b[38;5;241m.\u001b[39mPool(max_greenlets)\n\u001b[1;32m 192\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose \u001b[38;5;241m=\u001b[39m verbose\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/geventhttpclient/client.py:68\u001b[0m, in \u001b[0;36mHTTPClient.from_url\u001b[0;34m(cls, url, 
**kw)\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m enable_ssl:\n\u001b[1;32m 67\u001b[0m kw\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mssl_options\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[0;32m---> 68\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhost\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mport\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mport\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mssl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menable_ssl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkw\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/geventhttpclient/client.py:125\u001b[0m, in \u001b[0;36mHTTPClient.__init__\u001b[0;34m(self, host, port, headers, block_size, connection_timeout, network_timeout, disable_ipv6, concurrency, ssl, ssl_options, ssl_context_factory, insecure, proxy_host, proxy_port, version, headers_type)\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m connection_port:\n\u001b[1;32m 124\u001b[0m connection_port \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mport\n\u001b[0;32m--> 125\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_connection_pool \u001b[38;5;241m=\u001b[39m \u001b[43mConnectionPool\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[43m \u001b[49m\u001b[43mconnection_host\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconnection_port\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhost\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mport\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 128\u001b[0m \u001b[43m \u001b[49m\u001b[43msize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconcurrency\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 129\u001b[0m \u001b[43m \u001b[49m\u001b[43mnetwork_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnetwork_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 130\u001b[0m \u001b[43m \u001b[49m\u001b[43mconnection_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconnection_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 131\u001b[0m \u001b[43m \u001b[49m\u001b[43mdisable_ipv6\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdisable_ipv6\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 132\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_proxy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43muse_proxy\u001b[49m\n\u001b[1;32m 133\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 134\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mversion \u001b[38;5;241m=\u001b[39m version\n\u001b[1;32m 135\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mheaders_type \u001b[38;5;241m=\u001b[39m headers_type\n", + "File 
\u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/geventhttpclient/connectionpool.py:60\u001b[0m, in \u001b[0;36mConnectionPool.__init__\u001b[0;34m(self, connection_host, connection_port, request_host, request_port, size, disable_ipv6, connection_timeout, network_timeout, use_proxy)\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_request_port \u001b[38;5;241m=\u001b[39m request_port\n\u001b[1;32m 59\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_semaphore \u001b[38;5;241m=\u001b[39m lock\u001b[38;5;241m.\u001b[39mBoundedSemaphore(size)\n\u001b[0;32m---> 60\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_socket_queue \u001b[38;5;241m=\u001b[39m \u001b[43mgevent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mqueue\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mLifoQueue\u001b[49m\u001b[43m(\u001b[49m\u001b[43msize\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 61\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_use_proxy \u001b[38;5;241m=\u001b[39m use_proxy\n\u001b[1;32m 63\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconnection_timeout \u001b[38;5;241m=\u001b[39m connection_timeout\n", + "File \u001b[0;32msrc/gevent/queue.py:154\u001b[0m, in \u001b[0;36mgevent._gevent_cqueue.Queue.__init__\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32msrc/gevent/_hub_local.py:115\u001b[0m, in \u001b[0;36mgevent._gevent_c_hub_local.get_hub_noargs\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/hub.py:445\u001b[0m, in \u001b[0;36mHub.__init__\u001b[0;34m(self, loop, default)\u001b[0m\n\u001b[1;32m 443\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m loop \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 444\u001b[0m loop \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbackend\n\u001b[0;32m--> 445\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloop \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloop_class\u001b[49m(flags\u001b[38;5;241m=\u001b[39mloop, default\u001b[38;5;241m=\u001b[39mdefault) \u001b[38;5;66;03m# pylint:disable=not-callable\u001b[39;00m\n\u001b[1;32m 446\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_resolver \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_threadpool \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/hub.py:459\u001b[0m, in \u001b[0;36mHub.loop_class\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 457\u001b[0m \u001b[38;5;129m@property\u001b[39m\n\u001b[1;32m 458\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mloop_class\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m--> 459\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mGEVENT_CONFIG\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloop\u001b[49m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/_config.py:55\u001b[0m, in \u001b[0;36mSettingType.__new__..getter\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mgetter\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m---> 55\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msettings\u001b[49m\u001b[43m[\u001b[49m\u001b[43msetting_name\u001b[49m\u001b[43m]\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/_config.py:151\u001b[0m, in \u001b[0;36mSetting.get\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvalue\n\u001b[1;32m 149\u001b[0m \u001b[38;5;66;03m# Otherwise, read from the environment and reify\u001b[39;00m\n\u001b[1;32m 150\u001b[0m \u001b[38;5;66;03m# so we return consistent results.\u001b[39;00m\n\u001b[0;32m--> 151\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvalue \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvalidate\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_default\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 152\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvalue\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/_config.py:259\u001b[0m, in \u001b[0;36mImportableSetting.validate\u001b[0;34m(self, value)\u001b[0m\n\u001b[1;32m 257\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(value, \u001b[38;5;28mtype\u001b[39m):\n\u001b[1;32m 258\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m value\n\u001b[0;32m--> 259\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_import_one_of\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mshortname_map\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/_config.py:234\u001b[0m, in \u001b[0;36mImportableSetting._import_one_of\u001b[0;34m(self, candidates)\u001b[0m\n\u001b[1;32m 231\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mImportError\u001b[39;00m:\n\u001b[1;32m 232\u001b[0m \u001b[38;5;28;01mpass\u001b[39;00m\n\u001b[0;32m--> 234\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_import_one\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcandidates\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/_config.py:248\u001b[0m, in \u001b[0;36mImportableSetting._import_one\u001b[0;34m(self, path, _MISSING)\u001b[0m\n\u001b[1;32m 241\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mImportError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot import 
\u001b[39m\u001b[38;5;132;01m%r\u001b[39;00m\u001b[38;5;124m. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 242\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mRequired format: [package.]module.class. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 243\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOr choose from \u001b[39m\u001b[38;5;132;01m%r\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 244\u001b[0m \u001b[38;5;241m%\u001b[39m (path, \u001b[38;5;28mlist\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mshortname_map)))\n\u001b[1;32m 247\u001b[0m module, item \u001b[38;5;241m=\u001b[39m path\u001b[38;5;241m.\u001b[39mrsplit(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m--> 248\u001b[0m module \u001b[38;5;241m=\u001b[39m \u001b[43mimportlib\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mimport_module\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodule\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 249\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mgetattr\u001b[39m(module, item, _MISSING)\n\u001b[1;32m 250\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m x \u001b[38;5;129;01mis\u001b[39;00m _MISSING:\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/importlib/__init__.py:90\u001b[0m, in \u001b[0;36mimport_module\u001b[0;34m(name, package)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[38;5;28;01mbreak\u001b[39;00m\n\u001b[1;32m 89\u001b[0m level \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m---> 90\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_bootstrap\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_gcd_import\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m[\u001b[49m\u001b[43mlevel\u001b[49m\u001b[43m:\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpackage\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlevel\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m:1387\u001b[0m, in \u001b[0;36m_gcd_import\u001b[0;34m(name, package, level)\u001b[0m\n", + "File \u001b[0;32m:1360\u001b[0m, in \u001b[0;36m_find_and_load\u001b[0;34m(name, import_)\u001b[0m\n", + "File \u001b[0;32m:1331\u001b[0m, in \u001b[0;36m_find_and_load_unlocked\u001b[0;34m(name, import_)\u001b[0m\n", + "File \u001b[0;32m:935\u001b[0m, in \u001b[0;36m_load_unlocked\u001b[0;34m(spec)\u001b[0m\n", + "File \u001b[0;32m:994\u001b[0m, in \u001b[0;36mexec_module\u001b[0;34m(self, module)\u001b[0m\n", + "File \u001b[0;32m:488\u001b[0m, in \u001b[0;36m_call_with_frames_removed\u001b[0;34m(f, *args, **kwds)\u001b[0m\n", + "File \u001b[0;32m~/default_home/miniforge3/lib/python3.12/site-packages/gevent/libuv/loop.py:23\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mgevent\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_ffi\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mloop\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AbstractCallbacks\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mgevent\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_interfaces\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ILoop\n\u001b[0;32m---> 23\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mgevent\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mlibuv\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m _corecffi \u001b[38;5;66;03m# 
pylint:disable=no-name-in-module,import-error\u001b[39;00m\n\u001b[1;32m 25\u001b[0m ffi \u001b[38;5;241m=\u001b[39m _corecffi\u001b[38;5;241m.\u001b[39mffi\n\u001b[1;32m 26\u001b[0m libuv \u001b[38;5;241m=\u001b[39m _corecffi\u001b[38;5;241m.\u001b[39mlib\n", + "\u001b[0;31mImportError\u001b[0m: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.28' not found (required by /home/nikhilesh/default_home/miniforge3/lib/python3.12/site-packages/gevent/libuv/_corecffi.abi3.so)" + ] + } + ], + "source": [ + "# pip install tritonclient[http] soundfile\n", + "import soundfile\n", + "import numpy as np\n", + "from tritonclient.utils import *\n", + "import tritonclient.http as httpclient\n", + "# triton_client = httpclient.InferenceServerClient(url=\"10.4.25.9:8011\")\n", + "triton_client = httpclient.InferenceServerClient(url=\"localhost:8011\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " चह भारकेमहर वा ीकावायजकसीर देके नही हलजा ररभारतीय ने यह काम किया हैतो आज तक और तिहाल शिरोमणि का मानन हैकिइस समय मे देश के लि ही सबसे बडी चिंता की बात हैकि भात के लोगक मुसलमान को वोट दे रहे हैाद केलो कहैरानकर ह हरे देश मे कु की कक हो क है क ा है। \n" + ] + } + ], + "source": [ + "audio, rate = soundfile.read(\"recording.wav\", dtype=\"float32\")\n", + "audio = np.array(audio, dtype='float32')[1300000:2000000]\n", + "language = np.array(['hi'], dtype='object')\n", + "inputs = [\n", + " httpclient.InferInput(\n", + " \"INPUT_AUDIO\", audio.shape, np_to_triton_dtype(audio.dtype)\n", + " ),\n", + " httpclient.InferInput(\n", + " \"INPUT_LANGUAGE_ID\", language.shape, np_to_triton_dtype(language.dtype)\n", + " ),\n", + "]\n", + "inputs[0].set_data_from_numpy(audio)\n", + "inputs[1].set_data_from_numpy(language)\n", + "outputs = [httpclient.InferRequestedOutput(\"OUTPUT_RECOGNIZED_TEXT\")]\n", + "result = triton_client.infer(model_name=\"asr\", inputs=inputs, outputs=outputs)\n", + "result = result.as_numpy(\"OUTPUT_RECOGNIZED_TEXT\")\n", + "print(result[0].decode('utf-8'))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "dhruva-iitm-tts", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/triton_model/1/aqc/examples/.gitignore b/triton_model/1/aqc/examples/.gitignore new file mode 100644 index 0000000..1ef816f --- /dev/null +++ b/triton_model/1/aqc/examples/.gitignore @@ -0,0 +1,2 @@ +!*/*.sh +!*/*.md diff --git a/triton_model/1/aqc/examples/__init__.py b/triton_model/1/aqc/examples/__init__.py new file mode 100644 index 0000000..44bb24a --- /dev/null +++ b/triton_model/1/aqc/examples/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +try: + from fairseq.version import __version__ # noqa +except ImportError: + pass diff --git a/triton_model/1/aqc/examples/__pycache__/__init__.cpython-310.pyc b/triton_model/1/aqc/examples/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58f4e54c216af731c3545ef961e751749c87ee1d GIT binary patch literal 232 zcmYjMI|{-;6nt;}5Pt}I34`EbFJfaMf{le}vjnrkLgL1^@p}Ys;z_(iS}U($qc39N zJl?o8p^A%qw~j4MP~Kq%%=1TD^Og#4CY=Lmk` z$!YL0t0{HR*EJ)InrkFm$Jycesbly!0D1X@F(y%XM6ly z%O*II-aa+9rP#=h!z0xxN^6s=bmS_rO~Ff}lRn>4UFl6N=|#IP_oY(S+oaT@+x5kT S3+B0{g`k0`IQlsUkr7F^Gc(44TX@8G*u@ zjJMcw^HWlDiv2X1ZZQ;r^sHnkVgXWM;+KhjNl|7=eqMZ0YC*n!VsVkap?+dvvVLks wVs1fBYO#JwVo9P=S!%L=e0*kJW=VX!UP0w84jZ7Mr8%i~AUlddh6*qM08G{=`2YX_ literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/data2vec/__pycache__/__init__.cpython-39.pyc b/triton_model/1/aqc/examples/data2vec/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e9adde8b13ade0ae6a06334a8b7171f84b9275f GIT binary patch literal 205 zcmYe~<>g`kf{S&NQbmCDV-N=!FabFZKwK;UBvKes7;_kM8KW2(8B&;n88n$+G6ID) z8E>)W=BK3Q6#Hp1-C`&L=~>B8!~&$i#IIoejQreG{k+WVjLe+W;tc(QqWrAXKp-NfP|-IAirlKedVl*E!mqq5Xw-NeFV{nU!Y+ybC_xTt=7d}dx|NqoFsLFFwD R8=#$~IjMFaCwvBC1^_wNG`s)+ literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/data2vec/config/audio/pretraining/base_librispeech.yaml b/triton_model/1/aqc/examples/data2vec/config/audio/pretraining/base_librispeech.yaml new file mode 100644 index 0000000..cffaa2d --- /dev/null +++ b/triton_model/1/aqc/examples/data2vec/config/audio/pretraining/base_librispeech.yaml @@ -0,0 +1,89 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + tensorboard_logdir: tb + +checkpoint: + save_interval: 5 + save_interval_updates: 25000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + +task: + _name: audio_pretraining + data: ??? 
+ max_sample_size: 320000 + min_sample_size: 32000 + normalize: true + +dataset: + num_workers: 6 + max_tokens: 3800000 + skip_invalid_size_inputs_valid_test: true + validate_interval: 5 + required_batch_size_multiple: 1 + disable_validation: false + +distributed_training: + distributed_world_size: 16 + ddp_backend: legacy_ddp + +criterion: + _name: model + log_keys: + - ema_decay + - target_var + - pred_var + +optimization: + max_update: 400000 + lr: [0.0005] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-06 + weight_decay: 0.01 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.03,0.9,0.07] + +model: + _name: data2vec_audio + extractor_mode: layer_norm + encoder_layerdrop: 0.05 + dropout_input: 0.0 + dropout_features: 0.0 + feature_grad_mult: 1.0 + encoder_embed_dim: 768 + + mask_prob: 0.65 + mask_length: 10 + + loss_beta: 0 + loss_scale: null + + instance_norm_target_layer: true + average_top_k_layers: 8 + + pos_conv_depth: 5 + conv_pos: 95 + + ema_decay: 0.999 + ema_end_decay: 0.9999 + ema_anneal_end_step: 30000 + ema_transformer_only: true + ema_layers_only: true + + require_same_masks: true + mask_dropout: 0 + + final_proj_dim: 256 + cluster_factor: 16 + scale_factor: 0.3 + num_negatives: 100 + cross_sample_negatives: 0 diff --git a/triton_model/1/aqc/examples/data2vec/models/__init__.py b/triton_model/1/aqc/examples/data2vec/models/__init__.py new file mode 100644 index 0000000..4ae112a --- /dev/null +++ b/triton_model/1/aqc/examples/data2vec/models/__init__.py @@ -0,0 +1,8 @@ +import importlib +import os + + +for file in sorted(os.listdir(os.path.dirname(__file__))): + if file.endswith(".py") and not file.startswith("_"): + model_name = file[: file.find(".py")] + importlib.import_module("examples.data2vec.models." + model_name) diff --git a/triton_model/1/aqc/examples/data2vec/models/__pycache__/__init__.cpython-310.pyc b/triton_model/1/aqc/examples/data2vec/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..784dde6d1eeddd42a7d326922e14b3a441bc5d04 GIT binary patch literal 417 zcmYjN%}T^D5YDvSZMSPhRFD;4U@r}NRm6ig!Hc(ogt!?o(!WjO-<}r4i%-!WeG=ax zS5LlzAee4ha3C|^H%aFEk}64J;^N}{6JsFct0Vto=*XE{TDT;LfShD~g$&R`XnchG zpc&w>g%)1noLnDdA%;yPycQI7Xe9g=v_k?Mi@W}tHzC;=NwXmW9Pg19fXsIUT#@PE z+bNODg`d_h7T98f&%CH})T!WxA3fqMEh>SzPN#_-$)c{5$>ojpDs6|_5fnDer8Yt; z8`Rv~+0em~7ic5KZe@;)*`TX7!cyob>5jzOaAkVz_DW&nl?q$kOL`gRS`RsX5Vhqj zuF}sjHAxu4$MkIoLe;2l4 J6TD^c`v=J0dgcHC literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/data2vec/models/__pycache__/__init__.cpython-39.pyc b/triton_model/1/aqc/examples/data2vec/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d24e39a3ab28962e1c807433a12b1fe7ca45c1d9 GIT binary patch literal 442 zcmYjOy-ve05cVZ$lcrQc2nm%3sMMjc#KeFQ12YR-BxITTP+am?+x!kJ2#J{|NJd`C zD-*B4gwv>qlYIAmcecMf9iL2M;_Ut917jfMt0Vty=*XEnS~w?&fSja#gcQ(2XncfK z&!L?c#B~4Y*ZTkzpUefJQe_7fOMH!Q&G>moA WM<>1DsoMJ@^GrdWCoZ literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/data2vec/models/__pycache__/data2vec_audio.cpython-310.pyc b/triton_model/1/aqc/examples/data2vec/models/__pycache__/data2vec_audio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ff1a6c9a144f1bfc9fd53f0c33db7984c95f3bc GIT binary patch literal 17253 zcmb_^36LDud0t=BbMEZy?(Dr-46wL{02U`fi2?!e0x61EN(3Z@BPyfW>7K>x&arsi z0|M(Fi_9XCDk(@*r7BUPs5G0hk}#FnPMA~_$8>7v!(wI!g8{^H1(nNEzG%53m##D2UP5_s(#{?Rkz-5RT@ZZpZp|iJ%8=# 
zYi+C3u>HY)^4WH)R$oG$e6`(N?YMTiS#eg%^_Eqy+74Rfo~bzY)k@*r!C$Gaw%BhV824ysa6^`K<1~e*_ch08@ct* zK;{a5&Z}r^D@4YU_F2W&E!EO2y{=lueXV5JG266a7YR23kB9KMov>q*3d$0)EQzvI zSeCR?fTu4iR;HA;Ge{4}^ngrfWjZUa;W$@Gv+56Sef?0*FP=fnPo<(x(( zd<^jM5I!Q|6M*NfNub%NoX3=`H;sBTVZAZg@4h~KHiVB$`2K$T3E93N>m5M*gJHc% zInJDf9|HVv2%nPhBbd7h)II8tJO`8|UOe5g>g@nG1P{tLZrY92wG)fBTPXld3Qn`# zc5fCMX9|tBuz!tpfb<6HPSaeejd4UuHiGYp==^FSs}D zg0QRYmg9YF+ z21Wi<9doR-s`egbqkEM({_w889DjVV;#P0&(Z~3bXm?h37c3qB%GhuG$_M}L^4eIv zRd?%^M!{}Y3YJ~1+%1TTi+hHAS>~5!|MGh;&Obuke60>jv$N&^YuYUW&0znS828EN<&r$ZCSmscIu6rHg;cd7n|*--Euq4LeQenZZ&vf zIG#;n;^*1T7C@P`)-L=Zy9v+EA7pRAG0Ben)aHzT`_KP5{{6knYbP9>Cuf1pYAq3K zf>SKqu82ad(r5r{>;}++=;ddd^;UGusIc@?`Ja8^H`nf6UOT<3n^jP@dVryOc9mgQ ztW-vEaoSIm%dJY&E|>jOnRFRc9_dWEe5+Gw1SLWTl8R~Gu~@CquDJeSncLoW<%u}u zvY25F5M8q9rMcR$om0HYXK&lpQ}V6Z%$F;q z8}qAo#W?!mfX)Jfl%nYgHTy`@HPz6xN5c;_llhF1QKNqonx+z#i)l!iYWAUK1Ti5| zYf~M5DEqE*KcYVq32x)F_&Egxp6V$RO4qPdPxJI?relB^UM!FFb>$sxLF7?SF1Kj< zG2Ck5`Z}&fDOv3ZQ6{$gX&0x^kZH$Pg+H)bb+ISS0IxPTOk6-;{t)RzaAAM|yp<(@ z9ZoJGmnoh?vXqTBFW9W2Dl~wr_-*^+k@3%Y4?*N`QY8llnl$TV&F?+ z5@uTQdx?^1JtlMDbM7TeahV>lvM5Q|N$Ysfhc#&B?#1xrq>_nb!a89MS;P0#Qra4^ zGs^=tJYC>z3{3W__%BZxyTqfR9)Q ztvS@p+oRSY>+n4Vqm5y#Q`QmdD9Xpt!!fIf)Wla5Ln%$-2|Dc$KezP>3b+-$T3f1e zm!dx&ei}a>y7Le4K)K4g(o;9USn}x0nyY*2vazmqHBYnDNieOG3g9t|p^36M;soL( z;w0h};uPXE;kWCzG7xDwzM%m!jMNC?5yW}KdBmfLM-h)9 z9z#5icpUKr;t9l)h$j(GA)Z1!jd&XI4B{EY`w;I#Jd1c1@qWbnF>(Rv0~=i1fiQPa z<_-cr2k0E)Lx>L}K7#lN;-i+fVu)`r)zT0i>*{VXT)|_=AII;674yI+A$1bJc`JUO zJ90{{l{;`uuJknWXWX;BbAZG*)ZY0GMUMWsd%;uJIjbk!kFjT&{+vrQgp;c!%ySkza3J0uo9xZ+6EhGkmmSFS+-cOI9G2J zlfI6(-%qqUO%h(mH(a~9>YMU41%U?>y;yJAMFaH)M6et|K0Tvz)Ki^U0!O~%EX_;==ttITjV5=PnA8qhV zjmm1+k`JA;TB+LpkgQ$3Ny^vYIE((sHqfFChSB!NwgERs9^M8IX0W}3a3|5TEKzk>io2!wM2fqm0DWfv_Y*(Lpc>flw79V{Q`=prW7b zL;NJM76)0r&VQjC4qDP}|* zGnA(hG|?T~4cI=(0D36(6H#M7M^@A-uSTfj_qx0$$~Ag1D=h7qWENV_pfoOC4{NJ3 zl^|2FoLEk+=;B4P6G*35V&d@t+SOVEU=Rom>qT|e4SD`rZ@`o1iaJ^^y8-57MG3~} z4a#;o@fzYGp0k`Q;_|ROWqn}{^l*hz3pTJ2ZlL5ce+1EzyQ1nBOL4f!T!_p96xDkE}O za1J8S+P=LO-Je61>x%w(3(4=2Fgv`BSP|5&Ky4ubJ%Tr!+oM$@GSe}29t2Eb4BXf^fR4$!zxq?EgSkD>O>2XhGssH`*dHgHk^=w zbMYMRl<3ln>>^O8{*Xg(tc0;PyT@d=RIL&{qd(4yFRYv*6_;S4mDlxdtZQ~5geouUlcx)m?T(+398$iW3{}LNj81QI?#bA*^m4VHm#^4SDFi*TRpw_E*ea&tAre)uT3g1t5TF$MGZLitB zY2R6`v@Ahof%qbWFEDtW!5?K%W0`*{yzNWLr+&q=+^CTB(N%KU>wIzf(2)jmv^ zDezvw8^6hY$y%sk{{HX)NndVPot^z;%*5UVa&uO&C6JRZ2a1Fy_E2etc8njKiaWA>9I_EtpU5KU& zZq$mcYtEWELd8mM<_lX|yw)QL-F&48Qrl2*b}{y0@+d}9XI^$2oN zaR;L`N})g9TIMBj9g+*;I94gHFu?9Y$`j8cg2o$MXkhEBBY$PhI0-<&%H|+oG#cV( z*+w3?UWMX&sM>a@rg318XjGtqzAMhKr?`7})hb~Djg}0rvNR(Uu2Y z=rYE&$%BK1AgABwgO3GB^*t8=$v0*ZL!gAf>Aq2hCA#g1e~luETjCd4DO?26VCN#z zoK|?mWv+r-T2S}o^0rjnCXkTBBRe2({rAub?h#I5MHA$($P+QebHI|WbAdW|K!aQ$ zpaztoraYaPXXF)72qa^9@Ms{P!Au;m#T2 z=e!iwo{@YmW(`zGE_&RV=sy=+|T?RD-sX$RF=lh#%W1z@^qMXFd zx~eibDi z@E7L}1CNY}GrX0CIOLJJO5z=b!3Qb4jW~IhYS8-3;l0#>xAOLM)2&%*j&KKuiJiOK z?ZW4$w&8P{_qT`pfjbF3V0XYv<8B(rYZyJ#Ph)(%(&=zcDGzcT=9KQG(I$&}j0Zif zn}gU-@&8@$5t$7|ybGxhI;Ra-Hi5}8w1PATP16uk!>$4OF1q|!U!WqSun zgJJe!3hRNiJnjvKrGx!arh?Lm)}WVVo$f>n=hGeaGTpHVf|Z$&a+50>MtKG^nDfRk zyO@=_uVZdw-AT_t$!BHB=$4XElrTSOrT>C{9gatJrLK0Vtvuw7TS;pG)N9(Cwz9zA zam0g&XAtKQ!${aW?2ThrU|xi|Z`>QlY@rVTbsv)B&URrqL{Af8PX(_adx9AfEho?t zDYTsMCU&$u&^;Jp`}ZX+KDvCYSEQ~K^>Kb!%P%1X=<;LU2z6VWlpY0sq7)b`YjKvn3*Hzo0{Wib z6W$S|j>*)~-p6R>?H1jSyHA3P9L4G&63dl;VoF)ptXa%=KTh*8_uTSDV6AQyfVIbE zZX(Q`@QyDJf!8_No$sCkW@M!s+3q>Op7zcXR)eaud)7P4UT_cgV}<9#(&ND{U|e}eETcxs zd)7M-YOtdWYkLMKVDz5F>~R*>0dNH8F~WIBZ_lyk?&Gr8bNw;TdFR404`R##JxV?QKuNx{DiFe3DmqOYclne7h+8J66$@D9O~P;`1?4!mrx?HnIU~R?(1-Z 
zh~aG2F0g(1Q{KnDC*jcnc=w9TU-X^|^G~;i0eN|Q53>Km_lREI3(u1Bj0zobBc^nq#w{g>0Hi>l)Ok)Ko}lntUW-bG*Y;TW&UG2WG9oD1OF z#&~Vp7(by#IL1+VnuhouW_Yw$0^Xd*=}cLVV}GU~uw4jv>7{^E`|pf!L2p1scpyQjU`#7|v;`hQR_>dnuwj4qT++SPY}M z)q$&KDB1;WuCf~zEtbJk!!n z3Bb4q8;iB6aocPv{wK!RHN!Z7Qm;4_%`hpI0}#YO=ClnxoTt7iRtFk&RlJWD?`Yxy zB51I8&RRSI{2HeFBZ~f}LhJgCUBi&e6(w( zT*a=9l8RV4(+xG4p_Gt-hA|9AT{Ns>kyG$bp2Zh13jv$l?j!1tJQ3|tV)KYLv~Xv% z%>x2b{RXOjuVe7Sty|z7?i@=v7yoOg$c|oLjlpl4TE8NL6_umZEhQNnZ z3*IFi7_NmQsEP68@L5`fN=y6zIq^e+j20Zo=`8^jUEpl)>rSVML38ZPpl5nWrvAfa zKNGnKfm*amDSTrALgPq>Dd?i%3PW#m_|iCbkV=OFs?@wgpUEm)K&4bbjA$?}B_c=R z`d3Hbr=AUp3MeYT>3vNvTr8|5PZUU@a0}KltR9ut--EtEx^@)5AeOnf&oQ88+mA^* zNuc}sPlWP>e0`Og3tpuFZFqKmtO`ZDE&e%c{UQT=hogw^%c4MyCJ(kxo5g!9GHQ)F zyz8o+CN@r-XS>%JNE_8LrfxE*Gx$3U`qnC`!4?0JFsYxDo&_CdZ!`Ek2CpLUW71$N zjRk%#@?ne40`733^ITw030&q1Y&^%{5QF^;{xO4t40uxmC+Rp-KTX(|5kQ$!f#1>? zlRA9Kp%Do%s2sUj@hgBzgO(H;_^T^Y)m%!~peg2wz-0ghtBpF8(d<+zfJijOKV|Ui z>^LJe!C{Z$-w^)q8B8-6#e87dka`$6kHKZ_uAdD)uP8UpNVJlkfH=Gg{P7Ha>Oeap z=7L-u+G;-r10y~Gg1=;-ekO^0lYO6H@O1=!%od_8@U_vMDx4dEz|!km{ArXGpAVEu zKW1gVsp3Pn7>Kr8I@*ihXF>l(mhWY0Ab1-CM`ov#s&tmZw4aDtmokJ#IWfZ34s}|5 zNa1o6!Aqm8wHrLt7WF0gVQd;Ur8&sYZnr&^@;C~7r650C+U#^OBQ0aA%4V-`5&^sTe3fD*PNhd>eVS=Ad@bZxH>Dx@smj)w zmqvGi>>shGbCa_6yz3LKo(kH5S0h5~Q^Ry9?bd!x%2WlIq6f zttt%#mrL$g1r9nO`8%u>@sMBoHgM1Rbp(k4m|&ydeDogUU?i=tW3d zHGKr3pv3#BrL`aDq1@CSYL5=s)4((vfk^XG-IZS<+P2`~(g*oIAL-Ubg znCO-EZvI2hVcpaN)7(r$hYGm)z=#`6>#9)c}SbVd{K|?^~Rn zLuLtzOuEe620eztlZs=SLscq8VJ?xmY3Zy-a~f(D!dmbapdjXjR;mAmz9fN=gbr+m ziN_dUm4u#i;80*|3TPKzWZ1lSplXf=Ci&h;!f!kO8ElS2xq^FRV25!wkAP3HEZ4i2 z&*aV?w(+=JG9zra2Lxo-hX11OVt@1U>yAlrF474*QbiH&gi(9ZI zLK6VZpJj|}=5@5g2NhB(WeTkzpE3o#H#P{{X2m9yX>1kS!7d~7c|iE34n|8tk80A% zfp(?ql(*?$nGVL=T2hQJNDfhoC;@9HEt%Y=rTf48w|gJ_;O8&F2e!{=fXvZfAPakL z`1u^3b1UGGeg%zuquQ}5lKJsdFib<5q~s`0fL4epzF0b*RQMxE60WUPeQnJt>1&Q7 zUSgH)hbZ{n^dIot$W;vEX6Av%bp7KUC;;cDv^|ZBTb}Q2E@V6~N^tVfC>~&2xPcm6 zy5Qsmm#`Rp03m1U6rAMqE$Gf6A8p79Pa_?<>urDKWes1)DJzjOFG1q5$V5Klu-NL@1z;+Dn^-cc9inz3ON zZpAM#c!=N~mHzMec0#_9+U@Pioq9+ zTw>&e8-4@mY?0ifE{pMiDJAHd#D=QzpnW^Tp=3tdLsXV*OWm z*b)mDn3A)d`sm?DIep3sbjJnZ1zP4nRMt-Zp%y`}(LaMuu8M0I6h5^JK(&7Y0Pg3F zs|)xmYF8JEQ(~S|JjLKNgK-A0Fu2d)YYe{0;5!Jw#PW-Ie8F(DZHb>DSc(vLSeAMj zZb8A93{psu-1QcI`uA9!$S$5o0Ja;>s`YBv7jLB?R*M@f90STW`?rk89Y+(Tu1^da-Blg$Z3;K+zHBK-=@Nd`xmG Ox&|&UaW-X+ss9JP&hugb literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/data2vec/models/__pycache__/data2vec_audio.cpython-39.pyc b/triton_model/1/aqc/examples/data2vec/models/__pycache__/data2vec_audio.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6a7e2be5e1b39868d30030ef7279ea5b23502ea GIT binary patch literal 17284 zcmb_^d5|2}d0$`CbMEZy?(BgDuoz%*Esn)WQldZrBo0y%xuQtu;8^l_Fx|VD-8mMo zdqH5+BgI;zq(2M^VWn)#rmJRCNnFg~D0Y&v^M_MWIgXP`6z8al)j3LzE0LLs<5Uub zE{g>6`+eOrdtj-=t}JTa@xA-K?|tvP8r6Y;jE28=zWA2=rM9O19y6Q&63AS}&pWDX znxi>-Ln}wIUe=K}8b;GBn@y{1>4ck&MAI(YlC~PjW~!W$wBAUU)6zcE%$Bn;EZ58$ znyxhm%J~>F*esNXB%NpsH%H1N&0@JIdAl*%94n7WI@uU+PLwB_yUM#HpK44tcb9iJ z_muZE_m=kxERFV4<*DYr^1f!NToPEOvA=nse4sg9o^Bp2A8Z~f9};-BakzP;d_>n= zt$Wm+S=YOA?8N9ERPq@3Bf_vNBkqLM zIqB%HYcs`nxrcKzMwoW0ezn%9dR~~Aue%LrMh{b;TK4PhR<(h&@$rwd*0oojf2HlL zG~95opS;p;&DR%Dr&wz@msfnZ(yV$*m3qsm*IW;+3NKVW_jEp?J7+)Z*3L#Fg~K;gwdKvXRnt}1S~c{b z&_nJ;RdrrjY54W!#=S88a`m38=1?abeQ~9E(`}#wzpXyKQf>M5RkX0e!J8}d^R9B8 zN);V0xM3P|cJEfOhH#+nRqi&b72j=nZ50moODbNi+HkR2`;)J@m`05|xcSdQ<}!ZX zYrsTTBleYz&uFgc=#Jr-b=|QZ7-h>%xVDqHK)4NfGKMGJl$+S4p)4h3X_RH+vb384 zJbMA>UCz2Wqz5EDAnClM^O7Ev^q`~*k}gPkNYX=+9+v(`(0?)Re^}-;D)2GD$7A@2 
zz$XANI=g^hqcV?4ska;T_QdtZq~E=L_*4uZ7x=z@`w3}Zl6w2m{yQbp2`x_)+qj?Y4ib)HqXW zv^{UObh^~!nUz*Nx70W*K;UQ6ap$W*oG{fyLtcb1&H5GgGq?JhoOsE<<(8CNYqvaK ztq68X`_j#OD5`rUDFJfLmg@7RxpvECi&jY*d#rWlly_VCN1b|eCd^3xQub~NGhdy& z)qbd?^>ADgl@@Tk;#bvz>qkZ5WF2#? zwrcJUWurTldExLAeR<*d&8lC!wL>4{3#z@c{A9tx@vn}3_dDJ8Vsb!u6)7eVB`Q}w#vJX!VBNnh3dcIym1Jdh`JNiXFr1I~GJ=e0=&8qfgoa@~HXaSzlDBet|DQ3T3oKDE zP)oNT<*hgtNZcB5>AA+u>iyYBaF4Ix=amoydZ0~c9m~-JBQSR}odC=V5=Eq6*WNMa z-X+bGy_m7X1g^aDLlbwVoUW}XRpC_#vp$ZfA!#qvRXDI*^D7GYDM+`mo9Zd_6%LVz zM7Igp!7KX)z#b>3=_xE!J;!SK-iAh7HKQv7;2LXf{!7Lg?_VR)dkOgtzO8>%o0r${ zey(gemXiRtk`S2fB=4unwsTB!StoZtT~11Rz{#T|<))qEQ6J8rQ@EeNdz5ibI780x zeVwo3;(#;qw(b<2(fd|8@0@hToe7i-I=h@nj8brRJ9`ihIkV1QX9^|5&MBwl>_=+E zIp9pAtmuw92c1LrHS|5^oOTX7M^HNMoN=-_I@4b=4)$OS6>HH$%C#Kz8UC?)|%ci0>jaFfnnXH0iJLe+9*pR zP9aVsP9x4B&LGYr&iXmDVh;oO<-uTBfDZbF?ogmD0&NzP>joghNR1#KL0m*!L_CUk z6!941F~sAD#}Q8;ojYP??t>9@f6}I#QPBM!^kD1_pftp z`{UdJ$sGWE8qjIP2N54ad>HXz#77)s$$F@(gG{$9ghxB3KNGLv81l#QJK-b(Fil9E z#BbI~KH#35lGSn#j>=k3BY(y}+dT(Ja$WD9U)N;(r~Ib_eU0;a#{UR=mh?xZwYHc7 zBqi;i?L&G!e$3$>UT|>vW>RzSKE|JL9Sglf21}U*7jP#g{opH9d$|o}(jtWk(^7V` zUiGfum`R5w7=bX=T4|CBd!gmK&E?RRrxny5tn|%#%dNzD`_AnO>u2x^x|Mmb(tbGf zrdwaQ<%5#97x7Fk`?td3&7gVk&}}vI97w~;va3ku`7DD}U&f3>%e_-=gd;Mk`H1n3 zCtezc4gf ztWXxFugbZ@F4?!y&0%yG2UZ;5awKku9K`F1SZG9NPli7HYq!Geb+Q;YBGD_g(yCL|tAIV{fdl2#0B4qr z@lg#jhlPM|7N#`JY%C%+$C?kAs|+1}NzktOK;k)PsEcnVI`$78an zFXDl5j0Z+X_sum84-R3LdKsJsQk4WkdokHft!r1cFHU^H=$IWVFuG|xFy_x|zhJEE zx_0}lj|T|t66y=A-$E~V$P7F^CWvHKh-6~!#*88SU6>+AJimPI>}nc!yh5Tgi+3{Z zE!82?xc9u7oFcm*rx7(VJ=c$TK;Z^EC=FA+#$kbcsZ&|*p^g`tRmV}}RMhJTW)kWv zh{_objNzc6P+w&i`ckOJC)q#Fu+Xe7xxg&S`_&o<$~&m)?LnZCkJF7uwuQ6Bzamb+ z&L#4Hv}==H zXx&F?QvGUNTbERdY{6n`F|%ZLESisG<>MdOIuM?E&i9{R75XlE*#UlnZ%??G54AW#;qohTSH|M1l}ko|l3d6Yyn+_EC>a|UNz0K=QXY1_mA-G1M)b9!E% zG*%DnaPHfdI2UTyw=np<%P2=bi-CGaPxgMDS1dYyT^(a;(FCiHi#o1d&;kn_hcOpg z#LyWl+S3g9eW=e!3Fq<@{NOLI>BEPB3y z{os8MlL|}4qaE$eXaz5bQ<+!oCS;%9M$Om}GH`+FZML!?NqeEa^d@8VQ-p8Zj4oi` z#m~zjAf=}mGz^SbJ-Wk&>^SU}z2QDlGPr`DY%mGu8ds#R?tWpV-f&9QQp<%hh?>z- zPhB$`$$2J3i-s4yS(cCIaJvMTy~!>jwd(J32$4dp)u|mOyQyl8=-K<@vGawMDSjR= z3Rr0E3uY(Lu{#h$7IjBovQ&#`s#*|S5SA7 zXBI~(#2_4Z5)ZNv95^D!h(`nj0!_dcyVlH(-m)C6!&Iq)!2*MO2*5(|>VRf1 z-wO@D9omk22kQJVz0&e-uek228`|#O@LROF!So7~U!Z zEtxa;UXzCR_$$PJWM?zF_1H3uN0w=SU_3VL6bM~vTg|8qUVtCi1H&MLmb?!CFTLe< zCYc*E1phN2+?_+ahUNo^9VFJ(&c-<8O21En+6q7Yaiag?GtmD!#=$_muBoR{1ny9fjVeV*s<7c~ z-j6W5tDh(LHLcP9yQKkReBu{dYdGw_vQf7qSV-OKm&;2PD{QYzC_pdJ1h;F?< z%J4)%LBIcigrj$CvMNhPmyfiLm%!-iq~SnGvMVTCfD($KIljv{q>HJG#0K z094}OOd~5`9r??v)=2;&_O<|_qtQ@*lMUs-%^Ec0L$$U?)r|*(MWYH0^gVT&Jth5n z%Wef~=@I5k*m}a`t*Te`eHGf#2odi568rlt20|^1NX=v;&gE~hsSfkzCNY1$-k>EF z5HB>mdD@m1pym-7Jp*V3rcB697+;J;V0WYf0BG5O5@UTX+JI+_6JsC%G_hD9^L^9cj$5px#P9!lRJ(B>=BZ;4mlNR zU>ZW;*Sa1-aAsEYdaA#V>d_-|7$AJ#Z{WTgYkC))BknpZ;GD%e?*2L`ho4x?f>T5vcu>e| z;#nzb;6I^x=zu!lUwSE#(D(#C13_E($sUI{wL{Oqr!74v`#nSO==Yor_-|X!pMy|i z;oq7GxqSExI7aXMX~^jO_Xd9P?op-f1R3l=PB>u99SD&sYU7}7P;+N7?+q^Wtxc`o z!fFSwqO4HX1Sl(1{y~O2y$&NmCl}9k-alu z&@(CKW%LOn1Ld6oHqm}$KQ@wF204509cGOhw+ETA;B5mO-sELki|;lHH9$3VP z`k?!GHBI2I+@Vgkb%8o1+{A;#&nMe$!Rf*o@X%W@I?G$!O+sSB{ltA72(rAP$Qu|v z*H2@7yw%xwPMH7_ALf+pX3-{(dW;7Hqf>xF0Ez+JkVWtlxphiii@5R7NUg)p2~19) zC1gG5p@xte_AN+%*~KCFXj$^sXBUT&A8~Ble}gSiqZk;df!#tp0t1l|3_HdIs04gS z*!QTDSO5juE zV}2LkWOuxmjQV-AK1IJ?*1U(JwS>K!x&Ro6XfafU@$Hn?3XeXl}@x$L7sIw z6RjdB&}fkBjP)Q`n++K_y<}i^KZO}g2VZYK}?9Y;Kfcn{(NVi*#;hk|kJ3QUkN362Nj zm@ViWX#J3kJJo@45j{=BJ(Yrz^aN8ST27!PQfN65Ol)hpzjGkM`iJ^Az9w|}$l}rN zjF?}eGo;>BTLg6RSTKSW5PBTpi3xbl$>`Hq+jMVjooVb^uf84>FryLR-?Z4BwP4yA 
z+Xd}8M6`oWx^qZ+JNeM`z9XsGho<@-?9!w6wcub}Ya*`2vOj{YdQ(~4+EN8)atr*> zLrWcmJ$rlIQ-EJ^_B;VU4fqXMy*Jed*L-kI_s=i{Up_5kokhK``R8zdr+M3hY1}0k zqIuhJ%cDByQRiEz^Hfx)ckgkVdemS#!2G;#qdZN~l`Xf>7QUC3_d8pt%UF!DV||Qe zD#F;q*!>9Gr~9Q$MWsi8p(q6oOD)c_`*bh{oPbWK`%G{csiTrQ()|cc!JQfZqyDqd zK8<2^lo@#9ADh(H3}*`S-G|dW=AT=<0KB!F67cr8M$$GVq-(@32`+snb3;AAjc#O~sDo$Z_h z?D^m@y(Fn;rREH7_Wl5^a8j)KF{$}6)Vv@ynR+gW zF{X17^*&DC^N^A-lQ|rYu|$c zb_}oG@nG_f{xGFJi{4*Dt4ZfLYCapMPe3w`xPj9;xq;V%X<*r;a}sr!lN}St>;>(K z+ne%!dduxS&9T|f;tl5%968b|$G+mb^_eA&5dX8?&pD?fIM?y>nsWv%b}fDesk6TT zT+}uH^FOLaEGzWJSVmv7@fdH&7|Sxoxd^^xj5oH7@gr))V;qswOe=5zi$}U;;LUlQ z&ZP4c_Gc0T+|w~fO=g<9Q~dq+OPl0TLs502DA2=6N+Lu33PFDx!F7mpzl=Cch(Zmf zDv|311mil**)VFrvM(aL=fRm;4+mj5cUIur8H;*Rn-|#)i*8oHS;M{UF4PVf^1lM! z8%6ZT@kmgowO3TljmEdAWUYY*?*AD`^|g3KWEOC&U(8VycOY8*dE+*1QhK!1^C*W3 z+n7Ba>Q5*WG8d@iqWFLDCG`3~ImGJh4jKZO|6ql2HuP^B4b^*S^u+v<0E(88ZizC# zVNn%8D3b@rjAv&paR;{-Vo+cG||^xWeoW=S>16B{JzSefOz9?edqBOo6>hJbFeXK)fviXT0L^2Qc@ z_}CM@9V%=b&bkrri?MMuK&B8?KXkd3A2C?}I1cv)o8oamxRHfYO+lQ97?|GB?>>S9 z*W5dntXqAFV^d8h67Gl2!r?4J$la>M^r^AOF|oXYT+KkTd*HxC=yk%AHUl4-#= zWrZJ7cH*R$8~t(cy5%+#zL&<=%M*!l}(^>CL&WbH=kz%Sd9E-aHkC+A z_MZtAb*s5d%?7VpgjhV`Fj0d_-c|pMwZ6@uz-|ABQWPoNsNmS3+Ui{vS@VrLeDi86 zO>CSx#~Nh@ml+&kK-I8XWbmsDL~lzgm*{%ce_~2BdE%FFhuJq7{D1+kXqXUlub36W zLeB%PcNTD_i(T&`%S+^Nw~vhvFqmdA#o!+>*w28sS(TW=hfSI|Tc|&aER;P}cs-3W zX~HcX8j}cvDm^DHeo#>gHRx4Gxh@-#<5AyNTMyC^-pnd(Y~^QCvHs5~XYW?<^3&Glt8(_z_fq%ZsUaWml!xT2 zm(4RTk3IpC#ry$h``;M+A%f6y;I9n}NEM$}c;zf{AO3!l8QheS)rj#VDpdcHYsqtY z5kM*oZZf?Hz=q8S29t6svalePj@dLMqe8!5=x-GIFx;%N04|y!tz}qf=&TvXa3B_{ zn~+;o9*QoP>{u1Mr#hEpJXdrJtqFwwT%BT!w87<_}leDv;;*5M1jrTLDyk3ObbeJkngtleD zE7C#@`G@*(-48N$za7%j{sTJ+TYMtV-q}Ce;Y$km*kx?OHs_-`(@t+d<=ShXvLlG` zfo%_S4Yc<|Rcm2fFi7v)NyLO$;>{)glz77**$Mh?GUl#K;x`2^&J*U8J|%O>0FpNk znDo=k8Hb@5&Yu2E&I)$CugIzQv+>%3Ucz{lRIQzWCB1IK`-y%i+U;*k)y4l>(;ALiXsZyru zL#L_~<`l{876(6?+feIrTnpX=;`SF?t^OE&30V;G2Ij)X6O1oUVsAU}C9p+B6e~1Q zupL1QG(gEKC*dVe1rB`%v^6#~!1Q8DI@;7}lv(b2)OT@b}IVb#~ z2rdy*C)Q38I3N7M-|4>ho8P(!-`PHg0Rl&#f;?=#@i%yQ$eoBc`sdh)(5kICRbhL= z42;*1Fey!{A1Q+=z+(!Z1rqPjSgqmbmCaSpQy*uQt&~Qc0_;7W-59EC@K2_P zY%ZoS8KXpO-N^vC>EYhudd5Rb;wJ}3(E(e-O@wm+jt^$92MPKPLiRK%Qpv-u&T?XW zt|3EQM!M(bxAlFO8lj12ZTlkjp-l>R#Veo>&Fqsj-T0xM+!6V z@(oWE>R&^=qN|S({p7PI+`0QHc=5n#ZV~PdR0l)hf?Ne^ep%-h%;uHmk4&gXmJee2 z7_^4ORCKmx-=v zmkb@hH&f)0`9AX_SjZ?}Vf{HCuwdRROvyYaKYaKiCmy+LlGuZU=$Xf>C;voCpnvJl zVC?HNIzEq|n*(b7CbG;W=Wbk|!{3Z?9pJf{Np+HwoMmu|0q=r*At3(D;$N)3Nmv&F zm{xvekMA9BwH@`B2^Nt-tf`ro;V=|^_aH)vaMPRk=O3^-QCwX`05%)0t@T>m7w@7X zF00pBI1qogP;+t1H+#I_XD8f4bqPTQKkp)flrj8LYA83JPvugEKAZ*){V;RAFr1l8 z!O19d5&4aO?`6KMeOdcdDsSkSA?hxuNqNNS#f zMOIT8mW*vJnSB%gT>-u}Raw>`9?JJBBCE=`D)M1Xd7#%S8bj>oA~%k(1>eH*$UJZ8 z@;zBsC1QOe$&Fs`FN^1VgnSfvBD{N!DIV-5DkoP*ZcXY!wCsx{wB9iUWVqnyE*5P% aHq)C71Y9pBiirubJ77bG&!+5*{(k^3(g*(l literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/data2vec/models/__pycache__/utils.cpython-310.pyc b/triton_model/1/aqc/examples/data2vec/models/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1457326a822366380a7e5be9960ed2507270ccf GIT binary patch literal 293 zcmYjM!D<3A5S{G0wiKlvdg&*4X=qOsqzE1hUcA-A5|TMA#LYI76@@+aamD>F4gZnxFk{kyivc5k;0sK)gmJEOKDr|59&^s5 zCoUayqJU@UvI*oc<(WNVabRQY2dv5Ycvsb?(nzt`DBGeJ>^qm^y9~Yxt}Z<`PAc;9 zRjO|-ajVviMUqAZ^|nDR*Uq3N*@nu}OS6lQoQd1AX+ud`SxlLfUN#av;rlEW<4eC? 
UcTolJ;{B8ZTZ 0 else cfg.final_proj_dim + self.quantizer = GumbelVectorQuantizer( + dim=self.extractor_embed, + num_vars=cfg.latent_vars, + temp=cfg.latent_temp, + groups=cfg.latent_groups, + combine_groups=False, + vq_dim=vq_dim, + time_first=True, + weight_proj_depth=cfg.quantizer_depth, + weight_proj_factor=cfg.quantizer_factor, + ) + self.project_q = nn.Linear(vq_dim, cfg.final_proj_dim) + + self.mask_emb = nn.Parameter( + torch.FloatTensor(cfg.encoder_embed_dim).uniform_() + ) + + self.encoder = TransformerEncoder(cfg) + self.layer_norm = LayerNorm(self.extractor_embed) + + self.final_proj = nn.Linear(self.embed, self.embed) + self.contr_proj = nn.Linear(self.embed, cfg.final_proj_dim) + + self.num_updates = 0 + + def make_ema_teacher(self): + ema_config = EMAModuleConfig( + ema_decay=self.cfg.ema_decay, + ema_fp32=True, + ) + skip_keys = set() + if self.cfg.ema_layers_only: + self.cfg.ema_transformer_only = True + for k, _ in self.encoder.pos_conv.named_parameters(): + skip_keys.add(f"pos_conv.{k}") + + self.ema = EMAModule( + self.encoder if self.cfg.ema_transformer_only else self, + ema_config, + skip_keys=skip_keys, + ) + + def set_num_updates(self, num_updates): + super().set_num_updates(num_updates) + + if self.ema is None and self.final_proj is not None: + logger.info(f"making ema teacher") + self.make_ema_teacher() + elif self.training and self.ema is not None: + if self.cfg.ema_decay != self.cfg.ema_end_decay: + if num_updates >= self.cfg.ema_anneal_end_step: + decay = self.cfg.ema_end_decay + else: + decay = get_annealed_rate( + self.cfg.ema_decay, + self.cfg.ema_end_decay, + num_updates, + self.cfg.ema_anneal_end_step, + ) + self.ema.set_decay(decay) + if self.ema.get_decay() < 1: + self.ema.step(self.encoder if self.cfg.ema_transformer_only else self) + + self.num_updates = num_updates + + def state_dict(self, destination=None, prefix="", keep_vars=False): + state = super().state_dict(destination, prefix, keep_vars) + + if self.ema is not None: + state[prefix + "_ema"] = self.ema.fp32_params + + return state + + def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): + if self.ema is not None: + k = prefix + "_ema" + assert k in state_dict + self.ema.restore(state_dict[k], True) + del state_dict[k] + return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + @classmethod + def build_model(cls, cfg: Data2VecAudioConfig, task=None): + """Build a new model instance.""" + + return cls(cfg) + + def apply_mask( + self, + x, + padding_mask, + mask_indices=None, + mask_channel_indices=None, + ): + B, T, C = x.shape + + if self.mask_channel_prob > 0 and self.mask_channel_before: + mask_channel_indices = compute_mask_indices( + (B, C), + None, + self.mask_channel_prob, + self.mask_channel_length, + self.mask_channel_selection, + self.mask_channel_other, + no_overlap=self.no_mask_channel_overlap, + min_space=self.mask_channel_min_space, + ) + mask_channel_indices = ( + torch.from_numpy(mask_channel_indices) + .to(x.device) + .unsqueeze(1) + .expand(-1, T, -1) + ) + x[mask_channel_indices] = 0 + + if self.mask_prob > 0: + if mask_indices is None: + mask_indices = compute_mask_indices( + (B, T), + padding_mask, + self.mask_prob, + self.mask_length, + self.mask_selection, + self.mask_other, + min_masks=1, + no_overlap=self.no_mask_overlap, + min_space=self.mask_min_space, + require_same_masks=self.cfg.require_same_masks, + mask_dropout=self.cfg.mask_dropout, + ) + mask_indices = torch.from_numpy(mask_indices).to(x.device) + x = index_put(x, mask_indices, 
self.mask_emb) + else: + mask_indices = None + + if self.mask_channel_prob > 0 and not self.mask_channel_before: + if mask_channel_indices is None: + mask_channel_indices = compute_mask_indices( + (B, C), + None, + self.mask_channel_prob, + self.mask_channel_length, + self.mask_channel_selection, + self.mask_channel_other, + no_overlap=self.no_mask_channel_overlap, + min_space=self.mask_channel_min_space, + ) + mask_channel_indices = ( + torch.from_numpy(mask_channel_indices) + .to(x.device) + .unsqueeze(1) + .expand(-1, T, -1) + ) + x = index_put(x, mask_channel_indices, 0) + + return x, mask_indices + + def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): + """ + Computes the output length of the convolutional layers + """ + + def _conv_out_length(input_length, kernel_size, stride): + return torch.floor((input_length - kernel_size) / stride + 1) + + conv_cfg_list = eval(self.cfg.conv_feature_layers) + + for i in range(len(conv_cfg_list)): + input_lengths = _conv_out_length( + input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2] + ) + + return input_lengths.to(torch.long) + + + def compute_preds_scale(self, x, y, negatives, filter_negs, sf=0.3): + + logit_temp=0.1 + neg_is_pos = (y == negatives).all(-1) + y = y.unsqueeze(0) + targets = torch.cat([y, negatives], dim=0) + + logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1) + logits = logits / logit_temp + logits = logits.type_as(x) + + if is_xla_tensor(logits) or neg_is_pos.any(): + if not hasattr(self, "_inftensor"): + fillval = -float(2**30) + self._inftensor = ( + torch.tensor(fillval).to(x.device) + if is_xla_tensor(logits) + else float("-inf") + ) + logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor) + + # applying the scaling factor for the logits corresponding to filtered negatives + logits[1:] = index_put_scale(logits[1:], filter_negs, sf) + + return logits + + def get_logits(self, logits): + logits = logits.transpose(0, 2) + logits = logits.reshape(-1, logits.size(-1)) + return logits + + def get_targets(self, x): + return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long) + + def sample_negatives(self, y, num, padding_count=None): + + if self.n_negatives == 0 and self.cross_sample_negatives == 0: + return y.new(0) + + bsz, tsz, fsz = y.shape + y = y.view(-1, fsz) # BTC => (BxT)C + + # FIXME: what happens if padding_count is specified? 
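+        # For each of the `num` masked timesteps, `self.n_negatives` distractor
+        # indices are drawn from the same utterance and
+        # `self.cross_sample_negatives` from the whole batch. Drawing from
+        # [0, high - 1) and then bumping indices >= the positive's own timestep
+        # by one ensures the positive is never sampled as its own negative.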
+ cross_high = tsz * bsz + high = tsz - (padding_count or 0) + with torch.no_grad(): + assert high > 1, f"{bsz,tsz,fsz}" + + if self.n_negatives > 0: + tszs = ( + buffered_arange(num) + .unsqueeze(-1) + .expand(-1, self.n_negatives) + .flatten() + ) + + neg_idxs = torch.randint( + low=0, high=high - 1, size=(bsz, self.n_negatives * num) + ) + neg_idxs[neg_idxs >= tszs] += 1 + + if self.cross_sample_negatives > 0: + tszs = ( + buffered_arange(num) + .unsqueeze(-1) + .expand(-1, self.cross_sample_negatives) + .flatten() + ) + + cross_neg_idxs = torch.randint( + low=0, + high=cross_high - 1, + size=(bsz, self.cross_sample_negatives * num), + ) + cross_neg_idxs[cross_neg_idxs >= tszs] += 1 + + if self.n_negatives > 0: + neg_idxs = neg_idxs + (torch.arange(bsz).unsqueeze(1) * high) + else: + neg_idxs = cross_neg_idxs + + if self.cross_sample_negatives > 0 and self.n_negatives > 0: + neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) + + negs = y[neg_idxs.view(-1)] + negs = negs.view( + bsz, num, self.n_negatives + self.cross_sample_negatives, fsz + ).permute( + 2, 0, 1, 3 + ) # to NxBxTxC + return negs, neg_idxs + + + def forward( + self, + source, + padding_mask=None, + mask=True, + features_only=False, + layer=None, + mask_indices=None, + mask_channel_indices=None, + padding_count=None, + ): + + # extracting both the source and the augmented audios from source + source_audios = source[0] + aug_audios = source[0] + + if self.feature_grad_mult > 0: + features = self.feature_extractor(source_audios) #features + aug_features = self.feature_extractor(aug_audios) + if self.feature_grad_mult != 1.0: + features = GradMultiply.apply(features, self.feature_grad_mult) + else: + with torch.no_grad(): + features = self.feature_extractor(source_audios) #features + aug_features = self.feature_extractor(aug_audios) + + features_pen = features.float().pow(2).mean() + + features = features.transpose(1, 2) + aug_features = aug_features.transpose(1, 2) + + features = self.layer_norm(features) + aug_features = self.layer_norm(aug_features) + + unmasked_features = features.clone() + aug_unmasked_features = aug_features.clone() + + orig_padding_mask = padding_mask + + if padding_mask is not None and padding_mask.any(): + input_lengths = (1 - padding_mask.long()).sum(-1) + # apply conv formula to get real output_lengths + output_lengths = self._get_feat_extract_output_lengths(input_lengths) + + padding_mask = torch.zeros( + features.shape[:2], dtype=features.dtype, device=features.device + ) + + # these two operations makes sure that all values + # before the output lengths indices are attended to + padding_mask[ + ( + torch.arange(padding_mask.shape[0], device=padding_mask.device), + output_lengths - 1, + ) + ] = 1 + padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool() + else: + padding_mask = None + + if self.post_extract_proj is not None: + features = self.post_extract_proj(features) + aug_features = self.post_extract_proj(aug_features) + + pre_encoder_features = None + if self.cfg.ema_transformer_only: + pre_encoder_features = features.clone() + pre_encoder_aug_features = aug_features.clone() + + features = self.dropout_input(features) + aug_features = self.dropout_input(aug_features) + + unmasked_features = self.dropout_features(unmasked_features) + aug_unmasked_features = self.dropout_features(aug_unmasked_features) + + if mask: + x, mask_indices = self.apply_mask( + features, + padding_mask, + mask_indices=mask_indices, + mask_channel_indices=mask_channel_indices, + ) + # applying 
the same mask (as used for the original sample features) over the augmented sample features + # this consistency is needed as we are going to compute cross-contrastive losses as well + x_aug = index_put(aug_features, mask_indices, self.mask_emb) + if not is_xla_tensor(x) and mask_indices is not None: + # tpu-comment: reducing the size in a dynamic way causes + # too many recompilations on xla. + orig_mregion = unmasked_features[mask_indices].view( + unmasked_features.size(0), -1, unmasked_features.size(-1) + ) + orig_mregion_aug = aug_unmasked_features[mask_indices].view( + aug_unmasked_features.size(0), -1, aug_unmasked_features.size(-1) + ) + else: + orig_mregion = unmasked_features + orig_mregion_aug = aug_unmasked_features + else: + x = features + x_aug = aug_features + orig_mregion = unmasked_features + orig_mregion_aug = aug_unmasked_features + mask_indices = None + + if features_only == False: + x_aug, layer_results = self.encoder( + x_aug, + padding_mask=padding_mask, + layer=layer, + ) + + if features_only: + x, layer_results = self.encoder( + x, + padding_mask=padding_mask, + layer=layer, + ) + return { + "x": x, + "padding_mask": padding_mask, + "layer_results": layer_results, + } + + result = { + "losses": {}, + } + + + q = self.quantizer(orig_mregion, produce_targets=False) + orig_mregion = q["x"] + num_vars = q["num_vars"] + prob_ppl = q["prob_perplexity"] + + orig_mregion = self.project_q(orig_mregion) + + negs, neg_idxs = self.sample_negatives( + orig_mregion, + orig_mregion.size(1), + padding_count=padding_count, + ) + + q_aug = self.quantizer(orig_mregion_aug, produce_targets=False) + orig_mregion_aug = q_aug["x"] + orig_mregion_aug = self.project_q(orig_mregion_aug) + + negs_aug, neg_aug_idxs = self.sample_negatives( + orig_mregion_aug, + orig_mregion_aug.size(1), + padding_count=padding_count, + ) + + with torch.no_grad(): + self.ema.model.eval() + + if self.cfg.ema_transformer_only: + y, layer_results = self.ema.model.extract_features( + pre_encoder_features, + padding_mask=padding_mask, + min_layer=self.cfg.encoder_layers - self.average_top_k_layers, + ) + y = { + "x": y, + "padding_mask": padding_mask, + "layer_results": layer_results, + } + else: + y = self.ema.model.extract_features( + source=source, + padding_mask=orig_padding_mask, + mask=False, + ) + + target_layer_results = [l[2] for l in y["layer_results"]] + + permuted = False + if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer: + target_layer_results = [ + tl.permute(1, 2, 0) for tl in target_layer_results # TBC -> BCT + ] + permuted = True + + if self.cfg.batch_norm_target_layer: + target_layer_results = [ + F.batch_norm( + tl.float(), running_mean=None, running_var=None, training=True + ) + for tl in target_layer_results + ] + + if self.cfg.instance_norm_target_layer: + target_layer_results = [ + F.instance_norm(tl.float()) for tl in target_layer_results + ] + + if permuted: + target_layer_results = [ + tl.transpose(1, 2) for tl in target_layer_results # BCT -> BTC + ] + + if self.cfg.group_norm_target_layer: + target_layer_results = [ + F.layer_norm(tl.float(), tl.shape[-2:]) + for tl in target_layer_results + ] + + if self.cfg.layer_norm_target_layer: + target_layer_results = [ + F.layer_norm(tl.float(), tl.shape[-1:]) + for tl in target_layer_results + ] + + y = sum(target_layer_results) / len(target_layer_results) + + if self.cfg.layer_norm_targets: + y = F.layer_norm(y.float(), y.shape[-1:]) + + if self.cfg.instance_norm_targets: + y = F.instance_norm(y.float().transpose(1, 
2)).transpose(1, 2) + + if not permuted: + y = y.transpose(0, 1) + + + x_for_contr = x_aug[mask_indices].view(x_aug.size(0), -1, x_aug.size(-1)) + y_for_contr = y[mask_indices].view(y.size(0), -1, y.size(-1)) + x_aug = x_aug[mask_indices] + y = y[mask_indices] + x_aug = self.final_proj(x_aug) + + c_proj_x = self.contr_proj(x_for_contr) + c_proj_y = self.contr_proj(y_for_contr.half()) + + # Defining the k-means clustering module + kmeans = KMeans(n_clusters=max(2, x.shape[1]//self.cfg.cluster_factor), mode='cosine', verbose=0) + + # normalize the sampled negatives of the original and the augmentation before clustering + norm_orig_mregion = F.normalize(orig_mregion, dim=2) + norm_orig_mregion_aug = F.normalize(orig_mregion_aug, dim=2) + + bsz, tsz, fsz = norm_orig_mregion.shape + # pooling the normalized negatives before clustering + data = torch.stack([norm_orig_mregion.view(-1, fsz),norm_orig_mregion_aug.view(-1, fsz)]).view(-1, fsz) + data = data.to(torch.float) + + # clustering and obtaining the cluster IDs + labels = kmeans.fit_predict(data) + clus_labels, clus_labels_aug = torch.split(labels, bsz*tsz) + clus_labels = torch.reshape(clus_labels, (bsz, tsz)) + neg_labels = clus_labels.view(-1)[neg_idxs] + clus_labels = torch.reshape(clus_labels, (norm_orig_mregion.size(0), norm_orig_mregion.size(1), 1)) + neg_labels = neg_labels.view(norm_orig_mregion.size(0), norm_orig_mregion.size(1), self.n_negatives, 1).permute( + 2, 0, 1, 3 + ) + + # filter those negatives which belong to the same cluster as the positive for the original sample + filter_negs = (clus_labels == neg_labels).all(-1) + + clus_labels_aug = torch.reshape(clus_labels_aug, (bsz, tsz)) + neg_labels_aug = clus_labels_aug.view(-1)[neg_aug_idxs] + clus_labels_aug = torch.reshape(clus_labels_aug, (norm_orig_mregion_aug.size(0), norm_orig_mregion_aug.size(1), 1)) + neg_labels_aug = neg_labels_aug.view(norm_orig_mregion_aug.size(0), norm_orig_mregion_aug.size(1), self.n_negatives, 1).permute( + 2, 0, 1, 3 + ) + + # filter those negatives which belong to the same cluster as the positive for the original sample + filter_negs_aug = (clus_labels_aug == neg_labels_aug).all(-1) + + # logits for cross-contrastive loss + sims_s = self.compute_preds_scale(c_proj_x, orig_mregion, negs, filter_negs, self.cfg.scale_factor) + logits_s = self.get_logits(sims_s) + targets_s = self.get_targets(sims_s) + + # logits for cross-contrastive loss + sims_t = self.compute_preds_scale(c_proj_y, orig_mregion_aug, negs_aug, filter_negs_aug, self.cfg.scale_factor) + logits_t = self.get_logits(sims_t) + targets_t = self.get_targets(sims_t) + + sz = x_aug.size(-1) + + if self.loss_beta == 0: + loss_mse = F.mse_loss(x_aug.float(), y.float(), reduction="none").sum(dim=-1) + loss_s = F.cross_entropy(logits_s, targets_s, reduction="sum") + loss_t = F.cross_entropy(logits_t, targets_t, reduction="sum") + else: + loss = F.smooth_l1_loss( + x_aug.float(), y.float(), reduction="none", beta=self.loss_beta + ).sum(dim=-1) + + if self.loss_scale is not None: + scale = self.loss_scale + else: + scale = 1 / math.sqrt(sz) + scale_contrastive = 0.5 + + result["prob_perplexity"] = prob_ppl + result["features_pen"] = features_pen + result["num_vars"] = num_vars + + result["losses"]["regression"] = (loss_mse.sum() * scale) + (loss_s * scale_contrastive) + (loss_t * scale_contrastive) + + result["contr_loss"] = loss_s.detach().item() + loss_t.detach().item() + result["mse_loss"] = loss_mse.sum().detach().item() + + if "sample_size" not in result: + result["sample_size"] = 
loss_mse.numel() + result["contr_sample_size"] = targets_s.numel() + + with torch.no_grad(): + result["target_var"] = self.compute_var(y) + result["pred_var"] = self.compute_var(x_aug.float()) + + data.detach() + + if self.num_updates > 5000 and result["target_var"] < self.cfg.min_target_var: + logger.error( + f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting" + ) + raise Exception( + f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting" + ) + if self.num_updates > 5000 and result["pred_var"] < self.cfg.min_pred_var: + logger.error( + f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting" + ) + raise Exception( + f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting" + ) + + if self.ema is not None: + result["ema_decay"] = self.ema.get_decay() * 1000 + + return result + + def get_extra_losses(self, net_output): + pen = [] + + if "prob_perplexity" in net_output: + pen.append( + (net_output["num_vars"] - net_output["prob_perplexity"]) + / net_output["num_vars"] + ) + + if "features_pen" in net_output: + pen.append(net_output["features_pen"]) + + return pen + + @staticmethod + def compute_var(y): + y = y.view(-1, y.size(-1)) + if dist.is_initialized(): + zc = torch.tensor(y.size(0)).cuda() + zs = y.sum(dim=0) + zss = (y ** 2).sum(dim=0) + + dist.all_reduce(zc) + dist.all_reduce(zs) + dist.all_reduce(zss) + + var = zss / (zc - 1) - (zs ** 2) / (zc * (zc - 1)) + return torch.sqrt(var + 1e-6).mean() + else: + return torch.sqrt(y.var(dim=0) + 1e-6).mean() + + def extract_features( + self, source, padding_mask, mask=False, layer=None + ): + res = self.forward( + source, + padding_mask, + mask=mask, + features_only=True, + layer=layer, + ) + return res + + def remove_pretraining_modules(self, last_layer=None): + self.final_proj = None + self.ema = None + if last_layer is not None: + self.encoder.layers = nn.ModuleList( + l for i, l in enumerate(self.encoder.layers) if i <= last_layer + ) diff --git a/triton_model/1/aqc/examples/data2vec/models/utils.py b/triton_model/1/aqc/examples/data2vec/models/utils.py new file mode 100644 index 0000000..6deea3e --- /dev/null +++ b/triton_model/1/aqc/examples/data2vec/models/utils.py @@ -0,0 +1,3 @@ +def index_put_scale(tensor, indices, value): + tensor[indices] *= value + return tensor \ No newline at end of file diff --git a/triton_model/1/aqc/examples/language_model/README.adaptive_inputs.md b/triton_model/1/aqc/examples/language_model/README.adaptive_inputs.md new file mode 100644 index 0000000..6650d58 --- /dev/null +++ b/triton_model/1/aqc/examples/language_model/README.adaptive_inputs.md @@ -0,0 +1,39 @@ +# Adaptive Input Representations for Neural Language Modeling (Baevski and Auli, 2018) + +## Pre-trained models + +Description | Parameters | Dataset | Model and Test set(s) +---|---:|---|--- +Adaptive Inputs
([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853)) | 1026M | [Google Billion Words](https://github.com/ciprian-chelba/1-billion-word-language-modeling-benchmark) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2) +Adaptive Inputs
([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853)) | 247M | [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2)
+
+## Training an LM with adaptive inputs
+
+First, see the general [language modeling README](README.md) for instructions on
+preprocessing the WikiText-103 data.
+
+Then use the following training command to train a model with adaptive inputs
+using the `transformer_lm_wiki103` model architecture:
+```bash
+fairseq-train --task language_modeling \
+    data-bin/wikitext-103 \
+    --save-dir checkpoints/transformer_wikitext-103 \
+    --arch transformer_lm_wiki103 \
+    --max-update 286000 --lr 1.0 --t-mult 2 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 \
+    --warmup-updates 16000 --warmup-init-lr 1e-07 --stop-min-lr 1e-09 --optimizer nag --min-lr 0.0001 --clip-norm 0.1 \
+    --criterion adaptive_loss --max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
+    --sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=legacy_ddp
+```
+
+## Citation
+
+```bibtex
+@inproceedings{
+    baevski2018adaptive,
+    title={Adaptive Input Representations for Neural Language Modeling},
+    author={Alexei Baevski and Michael Auli},
+    booktitle={International Conference on Learning Representations},
+    year={2019},
+    url={https://openreview.net/forum?id=ByxZX20qFQ},
+}
+```
diff --git a/triton_model/1/aqc/examples/language_model/README.conv.md b/triton_model/1/aqc/examples/language_model/README.conv.md
new file mode 100644
index 0000000..1ff8635
--- /dev/null
+++ b/triton_model/1/aqc/examples/language_model/README.conv.md
@@ -0,0 +1,40 @@
+# Language Modeling with Gated Convolutional Networks (Dauphin et al., 2017)
+
+## Example usage
+
+First download and preprocess the data following the main [language modeling README](README.md).
+
+Then to train a convolutional LM using the `fconv_lm_dauphin_wikitext103`
+architecture:
+```bash
+fairseq-train --task language_modeling \
+    data-bin/wikitext-103 \
+    --save-dir checkpoints/fconv_wikitext-103 \
+    --arch fconv_lm_dauphin_wikitext103 \
+    --adaptive-softmax-cutoff 10000,20000,200000 \
+    --dropout 0.2 \
+    --criterion adaptive_loss \
+    --optimizer nag --clip-norm 0.1 --weight-decay 5e-06 \
+    --lr 1.0 --lr-scheduler reduce_lr_on_plateau --lr-shrink 0.5 \
+    --max-tokens 1024 --tokens-per-sample 1024 \
+    --ddp-backend legacy_ddp \
+    --max-epoch 35
+```
+
+And evaluate with:
+```bash
+fairseq-eval-lm data-bin/wikitext-103 --path checkpoints/fconv_wikitext-103/checkpoint_best.pt
+```
+
+## Citation
+
+```bibtex
+@inproceedings{dauphin2017language,
+    title={Language Modeling with Gated Convolutional Networks},
+    author={Dauphin, Yann N and Fan, Angela and Auli, Michael and Grangier, David},
+    booktitle={Proceedings of the 34th International Conference on Machine Learning-Volume 70},
+    pages={933--941},
+    year={2017},
+    organization={JMLR}
+}
+```
diff --git a/triton_model/1/aqc/examples/language_model/README.md b/triton_model/1/aqc/examples/language_model/README.md
new file mode 100644
index 0000000..e78ea48
--- /dev/null
+++ b/triton_model/1/aqc/examples/language_model/README.md
@@ -0,0 +1,123 @@
+# Neural Language Modeling
+
+## Pre-trained models
+
+Model | Description | Dataset | Download
+---|---|---|---
+`transformer_lm.gbw.adaptive_huge` | Adaptive Inputs<br>
([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853))
1026M params | [Google Billion Words](https://github.com/ciprian-chelba/1-billion-word-language-modeling-benchmark) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2) +`transformer_lm.wiki103.adaptive` | Adaptive Inputs
([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853))
247M params | [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2) +`transformer_lm.wmt19.en` | English LM
([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) | [WMT News Crawl](http://data.statmt.org/news-crawl/) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.gz) +`transformer_lm.wmt19.de` | German LM
([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) | [WMT News Crawl](http://data.statmt.org/news-crawl/) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.gz) +`transformer_lm.wmt19.ru` | Russian LM
([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) | [WMT News Crawl](http://data.statmt.org/news-crawl/) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.gz)
+
+## Example usage
+
+We require a few additional Python dependencies for preprocessing:
+```bash
+pip install fastBPE sacremoses
+```
+
+To sample from a language model using PyTorch Hub:
+```python
+import torch
+
+# List available models
+torch.hub.list('pytorch/fairseq')  # [..., 'transformer_lm.wmt19.en', ...]
+
+# Load an English LM trained on WMT'19 News Crawl data
+en_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe')
+en_lm.eval()  # disable dropout
+
+# Move model to GPU
+en_lm.cuda()
+
+# Sample from the language model
+en_lm.sample('Barack Obama', beam=1, sampling=True, sampling_topk=10, temperature=0.8)
+# "Barack Obama is coming to Sydney and New Zealand (...)"
+
+# Compute perplexity for a sequence
+en_lm.score('Barack Obama is coming to Sydney and New Zealand')['positional_scores'].mean().neg().exp()
+# tensor(15.1474)
+
+# The same interface can be used with custom models as well
+from fairseq.models.transformer_lm import TransformerLanguageModel
+custom_lm = TransformerLanguageModel.from_pretrained('/path/to/model/dir', 'checkpoint100.pt', tokenizer='moses', bpe='fastbpe')
+custom_lm.sample('Barack Obama', beam=5)
+# "Barack Obama (...)"
+```
+
+## Training a transformer language model with the CLI tools
+
+### 1) Preprocess the data
+
+First download and prepare the [WikiText-103 dataset](https://www.salesforce.com/products/einstein/ai-research/the-wikitext-dependency-language-modeling-dataset/):
+```bash
+cd examples/language_model/
+bash prepare-wikitext-103.sh
+cd ../..
+```
+
+Next preprocess/binarize the data:
+```bash
+TEXT=examples/language_model/wikitext-103
+fairseq-preprocess \
+    --only-source \
+    --trainpref $TEXT/wiki.train.tokens \
+    --validpref $TEXT/wiki.valid.tokens \
+    --testpref $TEXT/wiki.test.tokens \
+    --destdir data-bin/wikitext-103 \
+    --workers 20
+```
+
+### 2) Train a language model
+
+Next we'll train a basic transformer language model on wikitext-103. For more
+advanced usage, see the [adaptive inputs README](README.adaptive_inputs.md).
+
+To train a basic LM (assumes 2 GPUs):
+```
+$ fairseq-train --task language_modeling \
+    data-bin/wikitext-103 \
+    --save-dir checkpoints/transformer_wikitext-103 \
+    --arch transformer_lm --share-decoder-input-output-embed \
+    --dropout 0.1 \
+    --optimizer adam --adam-betas '(0.9, 0.98)' --weight-decay 0.01 --clip-norm 0.0 \
+    --lr 0.0005 --lr-scheduler inverse_sqrt --warmup-updates 4000 --warmup-init-lr 1e-07 \
+    --tokens-per-sample 512 --sample-break-mode none \
+    --max-tokens 2048 --update-freq 16 \
+    --fp16 \
+    --max-update 50000
+```
+
+If you run out of memory, try reducing `--max-tokens` (max number of tokens per
+batch) or `--tokens-per-sample` (max sequence length). You can also adjust
+`--update-freq` to accumulate gradients and simulate training on a different
+number of GPUs.
+
+### 3) Evaluate
+
+```bash
+fairseq-eval-lm data-bin/wikitext-103 \
+    --path checkpoints/transformer_wikitext-103/checkpoint_best.pt \
+    --batch-size 2 \
+    --tokens-per-sample 512 \
+    --context-window 400
+# | Evaluated 245569 tokens in 56.1s (4379.02 tokens/s)
+# | Loss: 3.4164, Perplexity: 30.46
+```
+
+*Note:* The `--context-window` option controls how much context is provided to
+each token when computing perplexity. When the window size is 0, the dataset is
+chunked into segments of length 512 and perplexity is computed over each segment
+normally. However, this results in worse (higher) perplexity, since tokens that
+appear earlier in each segment have less conditioning. When the maximum window
+size is used (511 in this case), we compute perplexity for each token
+fully conditioned on 511 tokens of context. This slows down evaluation
+significantly, since we must run a separate forward pass for every token in the
+dataset, but results in better (lower) perplexity.
+
+
+## Convolutional language models
+
+Please see the [convolutional LM README](README.conv.md) for instructions on
+training convolutional language models.
diff --git a/triton_model/1/aqc/examples/language_model/prepare-wikitext-103.sh b/triton_model/1/aqc/examples/language_model/prepare-wikitext-103.sh
new file mode 100644
index 0000000..7513021
--- /dev/null
+++ b/triton_model/1/aqc/examples/language_model/prepare-wikitext-103.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
+
+URLS=(
+    "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip"
+)
+FILES=(
+    "wikitext-103-v1.zip"
+)
+
+for ((i=0;i<${#URLS[@]};++i)); do
+    file=${FILES[i]}
+    if [ -f $file ]; then
+        echo "$file already exists, skipping download"
+    else
+        url=${URLS[i]}
+        wget "$url"
+        if [ -f $file ]; then
+            echo "$url successfully downloaded."
+        else
+            echo "$url not successfully downloaded."
+            exit 1
+        fi
+        if [ ${file: -4} == ".tgz" ]; then
+            tar zxvf $file
+        elif [ ${file: -4} == ".tar" ]; then
+            tar xvf $file
+        elif [ ${file: -4} == ".zip" ]; then
+            unzip $file
+        fi
+    fi
+done
+cd ..
diff --git a/triton_model/1/aqc/examples/speech_recognition/README.md b/triton_model/1/aqc/examples/speech_recognition/README.md
new file mode 100644
index 0000000..17030bf
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/README.md
@@ -0,0 +1,83 @@
+### 2021 Update: We are merging this example into the [S2T framework](../speech_to_text), which supports more generic speech-to-text tasks (e.g. speech translation) and more flexible data processing pipelines. Please stay tuned.
+
+# Speech Recognition
+`examples/speech_recognition` implements the ASR task in fairseq, along with the features, datasets, models and loss functions needed to train and run inference with the model described in [Transformers with convolutional context for ASR (Abdelrahman Mohamed et al., 2019)](https://arxiv.org/abs/1904.11660).
+
+
+## Additional dependencies
+On top of the main fairseq dependencies, there are a couple of additional requirements.
+
+1) Please follow the instructions to install [torchaudio](https://github.com/pytorch/audio). This is required to compute audio fbank features (a minimal sketch follows this list).
+2) [Sclite](http://www1.icsi.berkeley.edu/Speech/docs/sctk-1.2/sclite.htm#sclite_name_0) is used to measure WER. Sclite can be downloaded and installed from source as part of the sctk package [here](http://www.openslr.org/4/). Training and inference do not require Sclite.
+3) [sentencepiece](https://github.com/google/sentencepiece) is required in order to create datasets with word-piece targets.
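+
+As a minimal sketch of what item 1 above is used for, fbank features can be
+computed with torchaudio roughly as follows (the 80-bin setting and the file
+name are illustrative assumptions, not settings taken from this example):
+```python
+import torchaudio
+
+# Load a waveform; torchaudio returns a (channels, time) tensor plus the sample rate.
+waveform, sample_rate = torchaudio.load("utt1.flac")  # placeholder path
+
+# Kaldi-compatible log-mel filterbank features, shape (num_frames, num_mel_bins).
+fbank = torchaudio.compliance.kaldi.fbank(
+    waveform, num_mel_bins=80, sample_frequency=sample_rate
+)
+```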
+
+## Preparing librispeech data
+```
+./examples/speech_recognition/datasets/prepare-librispeech.sh $DIR_TO_SAVE_RAW_DATA $DIR_FOR_PREPROCESSED_DATA
+```
+
+## Training librispeech data
+```
+python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 80 --task speech_recognition --arch vggtransformer_2 --optimizer adadelta --lr 1.0 --adadelta-eps 1e-8 --adadelta-rho 0.95 --clip-norm 10.0 --max-tokens 5000 --log-format json --log-interval 1 --criterion cross_entropy_acc --user-dir examples/speech_recognition/
+```
+
+## Inference for librispeech
+`$SET` can be `test_clean` or `test_other`.
+Any checkpoint in `$MODEL_PATH` can be selected. In this example we are working with `checkpoint_last.pt`.
+```
+python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --max-tokens 25000 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --beam 20 --results-path $RES_DIR --batch-size 40 --gen-subset $SET --user-dir examples/speech_recognition/
+```
+
+## Scoring with sclite
+```
+sclite -r ${RES_DIR}/ref.word-checkpoint_last.pt-${SET}.txt -h ${RES_DIR}/hypo.word-checkpoint_last.pt-${SET}.txt -i rm -o all stdout > $RES_REPORT
+```
+The `Sum/Avg` row of the first table in the report contains the WER.
+
+## Using flashlight (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)) components
+[flashlight](https://github.com/facebookresearch/flashlight) now has integration with fairseq. Currently this includes:
+
+* AutoSegmentationCriterion (ASG)
+* flashlight-style Conv/GLU model
+* flashlight's beam search decoder
+
+To use these, follow the instructions on [this page](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) to install the python bindings.
+
+## Training librispeech data (flashlight style, Conv/GLU + ASG loss)
+Training command:
+```
+python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 100 --task speech_recognition --arch w2l_conv_glu_enc --batch-size 4 --optimizer sgd --lr 0.3,0.8 --momentum 0.8 --clip-norm 0.2 --max-tokens 50000 --log-format json --log-interval 100 --num-workers 0 --sentence-avg --criterion asg_loss --asg-transitions-init 5 --max-replabel 2 --linseg-updates 8789 --user-dir examples/speech_recognition
+```
+
+Note that the ASG loss currently doesn't work well with word-pieces. You should prepare a dataset with character targets by setting `nbpe=31` in `prepare-librispeech.sh`.
+
+## Inference for librispeech (flashlight decoder, n-gram LM)
+Inference command:
+```
+python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder kenlm --kenlm-model $KENLM_MODEL_PATH --lexicon $LEXICON_PATH --beam 200 --beam-threshold 15 --lm-weight 1.5 --word-score 1.5 --sil-weight -0.3 --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition
+```
+
+`$KENLM_MODEL_PATH` should be a standard n-gram language model file. `$LEXICON_PATH` should be a flashlight-style lexicon (a list of known words and their spellings).
For ASG inference, a lexicon line should look like this (note the repetition labels): +``` +doorbell D O 1 R B E L 1 ▁ +``` +For CTC inference with word-pieces, repetition labels are not used and the lexicon should have most common spellings for each word (one can use sentencepiece's `NBestEncodeAsPieces` for this): +``` +doorbell ▁DOOR BE LL +doorbell ▁DOOR B E LL +doorbell ▁DO OR BE LL +doorbell ▁DOOR B EL L +doorbell ▁DOOR BE L L +doorbell ▁DO OR B E LL +doorbell ▁DOOR B E L L +doorbell ▁DO OR B EL L +doorbell ▁DO O R BE LL +doorbell ▁DO OR BE L L +``` +Lowercase vs. uppercase matters: the *word* should match the case of the n-gram language model (i.e. `$KENLM_MODEL_PATH`), while the *spelling* should match the case of the token dictionary (i.e. `$DIR_FOR_PREPROCESSED_DATA/dict.txt`). + +## Inference for librispeech (flashlight decoder, viterbi only) +Inference command: +``` +python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder viterbi --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition +``` diff --git a/triton_model/1/aqc/examples/speech_recognition/__init__.py b/triton_model/1/aqc/examples/speech_recognition/__init__.py new file mode 100644 index 0000000..0278f6a --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/__init__.py @@ -0,0 +1 @@ +from . import criterions, models, tasks # noqa diff --git a/triton_model/1/aqc/examples/speech_recognition/__pycache__/__init__.cpython-310.pyc b/triton_model/1/aqc/examples/speech_recognition/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..197b28c8941b9662a4b39950f6ad22bb52e3b80c GIT binary patch literal 223 zcmYjLI}XAy49%w?RH=Kf(1pN`Dt06mwk}bXT2PTRB`y+Nftzp=?vRyky7eu`6!zm)wy_htxbOj><_&C BL&X39 literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/speech_recognition/__pycache__/w2l_decoder.cpython-310.pyc b/triton_model/1/aqc/examples/speech_recognition/__pycache__/w2l_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6e0047dd67a222181ad1aa1279c84df22fa8ced GIT binary patch literal 14196 zcmc&*Yj9lWSw6QtyL)!ETFa6w%Z{^7?8KYcQsUlSW0E*^nwuMH(=^*;mermkY3057 zJ7;Z6>_S_mjSHkrY0ITIR;57QfdU2EftJ$F0EGetIxxT~)9Fk*Fd2R@Oc>gRQu93T z*^5?E+mt`7=Dg=S-~IC5-%pLPF-ODi!JqoP_n)UV?Q8tl{Il_K7aqT$YnrPSG*@?x zs$S6LZ4^v-TeVoh)>&p&0bU=AtUX^3*&gls@dAE z!Y(PZs}r@|h24^lS0`&zg(*oVs?)VSg*}o^R%dE^3ww2K?H06lsnWz!NS4XO@*6kHy3WM9V#5E9WETM-BP%vc5C6*+L6K$U3*lk zkLeo!yks};j&0~j-Bvh+=kR0NeVUtb$Dh>PaqqTuvv51Uv+gc@@A7WP_fdRLxV!Pa zTfUEZX?Jo-bEiI}uN#Fqluo;QP`bxEgxd4?o^ki$d#}{K!!^!n`PsE;=S;QaFIOu| z%R$cd$_>|3{+#nHPvdM}53PqPevmi9nCo3^d7)D;)jT(7HLD&9v#olwRK8GDUb9+S z@TxxYVnNBjz{Gf|*{rS>gGSLW)uQ^zM$@Yo-AXw?`GInye#uiokzM-e$79__&08v= zWD#R#%gQSSUh#56xzZ|hs*RE>-#*GSr<<+URRT{fR30e>%i+|L7Zlx6P%1V9RV?|% zh1J0G^NDcmv|_bNqkew1>4j4dc~>eJQ${ zV&ZKE)=CU-+eOJr{F0_-+eSCx>Kl69L@K$Vq117WUg_;pYHYBNRDG9=I@0Q8l5h6% zWB5u-%aqIZW3GnTT4&c%C(A*(xKJ(CFPvJ7o%Frtsps`}J`*N_C14A1F*K`QJ#;QL z%B6*(Us?0QSbf3s1C_xd3g+qar^8ISk3hwM7!>FEmObDM65f^2TxcLIgM@Lvg8s^0 zXnM#TFZoNwK$Yr#MX>Ce8Ct$qT@0;~TJqHlhPbAwTk(4SZX97HXw*qgjro$V=I@*@ zT`bRgSA>52dB5p-a_|_AzP5XKlym)-BWFrJY~gv3-4RzJ9BCV_xmW9I8`{0vy56zcT32uD zZEM5m;AFc-JGP;7_t$Q`&#QY1gp@1g7D_>RIoDVOk}rCSJLJ!m!QKZyA`b^UhsQqv zP{Bbyk$T+d=p7)Bt6eY^-fiPu#zitOJ)CGN?^3bR3Yx6|%;SPr_hl>dW@yaa5$YF0 z{gPxv;0XE*os&Fs5L5Hi100AW+S-i1cI&3iSn5rC?qn5MQT0#F4O@lDI@nC5jxjM# 
z42-4bhht9X;D%$^1v1>Iw=g;AVC$q4 z{Ry4nlqoxH%hB!YX>-dd_Ur9D4e&MpJWer*WU;9l3;we#J{uijJd7`w{8A7o^$IqcWTQcsZqy)5 z3TZVPK8SV^!U4owZni=P+&!o)wHhtIcLwT3=tRAgJ;mfS!c>3rfj{bHEF~2VF-6^p z&qwh1X@G=5x=HI91E?}Hf-0L1zD&m2|1nMMB3}D128f6!5MtTTg>>}5cuHG@0t1$t zXhU#!^uX*|K@4dyL~Lwq?UiQ-8yVqvZecaIj2*5*#N@!9a>96y=I%asAB0U$6d%ob zL3xzGI3bw~Cq&NkCK6b@1KM@9z1r)bhg^gdz@UcObP|TT9d$!}Y0CzM$&*C4D@}Fk zarVfy8gHD|){bo1o@myaXsO?9g#0-oXSkbv5yjiuRc!@Ce@?rmLlsduPzRrS{f>}s-g`=RBct2$Gpu~n)@B`VJ$UQR%~R3zg~=OL)e38-lR){resXyE;m2`BAH zei`{AKVk!a&m98j>j8q0)CCelUEnETav?@^40mA}J+14zsR0A5pFp>PwjPNGW5a-I z3}&Io#i&#It){1hBoCFi^J2E3JWNggPqXVX1C2?D7|?0{a*qJB9h>I z7m2n`nf18YF*;_)TGBc(U>I<}rKvrE(KXu^Wf(bI$F4ifVflvMjYTCL8+kVL8gs*V z8%Vbs4-(xZztIkwNQd7}^c|!~!hkm6)hHbYQO9-dDeWnJG19jZL7MsznE`g!JUc=X zTnrINNa7YGmPoMU9sMeV?^Td3gs{G$ZD0wm{(fUge`h2aBl5jwgm#39*y(aZc_3?| z4Rd>V91-@yHA67EkiHaY)5Rv#>EQuDX0pD>%**J&=f>KaVa!~&udf~5ej1VV+e~Tw z=J_$GKM)D(W^|#>ayF?m_OL7wsQbfTcc5 z0k>j_U0?-vTkECcNX20bL|T0^a8N77ymi_V>0k`$44!fM&LWlW?s8!pbj)A^UsiYb z1}N!X?THgm8!bGs4tqzR)JZ$+Ki!=Ky~Z~TlqNdKwvF01bsXV(*aC=QM?`Z9L^cJB zB6Ugk)dO1Fd7tLm?bQ3U518xvn_-QtWV_StR6E{IZkUjqufKS=}M1DrUBhw#Yqz=SDm^JO}aI1ZziIx!FxToy0m1A$?f5(nsB7CrzR+9u96n zE=y$9c#q?`6%Sgh9L8g=Cs6MQ87z7<{m(z3xejOQrrdNjU%Ra{R=d5E0Sg{;Gfz^s z9+h>DWBy~WIVLY!q9kIZmMTc@=r^U7z>hVd2+lxpn^ zjIkDMvTY2Hh)ag|93`1BJZLCncx`avh`|YgLJOdg0Ny2B4AL6$z}z!7J6oE{x>*wPctaWT`fK4Kwl$mg)J+jj9WcwpP6C z(WMeP;=B;{TS#KPMM;K97>T`Poa%4MbrsFE3W@0XyQ`6d0)7C`i@Y*a^~$Asu2HY9 z<`$6IsC&7~@C@WiIqql<2JTX;wB+SPyA}Nn0$`%&t2l#>dZZif-JaKTfowYq3UYEQ zISVnzL|bg@OZbb-uxP8vrTQnKUmoT~OluCMpTMKm0AA1u3~aP(arF~y$$eHoN$^I3 zHxsNAaM$w3)a#ihCxss8!F2<(A&{@$Rw^EA6F$Mi;bAGVvoHl@ z!YM|kf>EuhNw%LQILLAfwmNJMxNB4Z!>!(`iJhzNU=3=ap@|Md8y+3_xKNRPBt^Uq z&4=%Py&9KJh&Q2GsRwE=+YQ(ECKfv~r5+v;VIrK8C5P!qoTy@>8TgSnonjw3wv6`* z!+k>s(Ok8%Fc)}Nf}S8!>aDE*l+^cEYYPqd@3N7!q@WW*3RAMeA|SkBdL^{DML^#s zL=-(-VL}ibs&J?mqlG8>h2dB~RTNdZ#$Jbm>aG&Gw#sY~ZahDpwog3Cz}B)MWMOmZ zStF~bjaj58;LU(GjeP6+gkk)5#x@KKIo9CMKq~PKCvE8{LfPn_^UX{ui~bxv3&A~X zDXqR`Bqwy|x(M$bc|_<4myzX!w(^3Os*l)wWL|tua}}=!fRbszu!i~vF|!XM2Z|?@ zNvM?<43)iT!{kQF-;X?<=JyN?G59$S!ktAm6Ji8)=0Iuf*o(P4#ZP zBAT$_kM`Nfzw7@3|ARH8k#sqka`{5U{g7J^6kt!l8-%)X9lKBWpQsigBQS``kN z)phIJu&Qo*;WLGav%qmz0K!B+{sCtrL=>EhA7mNL3$>5n*8#3ML!(GtU~ZK__;a19 zB0ynm;I?BtC5%T)@e5<(9fup5E8FS-k&(WP=Hg(Yv6R+l!D}<%#2}HR^$q*FHMTar z^YZe^9gQQZZtA@Rvca+##ZNhF0YeMPM?nXVPvoVAH2do8lwxj}Ia8`A-@Dk0?!bL( z!-Kq7xf1TG)Ddi|0i{aN%S714#3flqyOCcUUw7g09{}jvRgq_J=q^E^&h`*E@oYEY z#TOAeah)e)g#>e9_(LIo(n63@6!C)Nj)TAN3X_A`jd*X#9{8N*pj30jfe52t2OEH9 zjW`J)Y^xRVTp(?MNod6wZHgjoC!&mN;Cy*DB(x;WR1PLNusb&-~6iFx)+C3hB7 zHl}uAoJgH=sERRArB>BF%zY0)nDtASJZMgXO$(E?(v@PlR9;4ytlV--q2*mFRl})T z>4I16xuwxfaS;)!B9Rjcg|E1<-p567>%$!2GXyf!@OY0fkLtlU@c6V0;Qq#68baRI z69(K|NTv0)iNPig6#EgT%i|+r$Zmcb3EHX<(O|CFLAYKwH0rjrDDfRr6BVrv_X@n@ zIz8R*n8i-Qo`i=Sy9vLzE?#luzz)NX!x3XXZFcNO;pnpPw{GfMJ#nYzzvME`i+_*t zEG$@ex_VXMxU@L&4actqUy&&>EURt1He5Sn@UA1KMY|bRuW^zik9V_eFn*pVkS2Z> zjPX-g#d$EC6L_l^1B|d*vQ5&@IXusCU*={0u7R|+SN@!~b zhg&O3;byKGd0#OJ4)^SJ{PMfh1Um&jY3elua;`Tq^+y05GrGO&DHP^B;-6DiUTLTmtm)%ud?BX3wdfx*##Y1lA+?U#2|m z|LBV1t`pJyOBjrjjgGsHHT1J0u42ItX~GZLMhek2V8<*|U2}u@2aF0J;(>qn;pA;8 zKNi?M?6HA8cAdV=xH?{e?-Hp5>L<~I1Mg)Dm}kWbNt#>&>qLw(g?yt!Z&0e8 zcFlEqY2lv%eNbA~nsFrs*N9jN4YN$(}7~~Cf>0`$RdD6v+ZJiFvH{sF=#7GfV zre@7~j0~6C{5wLI0K5Txe?im?ZR)wzMk@zzM62pz@^Jp5OCRb$?xBa-=U~*$z2$N> z=*aXmEO^0X51pVthPgSB{t%4pRcl9c4aI#K%(Eo3%x}~Dx9`bd=p#Kbd^9JKyZnh< z6XKa;9jRlJWxtN*#9xwI#BiIBgj30~cHt10`IFyi3?m-h@Gc zFuypD3Nh#x#R~;!gzzS9)d*o&s!A>5AGjD4#T?0wBv!9ae|zobO)QAc{2})pwj7Nf zpql~bSAWJyodL+}>T^tgH;(^uG*e$djS+r7I$}TCrf3J9SDyzQIf9)>R)7Az#sy=G 
z##Mj8YJUl^W*x&_f&6T=t-`^Dp(1(1P!RHwA}^Rj=l)ta(4d5dv19!B&YT z=w#A{6X(?qcsg*kBLSHOJ?Oe^!j3?|+R*Lq*cSAp>j{e`)^)?0#Ml0Rv-U>8Y+M6D zJ&TbHw3@&dG^tT}$DOMuj_)hiu?1TF|q~n^K~S z8@vEO4SmRKVi5(9vkA_I#!@*%w5wY6%9}aKa84<&)(lo7ep2Ve=xBhV425}ZYB;$~ zwGd%HlTYnveo;?~l)3jJ6@KIOG^0qpsddcdWXND2Yk^oHw7Kvs)|BG~NM4IT-6QUXp}g z1bM~rZ8pG5TA167}tP?#`xJ3NNXC;9*KI*v|}rK zG51;I%y#!dx6_LIgB$pTSTk+D14vuXAQFQ2f$ohGABzV$=5`N4i__syx}VW6JW1$X zkb~+Y?F95H#*(1(ozxh2s_(+rZ1*O_w{(=Dua%qOWJSRQh zO}k_8{bcC*>5RiK#JP)r0jT`aXij)FvDJ?OlFp!J}dEyGh+SVl2jV18SYF$8o`Lh zC8xzDbqmm5Bp!;z*#lkxQrB>&N`%n}&*?L5(u`^m2%TPH>N3IVAW!5>37;Q8Z&AqR zjvYvGU|$b4k)g{>{W!a86LbikBzOzJwn?imD!4yG`;DZFe zMDSLErwF01SuJU#T~q5uY94|K+K>JM<`A1yZ#I7-$SrL@F9X_+ppt>0((sQVBj=mh?;lRu|GlVZM782}+IU2w@#waP z1fDYB%9q<&Z{s~81MWn*Jc#>9u&;%;&kuqHu|crF9s~=F9fAeMNU%W12)qK4Xb*w~ zSB(|ibfQ*raaO_x$(?Nt;+#*F1|}3Jg~nDJ5v_Cl29stgvO>&=?7ohw;?f5XGeLM; z9OYrEPmHklzQiNu?(j~tW30{Zw5?km`kUHsV?EquneaKybZju6+5fmfY#gkCR>xfNlp?uaW+Rc7!d5;W%w;ri1^-b zBL&|5?}H4%?h-z~%%zis@7LDuej&>iuPZM(?Z})z&VjZCFN6h3k)eJUHA0IWK=YLe z9Y#(xP0ot$Uifuc;U77-1oB{6AP$BY*s&jIhUz!jGdlSmn>dM5*+iRW`=-qs%8TDY z6>Q#rurtXWY~I)Un>VSg&ApJR4aHdA8HQHW@3HGo5C}Uw&eT2vdL$#R{1v7?Nx&(< zijb|7Im!OXoY?S-oYOQC5+N}%cG`}jBs0*^;c(;?$E~_=KW=}q-+#1DVTRK2S5XkK zV-ktn4SP&6FMnqkFBaWK8F$M58Q)=&*Zcx#@xFSH-|>b&$18uKB^Uqxm3cP)(E+X? zMXW=RzohCC?uSV%RtK-Si}zj;(<5{GpO_Mnd0YQ~7UpmRV9Wn3%u!0t4elwc7zc+aDw$x4^d58V_4;>PaX{ z9NdvO|0)XvV~2ppiA&AVRp3*k4T->7@q|SaE)7F2D^U-oqd&vMwFdpDZach_h$x~7 R8E9tj$nMWv%9@iu@xKw*cC7#a literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/speech_recognition/criterions/ASG_loss.py b/triton_model/1/aqc/examples/speech_recognition/criterions/ASG_loss.py new file mode 100644 index 0000000..41f50bb --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/criterions/ASG_loss.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
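+
+# ASG (AutoSegmentationCriterion, Collobert et al., 2016) scores letter
+# sequences using repetition labels ("replabels") and a learned transition
+# matrix instead of a CTC-style blank token. The core loss computation comes
+# from flashlight's python bindings, which are imported lazily in __init__.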
+ +import torch +from examples.speech_recognition.data.replabels import pack_replabels +from fairseq import utils +from fairseq.criterions import FairseqCriterion, register_criterion + + +@register_criterion("asg_loss") +class ASGCriterion(FairseqCriterion): + @staticmethod + def add_args(parser): + group = parser.add_argument_group("ASG Loss") + group.add_argument( + "--asg-transitions-init", + help="initial diagonal value of transition matrix", + type=float, + default=0.0, + ) + group.add_argument( + "--max-replabel", help="maximum # of replabels", type=int, default=2 + ) + group.add_argument( + "--linseg-updates", + help="# of training updates to use LinSeg initialization", + type=int, + default=0, + ) + group.add_argument( + "--hide-linseg-messages", + help="hide messages about LinSeg initialization", + action="store_true", + ) + + def __init__( + self, + task, + silence_token, + asg_transitions_init, + max_replabel, + linseg_updates, + hide_linseg_messages, + ): + from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode + + super().__init__(task) + self.tgt_dict = task.target_dictionary + self.eos = self.tgt_dict.eos() + self.silence = ( + self.tgt_dict.index(silence_token) + if silence_token in self.tgt_dict + else None + ) + self.max_replabel = max_replabel + + num_labels = len(self.tgt_dict) + self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT) + self.asg.trans = torch.nn.Parameter( + asg_transitions_init * torch.eye(num_labels), requires_grad=True + ) + + self.linseg_progress = torch.nn.Parameter( + torch.tensor([0], dtype=torch.int), requires_grad=False + ) + self.linseg_maximum = linseg_updates + self.linseg_message_state = "none" if hide_linseg_messages else "start" + + @classmethod + def build_criterion(cls, args, task): + return cls( + task, + args.silence_token, + args.asg_transitions_init, + args.max_replabel, + args.linseg_updates, + args.hide_linseg_messages, + ) + + def linseg_step(self): + if not self.training: + return False + if self.linseg_progress.item() < self.linseg_maximum: + if self.linseg_message_state == "start": + print("| using LinSeg to initialize ASG") + self.linseg_message_state = "finish" + self.linseg_progress.add_(1) + return True + elif self.linseg_message_state == "finish": + print("| finished LinSeg initialization") + self.linseg_message_state = "none" + return False + + def replace_eos_with_silence(self, tgt): + if tgt[-1] != self.eos: + return tgt + elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence): + return tgt[:-1] + else: + return tgt[:-1] + [self.silence] + + def forward(self, model, sample, reduce=True): + """Compute the loss for the given sample. 
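+
+        The model is expected to return encoder_out of shape (T, B, C);
+        it is transposed below to (B, T, C) emissions for flashlight's ASG.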
+ + Returns a tuple with three elements: + 1) the loss + 2) the sample size, which is used as the denominator for the gradient + 3) logging outputs to display while training + """ + + net_output = model(**sample["net_input"]) + emissions = net_output["encoder_out"].transpose(0, 1).contiguous() + B = emissions.size(0) + T = emissions.size(1) + device = emissions.device + + target = torch.IntTensor(B, T) + target_size = torch.IntTensor(B) + using_linseg = self.linseg_step() + + for b in range(B): + initial_target_size = sample["target_lengths"][b].item() + if initial_target_size == 0: + raise ValueError("target size cannot be zero") + + tgt = sample["target"][b, :initial_target_size].tolist() + tgt = self.replace_eos_with_silence(tgt) + tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel) + tgt = tgt[:T] + + if using_linseg: + tgt = [tgt[t * len(tgt) // T] for t in range(T)] + + target[b][: len(tgt)] = torch.IntTensor(tgt) + target_size[b] = len(tgt) + + loss = self.asg.forward(emissions, target.to(device), target_size.to(device)) + + if reduce: + loss = torch.sum(loss) + + sample_size = ( + sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"] + ) + logging_output = { + "loss": utils.item(loss.data) if reduce else loss.data, + "ntokens": sample["ntokens"], + "nsentences": sample["target"].size(0), + "sample_size": sample_size, + } + return loss, sample_size, logging_output + + @staticmethod + def aggregate_logging_outputs(logging_outputs): + """Aggregate logging outputs from data parallel training.""" + loss_sum = sum(log.get("loss", 0) for log in logging_outputs) + ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) + nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) + sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) + agg_output = { + "loss": loss_sum / nsentences, + "ntokens": ntokens, + "nsentences": nsentences, + "sample_size": sample_size, + } + return agg_output diff --git a/triton_model/1/aqc/examples/speech_recognition/criterions/__init__.py b/triton_model/1/aqc/examples/speech_recognition/criterions/__init__.py new file mode 100644 index 0000000..579abd2 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/criterions/__init__.py @@ -0,0 +1,17 @@ +import importlib +import os + + +# ASG loss requires flashlight bindings +files_to_skip = set() +try: + import flashlight.lib.sequence.criterion +except ImportError: + files_to_skip.add("ASG_loss.py") + +for file in sorted(os.listdir(os.path.dirname(__file__))): + if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip: + criterion_name = file[: file.find(".py")] + importlib.import_module( + "examples.speech_recognition.criterions." 
+ criterion_name
+        )

[GIT binary patch payloads omitted: compiled criterions/__pycache__ artifacts (ASG_loss, __init__, cross_entropy_acc for cpython-310/-39); the base85 blobs carry no reviewable content]

diff --git a/triton_model/1/aqc/examples/speech_recognition/criterions/cross_entropy_acc.py b/triton_model/1/aqc/examples/speech_recognition/criterions/cross_entropy_acc.py
new file mode 100644
index 0000000..7c4d8ba
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/criterions/cross_entropy_acc.py
@@ -0,0 +1,130 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import logging
+import math
+
+import torch
+import torch.nn.functional as F
+from fairseq import utils
+from fairseq.criterions import FairseqCriterion, register_criterion
+
+
+@register_criterion("cross_entropy_acc")
+class CrossEntropyWithAccCriterion(FairseqCriterion):
+    def __init__(self, task, sentence_avg):
+        super().__init__(task)
+        self.sentence_avg = sentence_avg
+
+    def compute_loss(self, model, net_output, target, reduction, log_probs):
+        # N, T -> N * T
+        target = target.view(-1)
+        lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
+        if not hasattr(lprobs, "batch_first"):
+            logging.warning(
+                "ERROR: we need to know whether "
+                "the net output is batch-first; "
+                "you need to set the batch_first attribute on the return value of "
+                "model.get_normalized_probs. For now we assume it is true, but "
+                "in the future, we will raise exception instead. 
" + ) + batch_first = getattr(lprobs, "batch_first", True) + if not batch_first: + lprobs = lprobs.transpose(0, 1) + + # N, T, D -> N * T, D + lprobs = lprobs.view(-1, lprobs.size(-1)) + loss = F.nll_loss( + lprobs, target, ignore_index=self.padding_idx, reduction=reduction + ) + return lprobs, loss + + def get_logging_output(self, sample, target, lprobs, loss): + target = target.view(-1) + mask = target != self.padding_idx + correct = torch.sum( + lprobs.argmax(1).masked_select(mask) == target.masked_select(mask) + ) + total = torch.sum(mask) + sample_size = ( + sample["target"].size(0) if self.sentence_avg else sample["ntokens"] + ) + + logging_output = { + "loss": utils.item(loss.data), # * sample['ntokens'], + "ntokens": sample["ntokens"], + "nsentences": sample["target"].size(0), + "sample_size": sample_size, + "correct": utils.item(correct.data), + "total": utils.item(total.data), + "nframes": torch.sum(sample["net_input"]["src_lengths"]).item(), + } + + return sample_size, logging_output + + def forward(self, model, sample, reduction="sum", log_probs=True): + """Computes the cross entropy with accuracy metric for the given sample. + + This is similar to CrossEntropyCriterion in fairseq, but also + computes accuracy metrics as part of logging + + Args: + logprobs (Torch.tensor) of shape N, T, D i.e. + batchsize, timesteps, dimensions + targets (Torch.tensor) of shape N, T i.e batchsize, timesteps + + Returns: + tuple: With three elements: + 1) the loss + 2) the sample size, which is used as the denominator for the gradient + 3) logging outputs to display while training + + TODO: + * Currently this Criterion will only work with LSTMEncoderModels or + FairseqModels which have decoder, or Models which return TorchTensor + as net_output. + We need to make a change to support all FairseqEncoder models. 
+
+        """
+        net_output = model(**sample["net_input"])
+        target = model.get_targets(sample, net_output)
+        lprobs, loss = self.compute_loss(
+            model, net_output, target, reduction, log_probs
+        )
+        sample_size, logging_output = self.get_logging_output(
+            sample, target, lprobs, loss
+        )
+        return loss, sample_size, logging_output
+
+    @staticmethod
+    def aggregate_logging_outputs(logging_outputs):
+        """Aggregate logging outputs from data parallel training."""
+        correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
+        total_sum = sum(log.get("total", 0) for log in logging_outputs)
+        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
+        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
+        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
+        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
+        nframes = sum(log.get("nframes", 0) for log in logging_outputs)
+        agg_output = {
+            "loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
+            # if args.sentence_avg, then sample_size is nsentences and loss
+            # is per-sentence loss; else sample_size is ntokens and the loss
+            # becomes per-output-token loss
+            "ntokens": ntokens,
+            "nsentences": nsentences,
+            "nframes": nframes,
+            "sample_size": sample_size,
+            "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
+            "correct": correct_sum,
+            "total": total_sum,
+            # total is the number of evaluated tokens
+        }
+        if sample_size != ntokens:
+            agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
+            # here sample_size == nsentences, so "loss" above is the
+            # per-sentence loss and "nll_loss" is the per-output-token loss
+        return agg_output

diff --git a/triton_model/1/aqc/examples/speech_recognition/data/__init__.py b/triton_model/1/aqc/examples/speech_recognition/data/__init__.py
new file mode 100644
index 0000000..47bb6e2
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/data/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+from .asr_dataset import AsrDataset
+
+
+__all__ = [
+    "AsrDataset",
+]

[GIT binary patch payloads omitted: compiled data/__pycache__/__init__ artifacts for cpython-310/-39]
[GIT binary patch payloads omitted: compiled data/__pycache__ artifacts (asr_dataset, collaters, data_utils, replabels for cpython-310/-39); the base85 blobs carry no reviewable content]
diff --git a/triton_model/1/aqc/examples/speech_recognition/data/asr_dataset.py b/triton_model/1/aqc/examples/speech_recognition/data/asr_dataset.py
new file mode 100644
index 0000000..63a6fca
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/data/asr_dataset.py
@@ -0,0 +1,122 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+
+import numpy as np
+from fairseq.data import FairseqDataset
+
+from . import data_utils
+from .collaters import Seq2SeqCollater
+
+
+class AsrDataset(FairseqDataset):
+    """
+    A dataset representing speech and corresponding transcription.
+
+    Args:
+        aud_paths (List[str]): A list of str with paths to audio files.
+        aud_durations_ms (List[int]): A list of int containing the durations of
+            audio files.
+        tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
+            of target transcriptions.
+        tgt_dict (~fairseq.data.Dictionary): target vocabulary.
+        ids (List[str]): A list of utterance IDs.
+        speakers (List[str]): A list of speakers corresponding to utterances.
+ num_mel_bins (int): Number of triangular mel-frequency bins (default: 80) + frame_length (float): Frame length in milliseconds (default: 25.0) + frame_shift (float): Frame shift in milliseconds (default: 10.0) + """ + + def __init__( + self, + aud_paths, + aud_durations_ms, + tgt, + tgt_dict, + ids, + speakers, + num_mel_bins=80, + frame_length=25.0, + frame_shift=10.0, + ): + assert frame_length > 0 + assert frame_shift > 0 + assert all(x > frame_length for x in aud_durations_ms) + self.frame_sizes = [ + int(1 + (d - frame_length) / frame_shift) for d in aud_durations_ms + ] + + assert len(aud_paths) > 0 + assert len(aud_paths) == len(aud_durations_ms) + assert len(aud_paths) == len(tgt) + assert len(aud_paths) == len(ids) + assert len(aud_paths) == len(speakers) + self.aud_paths = aud_paths + self.tgt_dict = tgt_dict + self.tgt = tgt + self.ids = ids + self.speakers = speakers + self.num_mel_bins = num_mel_bins + self.frame_length = frame_length + self.frame_shift = frame_shift + + self.s2s_collater = Seq2SeqCollater( + 0, + 1, + pad_index=self.tgt_dict.pad(), + eos_index=self.tgt_dict.eos(), + move_eos_to_beginning=True, + ) + + def __getitem__(self, index): + import torchaudio + import torchaudio.compliance.kaldi as kaldi + + tgt_item = self.tgt[index] if self.tgt is not None else None + + path = self.aud_paths[index] + if not os.path.exists(path): + raise FileNotFoundError("Audio file not found: {}".format(path)) + sound, sample_rate = torchaudio.load_wav(path) + output = kaldi.fbank( + sound, + num_mel_bins=self.num_mel_bins, + frame_length=self.frame_length, + frame_shift=self.frame_shift, + ) + output_cmvn = data_utils.apply_mv_norm(output) + + return {"id": index, "data": [output_cmvn.detach(), tgt_item]} + + def __len__(self): + return len(self.aud_paths) + + def collater(self, samples): + """Merge a list of samples to form a mini-batch. + + Args: + samples (List[int]): sample indices to collate + + Returns: + dict: a mini-batch suitable for forwarding with a Model + """ + return self.s2s_collater.collate(samples) + + def num_tokens(self, index): + return self.frame_sizes[index] + + def size(self, index): + """Return an example's size as a float or tuple. This value is used when + filtering a dataset with ``--max-positions``.""" + return ( + self.frame_sizes[index], + len(self.tgt[index]) if self.tgt is not None else 0, + ) + + def ordered_indices(self): + """Return an ordered list of indices. Batches will be constructed based + on this order.""" + return np.arange(len(self)) diff --git a/triton_model/1/aqc/examples/speech_recognition/data/collaters.py b/triton_model/1/aqc/examples/speech_recognition/data/collaters.py new file mode 100644 index 0000000..6acfec8 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/data/collaters.py @@ -0,0 +1,131 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +""" + This module contains collection of classes which implement + collate functionalities for various tasks. 
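+
+    For example (illustrative shapes only): the Seq2SeqCollater below takes
+        [{"id": 0, "data": [<42 x 80 feature tensor>, [5, 6, 7]]},
+         {"id": 1, "data": [<37 x 80 feature tensor>, [8, 9]]}]
+    and produces a batch whose net_input["src_tokens"] has shape (2, 42, 80),
+    zero-padded to the longest utterance and sorted by descending frame count,
+    with padded "target" and "prev_output_tokens" tensors built alongside.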
+ + Collaters should know what data to expect for each sample + and they should pack / collate them into batches +""" + + +from __future__ import absolute_import, division, print_function, unicode_literals + +import numpy as np +import torch +from fairseq.data import data_utils as fairseq_data_utils + + +class Seq2SeqCollater(object): + """ + Implements collate function mainly for seq2seq tasks + This expects each sample to contain feature (src_tokens) and + targets. + This collator is also used for aligned training task. + """ + + def __init__( + self, + feature_index=0, + label_index=1, + pad_index=1, + eos_index=2, + move_eos_to_beginning=True, + ): + self.feature_index = feature_index + self.label_index = label_index + self.pad_index = pad_index + self.eos_index = eos_index + self.move_eos_to_beginning = move_eos_to_beginning + + def _collate_frames(self, frames): + """Convert a list of 2d frames into a padded 3d tensor + Args: + frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is + length of i-th frame and f_dim is static dimension of features + Returns: + 3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] + """ + len_max = max(frame.size(0) for frame in frames) + f_dim = frames[0].size(1) + res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0) + + for i, v in enumerate(frames): + res[i, : v.size(0)] = v + + return res + + def collate(self, samples): + """ + utility function to collate samples into batch for speech recognition. + """ + if len(samples) == 0: + return {} + + # parse samples into torch tensors + parsed_samples = [] + for s in samples: + # skip invalid samples + if s["data"][self.feature_index] is None: + continue + source = s["data"][self.feature_index] + if isinstance(source, (np.ndarray, np.generic)): + source = torch.from_numpy(source) + target = s["data"][self.label_index] + if isinstance(target, (np.ndarray, np.generic)): + target = torch.from_numpy(target).long() + elif isinstance(target, list): + target = torch.LongTensor(target) + + parsed_sample = {"id": s["id"], "source": source, "target": target} + parsed_samples.append(parsed_sample) + samples = parsed_samples + + id = torch.LongTensor([s["id"] for s in samples]) + frames = self._collate_frames([s["source"] for s in samples]) + # sort samples by descending number of frames + frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples]) + frames_lengths, sort_order = frames_lengths.sort(descending=True) + id = id.index_select(0, sort_order) + frames = frames.index_select(0, sort_order) + + target = None + target_lengths = None + prev_output_tokens = None + if samples[0].get("target", None) is not None: + ntokens = sum(len(s["target"]) for s in samples) + target = fairseq_data_utils.collate_tokens( + [s["target"] for s in samples], + self.pad_index, + self.eos_index, + left_pad=False, + move_eos_to_beginning=False, + ) + target = target.index_select(0, sort_order) + target_lengths = torch.LongTensor( + [s["target"].size(0) for s in samples] + ).index_select(0, sort_order) + prev_output_tokens = fairseq_data_utils.collate_tokens( + [s["target"] for s in samples], + self.pad_index, + self.eos_index, + left_pad=False, + move_eos_to_beginning=self.move_eos_to_beginning, + ) + prev_output_tokens = prev_output_tokens.index_select(0, sort_order) + else: + ntokens = sum(len(s["source"]) for s in samples) + + batch = { + "id": id, + "ntokens": ntokens, + "net_input": {"src_tokens": frames, "src_lengths": frames_lengths}, + "target": target, + "target_lengths": 
target_lengths, + "nsentences": len(samples), + } + if prev_output_tokens is not None: + batch["net_input"]["prev_output_tokens"] = prev_output_tokens + return batch diff --git a/triton_model/1/aqc/examples/speech_recognition/data/data_utils.py b/triton_model/1/aqc/examples/speech_recognition/data/data_utils.py new file mode 100644 index 0000000..cc4729e --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/data/data_utils.py @@ -0,0 +1,100 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import torch + + +def calc_mean_invstddev(feature): + if len(feature.size()) != 2: + raise ValueError("We expect the input feature to be 2-D tensor") + mean = feature.mean(0) + var = feature.var(0) + # avoid division by ~zero + eps = 1e-8 + if (var < eps).any(): + return mean, 1.0 / (torch.sqrt(var) + eps) + return mean, 1.0 / torch.sqrt(var) + + +def apply_mv_norm(features): + # If there is less than 2 spectrograms, the variance cannot be computed (is NaN) + # and normalization is not possible, so return the item as it is + if features.size(0) < 2: + return features + mean, invstddev = calc_mean_invstddev(features) + res = (features - mean) * invstddev + return res + + +def lengths_to_encoder_padding_mask(lengths, batch_first=False): + """ + convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor + + Args: + lengths: a (B, )-shaped tensor + + Return: + max_length: maximum length of B sequences + encoder_padding_mask: a (max_length, B) binary mask, where + [t, b] = 0 for t < lengths[b] and 1 otherwise + + TODO: + kernelize this function if benchmarking shows this function is slow + """ + max_lengths = torch.max(lengths).item() + bsz = lengths.size(0) + encoder_padding_mask = torch.arange( + max_lengths + ).to( # a (T, ) tensor with [0, ..., T-1] + lengths.device + ).view( # move to the right device + 1, max_lengths + ).expand( # reshape to (1, T)-shaped tensor + bsz, -1 + ) >= lengths.view( # expand to (B, T)-shaped tensor + bsz, 1 + ).expand( + -1, max_lengths + ) + if not batch_first: + return encoder_padding_mask.t(), max_lengths + else: + return encoder_padding_mask, max_lengths + + +def encoder_padding_mask_to_lengths( + encoder_padding_mask, max_lengths, batch_size, device +): + """ + convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor + + Conventionally, encoder output contains a encoder_padding_mask, which is + a 2-D mask in a shape (T, B), whose (t, b) element indicate whether + encoder_out[t, b] is a valid output (=0) or not (=1). 
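+    For example, with lengths [3, 1] and max_lengths 3, the (T, B) mask is
+        [[0, 0],
+         [0, 1],
+         [0, 1]]
+    and this function recovers [3, 1] as max_lengths - mask.sum(dim=0).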
Occasionally, we + need to convert this mask tensor to a 1-D tensor in shape (B, ), where + [b] denotes the valid length of b-th sequence + + Args: + encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None, + indicating all are valid + Return: + seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the + number of valid elements of b-th sequence + + max_lengths: maximum length of all sequence, if encoder_padding_mask is + not None, max_lengths must equal to encoder_padding_mask.size(0) + + batch_size: batch size; if encoder_padding_mask is + not None, max_lengths must equal to encoder_padding_mask.size(1) + + device: which device to put the result on + """ + if encoder_padding_mask is None: + return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device) + + assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match" + assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match" + + return max_lengths - torch.sum(encoder_padding_mask, dim=0) diff --git a/triton_model/1/aqc/examples/speech_recognition/data/replabels.py b/triton_model/1/aqc/examples/speech_recognition/data/replabels.py new file mode 100644 index 0000000..441f1bd --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/data/replabels.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Replabel transforms for use with flashlight's ASG criterion. +""" + + +def replabel_symbol(i): + """ + Replabel symbols used in flashlight, currently just "1", "2", ... + This prevents training with numeral tokens, so this might change in the future + """ + return str(i) + + +def pack_replabels(tokens, dictionary, max_reps): + """ + Pack a token sequence so that repeated symbols are replaced by replabels + """ + if len(tokens) == 0 or max_reps <= 0: + return tokens + + replabel_value_to_idx = [0] * (max_reps + 1) + for i in range(1, max_reps + 1): + replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i)) + + result = [] + prev_token = -1 + num_reps = 0 + for token in tokens: + if token == prev_token and num_reps < max_reps: + num_reps += 1 + else: + if num_reps > 0: + result.append(replabel_value_to_idx[num_reps]) + num_reps = 0 + result.append(token) + prev_token = token + if num_reps > 0: + result.append(replabel_value_to_idx[num_reps]) + return result + + +def unpack_replabels(tokens, dictionary, max_reps): + """ + Unpack a token sequence so that replabels are replaced by repeated symbols + """ + if len(tokens) == 0 or max_reps <= 0: + return tokens + + replabel_idx_to_value = {} + for i in range(1, max_reps + 1): + replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i + + result = [] + prev_token = -1 + for token in tokens: + try: + for _ in range(replabel_idx_to_value[token]): + result.append(prev_token) + prev_token = -1 + except KeyError: + result.append(token) + prev_token = token + return result diff --git a/triton_model/1/aqc/examples/speech_recognition/datasets/asr_prep_json.py b/triton_model/1/aqc/examples/speech_recognition/datasets/asr_prep_json.py new file mode 100644 index 0000000..b8db8ff --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/datasets/asr_prep_json.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. 
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import argparse
+import concurrent.futures
+import json
+import multiprocessing
+import os
+from collections import namedtuple
+from itertools import chain
+
+import sentencepiece as spm
+from fairseq.data import Dictionary
+
+
+MILLISECONDS_TO_SECONDS = 0.001
+
+
+def process_sample(aud_path, label, utt_id, sp, tgt_dict):
+    import torchaudio
+
+    input = {}
+    output = {}
+    si, ei = torchaudio.info(aud_path)
+    input["length_ms"] = int(
+        si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS
+    )
+    input["path"] = aud_path
+
+    token = " ".join(sp.EncodeAsPieces(label))
+    ids = tgt_dict.encode_line(token, append_eos=False)
+    output["text"] = label
+    output["token"] = token
+    output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids]))
+    return {utt_id: {"input": input, "output": output}}
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--audio-dirs",
+        nargs="+",
+        default=["-"],
+        required=True,
+        help="input directories with audio files",
+    )
+    parser.add_argument(
+        "--labels",
+        required=True,
+        help="aggregated input labels with format <ID LABEL> per line",
+        type=argparse.FileType("r", encoding="UTF-8"),
+    )
+    parser.add_argument(
+        "--spm-model",
+        required=True,
+        help="sentencepiece model to use for encoding",
+        type=argparse.FileType("r", encoding="UTF-8"),
+    )
+    parser.add_argument(
+        "--dictionary",
+        required=True,
+        help="file to load fairseq dictionary from",
+        type=argparse.FileType("r", encoding="UTF-8"),
+    )
+    parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
+    parser.add_argument(
+        "--output",
+        required=True,
+        type=argparse.FileType("w"),
+        help="path to save json output",
+    )
+    args = parser.parse_args()
+
+    sp = spm.SentencePieceProcessor()
+    sp.Load(args.spm_model.name)
+
+    tgt_dict = Dictionary.load(args.dictionary)
+
+    labels = {}
+    for line in args.labels:
+        (utt_id, label) = line.split(" ", 1)
+        labels[utt_id] = label
+    if len(labels) == 0:
+        raise Exception("No labels found in " + args.labels.name)
+
+    Sample = namedtuple("Sample", "aud_path utt_id")
+    samples = []
+    for path, _, files in chain.from_iterable(
+        os.walk(path) for path in args.audio_dirs
+    ):
+        for f in files:
+            if f.endswith(args.audio_format):
+                if len(os.path.splitext(f)) != 2:
+                    raise Exception("Expect file name. Got: ", f)
Got: ", f) + utt_id = os.path.splitext(f)[0] + if utt_id not in labels: + continue + samples.append(Sample(os.path.join(path, f), utt_id)) + + utts = {} + num_cpu = multiprocessing.cpu_count() + with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor: + future_to_sample = { + executor.submit( + process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict + ): s + for s in samples + } + for future in concurrent.futures.as_completed(future_to_sample): + try: + data = future.result() + except Exception as exc: + print("generated an exception: ", exc) + else: + utts.update(data) + json.dump({"utts": utts}, args.output, indent=4) + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/speech_recognition/datasets/prepare-librispeech.sh b/triton_model/1/aqc/examples/speech_recognition/datasets/prepare-librispeech.sh new file mode 100755 index 0000000..9e9297f --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/datasets/prepare-librispeech.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +# Prepare librispeech dataset + +base_url=www.openslr.org/resources/12 +train_dir=train_960 + +if [ "$#" -ne 2 ]; then + echo "Usage: $0 " + echo "e.g.: $0 /tmp/librispeech_raw/ ~/data/librispeech_final" + exit 1 +fi + +download_dir=${1%/} +out_dir=${2%/} + +fairseq_root=~/fairseq-py/ +mkdir -p ${out_dir} +cd ${out_dir} || exit + +nbpe=5000 +bpemode=unigram + +if [ ! -d "$fairseq_root" ]; then + echo "$0: Please set correct fairseq_root" + exit 1 +fi + +echo "Data Download" +for part in dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500; do + url=$base_url/$part.tar.gz + if ! wget -P $download_dir $url; then + echo "$0: wget failed for $url" + exit 1 + fi + if ! 
tar -C $download_dir -xvzf $download_dir/$part.tar.gz; then + echo "$0: error un-tarring archive $download_dir/$part.tar.gz" + exit 1 + fi +done + +echo "Merge all train packs into one" +mkdir -p ${download_dir}/LibriSpeech/${train_dir}/ +for part in train-clean-100 train-clean-360 train-other-500; do + mv ${download_dir}/LibriSpeech/${part}/* $download_dir/LibriSpeech/${train_dir}/ +done +echo "Merge train text" +find ${download_dir}/LibriSpeech/${train_dir}/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/${train_dir}/text + +# Use combined dev-clean and dev-other as validation set +find ${download_dir}/LibriSpeech/dev-clean/ ${download_dir}/LibriSpeech/dev-other/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/valid_text +find ${download_dir}/LibriSpeech/test-clean/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/test-clean/text +find ${download_dir}/LibriSpeech/test-other/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/test-other/text + + +dict=data/lang_char/${train_dir}_${bpemode}${nbpe}_units.txt +encoded=data/lang_char/${train_dir}_${bpemode}${nbpe}_encoded.txt +fairseq_dict=data/lang_char/${train_dir}_${bpemode}${nbpe}_fairseq_dict.txt +bpemodel=data/lang_char/${train_dir}_${bpemode}${nbpe} +echo "dictionary: ${dict}" +echo "Dictionary preparation" +mkdir -p data/lang_char/ +echo " 3" > ${dict} +echo " 2" >> ${dict} +echo " 1" >> ${dict} +cut -f 2- -d" " ${download_dir}/LibriSpeech/${train_dir}/text > data/lang_char/input.txt +spm_train --input=data/lang_char/input.txt --vocab_size=${nbpe} --model_type=${bpemode} --model_prefix=${bpemodel} --input_sentence_size=100000000 --unk_id=3 --eos_id=2 --pad_id=1 --bos_id=-1 --character_coverage=1 +spm_encode --model=${bpemodel}.model --output_format=piece < data/lang_char/input.txt > ${encoded} +cat ${encoded} | tr ' ' '\n' | sort | uniq | awk '{print $0 " " NR+3}' >> ${dict} +cat ${encoded} | tr ' ' '\n' | sort | uniq -c | awk '{print $2 " " $1}' > ${fairseq_dict} +wc -l ${dict} + +echo "Prepare train and test jsons" +for part in train_960 test-other test-clean; do + python ${fairseq_root}/examples/speech_recognition/datasets/asr_prep_json.py --audio-dirs ${download_dir}/LibriSpeech/${part} --labels ${download_dir}/LibriSpeech/${part}/text --spm-model ${bpemodel}.model --audio-format flac --dictionary ${fairseq_dict} --output ${part}.json +done +# fairseq expects to find train.json and valid.json during training +mv train_960.json train.json + +echo "Prepare valid json" +python ${fairseq_root}/examples/speech_recognition/datasets/asr_prep_json.py --audio-dirs ${download_dir}/LibriSpeech/dev-clean ${download_dir}/LibriSpeech/dev-other --labels ${download_dir}/LibriSpeech/valid_text --spm-model ${bpemodel}.model --audio-format flac --dictionary ${fairseq_dict} --output valid.json + +cp ${fairseq_dict} ./dict.txt +cp ${bpemodel}.model ./spm.model diff --git a/triton_model/1/aqc/examples/speech_recognition/infer.py b/triton_model/1/aqc/examples/speech_recognition/infer.py new file mode 100644 index 0000000..ce16bf4 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/infer.py @@ -0,0 +1,436 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Run inference for pre-processed data with a trained model. 
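+
+Illustrative invocation (paths are placeholders, and the usual fairseq task
+and criterion flags are elided; the decoder choices are the ones registered
+in this file):
+
+    python examples/speech_recognition/infer.py /path/to/data \
+        --path /path/to/model.pt --gen-subset test --results-path /tmp/results \
+        --w2l-decoder viterbi --max-tokens 4000000 --post-process letter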
+""" + +import ast +import logging +import math +import os +import sys + +import editdistance +import numpy as np +import torch +from fairseq import checkpoint_utils, options, progress_bar, tasks, utils +from fairseq.data.data_utils import post_process +from fairseq.logging.meters import StopwatchMeter, TimeMeter + + +logging.basicConfig() +logging.root.setLevel(logging.INFO) +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def add_asr_eval_argument(parser): + parser.add_argument("--kspmodel", default=None, help="sentence piece model") + parser.add_argument( + "--wfstlm", default=None, help="wfstlm on dictonary output units" + ) + parser.add_argument( + "--rnnt_decoding_type", + default="greedy", + help="wfstlm on dictonary\ +output units", + ) + try: + parser.add_argument( + "--lm-weight", + "--lm_weight", + type=float, + default=0.2, + help="weight for lm while interpolating with neural score", + ) + except: + pass + parser.add_argument( + "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level" + ) + parser.add_argument( + "--w2l-decoder", + choices=["viterbi", "kenlm", "fairseqlm"], + help="use a w2l decoder", + ) + parser.add_argument("--lexicon", help="lexicon for w2l decoder") + parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm") + parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder") + parser.add_argument("--beam-threshold", type=float, default=25.0) + parser.add_argument("--beam-size-token", type=float, default=100) + parser.add_argument("--word-score", type=float, default=1.0) + parser.add_argument("--unk-weight", type=float, default=-math.inf) + parser.add_argument("--sil-weight", type=float, default=0.0) + parser.add_argument( + "--dump-emissions", + type=str, + default=None, + help="if present, dumps emissions into this file and exits", + ) + parser.add_argument( + "--dump-features", + type=str, + default=None, + help="if present, dumps features into this file and exits", + ) + parser.add_argument( + "--load-emissions", + type=str, + default=None, + help="if present, loads emissions from this file", + ) + return parser + + +def check_args(args): + # assert args.path is not None, "--path required for generation!" + # assert args.results_path is not None, "--results_path required for generation!" 
+ assert ( + not args.sampling or args.nbest == args.beam + ), "--sampling requires --nbest to be equal to --beam" + assert ( + args.replace_unk is None or args.raw_text + ), "--replace-unk requires a raw text dataset (--raw-text)" + + +def get_dataset_itr(args, task, models): + return task.get_batch_iterator( + dataset=task.dataset(args.gen_subset), + max_tokens=args.max_tokens, + max_sentences=args.batch_size, + max_positions=(sys.maxsize, sys.maxsize), + ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, + required_batch_size_multiple=args.required_batch_size_multiple, + num_shards=args.num_shards, + shard_id=args.shard_id, + num_workers=args.num_workers, + data_buffer_size=args.data_buffer_size, + ).next_epoch_itr(shuffle=False) + + +def process_predictions( + args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id +): + for hypo in hypos[: min(len(hypos), args.nbest)]: + hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu()) + + if "words" in hypo: + hyp_words = " ".join(hypo["words"]) + else: + hyp_words = post_process(hyp_pieces, args.post_process) + + if res_files is not None: + print( + "{} ({}-{})".format(hyp_pieces, speaker, id), + file=res_files["hypo.units"], + ) + print( + "{} ({}-{})".format(hyp_words, speaker, id), + file=res_files["hypo.words"], + ) + + tgt_pieces = tgt_dict.string(target_tokens) + tgt_words = post_process(tgt_pieces, args.post_process) + + if res_files is not None: + print( + "{} ({}-{})".format(tgt_pieces, speaker, id), + file=res_files["ref.units"], + ) + print( + "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"] + ) + + if not args.quiet: + logger.info("HYPO:" + hyp_words) + logger.info("TARGET:" + tgt_words) + logger.info("___________________") + + hyp_words = hyp_words.split() + tgt_words = tgt_words.split() + return editdistance.eval(hyp_words, tgt_words), len(tgt_words) + + +def prepare_result_files(args): + def get_res_file(file_prefix): + if args.num_shards > 1: + file_prefix = f"{args.shard_id}_{file_prefix}" + path = os.path.join( + args.results_path, + "{}-{}-{}.txt".format( + file_prefix, os.path.basename(args.path), args.gen_subset + ), + ) + return open(path, "w", buffering=1) + + if not args.results_path: + return None + + return { + "hypo.words": get_res_file("hypo.word"), + "hypo.units": get_res_file("hypo.units"), + "ref.words": get_res_file("ref.word"), + "ref.units": get_res_file("ref.units"), + } + + +def optimize_models(args, use_cuda, models): + """Optimize ensemble for generation""" + for model in models: + model.make_generation_fast_( + beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, + need_attn=args.print_alignment, + ) + if args.fp16: + model.half() + if use_cuda: + model.cuda() + + +def apply_half(t): + if t.dtype is torch.float32: + return t.to(dtype=torch.half) + return t + + +class ExistingEmissionsDecoder(object): + def __init__(self, decoder, emissions): + self.decoder = decoder + self.emissions = emissions + + def generate(self, models, sample, **unused): + ids = sample["id"].cpu().numpy() + try: + emissions = np.stack(self.emissions[ids]) + except: + print([x.shape for x in self.emissions[ids]]) + raise Exception("invalid sizes") + emissions = torch.from_numpy(emissions) + return self.decoder.decode(emissions) + + +def main(args, task=None, model_state=None): + check_args(args) + + use_fp16 = args.fp16 + if args.max_tokens is None and args.batch_size is None: + args.max_tokens = 4000000 + logger.info(args) + + use_cuda = torch.cuda.is_available() and not 
args.cpu
+
+    logger.info("| decoding with criterion {}".format(args.criterion))
+
+    task = tasks.setup_task(args)
+
+    # Load ensemble
+    if args.load_emissions:
+        models, criterions = [], []
+        task.load_dataset(args.gen_subset)
+    else:
+        logger.info("| loading model(s) from {}".format(args.path))
+        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+            utils.split_paths(args.path, separator="\\"),
+            arg_overrides=ast.literal_eval(args.model_overrides),
+            task=task,
+            suffix=args.checkpoint_suffix,
+            strict=(args.checkpoint_shard_count == 1),
+            num_shards=args.checkpoint_shard_count,
+            state=model_state,
+        )
+        optimize_models(args, use_cuda, models)
+        task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
+
+    # Set dictionary
+    tgt_dict = task.target_dictionary
+
+    logger.info(
+        "| {} {} {} examples".format(
+            args.data, args.gen_subset, len(task.dataset(args.gen_subset))
+        )
+    )
+
+    # hack to pass transitions to W2lDecoder
+    if args.criterion == "asg_loss":
+        raise NotImplementedError("asg_loss is currently not supported")
+        # trans = criterions[0].asg.trans.data
+        # args.asg_transitions = torch.flatten(trans).tolist()
+
+    # Load dataset (possibly sharded)
+    itr = get_dataset_itr(args, task, models)
+
+    # Initialize generator
+    gen_timer = StopwatchMeter()
+
+    def build_generator(args):
+        w2l_decoder = getattr(args, "w2l_decoder", None)
+        if w2l_decoder == "viterbi":
+            from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
+
+            return W2lViterbiDecoder(args, task.target_dictionary)
+        elif w2l_decoder == "kenlm":
+            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
+
+            return W2lKenLMDecoder(args, task.target_dictionary)
+        elif w2l_decoder == "fairseqlm":
+            from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
+
+            return W2lFairseqLMDecoder(args, task.target_dictionary)
+        else:
+            print(
+                "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
+            )
+
+    # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
+    generator = build_generator(args)
+
+    if args.load_emissions:
+        generator = ExistingEmissionsDecoder(
+            generator, np.load(args.load_emissions, allow_pickle=True)
+        )
+        logger.info("loaded emissions from " + args.load_emissions)
+
+    num_sentences = 0
+
+    if args.results_path is not None and not os.path.exists(args.results_path):
+        os.makedirs(args.results_path)
+
+    max_source_pos = (
+        utils.resolve_max_positions(
+            task.max_positions(), *[model.max_positions() for model in models]
+        ),
+    )
+
+    if max_source_pos is not None:
+        max_source_pos = max_source_pos[0]
+        if max_source_pos is not None:
+            max_source_pos = max_source_pos[0] - 1
+
+    if args.dump_emissions:
+        emissions = {}
+    if args.dump_features:
+        features = {}
+        models[0].bert.proj = None
+    else:
+        res_files = prepare_result_files(args)
+    errs_t = 0
+    lengths_t = 0
+    with progress_bar.build_progress_bar(args, itr) as t:
+        wps_meter = TimeMeter()
+        for sample in t:
+            sample = utils.move_to_cuda(sample) if use_cuda else sample
+            if use_fp16:
+                sample = utils.apply_to_sample(apply_half, sample)
+            if "net_input" not in sample:
+                continue
+
+            prefix_tokens = None
+            if args.prefix_size > 0:
+                prefix_tokens = sample["target"][:, : args.prefix_size]
+
+            gen_timer.start()
+            if args.dump_emissions:
+                with torch.no_grad():
+                    encoder_out = models[0](**sample["net_input"])
+                    emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
+                    emm = emm.transpose(0, 1).cpu().numpy()
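+                    # emm is now batch-major (B, T, V): emm[i] below is the
+                    # (T, V) log-probability matrix for the i-th utterance.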
+                    for i, id in enumerate(sample["id"]):
+                        emissions[id.item()] = emm[i]
+                    continue
+            elif args.dump_features:
+                with torch.no_grad():
+                    encoder_out = models[0](**sample["net_input"])
+                    feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
+                    for i, id in enumerate(sample["id"]):
+                        padding = (
+                            encoder_out["encoder_padding_mask"][i].cpu().numpy()
+                            if encoder_out["encoder_padding_mask"] is not None
+                            else None
+                        )
+                        features[id.item()] = (feat[i], padding)
+                    continue
+            hypos = task.inference_step(generator, models, sample, prefix_tokens)
+            num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
+            gen_timer.stop(num_generated_tokens)
+
+            for i, sample_id in enumerate(sample["id"].tolist()):
+                speaker = None
+                # id = task.dataset(args.gen_subset).ids[int(sample_id)]
+                id = sample_id
+                toks = (
+                    sample["target"][i, :]
+                    if "target_label" not in sample
+                    else sample["target_label"][i, :]
+                )
+                target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
+                # Process top predictions
+                errs, length = process_predictions(
+                    args,
+                    hypos[i],
+                    None,
+                    tgt_dict,
+                    target_tokens,
+                    res_files,
+                    speaker,
+                    id,
+                )
+                errs_t += errs
+                lengths_t += length
+
+            wps_meter.update(num_generated_tokens)
+            t.log({"wps": round(wps_meter.avg)})
+            num_sentences += (
+                sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
+            )
+
+    wer = None
+    if args.dump_emissions:
+        emm_arr = []
+        for i in range(len(emissions)):
+            emm_arr.append(emissions[i])
+        np.save(args.dump_emissions, emm_arr)
+        logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
+    elif args.dump_features:
+        feat_arr = []
+        for i in range(len(features)):
+            feat_arr.append(features[i])
+        np.save(args.dump_features, feat_arr)
+        logger.info(f"saved {len(features)} features to {args.dump_features}")
+    else:
+        if lengths_t > 0:
+            wer = errs_t * 100.0 / lengths_t
+            logger.info(f"WER: {wer}")
+
+        logger.info(
+            "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f} "
+            "sentences/s, {:.2f} tokens/s)".format(
+                num_sentences,
+                gen_timer.n,
+                gen_timer.sum,
+                num_sentences / gen_timer.sum,
+                1.0 / gen_timer.avg,
+            )
+        )
+        logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
+    return task, wer
+
+
+def make_parser():
+    parser = options.get_generation_parser()
+    parser = add_asr_eval_argument(parser)
+    return parser
+
+
+def cli_main():
+    parser = make_parser()
+    args = options.parse_args_and_arch(parser)
+    main(args)
+
+
+if __name__ == "__main__":
+    cli_main()
diff --git a/triton_model/1/aqc/examples/speech_recognition/kaldi/__init__.py b/triton_model/1/aqc/examples/speech_recognition/kaldi/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/triton_model/1/aqc/examples/speech_recognition/kaldi/add-self-loop-simple.cc b/triton_model/1/aqc/examples/speech_recognition/kaldi/add-self-loop-simple.cc
new file mode 100644
index 0000000..e18fb62
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/kaldi/add-self-loop-simple.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include <iostream>
+#include "fstext/fstext-lib.h" // @manual
+#include "util/common-utils.h" // @manual
+
+/*
+ * This program is to modify an FST without self-loops by:
+ * for each incoming arc with non-eps input symbol, add a self-loop arc
+ * with that non-eps symbol as input and eps as output.
+ *
+ * This is to make sure the resultant FST can do deduplication for repeated
+ * symbols, which is very common in acoustic model outputs.
+ *
+ */
+namespace {
+int32 AddSelfLoopsSimple(fst::StdVectorFst* fst) {
+  typedef fst::MutableArcIterator<fst::StdVectorFst> IterType;
+
+  int32 num_states_before = fst->NumStates();
+  fst::MakePrecedingInputSymbolsSame(false, fst);
+  int32 num_states_after = fst->NumStates();
+  KALDI_LOG << "There are " << num_states_before
+            << " states in the original FST; "
+            << " after MakePrecedingInputSymbolsSame, there are "
+            << num_states_after << " states " << std::endl;
+
+  auto weight_one = fst::StdArc::Weight::One();
+
+  int32 num_arc_added = 0;
+
+  fst::StdArc self_loop_arc;
+  self_loop_arc.weight = weight_one;
+
+  int32 num_states = fst->NumStates();
+  std::vector<std::set<int32>> incoming_non_eps_label_per_state(num_states);
+
+  for (int32 state = 0; state < num_states; state++) {
+    for (IterType aiter(fst, state); !aiter.Done(); aiter.Next()) {
+      fst::StdArc arc(aiter.Value());
+      if (arc.ilabel != 0) {
+        incoming_non_eps_label_per_state[arc.nextstate].insert(arc.ilabel);
+      }
+    }
+  }
+
+  for (int32 state = 0; state < num_states; state++) {
+    if (!incoming_non_eps_label_per_state[state].empty()) {
+      auto& ilabel_set = incoming_non_eps_label_per_state[state];
+      for (auto it = ilabel_set.begin(); it != ilabel_set.end(); it++) {
+        self_loop_arc.ilabel = *it;
+        self_loop_arc.olabel = 0;
+        self_loop_arc.nextstate = state;
+        fst->AddArc(state, self_loop_arc);
+        num_arc_added++;
+      }
+    }
+  }
+  return num_arc_added;
+}
+
+void print_usage() {
+  std::cout << "add-self-loop-simple usage:\n"
+               "\tadd-self-loop-simple <input-fst> <output-fst>\n";
+}
+} // namespace
+
+int main(int argc, char** argv) {
+  if (argc != 3) {
+    print_usage();
+    exit(1);
+  }
+
+  auto input = argv[1];
+  auto output = argv[2];
+
+  auto fst = fst::ReadFstKaldi(input);
+  auto num_states = fst->NumStates();
+  KALDI_LOG << "Loading FST from " << input << " with " << num_states
+            << " states." << std::endl;
+
+  int32 num_arc_added = AddSelfLoopsSimple(fst);
+  KALDI_LOG << "Adding " << num_arc_added << " self-loop arcs " << std::endl;
+
+  fst::WriteFstKaldi(*fst, std::string(output));
+  KALDI_LOG << "Writing FST to " << output << std::endl;
+
+  delete fst;
+}
diff --git a/triton_model/1/aqc/examples/speech_recognition/kaldi/config/kaldi_initializer.yaml b/triton_model/1/aqc/examples/speech_recognition/kaldi/config/kaldi_initializer.yaml
new file mode 100644
index 0000000..be9ba98
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/kaldi/config/kaldi_initializer.yaml
@@ -0,0 +1,8 @@
+# @package _group_
+
+data_dir: ???
+fst_dir: ???
+in_labels: ???
+kaldi_root: ???
+lm_arpa: ???
+blank_symbol: <s>
diff --git a/triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_decoder.py b/triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_decoder.py
new file mode 100644
index 0000000..5f62cc5
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_decoder.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python3

+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
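+
+# Wraps Kaldi's Faster/LatticeFaster decoders (via pykaldi) behind the same
+# generate() interface as the other fairseq decoders: emissions come from a
+# fairseq acoustic model and are searched against a precompiled HLG graph,
+# optionally built on demand through kaldi_initializer.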
+ +from concurrent.futures import ThreadPoolExecutor +import logging +from omegaconf import MISSING +import os +import torch +from typing import Optional +import warnings + + +from dataclasses import dataclass +from fairseq.dataclass import FairseqDataclass +from .kaldi_initializer import KaldiInitializerConfig, initalize_kaldi + + +logger = logging.getLogger(__name__) + + +@dataclass +class KaldiDecoderConfig(FairseqDataclass): + hlg_graph_path: Optional[str] = None + output_dict: str = MISSING + + kaldi_initializer_config: Optional[KaldiInitializerConfig] = None + + acoustic_scale: float = 0.5 + max_active: int = 10000 + beam_delta: float = 0.5 + hash_ratio: float = 2.0 + + is_lattice: bool = False + lattice_beam: float = 10.0 + prune_interval: int = 25 + determinize_lattice: bool = True + prune_scale: float = 0.1 + max_mem: int = 0 + phone_determinize: bool = True + word_determinize: bool = True + minimize: bool = True + + num_threads: int = 1 + + +class KaldiDecoder(object): + def __init__( + self, + cfg: KaldiDecoderConfig, + beam: int, + nbest: int = 1, + ): + try: + from kaldi.asr import FasterRecognizer, LatticeFasterRecognizer + from kaldi.base import set_verbose_level + from kaldi.decoder import ( + FasterDecoder, + FasterDecoderOptions, + LatticeFasterDecoder, + LatticeFasterDecoderOptions, + ) + from kaldi.lat.functions import DeterminizeLatticePhonePrunedOptions + from kaldi.fstext import read_fst_kaldi, SymbolTable + except: + warnings.warn( + "pykaldi is required for this functionality. Please install from https://github.com/pykaldi/pykaldi" + ) + + # set_verbose_level(2) + + self.acoustic_scale = cfg.acoustic_scale + self.nbest = nbest + + if cfg.hlg_graph_path is None: + assert ( + cfg.kaldi_initializer_config is not None + ), "Must provide hlg graph path or kaldi initializer config" + cfg.hlg_graph_path = initalize_kaldi(cfg.kaldi_initializer_config) + + assert os.path.exists(cfg.hlg_graph_path), cfg.hlg_graph_path + + if cfg.is_lattice: + self.dec_cls = LatticeFasterDecoder + opt_cls = LatticeFasterDecoderOptions + self.rec_cls = LatticeFasterRecognizer + else: + assert self.nbest == 1, "nbest > 1 requires lattice decoder" + self.dec_cls = FasterDecoder + opt_cls = FasterDecoderOptions + self.rec_cls = FasterRecognizer + + self.decoder_options = opt_cls() + self.decoder_options.beam = beam + self.decoder_options.max_active = cfg.max_active + self.decoder_options.beam_delta = cfg.beam_delta + self.decoder_options.hash_ratio = cfg.hash_ratio + + if cfg.is_lattice: + self.decoder_options.lattice_beam = cfg.lattice_beam + self.decoder_options.prune_interval = cfg.prune_interval + self.decoder_options.determinize_lattice = cfg.determinize_lattice + self.decoder_options.prune_scale = cfg.prune_scale + det_opts = DeterminizeLatticePhonePrunedOptions() + det_opts.max_mem = cfg.max_mem + det_opts.phone_determinize = cfg.phone_determinize + det_opts.word_determinize = cfg.word_determinize + det_opts.minimize = cfg.minimize + self.decoder_options.det_opts = det_opts + + self.output_symbols = {} + with open(cfg.output_dict, "r") as f: + for line in f: + items = line.rstrip().split() + assert len(items) == 2 + self.output_symbols[int(items[1])] = items[0] + + logger.info(f"Loading FST from {cfg.hlg_graph_path}") + self.fst = read_fst_kaldi(cfg.hlg_graph_path) + self.symbol_table = SymbolTable.read_text(cfg.output_dict) + + self.executor = ThreadPoolExecutor(max_workers=cfg.num_threads) + + def generate(self, models, sample, **unused): + """Generate a batch of inferences.""" + # 
model.forward normally channels prev_output_tokens into the decoder + # separately, but SequenceGenerator directly calls model.encoder + encoder_input = { + k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" + } + emissions, padding = self.get_emissions(models, encoder_input) + return self.decode(emissions, padding) + + def get_emissions(self, models, encoder_input): + """Run encoder and normalize emissions""" + model = models[0] + + all_encoder_out = [m(**encoder_input) for m in models] + + if len(all_encoder_out) > 1: + + if "encoder_out" in all_encoder_out[0]: + encoder_out = { + "encoder_out": sum(e["encoder_out"] for e in all_encoder_out) + / len(all_encoder_out), + "encoder_padding_mask": all_encoder_out[0]["encoder_padding_mask"], + } + padding = encoder_out["encoder_padding_mask"] + else: + encoder_out = { + "logits": sum(e["logits"] for e in all_encoder_out) + / len(all_encoder_out), + "padding_mask": all_encoder_out[0]["padding_mask"], + } + padding = encoder_out["padding_mask"] + else: + encoder_out = all_encoder_out[0] + padding = ( + encoder_out["padding_mask"] + if "padding_mask" in encoder_out + else encoder_out["encoder_padding_mask"] + ) + + if hasattr(model, "get_logits"): + emissions = model.get_logits(encoder_out, normalize=True) + else: + emissions = model.get_normalized_probs(encoder_out, log_probs=True) + + return ( + emissions.cpu().float().transpose(0, 1), + padding.cpu() if padding is not None and padding.any() else None, + ) + + def decode_one(self, logits, padding): + from kaldi.matrix import Matrix + + decoder = self.dec_cls(self.fst, self.decoder_options) + asr = self.rec_cls( + decoder, self.symbol_table, acoustic_scale=self.acoustic_scale + ) + + if padding is not None: + logits = logits[~padding] + + mat = Matrix(logits.numpy()) + + out = asr.decode(mat) + + if self.nbest > 1: + from kaldi.fstext import shortestpath + from kaldi.fstext.utils import ( + convert_compact_lattice_to_lattice, + convert_lattice_to_std, + convert_nbest_to_list, + get_linear_symbol_sequence, + ) + + lat = out["lattice"] + + sp = shortestpath(lat, nshortest=self.nbest) + + sp = convert_compact_lattice_to_lattice(sp) + sp = convert_lattice_to_std(sp) + seq = convert_nbest_to_list(sp) + + results = [] + for s in seq: + _, o, w = get_linear_symbol_sequence(s) + words = list(self.output_symbols[z] for z in o) + results.append( + { + "tokens": words, + "words": words, + "score": w.value, + "emissions": logits, + } + ) + return results + else: + words = out["text"].split() + return [ + { + "tokens": words, + "words": words, + "score": out["likelihood"], + "emissions": logits, + } + ] + + def decode(self, emissions, padding): + if padding is None: + padding = [None] * len(emissions) + + ret = list( + map( + lambda e, p: self.executor.submit(self.decode_one, e, p), + emissions, + padding, + ) + ) + return ret diff --git a/triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_initializer.py b/triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_initializer.py new file mode 100644 index 0000000..6d2a2a4 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/kaldi/kaldi_initializer.py @@ -0,0 +1,698 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
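+
+# Builds the Kaldi HLG decoding graph with the usual WFST recipe: unit and
+# word symbol tables, G (grammar, compiled from an ARPA LM with arpa2fst),
+# L (lexicon), their composition LG, a CTC-style topology H with blank and
+# optional silence self-loops, and finally HLG, post-processed by the
+# add-self-loop-simple binary (compiled on demand in create_HLG).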
+
+from dataclasses import dataclass
+import hydra
+from hydra.core.config_store import ConfigStore
+import logging
+from omegaconf import MISSING, OmegaConf
+import os
+import os.path as osp
+from pathlib import Path
+import subprocess
+from typing import Optional
+
+from fairseq.data.dictionary import Dictionary
+from fairseq.dataclass import FairseqDataclass
+
+script_dir = Path(__file__).resolve().parent
+config_path = script_dir / "config"
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class KaldiInitializerConfig(FairseqDataclass):
+    data_dir: str = MISSING
+    fst_dir: Optional[str] = None
+    in_labels: str = MISSING
+    out_labels: Optional[str] = None
+    wav2letter_lexicon: Optional[str] = None
+    lm_arpa: str = MISSING
+    kaldi_root: str = MISSING
+    blank_symbol: str = "<s>"
+    silence_symbol: Optional[str] = None
+
+
+def create_units(fst_dir: Path, in_labels: str, vocab: Dictionary) -> Path:
+    in_units_file = fst_dir / f"kaldi_dict.{in_labels}.txt"
+    if not in_units_file.exists():
+
+        logger.info(f"Creating {in_units_file}")
+
+        with open(in_units_file, "w") as f:
+            print("<eps> 0", file=f)
+            i = 1
+            for symb in vocab.symbols[vocab.nspecial :]:
+                if not symb.startswith("madeupword"):
+                    print(f"{symb} {i}", file=f)
+                    i += 1
+    return in_units_file
+
+
+def create_lexicon(
+    cfg: KaldiInitializerConfig,
+    fst_dir: Path,
+    unique_label: str,
+    in_units_file: Path,
+    out_words_file: Path,
+) -> (Path, Path):
+
+    disambig_in_units_file = fst_dir / f"kaldi_dict.{cfg.in_labels}_disambig.txt"
+    lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}.txt"
+    disambig_lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}_disambig.txt"
+    if (
+        not lexicon_file.exists()
+        or not disambig_lexicon_file.exists()
+        or not disambig_in_units_file.exists()
+    ):
+        logger.info(f"Creating {lexicon_file} (in units file: {in_units_file})")
+
+        assert cfg.wav2letter_lexicon is not None or cfg.in_labels == cfg.out_labels
+
+        if cfg.wav2letter_lexicon is not None:
+            lm_words = set()
+            with open(out_words_file, "r") as lm_dict_f:
+                for line in lm_dict_f:
+                    lm_words.add(line.split()[0])
+
+            num_skipped = 0
+            total = 0
+            with open(cfg.wav2letter_lexicon, "r") as w2l_lex_f, open(
+                lexicon_file, "w"
+            ) as out_f:
+                for line in w2l_lex_f:
+                    items = line.rstrip().split("\t")
+                    assert len(items) == 2, items
+                    if items[0] in lm_words:
+                        print(items[0], items[1], file=out_f)
+                    else:
+                        num_skipped += 1
+                        logger.debug(
+                            f"Skipping word {items[0]} as it was not found in LM"
+                        )
+                    total += 1
+            if num_skipped > 0:
+                logger.warning(
+                    f"Skipped {num_skipped} out of {total} words as they were not found in LM"
+                )
+        else:
+            with open(in_units_file, "r") as in_f, open(lexicon_file, "w") as out_f:
+                for line in in_f:
+                    symb = line.split()[0]
+                    if symb != "<eps>" and symb != "<s>" and symb != "</s>":
+                        print(symb, symb, file=out_f)
+
+        lex_disambig_path = (
+            Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_lex_disambig.pl"
+        )
+        res = subprocess.run(
+            [lex_disambig_path, lexicon_file, disambig_lexicon_file],
+            check=True,
+            capture_output=True,
+        )
+        ndisambig = int(res.stdout)
+        disambig_path = Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_disambig.pl"
+        res = subprocess.run(
+            [disambig_path, "--include-zero", in_units_file, str(ndisambig)],
+            check=True,
+            capture_output=True,
+        )
+        with open(disambig_in_units_file, "wb") as f:
+            f.write(res.stdout)
+
+    return disambig_lexicon_file, disambig_in_units_file
+
+
+def create_G(
+    kaldi_root: Path, fst_dir: Path, lm_arpa: Path, arpa_base: str
+) -> (Path, Path):
+
+    out_words_file = fst_dir / 
f"kaldi_dict.{arpa_base}.txt" + grammar_graph = fst_dir / f"G_{arpa_base}.fst" + if not grammar_graph.exists() or not out_words_file.exists(): + logger.info(f"Creating {grammar_graph}") + arpa2fst = kaldi_root / "src/lmbin/arpa2fst" + subprocess.run( + [ + arpa2fst, + "--disambig-symbol=#0", + f"--write-symbol-table={out_words_file}", + lm_arpa, + grammar_graph, + ], + check=True, + ) + return grammar_graph, out_words_file + + +def create_L( + kaldi_root: Path, + fst_dir: Path, + unique_label: str, + lexicon_file: Path, + in_units_file: Path, + out_words_file: Path, +) -> Path: + lexicon_graph = fst_dir / f"L.{unique_label}.fst" + + if not lexicon_graph.exists(): + logger.info(f"Creating {lexicon_graph} (in units: {in_units_file})") + make_lex = kaldi_root / "egs/wsj/s5/utils/make_lexicon_fst.pl" + fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" + fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" + fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" + + def write_disambig_symbol(file): + with open(file, "r") as f: + for line in f: + items = line.rstrip().split() + if items[0] == "#0": + out_path = str(file) + "_disamig" + with open(out_path, "w") as out_f: + print(items[1], file=out_f) + return out_path + + return None + + in_disambig_sym = write_disambig_symbol(in_units_file) + assert in_disambig_sym is not None + out_disambig_sym = write_disambig_symbol(out_words_file) + assert out_disambig_sym is not None + + try: + with open(lexicon_graph, "wb") as out_f: + res = subprocess.run( + [make_lex, lexicon_file], capture_output=True, check=True + ) + assert len(res.stderr) == 0, res.stderr.decode("utf-8") + res = subprocess.run( + [ + fstcompile, + f"--isymbols={in_units_file}", + f"--osymbols={out_words_file}", + "--keep_isymbols=false", + "--keep_osymbols=false", + ], + input=res.stdout, + capture_output=True, + ) + assert len(res.stderr) == 0, res.stderr.decode("utf-8") + res = subprocess.run( + [fstaddselfloops, in_disambig_sym, out_disambig_sym], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstarcsort, "--sort_type=olabel"], + input=res.stdout, + capture_output=True, + check=True, + ) + out_f.write(res.stdout) + except subprocess.CalledProcessError as e: + logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") + os.remove(lexicon_graph) + raise + except AssertionError: + os.remove(lexicon_graph) + raise + + return lexicon_graph + + +def create_LG( + kaldi_root: Path, + fst_dir: Path, + unique_label: str, + lexicon_graph: Path, + grammar_graph: Path, +) -> Path: + lg_graph = fst_dir / f"LG.{unique_label}.fst" + + if not lg_graph.exists(): + logger.info(f"Creating {lg_graph}") + + fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" + fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" + fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" + fstpushspecial = kaldi_root / "src/fstbin/fstpushspecial" + fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" + + try: + with open(lg_graph, "wb") as out_f: + res = subprocess.run( + [fsttablecompose, lexicon_graph, grammar_graph], + capture_output=True, + check=True, + ) + res = subprocess.run( + [ + fstdeterminizestar, + "--use-log=true", + ], + input=res.stdout, + capture_output=True, + ) + res = subprocess.run( + [fstminimizeencoded], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstpushspecial], + input=res.stdout, + capture_output=True, + check=True, + ) + res = 
subprocess.run(
+                    [fstarcsort, "--sort_type=ilabel"],
+                    input=res.stdout,
+                    capture_output=True,
+                    check=True,
+                )
+                out_f.write(res.stdout)
+        except subprocess.CalledProcessError as e:
+            logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
+            os.remove(lg_graph)
+            raise
+
+    return lg_graph
+
+
+def create_H(
+    kaldi_root: Path,
+    fst_dir: Path,
+    disambig_out_units_file: Path,
+    in_labels: str,
+    vocab: Dictionary,
+    blk_sym: str,
+    silence_symbol: Optional[str],
+) -> (Path, Path, Path):
+    h_graph = (
+        fst_dir / f"H.{in_labels}{'_' + silence_symbol if silence_symbol else ''}.fst"
+    )
+    h_out_units_file = fst_dir / f"kaldi_dict.h_out.{in_labels}.txt"
+    disambig_in_units_file_int = Path(str(h_graph) + "isym_disambig.int")
+    disambig_out_units_file_int = Path(str(disambig_out_units_file) + ".int")
+    if (
+        not h_graph.exists()
+        or not h_out_units_file.exists()
+        or not disambig_in_units_file_int.exists()
+    ):
+        logger.info(f"Creating {h_graph}")
+        eps_sym = "<eps>"
+
+        num_disambig = 0
+        osymbols = []
+
+        with open(disambig_out_units_file, "r") as f, open(
+            disambig_out_units_file_int, "w"
+        ) as out_f:
+            for line in f:
+                symb, id = line.rstrip().split()
+                if line.startswith("#"):
+                    num_disambig += 1
+                    print(id, file=out_f)
+                else:
+                    if len(osymbols) == 0:
+                        assert symb == eps_sym, symb
+                    osymbols.append((symb, id))
+
+        i_idx = 0
+        isymbols = [(eps_sym, 0)]
+
+        imap = {}
+
+        for i, s in enumerate(vocab.symbols):
+            i_idx += 1
+            isymbols.append((s, i_idx))
+            imap[s] = i_idx
+
+        fst_str = []
+
+        node_idx = 0
+        root_node = node_idx
+
+        special_symbols = [blk_sym]
+        if silence_symbol is not None:
+            special_symbols.append(silence_symbol)
+
+        # self-loops on the root state for blank (and optional silence)
+        for ss in special_symbols:
+            fst_str.append("{} {} {} {}".format(root_node, root_node, ss, eps_sym))
+
+        for symbol, _ in osymbols:
+            if symbol == eps_sym or symbol.startswith("#"):
+                continue
+
+            node_idx += 1
+            # 1. from root to emitting state
+            fst_str.append("{} {} {} {}".format(root_node, node_idx, symbol, symbol))
+            # 2. from emitting state back to root
+            fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym))
+            # 3. from emitting state to optional blank state
+            pre_node = node_idx
+            node_idx += 1
+            for ss in special_symbols:
+                fst_str.append("{} {} {} {}".format(pre_node, node_idx, ss, eps_sym))
+            # 4.
from blank state back to root + fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym)) + + fst_str.append("{}".format(root_node)) + + fst_str = "\n".join(fst_str) + h_str = str(h_graph) + isym_file = h_str + ".isym" + + with open(isym_file, "w") as f: + for sym, id in isymbols: + f.write("{} {}\n".format(sym, id)) + + with open(h_out_units_file, "w") as f: + for sym, id in osymbols: + f.write("{} {}\n".format(sym, id)) + + with open(disambig_in_units_file_int, "w") as f: + disam_sym_id = len(isymbols) + for _ in range(num_disambig): + f.write("{}\n".format(disam_sym_id)) + disam_sym_id += 1 + + fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" + fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" + fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" + + try: + with open(h_graph, "wb") as out_f: + res = subprocess.run( + [ + fstcompile, + f"--isymbols={isym_file}", + f"--osymbols={h_out_units_file}", + "--keep_isymbols=false", + "--keep_osymbols=false", + ], + input=str.encode(fst_str), + capture_output=True, + check=True, + ) + res = subprocess.run( + [ + fstaddselfloops, + disambig_in_units_file_int, + disambig_out_units_file_int, + ], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstarcsort, "--sort_type=olabel"], + input=res.stdout, + capture_output=True, + check=True, + ) + out_f.write(res.stdout) + except subprocess.CalledProcessError as e: + logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") + os.remove(h_graph) + raise + return h_graph, h_out_units_file, disambig_in_units_file_int + + +def create_HLGa( + kaldi_root: Path, + fst_dir: Path, + unique_label: str, + h_graph: Path, + lg_graph: Path, + disambig_in_words_file_int: Path, +) -> Path: + hlga_graph = fst_dir / f"HLGa.{unique_label}.fst" + + if not hlga_graph.exists(): + logger.info(f"Creating {hlga_graph}") + + fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" + fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" + fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" + fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal" + fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" + + try: + with open(hlga_graph, "wb") as out_f: + res = subprocess.run( + [ + fsttablecompose, + h_graph, + lg_graph, + ], + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstdeterminizestar, "--use-log=true"], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstrmsymbols, disambig_in_words_file_int], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstrmepslocal], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstminimizeencoded], + input=res.stdout, + capture_output=True, + check=True, + ) + out_f.write(res.stdout) + except subprocess.CalledProcessError as e: + logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") + os.remove(hlga_graph) + raise + + return hlga_graph + + +def create_HLa( + kaldi_root: Path, + fst_dir: Path, + unique_label: str, + h_graph: Path, + l_graph: Path, + disambig_in_words_file_int: Path, +) -> Path: + hla_graph = fst_dir / f"HLa.{unique_label}.fst" + + if not hla_graph.exists(): + logger.info(f"Creating {hla_graph}") + + fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" + fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" + fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" + fstrmepslocal = kaldi_root / 
"src/fstbin/fstrmepslocal" + fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" + + try: + with open(hla_graph, "wb") as out_f: + res = subprocess.run( + [ + fsttablecompose, + h_graph, + l_graph, + ], + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstdeterminizestar, "--use-log=true"], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstrmsymbols, disambig_in_words_file_int], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstrmepslocal], + input=res.stdout, + capture_output=True, + check=True, + ) + res = subprocess.run( + [fstminimizeencoded], + input=res.stdout, + capture_output=True, + check=True, + ) + out_f.write(res.stdout) + except subprocess.CalledProcessError as e: + logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") + os.remove(hla_graph) + raise + + return hla_graph + + +def create_HLG( + kaldi_root: Path, + fst_dir: Path, + unique_label: str, + hlga_graph: Path, + prefix: str = "HLG", +) -> Path: + hlg_graph = fst_dir / f"{prefix}.{unique_label}.fst" + + if not hlg_graph.exists(): + logger.info(f"Creating {hlg_graph}") + + add_self_loop = script_dir / "add-self-loop-simple" + kaldi_src = kaldi_root / "src" + kaldi_lib = kaldi_src / "lib" + + try: + if not add_self_loop.exists(): + fst_include = kaldi_root / "tools/openfst-1.6.7/include" + add_self_loop_src = script_dir / "add-self-loop-simple.cc" + + subprocess.run( + [ + "c++", + f"-I{kaldi_src}", + f"-I{fst_include}", + f"-L{kaldi_lib}", + add_self_loop_src, + "-lkaldi-base", + "-lkaldi-fstext", + "-o", + add_self_loop, + ], + check=True, + ) + + my_env = os.environ.copy() + my_env["LD_LIBRARY_PATH"] = f"{kaldi_lib}:{my_env['LD_LIBRARY_PATH']}" + + subprocess.run( + [ + add_self_loop, + hlga_graph, + hlg_graph, + ], + check=True, + capture_output=True, + env=my_env, + ) + except subprocess.CalledProcessError as e: + logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") + raise + + return hlg_graph + + +def initalize_kaldi(cfg: KaldiInitializerConfig) -> Path: + if cfg.fst_dir is None: + cfg.fst_dir = osp.join(cfg.data_dir, "kaldi") + if cfg.out_labels is None: + cfg.out_labels = cfg.in_labels + + kaldi_root = Path(cfg.kaldi_root) + data_dir = Path(cfg.data_dir) + fst_dir = Path(cfg.fst_dir) + fst_dir.mkdir(parents=True, exist_ok=True) + + arpa_base = osp.splitext(osp.basename(cfg.lm_arpa))[0] + unique_label = f"{cfg.in_labels}.{arpa_base}" + + with open(data_dir / f"dict.{cfg.in_labels}.txt", "r") as f: + vocab = Dictionary.load(f) + + in_units_file = create_units(fst_dir, cfg.in_labels, vocab) + + grammar_graph, out_words_file = create_G( + kaldi_root, fst_dir, Path(cfg.lm_arpa), arpa_base + ) + + disambig_lexicon_file, disambig_L_in_units_file = create_lexicon( + cfg, fst_dir, unique_label, in_units_file, out_words_file + ) + + h_graph, h_out_units_file, disambig_in_units_file_int = create_H( + kaldi_root, + fst_dir, + disambig_L_in_units_file, + cfg.in_labels, + vocab, + cfg.blank_symbol, + cfg.silence_symbol, + ) + lexicon_graph = create_L( + kaldi_root, + fst_dir, + unique_label, + disambig_lexicon_file, + disambig_L_in_units_file, + out_words_file, + ) + lg_graph = create_LG( + kaldi_root, fst_dir, unique_label, lexicon_graph, grammar_graph + ) + hlga_graph = create_HLGa( + kaldi_root, fst_dir, unique_label, h_graph, lg_graph, disambig_in_units_file_int + ) + hlg_graph = create_HLG(kaldi_root, fst_dir, unique_label, hlga_graph) + + # for debugging + # hla_graph = 
create_HLa(kaldi_root, fst_dir, unique_label, h_graph, lexicon_graph, disambig_in_units_file_int)
+    # hl_graph = create_HLG(kaldi_root, fst_dir, unique_label, hla_graph, prefix="HL_looped")
+    # create_HLG(kaldi_root, fst_dir, "phnc", h_graph, prefix="H_looped")
+
+    return hlg_graph
+
+
+@hydra.main(config_path=config_path, config_name="kaldi_initializer")
+def cli_main(cfg: KaldiInitializerConfig) -> None:
+    container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
+    cfg = OmegaConf.create(container)
+    OmegaConf.set_struct(cfg, True)
+    initalize_kaldi(cfg)
+
+
+if __name__ == "__main__":
+
+    logging.root.setLevel(logging.INFO)
+    logging.basicConfig(level=logging.INFO)
+
+    try:
+        from hydra._internal.utils import (
+            get_args,
+        )  # pylint: disable=import-outside-toplevel
+
+        cfg_name = get_args().config_name or "kaldi_initializer"
+    except ImportError:
+        logger.warning("Failed to get config name from hydra args")
+        cfg_name = "kaldi_initializer"
+
+    cs = ConfigStore.instance()
+    cs.store(name=cfg_name, node=KaldiInitializerConfig)
+
+    cli_main()
diff --git a/triton_model/1/aqc/examples/speech_recognition/models/__init__.py b/triton_model/1/aqc/examples/speech_recognition/models/__init__.py
new file mode 100644
index 0000000..54b5a1c
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/models/__init__.py
@@ -0,0 +1,8 @@
+import importlib
+import os
+
+
+for file in sorted(os.listdir(os.path.dirname(__file__))):
+    if file.endswith(".py") and not file.startswith("_"):
+        model_name = file[: file.find(".py")]
+        importlib.import_module("examples.speech_recognition.models." + model_name)
diff --git a/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/__init__.cpython-310.pyc b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31a9553d4978e3af1941117cab5f16e8c8bd172a
Binary files /dev/null and b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/__init__.cpython-310.pyc differ
diff --git a/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-310.pyc b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..384363fe6698d73d8d5c88768e2066b1c80c89e7
Binary files /dev/null and b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-310.pyc differ
diff --git a/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-39.pyc b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be7a2015be1df9e53a37451a8d4edfd1e3d3261a
Binary files /dev/null and b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/vggtransformer.cpython-39.pyc differ
diff --git a/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/w2l_conv_glu_enc.cpython-39.pyc b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/w2l_conv_glu_enc.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a2a6df0ae9e06df308c8bcad2721b7ce8c4a7c2
Binary files /dev/null and b/triton_model/1/aqc/examples/speech_recognition/models/__pycache__/w2l_conv_glu_enc.cpython-39.pyc differ
zH%v;jl~Y@gABWKn_N2&E12J#t1-5UdYvegeVKqJtNza2DZs zPoLpK`CUp&ILbZzayChQ*BT+$RxZNwk-c~!0{L(<4qaY^OGVyrI-Z6+2k6jcKcrKB zf{x4Ya}_TX_EUnqO^{qP##0!sBT%`v@3{}S?mnE@a3m!fbKPw6DG9U$N0@~2$3xcm?alL|u1a~`sf>Cj(Nccw7NZ;u&vI={hM_y#&jUzS&t zRe2e|6*vO@j~^hbN?9}I)o02xT}M&(?d;ll=SZoYD%r?SsA#y27OtVAg6*0FOH*Ry zNTJzV1XEAJRD!`?w5{awLS4iG)K&HZ$9-twiG()~iV7+f1Qsu#ptHsdf15bV=q2U~ zc=kdh&Hj~cw2DrmD(IPkT&q0O6zy9>oxOIRx#v(g_tc^2Ef^(^#a~a9S@k3Pp*Nb2 zQj1JfcAOQ=dyhn6pITY9qWp)h^xMbPciA2Wf|H7zaCt_3OC$TC1=|yExbSblP9AB> zbE@j (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat) + x = x.transpose(1, 2).transpose(0, 1) + x = x.contiguous().view(output_seq_len, bsz, -1) + + input_lengths = src_lengths.clone() + for s in self.pooling_kernel_sizes: + input_lengths = (input_lengths.float() / s).ceil().long() + + encoder_padding_mask, _ = lengths_to_encoder_padding_mask( + input_lengths, batch_first=True + ) + if not encoder_padding_mask.any(): + encoder_padding_mask = None + + subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5) + attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor) + + transformer_layer_idx = 0 + + for layer_idx in range(len(self.transformer_layers)): + + if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer): + x = self.transformer_layers[layer_idx]( + x, encoder_padding_mask, attn_mask + ) + + if self.transformer_sampling[transformer_layer_idx] != 1: + sampling_factor = self.transformer_sampling[transformer_layer_idx] + x, encoder_padding_mask, attn_mask = self.slice( + x, encoder_padding_mask, attn_mask, sampling_factor + ) + + transformer_layer_idx += 1 + + else: + x = self.transformer_layers[layer_idx](x) + + # encoder_padding_maks is a (T x B) tensor, its [t, b] elements indicate + # whether encoder_output[t, b] is valid or not (valid=0, invalid=1) + + return { + "encoder_out": x, # (T, B, C) + "encoder_padding_mask": encoder_padding_mask.t() + if encoder_padding_mask is not None + else None, + # (B, T) --> (T, B) + } + + def infer_conv_output_dim(self, in_channels, input_dim): + sample_seq_len = 200 + sample_bsz = 10 + x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim) + for i, _ in enumerate(self.conv_layers): + x = self.conv_layers[i](x) + x = x.transpose(1, 2) + mb, seq = x.size()[:2] + return x.contiguous().view(mb, seq, -1).size(-1) + + def validate_transformer_config(self, transformer_config): + for config in transformer_config: + input_dim, num_heads = config[:2] + if input_dim % num_heads != 0: + msg = ( + "ERROR in transformer config {}: ".format(config) + + "input dimension {} ".format(input_dim) + + "not dividable by number of heads {}".format(num_heads) + ) + raise ValueError(msg) + + def parse_transformer_context(self, transformer_context): + """ + transformer_context can be the following: + - None; indicates no context is used, i.e., + transformer can access full context + - a tuple/list of two int; indicates left and right context, + any number <0 indicates infinite context + * e.g., (5, 6) indicates that for query at x_t, transformer can + access [t-5, t+6] (inclusive) + * e.g., (-1, 6) indicates that for query at x_t, transformer can + access [0, t+6] (inclusive) + """ + if transformer_context is None: + return None + + if not isinstance(transformer_context, Iterable): + raise ValueError("transformer context must be Iterable if it is not None") + + if len(transformer_context) != 2: + raise ValueError("transformer context must have length 2") + + left_context = transformer_context[0] + if left_context < 0: + 
+
+    def infer_conv_output_dim(self, in_channels, input_dim):
+        sample_seq_len = 200
+        sample_bsz = 10
+        x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
+        for i, _ in enumerate(self.conv_layers):
+            x = self.conv_layers[i](x)
+        x = x.transpose(1, 2)
+        mb, seq = x.size()[:2]
+        return x.contiguous().view(mb, seq, -1).size(-1)
+
+    def validate_transformer_config(self, transformer_config):
+        for config in transformer_config:
+            input_dim, num_heads = config[:2]
+            if input_dim % num_heads != 0:
+                msg = (
+                    "ERROR in transformer config {}: ".format(config)
+                    + "input dimension {} ".format(input_dim)
+                    + "not divisible by number of heads {}".format(num_heads)
+                )
+                raise ValueError(msg)
+
+    def parse_transformer_context(self, transformer_context):
+        """
+        transformer_context can be the following:
+        -   None; indicates no context is used, i.e., the transformer can
+            access the full context
+        -   a tuple/list of two ints; indicates left and right context,
+            any number < 0 indicates infinite context
+                * e.g., (5, 6) indicates that for a query at x_t, the
+                  transformer can access [t-5, t+6] (inclusive)
+                * e.g., (-1, 6) indicates that for a query at x_t, the
+                  transformer can access [0, t+6] (inclusive)
+        """
+        if transformer_context is None:
+            return None
+
+        if not isinstance(transformer_context, Iterable):
+            raise ValueError("transformer context must be Iterable if it is not None")
+
+        if len(transformer_context) != 2:
+            raise ValueError("transformer context must have length 2")
+
+        left_context = transformer_context[0]
+        if left_context < 0:
+            left_context = None
+
+        right_context = transformer_context[1]
+        if right_context < 0:
+            right_context = None
+
+        if left_context is None and right_context is None:
+            return None
+
+        return (left_context, right_context)
+
+    def parse_transformer_sampling(self, transformer_sampling, num_layers):
+        """
+        Parse the transformer sampling configuration.
+
+        Args:
+            - transformer_sampling, accepted input:
+                * None, indicating no sampling
+                * an Iterable with int (>0) as element
+            - num_layers, expected number of transformer layers; must match
+              the length of transformer_sampling if it is not None
+
+        Returns:
+            - A tuple with length num_layers
+        """
+        if transformer_sampling is None:
+            return (1,) * num_layers
+
+        if not isinstance(transformer_sampling, Iterable):
+            raise ValueError(
+                "transformer_sampling must be an iterable if it is not None"
+            )
+
+        if len(transformer_sampling) != num_layers:
+            raise ValueError(
+                "transformer_sampling {} does not match with the number "
+                "of layers {}".format(transformer_sampling, num_layers)
+            )
+
+        for layer, value in enumerate(transformer_sampling):
+            if not isinstance(value, int):
+                raise ValueError(
+                    "Invalid value in transformer_sampling: {}".format(value)
+                )
+            if value < 1:
+                raise ValueError(
+                    "{} layer's subsampling is {}.".format(layer, value)
+                    + " This is not allowed!"
+                )
+        return transformer_sampling
+
+    def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
+        """
+        embedding is a (T, B, D) tensor
+        padding_mask is a (B, T) tensor or None
+        attn_mask is a (T, T) tensor or None
+        """
+        embedding = embedding[::sampling_factor, :, :]
+        if padding_mask is not None:
+            padding_mask = padding_mask[:, ::sampling_factor]
+        if attn_mask is not None:
+            attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
+
+        return embedding, padding_mask, attn_mask
+
+    def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
+        """
+        Create an attention mask according to sequence lengths and the
+        transformer context.
+
+        Args:
+            - input_lengths: (B,)-shaped Int/Long tensor; input_lengths[b] is
+              the length of the b-th sequence
+            - subsampling_factor: int
+                * Note that left_context and right_context are specified at
+                  the input frame level, while the input to the transformer
+                  may already have gone through subsampling (e.g., striding in
+                  vggblock); subsampling_factor scales the left/right context
+
+        Return:
+            - a (T, T) binary tensor or None, where T is max(input_lengths)
+                * if self.transformer_context is None, None
+                * if left_context is None,
+                    * attn_mask[t, t + right_context + 1:] = 1
+                    * others = 0
+                * if right_context is None,
+                    * attn_mask[t, 0:t - left_context] = 1
+                    * others = 0
+                * else
+                    * attn_mask[t, t - left_context: t + right_context + 1] = 0
+                    * others = 1
+        """
+        if self.transformer_context is None:
+            return None
+
+        maxT = torch.max(input_lengths).item()
+        attn_mask = torch.zeros(maxT, maxT)
+
+        left_context = self.transformer_context[0]
+        right_context = self.transformer_context[1]
+        if left_context is not None:
+            left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
+        if right_context is not None:
+            right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
+
+        for t in range(maxT):
+            if left_context is not None:
+                st = 0
+                en = max(st, t - left_context)
+                attn_mask[t, st:en] = 1
+            if right_context is not None:
+                st = t + right_context + 1
+                st = min(st, maxT - 1)
+                attn_mask[t, st:] = 1
+
+        return attn_mask.to(input_lengths.device)
+
+    def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( + 1, new_order + ) + if encoder_out["encoder_padding_mask"] is not None: + encoder_out["encoder_padding_mask"] = encoder_out[ + "encoder_padding_mask" + ].index_select(1, new_order) + return encoder_out + + +class TransformerDecoder(FairseqIncrementalDecoder): + """ + Transformer decoder consisting of *args.decoder_layers* layers. Each layer + is a :class:`TransformerDecoderLayer`. + Args: + args (argparse.Namespace): parsed command-line arguments + dictionary (~fairseq.data.Dictionary): decoding dictionary + embed_tokens (torch.nn.Embedding): output embedding + no_encoder_attn (bool, optional): whether to attend to encoder outputs. + Default: ``False`` + left_pad (bool, optional): whether the input is left-padded. Default: + ``False`` + """ + + def __init__( + self, + dictionary, + embed_dim=512, + transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, + conv_config=DEFAULT_DEC_CONV_CONFIG, + encoder_output_dim=512, + ): + + super().__init__(dictionary) + vocab_size = len(dictionary) + self.padding_idx = dictionary.pad() + self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx) + + self.conv_layers = nn.ModuleList() + for i in range(len(conv_config)): + out_channels, kernel_size, layer_norm = conv_config[i] + if i == 0: + conv_layer = LinearizedConv1d( + embed_dim, out_channels, kernel_size, padding=kernel_size - 1 + ) + else: + conv_layer = LinearizedConv1d( + conv_config[i - 1][0], + out_channels, + kernel_size, + padding=kernel_size - 1, + ) + self.conv_layers.append(conv_layer) + if layer_norm: + self.conv_layers.append(nn.LayerNorm(out_channels)) + self.conv_layers.append(nn.ReLU()) + + self.layers = nn.ModuleList() + if conv_config[-1][0] != transformer_config[0][0]: + self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0])) + self.layers.append( + TransformerDecoderLayer( + prepare_transformer_decoder_params(*transformer_config[0]) + ) + ) + + for i in range(1, len(transformer_config)): + if transformer_config[i - 1][0] != transformer_config[i][0]: + self.layers.append( + Linear(transformer_config[i - 1][0], transformer_config[i][0]) + ) + self.layers.append( + TransformerDecoderLayer( + prepare_transformer_decoder_params(*transformer_config[i]) + ) + ) + self.fc_out = Linear(transformer_config[-1][0], vocab_size) + + def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): + """ + Args: + prev_output_tokens (LongTensor): previous decoder outputs of shape + `(batch, tgt_len)`, for input feeding/teacher forcing + encoder_out (Tensor, optional): output from the encoder, used for + encoder-side attention + incremental_state (dict): dictionary used for storing state during + :ref:`Incremental decoding` + Returns: + tuple: + - the last decoder layer's output of shape `(batch, tgt_len, + vocab)` + - the last decoder layer's attention weights of shape `(batch, + tgt_len, src_len)` + """ + target_padding_mask = ( + (prev_output_tokens == self.padding_idx).to(prev_output_tokens.device) + if incremental_state is None + else None + ) + + if incremental_state is not None: + prev_output_tokens = prev_output_tokens[:, -1:] + + # embed tokens + x = self.embed_tokens(prev_output_tokens) + + # B x T x C -> T x B x C + x = self._transpose_if_training(x, incremental_state) + + for layer in self.conv_layers: + if isinstance(layer, LinearizedConvolution): + x = layer(x, incremental_state) + else: + x = layer(x) + + # B x T x C -> T x B x C + x = self._transpose_if_inference(x, 
incremental_state) + + # decoder layers + for layer in self.layers: + if isinstance(layer, TransformerDecoderLayer): + x, *_ = layer( + x, + (encoder_out["encoder_out"] if encoder_out is not None else None), + ( + encoder_out["encoder_padding_mask"].t() + if encoder_out["encoder_padding_mask"] is not None + else None + ), + incremental_state, + self_attn_mask=( + self.buffered_future_mask(x) + if incremental_state is None + else None + ), + self_attn_padding_mask=( + target_padding_mask if incremental_state is None else None + ), + ) + else: + x = layer(x) + + # T x B x C -> B x T x C + x = x.transpose(0, 1) + + x = self.fc_out(x) + + return x, None + + def buffered_future_mask(self, tensor): + dim = tensor.size(0) + if ( + not hasattr(self, "_future_mask") + or self._future_mask is None + or self._future_mask.device != tensor.device + ): + self._future_mask = torch.triu( + utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 + ) + if self._future_mask.size(0) < dim: + self._future_mask = torch.triu( + utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 + ) + return self._future_mask[:dim, :dim] + + def _transpose_if_training(self, x, incremental_state): + if incremental_state is None: + x = x.transpose(0, 1) + return x + + def _transpose_if_inference(self, x, incremental_state): + if incremental_state: + x = x.transpose(0, 1) + return x + + +@register_model("asr_vggtransformer_encoder") +class VGGTransformerEncoderModel(FairseqEncoderModel): + def __init__(self, encoder): + super().__init__(encoder) + + @staticmethod + def add_args(parser): + """Add model-specific arguments to the parser.""" + parser.add_argument( + "--input-feat-per-channel", + type=int, + metavar="N", + help="encoder input dimension per input channel", + ) + parser.add_argument( + "--vggblock-enc-config", + type=str, + metavar="EXPR", + help=""" + an array of tuples each containing the configuration of one vggblock + [(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...] 
+ """, + ) + parser.add_argument( + "--transformer-enc-config", + type=str, + metavar="EXPR", + help=""" + a tuple containing the configuration of the Transformer layers + configurations: + [(input_dim, + num_heads, + ffn_dim, + normalize_before, + dropout, + attention_dropout, + relu_dropout), ]""", + ) + parser.add_argument( + "--enc-output-dim", + type=int, + metavar="N", + help="encoder output dimension, projecting the LSTM output", + ) + parser.add_argument( + "--in-channels", + type=int, + metavar="N", + help="number of encoder input channels", + ) + parser.add_argument( + "--transformer-context", + type=str, + metavar="EXPR", + help=""" + either None or a tuple of two ints, indicating left/right context a + transformer can have access to""", + ) + parser.add_argument( + "--transformer-sampling", + type=str, + metavar="EXPR", + help=""" + either None or a tuple of ints, indicating sampling factor in each layer""", + ) + + @classmethod + def build_model(cls, args, task): + """Build a new model instance.""" + base_architecture_enconly(args) + encoder = VGGTransformerEncoderOnly( + vocab_size=len(task.target_dictionary), + input_feat_per_channel=args.input_feat_per_channel, + vggblock_config=eval(args.vggblock_enc_config), + transformer_config=eval(args.transformer_enc_config), + encoder_output_dim=args.enc_output_dim, + in_channels=args.in_channels, + transformer_context=eval(args.transformer_context), + transformer_sampling=eval(args.transformer_sampling), + ) + return cls(encoder) + + def get_normalized_probs(self, net_output, log_probs, sample=None): + # net_output['encoder_out'] is a (T, B, D) tensor + lprobs = super().get_normalized_probs(net_output, log_probs, sample) + # lprobs is a (T, B, D) tensor + # we need to transoose to get (B, T, D) tensor + lprobs = lprobs.transpose(0, 1).contiguous() + lprobs.batch_first = True + return lprobs + + +class VGGTransformerEncoderOnly(VGGTransformerEncoder): + def __init__( + self, + vocab_size, + input_feat_per_channel, + vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG, + transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, + encoder_output_dim=512, + in_channels=1, + transformer_context=None, + transformer_sampling=None, + ): + super().__init__( + input_feat_per_channel=input_feat_per_channel, + vggblock_config=vggblock_config, + transformer_config=transformer_config, + encoder_output_dim=encoder_output_dim, + in_channels=in_channels, + transformer_context=transformer_context, + transformer_sampling=transformer_sampling, + ) + self.fc_out = Linear(self.encoder_output_dim, vocab_size) + + def forward(self, src_tokens, src_lengths, **kwargs): + """ + src_tokens: padded tensor (B, T, C * feat) + src_lengths: tensor of original lengths of input utterances (B,) + """ + + enc_out = super().forward(src_tokens, src_lengths) + x = self.fc_out(enc_out["encoder_out"]) + # x = F.log_softmax(x, dim=-1) + # Note: no need this line, because model.get_normalized_prob will call + # log_softmax + return { + "encoder_out": x, # (T, B, C) + "encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B) + } + + def max_positions(self): + """Maximum input length supported by the encoder.""" + return (1e6, 1e6) # an arbitrary large number + + +def Embedding(num_embeddings, embedding_dim, padding_idx): + m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) + # nn.init.uniform_(m.weight, -0.1, 0.1) + # nn.init.constant_(m.weight[padding_idx], 0) + return m + + +def Linear(in_features, out_features, bias=True, dropout=0): + """Linear layer (input: 
N x T x C)""" + m = nn.Linear(in_features, out_features, bias=bias) + # m.weight.data.uniform_(-0.1, 0.1) + # if bias: + # m.bias.data.uniform_(-0.1, 0.1) + return m + + +def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs): + """Weight-normalized Conv1d layer optimized for decoding""" + m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) + std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) + nn.init.normal_(m.weight, mean=0, std=std) + nn.init.constant_(m.bias, 0) + return nn.utils.weight_norm(m, dim=2) + + +def LayerNorm(embedding_dim): + m = nn.LayerNorm(embedding_dim) + return m + + +# seq2seq models +def base_architecture(args): + args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40) + args.vggblock_enc_config = getattr( + args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG + ) + args.transformer_enc_config = getattr( + args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG + ) + args.enc_output_dim = getattr(args, "enc_output_dim", 512) + args.in_channels = getattr(args, "in_channels", 1) + args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128) + args.transformer_dec_config = getattr( + args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG + ) + args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG) + args.transformer_context = getattr(args, "transformer_context", "None") + + +@register_model_architecture("asr_vggtransformer", "vggtransformer_1") +def vggtransformer_1(args): + args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) + args.vggblock_enc_config = getattr( + args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" + ) + args.transformer_enc_config = getattr( + args, + "transformer_enc_config", + "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14", + ) + args.enc_output_dim = getattr(args, "enc_output_dim", 1024) + args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128) + args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") + args.transformer_dec_config = getattr( + args, + "transformer_dec_config", + "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4", + ) + + +@register_model_architecture("asr_vggtransformer", "vggtransformer_2") +def vggtransformer_2(args): + args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) + args.vggblock_enc_config = getattr( + args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" + ) + args.transformer_enc_config = getattr( + args, + "transformer_enc_config", + "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16", + ) + args.enc_output_dim = getattr(args, "enc_output_dim", 1024) + args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512) + args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") + args.transformer_dec_config = getattr( + args, + "transformer_dec_config", + "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6", + ) + + +@register_model_architecture("asr_vggtransformer", "vggtransformer_base") +def vggtransformer_base(args): + args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) + args.vggblock_enc_config = getattr( + args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" + ) + args.transformer_enc_config = getattr( + args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12" + ) + + args.enc_output_dim = getattr(args, "enc_output_dim", 512) + args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512) + 
+    args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
+    args.transformer_dec_config = getattr(
+        args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
+    )
+    # Size estimations:
+    # Encoder:
+    #   - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3*3 = 258K
+    #   Transformer:
+    #   - input dimension adapter: 2560 x 512 -> 1.31M
+    #   - transformer_layers (x12) --> 37.74M
+    #       * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
+    #       * FFN weight: 512*2048*2 = 2.097M
+    #   - output dimension adapter: 512 x 512 -> 0.26 M
+    # Decoder:
+    #   - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
+    #   - transformer_layer: (x6) --> 25.16M
+    #        * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
+    #        * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
+    #        * FFN: 512*2048*2 = 2.097M
+    # Final FC:
+    #   - FC: 512*5000 = 2.56M (assuming vocab size 5K)
+    # In total:
+    #       ~65 M
+
+
+# CTC models
+def base_architecture_enconly(args):
+    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
+    args.vggblock_enc_config = getattr(
+        args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
+    )
+    args.transformer_enc_config = getattr(
+        args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
+    )
+    args.enc_output_dim = getattr(args, "enc_output_dim", 512)
+    args.in_channels = getattr(args, "in_channels", 1)
+    args.transformer_context = getattr(args, "transformer_context", "None")
+    args.transformer_sampling = getattr(args, "transformer_sampling", "None")
+
+
+@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
+def vggtransformer_enc_1(args):
+    # vggtransformer_enc_1 is the same as vggtransformer_enc_big, except the
+    # number of layers is increased to 16;
+    # keep it here for backward compatibility purposes
+    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
+    args.vggblock_enc_config = getattr(
+        args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
+    )
+    args.transformer_enc_config = getattr(
+        args,
+        "transformer_enc_config",
+        "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
+    )
+    args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
diff --git a/triton_model/1/aqc/examples/speech_recognition/models/w2l_conv_glu_enc.py b/triton_model/1/aqc/examples/speech_recognition/models/w2l_conv_glu_enc.py
new file mode 100644
index 0000000..655a9b0
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/models/w2l_conv_glu_enc.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
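+
+# Editorial sketch (not part of the original file): each tuple in
+# default_conv_enc_config below is (out_channels, kernel_size, padding, dropout).
+# F.glu halves the channel dimension, so the first layer maps the input features
+# to 400 conv channels and 200 GLU outputs.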
+ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from fairseq.models import ( + FairseqEncoder, + FairseqEncoderModel, + register_model, + register_model_architecture, +) +from fairseq.modules.fairseq_dropout import FairseqDropout + + +default_conv_enc_config = """[ + (400, 13, 170, 0.2), + (440, 14, 0, 0.214), + (484, 15, 0, 0.22898), + (532, 16, 0, 0.2450086), + (584, 17, 0, 0.262159202), + (642, 18, 0, 0.28051034614), + (706, 19, 0, 0.30014607037), + (776, 20, 0, 0.321156295296), + (852, 21, 0, 0.343637235966), + (936, 22, 0, 0.367691842484), + (1028, 23, 0, 0.393430271458), + (1130, 24, 0, 0.42097039046), + (1242, 25, 0, 0.450438317792), + (1366, 26, 0, 0.481969000038), + (1502, 27, 0, 0.51570683004), + (1652, 28, 0, 0.551806308143), + (1816, 29, 0, 0.590432749713), +]""" + + +@register_model("asr_w2l_conv_glu_encoder") +class W2lConvGluEncoderModel(FairseqEncoderModel): + def __init__(self, encoder): + super().__init__(encoder) + + @staticmethod + def add_args(parser): + """Add model-specific arguments to the parser.""" + parser.add_argument( + "--input-feat-per-channel", + type=int, + metavar="N", + help="encoder input dimension per input channel", + ) + parser.add_argument( + "--in-channels", + type=int, + metavar="N", + help="number of encoder input channels", + ) + parser.add_argument( + "--conv-enc-config", + type=str, + metavar="EXPR", + help=""" + an array of tuples each containing the configuration of one conv layer + [(out_channels, kernel_size, padding, dropout), ...] + """, + ) + + @classmethod + def build_model(cls, args, task): + """Build a new model instance.""" + conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config) + encoder = W2lConvGluEncoder( + vocab_size=len(task.target_dictionary), + input_feat_per_channel=args.input_feat_per_channel, + in_channels=args.in_channels, + conv_enc_config=eval(conv_enc_config), + ) + return cls(encoder) + + def get_normalized_probs(self, net_output, log_probs, sample=None): + lprobs = super().get_normalized_probs(net_output, log_probs, sample) + lprobs.batch_first = False + return lprobs + + +class W2lConvGluEncoder(FairseqEncoder): + def __init__( + self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config + ): + super().__init__(None) + + self.input_dim = input_feat_per_channel + if in_channels != 1: + raise ValueError("only 1 input channel is currently supported") + + self.conv_layers = nn.ModuleList() + self.linear_layers = nn.ModuleList() + self.dropouts = [] + cur_channels = input_feat_per_channel + + for out_channels, kernel_size, padding, dropout in conv_enc_config: + layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding) + layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init + self.conv_layers.append(nn.utils.weight_norm(layer)) + self.dropouts.append( + FairseqDropout(dropout, module_name=self.__class__.__name__) + ) + if out_channels % 2 != 0: + raise ValueError("odd # of out_channels is incompatible with GLU") + cur_channels = out_channels // 2 # halved by GLU + + for out_channels in [2 * cur_channels, vocab_size]: + layer = nn.Linear(cur_channels, out_channels) + layer.weight.data.mul_(math.sqrt(3)) + self.linear_layers.append(nn.utils.weight_norm(layer)) + cur_channels = out_channels // 2 + + def forward(self, src_tokens, src_lengths, **kwargs): + + """ + src_tokens: padded tensor (B, T, C * feat) + src_lengths: tensor of original lengths of input utterances (B,) + """ + B, T, _ = src_tokens.size() + x = 
src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1 + + for layer_idx in range(len(self.conv_layers)): + x = self.conv_layers[layer_idx](x) + x = F.glu(x, dim=1) + x = self.dropouts[layer_idx](x) + + x = x.transpose(1, 2).contiguous() # (B, T, 908) + x = self.linear_layers[0](x) + x = F.glu(x, dim=2) + x = self.dropouts[-1](x) + x = self.linear_layers[1](x) + + assert x.size(0) == B + assert x.size(1) == T + + encoder_out = x.transpose(0, 1) # (T, B, vocab_size) + + # need to debug this -- find a simpler/elegant way in pytorch APIs + encoder_padding_mask = ( + torch.arange(T).view(1, T).expand(B, -1).to(x.device) + >= src_lengths.view(B, 1).expand(-1, T) + ).t() # (B x T) -> (T x B) + + return { + "encoder_out": encoder_out, # (T, B, vocab_size) + "encoder_padding_mask": encoder_padding_mask, # (T, B) + } + + def reorder_encoder_out(self, encoder_out, new_order): + encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( + 1, new_order + ) + encoder_out["encoder_padding_mask"] = encoder_out[ + "encoder_padding_mask" + ].index_select(1, new_order) + return encoder_out + + def max_positions(self): + """Maximum input length supported by the encoder.""" + return (1e6, 1e6) # an arbitrary large number + + +@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc") +def w2l_conv_glu_enc(args): + args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) + args.in_channels = getattr(args, "in_channels", 1) + args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config) diff --git a/triton_model/1/aqc/examples/speech_recognition/new/README.md b/triton_model/1/aqc/examples/speech_recognition/new/README.md new file mode 100644 index 0000000..5fa0e97 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/new/README.md @@ -0,0 +1,43 @@ +# Flashlight Decoder + +This script runs decoding for pre-trained speech recognition models. 
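+It wraps the decoders under `decoders/` (greedy Viterbi, KenLM, and a fairseq
+LM decoder built on the Flashlight bindings), selected via `decoding.type`.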
+
+## Usage
+
+Assuming a few variables:
+
+```bash
+checkpoint=<path-to-checkpoint>
+data=<path-to-data-directory>
+lm_model=<path-to-language-model>
+lexicon=<path-to-lexicon>
+```
+
+Example usage for decoding a fine-tuned Wav2Vec model:
+
+```bash
+python $FAIRSEQ_ROOT/examples/speech_recognition/new/infer.py --multirun \
+    task=audio_pretraining \
+    task.data=$data \
+    task.labels=ltr \
+    common_eval.path=$checkpoint \
+    decoding.type=kenlm \
+    decoding.lexicon=$lexicon \
+    decoding.lmpath=$lm_model \
+    dataset.gen_subset=dev_clean,dev_other,test_clean,test_other
+```
+
+Example usage for using Ax to sweep WER parameters (requires `pip install hydra-ax-sweeper`):
+
+```bash
+python $FAIRSEQ_ROOT/examples/speech_recognition/new/infer.py --multirun \
+    hydra/sweeper=ax \
+    task=audio_pretraining \
+    task.data=$data \
+    task.labels=ltr \
+    common_eval.path=$checkpoint \
+    decoding.type=kenlm \
+    decoding.lexicon=$lexicon \
+    decoding.lmpath=$lm_model \
+    dataset.gen_subset=dev_other
+```
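+
+After a run, a `wer` summary file is written under `common_eval.results_path`
+(suffixed with a hash when `decoding.unique_wer_file=True`). A minimal sketch of
+reading it back; the `WER: <value>` first-line format comes from `infer.py`:
+
+```python
+# read the WER summary written by infer.py; first line looks like "WER: 12.34"
+with open("wer", "r") as f:
+    wer = float(f.readline().strip().split(" ")[1])
+print(f"WER: {wer:.2f}")
+```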
diff --git a/triton_model/1/aqc/examples/speech_recognition/new/__init__.py b/triton_model/1/aqc/examples/speech_recognition/new/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/triton_model/1/aqc/examples/speech_recognition/new/conf/hydra/sweeper/ax.yaml b/triton_model/1/aqc/examples/speech_recognition/new/conf/hydra/sweeper/ax.yaml
new file mode 100644
index 0000000..38e9c22
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/new/conf/hydra/sweeper/ax.yaml
@@ -0,0 +1,29 @@
+# @package hydra.sweeper
+_target_: hydra_plugins.hydra_ax_sweeper.ax_sweeper.AxSweeper
+max_batch_size: null
+ax_config:
+  max_trials: 128
+  early_stop:
+    minimize: true
+    max_epochs_without_improvement: 10
+    epsilon: 0.025
+  experiment:
+    name: ${dataset.gen_subset}
+    objective_name: wer
+    minimize: true
+    parameter_constraints: null
+    outcome_constraints: null
+    status_quo: null
+  client:
+    verbose_logging: false
+    random_seed: null
+  params:
+    decoding.lmweight:
+      type: range
+      bounds: [0.0, 5.0]
+    decoding.wordscore:
+      type: range
+      bounds: [-5.0, 5.0]
+    decoding.silweight:
+      type: range
+      bounds: [-8.0, 0.0]
diff --git a/triton_model/1/aqc/examples/speech_recognition/new/conf/infer.yaml b/triton_model/1/aqc/examples/speech_recognition/new/conf/infer.yaml
new file mode 100644
index 0000000..21dd19f
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/new/conf/infer.yaml
@@ -0,0 +1,25 @@
+# @package _group_
+
+defaults:
+  - task: null
+  - model: null
+
+hydra:
+  run:
+    dir: ${common_eval.results_path}/${dataset.gen_subset}
+  sweep:
+    dir: /checkpoint/${env:USER}/${env:PREFIX}/${common_eval.results_path}
+    subdir: ${dataset.gen_subset}
+common_eval:
+  results_path: null
+  path: null
+  post_process: letter
+  quiet: true
+dataset:
+  max_tokens: 3000000
+  gen_subset: test
+distributed_training:
+  distributed_world_size: 1
+decoding:
+  beam: 5
+  type: viterbi
diff --git a/triton_model/1/aqc/examples/speech_recognition/new/decoders/__init__.py b/triton_model/1/aqc/examples/speech_recognition/new/decoders/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/triton_model/1/aqc/examples/speech_recognition/new/decoders/base_decoder.py b/triton_model/1/aqc/examples/speech_recognition/new/decoders/base_decoder.py
new file mode 100644
index 0000000..a097969
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/new/decoders/base_decoder.py
@@ -0,0 +1,62 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import itertools as it
+from typing import Any, Dict, List
+
+import torch
+from fairseq.data.dictionary import Dictionary
+from fairseq.models.fairseq_model import FairseqModel
+
+
+class BaseDecoder:
+    def __init__(self, tgt_dict: Dictionary) -> None:
+        self.tgt_dict = tgt_dict
+        self.vocab_size = len(tgt_dict)
+
+        self.blank = (
+            tgt_dict.index("<ctc_blank>")
+            if "<ctc_blank>" in tgt_dict.indices
+            else tgt_dict.bos()
+        )
+        if "<sep>" in tgt_dict.indices:
+            self.silence = tgt_dict.index("<sep>")
+        elif "|" in tgt_dict.indices:
+            self.silence = tgt_dict.index("|")
+        else:
+            self.silence = tgt_dict.eos()
+
+    def generate(
+        self, models: List[FairseqModel], sample: Dict[str, Any], **unused
+    ) -> List[List[Dict[str, torch.LongTensor]]]:
+        encoder_input = {
+            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
+        }
+        emissions = self.get_emissions(models, encoder_input)
+        return self.decode(emissions)
+
+    def get_emissions(
+        self,
+        models: List[FairseqModel],
+        encoder_input: Dict[str, Any],
+    ) -> torch.FloatTensor:
+        model = models[0]
+        encoder_out = model(**encoder_input)
+        if hasattr(model, "get_logits"):
+            emissions = model.get_logits(encoder_out)
+        else:
+            emissions = model.get_normalized_probs(encoder_out, log_probs=True)
+        return emissions.transpose(0, 1).float().cpu().contiguous()
+
+    def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor:
+        idxs = (g[0] for g in it.groupby(idxs))
+        idxs = filter(lambda x: x != self.blank, idxs)
+        return torch.LongTensor(list(idxs))
+
+    def decode(
+        self,
+        emissions: torch.FloatTensor,
+    ) -> List[List[Dict[str, torch.LongTensor]]]:
+        raise NotImplementedError
diff --git a/triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder.py b/triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder.py
new file mode 100644
index 0000000..b5bec8c
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Union
+
+from fairseq.data.dictionary import Dictionary
+
+from .decoder_config import DecoderConfig, FlashlightDecoderConfig
+from .base_decoder import BaseDecoder
+
+
+def Decoder(
+    cfg: Union[DecoderConfig, FlashlightDecoderConfig], tgt_dict: Dictionary
+) -> BaseDecoder:
+
+    if cfg.type == "viterbi":
+        from .viterbi_decoder import ViterbiDecoder
+
+        return ViterbiDecoder(tgt_dict)
+    if cfg.type == "kenlm":
+        from .flashlight_decoder import KenLMDecoder
+
+        return KenLMDecoder(cfg, tgt_dict)
+    if cfg.type == "fairseqlm":
+        from .flashlight_decoder import FairseqLMDecoder
+
+        return FairseqLMDecoder(cfg, tgt_dict)
+    raise NotImplementedError(f"Invalid decoder type: {cfg.type}")
diff --git a/triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder_config.py b/triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder_config.py
new file mode 100644
index 0000000..659eb94
--- /dev/null
+++ b/triton_model/1/aqc/examples/speech_recognition/new/decoders/decoder_config.py
@@ -0,0 +1,70 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
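+
+# Editorial sketch (assumed usage, not in the original file): DecoderConfig()
+# alone selects viterbi decoding via the Decoder factory above, while KenLM
+# decoding also needs FlashlightDecoderConfig fields, e.g. lmpath="4gram.bin"
+# and lexicon="lexicon.txt" (hypothetical paths) combined with type="kenlm",
+# as the DecodingConfig dataclass in infer.py does.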
+ +import math +from dataclasses import dataclass, field +from typing import Optional + +from fairseq.dataclass.configs import FairseqDataclass +from fairseq.dataclass.constants import ChoiceEnum +from omegaconf import MISSING + + +DECODER_CHOICES = ChoiceEnum(["viterbi", "kenlm", "fairseqlm"]) + + +@dataclass +class DecoderConfig(FairseqDataclass): + type: DECODER_CHOICES = field( + default="viterbi", + metadata={"help": "The type of decoder to use"}, + ) + + +@dataclass +class FlashlightDecoderConfig(FairseqDataclass): + nbest: int = field( + default=1, + metadata={"help": "Number of decodings to return"}, + ) + unitlm: bool = field( + default=False, + metadata={"help": "If set, use unit language model"}, + ) + lmpath: str = field( + default=MISSING, + metadata={"help": "Language model for KenLM decoder"}, + ) + lexicon: Optional[str] = field( + default=None, + metadata={"help": "Lexicon for Flashlight decoder"}, + ) + beam: int = field( + default=50, + metadata={"help": "Number of beams to use for decoding"}, + ) + beamthreshold: float = field( + default=50.0, + metadata={"help": "Threshold for beam search decoding"}, + ) + beamsizetoken: Optional[int] = field( + default=None, metadata={"help": "Beam size to use"} + ) + wordscore: float = field( + default=-1, + metadata={"help": "Word score for KenLM decoder"}, + ) + unkweight: float = field( + default=-math.inf, + metadata={"help": "Unknown weight for KenLM decoder"}, + ) + silweight: float = field( + default=0, + metadata={"help": "Silence weight for KenLM decoder"}, + ) + lmweight: float = field( + default=2, + metadata={"help": "Weight for LM while interpolating score"}, + ) diff --git a/triton_model/1/aqc/examples/speech_recognition/new/decoders/flashlight_decoder.py b/triton_model/1/aqc/examples/speech_recognition/new/decoders/flashlight_decoder.py new file mode 100644 index 0000000..38c7ac4 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/new/decoders/flashlight_decoder.py @@ -0,0 +1,431 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import gc +import os.path as osp +import warnings +from collections import deque, namedtuple +from typing import Any, Dict, Tuple + +import numpy as np +import torch +from fairseq import tasks +from fairseq.data.dictionary import Dictionary +from fairseq.dataclass.utils import convert_namespace_to_omegaconf +from fairseq.models.fairseq_model import FairseqModel +from fairseq.utils import apply_to_sample +from omegaconf import open_dict, OmegaConf + +from typing import List + +from .decoder_config import FlashlightDecoderConfig +from .base_decoder import BaseDecoder + +try: + from flashlight.lib.text.decoder import ( + LM, + CriterionType, + DecodeResult, + KenLM, + LexiconDecoder, + LexiconDecoderOptions, + LexiconFreeDecoder, + LexiconFreeDecoderOptions, + LMState, + SmearingMode, + Trie, + ) + from flashlight.lib.text.dictionary import create_word_dict, load_words +except ImportError: + warnings.warn( + "flashlight python bindings are required to use this functionality. 
" + "Please install from " + "https://github.com/facebookresearch/flashlight/tree/master/bindings/python" + ) + LM = object + LMState = object + + +class KenLMDecoder(BaseDecoder): + def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: + super().__init__(tgt_dict) + + self.nbest = cfg.nbest + self.unitlm = cfg.unitlm + + if cfg.lexicon: + self.lexicon = load_words(cfg.lexicon) + self.word_dict = create_word_dict(self.lexicon) + self.unk_word = self.word_dict.get_index("") + + self.lm = KenLM(cfg.lmpath, self.word_dict) + self.trie = Trie(self.vocab_size, self.silence) + + start_state = self.lm.start(False) + for word, spellings in self.lexicon.items(): + word_idx = self.word_dict.get_index(word) + _, score = self.lm.score(start_state, word_idx) + for spelling in spellings: + spelling_idxs = [tgt_dict.index(token) for token in spelling] + assert ( + tgt_dict.unk() not in spelling_idxs + ), f"{word} {spelling} {spelling_idxs}" + self.trie.insert(spelling_idxs, word_idx, score) + self.trie.smear(SmearingMode.MAX) + + self.decoder_opts = LexiconDecoderOptions( + beam_size=cfg.beam, + beam_size_token=cfg.beamsizetoken or len(tgt_dict), + beam_threshold=cfg.beamthreshold, + lm_weight=cfg.lmweight, + word_score=cfg.wordscore, + unk_score=cfg.unkweight, + sil_score=cfg.silweight, + log_add=False, + criterion_type=CriterionType.CTC, + ) + + self.decoder = LexiconDecoder( + self.decoder_opts, + self.trie, + self.lm, + self.silence, + self.blank, + self.unk_word, + [], + self.unitlm, + ) + else: + assert self.unitlm, "Lexicon-free decoding requires unit LM" + + d = {w: [[w]] for w in tgt_dict.symbols} + self.word_dict = create_word_dict(d) + self.lm = KenLM(cfg.lmpath, self.word_dict) + self.decoder_opts = LexiconFreeDecoderOptions( + beam_size=cfg.beam, + beam_size_token=cfg.beamsizetoken or len(tgt_dict), + beam_threshold=cfg.beamthreshold, + lm_weight=cfg.lmweight, + sil_score=cfg.silweight, + log_add=False, + criterion_type=CriterionType.CTC, + ) + self.decoder = LexiconFreeDecoder( + self.decoder_opts, self.lm, self.silence, self.blank, [] + ) + + def get_timesteps(self, token_idxs: List[int]) -> List[int]: + """Returns frame numbers corresponding to every non-blank token. + + Parameters + ---------- + token_idxs : List[int] + IDs of decoded tokens. + + Returns + ------- + List[int] + Frame numbers corresponding to every non-blank token. 
+ """ + timesteps = [] + for i, token_idx in enumerate(token_idxs): + if token_idx == self.blank: + continue + if i == 0 or token_idx != token_idxs[i-1]: + timesteps.append(i) + return timesteps + + def decode( + self, + emissions: torch.FloatTensor, + ) -> List[List[Dict[str, torch.LongTensor]]]: + B, T, N = emissions.size() + hypos = [] + for b in range(B): + emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) + results = self.decoder.decode(emissions_ptr, T, N) + + nbest_results = results[: self.nbest] + hypos.append( + [ + { + "tokens": self.get_tokens(result.tokens), + "score": result.score, + "timesteps": self.get_timesteps(result.tokens), + "words": [ + self.word_dict.get_entry(x) for x in result.words if x >= 0 + ], + } + for result in nbest_results + ] + ) + return hypos + + +FairseqLMState = namedtuple( + "FairseqLMState", + [ + "prefix", + "incremental_state", + "probs", + ], +) + + +class FairseqLM(LM): + def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None: + super().__init__() + + self.dictionary = dictionary + self.model = model + self.unk = self.dictionary.unk() + + self.save_incremental = False # this currently does not work properly + self.max_cache = 20_000 + + if torch.cuda.is_available(): + model.cuda() + model.eval() + model.make_generation_fast_() + + self.states = {} + self.stateq = deque() + + def start(self, start_with_nothing: bool) -> LMState: + state = LMState() + prefix = torch.LongTensor([[self.dictionary.eos()]]) + incremental_state = {} if self.save_incremental else None + with torch.no_grad(): + res = self.model(prefix.cuda(), incremental_state=incremental_state) + probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) + + if incremental_state is not None: + incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) + self.states[state] = FairseqLMState( + prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() + ) + self.stateq.append(state) + + return state + + def score( + self, + state: LMState, + token_index: int, + no_cache: bool = False, + ) -> Tuple[LMState, int]: + """ + Evaluate language model based on the current lm state and new word + Parameters: + ----------- + state: current lm state + token_index: index of the word + (can be lexicon index then you should store inside LM the + mapping between indices of lexicon and lm, or lm index of a word) + Returns: + -------- + (LMState, float): pair of (new state, score for the current word) + """ + curr_state = self.states[state] + + def trim_cache(targ_size: int) -> None: + while len(self.stateq) > targ_size: + rem_k = self.stateq.popleft() + rem_st = self.states[rem_k] + rem_st = FairseqLMState(rem_st.prefix, None, None) + self.states[rem_k] = rem_st + + if curr_state.probs is None: + new_incremental_state = ( + curr_state.incremental_state.copy() + if curr_state.incremental_state is not None + else None + ) + with torch.no_grad(): + if new_incremental_state is not None: + new_incremental_state = apply_to_sample( + lambda x: x.cuda(), new_incremental_state + ) + elif self.save_incremental: + new_incremental_state = {} + + res = self.model( + torch.from_numpy(curr_state.prefix).cuda(), + incremental_state=new_incremental_state, + ) + probs = self.model.get_normalized_probs( + res, log_probs=True, sample=None + ) + + if new_incremental_state is not None: + new_incremental_state = apply_to_sample( + lambda x: x.cpu(), new_incremental_state + ) + + curr_state = FairseqLMState( + curr_state.prefix, new_incremental_state, probs[0, 
-1].cpu().numpy() + ) + + if not no_cache: + self.states[state] = curr_state + self.stateq.append(state) + + score = curr_state.probs[token_index].item() + + trim_cache(self.max_cache) + + outstate = state.child(token_index) + if outstate not in self.states and not no_cache: + prefix = np.concatenate( + [curr_state.prefix, torch.LongTensor([[token_index]])], -1 + ) + incr_state = curr_state.incremental_state + + self.states[outstate] = FairseqLMState(prefix, incr_state, None) + + if token_index == self.unk: + score = float("-inf") + + return outstate, score + + def finish(self, state: LMState) -> Tuple[LMState, int]: + """ + Evaluate eos for language model based on the current lm state + Returns: + -------- + (LMState, float): pair of (new state, score for the current word) + """ + return self.score(state, self.dictionary.eos()) + + def empty_cache(self) -> None: + self.states = {} + self.stateq = deque() + gc.collect() + + +class FairseqLMDecoder(BaseDecoder): + def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: + super().__init__(tgt_dict) + + self.nbest = cfg.nbest + self.unitlm = cfg.unitlm + + self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None + self.idx_to_wrd = {} + + checkpoint = torch.load(cfg.lmpath, map_location="cpu") + + if "cfg" in checkpoint and checkpoint["cfg"] is not None: + lm_args = checkpoint["cfg"] + else: + lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) + + if not OmegaConf.is_dict(lm_args): + lm_args = OmegaConf.create(lm_args) + + with open_dict(lm_args.task): + lm_args.task.data = osp.dirname(cfg.lmpath) + + task = tasks.setup_task(lm_args.task) + model = task.build_model(lm_args.model) + model.load_state_dict(checkpoint["model"], strict=False) + + self.trie = Trie(self.vocab_size, self.silence) + + self.word_dict = task.dictionary + self.unk_word = self.word_dict.unk() + self.lm = FairseqLM(self.word_dict, model) + + if self.lexicon: + start_state = self.lm.start(False) + for i, (word, spellings) in enumerate(self.lexicon.items()): + if self.unitlm: + word_idx = i + self.idx_to_wrd[i] = word + score = 0 + else: + word_idx = self.word_dict.index(word) + _, score = self.lm.score(start_state, word_idx, no_cache=True) + + for spelling in spellings: + spelling_idxs = [tgt_dict.index(token) for token in spelling] + assert ( + tgt_dict.unk() not in spelling_idxs + ), f"{spelling} {spelling_idxs}" + self.trie.insert(spelling_idxs, word_idx, score) + self.trie.smear(SmearingMode.MAX) + + self.decoder_opts = LexiconDecoderOptions( + beam_size=cfg.beam, + beam_size_token=cfg.beamsizetoken or len(tgt_dict), + beam_threshold=cfg.beamthreshold, + lm_weight=cfg.lmweight, + word_score=cfg.wordscore, + unk_score=cfg.unkweight, + sil_score=cfg.silweight, + log_add=False, + criterion_type=CriterionType.CTC, + ) + + self.decoder = LexiconDecoder( + self.decoder_opts, + self.trie, + self.lm, + self.silence, + self.blank, + self.unk_word, + [], + self.unitlm, + ) + else: + assert self.unitlm, "Lexicon-free decoding requires unit LM" + + d = {w: [[w]] for w in tgt_dict.symbols} + self.word_dict = create_word_dict(d) + self.lm = KenLM(cfg.lmpath, self.word_dict) + self.decoder_opts = LexiconFreeDecoderOptions( + beam_size=cfg.beam, + beam_size_token=cfg.beamsizetoken or len(tgt_dict), + beam_threshold=cfg.beamthreshold, + lm_weight=cfg.lmweight, + sil_score=cfg.silweight, + log_add=False, + criterion_type=CriterionType.CTC, + ) + self.decoder = LexiconFreeDecoder( + self.decoder_opts, self.lm, self.silence, self.blank, [] + ) + + 
def decode( + self, + emissions: torch.FloatTensor, + ) -> List[List[Dict[str, torch.LongTensor]]]: + B, T, N = emissions.size() + hypos = [] + + def make_hypo(result: DecodeResult) -> Dict[str, Any]: + hypo = { + "tokens": self.get_tokens(result.tokens), + "score": result.score, + } + if self.lexicon: + hypo["words"] = [ + self.idx_to_wrd[x] if self.unitlm else self.word_dict[x] + for x in result.words + if x >= 0 + ] + return hypo + + for b in range(B): + emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) + results = self.decoder.decode(emissions_ptr, T, N) + + nbest_results = results[: self.nbest] + hypos.append([make_hypo(result) for result in nbest_results]) + self.lm.empty_cache() + + return hypos diff --git a/triton_model/1/aqc/examples/speech_recognition/new/decoders/viterbi_decoder.py b/triton_model/1/aqc/examples/speech_recognition/new/decoders/viterbi_decoder.py new file mode 100644 index 0000000..b1c4786 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/new/decoders/viterbi_decoder.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from typing import List, Dict + +from .base_decoder import BaseDecoder + + +class ViterbiDecoder(BaseDecoder): + def decode( + self, + emissions: torch.FloatTensor, + ) -> List[List[Dict[str, torch.LongTensor]]]: + def get_pred(e): + toks = e.argmax(dim=-1).unique_consecutive() + return toks[toks != self.blank] + + return [[{"tokens": get_pred(x), "score": 0}] for x in emissions] diff --git a/triton_model/1/aqc/examples/speech_recognition/new/infer.py b/triton_model/1/aqc/examples/speech_recognition/new/infer.py new file mode 100644 index 0000000..e45e768 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/new/infer.py @@ -0,0 +1,475 @@ +#!/usr/bin/env python -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
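+
+# Editorial sketch (not part of the original file): a minimal local run,
+# mirroring the README above; all paths are placeholders.
+#
+#   python infer.py task=audio_pretraining task.data=/path/to/manifests \
+#       common_eval.path=/path/to/checkpoint.pt decoding.type=viterbi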
+ +import ast +import hashlib +import logging +import os +import shutil +import sys +from dataclasses import dataclass, field, is_dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union + +import editdistance +import torch +import torch.distributed as dist +from examples.speech_recognition.new.decoders.decoder_config import ( + DecoderConfig, + FlashlightDecoderConfig, +) +from examples.speech_recognition.new.decoders.decoder import Decoder +from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils +from fairseq.data.data_utils import post_process +from fairseq.dataclass.configs import ( + CheckpointConfig, + CommonConfig, + CommonEvalConfig, + DatasetConfig, + DistributedTrainingConfig, + FairseqDataclass, +) +from fairseq.logging.meters import StopwatchMeter, TimeMeter +from fairseq.logging.progress_bar import BaseProgressBar +from fairseq.models.fairseq_model import FairseqModel +from omegaconf import OmegaConf + +import hydra +from hydra.core.config_store import ConfigStore + +logging.root.setLevel(logging.INFO) +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +config_path = Path(__file__).resolve().parent / "conf" + + +@dataclass +class DecodingConfig(DecoderConfig, FlashlightDecoderConfig): + unique_wer_file: bool = field( + default=False, + metadata={"help": "If set, use a unique file for storing WER"}, + ) + results_path: Optional[str] = field( + default=None, + metadata={ + "help": "If set, write hypothesis and reference sentences into this directory" + }, + ) + + +@dataclass +class InferConfig(FairseqDataclass): + task: Any = None + decoding: DecodingConfig = DecodingConfig() + common: CommonConfig = CommonConfig() + common_eval: CommonEvalConfig = CommonEvalConfig() + checkpoint: CheckpointConfig = CheckpointConfig() + distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() + dataset: DatasetConfig = DatasetConfig() + is_ax: bool = field( + default=False, + metadata={ + "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume" + }, + ) + + +def reset_logging(): + root = logging.getLogger() + for handler in root.handlers: + root.removeHandler(handler) + root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper()) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter( + logging.Formatter( + fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + ) + root.addHandler(handler) + + +class InferenceProcessor: + cfg: InferConfig + + def __init__(self, cfg: InferConfig) -> None: + self.cfg = cfg + self.task = tasks.setup_task(cfg.task) + + models, saved_cfg = self.load_model_ensemble() + self.models = models + self.saved_cfg = saved_cfg + self.tgt_dict = self.task.target_dictionary + + self.task.load_dataset( + self.cfg.dataset.gen_subset, + task_cfg=saved_cfg.task, + ) + self.generator = Decoder(cfg.decoding, self.tgt_dict) + self.gen_timer = StopwatchMeter() + self.wps_meter = TimeMeter() + self.num_sentences = 0 + self.total_errors = 0 + self.total_length = 0 + + self.hypo_words_file = None + self.hypo_units_file = None + self.ref_words_file = None + self.ref_units_file = None + + self.progress_bar = self.build_progress_bar() + + def __enter__(self) -> "InferenceProcessor": + if self.cfg.decoding.results_path is not None: + self.hypo_words_file = self.get_res_file("hypo.word") + self.hypo_units_file = self.get_res_file("hypo.units") + self.ref_words_file = 
self.get_res_file("ref.word") + self.ref_units_file = self.get_res_file("ref.units") + return self + + def __exit__(self, *exc) -> bool: + if self.cfg.decoding.results_path is not None: + self.hypo_words_file.close() + self.hypo_units_file.close() + self.ref_words_file.close() + self.ref_units_file.close() + return False + + def __iter__(self) -> Any: + for sample in self.progress_bar: + if not self.cfg.common.cpu: + sample = utils.move_to_cuda(sample) + + # Happens on the last batch. + if "net_input" not in sample: + continue + yield sample + + def log(self, *args, **kwargs): + self.progress_bar.log(*args, **kwargs) + + def print(self, *args, **kwargs): + self.progress_bar.print(*args, **kwargs) + + def get_res_file(self, fname: str) -> None: + fname = os.path.join(self.cfg.decoding.results_path, fname) + if self.data_parallel_world_size > 1: + fname = f"{fname}.{self.data_parallel_rank}" + return open(fname, "w", buffering=1) + + def merge_shards(self) -> None: + """Merges all shard files into shard 0, then removes shard suffix.""" + + shard_id = self.data_parallel_rank + num_shards = self.data_parallel_world_size + + if self.data_parallel_world_size > 1: + + def merge_shards_with_root(fname: str) -> None: + fname = os.path.join(self.cfg.decoding.results_path, fname) + logger.info("Merging %s on shard %d", fname, shard_id) + base_fpath = Path(f"{fname}.0") + with open(base_fpath, "a") as out_file: + for s in range(1, num_shards): + shard_fpath = Path(f"{fname}.{s}") + with open(shard_fpath, "r") as in_file: + for line in in_file: + out_file.write(line) + shard_fpath.unlink() + shutil.move(f"{fname}.0", fname) + + dist.barrier() # ensure all shards finished writing + if shard_id == (0 % num_shards): + merge_shards_with_root("hypo.word") + if shard_id == (1 % num_shards): + merge_shards_with_root("hypo.units") + if shard_id == (2 % num_shards): + merge_shards_with_root("ref.word") + if shard_id == (3 % num_shards): + merge_shards_with_root("ref.units") + dist.barrier() + + def optimize_model(self, model: FairseqModel) -> None: + model.make_generation_fast_() + if self.cfg.common.fp16: + model.half() + if not self.cfg.common.cpu: + model.cuda() + + def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]: + arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides) + models, saved_cfg = checkpoint_utils.load_model_ensemble( + utils.split_paths(self.cfg.common_eval.path, separator="\\"), + arg_overrides=arg_overrides, + task=self.task, + suffix=self.cfg.checkpoint.checkpoint_suffix, + strict=(self.cfg.checkpoint.checkpoint_shard_count == 1), + num_shards=self.cfg.checkpoint.checkpoint_shard_count, + ) + for model in models: + self.optimize_model(model) + return models, saved_cfg + + def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None: + return self.task.get_batch_iterator( + dataset=self.task.dataset(self.cfg.dataset.gen_subset), + max_tokens=self.cfg.dataset.max_tokens, + max_sentences=self.cfg.dataset.batch_size, + max_positions=(sys.maxsize, sys.maxsize), + ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, + required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, + seed=self.cfg.common.seed, + num_shards=self.data_parallel_world_size, + shard_id=self.data_parallel_rank, + num_workers=self.cfg.dataset.num_workers, + data_buffer_size=self.cfg.dataset.data_buffer_size, + disable_iterator_cache=disable_iterator_cache, + ).next_epoch_itr(shuffle=False) + + def build_progress_bar( + self, + epoch: 
Optional[int] = None,
+        prefix: Optional[str] = None,
+        default_log_format: str = "tqdm",
+    ) -> BaseProgressBar:
+        return progress_bar.progress_bar(
+            iterator=self.get_dataset_itr(),
+            log_format=self.cfg.common.log_format,
+            log_interval=self.cfg.common.log_interval,
+            epoch=epoch,
+            prefix=prefix,
+            tensorboard_logdir=self.cfg.common.tensorboard_logdir,
+            default_log_format=default_log_format,
+        )
+
+    @property
+    def data_parallel_world_size(self):
+        if self.cfg.distributed_training.distributed_world_size == 1:
+            return 1
+        return distributed_utils.get_data_parallel_world_size()
+
+    @property
+    def data_parallel_rank(self):
+        if self.cfg.distributed_training.distributed_world_size == 1:
+            return 0
+        return distributed_utils.get_data_parallel_rank()
+
+    def process_sentence(
+        self,
+        sample: Dict[str, Any],
+        hypo: Dict[str, Any],
+        sid: int,
+        batch_id: int,
+    ) -> Tuple[int, int]:
+        speaker = None  # Speaker can't be parsed from dataset.
+
+        if "target_label" in sample:
+            toks = sample["target_label"]
+        else:
+            toks = sample["target"]
+        toks = toks[batch_id, :]
+
+        # Processes hypothesis.
+        hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu())
+        if "words" in hypo:
+            hyp_words = " ".join(hypo["words"])
+        else:
+            hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process)
+
+        # Processes target.
+        target_tokens = utils.strip_pad(toks, self.tgt_dict.pad())
+        tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu())
+        tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process)
+
+        if self.cfg.decoding.results_path is not None:
+            print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file)
+            print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file)
+            print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file)
+            print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file)
+
+        if not self.cfg.common_eval.quiet:
+            logger.info(f"HYPO: {hyp_words}")
+            logger.info(f"REF: {tgt_words}")
+            logger.info("---------------------")
+
+        hyp_words, tgt_words = hyp_words.split(), tgt_words.split()
+
+        return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
+
+    def process_sample(self, sample: Dict[str, Any]) -> None:
+        self.gen_timer.start()
+        hypos = self.task.inference_step(
+            generator=self.generator,
+            models=self.models,
+            sample=sample,
+        )
+        num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
+        self.gen_timer.stop(num_generated_tokens)
+        self.wps_meter.update(num_generated_tokens)
+
+        for batch_id, sample_id in enumerate(sample["id"].tolist()):
+            errs, length = self.process_sentence(
+                sample=sample,
+                sid=sample_id,
+                batch_id=batch_id,
+                hypo=hypos[batch_id][0],
+            )
+            self.total_errors += errs
+            self.total_length += length
+
+        self.log({"wps": round(self.wps_meter.avg)})
+        if "nsentences" in sample:
+            self.num_sentences += sample["nsentences"]
+        else:
+            self.num_sentences += sample["id"].numel()
+
+    def log_generation_time(self) -> None:
+        logger.info(
+            "Processed %d sentences (%d tokens) in %.1fs (%.2f "
+            "sentences per second, %.2f tokens per second)",
+            self.num_sentences,
+            self.gen_timer.n,
+            self.gen_timer.sum,
+            self.num_sentences / (self.gen_timer.sum + 1e-6),
+            1.0 / (self.gen_timer.avg + 1e-6),
+        )
+
+
+def parse_wer(wer_file: Path) -> float:
+    with open(wer_file, "r") as f:
+        return float(f.readline().strip().split(" ")[1])
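+
+# Editorial note (sketch): parse_wer assumes the summary layout written by
+# main() below, whose first line looks like "WER: 12.34"; splitting that line
+# on spaces and taking field 1 recovers the float.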
= os.path.join(cfg.decoding.results_path, base_path) + + if cfg.decoding.unique_wer_file: + yaml_str = OmegaConf.to_yaml(cfg.decoding) + fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16) + return Path(f"{base_path}.{fid % 1000000}") + else: + return Path(base_path) + + +def main(cfg: InferConfig) -> float: + """Entry point for the main processing logic. + + Args: + cfg: The inference configuration to use. + + Returns: + The final WER. + """ + + yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg) + + # Validates the provided configuration. + if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: + cfg.dataset.max_tokens = 4000000 + if not cfg.common.cpu and not torch.cuda.is_available(): + raise ValueError("CUDA not found; set `cpu=True` to run without CUDA") + + logger.info(cfg.common_eval.path) + + with InferenceProcessor(cfg) as processor: + for sample in processor: + processor.process_sample(sample) + + processor.log_generation_time() + + if cfg.decoding.results_path is not None: + processor.merge_shards() + + errs_t, leng_t = processor.total_errors, processor.total_length + + if cfg.common.cpu: + logger.warning("Merging WER requires CUDA.") + elif processor.data_parallel_world_size > 1: + stats = torch.LongTensor([errs_t, leng_t]).cuda() + dist.all_reduce(stats, op=dist.ReduceOp.SUM) + errs_t, leng_t = stats[0].item(), stats[1].item() + + wer = errs_t * 100.0 / leng_t + + if distributed_utils.is_master(cfg.distributed_training): + with open(wer_file, "w") as f: + f.write( + ( + f"WER: {wer}\n" + f"err / num_ref_words = {errs_t} / {leng_t}\n\n" + f"{yaml_str}" + ) + ) + + return wer + + +@hydra.main(config_path=config_path, config_name="infer") +def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]: + container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True) + cfg = OmegaConf.create(container) + OmegaConf.set_struct(cfg, True) + + if cfg.common.reset_logging: + reset_logging() + + utils.import_user_module(cfg.common) + + # logger.info("Config:\n%s", OmegaConf.to_yaml(cfg)) + wer = float("inf") + + try: + if cfg.common.profile: + with torch.cuda.profiler.profile(): + with torch.autograd.profiler.emit_nvtx(): + distributed_utils.call_main(cfg, main) + else: + distributed_utils.call_main(cfg, main) + + wer = parse_wer(get_wer_file(cfg)) + except BaseException as e: # pylint: disable=broad-except + if not cfg.common.suppress_crashes: + raise + else: + logger.error("Crashed! 
%s", str(e)) + + logger.info("Word error rate: %.4f", wer) + if cfg.is_ax: + return wer, None + + return wer + + +def cli_main() -> None: + try: + from hydra._internal.utils import ( + get_args, + ) # pylint: disable=import-outside-toplevel + + cfg_name = get_args().config_name or "infer" + except ImportError: + logger.warning("Failed to get config name from hydra args") + cfg_name = "infer" + + cs = ConfigStore.instance() + cs.store(name=cfg_name, node=InferConfig) + + for k in InferConfig.__dataclass_fields__: + if is_dataclass(InferConfig.__dataclass_fields__[k].type): + v = InferConfig.__dataclass_fields__[k].default + cs.store(name=k, node=v) + + hydra_main() # pylint: disable=no-value-for-parameter + + +if __name__ == "__main__": + cli_main() diff --git a/triton_model/1/aqc/examples/speech_recognition/tasks/__init__.py b/triton_model/1/aqc/examples/speech_recognition/tasks/__init__.py new file mode 100644 index 0000000..7ac3b8d --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/tasks/__init__.py @@ -0,0 +1,8 @@ +import importlib +import os + + +for file in sorted(os.listdir(os.path.dirname(__file__))): + if file.endswith(".py") and not file.startswith("_"): + task_name = file[: file.find(".py")] + importlib.import_module("examples.speech_recognition.tasks." + task_name) diff --git a/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/__init__.cpython-310.pyc b/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddcc8d4c4c64dff5d34ba20a9292477c84e21119 GIT binary patch literal 434 zcmZWmJxjzu5Zy^~m)xBvet{@fcGqOF7qPfbu(1os5^qOuHOa>8ih7kGHvWpF^q2S_ zwzcvv2!fLfg5bc;ytg0o-mrQ+9x{rP*LPe8#y(o|H~K`5X&FgLG6@+=x(W%Phfw@nq}5cuJiVRqU-$gKd%kqfg-|q&5beCtVb}VI{(_md Sd&DBx^|xRHR>6A;zJCCBe1gCL literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/__init__.cpython-39.pyc b/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a0c20e3e653823528664c782c6e91b1311f913f GIT binary patch literal 459 zcmZWmyH3L}6!mRFleR&cZLM))L%fPzf66()ckgjIkr z3s*bXZJ>cSxF*x{NrcfV7M%w4(4(FR8ZaIS@T}j?zjuzwWPoI~D?%I`kp_S)h5~NM zXy?l-k(*_Z)~^oO@)TcqSr=$hQzK?M)0nA;N?NI^)N(U7=_qkMS=LJ1Lf*NKGOlYp zLE)l88Y`rBVa;vsA`dHGqKg^3mjyEB!nV2?D`B3c-_kdhYrDmMtrD)+im=VGWUInT zB_0c06TRo3x=G&|%~grClJneOXmVO>HN(srDy1#Q+~~2@(yEFIZu!|0W@Em{=&w2Y i-yGeTql`&!ma+Cb;)|{FrEe86ZRcW$gA+J}1NZ^5T#Zry literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/speech_recognition.cpython-310.pyc b/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/speech_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49e645d83b7b0285bbef4e084f4a6fb6caebfc55 GIT binary patch literal 6081 zcmb7ITW=f372erhE|)jme2a@TT_`s*wkS)ro!C{K1aX`+an&FW8iZ)NUU62`N=q(1 zGqf#+4BFB_f&xYmpzTXvq`vu=^grxl`xNvsZw*@aJF}!j$x_-bvBSB~oO8Z&+oVz{ z8hGCS{a?dVrwrro)R=s7XuN?}{ssj%IE#&X_Rs1}x6Qh#|E;>E|LwYse>2XtoVvqk zt`+B7Zr#;&J1(?}^`frl;!>+zFSjc73fd0OKQ!u7VTHRx_Kv{|y!g=I#jrfIhO9o# z*=?g*`UbZ!R;w&6zA43xKm{^XXibZ-69;Qy?91)eS`yQQ8&N|=NjnhRRWqHv6*hy$ z_RSy?GQ9gyAh*(r2%C{qq42TGR>P#7M^735E?x_-Jjo108Jld!>>ImGR+Porj!pF; z`yErcK8PKfU$qCOa(424bC+@JtTC{Z+sDPZy=BSy_YBM|^sSxZuEBHM0bzOWKC%XO zMrPl>VGP+I$4f-?sO2<`L7wWLo*lTVv{UBgzRT^i#;!Ri^z%EFeqm>dS8(3c8AI2n zci9=-iy3!c=Q9&K75VH$tMrAj&iEX9+tF 
zw3u<*STf}BKK7;GCwTb@2;*e%1gt4~cs>ZgR*yp--lk02UeM-VLxh3KIw7Q*o_9M8 zJ=qXZN6E!N3cmxn`Lv?e+4k;lM2!uv6EwE6P1Z%y@OAE_aFJ1ic!o`crmk$i;2~oS#RZn&6p*Utbot7qg{4c&%U7?0 zTUW1OgMC)g;k8;OL8=e*7FpRlHPS+_ohWZT>9#qR4pLLB)ol`r07i@k72}8Mq*=G| z%9ALR0Ucszp+|V3J3~@+d-WTdnC7&Y(!!od9<$Vn+NAZ4)Hrx-h$+nc=jx&okxJS= zK$Q@@7MB)-yN$&WT;!tcgkfU?!;Pfbj>tGHlIkBd06eDdN*uJ-cyN6QyMIge;iS%gYZh6G=f-OuZ`Dz1jA!(h1+Rfg!e*d$N@IchBjVe!?(|(Xo3x& z76+^ky9!2(81l7w*91E(K?X?_P^312Pt*L^OwHQGlu6ynifNisT!~@Z8cC~j{Sw;x z_Rku@Ze=l(s&CAp^i=sDsu6F+Q>cT{+ zsZ5#w4RPn#^97-1k|lzi*!Q1$jb{Ads9ha&YAjwrMN>65(5?-Zn2j+>c6SH1dzX>L z98v2Wd&2`90>0rZvNhQyOE{@&iZl32S#%VWRxp$?>9zk$W&qzJpD%;?nf+ImF1 zXI}82J2}~jBPEWL(NA!^B*Zy}X+HPFdGZvc6d0VKS(DHO_t;<9m*CW{@%1FdIME6d zxks~t7Q3pcd=4*t2p?}?S1qa4>P%`TQXnodQYVsRX4Bc5C|478lT6LqLL?$}))UbR zRB9)ku$|iEPf|Nh0-jp18ZZ-yP$7pUQDIBMGKJ_01JT$>ouGpy7;1{7+Yto1YB`-E z-PStoYfXEe&S&fOvhL4}j;v2*+xnw+S_rya&zG=IT&6C}O|S*E7tiSRt+cc*f>!9u zs25@msLu$W$FR^^d-hmS&tZ}}Y{Iq@O^-~LwLlTk z^2i#sUBxy{gn5WSdFfGZ)LZ5h`W5(8*2n4QGrd>d5B0 zm3V3+L`VbMFKVex`01S{OwkavJRX4xl^{rEsl5@#o!<1qf&|3djc`G$Y?nmyw?F;1 z_aoiZoES~;&UIzTy*0#6%W)OnCeOq5pc^Z39cy~07ZzHQu{oaven*s z40!EsYb_LBvhHbMN-t{b`(_I~1j=2yM%5OtW4qK=+nq4Yw?Y-%3&epV6lf0B{t*#* zw!??$>+cs6?VTdGiOlvH-J3)2iYjpjtulAc7()|DN@0+rnX1yy4K-8lE~_b? z-*J#!*wiy5M!`X#MG=kLtKJS(x1%9P8swb|Yt7mPk1RLzVwAl_p{&+=F12gwf$Ggs zUUw+ibQ`MS$BC4^(h61A;-I~C9jK5^-K{-g?{t5E*KG&Ikfb=4I^7Bx;MF zQu{?JUZUbu!{4j zSDn;q#8QxGQfKQvAt5agar>@D{1AhO5ne>B+3o05m19^VaK<7}P-I1uAaWA+f6fF{ zyrWUQzroD|z$T;+Vl-OF#f+2PO>5Ies+%K-12wjC`bc{>^Fx4r)9ter1niKb)!V*B z5(~U$bT4dcvxc)^q!D1$va}6JgZCy#%~CdM(W7U0=cE$VYhD|!kxUU8%+Wvw2W|B) zc=v)h;uky>N>$xUVS4$`1~u~ctJT836vYRiLbEuNq?VtK|40o}mkcj%D62U^_CoxK zdh-aD*P?bPKBlf^DzrZCDP>wFV2OE+fGW{0JroEL z;cYq6ZD<}mE;Q^kx40d%mN_5~lp_&2l&c@w0&|skMS!QcQKwD zTN)CBw z+``y2#@-Lxw|+hzDXeE-SoG)#jE=sz?9HgUX&$a3P)ejPk2A9xeCMIetTu5a@oR7> zoy|Okju^A=I>JZ4xI@LeG-G--$UFt$PT@=PZ3BA(t?^ng#@+9bZM@W?y%nV31TUx%8;x-fJwxF6hC>{ zrwcEL+ zUu8FuY9U%7BkcQW(f5I&ZcO#E@89hP@n|IP`#fn-w}cKy4FrQ52^a6+KpnKCB@NqA z_b1f-D=J8pQnw?Jp$WCE@h3KDV2cJSqZp_W#_>1~(umaxsUsrFhi~B3>85MgrfC;h z`4pAKnR3CLEiyA#WHUKIGvU*#_{&^EjcT+u&A})%HtORKi1H}ALu#fI!St2yGf!F@ z$8KI`e!e`ZsybjuE8{MbwiM)R1*yDF%=A?!aJ=to+?9;~*U3&u${*dF3j>57d_DbU Lm}@!9r`dl1rj`9} literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/speech_recognition.cpython-39.pyc b/triton_model/1/aqc/examples/speech_recognition/tasks/__pycache__/speech_recognition.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6418c6269007837380204a2fbe62924abe57644 GIT binary patch literal 6132 zcmb7I*>l^*8OKEsJaqbs(+gFSiJ6F$rP_(@x^B`qPU_Wpm&$H z#gLh{@=WsLNz-Zj(if?3{+BR)YM=7lw|3g}_bo_?x{`(*EcW@cc#kq zLF~5m+2011a(D85Yu9AlP2j%CbB69u?V9I+ zmucMpm`#tJRAe(_qtYkFn#pD{-m#%6ZNnn+r{2p{@uQOspgA8?ya=a0fxC+64m z%tyMN>nbJZuKC%BP@22+>Z^;@rK^`N{-ApC;?m^cx3|2d#{LbDdDFV{vXlwK@xg;9_kp4jXd^AO{CG&Lv%S;+2X&B4p;?esg*GXMS<+ z;8wbKP9>YXox%73IuRsd1a#@j;^le&$}5Y9aN5Ig47<6-%dcFjE?!z%x_TAdy7~$Z z*k>hOUaMsir2ar3k+r=uLp$`_iSpNyZku82AhpD5-65d}=!nsvX7r;z+14FA@-!M{ zz>3WCup~UNo^3LAXXOh5OmkXHX<<(!PtDYh+GO{x)OzsL5R-uXS8HKCY4L@2w7E_S zv%b&~$p&vg)uKqXs?fKp5|K*U3#6z^4|t;*-ft`nwMj0>4(E;aK=4M=Y)9lh7RWao zw*Ylbz3Xw0>vQYA176vJ{FNx6NgdKm&5);7YSk{L zrqrXXnIe$l^%$Wpda%aQeF@x!NHo+#_f0iF%xSFbf2kY)^({2{%%lh&Nm6T1KB8YJr zfpg|x%}>FlUE}j{YH^#*6S+sPg5i6rsRD-3en@X#!>O9=mCAH#B~lO{Q#X?2nbVnD zXjc+-i@ejDLL?$}*AmeRRq7-i-cB6~L8%ibAxmvI8hDmOD9+$~6mLm5Gma@9ipF~C zh8=9dQd1<|jv!^Ml+#JFcx~x{w!iPw{pMP|tmiX7CF_&fvB7Yh7Q!ym@DeVK$<%`j z3OC^l#5ujcm6p~-*y4eVdK@sIJ|o=6vKD%J{zNg)0!ET<8qbu?Nt2#w(=uKBt^c^W 
zNsJxS!<>&E=SH*1P3fKO+Sy21t>H2xzGPCwa3!wpIYkqVl1n>);6_G74fJ*RUG zhvgo7b(iO}BoFf*JEb!pvtG~;B~}JRk(by6o76kY=ufd}-Jd}IGE2$fCvb`NDat(3 zGk3JJesAAbQ&!iztRoY35sHMbL>RTBcGJ)3@yA@d-%1#dvzT^3Q7P!;@w`E_Eu-lh z8}PS8(-X_gMxcmjd14R8o-#KqWQIsuS?Nh`I9p~DdTuhCV}$v1@1@rn^C_-Wfm9<} ziyD3?n%x#}E9tAmSL+-(5aNNTr4DIN?<{E)Ez!tl5vWiJ@>(->)_L6NO;xKB3f^w; zsy5axiRSNr{%!AjdZ;-u1n@6(C1?IBlC7nezN9kwUmC;*7#-{eB{<$LZjk{AK|#vQCl6J=388a4?=O^ z3I&?E+P@;AXm|J$jsD&x*_ORaRw_r z507@f28{hAR7V#asCX!pk#l~BPWU65VZ~!^pxQybVe7Hew^0GTq;kyKajCB>p!^Oh zon3R_^v#U|vo|eqyYK82nL}i@YkD?^*+;6xT#U-hJ!foNsB2~iIf7IZ{oJ-@%6+CL zS$@YwJ>t;JHZcm}0V4~H@_Lm!Ty;BI<;a5k3%u5>&HLoHVHd-KFXFON>v=S;sYj|e zOQqnU?9^+hMi3`b_Da{OyBde>%^P(ulubRYV_@%WfB)1i%4&U5Xjz#SbKp@HW1Nj7 zY72^c;yG$amBb6wP#6>!(4+-aJg790P^Lvvm8?olb0TavIV#}r5t7cVf)%7OX|5xX z;ff0S6*sjTu@pb0d3W<6X+l~c;`RfL_!brqYj_d)XSbuPTZUz=frdS;<3Q0W;$1XP zo72{;+4GOr>iq+59Z+o26w(;2Ej4sT_3?(i;h;Lsk%~iW9Od>=DR1Prq3j!8-@J|t zA9A#NTgxP|&~HW$cw4(Q+yy6%)S{Nvd8jh{*FkDl(BcS!Z>2{zT#3<_G+G9{-Z&SqWwyxurEdNE~wBf&LpYjN0Ct5T0BmB!`H$3>7*o4-pp@HqUJ4#AeGHPz1`6i`lTra&^flW>D#mujjEI zW8K4gZji@1zGaN|D;JI{4?DDWahi5og_X2~5E3v1V8%r~|9~`nHPSgg-0sW=OYtwW zshb%{nnUp#x3D&awRd=X`Q714y|9*jn9-}JusZxuvj*? zL)u0MkDWphWXuzX7?OZ8gM*oQX4yK>TD=#Jm;ND4j-(}&n!G_rll4J3I(qb$--Drx zED&GQk>T3CkB1zsk>=LO0cOPC%hDoP3>;pw8#ei#Wn}q~@Nx80GgI?}>MZVI=`%c% za$dvfy?8toM>u{m9i-9v%bwM%k5Ms9C%>3(=a7bUcn4;cC23icbC3BcxLZ5|!FY zCLQvEhb?p_yJ}25{sgG6A@InaBLotIw4kQZAwg}Jy0zBG|CzlTW5iNkm1(m6Y9^5f zQV!*3-#h#;j+2L3%0`ZkD5{~}*WutaMUEjh`^lP~9SUJ@RvbtD1nu7g`O6;VR`0pv zMSB3gV%|crg^Yz9aS)`%Ab<{aW9pZK;C?rZhb#FYU`d0fC2TlqAR$~&n0ONx3QAnk zl2+T{^e<@oSJaRxr5?WGAT?53TK@#6g>_n(80J9@9>=3xNNcP%NL>+A@qQDJd=`yo zIgVu(&y=a1E*8o&c<1O51c#{pB3@Zgpt4E)1WlTQOK7auN8cRfabAavOc#Rnm%hq5 zX") + elif args.criterion == "asg_loss": + for i in range(1, args.max_replabel + 1): + tgt_dict.add_symbol(replabel_symbol(i)) + + print("| dictionary: {} types".format(len(tgt_dict))) + return cls(args, tgt_dict) + + def load_dataset(self, split, combine=False, **kwargs): + """Load a given dataset split. 
+ + Args: + split (str): name of the split (e.g., train, valid, test) + """ + data_json_path = os.path.join(self.args.data, "{}.json".format(split)) + self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict) + + def build_generator(self, models, args, **unused): + w2l_decoder = getattr(args, "w2l_decoder", None) + if w2l_decoder == "viterbi": + from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder + + return W2lViterbiDecoder(args, self.target_dictionary) + elif w2l_decoder == "kenlm": + from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder + + return W2lKenLMDecoder(args, self.target_dictionary) + elif w2l_decoder == "fairseqlm": + from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder + + return W2lFairseqLMDecoder(args, self.target_dictionary) + else: + return super().build_generator(models, args) + + @property + def target_dictionary(self): + """Return the :class:`~fairseq.data.Dictionary` for the language + model.""" + return self.tgt_dict + + @property + def source_dictionary(self): + """Return the source :class:`~fairseq.data.Dictionary` (if applicable + for this task).""" + return None + + def max_positions(self): + """Return the max speech and sentence length allowed by the task.""" + return (self.args.max_source_positions, self.args.max_target_positions) diff --git a/triton_model/1/aqc/examples/speech_recognition/utils/wer_utils.py b/triton_model/1/aqc/examples/speech_recognition/utils/wer_utils.py new file mode 100644 index 0000000..cf6f3d0 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/utils/wer_utils.py @@ -0,0 +1,381 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from __future__ import absolute_import, division, print_function, unicode_literals + +import re +from collections import deque +from enum import Enum + +import numpy as np + + +""" + Utility modules for computation of Word Error Rate, + Alignments, as well as more granular metrics like + deletions, insertions and substitutions.
+""" + + +class Code(Enum): + match = 1 + substitution = 2 + insertion = 3 + deletion = 4 + + +class Token(object): + def __init__(self, lbl="", st=np.nan, en=np.nan): + if np.isnan(st): + self.label, self.start, self.end = "", 0.0, 0.0 + else: + self.label, self.start, self.end = lbl, st, en + + +class AlignmentResult(object): + def __init__(self, refs, hyps, codes, score): + self.refs = refs # std::deque + self.hyps = hyps # std::deque + self.codes = codes # std::deque + self.score = score # float + + +def coordinate_to_offset(row, col, ncols): + return int(row * ncols + col) + + +def offset_to_row(offset, ncols): + return int(offset / ncols) + + +def offset_to_col(offset, ncols): + return int(offset % ncols) + + +def trimWhitespace(str): + return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str))) + + +def str2toks(str): + pieces = trimWhitespace(str).split(" ") + toks = [] + for p in pieces: + toks.append(Token(p, 0.0, 0.0)) + return toks + + +class EditDistance(object): + def __init__(self, time_mediated): + self.time_mediated_ = time_mediated + self.scores_ = np.nan # Eigen::Matrix + self.backtraces_ = ( + np.nan + ) # Eigen::Matrix backtraces_; + self.confusion_pairs_ = {} + + def cost(self, ref, hyp, code): + if self.time_mediated_: + if code == Code.match: + return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + elif code == Code.insertion: + return hyp.end - hyp.start + elif code == Code.deletion: + return ref.end - ref.start + else: # substitution + return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1 + else: + if code == Code.match: + return 0 + elif code == Code.insertion or code == Code.deletion: + return 3 + else: # substitution + return 4 + + def get_result(self, refs, hyps): + res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan) + + num_rows, num_cols = self.scores_.shape + res.score = self.scores_[num_rows - 1, num_cols - 1] + + curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols) + + while curr_offset != 0: + curr_row = offset_to_row(curr_offset, num_cols) + curr_col = offset_to_col(curr_offset, num_cols) + + prev_offset = self.backtraces_[curr_row, curr_col] + + prev_row = offset_to_row(prev_offset, num_cols) + prev_col = offset_to_col(prev_offset, num_cols) + + res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++ + res.hyps.appendleft(curr_col - 1) + if curr_row - 1 == prev_row and curr_col == prev_col: + res.codes.appendleft(Code.deletion) + elif curr_row == prev_row and curr_col - 1 == prev_col: + res.codes.appendleft(Code.insertion) + else: + # assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col) + ref_str = refs[res.refs[0]].label + hyp_str = hyps[res.hyps[0]].label + + if ref_str == hyp_str: + res.codes.appendleft(Code.match) + else: + res.codes.appendleft(Code.substitution) + + confusion_pair = "%s -> %s" % (ref_str, hyp_str) + if confusion_pair not in self.confusion_pairs_: + self.confusion_pairs_[confusion_pair] = 1 + else: + self.confusion_pairs_[confusion_pair] += 1 + + curr_offset = prev_offset + + return res + + def align(self, refs, hyps): + if len(refs) == 0 and len(hyps) == 0: + return np.nan + + # NOTE: we're not resetting the values in these matrices because every value + # will be overridden in the loop below. If this assumption doesn't hold, + # be sure to set all entries in self.scores_ and self.backtraces_ to 0. 
self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1)) + self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1)) + + num_rows, num_cols = self.scores_.shape + + for i in range(num_rows): + for j in range(num_cols): + if i == 0 and j == 0: + self.scores_[i, j] = 0.0 + self.backtraces_[i, j] = 0 + continue + + if i == 0: + self.scores_[i, j] = self.scores_[i, j - 1] + self.cost( + None, hyps[j - 1], Code.insertion + ) + self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols) + continue + + if j == 0: + self.scores_[i, j] = self.scores_[i - 1, j] + self.cost( + refs[i - 1], None, Code.deletion + ) + self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols) + continue + + # Below here both i and j are greater than 0 + ref = refs[i - 1] + hyp = hyps[j - 1] + best_score = self.scores_[i - 1, j - 1] + ( + self.cost(ref, hyp, Code.match) + if (ref.label == hyp.label) + else self.cost(ref, hyp, Code.substitution) + ) + + prev_row = i - 1 + prev_col = j - 1 + ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion) + if ins < best_score: + best_score = ins + prev_row = i + prev_col = j - 1 + + delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion) + if delt < best_score: + best_score = delt + prev_row = i - 1 + prev_col = j + + self.scores_[i, j] = best_score + self.backtraces_[i, j] = coordinate_to_offset( + prev_row, prev_col, num_cols + ) + + return self.get_result(refs, hyps) + + +class WERTransformer(object): + def __init__(self, hyp_str, ref_str, verbose=True): + self.ed_ = EditDistance(False) + self.id2oracle_errs_ = {} + self.utts_ = 0 + self.words_ = 0 + self.insertions_ = 0 + self.deletions_ = 0 + self.substitutions_ = 0 + + self.process(["dummy_str", hyp_str, ref_str]) + + if verbose: + print("'%s' vs '%s'" % (hyp_str, ref_str)) + self.report_result() + + def process(self, input): # std::vector<std::string>&& input + if len(input) < 3: + print( + "Input must be of the form <id> ... <hypo> <ref>
, got ", + len(input), + " inputs:", + ) + return None + + # Align + # std::vector hyps; + # std::vector refs; + + hyps = str2toks(input[-2]) + refs = str2toks(input[-1]) + + alignment = self.ed_.align(refs, hyps) + if alignment is None: + print("Alignment is null") + return np.nan + + # Tally errors + ins = 0 + dels = 0 + subs = 0 + for code in alignment.codes: + if code == Code.substitution: + subs += 1 + elif code == Code.insertion: + ins += 1 + elif code == Code.deletion: + dels += 1 + + # Output + row = input + row.append(str(len(refs))) + row.append(str(ins)) + row.append(str(dels)) + row.append(str(subs)) + # print(row) + + # Accumulate + kIdIndex = 0 + kNBestSep = "/" + + pieces = input[kIdIndex].split(kNBestSep) + + if len(pieces) == 0: + print( + "Error splitting ", + input[kIdIndex], + " on '", + kNBestSep, + "', got empty list", + ) + return np.nan + + id = pieces[0] + if id not in self.id2oracle_errs_: + self.utts_ += 1 + self.words_ += len(refs) + self.insertions_ += ins + self.deletions_ += dels + self.substitutions_ += subs + self.id2oracle_errs_[id] = [ins, dels, subs] + else: + curr_err = ins + dels + subs + prev_err = np.sum(self.id2oracle_errs_[id]) + if curr_err < prev_err: + self.id2oracle_errs_[id] = [ins, dels, subs] + + return 0 + + def report_result(self): + # print("---------- Summary ---------------") + if self.words_ == 0: + print("No words counted") + return + + # 1-best + best_wer = ( + 100.0 + * (self.insertions_ + self.deletions_ + self.substitutions_) + / self.words_ + ) + + print( + "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, " + "%0.2f%% dels, %0.2f%% subs)" + % ( + best_wer, + self.utts_, + self.words_, + 100.0 * self.insertions_ / self.words_, + 100.0 * self.deletions_ / self.words_, + 100.0 * self.substitutions_ / self.words_, + ) + ) + + def wer(self): + if self.words_ == 0: + wer = np.nan + else: + wer = ( + 100.0 + * (self.insertions_ + self.deletions_ + self.substitutions_) + / self.words_ + ) + return wer + + def stats(self): + if self.words_ == 0: + stats = {} + else: + wer = ( + 100.0 + * (self.insertions_ + self.deletions_ + self.substitutions_) + / self.words_ + ) + stats = dict( + { + "wer": wer, + "utts": self.utts_, + "numwords": self.words_, + "ins": self.insertions_, + "dels": self.deletions_, + "subs": self.substitutions_, + "confusion_pairs": self.ed_.confusion_pairs_, + } + ) + return stats + + +def calc_wer(hyp_str, ref_str): + t = WERTransformer(hyp_str, ref_str, verbose=0) + return t.wer() + + +def calc_wer_stats(hyp_str, ref_str): + t = WERTransformer(hyp_str, ref_str, verbose=0) + return t.stats() + + +def get_wer_alignment_codes(hyp_str, ref_str): + """ + INPUT: hypothesis string, reference string + OUTPUT: List of alignment codes (intermediate results from WER computation) + """ + t = WERTransformer(hyp_str, ref_str, verbose=0) + return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes + + +def merge_counts(x, y): + # Merge two hashes which have 'counts' as their values + # This can be used for example to merge confusion pair counts + # conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs']) + for k, v in y.items(): + if k not in x: + x[k] = 0 + x[k] += v + return x diff --git a/triton_model/1/aqc/examples/speech_recognition/w2l_decoder.py b/triton_model/1/aqc/examples/speech_recognition/w2l_decoder.py new file mode 100644 index 0000000..fbf2d35 --- /dev/null +++ b/triton_model/1/aqc/examples/speech_recognition/w2l_decoder.py @@ -0,0 +1,486 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. 
and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Flashlight decoders. +""" + +import gc +import itertools as it +import os.path as osp +from typing import List +import warnings +from collections import deque, namedtuple + +import numpy as np +import torch +from examples.speech_recognition.data.replabels import unpack_replabels +from fairseq import tasks +from fairseq.utils import apply_to_sample +from omegaconf import open_dict +from fairseq.dataclass.utils import convert_namespace_to_omegaconf + + +try: + from flashlight.lib.text.dictionary import create_word_dict, load_words + from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes + from flashlight.lib.text.decoder import ( + CriterionType, + LexiconDecoderOptions, + KenLM, + LM, + LMState, + SmearingMode, + Trie, + LexiconDecoder, + ) +except ImportError: + warnings.warn( + "flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python" + ) + LM = object + LMState = object + + +class W2lDecoder(object): + def __init__(self, args, tgt_dict): + self.tgt_dict = tgt_dict + self.vocab_size = len(tgt_dict) + self.nbest = args.nbest + + # criterion-specific init + self.criterion_type = CriterionType.CTC + self.blank = ( + tgt_dict.index("<ctc_blank>") + if "<ctc_blank>" in tgt_dict.indices + else tgt_dict.bos() + ) + if "<sep>" in tgt_dict.indices: + self.silence = tgt_dict.index("<sep>") + elif "|" in tgt_dict.indices: + self.silence = tgt_dict.index("|") + else: + self.silence = tgt_dict.eos() + self.asg_transitions = None + + def generate(self, models, sample, **unused): + """Generate a batch of inferences.""" + # model.forward normally channels prev_output_tokens into the decoder + # separately, but SequenceGenerator directly calls model.encoder + encoder_input = { + k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" + } + emissions = self.get_emissions(models, encoder_input) + return self.decode(emissions) + + def get_emissions(self, models, encoder_input): + """Run encoder and normalize emissions""" + model = models[0] + encoder_out = model(**encoder_input) + if hasattr(model, "get_logits"): + emissions = model.get_logits(encoder_out) # no need to normalize emissions + else: + emissions = model.get_normalized_probs(encoder_out, log_probs=True) + return emissions.transpose(0, 1).float().cpu().contiguous() + + def get_tokens(self, idxs): + """Normalize tokens by handling CTC blank, ASG replabels, etc.""" + idxs = (g[0] for g in it.groupby(idxs)) + idxs = filter(lambda x: x != self.blank, idxs) + return torch.LongTensor(list(idxs)) + + +class W2lViterbiDecoder(W2lDecoder): + def __init__(self, args, tgt_dict): + super().__init__(args, tgt_dict) + + def decode(self, emissions): + B, T, N = emissions.size() + hypos = [] + if self.asg_transitions is None: + transitions = torch.FloatTensor(N, N).zero_() + else: + transitions = torch.FloatTensor(self.asg_transitions).view(N, N) + viterbi_path = torch.IntTensor(B, T) + workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N)) + CpuViterbiPath.compute( + B, + T, + N, + get_data_ptr_as_bytes(emissions), + get_data_ptr_as_bytes(transitions), + get_data_ptr_as_bytes(viterbi_path), + get_data_ptr_as_bytes(workspace), + ) + return [ + [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}] + for b in range(B) + ] + + +class W2lKenLMDecoder(W2lDecoder): + def 
__init__(self, args, tgt_dict): + super().__init__(args, tgt_dict) + + self.unit_lm = getattr(args, "unit_lm", False) + + if args.lexicon: + self.lexicon = load_words(args.lexicon) + self.word_dict = create_word_dict(self.lexicon) + self.unk_word = self.word_dict.get_index("<unk>") + + self.lm = KenLM(args.kenlm_model, self.word_dict) + self.trie = Trie(self.vocab_size, self.silence) + + start_state = self.lm.start(False) + for i, (word, spellings) in enumerate(self.lexicon.items()): + word_idx = self.word_dict.get_index(word) + _, score = self.lm.score(start_state, word_idx) + for spelling in spellings: + spelling_idxs = [tgt_dict.index(token) for token in spelling] + assert ( + tgt_dict.unk() not in spelling_idxs + ), f"{spelling} {spelling_idxs}" + self.trie.insert(spelling_idxs, word_idx, score) + self.trie.smear(SmearingMode.MAX) + + self.decoder_opts = LexiconDecoderOptions( + beam_size=args.beam, + beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), + beam_threshold=args.beam_threshold, + lm_weight=args.lm_weight, + word_score=args.word_score, + unk_score=args.unk_weight, + sil_score=args.sil_weight, + log_add=False, + criterion_type=self.criterion_type, + ) + + if self.asg_transitions is None: + N = 768 + # self.asg_transitions = torch.FloatTensor(N, N).zero_() + self.asg_transitions = [] + + self.decoder = LexiconDecoder( + self.decoder_opts, + self.trie, + self.lm, + self.silence, + self.blank, + self.unk_word, + self.asg_transitions, + self.unit_lm, + ) + else: + assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" + from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions + + d = {w: [[w]] for w in tgt_dict.symbols} + self.word_dict = create_word_dict(d) + self.lm = KenLM(args.kenlm_model, self.word_dict) + self.decoder_opts = LexiconFreeDecoderOptions( + beam_size=args.beam, + beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), + beam_threshold=args.beam_threshold, + lm_weight=args.lm_weight, + sil_score=args.sil_weight, + log_add=False, + criterion_type=self.criterion_type, + ) + self.decoder = LexiconFreeDecoder( + self.decoder_opts, self.lm, self.silence, self.blank, [] + ) + + def get_timesteps(self, token_idxs: List[int]) -> List[int]: + """Returns frame numbers corresponding to every non-blank token. + + Parameters + ---------- + token_idxs : List[int] + IDs of decoded tokens. + + Returns + ------- + List[int] + Frame numbers corresponding to every non-blank token.
+ """ + timesteps = [] + for i, token_idx in enumerate(token_idxs): + if token_idx == self.blank: + continue + if i == 0 or token_idx != token_idxs[i-1]: + timesteps.append(i) + return timesteps + + def decode(self, emissions): + B, T, N = emissions.size() + hypos = [] + for b in range(B): + emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) + results = self.decoder.decode(emissions_ptr, T, N) + + nbest_results = results[: self.nbest] + hypos.append( + [ + { + "tokens": self.get_tokens(result.tokens), + "score": result.score, + "timesteps": self.get_timesteps(result.tokens), + "words": [ + self.word_dict.get_entry(x) for x in result.words if x >= 0 + ], + } + for result in nbest_results + ] + ) + return hypos + + +FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"]) + + +class FairseqLM(LM): + def __init__(self, dictionary, model): + LM.__init__(self) + self.dictionary = dictionary + self.model = model + self.unk = self.dictionary.unk() + + self.save_incremental = False # this currently does not work properly + self.max_cache = 20_000 + + model.cuda() + model.eval() + model.make_generation_fast_() + + self.states = {} + self.stateq = deque() + + def start(self, start_with_nothing): + state = LMState() + prefix = torch.LongTensor([[self.dictionary.eos()]]) + incremental_state = {} if self.save_incremental else None + with torch.no_grad(): + res = self.model(prefix.cuda(), incremental_state=incremental_state) + probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) + + if incremental_state is not None: + incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) + self.states[state] = FairseqLMState( + prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() + ) + self.stateq.append(state) + + return state + + def score(self, state: LMState, token_index: int, no_cache: bool = False): + """ + Evaluate language model based on the current lm state and new word + Parameters: + ----------- + state: current lm state + token_index: index of the word + (can be lexicon index then you should store inside LM the + mapping between indices of lexicon and lm, or lm index of a word) + + Returns: + -------- + (LMState, float): pair of (new state, score for the current word) + """ + curr_state = self.states[state] + + def trim_cache(targ_size): + while len(self.stateq) > targ_size: + rem_k = self.stateq.popleft() + rem_st = self.states[rem_k] + rem_st = FairseqLMState(rem_st.prefix, None, None) + self.states[rem_k] = rem_st + + if curr_state.probs is None: + new_incremental_state = ( + curr_state.incremental_state.copy() + if curr_state.incremental_state is not None + else None + ) + with torch.no_grad(): + if new_incremental_state is not None: + new_incremental_state = apply_to_sample( + lambda x: x.cuda(), new_incremental_state + ) + elif self.save_incremental: + new_incremental_state = {} + + res = self.model( + torch.from_numpy(curr_state.prefix).cuda(), + incremental_state=new_incremental_state, + ) + probs = self.model.get_normalized_probs( + res, log_probs=True, sample=None + ) + + if new_incremental_state is not None: + new_incremental_state = apply_to_sample( + lambda x: x.cpu(), new_incremental_state + ) + + curr_state = FairseqLMState( + curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy() + ) + + if not no_cache: + self.states[state] = curr_state + self.stateq.append(state) + + score = curr_state.probs[token_index].item() + + trim_cache(self.max_cache) + + outstate = state.child(token_index) 
+ if outstate not in self.states and not no_cache: + prefix = np.concatenate( + [curr_state.prefix, torch.LongTensor([[token_index]])], -1 + ) + incr_state = curr_state.incremental_state + + self.states[outstate] = FairseqLMState(prefix, incr_state, None) + + if token_index == self.unk: + score = float("-inf") + + return outstate, score + + def finish(self, state: LMState): + """ + Evaluate eos for language model based on the current lm state + + Returns: + -------- + (LMState, float): pair of (new state, score for the current word) + """ + return self.score(state, self.dictionary.eos()) + + def empty_cache(self): + self.states = {} + self.stateq = deque() + gc.collect() + + +class W2lFairseqLMDecoder(W2lDecoder): + def __init__(self, args, tgt_dict): + super().__init__(args, tgt_dict) + + self.unit_lm = getattr(args, "unit_lm", False) + + self.lexicon = load_words(args.lexicon) if args.lexicon else None + self.idx_to_wrd = {} + + checkpoint = torch.load(args.kenlm_model, map_location="cpu") + + if "cfg" in checkpoint and checkpoint["cfg"] is not None: + lm_args = checkpoint["cfg"] + else: + lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) + + with open_dict(lm_args.task): + lm_args.task.data = osp.dirname(args.kenlm_model) + + task = tasks.setup_task(lm_args.task) + model = task.build_model(lm_args.model) + model.load_state_dict(checkpoint["model"], strict=False) + + self.trie = Trie(self.vocab_size, self.silence) + + self.word_dict = task.dictionary + self.unk_word = self.word_dict.unk() + self.lm = FairseqLM(self.word_dict, model) + + if self.lexicon: + start_state = self.lm.start(False) + for i, (word, spellings) in enumerate(self.lexicon.items()): + if self.unit_lm: + word_idx = i + self.idx_to_wrd[i] = word + score = 0 + else: + word_idx = self.word_dict.index(word) + _, score = self.lm.score(start_state, word_idx, no_cache=True) + + for spelling in spellings: + spelling_idxs = [tgt_dict.index(token) for token in spelling] + assert ( + tgt_dict.unk() not in spelling_idxs + ), f"{spelling} {spelling_idxs}" + self.trie.insert(spelling_idxs, word_idx, score) + self.trie.smear(SmearingMode.MAX) + + self.decoder_opts = LexiconDecoderOptions( + beam_size=args.beam, + beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), + beam_threshold=args.beam_threshold, + lm_weight=args.lm_weight, + word_score=args.word_score, + unk_score=args.unk_weight, + sil_score=args.sil_weight, + log_add=False, + criterion_type=self.criterion_type, + ) + + self.decoder = LexiconDecoder( + self.decoder_opts, + self.trie, + self.lm, + self.silence, + self.blank, + self.unk_word, + [], + self.unit_lm, + ) + else: + assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" + from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions + + d = {w: [[w]] for w in tgt_dict.symbols} + self.word_dict = create_word_dict(d) + self.lm = KenLM(args.kenlm_model, self.word_dict) + self.decoder_opts = LexiconFreeDecoderOptions( + beam_size=args.beam, + beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), + beam_threshold=args.beam_threshold, + lm_weight=args.lm_weight, + sil_score=args.sil_weight, + log_add=False, + criterion_type=self.criterion_type, + ) + self.decoder = LexiconFreeDecoder( + self.decoder_opts, self.lm, self.silence, self.blank, [] + ) + + def decode(self, emissions): + B, T, N = emissions.size() + hypos = [] + + def idx_to_word(idx): + if self.unit_lm: + return self.idx_to_wrd[idx] + else: + return 
self.word_dict[idx] + + def make_hypo(result): + hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score} + if self.lexicon: + hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0] + return hypo + + for b in range(B): + emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) + results = self.decoder.decode(emissions_ptr, T, N) + + nbest_results = results[: self.nbest] + hypos.append([make_hypo(result) for result in nbest_results]) + self.lm.empty_cache() + + return hypos diff --git a/triton_model/1/aqc/examples/wav2vec/README.md b/triton_model/1/aqc/examples/wav2vec/README.md new file mode 100644 index 0000000..e979733 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/README.md @@ -0,0 +1,426 @@ +# wav2vec 2.0 + +wav2vec 2.0 learns speech representations on unlabeled data as described in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations (Baevski et al., 2020)](https://arxiv.org/abs/2006.11477). + +We learned speech representations in multiple languages as well in [Unsupervised Cross-lingual Representation Learning for Speech Recognition (Conneau et al., 2020)](https://arxiv.org/abs/2006.13979). + +We also combined wav2vec 2.0 with self-training in [Self-training and Pre-training are Complementary for Speech Recognition (Xu et al., 2020)](https://arxiv.org/abs/2010.11430). + +We combined speech data from multiple domains in [Robust wav2vec 2.0: Analyzing Domain Shift in Self-Supervised Pre-Training (Hsu, et al., 2021)](https://arxiv.org/abs/2104.01027). + +We finetuned XLSR-53 on multiple languages to transcribe unseen languages in [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition (Xu et al., 2021)](https://arxiv.org/abs/2109.11680). + +## Pre-trained models + +Model | Finetuning split | Dataset | Model +|---|---|---|--- +Wav2Vec 2.0 Base | No finetuning | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt) +Wav2Vec 2.0 Base | 10 minutes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_10m.pt) +Wav2Vec 2.0 Base | 100 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_100h.pt) +Wav2Vec 2.0 Base | 960 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_960h.pt) +Wav2Vec 2.0 Large | No finetuning | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/libri960_big.pt) +Wav2Vec 2.0 Large | 10 minutes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_10m.pt) +Wav2Vec 2.0 Large | 100 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_100h.pt) +Wav2Vec 2.0 Large | 960 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_960h.pt) +Wav2Vec 2.0 Large (LV-60)* | No finetuning | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_new.pt) +Wav2Vec 2.0 Large conformer - rel_pos (LV-60)* | No finetuning | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](s3://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_relpos_PT_no_FT) +Wav2Vec 2.0 Large conformer - rope (LV-60)* | No 
finetuning | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](s3://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_rope_PT_no_FT) +Wav2Vec 2.0 Large (LV-60)* | 10 minutes | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m_new.pt) +Wav2Vec 2.0 Large (LV-60)* | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h_new.pt) +Wav2Vec 2.0 Large conformer - rel_pos (LV-60)* | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](s3://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_relpos_PT_100h_FT.pt) +Wav2Vec 2.0 Large conformer - rope (LV-60)* | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](s3://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_rope_PT_100h_FT.pt) +Wav2Vec 2.0 Large (LV-60)* | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec2_vox_960h_new.pt) +Wav2Vec 2.0 Large conformer - rel_pos (LV-60)* | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](s3://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_relpos_PT_960h_FT.pt) +Wav2Vec 2.0 Large conformer - rope (LV-60)* | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](s3://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_rope_PT_960h_FT.pt) +Wav2Vec 2.0 Large (LV-60) + Self Training * | 10 minutes | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m_pl.pt) +Wav2Vec 2.0 Large (LV-60) + Self Training * | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h_pl.pt) +Wav2Vec 2.0 Large (LV-60) + Self Training * | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt) +Wav2Vec 2.0 Large (LV-60 + CV + SWBD + FSH) ** | No finetuning | [Libri-Light](https://github.com/facebookresearch/libri-light) + [CommonVoice](https://commonvoice.mozilla.org/en/languages) + [Switchboard](https://catalog.ldc.upenn.edu/LDC97S62) + [Fisher](https://catalog.ldc.upenn.edu/LDC2004T19) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv.pt) +Wav2Vec 2.0 Large (LV-60 + CV + SWBD + FSH) ** | 960 hours Librispeech | [Libri-Light](https://github.com/facebookresearch/libri-light) + [CommonVoice](https://commonvoice.mozilla.org/en/languages) + [Switchboard](https://catalog.ldc.upenn.edu/LDC97S62) + [Fisher](https://catalog.ldc.upenn.edu/LDC2004T19) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv_ftls960_updated.pt) +Wav2Vec 2.0 Large (LV-60 + CV + SWBD + FSH) ** | 300 hours Switchboard | [Libri-Light](https://github.com/facebookresearch/libri-light) + [CommonVoice](https://commonvoice.mozilla.org/en/languages) + 
[Switchboard](https://catalog.ldc.upenn.edu/LDC97S62) + [Fisher](https://catalog.ldc.upenn.edu/LDC2004T19) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv_ftsb300_updated.pt) + +\* updated (Oct. 24, 2020)\ +** updated (Nov. 13, 2021) + +We also release multilingual pre-trained wav2vec 2.0 (XLSR) models: + +Model | Architecture | Hours | Languages | Datasets | Model +|---|---|---|---|---|--- +XLSR-53 | Large | 56k | 53 | MLS, CommonVoice, BABEL | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt) + +The XLSR model uses the following datasets for multilingual pretraining: + +* **[MLS: Multilingual LibriSpeech](https://indico2.conference4me.psnc.pl/event/35/contributions/3585/attachments/1060/1101/Wed-2-6-10.pdf)** (8 languages, 50.7k hours): *Dutch, English, French, German, Italian, Polish, Portuguese, Spanish* + +* **[CommonVoice](https://commonvoice.mozilla.org/en/languages)** (36 languages, 3.6k hours): *Arabic, Basque, Breton, Chinese (CN), Chinese (HK), Chinese (TW), Chuvash, Dhivehi, Dutch, English, Esperanto, Estonian, French, German, Hakh-Chin, Indonesian, Interlingua, Irish, Italian, Japanese, Kabyle, Kinyarwanda, Kyrgyz, Latvian, Mongolian, Persian, Portuguese, Russian, Sakha, Slovenian, Spanish, Swedish, Tamil, Tatar, Turkish, Welsh* (see also [finetuning splits](https://dl.fbaipublicfiles.com/cpc_audio/common_voices_splits.tar.gz) from [this paper](https://arxiv.org/abs/2002.02848)). + +* **[Babel](https://catalog.ldc.upenn.edu/byyear)** (17 languages, 1.7k hours): *Assamese, Bengali, Cantonese, Cebuano, Georgian, Haitian, Kazakh, Kurmanji, Lao, Pashto, Swahili, Tagalog, Tamil, Tok, Turkish, Vietnamese, Zulu* + +We also finetuned several models on languages from [CommonVoice](https://commonvoice.mozilla.org/en/languages) (version 6.1) and [Babel](https://catalog.ldc.upenn.edu/byyear). Please refer to [our paper](https://arxiv.org/abs/2109.11680) for details about which languages are used. + +Pretrained Model | Finetune Dataset | # Languages | Phonemizer | Model | Dictionary +|---|---|---|---|---|--- +LV-60 | CommonVoice | 26 | [Espeak](https://github.com/espeak-ng/espeak-ng/blob/master/docs/languages.md) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/espeak_en_26lang_m10.pt) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/espeak_dict.txt) +XLSR-53 | CommonVoice | 26 | [Espeak](https://github.com/espeak-ng/espeak-ng/blob/master/docs/languages.md) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/espeak_26lang_m10.pt) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/espeak_dict.txt) +XLSR-53 | CommonVoice | 21 | [Phonetisaurus](https://github.com/AdolfVonKleist/Phonetisaurus) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/phonetisaurus_21lang_m10.pt) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/phonetisaurus_dict.txt) +XLSR-53 | CommonVoice, BABEL | 21, 19 | [Phonetisaurus](https://github.com/AdolfVonKleist/Phonetisaurus) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/phonetisaurus_40lang_m10.pt) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/zero_shot/phonetisaurus_40lang.dict.txt) + +We release 2 models that are finetuned on data from 2 different phonemizers.
Although the phonemes are all [IPA](https://en.wikipedia.org/wiki/International_Phonetic_Alphabet) symbols, there are still subtle differences between the phonemized transcriptions from the 2 phonemizers. Thus, if your data is phonemized by one of the phonemizers above, it is better to use the corresponding model. + +## Training a new model with the CLI tools + +Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length): + +### Prepare training data manifest + +First, install the `soundfile` library: + +```shell script +pip install soundfile +``` + +Next, run: + +```shell script +python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext $ext --valid-percent $valid +``` + +$ext should be set to flac, wav, or whatever format your dataset happens to use that soundfile can read. + +$valid should be set to some reasonable percentage (like 0.01) of training data to use for validation. +To use a pre-defined validation set (like dev-other from librispeech), set it to 0 and then overwrite valid.tsv with a +separately pre-processed manifest file. + +### Train a wav2vec 2.0 base model + +This configuration was used for the base model trained on the Librispeech dataset in the wav2vec 2.0 paper. + +Note that the input is expected to be single channel, sampled at 16 kHz. + +```shell script +$ fairseq-hydra-train \ + task.data=/path/to/data \ + --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \ + --config-name wav2vec2_base_librispeech +``` + +Note: you can simulate 64 GPUs by using k GPUs and adding command line parameters (before `--config-dir`) +`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 64/k + +### Train a wav2vec 2.0 large model + +This configuration was used for the large model trained on the Libri-light dataset in the wav2vec 2.0 paper. + +```shell script +$ fairseq-hydra-train \ + task.data=/path/to/data \ + --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \ + --config-name wav2vec2_large_librivox +``` + +Note: you can simulate 128 GPUs by using k GPUs and adding command line parameters (before `--config-dir`) +`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 128/k + +### Train a wav2vec 2.0 model with conformer backbone + +To replace the transformer layers in the encoder with conformer layers, set `--layer-type conformer --attn-type espnet --pos-enc-type ${POS_ENC_TYPE}`. `POS_ENC_TYPE` refers to the positional encoding to be used in the conformer encoder. +Set it to `abs`, `rope` or `rel_pos` to use absolute, rotary or relative positional encoding in the conformer layers, respectively.
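+For example, to select relative positional encoding for the commands below (`abs` and `rope` work the same way):
+
+```shell script
+POS_ENC_TYPE=rel_pos
+```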
+ +To train a base model with conformer: + +```shell script +$ fairseq-hydra-train \ + task.data=/path/to/data \ + --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \ + --config-name wav2vec2_conformer_base_librispeech \ + --attn-type espnet --pos-enc-type ${POS_ENC_TYPE} +``` + +To train a large model with conformer: + +```shell script +$ fairseq-hydra-train \ + task.data=/path/to/data \ + --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \ + --config-name wav2vec2_conformer_large_librivox \ + --attn-type espnet --pos-enc-type ${POS_ENC_TYPE} +``` + +### Fine-tune a pre-trained model with CTC + +Fine-tuning a model requires parallel audio and label files, as well as a vocabulary file in fairseq format. +A letter vocabulary can be downloaded [here](https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt). +An example [script](libri_labels.py) that generates labels for the Librispeech dataset from the tsv file produced by wav2vec_manifest.py can be used as follows: + +```shell script +split=train +$ python libri_labels.py /path/to/tsv --output-dir /output/dir --output-name $split +``` + +Fine-tuning on 100h of Librispeech with letter targets: + +```shell script +$ fairseq-hydra-train \ + distributed_training.distributed_port=$PORT \ + task.data=/path/to/data \ + model.w2v_path=/path/to/model.pt \ + --config-dir /path/to/fairseq-py/examples/wav2vec/config/finetuning \ + --config-name base_100h +``` + +There are other config files in the config/finetuning directory that can be used to fine-tune on other splits. +You can specify the right config via the `--config-name` parameter. + +Note: you can simulate 24 GPUs by using k GPUs and adding command line parameters (before `--config-dir`) +`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 24/k + +Decoding with a language model during training requires flashlight [python bindings](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)). +If you want to use a language model, add `+criterion.wer_args='[/path/to/kenlm, /path/to/lexicon, 2, -1]'` to the command line. + +### Evaluating a CTC model + +Evaluating a CTC model with a language model requires [flashlight python bindings](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)) to be installed. + +The Fairseq transformer language model used in the wav2vec 2.0 paper can be obtained from the [wav2letter model repository](https://github.com/facebookresearch/wav2letter/tree/master/recipes/sota/2019). +Be sure to upper-case the language model vocab after downloading it. + +The letter dictionary for pre-trained models can be found [here](https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt). + +Next, run the evaluation command: + +```shell script +subset=dev_other +python examples/speech_recognition/infer.py /checkpoint/abaevski/data/speech/libri/10h/wav2vec/raw --task audio_finetuning \ +--nbest 1 --path /path/to/model --gen-subset $subset --results-path /path/to/save/results/for/sclite --w2l-decoder kenlm \ +--lm-model /path/to/kenlm.bin --lm-weight 2 --word-score -1 --sil-weight 0 --criterion ctc --labels ltr --max-tokens 4000000 \ +--post-process letter +``` + +To get raw numbers, use `--w2l-decoder viterbi` and omit the lexicon. To use the transformer language model, use `--w2l-decoder fairseqlm`.
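+For example, a minimal lexicon-free viterbi run might look like this (paths are placeholders; the flags mirror the kenlm command above with the language-model options dropped):
+
+```shell script
+subset=dev_other
+python examples/speech_recognition/infer.py /path/to/data --task audio_finetuning \
+--nbest 1 --path /path/to/model --gen-subset $subset --results-path /path/to/results --w2l-decoder viterbi \
+--criterion ctc --labels ltr --max-tokens 4000000 --post-process letter
+```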
+ +## Use wav2vec 2.0 with 🤗Transformers + +Wav2Vec2 is also available in the [🤗Transformers library](https://github.com/huggingface/transformers) since version 4.4. + +Pretrained Models can be found on the [hub](https://huggingface.co/models?filter=wav2vec2) +and documentation can be found [here](https://huggingface.co/transformers/master/model_doc/wav2vec2.html). + +Usage example: + +```python +# !pip install transformers +# !pip install datasets +import soundfile as sf +import torch +from datasets import load_dataset +from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor + +# load pretrained model +processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") +model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") + + +librispeech_samples_ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") + +# load audio +audio_input, sample_rate = sf.read(librispeech_samples_ds[0]["file"]) + +# pad input values and return pt tensor +input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values + +# INFERENCE + +# retrieve logits & take argmax +logits = model(input_values).logits +predicted_ids = torch.argmax(logits, dim=-1) + +# transcribe +transcription = processor.decode(predicted_ids[0]) + +# FINE-TUNE + +target_transcription = "A MAN SAID TO THE UNIVERSE I EXIST" + +# encode labels +with processor.as_target_processor(): + labels = processor(target_transcription, return_tensors="pt").input_ids + +# compute loss by passing labels +loss = model(input_values, labels=labels).loss +loss.backward() +``` + +# wav2vec + +Example to train a wav2vec model as described in [wav2vec: Unsupervised Pre-training for Speech Recognition (Schneider et al., 2019)](https://arxiv.org/abs/1904.05862). 
+ +## Pre-trained models + +Description | Dataset | Model +---|---|--- +Wav2Vec large | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_large.pt) + +#### Example usage + +```python +import torch +import fairseq + +cp_path = '/path/to/wav2vec.pt' +model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path]) +model = model[0] +model.eval() + +wav_input_16khz = torch.randn(1,10000) +z = model.feature_extractor(wav_input_16khz) +c = model.feature_aggregator(z) +``` + +## Training a new model with the CLI tools + +Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length): + +### Prepare training data manifest + +``` +python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext wav +``` + +### Train a wav2vec model + +``` +$ python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \ +--arch wav2vec --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \ +--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \ +--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \ +--skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \ +--max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test +``` + +### Run wav2vec2 pre-training on Google Cloud TPUs + +Wav2Vec2 is now supported on TPUs! Currently, only pre-training is supported. + +#### Using hydra on a v3-8 + +``` +$ OMP_NUM_THREADS=1 fairseq-hydra-train \ + task.data=/manifest/path \ + --config-dir /PATH/TO/FAIRSEQ/examples/wav2vec/config/pretraining \ + --config-name wav2vec2_large_librivox_tpu.yaml +``` + +#### Using command line arguments on a v3-8 + +Note: running via command-line arguments currently has a [known problem](https://github.com/pytorch/fairseq/issues/3741).
+ +``` +$ OMP_NUM_THREADS=1 python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \ +--arch wav2vec2 --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \ +--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \ +--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \ +--skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \ +--max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test \ +--tpu --distributed-world-size 8 --num-batch-buckets 3 --enable-padding \ +--encoder-layerdrop 0 --mask-channel-prob 0.1 +``` + +#### Using hydra on a pod slice (v3-N with N > 8) + +``` +$ OMP_NUM_THREADS=1 fairseq-hydra-train \ + task.data=/manifest/path \ + --config-dir /PATH/TO/FAIRSEQ/examples/wav2vec/config/pretraining \ + --config-name wav2vec2_large_librivox_tpu-pod.yaml # edit distributed-world-size accordingly +``` + +#### Using command line arguments on a pod slice (v3-N with N > 8) + +Note: running via command-line arguments currently has a [known problem](https://github.com/pytorch/fairseq/issues/3741). + +``` +$ python -m torch_xla.distributed.xla_dist \ + --tpu ${TPUNAME} --conda-env=torch-xla-${TORCH_XLA_VERSION} --env OMP_NUM_THREADS=1 \ + -- \ +python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \ +--arch wav2vec2 --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \ +--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \ +--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \ +--skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \ +--max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test \ +--tpu --distributed-world-size ${WORLD_SIZE} --num-batch-buckets 3 --enable-padding \ +--encoder-layerdrop 0 --mask-channel-prob 0.1 +``` + +### Extract embeddings from the downstream task data + +``` +$ PYTHONPATH=/path/to/fairseq python examples/wav2vec/wav2vec_featurize.py --input /path/to/task/waves --output /path/to/output \ +--model /model/path/checkpoint_best.pt --split train valid test +``` + +# vq-wav2vec + +Example to train a vq-wav2vec model as described in [vq-wav2vec: Self-Supervised Learning of Discrete Speech Representations (Baevski et al., 2019)](https://arxiv.org/abs/1910.05453). + +These models are also used in [Effectiveness of self-supervised pre-training for speech recognition (Baevski et al., 2019)](https://arxiv.org/abs/1911.03912).
+
+## Pre-trained models
+
+Description | Dataset | Model
+---|---|---
+vq-wav2vec Gumbel | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec.pt)
+vq-wav2vec K-means | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt)
+Roberta on K-means codes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/bert_kmeans.tar)
+
+#### Example usage
+
+```python
+import torch
+import fairseq
+
+cp = '/path/to/vq-wav2vec.pt'
+model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp])
+model = model[0]
+model.eval()
+
+wav_input_16khz = torch.randn(1,10000)
+z = model.feature_extractor(wav_input_16khz)
+_, idxs = model.vector_quantizer.forward_idx(z)
+print(idxs.shape) # output: torch.Size([1, 60, 2]), 60 timesteps with 2 indexes corresponding to 2 groups in the model
+```
+
+## Training a new model with the CLI tools
+
+Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length):
+
+### Prepare training data manifest
+
+```
+python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext wav
+```
+
+### Train a gumbel vq-wav2vec model
+
+```
+$ python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 \
+--save-interval 1 --no-epoch-checkpoints --arch wav2vec --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 \
+--optimizer adam --lr 1e-05 --lr-scheduler cosine \
+--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)] \
+--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
+--activation gelu --offset auto --skip-connections-agg --residual-scale 0.5 \
+--log-keys ["prob_perplexity","code_perplexity","temp"] --vq-type gumbel --vq-groups 2 --vq-depth 2 \
+--combine-groups --vq-vars 320 --vq-temp (2,0.5,0.999995) --prediction-steps 12 --warmup-updates 1000 \
+--warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 --max-sample-size 150000 \
+--max-tokens 300000 --cross-sample-negatives 0 --update-freq 1 --seed 2 --skip-invalid-size-inputs-valid-test
+```
+
+For k-means training, set `--vq-type kmeans` and add the `--loss-weights [1]` argument (a sketch of the resulting command follows below). Pre-trained models were trained on 16 GPUs.
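+
+As a minimal sketch, this is the gumbel command above with only the quantizer flags changed; `--vq-temp` is omitted on the assumption that the temperature schedule applies only to the gumbel quantizer, and other hyperparameters may need re-tuning:
+
+```
+$ python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 \
+--save-interval 1 --no-epoch-checkpoints --arch wav2vec --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 \
+--optimizer adam --lr 1e-05 --lr-scheduler cosine \
+--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)] \
+--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
+--activation gelu --offset auto --skip-connections-agg --residual-scale 0.5 \
+--log-keys ["prob_perplexity","code_perplexity","temp"] --vq-type kmeans --loss-weights [1] --vq-groups 2 --vq-depth 2 \
+--combine-groups --vq-vars 320 --prediction-steps 12 --warmup-updates 1000 \
+--warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 --max-sample-size 150000 \
+--max-tokens 300000 --cross-sample-negatives 0 --update-freq 1 --seed 2 --skip-invalid-size-inputs-valid-test
+```
+
+### Tokenize audio data (e.g. 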
for BERT training) + +``` +$ PYTHONPATH=/path/to/fairseq python examples/wav2vec/vq-wav2vec_featurize.py --data-dir /manifest/path --output-dir /path/to/output \ +--checkpoint /model/path/checkpoint_best.pt --split train valid test --extension tsv +``` diff --git a/triton_model/1/aqc/examples/wav2vec/__init__.py b/triton_model/1/aqc/examples/wav2vec/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/triton_model/1/aqc/examples/wav2vec/__pycache__/__init__.cpython-39.pyc b/triton_model/1/aqc/examples/wav2vec/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..025186e0a3522ee75844939ebb8c9621f0fdd24a GIT binary patch literal 173 zcmYe~<>g`kf|aq8QbF`%5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;x_eenx(7s(xN( zc1C7SYH@~sK~a8IYH~@jer9G#u5My+k#0#*W=VdYeoA6VqET6DvTkBwvVLksVr~IY ny?%LO8Awn+K0Y%qvm`!Vub}c4hfQvNN@-529muZFK+FIDBfBhq literal 0 HcmV?d00001 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_100h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_100h.yaml new file mode 100644 index 0000000..6251c18 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_100h.yaml @@ -0,0 +1,58 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 3200000 + skip_invalid_size_inputs_valid_test: true + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 2 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 80000 + lr: [0.00003] + sentence_avg: true + update_freq: [4] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.65 + mask_channel_prob: 0.5 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 0 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10h.yaml new file mode 100644 index 0000000..5044518 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10h.yaml @@ -0,0 +1,63 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval: 50 + save_interval_updates: 10000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: false + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 3200000 + skip_invalid_size_inputs_valid_test: true + validate_after_updates: 10000 + validate_interval: 50 + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 2 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 20000 + lr: [0.00005] + sentence_avg: true + update_freq: [4] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? 
+ apply_mask: true + mask_prob: 0.65 + mask_channel_prob: 0.5 + mask_channel_length: 64 + layerdrop: 0.05 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10m.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10m.yaml new file mode 100644 index 0000000..14abc01 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_10m.yaml @@ -0,0 +1,63 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval: 1000 + save_interval_updates: 50 + keep_interval_updates: 1 + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: false + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 3200000 + skip_invalid_size_inputs_valid_test: true + validate_after_updates: 10000 + validate_interval: 1000 + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 2 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 13000 + lr: [0.00005] + sentence_avg: true + update_freq: [4] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.65 + mask_channel_prob: 0.25 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_1h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_1h.yaml new file mode 100644 index 0000000..a0af1cf --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_1h.yaml @@ -0,0 +1,63 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval: 50 + save_interval_updates: 1000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: false + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 3200000 + skip_invalid_size_inputs_valid_test: true + validate_after_updates: 10000 + validate_interval: 1000 + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 2 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 13000 + lr: [0.00005] + sentence_avg: true + update_freq: [4] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.65 + mask_channel_prob: 0.25 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_960h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_960h.yaml new file mode 100644 index 0000000..3eadc36 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/base_960h.yaml @@ -0,0 +1,57 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? 
+ normalize: false + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 3200000 + skip_invalid_size_inputs_valid_test: true + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 8 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 320000 + lr: [0.0001] + sentence_avg: true + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.5 + mask_channel_prob: 0.1 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 0 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_100h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_100h.yaml new file mode 100644 index 0000000..b8f81e5 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_100h.yaml @@ -0,0 +1,58 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 1280000 + skip_invalid_size_inputs_valid_test: true + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 4 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 80000 + lr: [0.00003] + sentence_avg: true + update_freq: [5] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.5 + mask_channel_prob: 0.5 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10h.yaml new file mode 100644 index 0000000..8f1ca71 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10h.yaml @@ -0,0 +1,63 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval: 50 + save_interval_updates: 10000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 1280000 + skip_invalid_size_inputs_valid_test: true + validate_after_updates: 10000 + validate_interval: 50 + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 4 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 20000 + lr: [0.0001] + sentence_avg: true + update_freq: [5] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? 
+ apply_mask: true + mask_prob: 0.75 + mask_channel_prob: 0.25 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10m.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10m.yaml new file mode 100644 index 0000000..07e327f --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_10m.yaml @@ -0,0 +1,63 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval: 1000 + save_interval_updates: 50 + keep_interval_updates: 1 + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 1280000 + skip_invalid_size_inputs_valid_test: true + validate_after_updates: 10000 + validate_interval: 1000 + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 4 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 13000 + lr: [0.0001] + sentence_avg: true + update_freq: [5] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.65 + mask_channel_prob: 0.25 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_1h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_1h.yaml new file mode 100644 index 0000000..fac1bbb --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_1h.yaml @@ -0,0 +1,63 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval: 1000 + save_interval_updates: 50 + keep_interval_updates: 1 + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 1280000 + skip_invalid_size_inputs_valid_test: true + validate_after_updates: 10000 + validate_interval: 1000 + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 4 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 13000 + lr: [0.0003] + sentence_avg: true + update_freq: [5] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.75 + mask_channel_prob: 0.25 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_960h.yaml b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_960h.yaml new file mode 100644 index 0000000..9d72404 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/finetuning/vox_960h.yaml @@ -0,0 +1,57 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? 
+ normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 1280000 + skip_invalid_size_inputs_valid_test: true + valid_subset: dev_other + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 24 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: 320000 + lr: [0.00003] + sentence_avg: true + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.5 + mask_channel_prob: 0.25 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 diff --git a/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml new file mode 100644 index 0000000..b686e21 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml @@ -0,0 +1,57 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval_updates: 25000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + +task: + _name: audio_pretraining + data: ??? + max_sample_size: 250000 + min_sample_size: 32000 + normalize: false + +dataset: + num_workers: 6 + max_tokens: 1400000 + skip_invalid_size_inputs_valid_test: true + +distributed_training: + distributed_world_size: 64 + ddp_backend: legacy_ddp + +criterion: + _name: wav2vec + infonce: true + log_keys: ["prob_perplexity","code_perplexity","temp"] + loss_weights: [0.1, 10] + +optimization: + max_update: 400000 + lr: [0.0005] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-06 + weight_decay: 0.01 + +lr_scheduler: + _name: polynomial_decay + warmup_updates: 32000 + +model: + _name: wav2vec2 + quantize_targets: true + final_dim: 256 + encoder_layerdrop: 0.05 + dropout_input: 0.1 + dropout_features: 0.1 + feature_grad_mult: 0.1 + encoder_embed_dim: 768 diff --git a/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_base_librispeech.yaml b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_base_librispeech.yaml new file mode 100644 index 0000000..912ac15 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_base_librispeech.yaml @@ -0,0 +1,60 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval_updates: 25000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + +task: + _name: audio_pretraining + data: ??? 
+ max_sample_size: 250000 + min_sample_size: 32000 + normalize: false + +dataset: + num_workers: 6 + max_tokens: 1400000 + skip_invalid_size_inputs_valid_test: true + +distributed_training: + distributed_world_size: 64 + ddp_backend: legacy_ddp + +criterion: + _name: wav2vec + infonce: true + log_keys: ["prob_perplexity","code_perplexity","temp"] + loss_weights: [0.1, 10] + +optimization: + max_update: 400000 + lr: [0.0005] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-06 + weight_decay: 0.01 + +lr_scheduler: + _name: polynomial_decay + warmup_updates: 32000 + +model: + _name: wav2vec2 + quantize_targets: true + final_dim: 256 + encoder_layerdrop: 0.05 + dropout_input: 0.1 + dropout_features: 0.1 + feature_grad_mult: 0.1 + encoder_embed_dim: 768 + layer_type: conformer + attn_type: espnet + pos_enc_type: rel_pos diff --git a/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_large_librivox.yaml b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_large_librivox.yaml new file mode 100644 index 0000000..676166b --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_conformer_large_librivox.yaml @@ -0,0 +1,72 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval_updates: 25000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + +task: + _name: audio_pretraining + data: ??? + max_sample_size: 320000 + min_sample_size: 32000 + normalize: true + +dataset: + num_workers: 6 + max_tokens: 1200000 + skip_invalid_size_inputs_valid_test: true + +distributed_training: + distributed_world_size: 128 + ddp_backend: legacy_ddp + +criterion: + _name: wav2vec + infonce: true + log_keys: ["prob_perplexity","code_perplexity","temp"] + loss_weights: [0.1, 0] + +optimization: + max_update: 1000000 + lr: [0.005] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-06 + weight_decay: 0.01 + +lr_scheduler: + _name: polynomial_decay + warmup_updates: 32000 + +model: + _name: wav2vec2 + quantize_targets: true + extractor_mode: layer_norm + layer_norm_first: true + final_dim: 768 + latent_temp: [2.0,0.1,0.999995] + encoder_layerdrop: 0.00 + dropout_input: 0.0 + dropout_features: 0.0 + dropout: 0.0 + attention_dropout: 0.0 + conv_bias: true + + encoder_layers: 24 + encoder_embed_dim: 1024 + encoder_ffn_embed_dim: 4096 + encoder_attention_heads: 16 + + feature_grad_mult: 1.0 + + layer_type: conformer + attn_type: espnet + pos_enc_type: rel_pos diff --git a/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml new file mode 100644 index 0000000..3192ce4 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml @@ -0,0 +1,70 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + +checkpoint: + save_interval_updates: 25000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + +task: + _name: audio_pretraining + data: ??? 
+ max_sample_size: 320000 + min_sample_size: 32000 + normalize: true + +dataset: + batch_size: 4 + num_workers: 6 + max_tokens: 1200000 + skip_invalid_size_inputs_valid_test: true + +distributed_training: + distributed_world_size: 128 + ddp_backend: legacy_ddp + +criterion: + _name: wav2vec + infonce: true + log_keys: ["prob_perplexity","code_perplexity","temp"] + loss_weights: [0.1, 0] + +optimization: + max_update: 1000000 + lr: [0.005] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-06 + weight_decay: 0.01 + +lr_scheduler: + _name: polynomial_decay + warmup_updates: 32000 + +model: + _name: wav2vec2 + quantize_targets: true + extractor_mode: layer_norm + layer_norm_first: true + final_dim: 768 + latent_temp: [2.0,0.1,0.999995] + encoder_layerdrop: 0.00 + dropout_input: 0.0 + dropout_features: 0.0 + dropout: 0.0 + attention_dropout: 0.0 + conv_bias: true + + encoder_layers: 24 + encoder_embed_dim: 1024 + encoder_ffn_embed_dim: 4096 + encoder_attention_heads: 16 + + feature_grad_mult: 1.0 + diff --git a/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu-pod.yaml b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu-pod.yaml new file mode 100644 index 0000000..ff35a95 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu-pod.yaml @@ -0,0 +1,72 @@ +# @package _group_ + +common: + tpu: true + fp16: false + log_format: json + log_interval: 10 + +checkpoint: + save_interval_updates: 25000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + +task: + _name: audio_pretraining + data: ??? + max_sample_size: 250000 + min_sample_size: 32000 + normalize: true + num_batch_buckets: 3 + precompute_mask_indices: true + enable_padding: true + +dataset: + num_workers: 6 + max_tokens: 1200000 + skip_invalid_size_inputs_valid_test: true + +distributed_training: + distributed_world_size: 128 + ddp_backend: legacy_ddp + +criterion: + _name: wav2vec + infonce: true + log_keys: ["prob_perplexity","code_perplexity","temp"] + loss_weights: [0.1, 0] + +optimization: + max_update: 1000000 + lr: [0.005] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-06 + weight_decay: 0.01 + +lr_scheduler: + _name: polynomial_decay + warmup_updates: 32000 + +model: + _name: wav2vec2 + quantize_targets: true + extractor_mode: layer_norm + layer_norm_first: true + final_dim: 768 + latent_temp: [2.0,0.1,0.999995] + encoder_layerdrop: 0.00 + dropout_input: 0.0 + dropout_features: 0.0 + dropout: 0.0 + attention_dropout: 0.0 + conv_bias: true + + encoder_layers: 24 + encoder_embed_dim: 1024 + encoder_ffn_embed_dim: 4096 + encoder_attention_heads: 16 + + feature_grad_mult: 1.0 diff --git a/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu.yaml b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu.yaml new file mode 100644 index 0000000..ee55bda --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/config/pretraining/wav2vec2_large_librivox_tpu.yaml @@ -0,0 +1,77 @@ +# @package _group_ + +common: + tpu: true + fp16: false + log_format: json + log_interval: 10 + +checkpoint: + save_interval_updates: 25000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + +task: + _name: audio_pretraining + data: ??? 
+ max_sample_size: 250000 + min_sample_size: 32000 + normalize: true + num_batch_buckets: 3 + precompute_mask_indices: true + enable_padding: true + inferred_w2v_config: + mask_prob: 0.65 + mask_selection: 'static' + mask_other: 0 + mask_channel_prob: 0.1 + +dataset: + num_workers: 6 + max_tokens: 1200000 + skip_invalid_size_inputs_valid_test: true + +distributed_training: + distributed_world_size: 8 + ddp_backend: legacy_ddp + +criterion: + _name: wav2vec + infonce: true + log_keys: ["prob_perplexity","code_perplexity","temp"] + loss_weights: [0.1, 0] + +optimization: + max_update: 1000000 + lr: [0.005] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-06 + weight_decay: 0.01 + +lr_scheduler: + _name: polynomial_decay + warmup_updates: 32000 + +model: + _name: wav2vec2 + quantize_targets: true + extractor_mode: layer_norm + layer_norm_first: true + final_dim: 768 + latent_temp: [2.0,0.1,0.999995] + encoder_layerdrop: 0.00 + dropout_input: 0.0 + dropout_features: 0.0 + dropout: 0.0 + attention_dropout: 0.0 + conv_bias: true + + encoder_layers: 24 + encoder_embed_dim: 1024 + encoder_ffn_embed_dim: 4096 + encoder_attention_heads: 16 + + feature_grad_mult: 1.0 diff --git a/triton_model/1/aqc/examples/wav2vec/libri_labels.py b/triton_model/1/aqc/examples/wav2vec/libri_labels.py new file mode 100644 index 0000000..694a202 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/libri_labels.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset +""" + +import argparse +import os + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("tsv") + parser.add_argument("--output-dir", required=True) + parser.add_argument("--output-name", required=True) + args = parser.parse_args() + + os.makedirs(args.output_dir, exist_ok=True) + + transcriptions = {} + + with open(args.tsv, "r") as tsv, open( + os.path.join(args.output_dir, args.output_name + ".ltr"), "w" + ) as ltr_out, open( + os.path.join(args.output_dir, args.output_name + ".wrd"), "w" + ) as wrd_out: + root = next(tsv).strip() + for line in tsv: + line = line.strip() + dir = os.path.dirname(line) + if dir not in transcriptions: + parts = dir.split(os.path.sep) + trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt" + path = os.path.join(root, dir, trans_path) + assert os.path.exists(path) + texts = {} + with open(path, "r") as trans_f: + for tline in trans_f: + items = tline.strip().split() + texts[items[0]] = " ".join(items[1:]) + transcriptions[dir] = texts + part = os.path.basename(line).split(".")[0] + assert part in transcriptions[dir] + print(transcriptions[dir][part], file=wrd_out) + print( + " ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |", + file=ltr_out, + ) + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/scripts/binarize_manifest.sh b/triton_model/1/aqc/examples/wav2vec/scripts/binarize_manifest.sh new file mode 100644 index 0000000..6f201bd --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/scripts/binarize_manifest.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +# usage: bash binarize_manifest + +DEST_DIR=$1 +TRAIN_SPLIT=$2 +VALID_SPLIT=$3 +FAIRSEQ_ROOT=$4 + +mkdir -p $DEST_DIR + +# split file path and lengths into separate files +cut -f1 $TRAIN_SPLIT.tsv > 
$DEST_DIR/train_fnames.txt
+cut -f1 $VALID_SPLIT.tsv > $DEST_DIR/valid_fnames.txt
+cut -f2 $TRAIN_SPLIT.tsv > $DEST_DIR/train.lengths
+cut -f2 $VALID_SPLIT.tsv > $DEST_DIR/valid.lengths
+
+# copy root directory
+head -1 $TRAIN_SPLIT.tsv > $DEST_DIR/train.root
+head -1 $VALID_SPLIT.tsv > $DEST_DIR/valid.root
+
+# remove root directory
+sed -i '1d' $DEST_DIR/train_fnames.txt
+sed -i '1d' $DEST_DIR/valid_fnames.txt
+sed -i '1d' $DEST_DIR/train.lengths
+sed -i '1d' $DEST_DIR/valid.lengths
+
+# insert spaces between characters
+sed -i -e 's/\(.\)/\1 /g' $DEST_DIR/train_fnames.txt
+sed -i -e 's/\(.\)/\1 /g' $DEST_DIR/valid_fnames.txt
+
+# run preprocessor
+PYTHONPATH=$FAIRSEQ_ROOT python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $DEST_DIR/train_fnames.txt --validpref $DEST_DIR/valid_fnames.txt --workers 60 --only-source --destdir $DEST_DIR
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/README.md b/triton_model/1/aqc/examples/wav2vec/unsupervised/README.md
new file mode 100644
index 0000000..b9d6f67
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/README.md
@@ -0,0 +1,119 @@
+# wav2vec Unsupervised (wav2vec-U)
+
+Wav2vec Unsupervised (wav2vec-U) and the 2.0 version are frameworks for building speech recognition systems without any labeled training data, as described in [Unsupervised Speech Recognition (Baevski et al., 2021)](https://ai.facebook.com/research/publications/unsupervised-speech-recognition) and [Towards End-to-end Unsupervised Speech Recognition (Liu et al., 2022)](https://arxiv.org/abs/2204.02492). The model takes as input wav2vec 2.0 or XLSR representations (see [pretrained models](https://github.com/pytorch/fairseq/blob/main/examples/wav2vec)) as well as unlabeled speech and text data.
+
+The training procedure consists of three consecutive main steps:
+* Preparation of speech representations and text data
+* Generative adversarial training (GAN)
+* Iterative self-training + Kaldi LM-decoding
+
+## Preparation of speech and text data
+Similar to [wav2vec 2.0](https://github.com/pytorch/fairseq/blob/main/examples/wav2vec/README.md), data folders contain {train,valid,test}.{tsv,wrd,phn} files, where audio paths are stored in tsv files, and word, letter, or phoneme transcriptions are stored in .{wrd,ltr,phn}.
+
+In **/path/to/data/with_silence** you need a *train.tsv* file as well as (optionally) *{valid,test}.{tsv,wrd,phn}*. It is useful to also have *10h.{tsv,phn}* files there for reproducing the ablation study on layer selection. In **/path/to/data/without_silence** you have the same files, except that the *.tsv* files reference audio with silence removed using rVAD.
+
+Pre-requisites (a shell sketch of this setup follows the list):
+* set the FAIRSEQ_ROOT environment variable to your fairseq installation
+* set the RVAD_ROOT environment variable to a checkout of [rVADfast](https://github.com/zhenghuatan/rVADfast)
+* set the KENLM_ROOT environment variable to the location of [KenLM](https://github.com/kpu/kenlm) binaries
+* install [PyKaldi](https://github.com/pykaldi/pykaldi) and set the KALDI_ROOT environment variable to the location of your kaldi installation. To use the version bundled with PyKaldi, you can use /path/to/pykaldi/tools/kaldi
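+
+For example, a possible environment setup matching the pre-requisites above (all paths are placeholders):
+
+```shell
+export FAIRSEQ_ROOT=/path/to/fairseq
+export RVAD_ROOT=/path/to/rVADfast
+export KENLM_ROOT=/path/to/kenlm/build/bin
+export KALDI_ROOT=/path/to/pykaldi/tools/kaldi
+```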
+
+Create new audio files without silences:
+```shell
+# create a manifest file for the original set of audio files
+python $FAIRSEQ_ROOT/examples/wav2vec/wav2vec_manifest.py /dir/to/save/audio/files --ext wav --dest /path/to/new/train.tsv --valid-percent 0
+
+python scripts/vads.py -r $RVAD_ROOT < /path/to/train.tsv > train.vads
+
+python scripts/remove_silence.py --tsv /path/to/train.tsv --vads train.vads --out /dir/to/save/audio/files
+
+python $FAIRSEQ_ROOT/examples/wav2vec/wav2vec_manifest.py /dir/to/save/audio/files --ext wav --dest /path/to/new/train.tsv --valid-percent 0.01
+```
+
+Next, we need to preprocess the audio data to better match the phonemized text data:
+
+```shell
+# wav2vec-U
+zsh scripts/prepare_audio.sh /dir/with/{train,test,valid}.tsv /output/dir /path/to/wav2vec2/model.pt 512 14
+# wav2vec-U 2.0
+zsh scripts/prepare_audio_v2.sh /dir/with/{train,test,valid}.tsv /output/dir /path/to/wav2vec2/model.pt 64 14
+```
+Note that if you have splits different from train/valid/test, you will need to modify this script. The third argument is the PCA dimensionality for wav2vec-U and the number of MFCC clusters for wav2vec-U 2.0. The last argument is the 0-based index of the layer from which to extract representations.
+
+Now we need to prepare the text data:
+```shell
+zsh scripts/prepare_text.sh language /path/to/text/file /output/dir 1000 espeak /path/to/fasttext/lid/model sil_prob
+```
+
+The fourth argument is the minimum number of observations of phones to keep. If your text corpus is small, you might want to reduce this number.
+
+The fifth argument is which phonemizer to use. Supported values are [espeak](http://espeak.sourceforge.net/), [espeak-ng](https://github.com/espeak-ng/espeak-ng), and [G2P](https://github.com/Kyubyong/g2p) (English only).
+
+Pre-trained fasttext LID models can be downloaded [here](https://fasttext.cc/docs/en/language-identification.html).
+
+The last argument is the probability of introducing silence (`<SIL>`) between word boundaries. We found that `0.25`/`0.5` works well in general for wav2vec-U and the 2.0 version respectively, but you may want to vary it for languages that have not been tested.
+
+### Prepare TIMIT data
+TIMIT transcripts include silence. Therefore VAD is not used for audio preprocessing, and we do not wrap transcripts with silences or insert random silence in between words.
+
+To prepare TIMIT data for both the matched and unmatched setups:
+```shell
+bash scripts/prepare_timit.sh /dir/to/timit/raw/data /output/dir /path/to/wav2vec2/model.pt
+```
+
+Note that we assume the TIMIT distribution with capitalized directories and filenames is used (e.g., `TRAIN/DR1/FCJF0/SA1.PHN`).
+
+## Generative adversarial training (GAN)
+
+We then use a GAN model to build a first unsupervised ASR model. The data preparation above, of both speech features and text data, is what enables the generator to match speech to text in an unsupervised way.
+
+GAN training on top of the preprocessed features, with default hyperparameters, can be launched with:
+
+```
+PREFIX=w2v_unsup_gan_xp
+
+# For wav2vec-U, audio features are pre-segmented
+CONFIG_NAME=w2vu
+TASK_DATA=/path/to/features/precompute_unfiltered_pca512_cls128_mean_pooled
+
+# For wav2vec-U 2.0, use raw audio features
+CONFIG_NAME=w2vu2
+TASK_DATA=/path/to/features/
+
+# Unpaired text input
+TEXT_DATA=/path/to/data/phones  # path to fairseq-preprocessed GAN data (phones dir)
+KENLM_PATH=/path/to/data/phones/kenlm.phn.o4.bin  # KenLM 4-gram phoneme language model (LM data = GAN data here)
+
+PYTHONPATH=$FAIRSEQ_ROOT PREFIX=$PREFIX fairseq-hydra-train \
+  -m --config-dir config/gan \
+  --config-name $CONFIG_NAME \
+  task.data=${TASK_DATA} \
+  task.text_data=${TEXT_DATA} \
+  task.kenlm_path=${KENLM_PATH} \
+  common.user_dir=${FAIRSEQ_ROOT}/examples/wav2vec/unsupervised \
+  model.code_penalty=2,4 model.gradient_penalty=1.5,2.0 \
+  model.smoothness_weight=0.5,0.75,1.0 'common.seed=range(0,5)'
+```
+
+Once we find the best checkpoint (chosen using an unsupervised metric that combines language model perplexity and vocabulary usage), we can use it to generate phone labels (or word labels with an appropriate kaldi WFST):
+
+```shell
+python w2vu_generate.py --config-dir config/generate --config-name viterbi \
+fairseq.common.user_dir=${FAIRSEQ_ROOT}/examples/wav2vec/unsupervised \
+fairseq.task.data=/path/to/dir/with/features \
+fairseq.common_eval.path=/path/to/gan/checkpoint \
+fairseq.dataset.gen_subset=valid results_path=/where/to/save/transcriptions
+```
+
+Decoding without an LM works best on the same adjacent-mean-pooled features that the GAN was trained on, while decoding with an LM works better on features from before the adjacent-timestep mean-pooling step (without the "_pooled" suffix).
+
+While the generator of wav2vec-U 2.0 is trained with an output frequency of 16 Hz, we found that decoding at a higher frequency produces better results. This can be done by adding `decode_stride=1` or `decode_stride=2` to the arguments of the generate command.
+
+## Iterative self-training + Kaldi LM-decoding
+After GAN training provides a first unsupervised model, we progressively refine the quality of transcriptions using several iterations of semi-supervised learning. We perform two iterations: first, we pseudo-label the training data with the unsupervised GAN model and train an HMM on the pseudo-labels. Second, we relabel the training data with the HMM and then fine-tune the original wav2vec 2.0 model using the HMM pseudo-labels with a CTC loss. Note that HMM models use phonemes as output, while wav2vec 2.0 uses letters. Both are decoded into words using WFST decoders.
+
+Please see [this README](kaldi_self_train/README.md) for more instructions on how to do iterative self-training + Kaldi LM-decoding; a sketch of the final fine-tuning launch follows below.
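+
+As a rough sketch (all paths here are placeholders, and the pseudo-label preparation itself is covered in the linked README), the final CTC fine-tuning step can be launched with the `w2v_finetune` config added below:
+
+```shell
+PYTHONPATH=$FAIRSEQ_ROOT fairseq-hydra-train \
+  --config-dir config/finetuning \
+  --config-name w2v_finetune \
+  task.data=/path/to/audio/with/hmm/pseudo_labels \
+  model.w2v_path=/path/to/wav2vec2/model.pt
+```
+
+Per its `labels: ltr`, `criterion: ctc`, and `train_subset`/`valid_subset` entries, the config expects `{train,valid}.{tsv,ltr}` files in `task.data`, with the .ltr files built from the HMM pseudo-labels.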
+ +*** Note: these instructions are a work in progress and will be updated over the next few days diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/__init__.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/finetuning/w2v_finetune.yaml b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/finetuning/w2v_finetune.yaml new file mode 100644 index 0000000..19a3ef3 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/finetuning/w2v_finetune.yaml @@ -0,0 +1,62 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + tensorboard_logdir: tb + +checkpoint: + no_epoch_checkpoints: true + save_interval_updates: 20000 + +task: + _name: audio_finetuning + data: ??? + normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 800000 + skip_invalid_size_inputs_valid_test: true + train_subset: train + valid_subset: valid + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 8 + find_unused_parameters: True + +criterion: + _name: ctc + zero_infinity: true + post_process: letter + +optimization: + max_update: 80000 + lr: [0.00003] + sentence_avg: true + update_freq: [1] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.25 + mask_channel_prob: 0.1 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 0 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu.yaml b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu.yaml new file mode 100644 index 0000000..74f1829 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu.yaml @@ -0,0 +1,115 @@ +# @package _group_ + +common: + fp16: false + fp16_no_flatten_grads: true + log_format: json + log_interval: 100 + tensorboard_logdir: tb + reset_logging: false + suppress_crashes: false + +checkpoint: + save_interval: 1000 + save_interval_updates: 1000 + no_epoch_checkpoints: true + best_checkpoint_metric: weighted_lm_ppl + save_dir: . + +distributed_training: + distributed_world_size: 1 + +task: + _name: unpaired_audio_text + data: ??? + text_data: ??? + labels: phn + sort_by_length: false + unfiltered: false + max_length: null + append_eos: false + kenlm_path: ??? 
+ +dataset: + num_workers: 6 + batch_size: 160 + skip_invalid_size_inputs_valid_test: true + valid_subset: valid + validate_interval: 1000 + validate_interval_updates: 1000 + +criterion: + _name: model + log_keys: + - accuracy_dense + - accuracy_token + - temp + - code_ppl + +optimization: + max_update: 150000 + clip_norm: 5.0 + lr: [0] + +optimizer: + _name: composite + groups: + generator: + lr: [0.0004] + lr_float: null + optimizer: + _name: adam + adam_betas: [0.5,0.98] + adam_eps: 1e-06 + weight_decay: 0 + amsgrad: false + lr_scheduler: + _name: fixed + warmup_updates: 0 + discriminator: + lr: [ 0.0005 ] + lr_float: null + optimizer: + _name: adam + adam_betas: [0.5,0.98] + adam_eps: 1e-06 + weight_decay: 0.0001 + amsgrad: false + lr_scheduler: + _name: fixed + warmup_updates: 0 + +lr_scheduler: pass_through + +model: + _name: wav2vec_u + + discriminator_dim: 384 + discriminator_depth: 2 + discriminator_kernel: 6 + discriminator_linear_emb: false + discriminator_causal: true + discriminator_max_pool: false + discriminator_act_after_linear: false + discriminator_dropout: 0.0 + discriminator_weight_norm: false + + generator_stride: 1 + generator_kernel: 4 + generator_bias: false + generator_dropout: 0.1 + + smoothness_weight: 0.5 + smoothing: 0 + smoothing_one_sided: false + gumbel: false + hard_gumbel: false + gradient_penalty: 1.5 + code_penalty: 4.0 + temp: [ 2,0.1,0.99995 ] + input_dim: 512 + + segmentation: + type: JOIN + mean_pool_join: false + remove_zeros: false diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu2.yaml b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu2.yaml new file mode 100644 index 0000000..5201422 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/gan/w2vu2.yaml @@ -0,0 +1,154 @@ +# @package _group_ + +common: + fp16: false + fp16_no_flatten_grads: true + log_format: json + log_interval: 100 + tensorboard_logdir: tb + reset_logging: false + suppress_crashes: false + +checkpoint: + save_interval: 1000 + save_interval_updates: 1000 + no_epoch_checkpoints: true + best_checkpoint_metric: weighted_lm_ppl + save_dir: . + +distributed_training: + distributed_world_size: 1 + +task: + _name: unpaired_audio_text + data: ??? + text_data: ??? + labels: phn + sort_by_length: false + unfiltered: false + max_length: null + append_eos: false + kenlm_path: ??? 
+ aux_target_postfix: km + +dataset: + num_workers: 6 + batch_size: 160 + skip_invalid_size_inputs_valid_test: true + valid_subset: valid + validate_interval: 1000 + validate_interval_updates: 1000 + +criterion: + _name: model + log_keys: + - accuracy_dense + - accuracy_token + - temp + - code_ppl + +optimization: + max_update: 150000 + clip_norm: 5.0 + lr: [0] + +optimizer: + _name: composite + groups: + generator: + lr: [0.00005] + lr_float: null + optimizer: + _name: adam + adam_betas: [0.5,0.98] + adam_eps: 1e-06 + weight_decay: 0 + amsgrad: false + lr_scheduler: + _name: fixed + warmup_updates: 0 + discriminator: + lr: [ 0.0003 ] + lr_float: null + optimizer: + _name: adam + adam_betas: [0.5,0.98] + adam_eps: 1e-06 + weight_decay: 0.0001 + amsgrad: false + lr_scheduler: + _name: fixed + warmup_updates: 0 + +lr_scheduler: pass_through + +model: + _name: wav2vec_u + + discriminator_dim: 384 + discriminator_depth: 2 + discriminator_kernel: 8 + discriminator_linear_emb: false + discriminator_causal: true + discriminator_max_pool: false + discriminator_act_after_linear: false + discriminator_dropout: 0.0 + discriminator_weight_norm: false + + generator_stride: 3 + generator_kernel: 9 + generator_bias: false + generator_dropout: 0.1 + generator_batch_norm: 30 + generator_residual: true + + smoothness_weight: 1.5 + smoothing: 0 + smoothing_one_sided: false + gumbel: false + hard_gumbel: false + gradient_penalty: 1.0 + code_penalty: 3.0 + temp: [ 2,0.1,0.99995 ] + input_dim: 1024 + mmi_weight: 0.5 + target_dim: 64 + + segmentation: + type: JOIN + mean_pool_join: false + remove_zeros: false + + +hydra: + job: + config: + override_dirname: + kv_sep: ':' + item_sep: '__' + exclude_keys: + - run_config + - distributed_training.distributed_port + - common.user_dir + - task.data + - task.kenlm_path + - task.text_data + - model.generator_layers + - task.labels + - task.force_model_seed + sweep: + dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}/${hydra.job.override_dirname} + subdir: ${hydra.job.num} + launcher: + submitit_folder: ${hydra.sweep.dir} + timeout_min: 3000 + cpus_per_task: 10 + gpus_per_node: 1 + tasks_per_node: 1 + mem_gb: 120 + nodes: 1 + name: ${env:PREFIX}_${hydra.job.config_name} + partition: devlab,learnlab,learnfair,scavenge + comment: intern_endding_soon + constraint: volta32gb + max_num_timeout: 30 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/generate/viterbi.yaml b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/generate/viterbi.yaml new file mode 100644 index 0000000..9c88bee --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/generate/viterbi.yaml @@ -0,0 +1,21 @@ +# @package _group_ + +fairseq: + task: + _name: unpaired_audio_text + labels: phn + data: ??? + sort_by_length: false + shuffle: false + text_data: '' + + common_eval: + path: ??? 
+ quiet: true + + dataset: + gen_subset: valid + batch_size: 1 + +w2l_decoder: VITERBI +post_process: silence diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/test.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/test.uid new file mode 100644 index 0000000..4010082 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/test.uid @@ -0,0 +1,192 @@ +FDHC0_SI1559 +FDHC0_SI2189 +FDHC0_SI929 +FDHC0_SX119 +FDHC0_SX209 +FDHC0_SX29 +FDHC0_SX299 +FDHC0_SX389 +FELC0_SI1386 +FELC0_SI2016 +FELC0_SI756 +FELC0_SX126 +FELC0_SX216 +FELC0_SX306 +FELC0_SX36 +FELC0_SX396 +FJLM0_SI1043 +FJLM0_SI1673 +FJLM0_SI2303 +FJLM0_SX143 +FJLM0_SX233 +FJLM0_SX323 +FJLM0_SX413 +FJLM0_SX53 +FMGD0_SI1564 +FMGD0_SI2194 +FMGD0_SI934 +FMGD0_SX124 +FMGD0_SX214 +FMGD0_SX304 +FMGD0_SX34 +FMGD0_SX394 +FMLD0_SI2185 +FMLD0_SI822 +FMLD0_SI925 +FMLD0_SX115 +FMLD0_SX205 +FMLD0_SX25 +FMLD0_SX295 +FMLD0_SX385 +FNLP0_SI1308 +FNLP0_SI1938 +FNLP0_SI678 +FNLP0_SX138 +FNLP0_SX228 +FNLP0_SX318 +FNLP0_SX408 +FNLP0_SX48 +FPAS0_SI1272 +FPAS0_SI2204 +FPAS0_SI944 +FPAS0_SX134 +FPAS0_SX224 +FPAS0_SX314 +FPAS0_SX404 +FPAS0_SX44 +FPKT0_SI1538 +FPKT0_SI2168 +FPKT0_SI908 +FPKT0_SX188 +FPKT0_SX278 +FPKT0_SX368 +FPKT0_SX8 +FPKT0_SX98 +MBPM0_SI1577 +MBPM0_SI1584 +MBPM0_SI947 +MBPM0_SX137 +MBPM0_SX227 +MBPM0_SX317 +MBPM0_SX407 +MBPM0_SX47 +MCMJ0_SI1094 +MCMJ0_SI464 +MCMJ0_SI602 +MCMJ0_SX104 +MCMJ0_SX14 +MCMJ0_SX194 +MCMJ0_SX284 +MCMJ0_SX374 +MDAB0_SI1039 +MDAB0_SI1669 +MDAB0_SI2299 +MDAB0_SX139 +MDAB0_SX229 +MDAB0_SX319 +MDAB0_SX409 +MDAB0_SX49 +MGRT0_SI1450 +MGRT0_SI2080 +MGRT0_SI820 +MGRT0_SX10 +MGRT0_SX100 +MGRT0_SX190 +MGRT0_SX280 +MGRT0_SX370 +MJDH0_SI1354 +MJDH0_SI1984 +MJDH0_SI724 +MJDH0_SX184 +MJDH0_SX274 +MJDH0_SX364 +MJDH0_SX4 +MJDH0_SX94 +MJLN0_SI1449 +MJLN0_SI2079 +MJLN0_SI819 +MJLN0_SX189 +MJLN0_SX279 +MJLN0_SX369 +MJLN0_SX9 +MJLN0_SX99 +MJMP0_SI1535 +MJMP0_SI1791 +MJMP0_SI905 +MJMP0_SX185 +MJMP0_SX275 +MJMP0_SX365 +MJMP0_SX5 +MJMP0_SX95 +MKLT0_SI1213 +MKLT0_SI1843 +MKLT0_SI583 +MKLT0_SX133 +MKLT0_SX223 +MKLT0_SX313 +MKLT0_SX403 +MKLT0_SX43 +MLLL0_SI1363 +MLLL0_SI1993 +MLLL0_SI733 +MLLL0_SX103 +MLLL0_SX13 +MLLL0_SX193 +MLLL0_SX283 +MLLL0_SX373 +MLNT0_SI1574 +MLNT0_SI1902 +MLNT0_SI642 +MLNT0_SX102 +MLNT0_SX12 +MLNT0_SX192 +MLNT0_SX282 +MLNT0_SX372 +MNJM0_SI1580 +MNJM0_SI2210 +MNJM0_SI950 +MNJM0_SX140 +MNJM0_SX230 +MNJM0_SX320 +MNJM0_SX410 +MNJM0_SX50 +MPAM0_SI1189 +MPAM0_SI1819 +MPAM0_SI1961 +MPAM0_SX109 +MPAM0_SX19 +MPAM0_SX199 +MPAM0_SX289 +MPAM0_SX379 +MTAS1_SI1473 +MTAS1_SI2098 +MTAS1_SI838 +MTAS1_SX118 +MTAS1_SX208 +MTAS1_SX28 +MTAS1_SX298 +MTAS1_SX388 +MTLS0_SI1370 +MTLS0_SI2000 +MTLS0_SI740 +MTLS0_SX110 +MTLS0_SX20 +MTLS0_SX200 +MTLS0_SX290 +MTLS0_SX380 +MWBT0_SI1553 +MWBT0_SI2183 +MWBT0_SI923 +MWBT0_SX113 +MWBT0_SX203 +MWBT0_SX23 +MWBT0_SX293 +MWBT0_SX383 +MWEW0_SI1361 +MWEW0_SI1991 +MWEW0_SI731 +MWEW0_SX101 +MWEW0_SX11 +MWEW0_SX191 +MWEW0_SX281 +MWEW0_SX371 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train.uid new file mode 100644 index 0000000..c39fd0b --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train.uid @@ -0,0 +1,3696 @@ +FAEM0_SI1392 +FAEM0_SI2022 +FAEM0_SI762 +FAEM0_SX132 +FAEM0_SX222 +FAEM0_SX312 +FAEM0_SX402 +FAEM0_SX42 +FAJW0_SI1263 +FAJW0_SI1893 +FAJW0_SI633 +FAJW0_SX183 +FAJW0_SX273 +FAJW0_SX3 +FAJW0_SX363 +FAJW0_SX93 +FALK0_SI1086 +FALK0_SI456 +FALK0_SI658 
+FALK0_SX186 +FALK0_SX276 +FALK0_SX366 +FALK0_SX6 +FALK0_SX96 +FALR0_SI1325 +FALR0_SI1955 +FALR0_SI695 +FALR0_SX155 +FALR0_SX245 +FALR0_SX335 +FALR0_SX425 +FALR0_SX65 +FAPB0_SI1063 +FAPB0_SI1693 +FAPB0_SI2323 +FAPB0_SX163 +FAPB0_SX253 +FAPB0_SX343 +FAPB0_SX433 +FAPB0_SX73 +FBAS0_SI1387 +FBAS0_SI1472 +FBAS0_SI2066 +FBAS0_SX127 +FBAS0_SX217 +FBAS0_SX307 +FBAS0_SX37 +FBAS0_SX397 +FBCG1_SI1612 +FBCG1_SI2242 +FBCG1_SI982 +FBCG1_SX172 +FBCG1_SX262 +FBCG1_SX352 +FBCG1_SX442 +FBCG1_SX82 +FBCH0_SI1586 +FBCH0_SI956 +FBCH0_SI959 +FBCH0_SX146 +FBCH0_SX236 +FBCH0_SX326 +FBCH0_SX416 +FBCH0_SX56 +FBJL0_SI1552 +FBJL0_SI2182 +FBJL0_SI922 +FBJL0_SX112 +FBJL0_SX202 +FBJL0_SX22 +FBJL0_SX292 +FBJL0_SX382 +FBLV0_SI1058 +FBLV0_SI1688 +FBLV0_SI2318 +FBLV0_SX158 +FBLV0_SX248 +FBLV0_SX338 +FBLV0_SX428 +FBLV0_SX68 +FBMH0_SI1136 +FBMH0_SI1766 +FBMH0_SI970 +FBMH0_SX146 +FBMH0_SX236 +FBMH0_SX326 +FBMH0_SX416 +FBMH0_SX56 +FBMJ0_SI1776 +FBMJ0_SI516 +FBMJ0_SI815 +FBMJ0_SX156 +FBMJ0_SX246 +FBMJ0_SX336 +FBMJ0_SX426 +FBMJ0_SX66 +FCAG0_SI1503 +FCAG0_SI1641 +FCAG0_SI2133 +FCAG0_SX153 +FCAG0_SX243 +FCAG0_SX333 +FCAG0_SX423 +FCAG0_SX63 +FCAJ0_SI1479 +FCAJ0_SI1804 +FCAJ0_SI849 +FCAJ0_SX129 +FCAJ0_SX219 +FCAJ0_SX309 +FCAJ0_SX39 +FCAJ0_SX399 +FCDR1_SI1186 +FCDR1_SI1816 +FCDR1_SI556 +FCDR1_SX106 +FCDR1_SX16 +FCDR1_SX196 +FCDR1_SX286 +FCDR1_SX376 +FCEG0_SI1248 +FCEG0_SI1878 +FCEG0_SI618 +FCEG0_SX168 +FCEG0_SX258 +FCEG0_SX348 +FCEG0_SX438 +FCEG0_SX78 +FCJF0_SI1027 +FCJF0_SI1657 +FCJF0_SI648 +FCJF0_SX127 +FCJF0_SX217 +FCJF0_SX307 +FCJF0_SX37 +FCJF0_SX397 +FCJS0_SI1607 +FCJS0_SI2237 +FCJS0_SI977 +FCJS0_SX167 +FCJS0_SX257 +FCJS0_SX347 +FCJS0_SX437 +FCJS0_SX77 +FCKE0_SI1111 +FCKE0_SI1741 +FCKE0_SI481 +FCKE0_SX121 +FCKE0_SX211 +FCKE0_SX301 +FCKE0_SX31 +FCKE0_SX391 +FCLT0_SI1438 +FCLT0_SI2068 +FCLT0_SI808 +FCLT0_SX178 +FCLT0_SX268 +FCLT0_SX358 +FCLT0_SX448 +FCLT0_SX88 +FCMG0_SI1142 +FCMG0_SI1242 +FCMG0_SI1872 +FCMG0_SX162 +FCMG0_SX252 +FCMG0_SX342 +FCMG0_SX432 +FCMG0_SX72 +FCMM0_SI1083 +FCMM0_SI1957 +FCMM0_SI453 +FCMM0_SX183 +FCMM0_SX273 +FCMM0_SX363 +FCMM0_SX420 +FCMM0_SX93 +FCRZ0_SI1913 +FCRZ0_SI2053 +FCRZ0_SI793 +FCRZ0_SX163 +FCRZ0_SX253 +FCRZ0_SX343 +FCRZ0_SX433 +FCRZ0_SX73 +FCYL0_SI1297 +FCYL0_SI1927 +FCYL0_SI667 +FCYL0_SX127 +FCYL0_SX217 +FCYL0_SX349 +FCYL0_SX37 +FCYL0_SX397 +FDAS1_SI1461 +FDAS1_SI2091 +FDAS1_SI831 +FDAS1_SX111 +FDAS1_SX201 +FDAS1_SX21 +FDAS1_SX291 +FDAS1_SX381 +FDAW0_SI1271 +FDAW0_SI1406 +FDAW0_SI2036 +FDAW0_SX146 +FDAW0_SX236 +FDAW0_SX326 +FDAW0_SX416 +FDAW0_SX56 +FDFB0_SI1318 +FDFB0_SI1948 +FDFB0_SI2010 +FDFB0_SX148 +FDFB0_SX238 +FDFB0_SX328 +FDFB0_SX418 +FDFB0_SX58 +FDJH0_SI1565 +FDJH0_SI2195 +FDJH0_SI935 +FDJH0_SX125 +FDJH0_SX215 +FDJH0_SX305 +FDJH0_SX35 +FDJH0_SX395 +FDKN0_SI1081 +FDKN0_SI1202 +FDKN0_SI1711 +FDKN0_SX181 +FDKN0_SX271 +FDKN0_SX361 +FDKN0_SX451 +FDKN0_SX91 +FDML0_SI1149 +FDML0_SI1779 +FDML0_SI2075 +FDML0_SX159 +FDML0_SX249 +FDML0_SX339 +FDML0_SX429 +FDML0_SX69 +FDMY0_SI1197 +FDMY0_SI567 +FDMY0_SI714 +FDMY0_SX117 +FDMY0_SX207 +FDMY0_SX27 +FDMY0_SX297 +FDMY0_SX387 +FDNC0_SI1278 +FDNC0_SI1908 +FDNC0_SI2287 +FDNC0_SX108 +FDNC0_SX18 +FDNC0_SX198 +FDNC0_SX288 +FDNC0_SX378 +FDTD0_SI1561 +FDTD0_SI2191 +FDTD0_SI931 +FDTD0_SX121 +FDTD0_SX211 +FDTD0_SX301 +FDTD0_SX321 +FDTD0_SX391 +FDXW0_SI1511 +FDXW0_SI2141 +FDXW0_SI881 +FDXW0_SX161 +FDXW0_SX251 +FDXW0_SX341 +FDXW0_SX431 +FDXW0_SX71 +FEAC0_SI1245 +FEAC0_SI1875 +FEAC0_SI615 +FEAC0_SX165 +FEAC0_SX255 +FEAC0_SX345 +FEAC0_SX435 +FEAC0_SX75 +FEAR0_SI1252 +FEAR0_SI1882 +FEAR0_SI622 +FEAR0_SX172 +FEAR0_SX262 +FEAR0_SX352 +FEAR0_SX442 +FEAR0_SX82 +FECD0_SI1418 
+FECD0_SI2048 +FECD0_SI788 +FECD0_SX158 +FECD0_SX248 +FECD0_SX338 +FECD0_SX428 +FECD0_SX68 +FEEH0_SI1112 +FEEH0_SI1742 +FEEH0_SI471 +FEEH0_SX122 +FEEH0_SX212 +FEEH0_SX302 +FEEH0_SX32 +FEEH0_SX392 +FEME0_SI1505 +FEME0_SI2135 +FEME0_SI875 +FEME0_SX155 +FEME0_SX245 +FEME0_SX335 +FEME0_SX425 +FEME0_SX65 +FETB0_SI1148 +FETB0_SI1778 +FETB0_SI518 +FETB0_SX158 +FETB0_SX248 +FETB0_SX338 +FETB0_SX428 +FETB0_SX68 +FEXM0_SI1101 +FEXM0_SI1731 +FEXM0_SI482 +FEXM0_SX111 +FEXM0_SX201 +FEXM0_SX291 +FEXM0_SX366 +FEXM0_SX381 +FGCS0_SI1486 +FGCS0_SI2116 +FGCS0_SI856 +FGCS0_SX136 +FGCS0_SX226 +FGCS0_SX316 +FGCS0_SX406 +FGCS0_SX46 +FGDP0_SI1618 +FGDP0_SI2248 +FGDP0_SI988 +FGDP0_SX178 +FGDP0_SX268 +FGDP0_SX358 +FGDP0_SX448 +FGDP0_SX88 +FGMB0_SI1145 +FGMB0_SI1775 +FGMB0_SI515 +FGMB0_SX155 +FGMB0_SX245 +FGMB0_SX335 +FGMB0_SX425 +FGMB0_SX65 +FGRW0_SI1152 +FGRW0_SI1782 +FGRW0_SI1990 +FGRW0_SX162 +FGRW0_SX252 +FGRW0_SX342 +FGRW0_SX432 +FGRW0_SX72 +FHLM0_SI1560 +FHLM0_SI2190 +FHLM0_SI930 +FHLM0_SX120 +FHLM0_SX210 +FHLM0_SX300 +FHLM0_SX349 +FHLM0_SX390 +FHXS0_SI1075 +FHXS0_SI2302 +FHXS0_SI2335 +FHXS0_SX175 +FHXS0_SX265 +FHXS0_SX355 +FHXS0_SX445 +FHXS0_SX85 +FJDM2_SI1582 +FJDM2_SI1964 +FJDM2_SI2212 +FJDM2_SX142 +FJDM2_SX232 +FJDM2_SX322 +FJDM2_SX412 +FJDM2_SX52 +FJEN0_SI1047 +FJEN0_SI1677 +FJEN0_SI2307 +FJEN0_SX147 +FJEN0_SX237 +FJEN0_SX327 +FJEN0_SX417 +FJEN0_SX57 +FJHK0_SI1022 +FJHK0_SI1652 +FJHK0_SI2282 +FJHK0_SX122 +FJHK0_SX212 +FJHK0_SX302 +FJHK0_SX32 +FJHK0_SX392 +FJKL0_SI1562 +FJKL0_SI2192 +FJKL0_SI932 +FJKL0_SX122 +FJKL0_SX212 +FJKL0_SX302 +FJKL0_SX32 +FJKL0_SX392 +FJLG0_SI1506 +FJLG0_SI1889 +FJLG0_SI2306 +FJLG0_SX179 +FJLG0_SX269 +FJLG0_SX359 +FJLG0_SX449 +FJLG0_SX89 +FJLR0_SI1231 +FJLR0_SI1861 +FJLR0_SI601 +FJLR0_SX151 +FJLR0_SX241 +FJLR0_SX331 +FJLR0_SX421 +FJLR0_SX61 +FJRB0_SI1302 +FJRB0_SI1932 +FJRB0_SI672 +FJRB0_SX132 +FJRB0_SX222 +FJRB0_SX312 +FJRB0_SX402 +FJRB0_SX42 +FJRP1_SI1432 +FJRP1_SI2062 +FJRP1_SI802 +FJRP1_SX172 +FJRP1_SX262 +FJRP1_SX352 +FJRP1_SX442 +FJRP1_SX82 +FJSK0_SI1052 +FJSK0_SI1682 +FJSK0_SI2312 +FJSK0_SX152 +FJSK0_SX242 +FJSK0_SX332 +FJSK0_SX422 +FJSK0_SX62 +FJSP0_SI1434 +FJSP0_SI1763 +FJSP0_SI804 +FJSP0_SX174 +FJSP0_SX264 +FJSP0_SX354 +FJSP0_SX444 +FJSP0_SX84 +FJWB1_SI2055 +FJWB1_SI748 +FJWB1_SI795 +FJWB1_SX165 +FJWB1_SX255 +FJWB1_SX345 +FJWB1_SX435 +FJWB1_SX75 +FJXM0_SI1211 +FJXM0_SI1971 +FJXM0_SI581 +FJXM0_SX131 +FJXM0_SX221 +FJXM0_SX311 +FJXM0_SX401 +FJXM0_SX41 +FJXP0_SI1122 +FJXP0_SI1752 +FJXP0_SI492 +FJXP0_SX132 +FJXP0_SX222 +FJXP0_SX312 +FJXP0_SX402 +FJXP0_SX42 +FKAA0_SI1208 +FKAA0_SI1838 +FKAA0_SI578 +FKAA0_SX128 +FKAA0_SX218 +FKAA0_SX308 +FKAA0_SX38 +FKAA0_SX398 +FKDE0_SI1141 +FKDE0_SI1771 +FKDE0_SI2221 +FKDE0_SX151 +FKDE0_SX241 +FKDE0_SX331 +FKDE0_SX421 +FKDE0_SX61 +FKDW0_SI1207 +FKDW0_SI1891 +FKDW0_SI577 +FKDW0_SX127 +FKDW0_SX217 +FKDW0_SX307 +FKDW0_SX37 +FKDW0_SX397 +FKFB0_SI1608 +FKFB0_SI2238 +FKFB0_SI978 +FKFB0_SX168 +FKFB0_SX258 +FKFB0_SX348 +FKFB0_SX438 +FKFB0_SX78 +FKKH0_SI1290 +FKKH0_SI1920 +FKKH0_SI660 +FKKH0_SX120 +FKKH0_SX210 +FKKH0_SX30 +FKKH0_SX300 +FKKH0_SX390 +FKLC0_SI1615 +FKLC0_SI2245 +FKLC0_SI985 +FKLC0_SX175 +FKLC0_SX265 +FKLC0_SX355 +FKLC0_SX445 +FKLC0_SX85 +FKLC1_SI1048 +FKLC1_SI1678 +FKLC1_SI2308 +FKLC1_SX148 +FKLC1_SX238 +FKLC1_SX328 +FKLC1_SX418 +FKLC1_SX58 +FKLH0_SI1257 +FKLH0_SI1887 +FKLH0_SI627 +FKLH0_SX177 +FKLH0_SX267 +FKLH0_SX357 +FKLH0_SX447 +FKLH0_SX87 +FKSR0_SI1117 +FKSR0_SI1747 +FKSR0_SI487 +FKSR0_SX161 +FKSR0_SX217 +FKSR0_SX366 +FKSR0_SX37 +FKSR0_SX397 +FLAC0_SI1339 +FLAC0_SI2161 +FLAC0_SI901 +FLAC0_SX181 +FLAC0_SX271 +FLAC0_SX361 +FLAC0_SX451 
+FLAC0_SX91 +FLAG0_SI1464 +FLAG0_SI2094 +FLAG0_SI834 +FLAG0_SX114 +FLAG0_SX204 +FLAG0_SX24 +FLAG0_SX294 +FLAG0_SX384 +FLEH0_SI1051 +FLEH0_SI1681 +FLEH0_SI2311 +FLEH0_SX151 +FLEH0_SX241 +FLEH0_SX331 +FLEH0_SX421 +FLEH0_SX61 +FLET0_SI1137 +FLET0_SI1767 +FLET0_SI507 +FLET0_SX147 +FLET0_SX237 +FLET0_SX277 +FLET0_SX417 +FLET0_SX57 +FLHD0_SI1344 +FLHD0_SI1827 +FLHD0_SI1974 +FLHD0_SX174 +FLHD0_SX264 +FLHD0_SX354 +FLHD0_SX444 +FLHD0_SX84 +FLJA0_SI1078 +FLJA0_SI1708 +FLJA0_SI2338 +FLJA0_SX178 +FLJA0_SX268 +FLJA0_SX358 +FLJA0_SX448 +FLJA0_SX88 +FLJD0_SI1516 +FLJD0_SI2146 +FLJD0_SI886 +FLJD0_SX166 +FLJD0_SX256 +FLJD0_SX346 +FLJD0_SX436 +FLJD0_SX76 +FLJG0_SI1611 +FLJG0_SI2241 +FLJG0_SI981 +FLJG0_SX171 +FLJG0_SX261 +FLJG0_SX351 +FLJG0_SX441 +FLJG0_SX81 +FLKM0_SI1880 +FLKM0_SI620 +FLKM0_SI686 +FLKM0_SX116 +FLKM0_SX260 +FLKM0_SX350 +FLKM0_SX440 +FLKM0_SX80 +FLMA0_SI1243 +FLMA0_SI1873 +FLMA0_SI613 +FLMA0_SX163 +FLMA0_SX253 +FLMA0_SX343 +FLMA0_SX433 +FLMA0_SX73 +FLMC0_SI1372 +FLMC0_SI2002 +FLMC0_SI742 +FLMC0_SX112 +FLMC0_SX22 +FLMC0_SX292 +FLMC0_SX336 +FLMC0_SX382 +FLMK0_SI1035 +FLMK0_SI1229 +FLMK0_SI2295 +FLMK0_SX135 +FLMK0_SX225 +FLMK0_SX315 +FLMK0_SX405 +FLMK0_SX45 +FLOD0_SI1287 +FLOD0_SI1917 +FLOD0_SI657 +FLOD0_SX117 +FLOD0_SX171 +FLOD0_SX207 +FLOD0_SX297 +FLOD0_SX387 +FLTM0_SI1070 +FLTM0_SI1700 +FLTM0_SI2330 +FLTM0_SX170 +FLTM0_SX260 +FLTM0_SX350 +FLTM0_SX440 +FLTM0_SX80 +FMAH1_SI1509 +FMAH1_SI2139 +FMAH1_SI879 +FMAH1_SX159 +FMAH1_SX249 +FMAH1_SX339 +FMAH1_SX429 +FMAH1_SX69 +FMBG0_SI1160 +FMBG0_SI1790 +FMBG0_SI2264 +FMBG0_SX260 +FMBG0_SX3 +FMBG0_SX350 +FMBG0_SX440 +FMBG0_SX80 +FMEM0_SI1377 +FMEM0_SI2007 +FMEM0_SI747 +FMEM0_SX117 +FMEM0_SX207 +FMEM0_SX297 +FMEM0_SX333 +FMEM0_SX387 +FMJB0_SI1177 +FMJB0_SI1807 +FMJB0_SI547 +FMJB0_SX187 +FMJB0_SX277 +FMJB0_SX367 +FMJB0_SX7 +FMJB0_SX97 +FMJF0_SI1254 +FMJF0_SI1884 +FMJF0_SI624 +FMJF0_SX174 +FMJF0_SX264 +FMJF0_SX354 +FMJF0_SX444 +FMJF0_SX84 +FMJU0_SI1389 +FMJU0_SI2019 +FMJU0_SI759 +FMJU0_SX129 +FMJU0_SX219 +FMJU0_SX309 +FMJU0_SX39 +FMJU0_SX399 +FMKC0_SI1041 +FMKC0_SI1072 +FMKC0_SI1702 +FMKC0_SX172 +FMKC0_SX262 +FMKC0_SX352 +FMKC0_SX442 +FMKC0_SX82 +FMKF0_SI1018 +FMKF0_SI1536 +FMKF0_SI906 +FMKF0_SX186 +FMKF0_SX276 +FMKF0_SX366 +FMKF0_SX6 +FMKF0_SX96 +FMMH0_SI1537 +FMMH0_SI2167 +FMMH0_SI907 +FMMH0_SX187 +FMMH0_SX367 +FMMH0_SX420 +FMMH0_SX7 +FMMH0_SX97 +FMPG0_SI1602 +FMPG0_SI2232 +FMPG0_SI972 +FMPG0_SX162 +FMPG0_SX252 +FMPG0_SX342 +FMPG0_SX432 +FMPG0_SX72 +FNKL0_SI1522 +FNKL0_SI2152 +FNKL0_SI892 +FNKL0_SX172 +FNKL0_SX196 +FNKL0_SX262 +FNKL0_SX442 +FNKL0_SX82 +FNTB0_SI1203 +FNTB0_SI573 +FNTB0_SI679 +FNTB0_SX123 +FNTB0_SX213 +FNTB0_SX303 +FNTB0_SX33 +FNTB0_SX393 +FPAB1_SI1471 +FPAB1_SI2101 +FPAB1_SI841 +FPAB1_SX121 +FPAB1_SX211 +FPAB1_SX301 +FPAB1_SX31 +FPAB1_SX391 +FPAC0_SI1921 +FPAC0_SI2011 +FPAC0_SI661 +FPAC0_SX121 +FPAC0_SX211 +FPAC0_SX301 +FPAC0_SX31 +FPAC0_SX391 +FPAD0_SI1346 +FPAD0_SI1976 +FPAD0_SI716 +FPAD0_SX176 +FPAD0_SX266 +FPAD0_SX356 +FPAD0_SX446 +FPAD0_SX86 +FPAF0_SI1054 +FPAF0_SI1684 +FPAF0_SI2314 +FPAF0_SX154 +FPAF0_SX244 +FPAF0_SX334 +FPAF0_SX424 +FPAF0_SX64 +FPAZ0_SI1593 +FPAZ0_SI2223 +FPAZ0_SI963 +FPAZ0_SX153 +FPAZ0_SX243 +FPAZ0_SX27 +FPAZ0_SX423 +FPAZ0_SX63 +FPJF0_SI1046 +FPJF0_SI1259 +FPJF0_SI1676 +FPJF0_SX146 +FPJF0_SX236 +FPJF0_SX326 +FPJF0_SX352 +FPJF0_SX56 +FPLS0_SI1590 +FPLS0_SI2220 +FPLS0_SI960 +FPLS0_SX150 +FPLS0_SX240 +FPLS0_SX3 +FPLS0_SX330 +FPLS0_SX60 +FPMY0_SI1153 +FPMY0_SI1783 +FPMY0_SI523 +FPMY0_SX163 +FPMY0_SX196 +FPMY0_SX253 +FPMY0_SX343 +FPMY0_SX73 +FREH0_SI1315 +FREH0_SI1945 +FREH0_SI685 +FREH0_SX145 +FREH0_SX235 +FREH0_SX325 
+FREH0_SX415 +FREH0_SX55 +FRJB0_SI1427 +FRJB0_SI1470 +FRJB0_SI1794 +FRJB0_SX167 +FRJB0_SX257 +FRJB0_SX347 +FRJB0_SX437 +FRJB0_SX77 +FRLL0_SI1514 +FRLL0_SI805 +FRLL0_SI884 +FRLL0_SX164 +FRLL0_SX254 +FRLL0_SX344 +FRLL0_SX434 +FRLL0_SX74 +FSAG0_SI1323 +FSAG0_SI1953 +FSAG0_SI693 +FSAG0_SX153 +FSAG0_SX243 +FSAG0_SX333 +FSAG0_SX423 +FSAG0_SX63 +FSAH0_SI1244 +FSAH0_SI1874 +FSAH0_SI614 +FSAH0_SX164 +FSAH0_SX327 +FSAH0_SX344 +FSAH0_SX434 +FSAH0_SX74 +FSAK0_SI1300 +FSAK0_SI1930 +FSAK0_SI670 +FSAK0_SX130 +FSAK0_SX220 +FSAK0_SX310 +FSAK0_SX40 +FSAK0_SX400 +FSBK0_SI1069 +FSBK0_SI1699 +FSBK0_SI2329 +FSBK0_SX169 +FSBK0_SX259 +FSBK0_SX349 +FSBK0_SX439 +FSBK0_SX79 +FSCN0_SI1886 +FSCN0_SI626 +FSCN0_SI705 +FSCN0_SX176 +FSCN0_SX266 +FSCN0_SX356 +FSCN0_SX446 +FSCN0_SX86 +FSDC0_SI1312 +FSDC0_SI1942 +FSDC0_SI2234 +FSDC0_SX142 +FSDC0_SX232 +FSDC0_SX322 +FSDC0_SX412 +FSDC0_SX52 +FSDJ0_SI1115 +FSDJ0_SI1745 +FSDJ0_SI485 +FSDJ0_SX125 +FSDJ0_SX215 +FSDJ0_SX305 +FSDJ0_SX35 +FSDJ0_SX395 +FSGF0_SI1557 +FSGF0_SI2187 +FSGF0_SI927 +FSGF0_SX117 +FSGF0_SX207 +FSGF0_SX27 +FSGF0_SX297 +FSGF0_SX387 +FSJG0_SI1570 +FSJG0_SI2200 +FSJG0_SI940 +FSJG0_SX130 +FSJG0_SX220 +FSJG0_SX310 +FSJG0_SX40 +FSJG0_SX400 +FSJK1_SI1025 +FSJK1_SI2285 +FSJK1_SI696 +FSJK1_SX125 +FSJK1_SX215 +FSJK1_SX305 +FSJK1_SX35 +FSJK1_SX395 +FSJS0_SI1171 +FSJS0_SI1801 +FSJS0_SI541 +FSJS0_SX181 +FSJS0_SX271 +FSJS0_SX361 +FSJS0_SX451 +FSJS0_SX91 +FSJW0_SI1333 +FSJW0_SI1963 +FSJW0_SI703 +FSJW0_SX163 +FSJW0_SX253 +FSJW0_SX343 +FSJW0_SX433 +FSJW0_SX73 +FSKC0_SI1416 +FSKC0_SI2046 +FSKC0_SI786 +FSKC0_SX156 +FSKC0_SX246 +FSKC0_SX336 +FSKC0_SX426 +FSKC0_SX66 +FSKL0_SI1529 +FSKL0_SI2159 +FSKL0_SI899 +FSKL0_SX179 +FSKL0_SX269 +FSKL0_SX359 +FSKL0_SX449 +FSKL0_SX89 +FSKP0_SI1098 +FSKP0_SI1728 +FSKP0_SI468 +FSKP0_SX108 +FSKP0_SX18 +FSKP0_SX198 +FSKP0_SX288 +FSKP0_SX378 +FSLS0_SI1056 +FSLS0_SI1686 +FSLS0_SI2316 +FSLS0_SX156 +FSLS0_SX202 +FSLS0_SX246 +FSLS0_SX426 +FSLS0_SX66 +FSMA0_SI1621 +FSMA0_SI2251 +FSMA0_SI991 +FSMA0_SX181 +FSMA0_SX271 +FSMA0_SX361 +FSMA0_SX451 +FSMA0_SX91 +FSMM0_SI1314 +FSMM0_SI1944 +FSMM0_SI684 +FSMM0_SX144 +FSMM0_SX234 +FSMM0_SX324 +FSMM0_SX414 +FSMM0_SX54 +FSMS1_SI1504 +FSMS1_SI2134 +FSMS1_SI874 +FSMS1_SX154 +FSMS1_SX244 +FSMS1_SX334 +FSMS1_SX347 +FSMS1_SX64 +FSPM0_SI1241 +FSPM0_SI1871 +FSPM0_SI611 +FSPM0_SX161 +FSPM0_SX251 +FSPM0_SX341 +FSPM0_SX431 +FSPM0_SX71 +FSRH0_SI1719 +FSRH0_SI1931 +FSRH0_SI671 +FSRH0_SX131 +FSRH0_SX221 +FSRH0_SX311 +FSRH0_SX401 +FSRH0_SX41 +FSSB0_SI1082 +FSSB0_SI1712 +FSSB0_SI2342 +FSSB0_SX182 +FSSB0_SX272 +FSSB0_SX362 +FSSB0_SX452 +FSSB0_SX92 +FTAJ0_SI1329 +FTAJ0_SI474 +FTAJ0_SI699 +FTAJ0_SX159 +FTAJ0_SX249 +FTAJ0_SX339 +FTAJ0_SX429 +FTAJ0_SX69 +FTBR0_SI1402 +FTBR0_SI2181 +FTBR0_SI921 +FTBR0_SX111 +FTBR0_SX201 +FTBR0_SX21 +FTBR0_SX291 +FTBR0_SX381 +FTBW0_SI1345 +FTBW0_SI1975 +FTBW0_SI715 +FTBW0_SX175 +FTBW0_SX265 +FTBW0_SX355 +FTBW0_SX445 +FTBW0_SX85 +FTLG0_SI1743 +FTLG0_SI483 +FTLG0_SI840 +FTLG0_SX123 +FTLG0_SX213 +FTLG0_SX303 +FTLG0_SX33 +FTLG0_SX393 +FTMG0_SI1532 +FTMG0_SI2162 +FTMG0_SI902 +FTMG0_SX182 +FTMG0_SX272 +FTMG0_SX362 +FTMG0_SX452 +FTMG0_SX92 +FVFB0_SI1032 +FVFB0_SI1510 +FVFB0_SI2292 +FVFB0_SX132 +FVFB0_SX222 +FVFB0_SX312 +FVFB0_SX402 +FVFB0_SX42 +FVKB0_SI1159 +FVKB0_SI1789 +FVKB0_SI529 +FVKB0_SX169 +FVKB0_SX259 +FVKB0_SX349 +FVKB0_SX439 +FVKB0_SX79 +FVMH0_SI1466 +FVMH0_SI2096 +FVMH0_SI836 +FVMH0_SX116 +FVMH0_SX206 +FVMH0_SX26 +FVMH0_SX296 +FVMH0_SX386 +MABC0_SI1620 +MABC0_SI2041 +MABC0_SI781 +MABC0_SX151 +MABC0_SX241 +MABC0_SX331 +MABC0_SX421 +MABC0_SX61 +MADC0_SI1367 +MADC0_SI1997 +MADC0_SI737 +MADC0_SX107 
+MADC0_SX17 +MADC0_SX197 +MADC0_SX287 +MADC0_SX377 +MADD0_SI1295 +MADD0_SI1798 +MADD0_SI538 +MADD0_SX178 +MADD0_SX268 +MADD0_SX358 +MADD0_SX448 +MADD0_SX88 +MAEB0_SI1411 +MAEB0_SI2250 +MAEB0_SI990 +MAEB0_SX180 +MAEB0_SX270 +MAEB0_SX360 +MAEB0_SX450 +MAEB0_SX90 +MAEO0_SI1326 +MAEO0_SI1655 +MAEO0_SI1956 +MAEO0_SX156 +MAEO0_SX246 +MAEO0_SX336 +MAEO0_SX426 +MAEO0_SX66 +MAFM0_SI1569 +MAFM0_SI2199 +MAFM0_SI939 +MAFM0_SX129 +MAFM0_SX219 +MAFM0_SX309 +MAFM0_SX39 +MAFM0_SX399 +MAJP0_SI1074 +MAJP0_SI1704 +MAJP0_SI2334 +MAJP0_SX174 +MAJP0_SX264 +MAJP0_SX354 +MAJP0_SX444 +MAJP0_SX84 +MAKB0_SI1016 +MAKB0_SI1646 +MAKB0_SI2276 +MAKB0_SX116 +MAKB0_SX206 +MAKB0_SX26 +MAKB0_SX296 +MAKB0_SX386 +MAKR0_SI1352 +MAKR0_SI1982 +MAKR0_SI722 +MAKR0_SX182 +MAKR0_SX272 +MAKR0_SX362 +MAKR0_SX452 +MAKR0_SX92 +MAPV0_SI1293 +MAPV0_SI1923 +MAPV0_SI663 +MAPV0_SX123 +MAPV0_SX213 +MAPV0_SX303 +MAPV0_SX33 +MAPV0_SX393 +MARC0_SI1188 +MARC0_SI1818 +MARC0_SI558 +MARC0_SX108 +MARC0_SX18 +MARC0_SX198 +MARC0_SX288 +MARC0_SX378 +MARW0_SI1276 +MARW0_SI1906 +MARW0_SI646 +MARW0_SX106 +MARW0_SX16 +MARW0_SX286 +MARW0_SX349 +MARW0_SX376 +MBAR0_SI1319 +MBAR0_SI1949 +MBAR0_SI689 +MBAR0_SX149 +MBAR0_SX239 +MBAR0_SX329 +MBAR0_SX419 +MBAR0_SX59 +MBBR0_SI1055 +MBBR0_SI1685 +MBBR0_SI2315 +MBBR0_SX155 +MBBR0_SX245 +MBBR0_SX335 +MBBR0_SX425 +MBBR0_SX65 +MBCG0_SI2217 +MBCG0_SI486 +MBCG0_SI957 +MBCG0_SX147 +MBCG0_SX237 +MBCG0_SX327 +MBCG0_SX417 +MBCG0_SX57 +MBEF0_SI1281 +MBEF0_SI1911 +MBEF0_SI651 +MBEF0_SX111 +MBEF0_SX201 +MBEF0_SX21 +MBEF0_SX291 +MBEF0_SX381 +MBGT0_SI1341 +MBGT0_SI1841 +MBGT0_SI711 +MBGT0_SX171 +MBGT0_SX261 +MBGT0_SX351 +MBGT0_SX441 +MBGT0_SX81 +MBJV0_SI1247 +MBJV0_SI1877 +MBJV0_SI617 +MBJV0_SX167 +MBJV0_SX257 +MBJV0_SX347 +MBJV0_SX437 +MBJV0_SX77 +MBMA0_SI1222 +MBMA0_SI1852 +MBMA0_SI592 +MBMA0_SX142 +MBMA0_SX232 +MBMA0_SX322 +MBMA0_SX412 +MBMA0_SX52 +MBMA1_SI2207 +MBMA1_SI2214 +MBMA1_SI954 +MBMA1_SX144 +MBMA1_SX234 +MBMA1_SX324 +MBMA1_SX414 +MBMA1_SX54 +MBML0_SI1169 +MBML0_SI1799 +MBML0_SI539 +MBML0_SX179 +MBML0_SX269 +MBML0_SX359 +MBML0_SX449 +MBML0_SX89 +MBOM0_SI1014 +MBOM0_SI1644 +MBOM0_SI2274 +MBOM0_SX114 +MBOM0_SX204 +MBOM0_SX294 +MBOM0_SX311 +MBOM0_SX384 +MBSB0_SI1353 +MBSB0_SI1983 +MBSB0_SI723 +MBSB0_SX183 +MBSB0_SX273 +MBSB0_SX3 +MBSB0_SX363 +MBSB0_SX93 +MBTH0_SI2102 +MBTH0_SI505 +MBTH0_SI757 +MBTH0_SX122 +MBTH0_SX212 +MBTH0_SX302 +MBTH0_SX32 +MBTH0_SX392 +MBWP0_SI1531 +MBWP0_SI1969 +MBWP0_SI709 +MBWP0_SX169 +MBWP0_SX259 +MBWP0_SX349 +MBWP0_SX439 +MBWP0_SX79 +MCAE0_SI1447 +MCAE0_SI2077 +MCAE0_SI817 +MCAE0_SX187 +MCAE0_SX277 +MCAE0_SX367 +MCAE0_SX7 +MCAE0_SX97 +MCAL0_SI1138 +MCAL0_SI1768 +MCAL0_SI508 +MCAL0_SX148 +MCAL0_SX238 +MCAL0_SX328 +MCAL0_SX418 +MCAL0_SX58 +MCDC0_SI1292 +MCDC0_SI1922 +MCDC0_SI662 +MCDC0_SX122 +MCDC0_SX212 +MCDC0_SX302 +MCDC0_SX32 +MCDC0_SX392 +MCDD0_SI1513 +MCDD0_SI2143 +MCDD0_SI883 +MCDD0_SX163 +MCDD0_SX253 +MCDD0_SX343 +MCDD0_SX433 +MCDD0_SX73 +MCDR0_SI1154 +MCDR0_SI1784 +MCDR0_SI524 +MCDR0_SX164 +MCDR0_SX254 +MCDR0_SX344 +MCDR0_SX434 +MCDR0_SX74 +MCEF0_SI1135 +MCEF0_SI1765 +MCEF0_SI842 +MCEF0_SX145 +MCEF0_SX235 +MCEF0_SX325 +MCEF0_SX415 +MCEF0_SX55 +MCEW0_SI1442 +MCEW0_SI2072 +MCEW0_SI812 +MCEW0_SX182 +MCEW0_SX272 +MCEW0_SX362 +MCEW0_SX452 +MCEW0_SX92 +MCHL0_SI1347 +MCHL0_SI1404 +MCHL0_SI1977 +MCHL0_SX177 +MCHL0_SX267 +MCHL0_SX357 +MCHL0_SX447 +MCHL0_SX87 +MCLK0_SI1660 +MCLK0_SI2290 +MCLK0_SI650 +MCLK0_SX130 +MCLK0_SX220 +MCLK0_SX310 +MCLK0_SX40 +MCLK0_SX400 +MCLM0_SI1456 +MCLM0_SI2086 +MCLM0_SI826 +MCLM0_SX106 +MCLM0_SX16 +MCLM0_SX196 +MCLM0_SX286 +MCLM0_SX376 +MCPM0_SI1194 +MCPM0_SI1824 
+MCPM0_SI564 +MCPM0_SX114 +MCPM0_SX204 +MCPM0_SX24 +MCPM0_SX294 +MCPM0_SX384 +MCRE0_SI1121 +MCRE0_SI1725 +MCRE0_SI1751 +MCRE0_SX131 +MCRE0_SX221 +MCRE0_SX24 +MCRE0_SX401 +MCRE0_SX41 +MCSS0_SI1380 +MCSS0_SI688 +MCSS0_SI750 +MCSS0_SX120 +MCSS0_SX210 +MCSS0_SX30 +MCSS0_SX300 +MCSS0_SX390 +MCTH0_SI1209 +MCTH0_SI1839 +MCTH0_SI579 +MCTH0_SX129 +MCTH0_SX219 +MCTH0_SX309 +MCTH0_SX39 +MCTH0_SX399 +MCTM0_SI1350 +MCTM0_SI1980 +MCTM0_SI720 +MCTM0_SX180 +MCTM0_SX270 +MCTM0_SX360 +MCTM0_SX450 +MCTM0_SX90 +MCXM0_SI1351 +MCXM0_SI1981 +MCXM0_SI721 +MCXM0_SX181 +MCXM0_SX271 +MCXM0_SX361 +MCXM0_SX451 +MCXM0_SX91 +MDAC0_SI1261 +MDAC0_SI1837 +MDAC0_SI631 +MDAC0_SX181 +MDAC0_SX271 +MDAC0_SX361 +MDAC0_SX451 +MDAC0_SX91 +MDAS0_SI1266 +MDAS0_SI1896 +MDAS0_SI636 +MDAS0_SX186 +MDAS0_SX21 +MDAS0_SX276 +MDAS0_SX6 +MDAS0_SX96 +MDBB1_SI1006 +MDBB1_SI1636 +MDBB1_SI2056 +MDBB1_SX106 +MDBB1_SX16 +MDBB1_SX196 +MDBB1_SX286 +MDBB1_SX376 +MDBP0_SI1158 +MDBP0_SI1788 +MDBP0_SI528 +MDBP0_SX168 +MDBP0_SX258 +MDBP0_SX348 +MDBP0_SX438 +MDBP0_SX78 +MDCD0_SI1415 +MDCD0_SI2045 +MDCD0_SI785 +MDCD0_SX155 +MDCD0_SX245 +MDCD0_SX335 +MDCD0_SX425 +MDCD0_SX65 +MDCM0_SI1480 +MDCM0_SI2110 +MDCM0_SI850 +MDCM0_SX130 +MDCM0_SX220 +MDCM0_SX310 +MDCM0_SX40 +MDCM0_SX400 +MDDC0_SI1419 +MDDC0_SI2049 +MDDC0_SI789 +MDDC0_SX159 +MDDC0_SX249 +MDDC0_SX339 +MDDC0_SX429 +MDDC0_SX69 +MDED0_SI1170 +MDED0_SI1800 +MDED0_SI540 +MDED0_SX180 +MDED0_SX270 +MDED0_SX360 +MDED0_SX450 +MDED0_SX90 +MDEF0_SI1123 +MDEF0_SI1563 +MDEF0_SI2193 +MDEF0_SX123 +MDEF0_SX213 +MDEF0_SX303 +MDEF0_SX33 +MDEF0_SX393 +MDEM0_SI1868 +MDEM0_SI608 +MDEM0_SI800 +MDEM0_SX158 +MDEM0_SX248 +MDEM0_SX338 +MDEM0_SX428 +MDEM0_SX68 +MDHL0_SI1439 +MDHL0_SI2069 +MDHL0_SI809 +MDHL0_SX179 +MDHL0_SX269 +MDHL0_SX359 +MDHL0_SX449 +MDHL0_SX89 +MDHS0_SI1530 +MDHS0_SI2160 +MDHS0_SI900 +MDHS0_SX180 +MDHS0_SX270 +MDHS0_SX360 +MDHS0_SX450 +MDHS0_SX90 +MDJM0_SI1455 +MDJM0_SI2085 +MDJM0_SI825 +MDJM0_SX105 +MDJM0_SX15 +MDJM0_SX195 +MDJM0_SX285 +MDJM0_SX375 +MDKS0_SI1066 +MDKS0_SI1696 +MDKS0_SI2326 +MDKS0_SX166 +MDKS0_SX256 +MDKS0_SX346 +MDKS0_SX436 +MDKS0_SX76 +MDLB0_SI1306 +MDLB0_SI1936 +MDLB0_SI676 +MDLB0_SX136 +MDLB0_SX226 +MDLB0_SX316 +MDLB0_SX406 +MDLB0_SX46 +MDLC0_SI1395 +MDLC0_SI2025 +MDLC0_SI765 +MDLC0_SX135 +MDLC0_SX225 +MDLC0_SX315 +MDLC0_SX405 +MDLC0_SX45 +MDLC1_SI1435 +MDLC1_SI2065 +MDLC1_SI2144 +MDLC1_SX175 +MDLC1_SX265 +MDLC1_SX355 +MDLC1_SX445 +MDLC1_SX85 +MDLC2_SI1614 +MDLC2_SI2244 +MDLC2_SI984 +MDLC2_SX174 +MDLC2_SX264 +MDLC2_SX354 +MDLC2_SX444 +MDLC2_SX84 +MDLH0_SI1960 +MDLH0_SI574 +MDLH0_SI700 +MDLH0_SX160 +MDLH0_SX250 +MDLH0_SX340 +MDLH0_SX430 +MDLH0_SX70 +MDLM0_SI1234 +MDLM0_SI1864 +MDLM0_SI604 +MDLM0_SX154 +MDLM0_SX244 +MDLM0_SX334 +MDLM0_SX424 +MDLM0_SX64 +MDLR0_SI1233 +MDLR0_SI1863 +MDLR0_SI603 +MDLR0_SX153 +MDLR0_SX243 +MDLR0_SX333 +MDLR0_SX423 +MDLR0_SX63 +MDLR1_SI1299 +MDLR1_SI1929 +MDLR1_SI669 +MDLR1_SX129 +MDLR1_SX219 +MDLR1_SX309 +MDLR1_SX39 +MDLR1_SX399 +MDMA0_SI1238 +MDMA0_SI1430 +MDMA0_SI2060 +MDMA0_SX170 +MDMA0_SX260 +MDMA0_SX350 +MDMA0_SX440 +MDMA0_SX80 +MDMT0_SI1832 +MDMT0_SI2341 +MDMT0_SI572 +MDMT0_SX122 +MDMT0_SX212 +MDMT0_SX302 +MDMT0_SX32 +MDMT0_SX392 +MDNS0_SI1011 +MDNS0_SI2271 +MDNS0_SI873 +MDNS0_SX111 +MDNS0_SX201 +MDNS0_SX21 +MDNS0_SX291 +MDNS0_SX381 +MDPB0_SI1760 +MDPB0_SI2126 +MDPB0_SI866 +MDPB0_SX146 +MDPB0_SX236 +MDPB0_SX326 +MDPB0_SX416 +MDPB0_SX56 +MDPK0_SI1053 +MDPK0_SI1683 +MDPK0_SI552 +MDPK0_SX153 +MDPK0_SX243 +MDPK0_SX333 +MDPK0_SX423 +MDPK0_SX63 +MDPS0_SI1651 +MDPS0_SI1979 +MDPS0_SI719 +MDPS0_SX179 +MDPS0_SX269 +MDPS0_SX359 +MDPS0_SX449 +MDPS0_SX89 +MDRD0_SI1382 
+MDRD0_SI2012 +MDRD0_SI752 +MDRD0_SX122 +MDRD0_SX212 +MDRD0_SX302 +MDRD0_SX32 +MDRD0_SX392 +MDSJ0_SI1462 +MDSJ0_SI2092 +MDSJ0_SI832 +MDSJ0_SX112 +MDSJ0_SX22 +MDSJ0_SX292 +MDSJ0_SX382 +MDSJ0_SX438 +MDSS0_SI1881 +MDSS0_SI2087 +MDSS0_SI621 +MDSS0_SX171 +MDSS0_SX261 +MDSS0_SX351 +MDSS0_SX441 +MDSS0_SX81 +MDSS1_SI1327 +MDSS1_SI1713 +MDSS1_SI697 +MDSS1_SX157 +MDSS1_SX247 +MDSS1_SX337 +MDSS1_SX427 +MDSS1_SX67 +MDTB0_SI1200 +MDTB0_SI1830 +MDTB0_SI570 +MDTB0_SX120 +MDTB0_SX210 +MDTB0_SX300 +MDTB0_SX321 +MDTB0_SX390 +MDWD0_SI1260 +MDWD0_SI1890 +MDWD0_SI557 +MDWD0_SX180 +MDWD0_SX270 +MDWD0_SX360 +MDWD0_SX450 +MDWD0_SX90 +MDWH0_SI1168 +MDWH0_SI1925 +MDWH0_SI665 +MDWH0_SX125 +MDWH0_SX215 +MDWH0_SX305 +MDWH0_SX35 +MDWH0_SX395 +MDWM0_SI1546 +MDWM0_SI2176 +MDWM0_SI916 +MDWM0_SX106 +MDWM0_SX16 +MDWM0_SX286 +MDWM0_SX376 +MDWM0_SX433 +MEAL0_SI1547 +MEAL0_SI2177 +MEAL0_SI917 +MEAL0_SX107 +MEAL0_SX197 +MEAL0_SX287 +MEAL0_SX347 +MEAL0_SX377 +MEDR0_SI1374 +MEDR0_SI2004 +MEDR0_SI744 +MEDR0_SX114 +MEDR0_SX204 +MEDR0_SX24 +MEDR0_SX294 +MEDR0_SX384 +MEFG0_SI465 +MEFG0_SI491 +MEFG0_SI598 +MEFG0_SX105 +MEFG0_SX15 +MEFG0_SX195 +MEFG0_SX285 +MEFG0_SX375 +MEGJ0_SI1337 +MEGJ0_SI1967 +MEGJ0_SI707 +MEGJ0_SX167 +MEGJ0_SX257 +MEGJ0_SX3 +MEGJ0_SX437 +MEGJ0_SX77 +MEJL0_SI1592 +MEJL0_SI1654 +MEJL0_SI962 +MEJL0_SX152 +MEJL0_SX242 +MEJL0_SX332 +MEJL0_SX422 +MEJL0_SX62 +MEJS0_SI1240 +MEJS0_SI1870 +MEJS0_SI610 +MEJS0_SX160 +MEJS0_SX250 +MEJS0_SX340 +MEJS0_SX430 +MEJS0_SX70 +MESG0_SI1332 +MESG0_SI1962 +MESG0_SI702 +MESG0_SX162 +MESG0_SX252 +MESG0_SX342 +MESG0_SX432 +MESG0_SX72 +MESJ0_SI2039 +MESJ0_SI2257 +MESJ0_SI997 +MESJ0_SX187 +MESJ0_SX277 +MESJ0_SX367 +MESJ0_SX7 +MESJ0_SX97 +MEWM0_SI1348 +MEWM0_SI1978 +MEWM0_SI718 +MEWM0_SX178 +MEWM0_SX268 +MEWM0_SX358 +MEWM0_SX448 +MEWM0_SX88 +MFER0_SI1492 +MFER0_SI2122 +MFER0_SI862 +MFER0_SX142 +MFER0_SX232 +MFER0_SX322 +MFER0_SX412 +MFER0_SX52 +MFMC0_SI1132 +MFMC0_SI1762 +MFMC0_SI502 +MFMC0_SX142 +MFMC0_SX232 +MFMC0_SX322 +MFMC0_SX412 +MFMC0_SX52 +MFRM0_SI1155 +MFRM0_SI1717 +MFRM0_SI1785 +MFRM0_SX165 +MFRM0_SX255 +MFRM0_SX345 +MFRM0_SX435 +MFRM0_SX75 +MFWK0_SI1249 +MFWK0_SI1879 +MFWK0_SI619 +MFWK0_SX169 +MFWK0_SX259 +MFWK0_SX349 +MFWK0_SX439 +MFWK0_SX79 +MFXS0_SI1674 +MFXS0_SI2225 +MFXS0_SI2304 +MFXS0_SX144 +MFXS0_SX234 +MFXS0_SX324 +MFXS0_SX414 +MFXS0_SX54 +MFXV0_SI1005 +MFXV0_SI1342 +MFXV0_SI1635 +MFXV0_SX105 +MFXV0_SX15 +MFXV0_SX195 +MFXV0_SX285 +MFXV0_SX375 +MGAF0_SI1282 +MGAF0_SI1912 +MGAF0_SI652 +MGAF0_SX112 +MGAF0_SX202 +MGAF0_SX22 +MGAF0_SX292 +MGAF0_SX382 +MGAG0_SI1321 +MGAG0_SI645 +MGAG0_SI691 +MGAG0_SX151 +MGAG0_SX241 +MGAG0_SX331 +MGAG0_SX421 +MGAG0_SX61 +MGAK0_SI1036 +MGAK0_SI1666 +MGAK0_SI2296 +MGAK0_SX136 +MGAK0_SX226 +MGAK0_SX316 +MGAK0_SX406 +MGAK0_SX46 +MGAR0_SI1212 +MGAR0_SI1694 +MGAR0_SI1842 +MGAR0_SX132 +MGAR0_SX222 +MGAR0_SX312 +MGAR0_SX402 +MGAR0_SX42 +MGAW0_SI1165 +MGAW0_SI1802 +MGAW0_SI535 +MGAW0_SX175 +MGAW0_SX265 +MGAW0_SX355 +MGAW0_SX445 +MGAW0_SX85 +MGES0_SI1481 +MGES0_SI2111 +MGES0_SI851 +MGES0_SX131 +MGES0_SX221 +MGES0_SX311 +MGES0_SX401 +MGES0_SX41 +MGJC0_SI1256 +MGJC0_SI1335 +MGJC0_SI1965 +MGJC0_SX165 +MGJC0_SX255 +MGJC0_SX345 +MGJC0_SX435 +MGJC0_SX75 +MGRL0_SI1497 +MGRL0_SI2127 +MGRL0_SI867 +MGRL0_SX147 +MGRL0_SX237 +MGRL0_SX327 +MGRL0_SX417 +MGRL0_SX57 +MGRP0_SI1317 +MGRP0_SI1947 +MGRP0_SI687 +MGRP0_SX147 +MGRP0_SX237 +MGRP0_SX327 +MGRP0_SX417 +MGRP0_SX57 +MGSH0_SI1176 +MGSH0_SI1806 +MGSH0_SI546 +MGSH0_SX127 +MGSH0_SX186 +MGSH0_SX276 +MGSH0_SX6 +MGSH0_SX96 +MGSL0_SI1164 +MGSL0_SI534 +MGSL0_SI797 +MGSL0_SX174 +MGSL0_SX264 +MGSL0_SX354 +MGSL0_SX444 +MGSL0_SX84 
+MGXP0_SI1087 +MGXP0_SI457 +MGXP0_SI525 +MGXP0_SX187 +MGXP0_SX277 +MGXP0_SX367 +MGXP0_SX7 +MGXP0_SX97 +MHBS0_SI1575 +MHBS0_SI2205 +MHBS0_SI945 +MHBS0_SX135 +MHBS0_SX225 +MHBS0_SX315 +MHBS0_SX405 +MHBS0_SX45 +MHIT0_SI1613 +MHIT0_SI2243 +MHIT0_SI983 +MHIT0_SX173 +MHIT0_SX263 +MHIT0_SX353 +MHIT0_SX443 +MHIT0_SX83 +MHJB0_SI1017 +MHJB0_SI1647 +MHJB0_SI2277 +MHJB0_SX117 +MHJB0_SX207 +MHJB0_SX27 +MHJB0_SX297 +MHJB0_SX387 +MHMG0_SI1365 +MHMG0_SI1995 +MHMG0_SI735 +MHMG0_SX105 +MHMG0_SX15 +MHMG0_SX195 +MHMG0_SX285 +MHMG0_SX375 +MHMR0_SI1119 +MHMR0_SI1692 +MHMR0_SI489 +MHMR0_SX129 +MHMR0_SX219 +MHMR0_SX309 +MHMR0_SX39 +MHMR0_SX399 +MHRM0_SI1475 +MHRM0_SI2218 +MHRM0_SI958 +MHRM0_SX148 +MHRM0_SX238 +MHRM0_SX328 +MHRM0_SX418 +MHRM0_SX58 +MHXL0_SI1772 +MHXL0_SI512 +MHXL0_SI612 +MHXL0_SX152 +MHXL0_SX242 +MHXL0_SX332 +MHXL0_SX422 +MHXL0_SX62 +MILB0_SI2163 +MILB0_SI807 +MILB0_SI903 +MILB0_SX183 +MILB0_SX273 +MILB0_SX3 +MILB0_SX363 +MILB0_SX93 +MJAC0_SI1331 +MJAC0_SI2148 +MJAC0_SI701 +MJAC0_SX251 +MJAC0_SX307 +MJAC0_SX341 +MJAC0_SX431 +MJAC0_SX71 +MJAE0_SI1524 +MJAE0_SI1999 +MJAE0_SI2154 +MJAE0_SX174 +MJAE0_SX264 +MJAE0_SX354 +MJAE0_SX444 +MJAE0_SX84 +MJAI0_SI1604 +MJAI0_SI682 +MJAI0_SI710 +MJAI0_SX164 +MJAI0_SX254 +MJAI0_SX344 +MJAI0_SX434 +MJAI0_SX74 +MJBG0_SI1232 +MJBG0_SI1724 +MJBG0_SI1862 +MJBG0_SX152 +MJBG0_SX242 +MJBG0_SX332 +MJBG0_SX422 +MJBG0_SX62 +MJDA0_SI1031 +MJDA0_SI1661 +MJDA0_SI2291 +MJDA0_SX131 +MJDA0_SX221 +MJDA0_SX311 +MJDA0_SX401 +MJDA0_SX41 +MJDC0_SI1161 +MJDC0_SI2165 +MJDC0_SI531 +MJDC0_SX171 +MJDC0_SX261 +MJDC0_SX351 +MJDC0_SX441 +MJDC0_SX81 +MJDE0_SI1120 +MJDE0_SI463 +MJDE0_SI490 +MJDE0_SX130 +MJDE0_SX220 +MJDE0_SX310 +MJDE0_SX40 +MJDE0_SX400 +MJDG0_SI1042 +MJDG0_SI1672 +MJDG0_SI1705 +MJDG0_SX142 +MJDG0_SX232 +MJDG0_SX322 +MJDG0_SX412 +MJDG0_SX52 +MJDM0_SI1340 +MJDM0_SI1937 +MJDM0_SI974 +MJDM0_SX170 +MJDM0_SX260 +MJDM0_SX350 +MJDM0_SX440 +MJDM0_SX80 +MJEB0_SI1286 +MJEB0_SI1916 +MJEB0_SI656 +MJEB0_SX170 +MJEB0_SX206 +MJEB0_SX26 +MJEB0_SX296 +MJEB0_SX386 +MJEB1_SI1467 +MJEB1_SI2097 +MJEB1_SI837 +MJEB1_SX117 +MJEB1_SX207 +MJEB1_SX27 +MJEB1_SX297 +MJEB1_SX387 +MJEE0_SI1237 +MJEE0_SI1867 +MJEE0_SI607 +MJEE0_SX157 +MJEE0_SX247 +MJEE0_SX337 +MJEE0_SX427 +MJEE0_SX67 +MJFH0_SI1107 +MJFH0_SI1737 +MJFH0_SI477 +MJFH0_SX117 +MJFH0_SX207 +MJFH0_SX27 +MJFH0_SX297 +MJFH0_SX387 +MJFR0_SI1605 +MJFR0_SI2235 +MJFR0_SI975 +MJFR0_SX165 +MJFR0_SX255 +MJFR0_SX345 +MJFR0_SX435 +MJFR0_SX75 +MJHI0_SI1328 +MJHI0_SI555 +MJHI0_SI698 +MJHI0_SX158 +MJHI0_SX248 +MJHI0_SX338 +MJHI0_SX428 +MJHI0_SX68 +MJJB0_SI1139 +MJJB0_SI1277 +MJJB0_SI1769 +MJJB0_SX149 +MJJB0_SX239 +MJJB0_SX329 +MJJB0_SX419 +MJJB0_SX59 +MJJJ0_SI1163 +MJJJ0_SI1793 +MJJJ0_SI533 +MJJJ0_SX173 +MJJJ0_SX263 +MJJJ0_SX353 +MJJJ0_SX443 +MJJJ0_SX83 +MJJM0_SI1251 +MJJM0_SI1457 +MJJM0_SI827 +MJJM0_SX107 +MJJM0_SX17 +MJJM0_SX197 +MJJM0_SX287 +MJJM0_SX377 +MJKR0_SI1201 +MJKR0_SI1831 +MJKR0_SI571 +MJKR0_SX121 +MJKR0_SX211 +MJKR0_SX301 +MJKR0_SX31 +MJKR0_SX391 +MJLB0_SI1616 +MJLB0_SI2246 +MJLB0_SI986 +MJLB0_SX176 +MJLB0_SX266 +MJLB0_SX356 +MJLB0_SX446 +MJLB0_SX86 +MJLG1_SI1012 +MJLG1_SI1642 +MJLG1_SI2272 +MJLG1_SX112 +MJLG1_SX202 +MJLG1_SX22 +MJLG1_SX292 +MJLG1_SX382 +MJLS0_SI1096 +MJLS0_SI1726 +MJLS0_SI466 +MJLS0_SX106 +MJLS0_SX16 +MJLS0_SX196 +MJLS0_SX286 +MJLS0_SX376 +MJMA0_SI1495 +MJMA0_SI2125 +MJMA0_SI865 +MJMA0_SX145 +MJMA0_SX235 +MJMA0_SX325 +MJMA0_SX415 +MJMA0_SX55 +MJMD0_SI1028 +MJMD0_SI1658 +MJMD0_SI2288 +MJMD0_SX128 +MJMD0_SX218 +MJMD0_SX308 +MJMD0_SX38 +MJMD0_SX398 +MJMM0_SI1255 +MJMM0_SI1885 +MJMM0_SI625 +MJMM0_SX175 +MJMM0_SX265 +MJMM0_SX355 
+MJMM0_SX445 +MJMM0_SX85 +MJPG0_SI1191 +MJPG0_SI1821 +MJPG0_SI561 +MJPG0_SX111 +MJPG0_SX201 +MJPG0_SX21 +MJPG0_SX291 +MJPG0_SX381 +MJPM0_SI1368 +MJPM0_SI1998 +MJPM0_SI738 +MJPM0_SX108 +MJPM0_SX18 +MJPM0_SX198 +MJPM0_SX288 +MJPM0_SX378 +MJPM1_SI1897 +MJPM1_SI2280 +MJPM1_SI761 +MJPM1_SX131 +MJPM1_SX221 +MJPM1_SX311 +MJPM1_SX401 +MJPM1_SX41 +MJRA0_SI1236 +MJRA0_SI1866 +MJRA0_SI606 +MJRA0_SX156 +MJRA0_SX246 +MJRA0_SX336 +MJRA0_SX426 +MJRA0_SX66 +MJRG0_SI1366 +MJRG0_SI1996 +MJRG0_SI736 +MJRG0_SX106 +MJRG0_SX16 +MJRG0_SX286 +MJRG0_SX352 +MJRG0_SX376 +MJRH0_SI1125 +MJRH0_SI1755 +MJRH0_SI1840 +MJRH0_SX135 +MJRH0_SX225 +MJRH0_SX315 +MJRH0_SX405 +MJRH0_SX45 +MJRH1_SI1558 +MJRH1_SI1774 +MJRH1_SI514 +MJRH1_SX154 +MJRH1_SX244 +MJRH1_SX334 +MJRH1_SX424 +MJRH1_SX64 +MJRK0_SI1662 +MJRK0_SI2103 +MJRK0_SI880 +MJRK0_SX160 +MJRK0_SX250 +MJRK0_SX340 +MJRK0_SX430 +MJRK0_SX70 +MJRP0_SI1835 +MJRP0_SI1845 +MJRP0_SI585 +MJRP0_SX135 +MJRP0_SX225 +MJRP0_SX315 +MJRP0_SX405 +MJRP0_SX45 +MJSR0_SI1424 +MJSR0_SI2054 +MJSR0_SI794 +MJSR0_SX164 +MJSR0_SX254 +MJSR0_SX344 +MJSR0_SX434 +MJSR0_SX74 +MJWG0_SI2155 +MJWG0_SI813 +MJWG0_SI895 +MJWG0_SX175 +MJWG0_SX265 +MJWG0_SX355 +MJWG0_SX445 +MJWG0_SX85 +MJWS0_SI1143 +MJWS0_SI1773 +MJWS0_SI513 +MJWS0_SX153 +MJWS0_SX243 +MJWS0_SX333 +MJWS0_SX423 +MJWS0_SX63 +MJWT0_SI1291 +MJWT0_SI1381 +MJWT0_SI751 +MJWT0_SX121 +MJWT0_SX211 +MJWT0_SX301 +MJWT0_SX31 +MJWT0_SX391 +MJXA0_SI1507 +MJXA0_SI2137 +MJXA0_SI877 +MJXA0_SX157 +MJXA0_SX247 +MJXA0_SX337 +MJXA0_SX427 +MJXA0_SX67 +MJXL0_SI1172 +MJXL0_SI1795 +MJXL0_SI542 +MJXL0_SX182 +MJXL0_SX272 +MJXL0_SX362 +MJXL0_SX452 +MJXL0_SX92 +MKAG0_SI1609 +MKAG0_SI2239 +MKAG0_SI979 +MKAG0_SX169 +MKAG0_SX259 +MKAG0_SX30 +MKAG0_SX439 +MKAG0_SX79 +MKAH0_SI1528 +MKAH0_SI2158 +MKAH0_SI898 +MKAH0_SX178 +MKAH0_SX268 +MKAH0_SX358 +MKAH0_SX448 +MKAH0_SX88 +MKAJ0_SI1414 +MKAJ0_SI2044 +MKAJ0_SI784 +MKAJ0_SX154 +MKAJ0_SX244 +MKAJ0_SX334 +MKAJ0_SX424 +MKAJ0_SX64 +MKAM0_SI1250 +MKAM0_SI1316 +MKAM0_SI1465 +MKAM0_SX146 +MKAM0_SX236 +MKAM0_SX326 +MKAM0_SX416 +MKAM0_SX56 +MKDB0_SI2132 +MKDB0_SI588 +MKDB0_SI872 +MKDB0_SX152 +MKDB0_SX242 +MKDB0_SX332 +MKDB0_SX422 +MKDB0_SX62 +MKDD0_SI1567 +MKDD0_SI2197 +MKDD0_SI937 +MKDD0_SX127 +MKDD0_SX217 +MKDD0_SX307 +MKDD0_SX37 +MKDD0_SX397 +MKDT0_SI2153 +MKDT0_SI814 +MKDT0_SI893 +MKDT0_SX173 +MKDT0_SX263 +MKDT0_SX353 +MKDT0_SX443 +MKDT0_SX83 +MKES0_SI1253 +MKES0_SI1883 +MKES0_SI623 +MKES0_SX173 +MKES0_SX263 +MKES0_SX353 +MKES0_SX443 +MKES0_SX83 +MKJO0_SI1517 +MKJO0_SI2147 +MKJO0_SI887 +MKJO0_SX167 +MKJO0_SX257 +MKJO0_SX424 +MKJO0_SX437 +MKJO0_SX77 +MKLN0_SI1598 +MKLN0_SI2228 +MKLN0_SI968 +MKLN0_SX158 +MKLN0_SX248 +MKLN0_SX338 +MKLN0_SX428 +MKLN0_SX68 +MKLR0_SI1059 +MKLR0_SI1689 +MKLR0_SI2319 +MKLR0_SX159 +MKLR0_SX249 +MKLR0_SX339 +MKLR0_SX429 +MKLR0_SX69 +MKLS0_SI1437 +MKLS0_SI1533 +MKLS0_SI2067 +MKLS0_SX177 +MKLS0_SX267 +MKLS0_SX357 +MKLS0_SX447 +MKLS0_SX87 +MKLS1_SI1545 +MKLS1_SI2175 +MKLS1_SI915 +MKLS1_SX105 +MKLS1_SX15 +MKLS1_SX195 +MKLS1_SX285 +MKLS1_SX375 +MKLW0_SI1571 +MKLW0_SI1844 +MKLW0_SI2201 +MKLW0_SX131 +MKLW0_SX221 +MKLW0_SX311 +MKLW0_SX401 +MKLW0_SX41 +MKRG0_SI1491 +MKRG0_SI2121 +MKRG0_SI861 +MKRG0_SX141 +MKRG0_SX231 +MKRG0_SX31 +MKRG0_SX411 +MKRG0_SX51 +MKXL0_SI1185 +MKXL0_SI1815 +MKXL0_SI1958 +MKXL0_SX105 +MKXL0_SX15 +MKXL0_SX195 +MKXL0_SX285 +MKXL0_SX375 +MLBC0_SI1239 +MLBC0_SI1869 +MLBC0_SI609 +MLBC0_SX159 +MLBC0_SX249 +MLBC0_SX339 +MLBC0_SX429 +MLBC0_SX69 +MLEL0_SI1246 +MLEL0_SI1876 +MLEL0_SI616 +MLEL0_SX166 +MLEL0_SX256 +MLEL0_SX346 +MLEL0_SX436 +MLEL0_SX76 +MLJC0_SI1225 +MLJC0_SI1855 +MLJC0_SI595 +MLJC0_SX145 
+MLJC0_SX235 +MLJC0_SX325 +MLJC0_SX415 +MLJC0_SX55 +MLJH0_SI1324 +MLJH0_SI1422 +MLJH0_SI694 +MLJH0_SX154 +MLJH0_SX244 +MLJH0_SX334 +MLJH0_SX424 +MLJH0_SX64 +MLNS0_SI1407 +MLNS0_SI2037 +MLNS0_SI777 +MLNS0_SX147 +MLNS0_SX237 +MLNS0_SX327 +MLNS0_SX417 +MLNS0_SX57 +MLSH0_SI1417 +MLSH0_SI2047 +MLSH0_SI787 +MLSH0_SX157 +MLSH0_SX247 +MLSH0_SX337 +MLSH0_SX427 +MLSH0_SX67 +MMAA0_SI1588 +MMAA0_SI2105 +MMAA0_SI845 +MMAA0_SX125 +MMAA0_SX215 +MMAA0_SX305 +MMAA0_SX35 +MMAA0_SX395 +MMAB1_SI1494 +MMAB1_SI2124 +MMAB1_SI864 +MMAB1_SX144 +MMAB1_SX234 +MMAB1_SX324 +MMAB1_SX414 +MMAB1_SX54 +MMAG0_SI1126 +MMAG0_SI1756 +MMAG0_SI496 +MMAG0_SX136 +MMAG0_SX226 +MMAG0_SX316 +MMAG0_SX406 +MMAG0_SX46 +MMAM0_SI1597 +MMAM0_SI1668 +MMAM0_SI2227 +MMAM0_SX157 +MMAM0_SX247 +MMAM0_SX337 +MMAM0_SX427 +MMAM0_SX67 +MMAR0_SI1336 +MMAR0_SI1966 +MMAR0_SI706 +MMAR0_SX166 +MMAR0_SX256 +MMAR0_SX346 +MMAR0_SX436 +MMAR0_SX76 +MMBS0_SI1151 +MMBS0_SI1781 +MMBS0_SI521 +MMBS0_SX161 +MMBS0_SX251 +MMBS0_SX341 +MMBS0_SX431 +MMBS0_SX71 +MMCC0_SI1338 +MMCC0_SI1968 +MMCC0_SI708 +MMCC0_SX168 +MMCC0_SX258 +MMCC0_SX348 +MMCC0_SX438 +MMCC0_SX78 +MMDB0_SI1358 +MMDB0_SI1617 +MMDB0_SI987 +MMDB0_SX177 +MMDB0_SX267 +MMDB0_SX357 +MMDB0_SX447 +MMDB0_SX87 +MMDG0_SI1780 +MMDG0_SI2035 +MMDG0_SI520 +MMDG0_SX160 +MMDG0_SX250 +MMDG0_SX340 +MMDG0_SX430 +MMDG0_SX70 +MMDM0_SI1311 +MMDM0_SI1941 +MMDM0_SI681 +MMDM0_SX141 +MMDM0_SX231 +MMDM0_SX321 +MMDM0_SX411 +MMDM0_SX51 +MMDM1_SI1650 +MMDM1_SI2043 +MMDM1_SI783 +MMDM1_SX153 +MMDM1_SX243 +MMDM1_SX333 +MMDM1_SX423 +MMDM1_SX63 +MMDS0_SI1343 +MMDS0_SI1973 +MMDS0_SI713 +MMDS0_SX173 +MMDS0_SX263 +MMDS0_SX353 +MMDS0_SX443 +MMDS0_SX83 +MMEA0_SI1388 +MMEA0_SI2018 +MMEA0_SI758 +MMEA0_SX128 +MMEA0_SX218 +MMEA0_SX308 +MMEA0_SX38 +MMEA0_SX398 +MMEB0_SI1357 +MMEB0_SI1987 +MMEB0_SI727 +MMEB0_SX187 +MMEB0_SX327 +MMEB0_SX367 +MMEB0_SX7 +MMEB0_SX97 +MMGC0_SI1305 +MMGC0_SI1935 +MMGC0_SI2184 +MMGC0_SX135 +MMGC0_SX225 +MMGC0_SX315 +MMGC0_SX405 +MMGC0_SX45 +MMGG0_SI1079 +MMGG0_SI1709 +MMGG0_SI2339 +MMGG0_SX179 +MMGG0_SX269 +MMGG0_SX359 +MMGG0_SX449 +MMGG0_SX89 +MMGK0_SI1322 +MMGK0_SI1952 +MMGK0_SI692 +MMGK0_SX152 +MMGK0_SX242 +MMGK0_SX332 +MMGK0_SX422 +MMGK0_SX62 +MMJB1_SI1408 +MMJB1_SI2038 +MMJB1_SI778 +MMJB1_SX148 +MMJB1_SX238 +MMJB1_SX328 +MMJB1_SX418 +MMJB1_SX58 +MMLM0_SI1527 +MMLM0_SI2150 +MMLM0_SI897 +MMLM0_SX177 +MMLM0_SX267 +MMLM0_SX357 +MMLM0_SX447 +MMLM0_SX87 +MMPM0_SI1061 +MMPM0_SI1691 +MMPM0_SI2321 +MMPM0_SX161 +MMPM0_SX251 +MMPM0_SX341 +MMPM0_SX431 +MMPM0_SX71 +MMRP0_SI2034 +MMRP0_SI717 +MMRP0_SI774 +MMRP0_SX144 +MMRP0_SX234 +MMRP0_SX324 +MMRP0_SX414 +MMRP0_SX54 +MMSM0_SI1106 +MMSM0_SI1736 +MMSM0_SI476 +MMSM0_SX116 +MMSM0_SX206 +MMSM0_SX26 +MMSM0_SX296 +MMSM0_SX386 +MMVP0_SI1284 +MMVP0_SI1914 +MMVP0_SI654 +MMVP0_SX114 +MMVP0_SX204 +MMVP0_SX294 +MMVP0_SX347 +MMVP0_SX384 +MMWB0_SI1619 +MMWB0_SI2249 +MMWB0_SI989 +MMWB0_SX179 +MMWB0_SX269 +MMWB0_SX359 +MMWB0_SX449 +MMWB0_SX89 +MMWS0_SI1518 +MMWS0_SI559 +MMWS0_SI888 +MMWS0_SX168 +MMWS0_SX258 +MMWS0_SX348 +MMWS0_SX438 +MMWS0_SX78 +MMWS1_SI1071 +MMWS1_SI1701 +MMWS1_SI2331 +MMWS1_SX261 +MMWS1_SX27 +MMWS1_SX351 +MMWS1_SX441 +MMWS1_SX81 +MMXS0_SI2136 +MMXS0_SI629 +MMXS0_SI876 +MMXS0_SX156 +MMXS0_SX246 +MMXS0_SX336 +MMXS0_SX426 +MMXS0_SX66 +MNET0_SI1446 +MNET0_SI2076 +MNET0_SI816 +MNET0_SX186 +MNET0_SX276 +MNET0_SX366 +MNET0_SX6 +MNET0_SX96 +MNTW0_SI1068 +MNTW0_SI1698 +MNTW0_SI2328 +MNTW0_SX168 +MNTW0_SX202 +MNTW0_SX258 +MNTW0_SX348 +MNTW0_SX78 +MPAR0_SI1576 +MPAR0_SI2206 +MPAR0_SI946 +MPAR0_SX136 +MPAR0_SX226 +MPAR0_SX316 +MPAR0_SX406 +MPAR0_SX46 +MPEB0_SI1034 +MPEB0_SI1860 
+MPEB0_SI600 +MPEB0_SX150 +MPEB0_SX240 +MPEB0_SX330 +MPEB0_SX420 +MPEB0_SX60 +MPFU0_SI1258 +MPFU0_SI1888 +MPFU0_SI628 +MPFU0_SX178 +MPFU0_SX268 +MPFU0_SX358 +MPFU0_SX448 +MPFU0_SX88 +MPGH0_SI1554 +MPGH0_SI675 +MPGH0_SI924 +MPGH0_SX114 +MPGH0_SX204 +MPGH0_SX24 +MPGH0_SX294 +MPGH0_SX384 +MPGR0_SI1410 +MPGR0_SI2040 +MPGR0_SI780 +MPGR0_SX150 +MPGR0_SX240 +MPGR0_SX330 +MPGR0_SX420 +MPGR0_SX60 +MPGR1_SI1269 +MPGR1_SI1499 +MPGR1_SI2129 +MPGR1_SX149 +MPGR1_SX239 +MPGR1_SX329 +MPGR1_SX419 +MPGR1_SX59 +MPMB0_SI1501 +MPMB0_SI2131 +MPMB0_SI871 +MPMB0_SX151 +MPMB0_SX241 +MPMB0_SX331 +MPMB0_SX421 +MPMB0_SX61 +MPPC0_SI1412 +MPPC0_SI2042 +MPPC0_SI782 +MPPC0_SX152 +MPPC0_SX242 +MPPC0_SX332 +MPPC0_SX422 +MPPC0_SX62 +MPRB0_SI1205 +MPRB0_SI1215 +MPRB0_SI575 +MPRB0_SX125 +MPRB0_SX215 +MPRB0_SX305 +MPRB0_SX35 +MPRB0_SX395 +MPRD0_SI1431 +MPRD0_SI2061 +MPRD0_SI801 +MPRD0_SX171 +MPRD0_SX261 +MPRD0_SX351 +MPRD0_SX441 +MPRD0_SX81 +MPRK0_SI1097 +MPRK0_SI1727 +MPRK0_SI467 +MPRK0_SX107 +MPRK0_SX17 +MPRK0_SX197 +MPRK0_SX287 +MPRK0_SX377 +MPRT0_SI1210 +MPRT0_SI495 +MPRT0_SI580 +MPRT0_SX130 +MPRT0_SX220 +MPRT0_SX310 +MPRT0_SX40 +MPRT0_SX400 +MPSW0_SI1067 +MPSW0_SI1697 +MPSW0_SI2327 +MPSW0_SX167 +MPSW0_SX24 +MPSW0_SX257 +MPSW0_SX437 +MPSW0_SX77 +MRAB0_SI1224 +MRAB0_SI1854 +MRAB0_SI594 +MRAB0_SX144 +MRAB0_SX234 +MRAB0_SX324 +MRAB0_SX414 +MRAB0_SX54 +MRAB1_SI1478 +MRAB1_SI2108 +MRAB1_SI848 +MRAB1_SX128 +MRAB1_SX218 +MRAB1_SX308 +MRAB1_SX38 +MRAB1_SX398 +MRAI0_SI1954 +MRAI0_SI2052 +MRAI0_SI792 +MRAI0_SX162 +MRAI0_SX252 +MRAI0_SX342 +MRAI0_SX432 +MRAI0_SX72 +MRAM0_SI1275 +MRAM0_SI1905 +MRAM0_SI1951 +MRAM0_SX105 +MRAM0_SX15 +MRAM0_SX195 +MRAM0_SX285 +MRAM0_SX375 +MRAV0_SI1008 +MRAV0_SI1638 +MRAV0_SI2268 +MRAV0_SX108 +MRAV0_SX18 +MRAV0_SX198 +MRAV0_SX288 +MRAV0_SX378 +MRBC0_SI1665 +MRBC0_SI1859 +MRBC0_SI599 +MRBC0_SX149 +MRBC0_SX239 +MRBC0_SX329 +MRBC0_SX419 +MRBC0_SX59 +MRCG0_SI1428 +MRCG0_SI2058 +MRCG0_SI798 +MRCG0_SX168 +MRCG0_SX258 +MRCG0_SX348 +MRCG0_SX438 +MRCG0_SX78 +MRCW0_SI1371 +MRCW0_SI2001 +MRCW0_SI741 +MRCW0_SX111 +MRCW0_SX201 +MRCW0_SX21 +MRCW0_SX291 +MRCW0_SX381 +MRDD0_SI1050 +MRDD0_SI1680 +MRDD0_SI2310 +MRDD0_SX150 +MRDD0_SX240 +MRDD0_SX277 +MRDD0_SX330 +MRDD0_SX60 +MRDM0_SI1044 +MRDM0_SI1595 +MRDM0_SI965 +MRDM0_SX155 +MRDM0_SX245 +MRDM0_SX335 +MRDM0_SX425 +MRDM0_SX65 +MRDS0_SI1167 +MRDS0_SI1797 +MRDS0_SI537 +MRDS0_SX177 +MRDS0_SX267 +MRDS0_SX357 +MRDS0_SX447 +MRDS0_SX87 +MREE0_SI1104 +MREE0_SI1734 +MREE0_SI1959 +MREE0_SX114 +MREE0_SX204 +MREE0_SX24 +MREE0_SX294 +MREE0_SX384 +MREH1_SI1599 +MREH1_SI2229 +MREH1_SI969 +MREH1_SX159 +MREH1_SX249 +MREH1_SX339 +MREH1_SX429 +MREH1_SX69 +MREM0_SI1591 +MREM0_SI511 +MREM0_SI961 +MREM0_SX151 +MREM0_SX241 +MREM0_SX331 +MREM0_SX421 +MREM0_SX61 +MREW1_SI1500 +MREW1_SI2130 +MREW1_SI870 +MREW1_SX150 +MREW1_SX240 +MREW1_SX330 +MREW1_SX420 +MREW1_SX60 +MRFK0_SI1076 +MRFK0_SI1706 +MRFK0_SI2336 +MRFK0_SX176 +MRFK0_SX266 +MRFK0_SX356 +MRFK0_SX446 +MRFK0_SX86 +MRFL0_SI1156 +MRFL0_SI1786 +MRFL0_SI526 +MRFL0_SX166 +MRFL0_SX256 +MRFL0_SX346 +MRFL0_SX436 +MRFL0_SX76 +MRGM0_SI1162 +MRGM0_SI1792 +MRGM0_SI532 +MRGM0_SX172 +MRGM0_SX262 +MRGM0_SX416 +MRGM0_SX442 +MRGM0_SX82 +MRGS0_SI1356 +MRGS0_SI1986 +MRGS0_SI726 +MRGS0_SX186 +MRGS0_SX276 +MRGS0_SX366 +MRGS0_SX6 +MRGS0_SX96 +MRHL0_SI1515 +MRHL0_SI2145 +MRHL0_SI885 +MRHL0_SX165 +MRHL0_SX255 +MRHL0_SX345 +MRHL0_SX435 +MRHL0_SX75 +MRJB1_SI1020 +MRJB1_SI1413 +MRJB1_SI2021 +MRJB1_SX120 +MRJB1_SX210 +MRJB1_SX30 +MRJB1_SX300 +MRJB1_SX390 +MRJH0_SI1519 +MRJH0_SI889 +MRJH0_SI914 +MRJH0_SX169 +MRJH0_SX259 +MRJH0_SX307 +MRJH0_SX439 +MRJH0_SX79 
+MRJM0_SI1095 +MRJM0_SI1228 +MRJM0_SI1858 +MRJM0_SX148 +MRJM0_SX238 +MRJM0_SX328 +MRJM0_SX418 +MRJM0_SX58 +MRJM1_SI1298 +MRJM1_SI1928 +MRJM1_SI668 +MRJM1_SX128 +MRJM1_SX218 +MRJM1_SX308 +MRJM1_SX38 +MRJM1_SX398 +MRJT0_SI1498 +MRJT0_SI1805 +MRJT0_SI868 +MRJT0_SX148 +MRJT0_SX238 +MRJT0_SX328 +MRJT0_SX418 +MRJT0_SX58 +MRKM0_SI1267 +MRKM0_SI1391 +MRKM0_SI637 +MRKM0_SX187 +MRKM0_SX277 +MRKM0_SX367 +MRKM0_SX7 +MRKM0_SX97 +MRLD0_SI1594 +MRLD0_SI2224 +MRLD0_SI964 +MRLD0_SX154 +MRLD0_SX244 +MRLD0_SX334 +MRLD0_SX424 +MRLD0_SX64 +MRLJ0_SI1420 +MRLJ0_SI2050 +MRLJ0_SI790 +MRLJ0_SX160 +MRLJ0_SX250 +MRLJ0_SX340 +MRLJ0_SX430 +MRLJ0_SX70 +MRLJ1_SI1671 +MRLJ1_SI2301 +MRLJ1_SI2332 +MRLJ1_SX141 +MRLJ1_SX231 +MRLJ1_SX321 +MRLJ1_SX411 +MRLJ1_SX51 +MRLK0_SI1468 +MRLK0_SI2140 +MRLK0_SI843 +MRLK0_SX123 +MRLK0_SX213 +MRLK0_SX303 +MRLK0_SX33 +MRLK0_SX393 +MRLR0_SI1196 +MRLR0_SI1826 +MRLR0_SI566 +MRLR0_SX116 +MRLR0_SX206 +MRLR0_SX26 +MRLR0_SX296 +MRLR0_SX386 +MRMB0_SI1581 +MRMB0_SI2211 +MRMB0_SI951 +MRMB0_SX141 +MRMB0_SX231 +MRMB0_SX321 +MRMB0_SX411 +MRMB0_SX51 +MRMG0_SI1080 +MRMG0_SI1710 +MRMG0_SI2340 +MRMG0_SX180 +MRMG0_SX270 +MRMG0_SX360 +MRMG0_SX450 +MRMG0_SX90 +MRMH0_SI1021 +MRMH0_SI1349 +MRMH0_SI2281 +MRMH0_SX121 +MRMH0_SX211 +MRMH0_SX301 +MRMH0_SX31 +MRMH0_SX391 +MRML0_SI1421 +MRML0_SI2051 +MRML0_SI791 +MRML0_SX161 +MRML0_SX251 +MRML0_SX341 +MRML0_SX431 +MRML0_SX71 +MRMS0_SI1113 +MRMS0_SI2057 +MRMS0_SI2100 +MRMS0_SX120 +MRMS0_SX210 +MRMS0_SX30 +MRMS0_SX300 +MRMS0_SX390 +MRPC1_SI1482 +MRPC1_SI2026 +MRPC1_SI2112 +MRPC1_SX132 +MRPC1_SX222 +MRPC1_SX312 +MRPC1_SX402 +MRPC1_SX42 +MRRE0_SI1334 +MRRE0_SI704 +MRRE0_SI952 +MRRE0_SX164 +MRRE0_SX254 +MRRE0_SX344 +MRRE0_SX434 +MRRE0_SX74 +MRSO0_SI1206 +MRSO0_SI1659 +MRSO0_SI2289 +MRSO0_SX129 +MRSO0_SX219 +MRSO0_SX309 +MRSO0_SX39 +MRSO0_SX399 +MRSP0_SI1429 +MRSP0_SI2059 +MRSP0_SI799 +MRSP0_SX169 +MRSP0_SX196 +MRSP0_SX259 +MRSP0_SX439 +MRSP0_SX79 +MRTC0_SI1458 +MRTC0_SI2088 +MRTC0_SI828 +MRTC0_SX108 +MRTC0_SX18 +MRTC0_SX198 +MRTC0_SX288 +MRTC0_SX378 +MRTJ0_SI1551 +MRTJ0_SI2032 +MRTJ0_SI772 +MRTJ0_SX142 +MRTJ0_SX232 +MRTJ0_SX322 +MRTJ0_SX412 +MRTJ0_SX52 +MRVG0_SI1140 +MRVG0_SI1770 +MRVG0_SI510 +MRVG0_SX150 +MRVG0_SX240 +MRVG0_SX330 +MRVG0_SX420 +MRVG0_SX60 +MRWA0_SI1603 +MRWA0_SI2233 +MRWA0_SI973 +MRWA0_SX163 +MRWA0_SX253 +MRWA0_SX343 +MRWA0_SX433 +MRWA0_SX73 +MRWS0_SI1102 +MRWS0_SI1732 +MRWS0_SI472 +MRWS0_SX112 +MRWS0_SX202 +MRWS0_SX22 +MRWS0_SX292 +MRWS0_SX382 +MRXB0_SI1585 +MRXB0_SI2215 +MRXB0_SI955 +MRXB0_SX145 +MRXB0_SX235 +MRXB0_SX325 +MRXB0_SX415 +MRXB0_SX55 +MSAH1_SI1049 +MSAH1_SI1679 +MSAH1_SI2309 +MSAH1_SX149 +MSAH1_SX239 +MSAH1_SX329 +MSAH1_SX419 +MSAH1_SX59 +MSAS0_SI1376 +MSAS0_SI2006 +MSAS0_SI746 +MSAS0_SX116 +MSAS0_SX206 +MSAS0_SX26 +MSAS0_SX296 +MSAS0_SX386 +MSAT0_SI1526 +MSAT0_SI2156 +MSAT0_SI896 +MSAT0_SX176 +MSAT0_SX266 +MSAT0_SX356 +MSAT0_SX446 +MSAT0_SX86 +MSAT1_SI1073 +MSAT1_SI1703 +MSAT1_SI2333 +MSAT1_SX173 +MSAT1_SX263 +MSAT1_SX353 +MSAT1_SX443 +MSAT1_SX83 +MSDB0_SI1007 +MSDB0_SI1637 +MSDB0_SI2267 +MSDB0_SX107 +MSDB0_SX17 +MSDB0_SX197 +MSDB0_SX287 +MSDB0_SX377 +MSDH0_SI2113 +MSDH0_SI2240 +MSDH0_SI980 +MSDH0_SX170 +MSDH0_SX260 +MSDH0_SX350 +MSDH0_SX440 +MSDH0_SX80 +MSDS0_SI1077 +MSDS0_SI1707 +MSDS0_SI2337 +MSDS0_SX177 +MSDS0_SX267 +MSDS0_SX357 +MSDS0_SX447 +MSDS0_SX87 +MSEM1_SI1440 +MSEM1_SI2070 +MSEM1_SI810 +MSEM1_SX180 +MSEM1_SX270 +MSEM1_SX360 +MSEM1_SX450 +MSEM1_SX90 +MSES0_SI1589 +MSES0_SI2216 +MSES0_SI2219 +MSES0_SX149 +MSES0_SX239 +MSES0_SX329 +MSES0_SX419 +MSES0_SX59 +MSFH0_SI1216 +MSFH0_SI1738 +MSFH0_SI586 +MSFH0_SX136 +MSFH0_SX226 +MSFH0_SX316 
+MSFH0_SX406 +MSFH0_SX46 +MSFV0_SI1262 +MSFV0_SI1892 +MSFV0_SI632 +MSFV0_SX182 +MSFV0_SX272 +MSFV0_SX362 +MSFV0_SX452 +MSFV0_SX92 +MSJK0_SI1596 +MSJK0_SI2226 +MSJK0_SI966 +MSJK0_SX156 +MSJK0_SX246 +MSJK0_SX336 +MSJK0_SX426 +MSJK0_SX66 +MSMC0_SI1907 +MSMC0_SI509 +MSMC0_SI647 +MSMC0_SX107 +MSMC0_SX17 +MSMC0_SX197 +MSMC0_SX287 +MSMC0_SX377 +MSMR0_SI1150 +MSMR0_SI1405 +MSMR0_SI775 +MSMR0_SX145 +MSMR0_SX235 +MSMR0_SX325 +MSMR0_SX415 +MSMR0_SX55 +MSMS0_SI1433 +MSMS0_SI2063 +MSMS0_SI803 +MSMS0_SX173 +MSMS0_SX263 +MSMS0_SX353 +MSMS0_SX443 +MSMS0_SX83 +MSRG0_SI1221 +MSRG0_SI1851 +MSRG0_SI591 +MSRG0_SX141 +MSRG0_SX231 +MSRG0_SX321 +MSRG0_SX411 +MSRG0_SX51 +MSRR0_SI1131 +MSRR0_SI1761 +MSRR0_SI501 +MSRR0_SX141 +MSRR0_SX231 +MSRR0_SX30 +MSRR0_SX411 +MSRR0_SX51 +MSTF0_SI1396 +MSTF0_SI766 +MSTF0_SI852 +MSTF0_SX136 +MSTF0_SX226 +MSTF0_SX316 +MSTF0_SX406 +MSTF0_SX46 +MSVS0_SI1568 +MSVS0_SI2198 +MSVS0_SI938 +MSVS0_SX128 +MSVS0_SX218 +MSVS0_SX308 +MSVS0_SX38 +MSVS0_SX398 +MTAB0_SI1572 +MTAB0_SI2202 +MTAB0_SI942 +MTAB0_SX132 +MTAB0_SX222 +MTAB0_SX312 +MTAB0_SX402 +MTAB0_SX42 +MTAS0_SI1385 +MTAS0_SI2015 +MTAS0_SI755 +MTAS0_SX125 +MTAS0_SX215 +MTAS0_SX305 +MTAS0_SX35 +MTAS0_SX395 +MTAT0_SI1110 +MTAT0_SI1740 +MTAT0_SI811 +MTAT0_SX120 +MTAT0_SX210 +MTAT0_SX30 +MTAT0_SX300 +MTAT0_SX390 +MTAT1_SI1409 +MTAT1_SI1627 +MTAT1_SI779 +MTAT1_SX149 +MTAT1_SX239 +MTAT1_SX329 +MTAT1_SX419 +MTAT1_SX59 +MTBC0_SI1173 +MTBC0_SI1803 +MTBC0_SI543 +MTBC0_SX183 +MTBC0_SX273 +MTBC0_SX347 +MTBC0_SX363 +MTBC0_SX93 +MTCS0_SI1972 +MTCS0_SI2265 +MTCS0_SI712 +MTCS0_SX172 +MTCS0_SX262 +MTCS0_SX352 +MTCS0_SX442 +MTCS0_SX82 +MTDB0_SI1401 +MTDB0_SI2031 +MTDB0_SI771 +MTDB0_SX141 +MTDB0_SX231 +MTDB0_SX321 +MTDB0_SX411 +MTDB0_SX51 +MTDP0_SI1274 +MTDP0_SI1521 +MTDP0_SI2151 +MTDP0_SX171 +MTDP0_SX261 +MTDP0_SX351 +MTDP0_SX441 +MTDP0_SX81 +MTER0_SI1157 +MTER0_SI1787 +MTER0_SI527 +MTER0_SX167 +MTER0_SX17 +MTER0_SX257 +MTER0_SX437 +MTER0_SX77 +MTJG0_SI1520 +MTJG0_SI2157 +MTJG0_SI890 +MTJG0_SX170 +MTJG0_SX260 +MTJG0_SX350 +MTJG0_SX440 +MTJG0_SX80 +MTJM0_SI1226 +MTJM0_SI1856 +MTJM0_SI655 +MTJM0_SX146 +MTJM0_SX236 +MTJM0_SX326 +MTJM0_SX416 +MTJM0_SX56 +MTJS0_SI1192 +MTJS0_SI1822 +MTJS0_SI562 +MTJS0_SX112 +MTJS0_SX202 +MTJS0_SX22 +MTJS0_SX292 +MTJS0_SX382 +MTJU0_SI2020 +MTJU0_SI2269 +MTJU0_SI760 +MTJU0_SX130 +MTJU0_SX220 +MTJU0_SX310 +MTJU0_SX40 +MTJU0_SX400 +MTKD0_SI1187 +MTKD0_SI1817 +MTKD0_SI630 +MTKD0_SX107 +MTKD0_SX17 +MTKD0_SX197 +MTKD0_SX287 +MTKD0_SX377 +MTKP0_SI1023 +MTKP0_SI2283 +MTKP0_SI454 +MTKP0_SX123 +MTKP0_SX213 +MTKP0_SX303 +MTKP0_SX33 +MTKP0_SX393 +MTLB0_SI1134 +MTLB0_SI1764 +MTLB0_SI504 +MTLB0_SX144 +MTLB0_SX234 +MTLB0_SX324 +MTLB0_SX414 +MTLB0_SX54 +MTLC0_SI1313 +MTLC0_SI1477 +MTLC0_SI847 +MTLC0_SX127 +MTLC0_SX217 +MTLC0_SX307 +MTLC0_SX37 +MTLC0_SX397 +MTML0_SI1065 +MTML0_SI1695 +MTML0_SI2325 +MTML0_SX165 +MTML0_SX255 +MTML0_SX345 +MTML0_SX435 +MTML0_SX75 +MTMN0_SI1064 +MTMN0_SI2324 +MTMN0_SI582 +MTMN0_SX164 +MTMN0_SX254 +MTMN0_SX344 +MTMN0_SX434 +MTMN0_SX74 +MTMT0_SI1118 +MTMT0_SI1748 +MTMT0_SI488 +MTMT0_SX128 +MTMT0_SX218 +MTMT0_SX308 +MTMT0_SX38 +MTMT0_SX398 +MTPF0_SI1235 +MTPF0_SI1865 +MTPF0_SI605 +MTPF0_SX155 +MTPF0_SX245 +MTPF0_SX335 +MTPF0_SX425 +MTPF0_SX65 +MTPG0_SI1383 +MTPG0_SI2013 +MTPG0_SI753 +MTPG0_SX123 +MTPG0_SX213 +MTPG0_SX303 +MTPG0_SX33 +MTPG0_SX393 +MTPP0_SI1508 +MTPP0_SI2138 +MTPP0_SI878 +MTPP0_SX158 +MTPP0_SX248 +MTPP0_SX338 +MTPP0_SX428 +MTPP0_SX68 +MTPR0_SI1600 +MTPR0_SI2230 +MTPR0_SI506 +MTPR0_SX160 +MTPR0_SX250 +MTPR0_SX340 +MTPR0_SX430 +MTPR0_SX70 +MTQC0_SI1441 +MTQC0_SI2071 +MTQC0_SI480 +MTQC0_SX181 +MTQC0_SX271 
+MTQC0_SX361 +MTQC0_SX451 +MTQC0_SX91 +MTRC0_SI1623 +MTRC0_SI589 +MTRC0_SI993 +MTRC0_SX170 +MTRC0_SX183 +MTRC0_SX273 +MTRC0_SX363 +MTRC0_SX93 +MTRR0_SI1548 +MTRR0_SI2178 +MTRR0_SI918 +MTRR0_SX108 +MTRR0_SX18 +MTRR0_SX198 +MTRR0_SX288 +MTRR0_SX378 +MTRT0_SI1227 +MTRT0_SI1857 +MTRT0_SI597 +MTRT0_SX147 +MTRT0_SX237 +MTRT0_SX254 +MTRT0_SX417 +MTRT0_SX57 +MTWH1_SI1512 +MTWH1_SI2142 +MTWH1_SI882 +MTWH1_SX162 +MTWH1_SX252 +MTWH1_SX342 +MTWH1_SX432 +MTWH1_SX72 +MTXS0_SI1060 +MTXS0_SI1690 +MTXS0_SI2320 +MTXS0_SX160 +MTXS0_SX250 +MTXS0_SX340 +MTXS0_SX430 +MTXS0_SX70 +MVJH0_SI1556 +MVJH0_SI2186 +MVJH0_SI926 +MVJH0_SX116 +MVJH0_SX206 +MVJH0_SX26 +MVJH0_SX296 +MVJH0_SX386 +MVLO0_SI1147 +MVLO0_SI1777 +MVLO0_SI517 +MVLO0_SX157 +MVLO0_SX247 +MVLO0_SX337 +MVLO0_SX427 +MVLO0_SX67 +MVRW0_SI1485 +MVRW0_SI2115 +MVRW0_SI855 +MVRW0_SX135 +MVRW0_SX225 +MVRW0_SX315 +MVRW0_SX405 +MVRW0_SX45 +MWAC0_SI1601 +MWAC0_SI2231 +MWAC0_SI971 +MWAC0_SX161 +MWAC0_SX251 +MWAC0_SX341 +MWAC0_SX431 +MWAC0_SX71 +MWAD0_SI1062 +MWAD0_SI1749 +MWAD0_SI2322 +MWAD0_SX162 +MWAD0_SX252 +MWAD0_SX342 +MWAD0_SX432 +MWAD0_SX72 +MWAR0_SI1045 +MWAR0_SI1675 +MWAR0_SI2305 +MWAR0_SX145 +MWAR0_SX235 +MWAR0_SX325 +MWAR0_SX415 +MWAR0_SX55 +MWCH0_SI1622 +MWCH0_SI1895 +MWCH0_SI2252 +MWCH0_SX182 +MWCH0_SX272 +MWCH0_SX362 +MWCH0_SX452 +MWCH0_SX92 +MWDK0_SI1436 +MWDK0_SI2017 +MWDK0_SI806 +MWDK0_SX176 +MWDK0_SX266 +MWDK0_SX356 +MWDK0_SX446 +MWDK0_SX86 +MWEM0_SI1320 +MWEM0_SI1393 +MWEM0_SI1950 +MWEM0_SX150 +MWEM0_SX240 +MWEM0_SX330 +MWEM0_SX420 +MWEM0_SX60 +MWGR0_SI1606 +MWGR0_SI2236 +MWGR0_SI976 +MWGR0_SX166 +MWGR0_SX256 +MWGR0_SX346 +MWGR0_SX436 +MWGR0_SX76 +MWRE0_SI1057 +MWRE0_SI1687 +MWRE0_SI2317 +MWRE0_SX157 +MWRE0_SX247 +MWRE0_SX337 +MWRE0_SX427 +MWRE0_SX67 +MWRP0_SI1443 +MWRP0_SI1525 +MWRP0_SI2073 +MWRP0_SX183 +MWRP0_SX273 +MWRP0_SX3 +MWRP0_SX363 +MWRP0_SX93 +MWSB0_SI1626 +MWSB0_SI2256 +MWSB0_SI996 +MWSB0_SX186 +MWSB0_SX276 +MWSB0_SX366 +MWSB0_SX6 +MWSB0_SX96 +MWSH0_SI1426 +MWSH0_SI2266 +MWSH0_SI796 +MWSH0_SX166 +MWSH0_SX256 +MWSH0_SX346 +MWSH0_SX436 +MWSH0_SX76 +MZMB0_SI1166 +MZMB0_SI1796 +MZMB0_SI536 +MZMB0_SX176 +MZMB0_SX266 +MZMB0_SX356 +MZMB0_SX446 +MZMB0_SX86
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train_text.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train_text.uid
new file mode 100644
index 0000000..c39fd0b
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/train_text.uid
@@ -0,0 +1,3696 @@
+FAEM0_SI1392 +FAEM0_SI2022 +FAEM0_SI762 +FAEM0_SX132 +FAEM0_SX222 +FAEM0_SX312 +FAEM0_SX402 +FAEM0_SX42 +FAJW0_SI1263 +FAJW0_SI1893 +FAJW0_SI633 +FAJW0_SX183 +FAJW0_SX273 +FAJW0_SX3 +FAJW0_SX363 +FAJW0_SX93 +FALK0_SI1086 +FALK0_SI456 +FALK0_SI658 +FALK0_SX186 +FALK0_SX276 +FALK0_SX366 +FALK0_SX6 +FALK0_SX96 +FALR0_SI1325 +FALR0_SI1955 +FALR0_SI695 +FALR0_SX155 +FALR0_SX245 +FALR0_SX335 +FALR0_SX425 +FALR0_SX65 +FAPB0_SI1063 +FAPB0_SI1693 +FAPB0_SI2323 +FAPB0_SX163 +FAPB0_SX253 +FAPB0_SX343 +FAPB0_SX433 +FAPB0_SX73 +FBAS0_SI1387 +FBAS0_SI1472 +FBAS0_SI2066 +FBAS0_SX127 +FBAS0_SX217 +FBAS0_SX307 +FBAS0_SX37 +FBAS0_SX397 +FBCG1_SI1612 +FBCG1_SI2242 +FBCG1_SI982 +FBCG1_SX172 +FBCG1_SX262 +FBCG1_SX352 +FBCG1_SX442 +FBCG1_SX82 +FBCH0_SI1586 +FBCH0_SI956 +FBCH0_SI959 +FBCH0_SX146 +FBCH0_SX236 +FBCH0_SX326 +FBCH0_SX416 +FBCH0_SX56 +FBJL0_SI1552 +FBJL0_SI2182 +FBJL0_SI922 +FBJL0_SX112 +FBJL0_SX202 +FBJL0_SX22 +FBJL0_SX292 +FBJL0_SX382 +FBLV0_SI1058 +FBLV0_SI1688 +FBLV0_SI2318 +FBLV0_SX158 +FBLV0_SX248 +FBLV0_SX338 +FBLV0_SX428 +FBLV0_SX68 +FBMH0_SI1136
+FBMH0_SI1766 +FBMH0_SI970 +FBMH0_SX146 +FBMH0_SX236 +FBMH0_SX326 +FBMH0_SX416 +FBMH0_SX56 +FBMJ0_SI1776 +FBMJ0_SI516 +FBMJ0_SI815 +FBMJ0_SX156 +FBMJ0_SX246 +FBMJ0_SX336 +FBMJ0_SX426 +FBMJ0_SX66 +FCAG0_SI1503 +FCAG0_SI1641 +FCAG0_SI2133 +FCAG0_SX153 +FCAG0_SX243 +FCAG0_SX333 +FCAG0_SX423 +FCAG0_SX63 +FCAJ0_SI1479 +FCAJ0_SI1804 +FCAJ0_SI849 +FCAJ0_SX129 +FCAJ0_SX219 +FCAJ0_SX309 +FCAJ0_SX39 +FCAJ0_SX399 +FCDR1_SI1186 +FCDR1_SI1816 +FCDR1_SI556 +FCDR1_SX106 +FCDR1_SX16 +FCDR1_SX196 +FCDR1_SX286 +FCDR1_SX376 +FCEG0_SI1248 +FCEG0_SI1878 +FCEG0_SI618 +FCEG0_SX168 +FCEG0_SX258 +FCEG0_SX348 +FCEG0_SX438 +FCEG0_SX78 +FCJF0_SI1027 +FCJF0_SI1657 +FCJF0_SI648 +FCJF0_SX127 +FCJF0_SX217 +FCJF0_SX307 +FCJF0_SX37 +FCJF0_SX397 +FCJS0_SI1607 +FCJS0_SI2237 +FCJS0_SI977 +FCJS0_SX167 +FCJS0_SX257 +FCJS0_SX347 +FCJS0_SX437 +FCJS0_SX77 +FCKE0_SI1111 +FCKE0_SI1741 +FCKE0_SI481 +FCKE0_SX121 +FCKE0_SX211 +FCKE0_SX301 +FCKE0_SX31 +FCKE0_SX391 +FCLT0_SI1438 +FCLT0_SI2068 +FCLT0_SI808 +FCLT0_SX178 +FCLT0_SX268 +FCLT0_SX358 +FCLT0_SX448 +FCLT0_SX88 +FCMG0_SI1142 +FCMG0_SI1242 +FCMG0_SI1872 +FCMG0_SX162 +FCMG0_SX252 +FCMG0_SX342 +FCMG0_SX432 +FCMG0_SX72 +FCMM0_SI1083 +FCMM0_SI1957 +FCMM0_SI453 +FCMM0_SX183 +FCMM0_SX273 +FCMM0_SX363 +FCMM0_SX420 +FCMM0_SX93 +FCRZ0_SI1913 +FCRZ0_SI2053 +FCRZ0_SI793 +FCRZ0_SX163 +FCRZ0_SX253 +FCRZ0_SX343 +FCRZ0_SX433 +FCRZ0_SX73 +FCYL0_SI1297 +FCYL0_SI1927 +FCYL0_SI667 +FCYL0_SX127 +FCYL0_SX217 +FCYL0_SX349 +FCYL0_SX37 +FCYL0_SX397 +FDAS1_SI1461 +FDAS1_SI2091 +FDAS1_SI831 +FDAS1_SX111 +FDAS1_SX201 +FDAS1_SX21 +FDAS1_SX291 +FDAS1_SX381 +FDAW0_SI1271 +FDAW0_SI1406 +FDAW0_SI2036 +FDAW0_SX146 +FDAW0_SX236 +FDAW0_SX326 +FDAW0_SX416 +FDAW0_SX56 +FDFB0_SI1318 +FDFB0_SI1948 +FDFB0_SI2010 +FDFB0_SX148 +FDFB0_SX238 +FDFB0_SX328 +FDFB0_SX418 +FDFB0_SX58 +FDJH0_SI1565 +FDJH0_SI2195 +FDJH0_SI935 +FDJH0_SX125 +FDJH0_SX215 +FDJH0_SX305 +FDJH0_SX35 +FDJH0_SX395 +FDKN0_SI1081 +FDKN0_SI1202 +FDKN0_SI1711 +FDKN0_SX181 +FDKN0_SX271 +FDKN0_SX361 +FDKN0_SX451 +FDKN0_SX91 +FDML0_SI1149 +FDML0_SI1779 +FDML0_SI2075 +FDML0_SX159 +FDML0_SX249 +FDML0_SX339 +FDML0_SX429 +FDML0_SX69 +FDMY0_SI1197 +FDMY0_SI567 +FDMY0_SI714 +FDMY0_SX117 +FDMY0_SX207 +FDMY0_SX27 +FDMY0_SX297 +FDMY0_SX387 +FDNC0_SI1278 +FDNC0_SI1908 +FDNC0_SI2287 +FDNC0_SX108 +FDNC0_SX18 +FDNC0_SX198 +FDNC0_SX288 +FDNC0_SX378 +FDTD0_SI1561 +FDTD0_SI2191 +FDTD0_SI931 +FDTD0_SX121 +FDTD0_SX211 +FDTD0_SX301 +FDTD0_SX321 +FDTD0_SX391 +FDXW0_SI1511 +FDXW0_SI2141 +FDXW0_SI881 +FDXW0_SX161 +FDXW0_SX251 +FDXW0_SX341 +FDXW0_SX431 +FDXW0_SX71 +FEAC0_SI1245 +FEAC0_SI1875 +FEAC0_SI615 +FEAC0_SX165 +FEAC0_SX255 +FEAC0_SX345 +FEAC0_SX435 +FEAC0_SX75 +FEAR0_SI1252 +FEAR0_SI1882 +FEAR0_SI622 +FEAR0_SX172 +FEAR0_SX262 +FEAR0_SX352 +FEAR0_SX442 +FEAR0_SX82 +FECD0_SI1418 +FECD0_SI2048 +FECD0_SI788 +FECD0_SX158 +FECD0_SX248 +FECD0_SX338 +FECD0_SX428 +FECD0_SX68 +FEEH0_SI1112 +FEEH0_SI1742 +FEEH0_SI471 +FEEH0_SX122 +FEEH0_SX212 +FEEH0_SX302 +FEEH0_SX32 +FEEH0_SX392 +FEME0_SI1505 +FEME0_SI2135 +FEME0_SI875 +FEME0_SX155 +FEME0_SX245 +FEME0_SX335 +FEME0_SX425 +FEME0_SX65 +FETB0_SI1148 +FETB0_SI1778 +FETB0_SI518 +FETB0_SX158 +FETB0_SX248 +FETB0_SX338 +FETB0_SX428 +FETB0_SX68 +FEXM0_SI1101 +FEXM0_SI1731 +FEXM0_SI482 +FEXM0_SX111 +FEXM0_SX201 +FEXM0_SX291 +FEXM0_SX366 +FEXM0_SX381 +FGCS0_SI1486 +FGCS0_SI2116 +FGCS0_SI856 +FGCS0_SX136 +FGCS0_SX226 +FGCS0_SX316 +FGCS0_SX406 +FGCS0_SX46 +FGDP0_SI1618 +FGDP0_SI2248 +FGDP0_SI988 +FGDP0_SX178 +FGDP0_SX268 +FGDP0_SX358 +FGDP0_SX448 +FGDP0_SX88 +FGMB0_SI1145 +FGMB0_SI1775 +FGMB0_SI515 +FGMB0_SX155 +FGMB0_SX245 +FGMB0_SX335 +FGMB0_SX425 
+FGMB0_SX65 +FGRW0_SI1152 +FGRW0_SI1782 +FGRW0_SI1990 +FGRW0_SX162 +FGRW0_SX252 +FGRW0_SX342 +FGRW0_SX432 +FGRW0_SX72 +FHLM0_SI1560 +FHLM0_SI2190 +FHLM0_SI930 +FHLM0_SX120 +FHLM0_SX210 +FHLM0_SX300 +FHLM0_SX349 +FHLM0_SX390 +FHXS0_SI1075 +FHXS0_SI2302 +FHXS0_SI2335 +FHXS0_SX175 +FHXS0_SX265 +FHXS0_SX355 +FHXS0_SX445 +FHXS0_SX85 +FJDM2_SI1582 +FJDM2_SI1964 +FJDM2_SI2212 +FJDM2_SX142 +FJDM2_SX232 +FJDM2_SX322 +FJDM2_SX412 +FJDM2_SX52 +FJEN0_SI1047 +FJEN0_SI1677 +FJEN0_SI2307 +FJEN0_SX147 +FJEN0_SX237 +FJEN0_SX327 +FJEN0_SX417 +FJEN0_SX57 +FJHK0_SI1022 +FJHK0_SI1652 +FJHK0_SI2282 +FJHK0_SX122 +FJHK0_SX212 +FJHK0_SX302 +FJHK0_SX32 +FJHK0_SX392 +FJKL0_SI1562 +FJKL0_SI2192 +FJKL0_SI932 +FJKL0_SX122 +FJKL0_SX212 +FJKL0_SX302 +FJKL0_SX32 +FJKL0_SX392 +FJLG0_SI1506 +FJLG0_SI1889 +FJLG0_SI2306 +FJLG0_SX179 +FJLG0_SX269 +FJLG0_SX359 +FJLG0_SX449 +FJLG0_SX89 +FJLR0_SI1231 +FJLR0_SI1861 +FJLR0_SI601 +FJLR0_SX151 +FJLR0_SX241 +FJLR0_SX331 +FJLR0_SX421 +FJLR0_SX61 +FJRB0_SI1302 +FJRB0_SI1932 +FJRB0_SI672 +FJRB0_SX132 +FJRB0_SX222 +FJRB0_SX312 +FJRB0_SX402 +FJRB0_SX42 +FJRP1_SI1432 +FJRP1_SI2062 +FJRP1_SI802 +FJRP1_SX172 +FJRP1_SX262 +FJRP1_SX352 +FJRP1_SX442 +FJRP1_SX82 +FJSK0_SI1052 +FJSK0_SI1682 +FJSK0_SI2312 +FJSK0_SX152 +FJSK0_SX242 +FJSK0_SX332 +FJSK0_SX422 +FJSK0_SX62 +FJSP0_SI1434 +FJSP0_SI1763 +FJSP0_SI804 +FJSP0_SX174 +FJSP0_SX264 +FJSP0_SX354 +FJSP0_SX444 +FJSP0_SX84 +FJWB1_SI2055 +FJWB1_SI748 +FJWB1_SI795 +FJWB1_SX165 +FJWB1_SX255 +FJWB1_SX345 +FJWB1_SX435 +FJWB1_SX75 +FJXM0_SI1211 +FJXM0_SI1971 +FJXM0_SI581 +FJXM0_SX131 +FJXM0_SX221 +FJXM0_SX311 +FJXM0_SX401 +FJXM0_SX41 +FJXP0_SI1122 +FJXP0_SI1752 +FJXP0_SI492 +FJXP0_SX132 +FJXP0_SX222 +FJXP0_SX312 +FJXP0_SX402 +FJXP0_SX42 +FKAA0_SI1208 +FKAA0_SI1838 +FKAA0_SI578 +FKAA0_SX128 +FKAA0_SX218 +FKAA0_SX308 +FKAA0_SX38 +FKAA0_SX398 +FKDE0_SI1141 +FKDE0_SI1771 +FKDE0_SI2221 +FKDE0_SX151 +FKDE0_SX241 +FKDE0_SX331 +FKDE0_SX421 +FKDE0_SX61 +FKDW0_SI1207 +FKDW0_SI1891 +FKDW0_SI577 +FKDW0_SX127 +FKDW0_SX217 +FKDW0_SX307 +FKDW0_SX37 +FKDW0_SX397 +FKFB0_SI1608 +FKFB0_SI2238 +FKFB0_SI978 +FKFB0_SX168 +FKFB0_SX258 +FKFB0_SX348 +FKFB0_SX438 +FKFB0_SX78 +FKKH0_SI1290 +FKKH0_SI1920 +FKKH0_SI660 +FKKH0_SX120 +FKKH0_SX210 +FKKH0_SX30 +FKKH0_SX300 +FKKH0_SX390 +FKLC0_SI1615 +FKLC0_SI2245 +FKLC0_SI985 +FKLC0_SX175 +FKLC0_SX265 +FKLC0_SX355 +FKLC0_SX445 +FKLC0_SX85 +FKLC1_SI1048 +FKLC1_SI1678 +FKLC1_SI2308 +FKLC1_SX148 +FKLC1_SX238 +FKLC1_SX328 +FKLC1_SX418 +FKLC1_SX58 +FKLH0_SI1257 +FKLH0_SI1887 +FKLH0_SI627 +FKLH0_SX177 +FKLH0_SX267 +FKLH0_SX357 +FKLH0_SX447 +FKLH0_SX87 +FKSR0_SI1117 +FKSR0_SI1747 +FKSR0_SI487 +FKSR0_SX161 +FKSR0_SX217 +FKSR0_SX366 +FKSR0_SX37 +FKSR0_SX397 +FLAC0_SI1339 +FLAC0_SI2161 +FLAC0_SI901 +FLAC0_SX181 +FLAC0_SX271 +FLAC0_SX361 +FLAC0_SX451 +FLAC0_SX91 +FLAG0_SI1464 +FLAG0_SI2094 +FLAG0_SI834 +FLAG0_SX114 +FLAG0_SX204 +FLAG0_SX24 +FLAG0_SX294 +FLAG0_SX384 +FLEH0_SI1051 +FLEH0_SI1681 +FLEH0_SI2311 +FLEH0_SX151 +FLEH0_SX241 +FLEH0_SX331 +FLEH0_SX421 +FLEH0_SX61 +FLET0_SI1137 +FLET0_SI1767 +FLET0_SI507 +FLET0_SX147 +FLET0_SX237 +FLET0_SX277 +FLET0_SX417 +FLET0_SX57 +FLHD0_SI1344 +FLHD0_SI1827 +FLHD0_SI1974 +FLHD0_SX174 +FLHD0_SX264 +FLHD0_SX354 +FLHD0_SX444 +FLHD0_SX84 +FLJA0_SI1078 +FLJA0_SI1708 +FLJA0_SI2338 +FLJA0_SX178 +FLJA0_SX268 +FLJA0_SX358 +FLJA0_SX448 +FLJA0_SX88 +FLJD0_SI1516 +FLJD0_SI2146 +FLJD0_SI886 +FLJD0_SX166 +FLJD0_SX256 +FLJD0_SX346 +FLJD0_SX436 +FLJD0_SX76 +FLJG0_SI1611 +FLJG0_SI2241 +FLJG0_SI981 +FLJG0_SX171 +FLJG0_SX261 +FLJG0_SX351 +FLJG0_SX441 +FLJG0_SX81 +FLKM0_SI1880 +FLKM0_SI620 +FLKM0_SI686 +FLKM0_SX116 +FLKM0_SX260 
+FLKM0_SX350 +FLKM0_SX440 +FLKM0_SX80 +FLMA0_SI1243 +FLMA0_SI1873 +FLMA0_SI613 +FLMA0_SX163 +FLMA0_SX253 +FLMA0_SX343 +FLMA0_SX433 +FLMA0_SX73 +FLMC0_SI1372 +FLMC0_SI2002 +FLMC0_SI742 +FLMC0_SX112 +FLMC0_SX22 +FLMC0_SX292 +FLMC0_SX336 +FLMC0_SX382 +FLMK0_SI1035 +FLMK0_SI1229 +FLMK0_SI2295 +FLMK0_SX135 +FLMK0_SX225 +FLMK0_SX315 +FLMK0_SX405 +FLMK0_SX45 +FLOD0_SI1287 +FLOD0_SI1917 +FLOD0_SI657 +FLOD0_SX117 +FLOD0_SX171 +FLOD0_SX207 +FLOD0_SX297 +FLOD0_SX387 +FLTM0_SI1070 +FLTM0_SI1700 +FLTM0_SI2330 +FLTM0_SX170 +FLTM0_SX260 +FLTM0_SX350 +FLTM0_SX440 +FLTM0_SX80 +FMAH1_SI1509 +FMAH1_SI2139 +FMAH1_SI879 +FMAH1_SX159 +FMAH1_SX249 +FMAH1_SX339 +FMAH1_SX429 +FMAH1_SX69 +FMBG0_SI1160 +FMBG0_SI1790 +FMBG0_SI2264 +FMBG0_SX260 +FMBG0_SX3 +FMBG0_SX350 +FMBG0_SX440 +FMBG0_SX80 +FMEM0_SI1377 +FMEM0_SI2007 +FMEM0_SI747 +FMEM0_SX117 +FMEM0_SX207 +FMEM0_SX297 +FMEM0_SX333 +FMEM0_SX387 +FMJB0_SI1177 +FMJB0_SI1807 +FMJB0_SI547 +FMJB0_SX187 +FMJB0_SX277 +FMJB0_SX367 +FMJB0_SX7 +FMJB0_SX97 +FMJF0_SI1254 +FMJF0_SI1884 +FMJF0_SI624 +FMJF0_SX174 +FMJF0_SX264 +FMJF0_SX354 +FMJF0_SX444 +FMJF0_SX84 +FMJU0_SI1389 +FMJU0_SI2019 +FMJU0_SI759 +FMJU0_SX129 +FMJU0_SX219 +FMJU0_SX309 +FMJU0_SX39 +FMJU0_SX399 +FMKC0_SI1041 +FMKC0_SI1072 +FMKC0_SI1702 +FMKC0_SX172 +FMKC0_SX262 +FMKC0_SX352 +FMKC0_SX442 +FMKC0_SX82 +FMKF0_SI1018 +FMKF0_SI1536 +FMKF0_SI906 +FMKF0_SX186 +FMKF0_SX276 +FMKF0_SX366 +FMKF0_SX6 +FMKF0_SX96 +FMMH0_SI1537 +FMMH0_SI2167 +FMMH0_SI907 +FMMH0_SX187 +FMMH0_SX367 +FMMH0_SX420 +FMMH0_SX7 +FMMH0_SX97 +FMPG0_SI1602 +FMPG0_SI2232 +FMPG0_SI972 +FMPG0_SX162 +FMPG0_SX252 +FMPG0_SX342 +FMPG0_SX432 +FMPG0_SX72 +FNKL0_SI1522 +FNKL0_SI2152 +FNKL0_SI892 +FNKL0_SX172 +FNKL0_SX196 +FNKL0_SX262 +FNKL0_SX442 +FNKL0_SX82 +FNTB0_SI1203 +FNTB0_SI573 +FNTB0_SI679 +FNTB0_SX123 +FNTB0_SX213 +FNTB0_SX303 +FNTB0_SX33 +FNTB0_SX393 +FPAB1_SI1471 +FPAB1_SI2101 +FPAB1_SI841 +FPAB1_SX121 +FPAB1_SX211 +FPAB1_SX301 +FPAB1_SX31 +FPAB1_SX391 +FPAC0_SI1921 +FPAC0_SI2011 +FPAC0_SI661 +FPAC0_SX121 +FPAC0_SX211 +FPAC0_SX301 +FPAC0_SX31 +FPAC0_SX391 +FPAD0_SI1346 +FPAD0_SI1976 +FPAD0_SI716 +FPAD0_SX176 +FPAD0_SX266 +FPAD0_SX356 +FPAD0_SX446 +FPAD0_SX86 +FPAF0_SI1054 +FPAF0_SI1684 +FPAF0_SI2314 +FPAF0_SX154 +FPAF0_SX244 +FPAF0_SX334 +FPAF0_SX424 +FPAF0_SX64 +FPAZ0_SI1593 +FPAZ0_SI2223 +FPAZ0_SI963 +FPAZ0_SX153 +FPAZ0_SX243 +FPAZ0_SX27 +FPAZ0_SX423 +FPAZ0_SX63 +FPJF0_SI1046 +FPJF0_SI1259 +FPJF0_SI1676 +FPJF0_SX146 +FPJF0_SX236 +FPJF0_SX326 +FPJF0_SX352 +FPJF0_SX56 +FPLS0_SI1590 +FPLS0_SI2220 +FPLS0_SI960 +FPLS0_SX150 +FPLS0_SX240 +FPLS0_SX3 +FPLS0_SX330 +FPLS0_SX60 +FPMY0_SI1153 +FPMY0_SI1783 +FPMY0_SI523 +FPMY0_SX163 +FPMY0_SX196 +FPMY0_SX253 +FPMY0_SX343 +FPMY0_SX73 +FREH0_SI1315 +FREH0_SI1945 +FREH0_SI685 +FREH0_SX145 +FREH0_SX235 +FREH0_SX325 +FREH0_SX415 +FREH0_SX55 +FRJB0_SI1427 +FRJB0_SI1470 +FRJB0_SI1794 +FRJB0_SX167 +FRJB0_SX257 +FRJB0_SX347 +FRJB0_SX437 +FRJB0_SX77 +FRLL0_SI1514 +FRLL0_SI805 +FRLL0_SI884 +FRLL0_SX164 +FRLL0_SX254 +FRLL0_SX344 +FRLL0_SX434 +FRLL0_SX74 +FSAG0_SI1323 +FSAG0_SI1953 +FSAG0_SI693 +FSAG0_SX153 +FSAG0_SX243 +FSAG0_SX333 +FSAG0_SX423 +FSAG0_SX63 +FSAH0_SI1244 +FSAH0_SI1874 +FSAH0_SI614 +FSAH0_SX164 +FSAH0_SX327 +FSAH0_SX344 +FSAH0_SX434 +FSAH0_SX74 +FSAK0_SI1300 +FSAK0_SI1930 +FSAK0_SI670 +FSAK0_SX130 +FSAK0_SX220 +FSAK0_SX310 +FSAK0_SX40 +FSAK0_SX400 +FSBK0_SI1069 +FSBK0_SI1699 +FSBK0_SI2329 +FSBK0_SX169 +FSBK0_SX259 +FSBK0_SX349 +FSBK0_SX439 +FSBK0_SX79 +FSCN0_SI1886 +FSCN0_SI626 +FSCN0_SI705 +FSCN0_SX176 +FSCN0_SX266 +FSCN0_SX356 +FSCN0_SX446 +FSCN0_SX86 +FSDC0_SI1312 +FSDC0_SI1942 +FSDC0_SI2234 +FSDC0_SX142 
+FSDC0_SX232 +FSDC0_SX322 +FSDC0_SX412 +FSDC0_SX52 +FSDJ0_SI1115 +FSDJ0_SI1745 +FSDJ0_SI485 +FSDJ0_SX125 +FSDJ0_SX215 +FSDJ0_SX305 +FSDJ0_SX35 +FSDJ0_SX395 +FSGF0_SI1557 +FSGF0_SI2187 +FSGF0_SI927 +FSGF0_SX117 +FSGF0_SX207 +FSGF0_SX27 +FSGF0_SX297 +FSGF0_SX387 +FSJG0_SI1570 +FSJG0_SI2200 +FSJG0_SI940 +FSJG0_SX130 +FSJG0_SX220 +FSJG0_SX310 +FSJG0_SX40 +FSJG0_SX400 +FSJK1_SI1025 +FSJK1_SI2285 +FSJK1_SI696 +FSJK1_SX125 +FSJK1_SX215 +FSJK1_SX305 +FSJK1_SX35 +FSJK1_SX395 +FSJS0_SI1171 +FSJS0_SI1801 +FSJS0_SI541 +FSJS0_SX181 +FSJS0_SX271 +FSJS0_SX361 +FSJS0_SX451 +FSJS0_SX91 +FSJW0_SI1333 +FSJW0_SI1963 +FSJW0_SI703 +FSJW0_SX163 +FSJW0_SX253 +FSJW0_SX343 +FSJW0_SX433 +FSJW0_SX73 +FSKC0_SI1416 +FSKC0_SI2046 +FSKC0_SI786 +FSKC0_SX156 +FSKC0_SX246 +FSKC0_SX336 +FSKC0_SX426 +FSKC0_SX66 +FSKL0_SI1529 +FSKL0_SI2159 +FSKL0_SI899 +FSKL0_SX179 +FSKL0_SX269 +FSKL0_SX359 +FSKL0_SX449 +FSKL0_SX89 +FSKP0_SI1098 +FSKP0_SI1728 +FSKP0_SI468 +FSKP0_SX108 +FSKP0_SX18 +FSKP0_SX198 +FSKP0_SX288 +FSKP0_SX378 +FSLS0_SI1056 +FSLS0_SI1686 +FSLS0_SI2316 +FSLS0_SX156 +FSLS0_SX202 +FSLS0_SX246 +FSLS0_SX426 +FSLS0_SX66 +FSMA0_SI1621 +FSMA0_SI2251 +FSMA0_SI991 +FSMA0_SX181 +FSMA0_SX271 +FSMA0_SX361 +FSMA0_SX451 +FSMA0_SX91 +FSMM0_SI1314 +FSMM0_SI1944 +FSMM0_SI684 +FSMM0_SX144 +FSMM0_SX234 +FSMM0_SX324 +FSMM0_SX414 +FSMM0_SX54 +FSMS1_SI1504 +FSMS1_SI2134 +FSMS1_SI874 +FSMS1_SX154 +FSMS1_SX244 +FSMS1_SX334 +FSMS1_SX347 +FSMS1_SX64 +FSPM0_SI1241 +FSPM0_SI1871 +FSPM0_SI611 +FSPM0_SX161 +FSPM0_SX251 +FSPM0_SX341 +FSPM0_SX431 +FSPM0_SX71 +FSRH0_SI1719 +FSRH0_SI1931 +FSRH0_SI671 +FSRH0_SX131 +FSRH0_SX221 +FSRH0_SX311 +FSRH0_SX401 +FSRH0_SX41 +FSSB0_SI1082 +FSSB0_SI1712 +FSSB0_SI2342 +FSSB0_SX182 +FSSB0_SX272 +FSSB0_SX362 +FSSB0_SX452 +FSSB0_SX92 +FTAJ0_SI1329 +FTAJ0_SI474 +FTAJ0_SI699 +FTAJ0_SX159 +FTAJ0_SX249 +FTAJ0_SX339 +FTAJ0_SX429 +FTAJ0_SX69 +FTBR0_SI1402 +FTBR0_SI2181 +FTBR0_SI921 +FTBR0_SX111 +FTBR0_SX201 +FTBR0_SX21 +FTBR0_SX291 +FTBR0_SX381 +FTBW0_SI1345 +FTBW0_SI1975 +FTBW0_SI715 +FTBW0_SX175 +FTBW0_SX265 +FTBW0_SX355 +FTBW0_SX445 +FTBW0_SX85 +FTLG0_SI1743 +FTLG0_SI483 +FTLG0_SI840 +FTLG0_SX123 +FTLG0_SX213 +FTLG0_SX303 +FTLG0_SX33 +FTLG0_SX393 +FTMG0_SI1532 +FTMG0_SI2162 +FTMG0_SI902 +FTMG0_SX182 +FTMG0_SX272 +FTMG0_SX362 +FTMG0_SX452 +FTMG0_SX92 +FVFB0_SI1032 +FVFB0_SI1510 +FVFB0_SI2292 +FVFB0_SX132 +FVFB0_SX222 +FVFB0_SX312 +FVFB0_SX402 +FVFB0_SX42 +FVKB0_SI1159 +FVKB0_SI1789 +FVKB0_SI529 +FVKB0_SX169 +FVKB0_SX259 +FVKB0_SX349 +FVKB0_SX439 +FVKB0_SX79 +FVMH0_SI1466 +FVMH0_SI2096 +FVMH0_SI836 +FVMH0_SX116 +FVMH0_SX206 +FVMH0_SX26 +FVMH0_SX296 +FVMH0_SX386 +MABC0_SI1620 +MABC0_SI2041 +MABC0_SI781 +MABC0_SX151 +MABC0_SX241 +MABC0_SX331 +MABC0_SX421 +MABC0_SX61 +MADC0_SI1367 +MADC0_SI1997 +MADC0_SI737 +MADC0_SX107 +MADC0_SX17 +MADC0_SX197 +MADC0_SX287 +MADC0_SX377 +MADD0_SI1295 +MADD0_SI1798 +MADD0_SI538 +MADD0_SX178 +MADD0_SX268 +MADD0_SX358 +MADD0_SX448 +MADD0_SX88 +MAEB0_SI1411 +MAEB0_SI2250 +MAEB0_SI990 +MAEB0_SX180 +MAEB0_SX270 +MAEB0_SX360 +MAEB0_SX450 +MAEB0_SX90 +MAEO0_SI1326 +MAEO0_SI1655 +MAEO0_SI1956 +MAEO0_SX156 +MAEO0_SX246 +MAEO0_SX336 +MAEO0_SX426 +MAEO0_SX66 +MAFM0_SI1569 +MAFM0_SI2199 +MAFM0_SI939 +MAFM0_SX129 +MAFM0_SX219 +MAFM0_SX309 +MAFM0_SX39 +MAFM0_SX399 +MAJP0_SI1074 +MAJP0_SI1704 +MAJP0_SI2334 +MAJP0_SX174 +MAJP0_SX264 +MAJP0_SX354 +MAJP0_SX444 +MAJP0_SX84 +MAKB0_SI1016 +MAKB0_SI1646 +MAKB0_SI2276 +MAKB0_SX116 +MAKB0_SX206 +MAKB0_SX26 +MAKB0_SX296 +MAKB0_SX386 +MAKR0_SI1352 +MAKR0_SI1982 +MAKR0_SI722 +MAKR0_SX182 +MAKR0_SX272 +MAKR0_SX362 +MAKR0_SX452 +MAKR0_SX92 +MAPV0_SI1293 +MAPV0_SI1923 
+MAPV0_SI663 +MAPV0_SX123 +MAPV0_SX213 +MAPV0_SX303 +MAPV0_SX33 +MAPV0_SX393 +MARC0_SI1188 +MARC0_SI1818 +MARC0_SI558 +MARC0_SX108 +MARC0_SX18 +MARC0_SX198 +MARC0_SX288 +MARC0_SX378 +MARW0_SI1276 +MARW0_SI1906 +MARW0_SI646 +MARW0_SX106 +MARW0_SX16 +MARW0_SX286 +MARW0_SX349 +MARW0_SX376 +MBAR0_SI1319 +MBAR0_SI1949 +MBAR0_SI689 +MBAR0_SX149 +MBAR0_SX239 +MBAR0_SX329 +MBAR0_SX419 +MBAR0_SX59 +MBBR0_SI1055 +MBBR0_SI1685 +MBBR0_SI2315 +MBBR0_SX155 +MBBR0_SX245 +MBBR0_SX335 +MBBR0_SX425 +MBBR0_SX65 +MBCG0_SI2217 +MBCG0_SI486 +MBCG0_SI957 +MBCG0_SX147 +MBCG0_SX237 +MBCG0_SX327 +MBCG0_SX417 +MBCG0_SX57 +MBEF0_SI1281 +MBEF0_SI1911 +MBEF0_SI651 +MBEF0_SX111 +MBEF0_SX201 +MBEF0_SX21 +MBEF0_SX291 +MBEF0_SX381 +MBGT0_SI1341 +MBGT0_SI1841 +MBGT0_SI711 +MBGT0_SX171 +MBGT0_SX261 +MBGT0_SX351 +MBGT0_SX441 +MBGT0_SX81 +MBJV0_SI1247 +MBJV0_SI1877 +MBJV0_SI617 +MBJV0_SX167 +MBJV0_SX257 +MBJV0_SX347 +MBJV0_SX437 +MBJV0_SX77 +MBMA0_SI1222 +MBMA0_SI1852 +MBMA0_SI592 +MBMA0_SX142 +MBMA0_SX232 +MBMA0_SX322 +MBMA0_SX412 +MBMA0_SX52 +MBMA1_SI2207 +MBMA1_SI2214 +MBMA1_SI954 +MBMA1_SX144 +MBMA1_SX234 +MBMA1_SX324 +MBMA1_SX414 +MBMA1_SX54 +MBML0_SI1169 +MBML0_SI1799 +MBML0_SI539 +MBML0_SX179 +MBML0_SX269 +MBML0_SX359 +MBML0_SX449 +MBML0_SX89 +MBOM0_SI1014 +MBOM0_SI1644 +MBOM0_SI2274 +MBOM0_SX114 +MBOM0_SX204 +MBOM0_SX294 +MBOM0_SX311 +MBOM0_SX384 +MBSB0_SI1353 +MBSB0_SI1983 +MBSB0_SI723 +MBSB0_SX183 +MBSB0_SX273 +MBSB0_SX3 +MBSB0_SX363 +MBSB0_SX93 +MBTH0_SI2102 +MBTH0_SI505 +MBTH0_SI757 +MBTH0_SX122 +MBTH0_SX212 +MBTH0_SX302 +MBTH0_SX32 +MBTH0_SX392 +MBWP0_SI1531 +MBWP0_SI1969 +MBWP0_SI709 +MBWP0_SX169 +MBWP0_SX259 +MBWP0_SX349 +MBWP0_SX439 +MBWP0_SX79 +MCAE0_SI1447 +MCAE0_SI2077 +MCAE0_SI817 +MCAE0_SX187 +MCAE0_SX277 +MCAE0_SX367 +MCAE0_SX7 +MCAE0_SX97 +MCAL0_SI1138 +MCAL0_SI1768 +MCAL0_SI508 +MCAL0_SX148 +MCAL0_SX238 +MCAL0_SX328 +MCAL0_SX418 +MCAL0_SX58 +MCDC0_SI1292 +MCDC0_SI1922 +MCDC0_SI662 +MCDC0_SX122 +MCDC0_SX212 +MCDC0_SX302 +MCDC0_SX32 +MCDC0_SX392 +MCDD0_SI1513 +MCDD0_SI2143 +MCDD0_SI883 +MCDD0_SX163 +MCDD0_SX253 +MCDD0_SX343 +MCDD0_SX433 +MCDD0_SX73 +MCDR0_SI1154 +MCDR0_SI1784 +MCDR0_SI524 +MCDR0_SX164 +MCDR0_SX254 +MCDR0_SX344 +MCDR0_SX434 +MCDR0_SX74 +MCEF0_SI1135 +MCEF0_SI1765 +MCEF0_SI842 +MCEF0_SX145 +MCEF0_SX235 +MCEF0_SX325 +MCEF0_SX415 +MCEF0_SX55 +MCEW0_SI1442 +MCEW0_SI2072 +MCEW0_SI812 +MCEW0_SX182 +MCEW0_SX272 +MCEW0_SX362 +MCEW0_SX452 +MCEW0_SX92 +MCHL0_SI1347 +MCHL0_SI1404 +MCHL0_SI1977 +MCHL0_SX177 +MCHL0_SX267 +MCHL0_SX357 +MCHL0_SX447 +MCHL0_SX87 +MCLK0_SI1660 +MCLK0_SI2290 +MCLK0_SI650 +MCLK0_SX130 +MCLK0_SX220 +MCLK0_SX310 +MCLK0_SX40 +MCLK0_SX400 +MCLM0_SI1456 +MCLM0_SI2086 +MCLM0_SI826 +MCLM0_SX106 +MCLM0_SX16 +MCLM0_SX196 +MCLM0_SX286 +MCLM0_SX376 +MCPM0_SI1194 +MCPM0_SI1824 +MCPM0_SI564 +MCPM0_SX114 +MCPM0_SX204 +MCPM0_SX24 +MCPM0_SX294 +MCPM0_SX384 +MCRE0_SI1121 +MCRE0_SI1725 +MCRE0_SI1751 +MCRE0_SX131 +MCRE0_SX221 +MCRE0_SX24 +MCRE0_SX401 +MCRE0_SX41 +MCSS0_SI1380 +MCSS0_SI688 +MCSS0_SI750 +MCSS0_SX120 +MCSS0_SX210 +MCSS0_SX30 +MCSS0_SX300 +MCSS0_SX390 +MCTH0_SI1209 +MCTH0_SI1839 +MCTH0_SI579 +MCTH0_SX129 +MCTH0_SX219 +MCTH0_SX309 +MCTH0_SX39 +MCTH0_SX399 +MCTM0_SI1350 +MCTM0_SI1980 +MCTM0_SI720 +MCTM0_SX180 +MCTM0_SX270 +MCTM0_SX360 +MCTM0_SX450 +MCTM0_SX90 +MCXM0_SI1351 +MCXM0_SI1981 +MCXM0_SI721 +MCXM0_SX181 +MCXM0_SX271 +MCXM0_SX361 +MCXM0_SX451 +MCXM0_SX91 +MDAC0_SI1261 +MDAC0_SI1837 +MDAC0_SI631 +MDAC0_SX181 +MDAC0_SX271 +MDAC0_SX361 +MDAC0_SX451 +MDAC0_SX91 +MDAS0_SI1266 +MDAS0_SI1896 +MDAS0_SI636 +MDAS0_SX186 +MDAS0_SX21 +MDAS0_SX276 +MDAS0_SX6 +MDAS0_SX96 +MDBB1_SI1006 
+MDBB1_SI1636 +MDBB1_SI2056 +MDBB1_SX106 +MDBB1_SX16 +MDBB1_SX196 +MDBB1_SX286 +MDBB1_SX376 +MDBP0_SI1158 +MDBP0_SI1788 +MDBP0_SI528 +MDBP0_SX168 +MDBP0_SX258 +MDBP0_SX348 +MDBP0_SX438 +MDBP0_SX78 +MDCD0_SI1415 +MDCD0_SI2045 +MDCD0_SI785 +MDCD0_SX155 +MDCD0_SX245 +MDCD0_SX335 +MDCD0_SX425 +MDCD0_SX65 +MDCM0_SI1480 +MDCM0_SI2110 +MDCM0_SI850 +MDCM0_SX130 +MDCM0_SX220 +MDCM0_SX310 +MDCM0_SX40 +MDCM0_SX400 +MDDC0_SI1419 +MDDC0_SI2049 +MDDC0_SI789 +MDDC0_SX159 +MDDC0_SX249 +MDDC0_SX339 +MDDC0_SX429 +MDDC0_SX69 +MDED0_SI1170 +MDED0_SI1800 +MDED0_SI540 +MDED0_SX180 +MDED0_SX270 +MDED0_SX360 +MDED0_SX450 +MDED0_SX90 +MDEF0_SI1123 +MDEF0_SI1563 +MDEF0_SI2193 +MDEF0_SX123 +MDEF0_SX213 +MDEF0_SX303 +MDEF0_SX33 +MDEF0_SX393 +MDEM0_SI1868 +MDEM0_SI608 +MDEM0_SI800 +MDEM0_SX158 +MDEM0_SX248 +MDEM0_SX338 +MDEM0_SX428 +MDEM0_SX68 +MDHL0_SI1439 +MDHL0_SI2069 +MDHL0_SI809 +MDHL0_SX179 +MDHL0_SX269 +MDHL0_SX359 +MDHL0_SX449 +MDHL0_SX89 +MDHS0_SI1530 +MDHS0_SI2160 +MDHS0_SI900 +MDHS0_SX180 +MDHS0_SX270 +MDHS0_SX360 +MDHS0_SX450 +MDHS0_SX90 +MDJM0_SI1455 +MDJM0_SI2085 +MDJM0_SI825 +MDJM0_SX105 +MDJM0_SX15 +MDJM0_SX195 +MDJM0_SX285 +MDJM0_SX375 +MDKS0_SI1066 +MDKS0_SI1696 +MDKS0_SI2326 +MDKS0_SX166 +MDKS0_SX256 +MDKS0_SX346 +MDKS0_SX436 +MDKS0_SX76 +MDLB0_SI1306 +MDLB0_SI1936 +MDLB0_SI676 +MDLB0_SX136 +MDLB0_SX226 +MDLB0_SX316 +MDLB0_SX406 +MDLB0_SX46 +MDLC0_SI1395 +MDLC0_SI2025 +MDLC0_SI765 +MDLC0_SX135 +MDLC0_SX225 +MDLC0_SX315 +MDLC0_SX405 +MDLC0_SX45 +MDLC1_SI1435 +MDLC1_SI2065 +MDLC1_SI2144 +MDLC1_SX175 +MDLC1_SX265 +MDLC1_SX355 +MDLC1_SX445 +MDLC1_SX85 +MDLC2_SI1614 +MDLC2_SI2244 +MDLC2_SI984 +MDLC2_SX174 +MDLC2_SX264 +MDLC2_SX354 +MDLC2_SX444 +MDLC2_SX84 +MDLH0_SI1960 +MDLH0_SI574 +MDLH0_SI700 +MDLH0_SX160 +MDLH0_SX250 +MDLH0_SX340 +MDLH0_SX430 +MDLH0_SX70 +MDLM0_SI1234 +MDLM0_SI1864 +MDLM0_SI604 +MDLM0_SX154 +MDLM0_SX244 +MDLM0_SX334 +MDLM0_SX424 +MDLM0_SX64 +MDLR0_SI1233 +MDLR0_SI1863 +MDLR0_SI603 +MDLR0_SX153 +MDLR0_SX243 +MDLR0_SX333 +MDLR0_SX423 +MDLR0_SX63 +MDLR1_SI1299 +MDLR1_SI1929 +MDLR1_SI669 +MDLR1_SX129 +MDLR1_SX219 +MDLR1_SX309 +MDLR1_SX39 +MDLR1_SX399 +MDMA0_SI1238 +MDMA0_SI1430 +MDMA0_SI2060 +MDMA0_SX170 +MDMA0_SX260 +MDMA0_SX350 +MDMA0_SX440 +MDMA0_SX80 +MDMT0_SI1832 +MDMT0_SI2341 +MDMT0_SI572 +MDMT0_SX122 +MDMT0_SX212 +MDMT0_SX302 +MDMT0_SX32 +MDMT0_SX392 +MDNS0_SI1011 +MDNS0_SI2271 +MDNS0_SI873 +MDNS0_SX111 +MDNS0_SX201 +MDNS0_SX21 +MDNS0_SX291 +MDNS0_SX381 +MDPB0_SI1760 +MDPB0_SI2126 +MDPB0_SI866 +MDPB0_SX146 +MDPB0_SX236 +MDPB0_SX326 +MDPB0_SX416 +MDPB0_SX56 +MDPK0_SI1053 +MDPK0_SI1683 +MDPK0_SI552 +MDPK0_SX153 +MDPK0_SX243 +MDPK0_SX333 +MDPK0_SX423 +MDPK0_SX63 +MDPS0_SI1651 +MDPS0_SI1979 +MDPS0_SI719 +MDPS0_SX179 +MDPS0_SX269 +MDPS0_SX359 +MDPS0_SX449 +MDPS0_SX89 +MDRD0_SI1382 +MDRD0_SI2012 +MDRD0_SI752 +MDRD0_SX122 +MDRD0_SX212 +MDRD0_SX302 +MDRD0_SX32 +MDRD0_SX392 +MDSJ0_SI1462 +MDSJ0_SI2092 +MDSJ0_SI832 +MDSJ0_SX112 +MDSJ0_SX22 +MDSJ0_SX292 +MDSJ0_SX382 +MDSJ0_SX438 +MDSS0_SI1881 +MDSS0_SI2087 +MDSS0_SI621 +MDSS0_SX171 +MDSS0_SX261 +MDSS0_SX351 +MDSS0_SX441 +MDSS0_SX81 +MDSS1_SI1327 +MDSS1_SI1713 +MDSS1_SI697 +MDSS1_SX157 +MDSS1_SX247 +MDSS1_SX337 +MDSS1_SX427 +MDSS1_SX67 +MDTB0_SI1200 +MDTB0_SI1830 +MDTB0_SI570 +MDTB0_SX120 +MDTB0_SX210 +MDTB0_SX300 +MDTB0_SX321 +MDTB0_SX390 +MDWD0_SI1260 +MDWD0_SI1890 +MDWD0_SI557 +MDWD0_SX180 +MDWD0_SX270 +MDWD0_SX360 +MDWD0_SX450 +MDWD0_SX90 +MDWH0_SI1168 +MDWH0_SI1925 +MDWH0_SI665 +MDWH0_SX125 +MDWH0_SX215 +MDWH0_SX305 +MDWH0_SX35 +MDWH0_SX395 +MDWM0_SI1546 +MDWM0_SI2176 +MDWM0_SI916 +MDWM0_SX106 +MDWM0_SX16 +MDWM0_SX286 +MDWM0_SX376 
+MDWM0_SX433 +MEAL0_SI1547 +MEAL0_SI2177 +MEAL0_SI917 +MEAL0_SX107 +MEAL0_SX197 +MEAL0_SX287 +MEAL0_SX347 +MEAL0_SX377 +MEDR0_SI1374 +MEDR0_SI2004 +MEDR0_SI744 +MEDR0_SX114 +MEDR0_SX204 +MEDR0_SX24 +MEDR0_SX294 +MEDR0_SX384 +MEFG0_SI465 +MEFG0_SI491 +MEFG0_SI598 +MEFG0_SX105 +MEFG0_SX15 +MEFG0_SX195 +MEFG0_SX285 +MEFG0_SX375 +MEGJ0_SI1337 +MEGJ0_SI1967 +MEGJ0_SI707 +MEGJ0_SX167 +MEGJ0_SX257 +MEGJ0_SX3 +MEGJ0_SX437 +MEGJ0_SX77 +MEJL0_SI1592 +MEJL0_SI1654 +MEJL0_SI962 +MEJL0_SX152 +MEJL0_SX242 +MEJL0_SX332 +MEJL0_SX422 +MEJL0_SX62 +MEJS0_SI1240 +MEJS0_SI1870 +MEJS0_SI610 +MEJS0_SX160 +MEJS0_SX250 +MEJS0_SX340 +MEJS0_SX430 +MEJS0_SX70 +MESG0_SI1332 +MESG0_SI1962 +MESG0_SI702 +MESG0_SX162 +MESG0_SX252 +MESG0_SX342 +MESG0_SX432 +MESG0_SX72 +MESJ0_SI2039 +MESJ0_SI2257 +MESJ0_SI997 +MESJ0_SX187 +MESJ0_SX277 +MESJ0_SX367 +MESJ0_SX7 +MESJ0_SX97 +MEWM0_SI1348 +MEWM0_SI1978 +MEWM0_SI718 +MEWM0_SX178 +MEWM0_SX268 +MEWM0_SX358 +MEWM0_SX448 +MEWM0_SX88 +MFER0_SI1492 +MFER0_SI2122 +MFER0_SI862 +MFER0_SX142 +MFER0_SX232 +MFER0_SX322 +MFER0_SX412 +MFER0_SX52 +MFMC0_SI1132 +MFMC0_SI1762 +MFMC0_SI502 +MFMC0_SX142 +MFMC0_SX232 +MFMC0_SX322 +MFMC0_SX412 +MFMC0_SX52 +MFRM0_SI1155 +MFRM0_SI1717 +MFRM0_SI1785 +MFRM0_SX165 +MFRM0_SX255 +MFRM0_SX345 +MFRM0_SX435 +MFRM0_SX75 +MFWK0_SI1249 +MFWK0_SI1879 +MFWK0_SI619 +MFWK0_SX169 +MFWK0_SX259 +MFWK0_SX349 +MFWK0_SX439 +MFWK0_SX79 +MFXS0_SI1674 +MFXS0_SI2225 +MFXS0_SI2304 +MFXS0_SX144 +MFXS0_SX234 +MFXS0_SX324 +MFXS0_SX414 +MFXS0_SX54 +MFXV0_SI1005 +MFXV0_SI1342 +MFXV0_SI1635 +MFXV0_SX105 +MFXV0_SX15 +MFXV0_SX195 +MFXV0_SX285 +MFXV0_SX375 +MGAF0_SI1282 +MGAF0_SI1912 +MGAF0_SI652 +MGAF0_SX112 +MGAF0_SX202 +MGAF0_SX22 +MGAF0_SX292 +MGAF0_SX382 +MGAG0_SI1321 +MGAG0_SI645 +MGAG0_SI691 +MGAG0_SX151 +MGAG0_SX241 +MGAG0_SX331 +MGAG0_SX421 +MGAG0_SX61 +MGAK0_SI1036 +MGAK0_SI1666 +MGAK0_SI2296 +MGAK0_SX136 +MGAK0_SX226 +MGAK0_SX316 +MGAK0_SX406 +MGAK0_SX46 +MGAR0_SI1212 +MGAR0_SI1694 +MGAR0_SI1842 +MGAR0_SX132 +MGAR0_SX222 +MGAR0_SX312 +MGAR0_SX402 +MGAR0_SX42 +MGAW0_SI1165 +MGAW0_SI1802 +MGAW0_SI535 +MGAW0_SX175 +MGAW0_SX265 +MGAW0_SX355 +MGAW0_SX445 +MGAW0_SX85 +MGES0_SI1481 +MGES0_SI2111 +MGES0_SI851 +MGES0_SX131 +MGES0_SX221 +MGES0_SX311 +MGES0_SX401 +MGES0_SX41 +MGJC0_SI1256 +MGJC0_SI1335 +MGJC0_SI1965 +MGJC0_SX165 +MGJC0_SX255 +MGJC0_SX345 +MGJC0_SX435 +MGJC0_SX75 +MGRL0_SI1497 +MGRL0_SI2127 +MGRL0_SI867 +MGRL0_SX147 +MGRL0_SX237 +MGRL0_SX327 +MGRL0_SX417 +MGRL0_SX57 +MGRP0_SI1317 +MGRP0_SI1947 +MGRP0_SI687 +MGRP0_SX147 +MGRP0_SX237 +MGRP0_SX327 +MGRP0_SX417 +MGRP0_SX57 +MGSH0_SI1176 +MGSH0_SI1806 +MGSH0_SI546 +MGSH0_SX127 +MGSH0_SX186 +MGSH0_SX276 +MGSH0_SX6 +MGSH0_SX96 +MGSL0_SI1164 +MGSL0_SI534 +MGSL0_SI797 +MGSL0_SX174 +MGSL0_SX264 +MGSL0_SX354 +MGSL0_SX444 +MGSL0_SX84 +MGXP0_SI1087 +MGXP0_SI457 +MGXP0_SI525 +MGXP0_SX187 +MGXP0_SX277 +MGXP0_SX367 +MGXP0_SX7 +MGXP0_SX97 +MHBS0_SI1575 +MHBS0_SI2205 +MHBS0_SI945 +MHBS0_SX135 +MHBS0_SX225 +MHBS0_SX315 +MHBS0_SX405 +MHBS0_SX45 +MHIT0_SI1613 +MHIT0_SI2243 +MHIT0_SI983 +MHIT0_SX173 +MHIT0_SX263 +MHIT0_SX353 +MHIT0_SX443 +MHIT0_SX83 +MHJB0_SI1017 +MHJB0_SI1647 +MHJB0_SI2277 +MHJB0_SX117 +MHJB0_SX207 +MHJB0_SX27 +MHJB0_SX297 +MHJB0_SX387 +MHMG0_SI1365 +MHMG0_SI1995 +MHMG0_SI735 +MHMG0_SX105 +MHMG0_SX15 +MHMG0_SX195 +MHMG0_SX285 +MHMG0_SX375 +MHMR0_SI1119 +MHMR0_SI1692 +MHMR0_SI489 +MHMR0_SX129 +MHMR0_SX219 +MHMR0_SX309 +MHMR0_SX39 +MHMR0_SX399 +MHRM0_SI1475 +MHRM0_SI2218 +MHRM0_SI958 +MHRM0_SX148 +MHRM0_SX238 +MHRM0_SX328 +MHRM0_SX418 +MHRM0_SX58 +MHXL0_SI1772 +MHXL0_SI512 +MHXL0_SI612 +MHXL0_SX152 +MHXL0_SX242 +MHXL0_SX332 
+MHXL0_SX422 +MHXL0_SX62 +MILB0_SI2163 +MILB0_SI807 +MILB0_SI903 +MILB0_SX183 +MILB0_SX273 +MILB0_SX3 +MILB0_SX363 +MILB0_SX93 +MJAC0_SI1331 +MJAC0_SI2148 +MJAC0_SI701 +MJAC0_SX251 +MJAC0_SX307 +MJAC0_SX341 +MJAC0_SX431 +MJAC0_SX71 +MJAE0_SI1524 +MJAE0_SI1999 +MJAE0_SI2154 +MJAE0_SX174 +MJAE0_SX264 +MJAE0_SX354 +MJAE0_SX444 +MJAE0_SX84 +MJAI0_SI1604 +MJAI0_SI682 +MJAI0_SI710 +MJAI0_SX164 +MJAI0_SX254 +MJAI0_SX344 +MJAI0_SX434 +MJAI0_SX74 +MJBG0_SI1232 +MJBG0_SI1724 +MJBG0_SI1862 +MJBG0_SX152 +MJBG0_SX242 +MJBG0_SX332 +MJBG0_SX422 +MJBG0_SX62 +MJDA0_SI1031 +MJDA0_SI1661 +MJDA0_SI2291 +MJDA0_SX131 +MJDA0_SX221 +MJDA0_SX311 +MJDA0_SX401 +MJDA0_SX41 +MJDC0_SI1161 +MJDC0_SI2165 +MJDC0_SI531 +MJDC0_SX171 +MJDC0_SX261 +MJDC0_SX351 +MJDC0_SX441 +MJDC0_SX81 +MJDE0_SI1120 +MJDE0_SI463 +MJDE0_SI490 +MJDE0_SX130 +MJDE0_SX220 +MJDE0_SX310 +MJDE0_SX40 +MJDE0_SX400 +MJDG0_SI1042 +MJDG0_SI1672 +MJDG0_SI1705 +MJDG0_SX142 +MJDG0_SX232 +MJDG0_SX322 +MJDG0_SX412 +MJDG0_SX52 +MJDM0_SI1340 +MJDM0_SI1937 +MJDM0_SI974 +MJDM0_SX170 +MJDM0_SX260 +MJDM0_SX350 +MJDM0_SX440 +MJDM0_SX80 +MJEB0_SI1286 +MJEB0_SI1916 +MJEB0_SI656 +MJEB0_SX170 +MJEB0_SX206 +MJEB0_SX26 +MJEB0_SX296 +MJEB0_SX386 +MJEB1_SI1467 +MJEB1_SI2097 +MJEB1_SI837 +MJEB1_SX117 +MJEB1_SX207 +MJEB1_SX27 +MJEB1_SX297 +MJEB1_SX387 +MJEE0_SI1237 +MJEE0_SI1867 +MJEE0_SI607 +MJEE0_SX157 +MJEE0_SX247 +MJEE0_SX337 +MJEE0_SX427 +MJEE0_SX67 +MJFH0_SI1107 +MJFH0_SI1737 +MJFH0_SI477 +MJFH0_SX117 +MJFH0_SX207 +MJFH0_SX27 +MJFH0_SX297 +MJFH0_SX387 +MJFR0_SI1605 +MJFR0_SI2235 +MJFR0_SI975 +MJFR0_SX165 +MJFR0_SX255 +MJFR0_SX345 +MJFR0_SX435 +MJFR0_SX75 +MJHI0_SI1328 +MJHI0_SI555 +MJHI0_SI698 +MJHI0_SX158 +MJHI0_SX248 +MJHI0_SX338 +MJHI0_SX428 +MJHI0_SX68 +MJJB0_SI1139 +MJJB0_SI1277 +MJJB0_SI1769 +MJJB0_SX149 +MJJB0_SX239 +MJJB0_SX329 +MJJB0_SX419 +MJJB0_SX59 +MJJJ0_SI1163 +MJJJ0_SI1793 +MJJJ0_SI533 +MJJJ0_SX173 +MJJJ0_SX263 +MJJJ0_SX353 +MJJJ0_SX443 +MJJJ0_SX83 +MJJM0_SI1251 +MJJM0_SI1457 +MJJM0_SI827 +MJJM0_SX107 +MJJM0_SX17 +MJJM0_SX197 +MJJM0_SX287 +MJJM0_SX377 +MJKR0_SI1201 +MJKR0_SI1831 +MJKR0_SI571 +MJKR0_SX121 +MJKR0_SX211 +MJKR0_SX301 +MJKR0_SX31 +MJKR0_SX391 +MJLB0_SI1616 +MJLB0_SI2246 +MJLB0_SI986 +MJLB0_SX176 +MJLB0_SX266 +MJLB0_SX356 +MJLB0_SX446 +MJLB0_SX86 +MJLG1_SI1012 +MJLG1_SI1642 +MJLG1_SI2272 +MJLG1_SX112 +MJLG1_SX202 +MJLG1_SX22 +MJLG1_SX292 +MJLG1_SX382 +MJLS0_SI1096 +MJLS0_SI1726 +MJLS0_SI466 +MJLS0_SX106 +MJLS0_SX16 +MJLS0_SX196 +MJLS0_SX286 +MJLS0_SX376 +MJMA0_SI1495 +MJMA0_SI2125 +MJMA0_SI865 +MJMA0_SX145 +MJMA0_SX235 +MJMA0_SX325 +MJMA0_SX415 +MJMA0_SX55 +MJMD0_SI1028 +MJMD0_SI1658 +MJMD0_SI2288 +MJMD0_SX128 +MJMD0_SX218 +MJMD0_SX308 +MJMD0_SX38 +MJMD0_SX398 +MJMM0_SI1255 +MJMM0_SI1885 +MJMM0_SI625 +MJMM0_SX175 +MJMM0_SX265 +MJMM0_SX355 +MJMM0_SX445 +MJMM0_SX85 +MJPG0_SI1191 +MJPG0_SI1821 +MJPG0_SI561 +MJPG0_SX111 +MJPG0_SX201 +MJPG0_SX21 +MJPG0_SX291 +MJPG0_SX381 +MJPM0_SI1368 +MJPM0_SI1998 +MJPM0_SI738 +MJPM0_SX108 +MJPM0_SX18 +MJPM0_SX198 +MJPM0_SX288 +MJPM0_SX378 +MJPM1_SI1897 +MJPM1_SI2280 +MJPM1_SI761 +MJPM1_SX131 +MJPM1_SX221 +MJPM1_SX311 +MJPM1_SX401 +MJPM1_SX41 +MJRA0_SI1236 +MJRA0_SI1866 +MJRA0_SI606 +MJRA0_SX156 +MJRA0_SX246 +MJRA0_SX336 +MJRA0_SX426 +MJRA0_SX66 +MJRG0_SI1366 +MJRG0_SI1996 +MJRG0_SI736 +MJRG0_SX106 +MJRG0_SX16 +MJRG0_SX286 +MJRG0_SX352 +MJRG0_SX376 +MJRH0_SI1125 +MJRH0_SI1755 +MJRH0_SI1840 +MJRH0_SX135 +MJRH0_SX225 +MJRH0_SX315 +MJRH0_SX405 +MJRH0_SX45 +MJRH1_SI1558 +MJRH1_SI1774 +MJRH1_SI514 +MJRH1_SX154 +MJRH1_SX244 +MJRH1_SX334 +MJRH1_SX424 +MJRH1_SX64 +MJRK0_SI1662 +MJRK0_SI2103 +MJRK0_SI880 +MJRK0_SX160 
+MJRK0_SX250 +MJRK0_SX340 +MJRK0_SX430 +MJRK0_SX70 +MJRP0_SI1835 +MJRP0_SI1845 +MJRP0_SI585 +MJRP0_SX135 +MJRP0_SX225 +MJRP0_SX315 +MJRP0_SX405 +MJRP0_SX45 +MJSR0_SI1424 +MJSR0_SI2054 +MJSR0_SI794 +MJSR0_SX164 +MJSR0_SX254 +MJSR0_SX344 +MJSR0_SX434 +MJSR0_SX74 +MJWG0_SI2155 +MJWG0_SI813 +MJWG0_SI895 +MJWG0_SX175 +MJWG0_SX265 +MJWG0_SX355 +MJWG0_SX445 +MJWG0_SX85 +MJWS0_SI1143 +MJWS0_SI1773 +MJWS0_SI513 +MJWS0_SX153 +MJWS0_SX243 +MJWS0_SX333 +MJWS0_SX423 +MJWS0_SX63 +MJWT0_SI1291 +MJWT0_SI1381 +MJWT0_SI751 +MJWT0_SX121 +MJWT0_SX211 +MJWT0_SX301 +MJWT0_SX31 +MJWT0_SX391 +MJXA0_SI1507 +MJXA0_SI2137 +MJXA0_SI877 +MJXA0_SX157 +MJXA0_SX247 +MJXA0_SX337 +MJXA0_SX427 +MJXA0_SX67 +MJXL0_SI1172 +MJXL0_SI1795 +MJXL0_SI542 +MJXL0_SX182 +MJXL0_SX272 +MJXL0_SX362 +MJXL0_SX452 +MJXL0_SX92 +MKAG0_SI1609 +MKAG0_SI2239 +MKAG0_SI979 +MKAG0_SX169 +MKAG0_SX259 +MKAG0_SX30 +MKAG0_SX439 +MKAG0_SX79 +MKAH0_SI1528 +MKAH0_SI2158 +MKAH0_SI898 +MKAH0_SX178 +MKAH0_SX268 +MKAH0_SX358 +MKAH0_SX448 +MKAH0_SX88 +MKAJ0_SI1414 +MKAJ0_SI2044 +MKAJ0_SI784 +MKAJ0_SX154 +MKAJ0_SX244 +MKAJ0_SX334 +MKAJ0_SX424 +MKAJ0_SX64 +MKAM0_SI1250 +MKAM0_SI1316 +MKAM0_SI1465 +MKAM0_SX146 +MKAM0_SX236 +MKAM0_SX326 +MKAM0_SX416 +MKAM0_SX56 +MKDB0_SI2132 +MKDB0_SI588 +MKDB0_SI872 +MKDB0_SX152 +MKDB0_SX242 +MKDB0_SX332 +MKDB0_SX422 +MKDB0_SX62 +MKDD0_SI1567 +MKDD0_SI2197 +MKDD0_SI937 +MKDD0_SX127 +MKDD0_SX217 +MKDD0_SX307 +MKDD0_SX37 +MKDD0_SX397 +MKDT0_SI2153 +MKDT0_SI814 +MKDT0_SI893 +MKDT0_SX173 +MKDT0_SX263 +MKDT0_SX353 +MKDT0_SX443 +MKDT0_SX83 +MKES0_SI1253 +MKES0_SI1883 +MKES0_SI623 +MKES0_SX173 +MKES0_SX263 +MKES0_SX353 +MKES0_SX443 +MKES0_SX83 +MKJO0_SI1517 +MKJO0_SI2147 +MKJO0_SI887 +MKJO0_SX167 +MKJO0_SX257 +MKJO0_SX424 +MKJO0_SX437 +MKJO0_SX77 +MKLN0_SI1598 +MKLN0_SI2228 +MKLN0_SI968 +MKLN0_SX158 +MKLN0_SX248 +MKLN0_SX338 +MKLN0_SX428 +MKLN0_SX68 +MKLR0_SI1059 +MKLR0_SI1689 +MKLR0_SI2319 +MKLR0_SX159 +MKLR0_SX249 +MKLR0_SX339 +MKLR0_SX429 +MKLR0_SX69 +MKLS0_SI1437 +MKLS0_SI1533 +MKLS0_SI2067 +MKLS0_SX177 +MKLS0_SX267 +MKLS0_SX357 +MKLS0_SX447 +MKLS0_SX87 +MKLS1_SI1545 +MKLS1_SI2175 +MKLS1_SI915 +MKLS1_SX105 +MKLS1_SX15 +MKLS1_SX195 +MKLS1_SX285 +MKLS1_SX375 +MKLW0_SI1571 +MKLW0_SI1844 +MKLW0_SI2201 +MKLW0_SX131 +MKLW0_SX221 +MKLW0_SX311 +MKLW0_SX401 +MKLW0_SX41 +MKRG0_SI1491 +MKRG0_SI2121 +MKRG0_SI861 +MKRG0_SX141 +MKRG0_SX231 +MKRG0_SX31 +MKRG0_SX411 +MKRG0_SX51 +MKXL0_SI1185 +MKXL0_SI1815 +MKXL0_SI1958 +MKXL0_SX105 +MKXL0_SX15 +MKXL0_SX195 +MKXL0_SX285 +MKXL0_SX375 +MLBC0_SI1239 +MLBC0_SI1869 +MLBC0_SI609 +MLBC0_SX159 +MLBC0_SX249 +MLBC0_SX339 +MLBC0_SX429 +MLBC0_SX69 +MLEL0_SI1246 +MLEL0_SI1876 +MLEL0_SI616 +MLEL0_SX166 +MLEL0_SX256 +MLEL0_SX346 +MLEL0_SX436 +MLEL0_SX76 +MLJC0_SI1225 +MLJC0_SI1855 +MLJC0_SI595 +MLJC0_SX145 +MLJC0_SX235 +MLJC0_SX325 +MLJC0_SX415 +MLJC0_SX55 +MLJH0_SI1324 +MLJH0_SI1422 +MLJH0_SI694 +MLJH0_SX154 +MLJH0_SX244 +MLJH0_SX334 +MLJH0_SX424 +MLJH0_SX64 +MLNS0_SI1407 +MLNS0_SI2037 +MLNS0_SI777 +MLNS0_SX147 +MLNS0_SX237 +MLNS0_SX327 +MLNS0_SX417 +MLNS0_SX57 +MLSH0_SI1417 +MLSH0_SI2047 +MLSH0_SI787 +MLSH0_SX157 +MLSH0_SX247 +MLSH0_SX337 +MLSH0_SX427 +MLSH0_SX67 +MMAA0_SI1588 +MMAA0_SI2105 +MMAA0_SI845 +MMAA0_SX125 +MMAA0_SX215 +MMAA0_SX305 +MMAA0_SX35 +MMAA0_SX395 +MMAB1_SI1494 +MMAB1_SI2124 +MMAB1_SI864 +MMAB1_SX144 +MMAB1_SX234 +MMAB1_SX324 +MMAB1_SX414 +MMAB1_SX54 +MMAG0_SI1126 +MMAG0_SI1756 +MMAG0_SI496 +MMAG0_SX136 +MMAG0_SX226 +MMAG0_SX316 +MMAG0_SX406 +MMAG0_SX46 +MMAM0_SI1597 +MMAM0_SI1668 +MMAM0_SI2227 +MMAM0_SX157 +MMAM0_SX247 +MMAM0_SX337 +MMAM0_SX427 +MMAM0_SX67 +MMAR0_SI1336 +MMAR0_SI1966 
+MMAR0_SI706 +MMAR0_SX166 +MMAR0_SX256 +MMAR0_SX346 +MMAR0_SX436 +MMAR0_SX76 +MMBS0_SI1151 +MMBS0_SI1781 +MMBS0_SI521 +MMBS0_SX161 +MMBS0_SX251 +MMBS0_SX341 +MMBS0_SX431 +MMBS0_SX71 +MMCC0_SI1338 +MMCC0_SI1968 +MMCC0_SI708 +MMCC0_SX168 +MMCC0_SX258 +MMCC0_SX348 +MMCC0_SX438 +MMCC0_SX78 +MMDB0_SI1358 +MMDB0_SI1617 +MMDB0_SI987 +MMDB0_SX177 +MMDB0_SX267 +MMDB0_SX357 +MMDB0_SX447 +MMDB0_SX87 +MMDG0_SI1780 +MMDG0_SI2035 +MMDG0_SI520 +MMDG0_SX160 +MMDG0_SX250 +MMDG0_SX340 +MMDG0_SX430 +MMDG0_SX70 +MMDM0_SI1311 +MMDM0_SI1941 +MMDM0_SI681 +MMDM0_SX141 +MMDM0_SX231 +MMDM0_SX321 +MMDM0_SX411 +MMDM0_SX51 +MMDM1_SI1650 +MMDM1_SI2043 +MMDM1_SI783 +MMDM1_SX153 +MMDM1_SX243 +MMDM1_SX333 +MMDM1_SX423 +MMDM1_SX63 +MMDS0_SI1343 +MMDS0_SI1973 +MMDS0_SI713 +MMDS0_SX173 +MMDS0_SX263 +MMDS0_SX353 +MMDS0_SX443 +MMDS0_SX83 +MMEA0_SI1388 +MMEA0_SI2018 +MMEA0_SI758 +MMEA0_SX128 +MMEA0_SX218 +MMEA0_SX308 +MMEA0_SX38 +MMEA0_SX398 +MMEB0_SI1357 +MMEB0_SI1987 +MMEB0_SI727 +MMEB0_SX187 +MMEB0_SX327 +MMEB0_SX367 +MMEB0_SX7 +MMEB0_SX97 +MMGC0_SI1305 +MMGC0_SI1935 +MMGC0_SI2184 +MMGC0_SX135 +MMGC0_SX225 +MMGC0_SX315 +MMGC0_SX405 +MMGC0_SX45 +MMGG0_SI1079 +MMGG0_SI1709 +MMGG0_SI2339 +MMGG0_SX179 +MMGG0_SX269 +MMGG0_SX359 +MMGG0_SX449 +MMGG0_SX89 +MMGK0_SI1322 +MMGK0_SI1952 +MMGK0_SI692 +MMGK0_SX152 +MMGK0_SX242 +MMGK0_SX332 +MMGK0_SX422 +MMGK0_SX62 +MMJB1_SI1408 +MMJB1_SI2038 +MMJB1_SI778 +MMJB1_SX148 +MMJB1_SX238 +MMJB1_SX328 +MMJB1_SX418 +MMJB1_SX58 +MMLM0_SI1527 +MMLM0_SI2150 +MMLM0_SI897 +MMLM0_SX177 +MMLM0_SX267 +MMLM0_SX357 +MMLM0_SX447 +MMLM0_SX87 +MMPM0_SI1061 +MMPM0_SI1691 +MMPM0_SI2321 +MMPM0_SX161 +MMPM0_SX251 +MMPM0_SX341 +MMPM0_SX431 +MMPM0_SX71 +MMRP0_SI2034 +MMRP0_SI717 +MMRP0_SI774 +MMRP0_SX144 +MMRP0_SX234 +MMRP0_SX324 +MMRP0_SX414 +MMRP0_SX54 +MMSM0_SI1106 +MMSM0_SI1736 +MMSM0_SI476 +MMSM0_SX116 +MMSM0_SX206 +MMSM0_SX26 +MMSM0_SX296 +MMSM0_SX386 +MMVP0_SI1284 +MMVP0_SI1914 +MMVP0_SI654 +MMVP0_SX114 +MMVP0_SX204 +MMVP0_SX294 +MMVP0_SX347 +MMVP0_SX384 +MMWB0_SI1619 +MMWB0_SI2249 +MMWB0_SI989 +MMWB0_SX179 +MMWB0_SX269 +MMWB0_SX359 +MMWB0_SX449 +MMWB0_SX89 +MMWS0_SI1518 +MMWS0_SI559 +MMWS0_SI888 +MMWS0_SX168 +MMWS0_SX258 +MMWS0_SX348 +MMWS0_SX438 +MMWS0_SX78 +MMWS1_SI1071 +MMWS1_SI1701 +MMWS1_SI2331 +MMWS1_SX261 +MMWS1_SX27 +MMWS1_SX351 +MMWS1_SX441 +MMWS1_SX81 +MMXS0_SI2136 +MMXS0_SI629 +MMXS0_SI876 +MMXS0_SX156 +MMXS0_SX246 +MMXS0_SX336 +MMXS0_SX426 +MMXS0_SX66 +MNET0_SI1446 +MNET0_SI2076 +MNET0_SI816 +MNET0_SX186 +MNET0_SX276 +MNET0_SX366 +MNET0_SX6 +MNET0_SX96 +MNTW0_SI1068 +MNTW0_SI1698 +MNTW0_SI2328 +MNTW0_SX168 +MNTW0_SX202 +MNTW0_SX258 +MNTW0_SX348 +MNTW0_SX78 +MPAR0_SI1576 +MPAR0_SI2206 +MPAR0_SI946 +MPAR0_SX136 +MPAR0_SX226 +MPAR0_SX316 +MPAR0_SX406 +MPAR0_SX46 +MPEB0_SI1034 +MPEB0_SI1860 +MPEB0_SI600 +MPEB0_SX150 +MPEB0_SX240 +MPEB0_SX330 +MPEB0_SX420 +MPEB0_SX60 +MPFU0_SI1258 +MPFU0_SI1888 +MPFU0_SI628 +MPFU0_SX178 +MPFU0_SX268 +MPFU0_SX358 +MPFU0_SX448 +MPFU0_SX88 +MPGH0_SI1554 +MPGH0_SI675 +MPGH0_SI924 +MPGH0_SX114 +MPGH0_SX204 +MPGH0_SX24 +MPGH0_SX294 +MPGH0_SX384 +MPGR0_SI1410 +MPGR0_SI2040 +MPGR0_SI780 +MPGR0_SX150 +MPGR0_SX240 +MPGR0_SX330 +MPGR0_SX420 +MPGR0_SX60 +MPGR1_SI1269 +MPGR1_SI1499 +MPGR1_SI2129 +MPGR1_SX149 +MPGR1_SX239 +MPGR1_SX329 +MPGR1_SX419 +MPGR1_SX59 +MPMB0_SI1501 +MPMB0_SI2131 +MPMB0_SI871 +MPMB0_SX151 +MPMB0_SX241 +MPMB0_SX331 +MPMB0_SX421 +MPMB0_SX61 +MPPC0_SI1412 +MPPC0_SI2042 +MPPC0_SI782 +MPPC0_SX152 +MPPC0_SX242 +MPPC0_SX332 +MPPC0_SX422 +MPPC0_SX62 +MPRB0_SI1205 +MPRB0_SI1215 +MPRB0_SI575 +MPRB0_SX125 +MPRB0_SX215 +MPRB0_SX305 +MPRB0_SX35 +MPRB0_SX395 +MPRD0_SI1431 
+MPRD0_SI2061 +MPRD0_SI801 +MPRD0_SX171 +MPRD0_SX261 +MPRD0_SX351 +MPRD0_SX441 +MPRD0_SX81 +MPRK0_SI1097 +MPRK0_SI1727 +MPRK0_SI467 +MPRK0_SX107 +MPRK0_SX17 +MPRK0_SX197 +MPRK0_SX287 +MPRK0_SX377 +MPRT0_SI1210 +MPRT0_SI495 +MPRT0_SI580 +MPRT0_SX130 +MPRT0_SX220 +MPRT0_SX310 +MPRT0_SX40 +MPRT0_SX400 +MPSW0_SI1067 +MPSW0_SI1697 +MPSW0_SI2327 +MPSW0_SX167 +MPSW0_SX24 +MPSW0_SX257 +MPSW0_SX437 +MPSW0_SX77 +MRAB0_SI1224 +MRAB0_SI1854 +MRAB0_SI594 +MRAB0_SX144 +MRAB0_SX234 +MRAB0_SX324 +MRAB0_SX414 +MRAB0_SX54 +MRAB1_SI1478 +MRAB1_SI2108 +MRAB1_SI848 +MRAB1_SX128 +MRAB1_SX218 +MRAB1_SX308 +MRAB1_SX38 +MRAB1_SX398 +MRAI0_SI1954 +MRAI0_SI2052 +MRAI0_SI792 +MRAI0_SX162 +MRAI0_SX252 +MRAI0_SX342 +MRAI0_SX432 +MRAI0_SX72 +MRAM0_SI1275 +MRAM0_SI1905 +MRAM0_SI1951 +MRAM0_SX105 +MRAM0_SX15 +MRAM0_SX195 +MRAM0_SX285 +MRAM0_SX375 +MRAV0_SI1008 +MRAV0_SI1638 +MRAV0_SI2268 +MRAV0_SX108 +MRAV0_SX18 +MRAV0_SX198 +MRAV0_SX288 +MRAV0_SX378 +MRBC0_SI1665 +MRBC0_SI1859 +MRBC0_SI599 +MRBC0_SX149 +MRBC0_SX239 +MRBC0_SX329 +MRBC0_SX419 +MRBC0_SX59 +MRCG0_SI1428 +MRCG0_SI2058 +MRCG0_SI798 +MRCG0_SX168 +MRCG0_SX258 +MRCG0_SX348 +MRCG0_SX438 +MRCG0_SX78 +MRCW0_SI1371 +MRCW0_SI2001 +MRCW0_SI741 +MRCW0_SX111 +MRCW0_SX201 +MRCW0_SX21 +MRCW0_SX291 +MRCW0_SX381 +MRDD0_SI1050 +MRDD0_SI1680 +MRDD0_SI2310 +MRDD0_SX150 +MRDD0_SX240 +MRDD0_SX277 +MRDD0_SX330 +MRDD0_SX60 +MRDM0_SI1044 +MRDM0_SI1595 +MRDM0_SI965 +MRDM0_SX155 +MRDM0_SX245 +MRDM0_SX335 +MRDM0_SX425 +MRDM0_SX65 +MRDS0_SI1167 +MRDS0_SI1797 +MRDS0_SI537 +MRDS0_SX177 +MRDS0_SX267 +MRDS0_SX357 +MRDS0_SX447 +MRDS0_SX87 +MREE0_SI1104 +MREE0_SI1734 +MREE0_SI1959 +MREE0_SX114 +MREE0_SX204 +MREE0_SX24 +MREE0_SX294 +MREE0_SX384 +MREH1_SI1599 +MREH1_SI2229 +MREH1_SI969 +MREH1_SX159 +MREH1_SX249 +MREH1_SX339 +MREH1_SX429 +MREH1_SX69 +MREM0_SI1591 +MREM0_SI511 +MREM0_SI961 +MREM0_SX151 +MREM0_SX241 +MREM0_SX331 +MREM0_SX421 +MREM0_SX61 +MREW1_SI1500 +MREW1_SI2130 +MREW1_SI870 +MREW1_SX150 +MREW1_SX240 +MREW1_SX330 +MREW1_SX420 +MREW1_SX60 +MRFK0_SI1076 +MRFK0_SI1706 +MRFK0_SI2336 +MRFK0_SX176 +MRFK0_SX266 +MRFK0_SX356 +MRFK0_SX446 +MRFK0_SX86 +MRFL0_SI1156 +MRFL0_SI1786 +MRFL0_SI526 +MRFL0_SX166 +MRFL0_SX256 +MRFL0_SX346 +MRFL0_SX436 +MRFL0_SX76 +MRGM0_SI1162 +MRGM0_SI1792 +MRGM0_SI532 +MRGM0_SX172 +MRGM0_SX262 +MRGM0_SX416 +MRGM0_SX442 +MRGM0_SX82 +MRGS0_SI1356 +MRGS0_SI1986 +MRGS0_SI726 +MRGS0_SX186 +MRGS0_SX276 +MRGS0_SX366 +MRGS0_SX6 +MRGS0_SX96 +MRHL0_SI1515 +MRHL0_SI2145 +MRHL0_SI885 +MRHL0_SX165 +MRHL0_SX255 +MRHL0_SX345 +MRHL0_SX435 +MRHL0_SX75 +MRJB1_SI1020 +MRJB1_SI1413 +MRJB1_SI2021 +MRJB1_SX120 +MRJB1_SX210 +MRJB1_SX30 +MRJB1_SX300 +MRJB1_SX390 +MRJH0_SI1519 +MRJH0_SI889 +MRJH0_SI914 +MRJH0_SX169 +MRJH0_SX259 +MRJH0_SX307 +MRJH0_SX439 +MRJH0_SX79 +MRJM0_SI1095 +MRJM0_SI1228 +MRJM0_SI1858 +MRJM0_SX148 +MRJM0_SX238 +MRJM0_SX328 +MRJM0_SX418 +MRJM0_SX58 +MRJM1_SI1298 +MRJM1_SI1928 +MRJM1_SI668 +MRJM1_SX128 +MRJM1_SX218 +MRJM1_SX308 +MRJM1_SX38 +MRJM1_SX398 +MRJT0_SI1498 +MRJT0_SI1805 +MRJT0_SI868 +MRJT0_SX148 +MRJT0_SX238 +MRJT0_SX328 +MRJT0_SX418 +MRJT0_SX58 +MRKM0_SI1267 +MRKM0_SI1391 +MRKM0_SI637 +MRKM0_SX187 +MRKM0_SX277 +MRKM0_SX367 +MRKM0_SX7 +MRKM0_SX97 +MRLD0_SI1594 +MRLD0_SI2224 +MRLD0_SI964 +MRLD0_SX154 +MRLD0_SX244 +MRLD0_SX334 +MRLD0_SX424 +MRLD0_SX64 +MRLJ0_SI1420 +MRLJ0_SI2050 +MRLJ0_SI790 +MRLJ0_SX160 +MRLJ0_SX250 +MRLJ0_SX340 +MRLJ0_SX430 +MRLJ0_SX70 +MRLJ1_SI1671 +MRLJ1_SI2301 +MRLJ1_SI2332 +MRLJ1_SX141 +MRLJ1_SX231 +MRLJ1_SX321 +MRLJ1_SX411 +MRLJ1_SX51 +MRLK0_SI1468 +MRLK0_SI2140 +MRLK0_SI843 +MRLK0_SX123 +MRLK0_SX213 +MRLK0_SX303 +MRLK0_SX33 
+MRLK0_SX393 +MRLR0_SI1196 +MRLR0_SI1826 +MRLR0_SI566 +MRLR0_SX116 +MRLR0_SX206 +MRLR0_SX26 +MRLR0_SX296 +MRLR0_SX386 +MRMB0_SI1581 +MRMB0_SI2211 +MRMB0_SI951 +MRMB0_SX141 +MRMB0_SX231 +MRMB0_SX321 +MRMB0_SX411 +MRMB0_SX51 +MRMG0_SI1080 +MRMG0_SI1710 +MRMG0_SI2340 +MRMG0_SX180 +MRMG0_SX270 +MRMG0_SX360 +MRMG0_SX450 +MRMG0_SX90 +MRMH0_SI1021 +MRMH0_SI1349 +MRMH0_SI2281 +MRMH0_SX121 +MRMH0_SX211 +MRMH0_SX301 +MRMH0_SX31 +MRMH0_SX391 +MRML0_SI1421 +MRML0_SI2051 +MRML0_SI791 +MRML0_SX161 +MRML0_SX251 +MRML0_SX341 +MRML0_SX431 +MRML0_SX71 +MRMS0_SI1113 +MRMS0_SI2057 +MRMS0_SI2100 +MRMS0_SX120 +MRMS0_SX210 +MRMS0_SX30 +MRMS0_SX300 +MRMS0_SX390 +MRPC1_SI1482 +MRPC1_SI2026 +MRPC1_SI2112 +MRPC1_SX132 +MRPC1_SX222 +MRPC1_SX312 +MRPC1_SX402 +MRPC1_SX42 +MRRE0_SI1334 +MRRE0_SI704 +MRRE0_SI952 +MRRE0_SX164 +MRRE0_SX254 +MRRE0_SX344 +MRRE0_SX434 +MRRE0_SX74 +MRSO0_SI1206 +MRSO0_SI1659 +MRSO0_SI2289 +MRSO0_SX129 +MRSO0_SX219 +MRSO0_SX309 +MRSO0_SX39 +MRSO0_SX399 +MRSP0_SI1429 +MRSP0_SI2059 +MRSP0_SI799 +MRSP0_SX169 +MRSP0_SX196 +MRSP0_SX259 +MRSP0_SX439 +MRSP0_SX79 +MRTC0_SI1458 +MRTC0_SI2088 +MRTC0_SI828 +MRTC0_SX108 +MRTC0_SX18 +MRTC0_SX198 +MRTC0_SX288 +MRTC0_SX378 +MRTJ0_SI1551 +MRTJ0_SI2032 +MRTJ0_SI772 +MRTJ0_SX142 +MRTJ0_SX232 +MRTJ0_SX322 +MRTJ0_SX412 +MRTJ0_SX52 +MRVG0_SI1140 +MRVG0_SI1770 +MRVG0_SI510 +MRVG0_SX150 +MRVG0_SX240 +MRVG0_SX330 +MRVG0_SX420 +MRVG0_SX60 +MRWA0_SI1603 +MRWA0_SI2233 +MRWA0_SI973 +MRWA0_SX163 +MRWA0_SX253 +MRWA0_SX343 +MRWA0_SX433 +MRWA0_SX73 +MRWS0_SI1102 +MRWS0_SI1732 +MRWS0_SI472 +MRWS0_SX112 +MRWS0_SX202 +MRWS0_SX22 +MRWS0_SX292 +MRWS0_SX382 +MRXB0_SI1585 +MRXB0_SI2215 +MRXB0_SI955 +MRXB0_SX145 +MRXB0_SX235 +MRXB0_SX325 +MRXB0_SX415 +MRXB0_SX55 +MSAH1_SI1049 +MSAH1_SI1679 +MSAH1_SI2309 +MSAH1_SX149 +MSAH1_SX239 +MSAH1_SX329 +MSAH1_SX419 +MSAH1_SX59 +MSAS0_SI1376 +MSAS0_SI2006 +MSAS0_SI746 +MSAS0_SX116 +MSAS0_SX206 +MSAS0_SX26 +MSAS0_SX296 +MSAS0_SX386 +MSAT0_SI1526 +MSAT0_SI2156 +MSAT0_SI896 +MSAT0_SX176 +MSAT0_SX266 +MSAT0_SX356 +MSAT0_SX446 +MSAT0_SX86 +MSAT1_SI1073 +MSAT1_SI1703 +MSAT1_SI2333 +MSAT1_SX173 +MSAT1_SX263 +MSAT1_SX353 +MSAT1_SX443 +MSAT1_SX83 +MSDB0_SI1007 +MSDB0_SI1637 +MSDB0_SI2267 +MSDB0_SX107 +MSDB0_SX17 +MSDB0_SX197 +MSDB0_SX287 +MSDB0_SX377 +MSDH0_SI2113 +MSDH0_SI2240 +MSDH0_SI980 +MSDH0_SX170 +MSDH0_SX260 +MSDH0_SX350 +MSDH0_SX440 +MSDH0_SX80 +MSDS0_SI1077 +MSDS0_SI1707 +MSDS0_SI2337 +MSDS0_SX177 +MSDS0_SX267 +MSDS0_SX357 +MSDS0_SX447 +MSDS0_SX87 +MSEM1_SI1440 +MSEM1_SI2070 +MSEM1_SI810 +MSEM1_SX180 +MSEM1_SX270 +MSEM1_SX360 +MSEM1_SX450 +MSEM1_SX90 +MSES0_SI1589 +MSES0_SI2216 +MSES0_SI2219 +MSES0_SX149 +MSES0_SX239 +MSES0_SX329 +MSES0_SX419 +MSES0_SX59 +MSFH0_SI1216 +MSFH0_SI1738 +MSFH0_SI586 +MSFH0_SX136 +MSFH0_SX226 +MSFH0_SX316 +MSFH0_SX406 +MSFH0_SX46 +MSFV0_SI1262 +MSFV0_SI1892 +MSFV0_SI632 +MSFV0_SX182 +MSFV0_SX272 +MSFV0_SX362 +MSFV0_SX452 +MSFV0_SX92 +MSJK0_SI1596 +MSJK0_SI2226 +MSJK0_SI966 +MSJK0_SX156 +MSJK0_SX246 +MSJK0_SX336 +MSJK0_SX426 +MSJK0_SX66 +MSMC0_SI1907 +MSMC0_SI509 +MSMC0_SI647 +MSMC0_SX107 +MSMC0_SX17 +MSMC0_SX197 +MSMC0_SX287 +MSMC0_SX377 +MSMR0_SI1150 +MSMR0_SI1405 +MSMR0_SI775 +MSMR0_SX145 +MSMR0_SX235 +MSMR0_SX325 +MSMR0_SX415 +MSMR0_SX55 +MSMS0_SI1433 +MSMS0_SI2063 +MSMS0_SI803 +MSMS0_SX173 +MSMS0_SX263 +MSMS0_SX353 +MSMS0_SX443 +MSMS0_SX83 +MSRG0_SI1221 +MSRG0_SI1851 +MSRG0_SI591 +MSRG0_SX141 +MSRG0_SX231 +MSRG0_SX321 +MSRG0_SX411 +MSRG0_SX51 +MSRR0_SI1131 +MSRR0_SI1761 +MSRR0_SI501 +MSRR0_SX141 +MSRR0_SX231 +MSRR0_SX30 +MSRR0_SX411 +MSRR0_SX51 +MSTF0_SI1396 +MSTF0_SI766 +MSTF0_SI852 +MSTF0_SX136 +MSTF0_SX226 
+MSTF0_SX316 +MSTF0_SX406 +MSTF0_SX46 +MSVS0_SI1568 +MSVS0_SI2198 +MSVS0_SI938 +MSVS0_SX128 +MSVS0_SX218 +MSVS0_SX308 +MSVS0_SX38 +MSVS0_SX398 +MTAB0_SI1572 +MTAB0_SI2202 +MTAB0_SI942 +MTAB0_SX132 +MTAB0_SX222 +MTAB0_SX312 +MTAB0_SX402 +MTAB0_SX42 +MTAS0_SI1385 +MTAS0_SI2015 +MTAS0_SI755 +MTAS0_SX125 +MTAS0_SX215 +MTAS0_SX305 +MTAS0_SX35 +MTAS0_SX395 +MTAT0_SI1110 +MTAT0_SI1740 +MTAT0_SI811 +MTAT0_SX120 +MTAT0_SX210 +MTAT0_SX30 +MTAT0_SX300 +MTAT0_SX390 +MTAT1_SI1409 +MTAT1_SI1627 +MTAT1_SI779 +MTAT1_SX149 +MTAT1_SX239 +MTAT1_SX329 +MTAT1_SX419 +MTAT1_SX59 +MTBC0_SI1173 +MTBC0_SI1803 +MTBC0_SI543 +MTBC0_SX183 +MTBC0_SX273 +MTBC0_SX347 +MTBC0_SX363 +MTBC0_SX93 +MTCS0_SI1972 +MTCS0_SI2265 +MTCS0_SI712 +MTCS0_SX172 +MTCS0_SX262 +MTCS0_SX352 +MTCS0_SX442 +MTCS0_SX82 +MTDB0_SI1401 +MTDB0_SI2031 +MTDB0_SI771 +MTDB0_SX141 +MTDB0_SX231 +MTDB0_SX321 +MTDB0_SX411 +MTDB0_SX51 +MTDP0_SI1274 +MTDP0_SI1521 +MTDP0_SI2151 +MTDP0_SX171 +MTDP0_SX261 +MTDP0_SX351 +MTDP0_SX441 +MTDP0_SX81 +MTER0_SI1157 +MTER0_SI1787 +MTER0_SI527 +MTER0_SX167 +MTER0_SX17 +MTER0_SX257 +MTER0_SX437 +MTER0_SX77 +MTJG0_SI1520 +MTJG0_SI2157 +MTJG0_SI890 +MTJG0_SX170 +MTJG0_SX260 +MTJG0_SX350 +MTJG0_SX440 +MTJG0_SX80 +MTJM0_SI1226 +MTJM0_SI1856 +MTJM0_SI655 +MTJM0_SX146 +MTJM0_SX236 +MTJM0_SX326 +MTJM0_SX416 +MTJM0_SX56 +MTJS0_SI1192 +MTJS0_SI1822 +MTJS0_SI562 +MTJS0_SX112 +MTJS0_SX202 +MTJS0_SX22 +MTJS0_SX292 +MTJS0_SX382 +MTJU0_SI2020 +MTJU0_SI2269 +MTJU0_SI760 +MTJU0_SX130 +MTJU0_SX220 +MTJU0_SX310 +MTJU0_SX40 +MTJU0_SX400 +MTKD0_SI1187 +MTKD0_SI1817 +MTKD0_SI630 +MTKD0_SX107 +MTKD0_SX17 +MTKD0_SX197 +MTKD0_SX287 +MTKD0_SX377 +MTKP0_SI1023 +MTKP0_SI2283 +MTKP0_SI454 +MTKP0_SX123 +MTKP0_SX213 +MTKP0_SX303 +MTKP0_SX33 +MTKP0_SX393 +MTLB0_SI1134 +MTLB0_SI1764 +MTLB0_SI504 +MTLB0_SX144 +MTLB0_SX234 +MTLB0_SX324 +MTLB0_SX414 +MTLB0_SX54 +MTLC0_SI1313 +MTLC0_SI1477 +MTLC0_SI847 +MTLC0_SX127 +MTLC0_SX217 +MTLC0_SX307 +MTLC0_SX37 +MTLC0_SX397 +MTML0_SI1065 +MTML0_SI1695 +MTML0_SI2325 +MTML0_SX165 +MTML0_SX255 +MTML0_SX345 +MTML0_SX435 +MTML0_SX75 +MTMN0_SI1064 +MTMN0_SI2324 +MTMN0_SI582 +MTMN0_SX164 +MTMN0_SX254 +MTMN0_SX344 +MTMN0_SX434 +MTMN0_SX74 +MTMT0_SI1118 +MTMT0_SI1748 +MTMT0_SI488 +MTMT0_SX128 +MTMT0_SX218 +MTMT0_SX308 +MTMT0_SX38 +MTMT0_SX398 +MTPF0_SI1235 +MTPF0_SI1865 +MTPF0_SI605 +MTPF0_SX155 +MTPF0_SX245 +MTPF0_SX335 +MTPF0_SX425 +MTPF0_SX65 +MTPG0_SI1383 +MTPG0_SI2013 +MTPG0_SI753 +MTPG0_SX123 +MTPG0_SX213 +MTPG0_SX303 +MTPG0_SX33 +MTPG0_SX393 +MTPP0_SI1508 +MTPP0_SI2138 +MTPP0_SI878 +MTPP0_SX158 +MTPP0_SX248 +MTPP0_SX338 +MTPP0_SX428 +MTPP0_SX68 +MTPR0_SI1600 +MTPR0_SI2230 +MTPR0_SI506 +MTPR0_SX160 +MTPR0_SX250 +MTPR0_SX340 +MTPR0_SX430 +MTPR0_SX70 +MTQC0_SI1441 +MTQC0_SI2071 +MTQC0_SI480 +MTQC0_SX181 +MTQC0_SX271 +MTQC0_SX361 +MTQC0_SX451 +MTQC0_SX91 +MTRC0_SI1623 +MTRC0_SI589 +MTRC0_SI993 +MTRC0_SX170 +MTRC0_SX183 +MTRC0_SX273 +MTRC0_SX363 +MTRC0_SX93 +MTRR0_SI1548 +MTRR0_SI2178 +MTRR0_SI918 +MTRR0_SX108 +MTRR0_SX18 +MTRR0_SX198 +MTRR0_SX288 +MTRR0_SX378 +MTRT0_SI1227 +MTRT0_SI1857 +MTRT0_SI597 +MTRT0_SX147 +MTRT0_SX237 +MTRT0_SX254 +MTRT0_SX417 +MTRT0_SX57 +MTWH1_SI1512 +MTWH1_SI2142 +MTWH1_SI882 +MTWH1_SX162 +MTWH1_SX252 +MTWH1_SX342 +MTWH1_SX432 +MTWH1_SX72 +MTXS0_SI1060 +MTXS0_SI1690 +MTXS0_SI2320 +MTXS0_SX160 +MTXS0_SX250 +MTXS0_SX340 +MTXS0_SX430 +MTXS0_SX70 +MVJH0_SI1556 +MVJH0_SI2186 +MVJH0_SI926 +MVJH0_SX116 +MVJH0_SX206 +MVJH0_SX26 +MVJH0_SX296 +MVJH0_SX386 +MVLO0_SI1147 +MVLO0_SI1777 +MVLO0_SI517 +MVLO0_SX157 +MVLO0_SX247 +MVLO0_SX337 +MVLO0_SX427 +MVLO0_SX67 +MVRW0_SI1485 +MVRW0_SI2115 +MVRW0_SI855 
+MVRW0_SX135 +MVRW0_SX225 +MVRW0_SX315 +MVRW0_SX405 +MVRW0_SX45 +MWAC0_SI1601 +MWAC0_SI2231 +MWAC0_SI971 +MWAC0_SX161 +MWAC0_SX251 +MWAC0_SX341 +MWAC0_SX431 +MWAC0_SX71 +MWAD0_SI1062 +MWAD0_SI1749 +MWAD0_SI2322 +MWAD0_SX162 +MWAD0_SX252 +MWAD0_SX342 +MWAD0_SX432 +MWAD0_SX72 +MWAR0_SI1045 +MWAR0_SI1675 +MWAR0_SI2305 +MWAR0_SX145 +MWAR0_SX235 +MWAR0_SX325 +MWAR0_SX415 +MWAR0_SX55 +MWCH0_SI1622 +MWCH0_SI1895 +MWCH0_SI2252 +MWCH0_SX182 +MWCH0_SX272 +MWCH0_SX362 +MWCH0_SX452 +MWCH0_SX92 +MWDK0_SI1436 +MWDK0_SI2017 +MWDK0_SI806 +MWDK0_SX176 +MWDK0_SX266 +MWDK0_SX356 +MWDK0_SX446 +MWDK0_SX86 +MWEM0_SI1320 +MWEM0_SI1393 +MWEM0_SI1950 +MWEM0_SX150 +MWEM0_SX240 +MWEM0_SX330 +MWEM0_SX420 +MWEM0_SX60 +MWGR0_SI1606 +MWGR0_SI2236 +MWGR0_SI976 +MWGR0_SX166 +MWGR0_SX256 +MWGR0_SX346 +MWGR0_SX436 +MWGR0_SX76 +MWRE0_SI1057 +MWRE0_SI1687 +MWRE0_SI2317 +MWRE0_SX157 +MWRE0_SX247 +MWRE0_SX337 +MWRE0_SX427 +MWRE0_SX67 +MWRP0_SI1443 +MWRP0_SI1525 +MWRP0_SI2073 +MWRP0_SX183 +MWRP0_SX273 +MWRP0_SX3 +MWRP0_SX363 +MWRP0_SX93 +MWSB0_SI1626 +MWSB0_SI2256 +MWSB0_SI996 +MWSB0_SX186 +MWSB0_SX276 +MWSB0_SX366 +MWSB0_SX6 +MWSB0_SX96 +MWSH0_SI1426 +MWSH0_SI2266 +MWSH0_SI796 +MWSH0_SX166 +MWSH0_SX256 +MWSH0_SX346 +MWSH0_SX436 +MWSH0_SX76 +MZMB0_SI1166 +MZMB0_SI1796 +MZMB0_SI536 +MZMB0_SX176 +MZMB0_SX266 +MZMB0_SX356 +MZMB0_SX446 +MZMB0_SX86
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/valid.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/valid.uid
new file mode 100644
index 0000000..ab5ef38
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_matched/valid.uid
@@ -0,0 +1,400 @@
+FADG0_SI1279 +FADG0_SI1909 +FADG0_SI649 +FADG0_SX109 +FADG0_SX19 +FADG0_SX199 +FADG0_SX289 +FADG0_SX379 +FAKS0_SI1573 +FAKS0_SI2203 +FAKS0_SI943 +FAKS0_SX133 +FAKS0_SX223 +FAKS0_SX313 +FAKS0_SX403 +FAKS0_SX43 +FCAL1_SI1403 +FCAL1_SI2033 +FCAL1_SI773 +FCAL1_SX143 +FCAL1_SX233 +FCAL1_SX323 +FCAL1_SX413 +FCAL1_SX53 +FCMH0_SI1454 +FCMH0_SI2084 +FCMH0_SI824 +FCMH0_SX104 +FCMH0_SX14 +FCMH0_SX194 +FCMH0_SX284 +FCMH0_SX374 +FDAC1_SI1474 +FDAC1_SI2104 +FDAC1_SI844 +FDAC1_SX124 +FDAC1_SX214 +FDAC1_SX304 +FDAC1_SX34 +FDAC1_SX394 +FDMS0_SI1218 +FDMS0_SI1502 +FDMS0_SI1848 +FDMS0_SX138 +FDMS0_SX228 +FDMS0_SX318 +FDMS0_SX408 +FDMS0_SX48 +FDRW0_SI1283 +FDRW0_SI1423 +FDRW0_SI653 +FDRW0_SX113 +FDRW0_SX203 +FDRW0_SX23 +FDRW0_SX293 +FDRW0_SX383 +FEDW0_SI1084 +FEDW0_SI1653 +FEDW0_SI1714 +FEDW0_SX184 +FEDW0_SX274 +FEDW0_SX364 +FEDW0_SX4 +FEDW0_SX94 +FGJD0_SI1179 +FGJD0_SI549 +FGJD0_SI818 +FGJD0_SX189 +FGJD0_SX279 +FGJD0_SX369 +FGJD0_SX9 +FGJD0_SX99 +FJEM0_SI1264 +FJEM0_SI1894 +FJEM0_SI634 +FJEM0_SX184 +FJEM0_SX274 +FJEM0_SX364 +FJEM0_SX4 +FJEM0_SX94 +FJMG0_SI1181 +FJMG0_SI1811 +FJMG0_SI551 +FJMG0_SX101 +FJMG0_SX11 +FJMG0_SX191 +FJMG0_SX281 +FJMG0_SX371 +FJSJ0_SI1484 +FJSJ0_SI2114 +FJSJ0_SI854 +FJSJ0_SX134 +FJSJ0_SX224 +FJSJ0_SX314 +FJSJ0_SX404 +FJSJ0_SX44 +FKMS0_SI1490 +FKMS0_SI2120 +FKMS0_SI860 +FKMS0_SX140 +FKMS0_SX230 +FKMS0_SX320 +FKMS0_SX410 +FKMS0_SX50 +FMAH0_SI1289 +FMAH0_SI1919 +FMAH0_SI659 +FMAH0_SX119 +FMAH0_SX209 +FMAH0_SX29 +FMAH0_SX299 +FMAH0_SX389 +FMML0_SI1040 +FMML0_SI1670 +FMML0_SI2300 +FMML0_SX140 +FMML0_SX230 +FMML0_SX320 +FMML0_SX410 +FMML0_SX50 +FNMR0_SI1399 +FNMR0_SI2029 +FNMR0_SI769 +FNMR0_SX139 +FNMR0_SX229 +FNMR0_SX319 +FNMR0_SX409 +FNMR0_SX49 +FREW0_SI1030 +FREW0_SI1280 +FREW0_SI1910 +FREW0_SX110 +FREW0_SX20 +FREW0_SX200 +FREW0_SX290 +FREW0_SX380 +FSEM0_SI1198 +FSEM0_SI1828 +FSEM0_SI568 +FSEM0_SX118 +FSEM0_SX208 +FSEM0_SX28 +FSEM0_SX298 +FSEM0_SX388
+MAJC0_SI1946 +MAJC0_SI2095 +MAJC0_SI835 +MAJC0_SX115 +MAJC0_SX205 +MAJC0_SX25 +MAJC0_SX295 +MAJC0_SX385 +MBDG0_SI1463 +MBDG0_SI2093 +MBDG0_SI833 +MBDG0_SX113 +MBDG0_SX203 +MBDG0_SX23 +MBDG0_SX293 +MBDG0_SX383 +MBNS0_SI1220 +MBNS0_SI1850 +MBNS0_SI590 +MBNS0_SX140 +MBNS0_SX230 +MBNS0_SX320 +MBNS0_SX410 +MBNS0_SX50 +MBWM0_SI1304 +MBWM0_SI1934 +MBWM0_SI674 +MBWM0_SX134 +MBWM0_SX224 +MBWM0_SX314 +MBWM0_SX404 +MBWM0_SX44 +MCSH0_SI1549 +MCSH0_SI2179 +MCSH0_SI919 +MCSH0_SX109 +MCSH0_SX19 +MCSH0_SX199 +MCSH0_SX289 +MCSH0_SX379 +MDLF0_SI1583 +MDLF0_SI2213 +MDLF0_SI953 +MDLF0_SX143 +MDLF0_SX233 +MDLF0_SX323 +MDLF0_SX413 +MDLF0_SX53 +MDLS0_SI1628 +MDLS0_SI2258 +MDLS0_SI998 +MDLS0_SX188 +MDLS0_SX278 +MDLS0_SX368 +MDLS0_SX8 +MDLS0_SX98 +MDVC0_SI2174 +MDVC0_SI2196 +MDVC0_SI936 +MDVC0_SX126 +MDVC0_SX216 +MDVC0_SX306 +MDVC0_SX36 +MDVC0_SX396 +MERS0_SI1019 +MERS0_SI1649 +MERS0_SI497 +MERS0_SX119 +MERS0_SX209 +MERS0_SX29 +MERS0_SX299 +MERS0_SX389 +MGJF0_SI1901 +MGJF0_SI641 +MGJF0_SI776 +MGJF0_SX101 +MGJF0_SX11 +MGJF0_SX191 +MGJF0_SX281 +MGJF0_SX371 +MGLB0_SI1534 +MGLB0_SI2164 +MGLB0_SI904 +MGLB0_SX184 +MGLB0_SX274 +MGLB0_SX364 +MGLB0_SX4 +MGLB0_SX94 +MGWT0_SI1539 +MGWT0_SI2169 +MGWT0_SI909 +MGWT0_SX189 +MGWT0_SX279 +MGWT0_SX369 +MGWT0_SX9 +MGWT0_SX99 +MJAR0_SI1988 +MJAR0_SI2247 +MJAR0_SI728 +MJAR0_SX188 +MJAR0_SX278 +MJAR0_SX368 +MJAR0_SX8 +MJAR0_SX98 +MJFC0_SI1033 +MJFC0_SI1663 +MJFC0_SI2293 +MJFC0_SX133 +MJFC0_SX223 +MJFC0_SX313 +MJFC0_SX403 +MJFC0_SX43 +MJSW0_SI1010 +MJSW0_SI1640 +MJSW0_SI2270 +MJSW0_SX110 +MJSW0_SX20 +MJSW0_SX200 +MJSW0_SX290 +MJSW0_SX380 +MMDB1_SI1625 +MMDB1_SI2255 +MMDB1_SI995 +MMDB1_SX185 +MMDB1_SX275 +MMDB1_SX365 +MMDB1_SX5 +MMDB1_SX95 +MMDM2_SI1452 +MMDM2_SI1555 +MMDM2_SI2082 +MMDM2_SX102 +MMDM2_SX12 +MMDM2_SX192 +MMDM2_SX282 +MMDM2_SX372 +MMJR0_SI1648 +MMJR0_SI2166 +MMJR0_SI2278 +MMJR0_SX118 +MMJR0_SX208 +MMJR0_SX28 +MMJR0_SX298 +MMJR0_SX388 +MMWH0_SI1089 +MMWH0_SI1301 +MMWH0_SI459 +MMWH0_SX189 +MMWH0_SX279 +MMWH0_SX369 +MMWH0_SX9 +MMWH0_SX99 +MPDF0_SI1542 +MPDF0_SI2172 +MPDF0_SI912 +MPDF0_SX102 +MPDF0_SX12 +MPDF0_SX192 +MPDF0_SX282 +MPDF0_SX372 +MRCS0_SI1223 +MRCS0_SI1853 +MRCS0_SI593 +MRCS0_SX143 +MRCS0_SX233 +MRCS0_SX323 +MRCS0_SX413 +MRCS0_SX53 +MREB0_SI1375 +MREB0_SI2005 +MREB0_SI745 +MREB0_SX115 +MREB0_SX205 +MREB0_SX25 +MREB0_SX295 +MREB0_SX385 +MRJM4_SI1489 +MRJM4_SI2119 +MRJM4_SI859 +MRJM4_SX139 +MRJM4_SX229 +MRJM4_SX319 +MRJM4_SX409 +MRJM4_SX49 +MRJR0_SI1182 +MRJR0_SI1812 +MRJR0_SI2313 +MRJR0_SX102 +MRJR0_SX12 +MRJR0_SX192 +MRJR0_SX282 +MRJR0_SX372 +MROA0_SI1307 +MROA0_SI1970 +MROA0_SI677 +MROA0_SX137 +MROA0_SX227 +MROA0_SX317 +MROA0_SX407 +MROA0_SX47 +MRTK0_SI1093 +MRTK0_SI1723 +MRTK0_SI1750 +MRTK0_SX103 +MRTK0_SX13 +MRTK0_SX193 +MRTK0_SX283 +MRTK0_SX373 +MRWS1_SI1130 +MRWS1_SI1496 +MRWS1_SI500 +MRWS1_SX140 +MRWS1_SX230 +MRWS1_SX320 +MRWS1_SX410 +MRWS1_SX50 +MTAA0_SI1285 +MTAA0_SI1915 +MTAA0_SI596 +MTAA0_SX115 +MTAA0_SX205 +MTAA0_SX25 +MTAA0_SX295 +MTAA0_SX385 +MTDT0_SI1994 +MTDT0_SI2254 +MTDT0_SI994 +MTDT0_SX184 +MTDT0_SX274 +MTDT0_SX364 +MTDT0_SX4 +MTDT0_SX94 +MTEB0_SI1133 +MTEB0_SI2064 +MTEB0_SI503 +MTEB0_SX143 +MTEB0_SX233 +MTEB0_SX323 +MTEB0_SX413 +MTEB0_SX53 +MTHC0_SI1015 +MTHC0_SI1645 +MTHC0_SI2275 +MTHC0_SX115 +MTHC0_SX205 +MTHC0_SX25 +MTHC0_SX295 +MTHC0_SX385 +MWJG0_SI1124 +MWJG0_SI1754 +MWJG0_SI494 +MWJG0_SX134 +MWJG0_SX224 +MWJG0_SX314 +MWJG0_SX404 +MWJG0_SX44
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/test.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/test.uid
new file mode 100644
index 0000000..e3967e4
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/test.uid
@@ -0,0 +1,1680 @@
+FADG0_SA1 +FADG0_SA2 +FADG0_SI1279 +FADG0_SI1909 +FADG0_SI649 +FADG0_SX109 +FADG0_SX19 +FADG0_SX199 +FADG0_SX289 +FADG0_SX379 +FAKS0_SA1 +FAKS0_SA2 +FAKS0_SI1573 +FAKS0_SI2203 +FAKS0_SI943 +FAKS0_SX133 +FAKS0_SX223 +FAKS0_SX313 +FAKS0_SX403 +FAKS0_SX43 +FASW0_SA1 +FASW0_SA2 +FASW0_SI1550 +FASW0_SI2180 +FASW0_SI920 +FASW0_SX110 +FASW0_SX20 +FASW0_SX200 +FASW0_SX290 +FASW0_SX380 +FAWF0_SA1 +FAWF0_SA2 +FAWF0_SI1000 +FAWF0_SI1630 +FAWF0_SI2260 +FAWF0_SX10 +FAWF0_SX100 +FAWF0_SX190 +FAWF0_SX280 +FAWF0_SX370 +FCAL1_SA1 +FCAL1_SA2 +FCAL1_SI1403 +FCAL1_SI2033 +FCAL1_SI773 +FCAL1_SX143 +FCAL1_SX233 +FCAL1_SX323 +FCAL1_SX413 +FCAL1_SX53 +FCAU0_SA1 +FCAU0_SA2 +FCAU0_SI1037 +FCAU0_SI1667 +FCAU0_SI2297 +FCAU0_SX137 +FCAU0_SX227 +FCAU0_SX317 +FCAU0_SX407 +FCAU0_SX47 +FCFT0_SA1 +FCFT0_SA2 +FCFT0_SI1178 +FCFT0_SI1808 +FCFT0_SI548 +FCFT0_SX188 +FCFT0_SX278 +FCFT0_SX368 +FCFT0_SX8 +FCFT0_SX98 +FCMH0_SA1 +FCMH0_SA2 +FCMH0_SI1454 +FCMH0_SI2084 +FCMH0_SI824 +FCMH0_SX104 +FCMH0_SX14 +FCMH0_SX194 +FCMH0_SX284 +FCMH0_SX374 +FCMH1_SA1 +FCMH1_SA2 +FCMH1_SI1493 +FCMH1_SI2123 +FCMH1_SI863 +FCMH1_SX143 +FCMH1_SX233 +FCMH1_SX323 +FCMH1_SX413 +FCMH1_SX53 +FCMR0_SA1 +FCMR0_SA2 +FCMR0_SI1105 +FCMR0_SI1735 +FCMR0_SI475 +FCMR0_SX115 +FCMR0_SX205 +FCMR0_SX25 +FCMR0_SX295 +FCMR0_SX385 +FCRH0_SA1 +FCRH0_SA2 +FCRH0_SI1088 +FCRH0_SI1718 +FCRH0_SI458 +FCRH0_SX188 +FCRH0_SX278 +FCRH0_SX368 +FCRH0_SX8 +FCRH0_SX98 +FDAC1_SA1 +FDAC1_SA2 +FDAC1_SI1474 +FDAC1_SI2104 +FDAC1_SI844 +FDAC1_SX124 +FDAC1_SX214 +FDAC1_SX304 +FDAC1_SX34 +FDAC1_SX394 +FDHC0_SA1 +FDHC0_SA2 +FDHC0_SI1559 +FDHC0_SI2189 +FDHC0_SI929 +FDHC0_SX119 +FDHC0_SX209 +FDHC0_SX29 +FDHC0_SX299 +FDHC0_SX389 +FDMS0_SA1 +FDMS0_SA2 +FDMS0_SI1218 +FDMS0_SI1502 +FDMS0_SI1848 +FDMS0_SX138 +FDMS0_SX228 +FDMS0_SX318 +FDMS0_SX408 +FDMS0_SX48 +FDRD1_SA1 +FDRD1_SA2 +FDRD1_SI1544 +FDRD1_SI1566 +FDRD1_SI2149 +FDRD1_SX104 +FDRD1_SX14 +FDRD1_SX194 +FDRD1_SX284 +FDRD1_SX374 +FDRW0_SA1 +FDRW0_SA2 +FDRW0_SI1283 +FDRW0_SI1423 +FDRW0_SI653 +FDRW0_SX113 +FDRW0_SX203 +FDRW0_SX23 +FDRW0_SX293 +FDRW0_SX383 +FEDW0_SA1 +FEDW0_SA2 +FEDW0_SI1084 +FEDW0_SI1653 +FEDW0_SI1714 +FEDW0_SX184 +FEDW0_SX274 +FEDW0_SX364 +FEDW0_SX4 +FEDW0_SX94 +FELC0_SA1 +FELC0_SA2 +FELC0_SI1386 +FELC0_SI2016 +FELC0_SI756 +FELC0_SX126 +FELC0_SX216 +FELC0_SX306 +FELC0_SX36 +FELC0_SX396 +FGJD0_SA1 +FGJD0_SA2 +FGJD0_SI1179 +FGJD0_SI549 +FGJD0_SI818 +FGJD0_SX189 +FGJD0_SX279 +FGJD0_SX369 +FGJD0_SX9 +FGJD0_SX99 +FGMD0_SA1 +FGMD0_SA2 +FGMD0_SI1943 +FGMD0_SI2107 +FGMD0_SI683 +FGMD0_SX143 +FGMD0_SX233 +FGMD0_SX323 +FGMD0_SX413 +FGMD0_SX53 +FGWR0_SA1 +FGWR0_SA2 +FGWR0_SI1578 +FGWR0_SI2208 +FGWR0_SI948 +FGWR0_SX138 +FGWR0_SX228 +FGWR0_SX318 +FGWR0_SX408 +FGWR0_SX48 +FHES0_SA1 +FHES0_SA2 +FHES0_SI1109 +FHES0_SI1739 +FHES0_SI479 +FHES0_SX119 +FHES0_SX209 +FHES0_SX29 +FHES0_SX299 +FHES0_SX389 +FHEW0_SA1 +FHEW0_SA2 +FHEW0_SI2023 +FHEW0_SI690 +FHEW0_SI763 +FHEW0_SX133 +FHEW0_SX223 +FHEW0_SX313 +FHEW0_SX403 +FHEW0_SX43 +FISB0_SA1 +FISB0_SA2 +FISB0_SI1579 +FISB0_SI2209 +FISB0_SI949 +FISB0_SX139 +FISB0_SX229 +FISB0_SX319 +FISB0_SX409 +FISB0_SX49 +FJAS0_SA1 +FJAS0_SA2 +FJAS0_SI1400 +FJAS0_SI2030 +FJAS0_SI770 +FJAS0_SX140 +FJAS0_SX230 +FJAS0_SX320 +FJAS0_SX410 +FJAS0_SX50 +FJCS0_SA1 +FJCS0_SA2 +FJCS0_SI1309 +FJCS0_SI1833 +FJCS0_SI1939 +FJCS0_SX139 +FJCS0_SX229 +FJCS0_SX319 +FJCS0_SX409 +FJCS0_SX49 +FJEM0_SA1 +FJEM0_SA2 +FJEM0_SI1264 +FJEM0_SI1894 +FJEM0_SI634 +FJEM0_SX184 +FJEM0_SX274 +FJEM0_SX364 +FJEM0_SX4
+FJEM0_SX94 +FJLM0_SA1 +FJLM0_SA2 +FJLM0_SI1043 +FJLM0_SI1673 +FJLM0_SI2303 +FJLM0_SX143 +FJLM0_SX233 +FJLM0_SX323 +FJLM0_SX413 +FJLM0_SX53 +FJMG0_SA1 +FJMG0_SA2 +FJMG0_SI1181 +FJMG0_SI1811 +FJMG0_SI551 +FJMG0_SX101 +FJMG0_SX11 +FJMG0_SX191 +FJMG0_SX281 +FJMG0_SX371 +FJRE0_SA1 +FJRE0_SA2 +FJRE0_SI1116 +FJRE0_SI1587 +FJRE0_SI1746 +FJRE0_SX126 +FJRE0_SX216 +FJRE0_SX306 +FJRE0_SX36 +FJRE0_SX396 +FJSA0_SA1 +FJSA0_SA2 +FJSA0_SI1379 +FJSA0_SI2009 +FJSA0_SI749 +FJSA0_SX119 +FJSA0_SX209 +FJSA0_SX29 +FJSA0_SX299 +FJSA0_SX389 +FJSJ0_SA1 +FJSJ0_SA2 +FJSJ0_SI1484 +FJSJ0_SI2114 +FJSJ0_SI854 +FJSJ0_SX134 +FJSJ0_SX224 +FJSJ0_SX314 +FJSJ0_SX404 +FJSJ0_SX44 +FJWB0_SA1 +FJWB0_SA2 +FJWB0_SI1265 +FJWB0_SI635 +FJWB0_SI992 +FJWB0_SX185 +FJWB0_SX275 +FJWB0_SX365 +FJWB0_SX5 +FJWB0_SX95 +FKMS0_SA1 +FKMS0_SA2 +FKMS0_SI1490 +FKMS0_SI2120 +FKMS0_SI860 +FKMS0_SX140 +FKMS0_SX230 +FKMS0_SX320 +FKMS0_SX410 +FKMS0_SX50 +FLAS0_SA1 +FLAS0_SA2 +FLAS0_SI1026 +FLAS0_SI1488 +FLAS0_SI858 +FLAS0_SX138 +FLAS0_SX228 +FLAS0_SX318 +FLAS0_SX408 +FLAS0_SX48 +FLBW0_SA1 +FLBW0_SA2 +FLBW0_SI1219 +FLBW0_SI1849 +FLBW0_SI2253 +FLBW0_SX139 +FLBW0_SX229 +FLBW0_SX319 +FLBW0_SX409 +FLBW0_SX49 +FLKD0_SA1 +FLKD0_SA2 +FLKD0_SI1369 +FLKD0_SI739 +FLKD0_SI894 +FLKD0_SX109 +FLKD0_SX19 +FLKD0_SX199 +FLKD0_SX289 +FLKD0_SX379 +FLNH0_SA1 +FLNH0_SA2 +FLNH0_SI1214 +FLNH0_SI584 +FLNH0_SI941 +FLNH0_SX134 +FLNH0_SX224 +FLNH0_SX314 +FLNH0_SX404 +FLNH0_SX44 +FMAF0_SA1 +FMAF0_SA2 +FMAF0_SI1459 +FMAF0_SI2089 +FMAF0_SI829 +FMAF0_SX109 +FMAF0_SX19 +FMAF0_SX199 +FMAF0_SX289 +FMAF0_SX379 +FMAH0_SA1 +FMAH0_SA2 +FMAH0_SI1289 +FMAH0_SI1919 +FMAH0_SI659 +FMAH0_SX119 +FMAH0_SX209 +FMAH0_SX29 +FMAH0_SX299 +FMAH0_SX389 +FMCM0_SA1 +FMCM0_SA2 +FMCM0_SI1180 +FMCM0_SI1810 +FMCM0_SI550 +FMCM0_SX10 +FMCM0_SX100 +FMCM0_SX190 +FMCM0_SX280 +FMCM0_SX370 +FMGD0_SA1 +FMGD0_SA2 +FMGD0_SI1564 +FMGD0_SI2194 +FMGD0_SI934 +FMGD0_SX124 +FMGD0_SX214 +FMGD0_SX304 +FMGD0_SX34 +FMGD0_SX394 +FMLD0_SA1 +FMLD0_SA2 +FMLD0_SI2185 +FMLD0_SI822 +FMLD0_SI925 +FMLD0_SX115 +FMLD0_SX205 +FMLD0_SX25 +FMLD0_SX295 +FMLD0_SX385 +FMML0_SA1 +FMML0_SA2 +FMML0_SI1040 +FMML0_SI1670 +FMML0_SI2300 +FMML0_SX140 +FMML0_SX230 +FMML0_SX320 +FMML0_SX410 +FMML0_SX50 +FNLP0_SA1 +FNLP0_SA2 +FNLP0_SI1308 +FNLP0_SI1938 +FNLP0_SI678 +FNLP0_SX138 +FNLP0_SX228 +FNLP0_SX318 +FNLP0_SX408 +FNLP0_SX48 +FNMR0_SA1 +FNMR0_SA2 +FNMR0_SI1399 +FNMR0_SI2029 +FNMR0_SI769 +FNMR0_SX139 +FNMR0_SX229 +FNMR0_SX319 +FNMR0_SX409 +FNMR0_SX49 +FPAS0_SA1 +FPAS0_SA2 +FPAS0_SI1272 +FPAS0_SI2204 +FPAS0_SI944 +FPAS0_SX134 +FPAS0_SX224 +FPAS0_SX314 +FPAS0_SX404 +FPAS0_SX44 +FPKT0_SA1 +FPKT0_SA2 +FPKT0_SI1538 +FPKT0_SI2168 +FPKT0_SI908 +FPKT0_SX188 +FPKT0_SX278 +FPKT0_SX368 +FPKT0_SX8 +FPKT0_SX98 +FRAM1_SA1 +FRAM1_SA2 +FRAM1_SI1360 +FRAM1_SI522 +FRAM1_SI730 +FRAM1_SX10 +FRAM1_SX100 +FRAM1_SX190 +FRAM1_SX280 +FRAM1_SX370 +FREW0_SA1 +FREW0_SA2 +FREW0_SI1030 +FREW0_SI1280 +FREW0_SI1910 +FREW0_SX110 +FREW0_SX20 +FREW0_SX200 +FREW0_SX290 +FREW0_SX380 +FRNG0_SA1 +FRNG0_SA2 +FRNG0_SI1355 +FRNG0_SI1985 +FRNG0_SI725 +FRNG0_SX185 +FRNG0_SX275 +FRNG0_SX365 +FRNG0_SX5 +FRNG0_SX95 +FSEM0_SA1 +FSEM0_SA2 +FSEM0_SI1198 +FSEM0_SI1828 +FSEM0_SI568 +FSEM0_SX118 +FSEM0_SX208 +FSEM0_SX28 +FSEM0_SX298 +FSEM0_SX388 +FSLB1_SA1 +FSLB1_SA2 +FSLB1_SI1904 +FSLB1_SI644 +FSLB1_SI891 +FSLB1_SX104 +FSLB1_SX14 +FSLB1_SX194 +FSLB1_SX284 +FSLB1_SX374 +FSXA0_SA1 +FSXA0_SA2 +FSXA0_SI1108 +FSXA0_SI1846 +FSXA0_SI478 +FSXA0_SX118 +FSXA0_SX208 +FSXA0_SX28 +FSXA0_SX298 +FSXA0_SX388 +FTLH0_SA1 +FTLH0_SA2 +FTLH0_SI1009 +FTLH0_SI1390 +FTLH0_SI1639 +FTLH0_SX109 +FTLH0_SX19 +FTLH0_SX199 +FTLH0_SX289 
+FTLH0_SX379 +FUTB0_SA1 +FUTB0_SA2 +FUTB0_SI1204 +FUTB0_SI1330 +FUTB0_SI1834 +FUTB0_SX124 +FUTB0_SX214 +FUTB0_SX304 +FUTB0_SX34 +FUTB0_SX394 +MABW0_SA1 +MABW0_SA2 +MABW0_SI1230 +MABW0_SI1664 +MABW0_SI2294 +MABW0_SX134 +MABW0_SX224 +MABW0_SX314 +MABW0_SX404 +MABW0_SX44 +MAHH0_SA1 +MAHH0_SA2 +MAHH0_SI1294 +MAHH0_SI1924 +MAHH0_SI664 +MAHH0_SX124 +MAHH0_SX214 +MAHH0_SX304 +MAHH0_SX34 +MAHH0_SX394 +MAJC0_SA1 +MAJC0_SA2 +MAJC0_SI1946 +MAJC0_SI2095 +MAJC0_SI835 +MAJC0_SX115 +MAJC0_SX205 +MAJC0_SX25 +MAJC0_SX295 +MAJC0_SX385 +MBDG0_SA1 +MBDG0_SA2 +MBDG0_SI1463 +MBDG0_SI2093 +MBDG0_SI833 +MBDG0_SX113 +MBDG0_SX203 +MBDG0_SX23 +MBDG0_SX293 +MBDG0_SX383 +MBJK0_SA1 +MBJK0_SA2 +MBJK0_SI1175 +MBJK0_SI2128 +MBJK0_SI545 +MBJK0_SX185 +MBJK0_SX275 +MBJK0_SX365 +MBJK0_SX5 +MBJK0_SX95 +MBNS0_SA1 +MBNS0_SA2 +MBNS0_SI1220 +MBNS0_SI1850 +MBNS0_SI590 +MBNS0_SX140 +MBNS0_SX230 +MBNS0_SX320 +MBNS0_SX410 +MBNS0_SX50 +MBPM0_SA1 +MBPM0_SA2 +MBPM0_SI1577 +MBPM0_SI1584 +MBPM0_SI947 +MBPM0_SX137 +MBPM0_SX227 +MBPM0_SX317 +MBPM0_SX407 +MBPM0_SX47 +MBWM0_SA1 +MBWM0_SA2 +MBWM0_SI1304 +MBWM0_SI1934 +MBWM0_SI674 +MBWM0_SX134 +MBWM0_SX224 +MBWM0_SX314 +MBWM0_SX404 +MBWM0_SX44 +MCCS0_SA1 +MCCS0_SA2 +MCCS0_SI1469 +MCCS0_SI2099 +MCCS0_SI839 +MCCS0_SX119 +MCCS0_SX209 +MCCS0_SX29 +MCCS0_SX299 +MCCS0_SX389 +MCEM0_SA1 +MCEM0_SA2 +MCEM0_SI1398 +MCEM0_SI2028 +MCEM0_SI768 +MCEM0_SX138 +MCEM0_SX228 +MCEM0_SX318 +MCEM0_SX408 +MCEM0_SX48 +MCHH0_SA1 +MCHH0_SA2 +MCHH0_SI1004 +MCHH0_SI1634 +MCHH0_SI530 +MCHH0_SX104 +MCHH0_SX14 +MCHH0_SX194 +MCHH0_SX284 +MCHH0_SX374 +MCMB0_SA1 +MCMB0_SA2 +MCMB0_SI1268 +MCMB0_SI1898 +MCMB0_SI638 +MCMB0_SX188 +MCMB0_SX278 +MCMB0_SX368 +MCMB0_SX8 +MCMB0_SX98 +MCMJ0_SA1 +MCMJ0_SA2 +MCMJ0_SI1094 +MCMJ0_SI464 +MCMJ0_SI602 +MCMJ0_SX104 +MCMJ0_SX14 +MCMJ0_SX194 +MCMJ0_SX284 +MCMJ0_SX374 +MCRC0_SA1 +MCRC0_SA2 +MCRC0_SI1092 +MCRC0_SI1722 +MCRC0_SI462 +MCRC0_SX102 +MCRC0_SX12 +MCRC0_SX192 +MCRC0_SX282 +MCRC0_SX372 +MCSH0_SA1 +MCSH0_SA2 +MCSH0_SI1549 +MCSH0_SI2179 +MCSH0_SI919 +MCSH0_SX109 +MCSH0_SX19 +MCSH0_SX199 +MCSH0_SX289 +MCSH0_SX379 +MCTT0_SA1 +MCTT0_SA2 +MCTT0_SI1144 +MCTT0_SI2188 +MCTT0_SI928 +MCTT0_SX118 +MCTT0_SX208 +MCTT0_SX28 +MCTT0_SX298 +MCTT0_SX388 +MCTW0_SA1 +MCTW0_SA2 +MCTW0_SI1373 +MCTW0_SI2003 +MCTW0_SI743 +MCTW0_SX113 +MCTW0_SX203 +MCTW0_SX23 +MCTW0_SX293 +MCTW0_SX383 +MDAB0_SA1 +MDAB0_SA2 +MDAB0_SI1039 +MDAB0_SI1669 +MDAB0_SI2299 +MDAB0_SX139 +MDAB0_SX229 +MDAB0_SX319 +MDAB0_SX409 +MDAB0_SX49 +MDAC2_SA1 +MDAC2_SA2 +MDAC2_SI2259 +MDAC2_SI560 +MDAC2_SI999 +MDAC2_SX189 +MDAC2_SX279 +MDAC2_SX369 +MDAC2_SX9 +MDAC2_SX99 +MDAW1_SA1 +MDAW1_SA2 +MDAW1_SI1453 +MDAW1_SI2083 +MDAW1_SI823 +MDAW1_SX103 +MDAW1_SX13 +MDAW1_SX193 +MDAW1_SX283 +MDAW1_SX373 +MDBB0_SA1 +MDBB0_SA2 +MDBB0_SI1195 +MDBB0_SI1825 +MDBB0_SI565 +MDBB0_SX115 +MDBB0_SX205 +MDBB0_SX25 +MDBB0_SX295 +MDBB0_SX385 +MDLD0_SA1 +MDLD0_SA2 +MDLD0_SI1543 +MDLD0_SI2173 +MDLD0_SI913 +MDLD0_SX103 +MDLD0_SX13 +MDLD0_SX193 +MDLD0_SX283 +MDLD0_SX373 +MDLF0_SA1 +MDLF0_SA2 +MDLF0_SI1583 +MDLF0_SI2213 +MDLF0_SI953 +MDLF0_SX143 +MDLF0_SX233 +MDLF0_SX323 +MDLF0_SX413 +MDLF0_SX53 +MDLS0_SA1 +MDLS0_SA2 +MDLS0_SI1628 +MDLS0_SI2258 +MDLS0_SI998 +MDLS0_SX188 +MDLS0_SX278 +MDLS0_SX368 +MDLS0_SX8 +MDLS0_SX98 +MDRB0_SA1 +MDRB0_SA2 +MDRB0_SI1174 +MDRB0_SI2109 +MDRB0_SI544 +MDRB0_SX184 +MDRB0_SX274 +MDRB0_SX364 +MDRB0_SX4 +MDRB0_SX94 +MDRM0_SA1 +MDRM0_SA2 +MDRM0_SI1013 +MDRM0_SI1643 +MDRM0_SI2273 +MDRM0_SX113 +MDRM0_SX203 +MDRM0_SX23 +MDRM0_SX293 +MDRM0_SX383 +MDSC0_SA1 +MDSC0_SA2 +MDSC0_SI1038 +MDSC0_SI2298 +MDSC0_SI967 +MDSC0_SX138 +MDSC0_SX228 +MDSC0_SX318 +MDSC0_SX408 
+MDSC0_SX48 +MDVC0_SA1 +MDVC0_SA2 +MDVC0_SI2174 +MDVC0_SI2196 +MDVC0_SI936 +MDVC0_SX126 +MDVC0_SX216 +MDVC0_SX306 +MDVC0_SX36 +MDVC0_SX396 +MDWA0_SA1 +MDWA0_SA2 +MDWA0_SI1146 +MDWA0_SI1445 +MDWA0_SI519 +MDWA0_SX185 +MDWA0_SX275 +MDWA0_SX365 +MDWA0_SX5 +MDWA0_SX95 +MDWK0_SA1 +MDWK0_SA2 +MDWK0_SI1540 +MDWK0_SI2170 +MDWK0_SI910 +MDWK0_SX10 +MDWK0_SX100 +MDWK0_SX190 +MDWK0_SX280 +MDWK0_SX370 +MERS0_SA1 +MERS0_SA2 +MERS0_SI1019 +MERS0_SI1649 +MERS0_SI497 +MERS0_SX119 +MERS0_SX209 +MERS0_SX29 +MERS0_SX299 +MERS0_SX389 +MESD0_SA1 +MESD0_SA2 +MESD0_SI1002 +MESD0_SI1632 +MESD0_SI2262 +MESD0_SX102 +MESD0_SX12 +MESD0_SX192 +MESD0_SX282 +MESD0_SX372 +MFGK0_SA1 +MFGK0_SA2 +MFGK0_SI1451 +MFGK0_SI1744 +MFGK0_SI484 +MFGK0_SX124 +MFGK0_SX214 +MFGK0_SX304 +MFGK0_SX34 +MFGK0_SX394 +MGJF0_SA1 +MGJF0_SA2 +MGJF0_SI1901 +MGJF0_SI641 +MGJF0_SI776 +MGJF0_SX101 +MGJF0_SX11 +MGJF0_SX191 +MGJF0_SX281 +MGJF0_SX371 +MGLB0_SA1 +MGLB0_SA2 +MGLB0_SI1534 +MGLB0_SI2164 +MGLB0_SI904 +MGLB0_SX184 +MGLB0_SX274 +MGLB0_SX364 +MGLB0_SX4 +MGLB0_SX94 +MGMM0_SA1 +MGMM0_SA2 +MGMM0_SI1129 +MGMM0_SI1759 +MGMM0_SI499 +MGMM0_SX139 +MGMM0_SX229 +MGMM0_SX319 +MGMM0_SX409 +MGMM0_SX49 +MGRT0_SA1 +MGRT0_SA2 +MGRT0_SI1450 +MGRT0_SI2080 +MGRT0_SI820 +MGRT0_SX10 +MGRT0_SX100 +MGRT0_SX190 +MGRT0_SX280 +MGRT0_SX370 +MGWT0_SA1 +MGWT0_SA2 +MGWT0_SI1539 +MGWT0_SI2169 +MGWT0_SI909 +MGWT0_SX189 +MGWT0_SX279 +MGWT0_SX369 +MGWT0_SX9 +MGWT0_SX99 +MHPG0_SA1 +MHPG0_SA2 +MHPG0_SI1090 +MHPG0_SI1720 +MHPG0_SI460 +MHPG0_SX10 +MHPG0_SX100 +MHPG0_SX190 +MHPG0_SX280 +MHPG0_SX370 +MJAR0_SA1 +MJAR0_SA2 +MJAR0_SI1988 +MJAR0_SI2247 +MJAR0_SI728 +MJAR0_SX188 +MJAR0_SX278 +MJAR0_SX368 +MJAR0_SX8 +MJAR0_SX98 +MJBR0_SA1 +MJBR0_SA2 +MJBR0_SI1001 +MJBR0_SI1631 +MJBR0_SI2261 +MJBR0_SX101 +MJBR0_SX11 +MJBR0_SX191 +MJBR0_SX281 +MJBR0_SX371 +MJDH0_SA1 +MJDH0_SA2 +MJDH0_SI1354 +MJDH0_SI1984 +MJDH0_SI724 +MJDH0_SX184 +MJDH0_SX274 +MJDH0_SX364 +MJDH0_SX4 +MJDH0_SX94 +MJDM1_SA1 +MJDM1_SA2 +MJDM1_SI1085 +MJDM1_SI1715 +MJDM1_SI455 +MJDM1_SX185 +MJDM1_SX275 +MJDM1_SX365 +MJDM1_SX5 +MJDM1_SX95 +MJES0_SA1 +MJES0_SA2 +MJES0_SI1384 +MJES0_SI2014 +MJES0_SI754 +MJES0_SX124 +MJES0_SX214 +MJES0_SX304 +MJES0_SX34 +MJES0_SX394 +MJFC0_SA1 +MJFC0_SA2 +MJFC0_SI1033 +MJFC0_SI1663 +MJFC0_SI2293 +MJFC0_SX133 +MJFC0_SX223 +MJFC0_SX313 +MJFC0_SX403 +MJFC0_SX43 +MJJG0_SA1 +MJJG0_SA2 +MJJG0_SI1003 +MJJG0_SI1633 +MJJG0_SI2263 +MJJG0_SX103 +MJJG0_SX13 +MJJG0_SX193 +MJJG0_SX283 +MJJG0_SX373 +MJLN0_SA1 +MJLN0_SA2 +MJLN0_SI1449 +MJLN0_SI2079 +MJLN0_SI819 +MJLN0_SX189 +MJLN0_SX279 +MJLN0_SX369 +MJLN0_SX9 +MJLN0_SX99 +MJMP0_SA1 +MJMP0_SA2 +MJMP0_SI1535 +MJMP0_SI1791 +MJMP0_SI905 +MJMP0_SX185 +MJMP0_SX275 +MJMP0_SX365 +MJMP0_SX5 +MJMP0_SX95 +MJRF0_SA1 +MJRF0_SA2 +MJRF0_SI1114 +MJRF0_SI2081 +MJRF0_SI821 +MJRF0_SX101 +MJRF0_SX11 +MJRF0_SX191 +MJRF0_SX281 +MJRF0_SX371 +MJSW0_SA1 +MJSW0_SA2 +MJSW0_SI1010 +MJSW0_SI1640 +MJSW0_SI2270 +MJSW0_SX110 +MJSW0_SX20 +MJSW0_SX200 +MJSW0_SX290 +MJSW0_SX380 +MJTC0_SA1 +MJTC0_SA2 +MJTC0_SI1460 +MJTC0_SI2090 +MJTC0_SI830 +MJTC0_SX110 +MJTC0_SX20 +MJTC0_SX200 +MJTC0_SX290 +MJTC0_SX380 +MJTH0_SA1 +MJTH0_SA2 +MJTH0_SI1296 +MJTH0_SI1926 +MJTH0_SI666 +MJTH0_SX126 +MJTH0_SX216 +MJTH0_SX306 +MJTH0_SX36 +MJTH0_SX396 +MJVW0_SA1 +MJVW0_SA2 +MJVW0_SI1733 +MJVW0_SI1758 +MJVW0_SI473 +MJVW0_SX113 +MJVW0_SX203 +MJVW0_SX23 +MJVW0_SX293 +MJVW0_SX383 +MKCH0_SA1 +MKCH0_SA2 +MKCH0_SI1378 +MKCH0_SI1425 +MKCH0_SI2008 +MKCH0_SX118 +MKCH0_SX208 +MKCH0_SX28 +MKCH0_SX298 +MKCH0_SX388 +MKCL0_SA1 +MKCL0_SA2 +MKCL0_SI1091 +MKCL0_SI1721 +MKCL0_SI461 +MKCL0_SX101 +MKCL0_SX11 +MKCL0_SX191 +MKCL0_SX281 
+MKCL0_SX371 +MKDR0_SA1 +MKDR0_SA2 +MKDR0_SI1273 +MKDR0_SI1903 +MKDR0_SI643 +MKDR0_SX103 +MKDR0_SX13 +MKDR0_SX193 +MKDR0_SX283 +MKDR0_SX373 +MKJL0_SA1 +MKJL0_SA2 +MKJL0_SI1100 +MKJL0_SI1730 +MKJL0_SI470 +MKJL0_SX110 +MKJL0_SX20 +MKJL0_SX200 +MKJL0_SX290 +MKJL0_SX380 +MKLT0_SA1 +MKLT0_SA2 +MKLT0_SI1213 +MKLT0_SI1843 +MKLT0_SI583 +MKLT0_SX133 +MKLT0_SX223 +MKLT0_SX313 +MKLT0_SX403 +MKLT0_SX43 +MLIH0_SA1 +MLIH0_SA2 +MLIH0_SI1183 +MLIH0_SI1813 +MLIH0_SI553 +MLIH0_SX103 +MLIH0_SX13 +MLIH0_SX193 +MLIH0_SX283 +MLIH0_SX373 +MLJB0_SA1 +MLJB0_SA2 +MLJB0_SI1310 +MLJB0_SI1940 +MLJB0_SI680 +MLJB0_SX140 +MLJB0_SX230 +MLJB0_SX320 +MLJB0_SX410 +MLJB0_SX50 +MLLL0_SA1 +MLLL0_SA2 +MLLL0_SI1363 +MLLL0_SI1993 +MLLL0_SI733 +MLLL0_SX103 +MLLL0_SX13 +MLLL0_SX193 +MLLL0_SX283 +MLLL0_SX373 +MLNT0_SA1 +MLNT0_SA2 +MLNT0_SI1574 +MLNT0_SI1902 +MLNT0_SI642 +MLNT0_SX102 +MLNT0_SX12 +MLNT0_SX192 +MLNT0_SX282 +MLNT0_SX372 +MMAB0_SA1 +MMAB0_SA2 +MMAB0_SI1362 +MMAB0_SI1992 +MMAB0_SI732 +MMAB0_SX102 +MMAB0_SX12 +MMAB0_SX192 +MMAB0_SX282 +MMAB0_SX372 +MMDB1_SA1 +MMDB1_SA2 +MMDB1_SI1625 +MMDB1_SI2255 +MMDB1_SI995 +MMDB1_SX185 +MMDB1_SX275 +MMDB1_SX365 +MMDB1_SX5 +MMDB1_SX95 +MMDH0_SA1 +MMDH0_SA2 +MMDH0_SI1656 +MMDH0_SI2118 +MMDH0_SI2286 +MMDH0_SX126 +MMDH0_SX216 +MMDH0_SX306 +MMDH0_SX36 +MMDH0_SX396 +MMDM2_SA1 +MMDM2_SA2 +MMDM2_SI1452 +MMDM2_SI1555 +MMDM2_SI2082 +MMDM2_SX102 +MMDM2_SX12 +MMDM2_SX192 +MMDM2_SX282 +MMDM2_SX372 +MMJR0_SA1 +MMJR0_SA2 +MMJR0_SI1648 +MMJR0_SI2166 +MMJR0_SI2278 +MMJR0_SX118 +MMJR0_SX208 +MMJR0_SX28 +MMJR0_SX298 +MMJR0_SX388 +MMWH0_SA1 +MMWH0_SA2 +MMWH0_SI1089 +MMWH0_SI1301 +MMWH0_SI459 +MMWH0_SX189 +MMWH0_SX279 +MMWH0_SX369 +MMWH0_SX9 +MMWH0_SX99 +MNJM0_SA1 +MNJM0_SA2 +MNJM0_SI1580 +MNJM0_SI2210 +MNJM0_SI950 +MNJM0_SX140 +MNJM0_SX230 +MNJM0_SX320 +MNJM0_SX410 +MNJM0_SX50 +MNLS0_SA1 +MNLS0_SA2 +MNLS0_SI1483 +MNLS0_SI1610 +MNLS0_SI853 +MNLS0_SX133 +MNLS0_SX223 +MNLS0_SX313 +MNLS0_SX403 +MNLS0_SX43 +MPAB0_SA1 +MPAB0_SA2 +MPAB0_SI1103 +MPAB0_SI1128 +MPAB0_SI498 +MPAB0_SX138 +MPAB0_SX228 +MPAB0_SX318 +MPAB0_SX408 +MPAB0_SX48 +MPAM0_SA1 +MPAM0_SA2 +MPAM0_SI1189 +MPAM0_SI1819 +MPAM0_SI1961 +MPAM0_SX109 +MPAM0_SX19 +MPAM0_SX199 +MPAM0_SX289 +MPAM0_SX379 +MPAM1_SA1 +MPAM1_SA2 +MPAM1_SI1029 +MPAM1_SI1836 +MPAM1_SI576 +MPAM1_SX126 +MPAM1_SX216 +MPAM1_SX306 +MPAM1_SX36 +MPAM1_SX396 +MPCS0_SA1 +MPCS0_SA2 +MPCS0_SI1359 +MPCS0_SI1989 +MPCS0_SI729 +MPCS0_SX189 +MPCS0_SX279 +MPCS0_SX369 +MPCS0_SX9 +MPCS0_SX99 +MPDF0_SA1 +MPDF0_SA2 +MPDF0_SI1542 +MPDF0_SI2172 +MPDF0_SI912 +MPDF0_SX102 +MPDF0_SX12 +MPDF0_SX192 +MPDF0_SX282 +MPDF0_SX372 +MPGL0_SA1 +MPGL0_SA2 +MPGL0_SI1099 +MPGL0_SI1729 +MPGL0_SI469 +MPGL0_SX109 +MPGL0_SX19 +MPGL0_SX199 +MPGL0_SX289 +MPGL0_SX379 +MPLB0_SA1 +MPLB0_SA2 +MPLB0_SI1394 +MPLB0_SI2024 +MPLB0_SI764 +MPLB0_SX134 +MPLB0_SX224 +MPLB0_SX314 +MPLB0_SX404 +MPLB0_SX44 +MPWM0_SA1 +MPWM0_SA2 +MPWM0_SI1127 +MPWM0_SI1757 +MPWM0_SI2279 +MPWM0_SX137 +MPWM0_SX227 +MPWM0_SX317 +MPWM0_SX407 +MPWM0_SX47 +MRCS0_SA1 +MRCS0_SA2 +MRCS0_SI1223 +MRCS0_SI1853 +MRCS0_SI593 +MRCS0_SX143 +MRCS0_SX233 +MRCS0_SX323 +MRCS0_SX413 +MRCS0_SX53 +MRCZ0_SA1 +MRCZ0_SA2 +MRCZ0_SI1541 +MRCZ0_SI2171 +MRCZ0_SI911 +MRCZ0_SX101 +MRCZ0_SX11 +MRCZ0_SX191 +MRCZ0_SX281 +MRCZ0_SX371 +MREB0_SA1 +MREB0_SA2 +MREB0_SI1375 +MREB0_SI2005 +MREB0_SI745 +MREB0_SX115 +MREB0_SX205 +MREB0_SX25 +MREB0_SX295 +MREB0_SX385 +MRES0_SA1 +MRES0_SA2 +MRES0_SI1217 +MRES0_SI1847 +MRES0_SI587 +MRES0_SX137 +MRES0_SX227 +MRES0_SX317 +MRES0_SX407 +MRES0_SX47 +MRGG0_SA1 +MRGG0_SA2 +MRGG0_SI1199 +MRGG0_SI1829 +MRGG0_SI569 +MRGG0_SX119 +MRGG0_SX209 +MRGG0_SX29 +MRGG0_SX299 
+MRGG0_SX389 +MRJM3_SA1 +MRJM3_SA2 +MRJM3_SI1448 +MRJM3_SI1809 +MRJM3_SI2078 +MRJM3_SX188 +MRJM3_SX278 +MRJM3_SX368 +MRJM3_SX8 +MRJM3_SX98 +MRJM4_SA1 +MRJM4_SA2 +MRJM4_SI1489 +MRJM4_SI2119 +MRJM4_SI859 +MRJM4_SX139 +MRJM4_SX229 +MRJM4_SX319 +MRJM4_SX409 +MRJM4_SX49 +MRJO0_SA1 +MRJO0_SA2 +MRJO0_SI1364 +MRJO0_SI1624 +MRJO0_SI734 +MRJO0_SX104 +MRJO0_SX14 +MRJO0_SX194 +MRJO0_SX284 +MRJO0_SX374 +MRJR0_SA1 +MRJR0_SA2 +MRJR0_SI1182 +MRJR0_SI1812 +MRJR0_SI2313 +MRJR0_SX102 +MRJR0_SX12 +MRJR0_SX192 +MRJR0_SX282 +MRJR0_SX372 +MRJS0_SA1 +MRJS0_SA2 +MRJS0_SI1444 +MRJS0_SI1523 +MRJS0_SI2074 +MRJS0_SX184 +MRJS0_SX274 +MRJS0_SX364 +MRJS0_SX4 +MRJS0_SX94 +MRKO0_SA1 +MRKO0_SA2 +MRKO0_SI1397 +MRKO0_SI2027 +MRKO0_SI767 +MRKO0_SX137 +MRKO0_SX227 +MRKO0_SX317 +MRKO0_SX407 +MRKO0_SX47 +MRMS1_SA1 +MRMS1_SA2 +MRMS1_SI1487 +MRMS1_SI2117 +MRMS1_SI857 +MRMS1_SX137 +MRMS1_SX227 +MRMS1_SX317 +MRMS1_SX407 +MRMS1_SX47 +MROA0_SA1 +MROA0_SA2 +MROA0_SI1307 +MROA0_SI1970 +MROA0_SI677 +MROA0_SX137 +MROA0_SX227 +MROA0_SX317 +MROA0_SX407 +MROA0_SX47 +MRPC0_SA1 +MRPC0_SA2 +MRPC0_SI1753 +MRPC0_SI493 +MRPC0_SI933 +MRPC0_SX133 +MRPC0_SX223 +MRPC0_SX313 +MRPC0_SX403 +MRPC0_SX43 +MRPP0_SA1 +MRPP0_SA2 +MRPP0_SI1184 +MRPP0_SI1814 +MRPP0_SI554 +MRPP0_SX104 +MRPP0_SX14 +MRPP0_SX194 +MRPP0_SX284 +MRPP0_SX374 +MRRK0_SA1 +MRRK0_SA2 +MRRK0_SI1288 +MRRK0_SI1716 +MRRK0_SI1918 +MRRK0_SX118 +MRRK0_SX208 +MRRK0_SX28 +MRRK0_SX298 +MRRK0_SX388 +MRTK0_SA1 +MRTK0_SA2 +MRTK0_SI1093 +MRTK0_SI1723 +MRTK0_SI1750 +MRTK0_SX103 +MRTK0_SX13 +MRTK0_SX193 +MRTK0_SX283 +MRTK0_SX373 +MRWS1_SA1 +MRWS1_SA2 +MRWS1_SI1130 +MRWS1_SI1496 +MRWS1_SI500 +MRWS1_SX140 +MRWS1_SX230 +MRWS1_SX320 +MRWS1_SX410 +MRWS1_SX50 +MSFH1_SA1 +MSFH1_SA2 +MSFH1_SI1270 +MSFH1_SI1900 +MSFH1_SI640 +MSFH1_SX10 +MSFH1_SX100 +MSFH1_SX190 +MSFH1_SX280 +MSFH1_SX370 +MSJS1_SA1 +MSJS1_SA2 +MSJS1_SI1899 +MSJS1_SI639 +MSJS1_SI869 +MSJS1_SX189 +MSJS1_SX279 +MSJS1_SX369 +MSJS1_SX9 +MSJS1_SX99 +MSLB0_SA1 +MSLB0_SA2 +MSLB0_SI1193 +MSLB0_SI1823 +MSLB0_SI563 +MSLB0_SX113 +MSLB0_SX203 +MSLB0_SX23 +MSLB0_SX293 +MSLB0_SX383 +MSTK0_SA1 +MSTK0_SA2 +MSTK0_SI1024 +MSTK0_SI2222 +MSTK0_SI2284 +MSTK0_SX124 +MSTK0_SX214 +MSTK0_SX304 +MSTK0_SX34 +MSTK0_SX394 +MTAA0_SA1 +MTAA0_SA2 +MTAA0_SI1285 +MTAA0_SI1915 +MTAA0_SI596 +MTAA0_SX115 +MTAA0_SX205 +MTAA0_SX25 +MTAA0_SX295 +MTAA0_SX385 +MTAS1_SA1 +MTAS1_SA2 +MTAS1_SI1473 +MTAS1_SI2098 +MTAS1_SI838 +MTAS1_SX118 +MTAS1_SX208 +MTAS1_SX28 +MTAS1_SX298 +MTAS1_SX388 +MTDT0_SA1 +MTDT0_SA2 +MTDT0_SI1994 +MTDT0_SI2254 +MTDT0_SI994 +MTDT0_SX184 +MTDT0_SX274 +MTDT0_SX364 +MTDT0_SX4 +MTDT0_SX94 +MTEB0_SA1 +MTEB0_SA2 +MTEB0_SI1133 +MTEB0_SI2064 +MTEB0_SI503 +MTEB0_SX143 +MTEB0_SX233 +MTEB0_SX323 +MTEB0_SX413 +MTEB0_SX53 +MTHC0_SA1 +MTHC0_SA2 +MTHC0_SI1015 +MTHC0_SI1645 +MTHC0_SI2275 +MTHC0_SX115 +MTHC0_SX205 +MTHC0_SX25 +MTHC0_SX295 +MTHC0_SX385 +MTLS0_SA1 +MTLS0_SA2 +MTLS0_SI1370 +MTLS0_SI2000 +MTLS0_SI740 +MTLS0_SX110 +MTLS0_SX20 +MTLS0_SX200 +MTLS0_SX290 +MTLS0_SX380 +MTMR0_SA1 +MTMR0_SA2 +MTMR0_SI1303 +MTMR0_SI1933 +MTMR0_SI673 +MTMR0_SX133 +MTMR0_SX223 +MTMR0_SX313 +MTMR0_SX403 +MTMR0_SX43 +MTWH0_SA1 +MTWH0_SA2 +MTWH0_SI1190 +MTWH0_SI1629 +MTWH0_SI1820 +MTWH0_SX110 +MTWH0_SX20 +MTWH0_SX200 +MTWH0_SX290 +MTWH0_SX380 +MWBT0_SA1 +MWBT0_SA2 +MWBT0_SI1553 +MWBT0_SI2183 +MWBT0_SI923 +MWBT0_SX113 +MWBT0_SX203 +MWBT0_SX23 +MWBT0_SX293 +MWBT0_SX383 +MWEW0_SA1 +MWEW0_SA2 +MWEW0_SI1361 +MWEW0_SI1991 +MWEW0_SI731 +MWEW0_SX101 +MWEW0_SX11 +MWEW0_SX191 +MWEW0_SX281 +MWEW0_SX371 +MWJG0_SA1 +MWJG0_SA2 +MWJG0_SI1124 +MWJG0_SI1754 +MWJG0_SI494 +MWJG0_SX134 +MWJG0_SX224 +MWJG0_SX314 +MWJG0_SX404 
+MWJG0_SX44 +MWVW0_SA1 +MWVW0_SA2 +MWVW0_SI1476 +MWVW0_SI2106 +MWVW0_SI846 +MWVW0_SX126 +MWVW0_SX216 +MWVW0_SX306 +MWVW0_SX36 +MWVW0_SX396
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train.uid
new file mode 100644
index 0000000..35b02e7
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train.uid
@@ -0,0 +1,3000 @@
+FAEM0_SA1 +FAEM0_SA2 +FAEM0_SI2022 +FAEM0_SX132 +FAEM0_SX222 +FAEM0_SX312 +FAEM0_SX402 +FAJW0_SA2 +FAJW0_SI1893 +FAJW0_SX183 +FAJW0_SX273 +FAJW0_SX363 +FALK0_SA1 +FALK0_SA2 +FALK0_SI1086 +FALK0_SI456 +FALK0_SX276 +FALK0_SX366 +FALK0_SX96 +FALR0_SA1 +FALR0_SA2 +FALR0_SI1955 +FALR0_SI695 +FALR0_SX155 +FALR0_SX245 +FALR0_SX425 +FALR0_SX65 +FAPB0_SA1 +FAPB0_SA2 +FAPB0_SI1693 +FAPB0_SX163 +FAPB0_SX253 +FAPB0_SX343 +FAPB0_SX73 +FBAS0_SA2 +FBAS0_SI1387 +FBAS0_SX127 +FBAS0_SX307 +FBAS0_SX37 +FBAS0_SX397 +FBCG1_SA2 +FBCG1_SI1612 +FBCG1_SI2242 +FBCG1_SI982 +FBCG1_SX262 +FBCG1_SX82 +FBCH0_SA1 +FBCH0_SA2 +FBCH0_SI1586 +FBCH0_SI956 +FBCH0_SX146 +FBCH0_SX326 +FBCH0_SX56 +FBJL0_SA1 +FBJL0_SA2 +FBJL0_SI1552 +FBJL0_SI2182 +FBJL0_SX112 +FBJL0_SX202 +FBJL0_SX22 +FBJL0_SX292 +FBJL0_SX382 +FBLV0_SA2 +FBLV0_SI2318 +FBLV0_SX158 +FBLV0_SX248 +FBLV0_SX428 +FBMH0_SA2 +FBMH0_SI1766 +FBMH0_SX146 +FBMH0_SX236 +FBMH0_SX326 +FBMH0_SX416 +FBMH0_SX56 +FBMJ0_SA2 +FBMJ0_SX156 +FBMJ0_SX246 +FBMJ0_SX426 +FBMJ0_SX66 +FCAG0_SA2 +FCAG0_SI1503 +FCAG0_SI1641 +FCAG0_SI2133 +FCAG0_SX333 +FCAG0_SX423 +FCAG0_SX63 +FCAJ0_SA1 +FCAJ0_SA2 +FCAJ0_SI1804 +FCAJ0_SI849 +FCAJ0_SX129 +FCAJ0_SX219 +FCAJ0_SX39 +FCAJ0_SX399 +FCDR1_SA1 +FCDR1_SA2 +FCDR1_SX16 +FCDR1_SX376 +FCEG0_SA1 +FCEG0_SI1248 +FCEG0_SI1878 +FCEG0_SI618 +FCEG0_SX168 +FCEG0_SX258 +FCEG0_SX348 +FCEG0_SX438 +FCEG0_SX78 +FCJF0_SA2 +FCJF0_SI1027 +FCJF0_SI1657 +FCJF0_SI648 +FCJF0_SX217 +FCJF0_SX307 +FCJF0_SX37 +FCJF0_SX397 +FCJS0_SA1 +FCJS0_SA2 +FCJS0_SI977 +FCJS0_SX167 +FCJS0_SX347 +FCJS0_SX437 +FCJS0_SX77 +FCKE0_SA1 +FCKE0_SI1111 +FCKE0_SX211 +FCKE0_SX301 +FCKE0_SX31 +FCKE0_SX391 +FCLT0_SA1 +FCLT0_SA2 +FCLT0_SI1438 +FCLT0_SX178 +FCLT0_SX268 +FCLT0_SX358 +FCMG0_SA1 +FCMG0_SI1242 +FCMG0_SX162 +FCMG0_SX252 +FCMG0_SX342 +FCMM0_SI1083 +FCMM0_SI453 +FCMM0_SX273 +FCMM0_SX363 +FCMM0_SX93 +FCRZ0_SA1 +FCRZ0_SA2 +FCRZ0_SI1913 +FCRZ0_SI793 +FCRZ0_SX163 +FCRZ0_SX253 +FCRZ0_SX343 +FCRZ0_SX73 +FCYL0_SA2 +FCYL0_SI1297 +FCYL0_SI1927 +FCYL0_SX127 +FCYL0_SX217 +FCYL0_SX397 +FDAS1_SA1 +FDAS1_SA2 +FDAS1_SX111 +FDAS1_SX21 +FDAS1_SX291 +FDAW0_SA1 +FDAW0_SA2 +FDAW0_SX146 +FDAW0_SX236 +FDAW0_SX326 +FDAW0_SX416 +FDAW0_SX56 +FDFB0_SI1318 +FDFB0_SI1948 +FDFB0_SX148 +FDFB0_SX238 +FDFB0_SX328 +FDFB0_SX418 +FDJH0_SA1 +FDJH0_SA2 +FDJH0_SI1565 +FDJH0_SI2195 +FDJH0_SX125 +FDJH0_SX215 +FDJH0_SX35 +FDJH0_SX395 +FDKN0_SA1 +FDKN0_SA2 +FDKN0_SI1081 +FDKN0_SI1711 +FDKN0_SX271 +FDKN0_SX361 +FDKN0_SX91 +FDML0_SA1 +FDML0_SI1149 +FDML0_SI1779 +FDML0_SI2075 +FDML0_SX339 +FDML0_SX69 +FDMY0_SI1197 +FDMY0_SX117 +FDMY0_SX207 +FDMY0_SX297 +FDNC0_SA1 +FDNC0_SA2 +FDNC0_SI2287 +FDNC0_SX108 +FDNC0_SX18 +FDNC0_SX378 +FDTD0_SA2 +FDTD0_SI1561 +FDTD0_SI2191 +FDTD0_SI931 +FDTD0_SX121 +FDTD0_SX301 +FDTD0_SX391 +FDXW0_SA2 +FDXW0_SI1511 +FDXW0_SI2141 +FDXW0_SI881 +FDXW0_SX161 +FDXW0_SX431 +FEAC0_SA1 +FEAC0_SA2 +FEAC0_SI1245 +FEAC0_SI1875 +FEAC0_SX255 +FEAC0_SX345 +FEAC0_SX435 +FEAR0_SA1 +FEAR0_SA2 +FEAR0_SI1252 +FEAR0_SI1882 +FEAR0_SX172 +FEAR0_SX262 +FEAR0_SX442 +FEAR0_SX82 +FECD0_SA2 +FECD0_SI2048 +FECD0_SX158 +FECD0_SX248 +FECD0_SX338 +FECD0_SX428 +FEEH0_SA2
+FEEH0_SI1112 +FEEH0_SX212 +FEEH0_SX302 +FEEH0_SX32 +FEEH0_SX392 +FEME0_SA2 +FEME0_SI1505 +FEME0_SI2135 +FEME0_SX245 +FEME0_SX425 +FETB0_SA2 +FETB0_SI1778 +FETB0_SI518 +FETB0_SX248 +FETB0_SX338 +FETB0_SX428 +FETB0_SX68 +FEXM0_SA2 +FEXM0_SI1731 +FEXM0_SX111 +FEXM0_SX201 +FEXM0_SX291 +FEXM0_SX381 +FGCS0_SA1 +FGCS0_SA2 +FGCS0_SI1486 +FGCS0_SI2116 +FGCS0_SI856 +FGCS0_SX46 +FGDP0_SA2 +FGDP0_SI1618 +FGDP0_SI2248 +FGDP0_SX178 +FGDP0_SX268 +FGDP0_SX358 +FGDP0_SX448 +FGMB0_SA1 +FGMB0_SA2 +FGMB0_SI515 +FGMB0_SX155 +FGMB0_SX425 +FGMB0_SX65 +FGRW0_SA2 +FGRW0_SI1782 +FGRW0_SI1990 +FGRW0_SX252 +FGRW0_SX342 +FGRW0_SX72 +FHLM0_SA1 +FHLM0_SA2 +FHLM0_SI1560 +FHLM0_SI2190 +FHLM0_SI930 +FHLM0_SX210 +FHLM0_SX300 +FHXS0_SI2335 +FHXS0_SX265 +FHXS0_SX355 +FHXS0_SX85 +FJDM2_SI1582 +FJDM2_SI1964 +FJDM2_SI2212 +FJDM2_SX322 +FJDM2_SX412 +FJEN0_SA2 +FJEN0_SI1047 +FJEN0_SI1677 +FJEN0_SI2307 +FJEN0_SX147 +FJEN0_SX237 +FJEN0_SX57 +FJHK0_SA1 +FJHK0_SA2 +FJHK0_SI1022 +FJHK0_SI1652 +FJHK0_SX122 +FJHK0_SX212 +FJHK0_SX32 +FJHK0_SX392 +FJKL0_SA1 +FJKL0_SA2 +FJKL0_SI1562 +FJKL0_SI2192 +FJKL0_SX122 +FJKL0_SX302 +FJKL0_SX32 +FJLG0_SA1 +FJLG0_SA2 +FJLG0_SI1506 +FJLG0_SX179 +FJLG0_SX269 +FJLG0_SX359 +FJLG0_SX449 +FJLG0_SX89 +FJLR0_SA2 +FJLR0_SI1861 +FJLR0_SI601 +FJLR0_SX151 +FJLR0_SX241 +FJLR0_SX331 +FJLR0_SX421 +FJLR0_SX61 +FJRB0_SA1 +FJRB0_SA2 +FJRB0_SI1302 +FJRB0_SI1932 +FJRB0_SI672 +FJRB0_SX132 +FJRB0_SX222 +FJRB0_SX312 +FJRB0_SX42 +FJRP1_SA2 +FJRP1_SI802 +FJRP1_SX172 +FJRP1_SX442 +FJSK0_SA2 +FJSK0_SI1682 +FJSK0_SI2312 +FJSK0_SX152 +FJSK0_SX242 +FJSK0_SX332 +FJSK0_SX422 +FJSK0_SX62 +FJSP0_SA1 +FJSP0_SA2 +FJSP0_SI1763 +FJSP0_SI804 +FJSP0_SX174 +FJSP0_SX84 +FJWB1_SA2 +FJWB1_SI2055 +FJWB1_SI795 +FJWB1_SX165 +FJWB1_SX255 +FJWB1_SX75 +FJXM0_SA2 +FJXM0_SI1211 +FJXM0_SI1971 +FJXM0_SX131 +FJXM0_SX221 +FJXP0_SA2 +FJXP0_SI492 +FJXP0_SX222 +FJXP0_SX312 +FJXP0_SX402 +FJXP0_SX42 +FKAA0_SA2 +FKAA0_SI1208 +FKAA0_SI1838 +FKAA0_SI578 +FKAA0_SX218 +FKAA0_SX308 +FKAA0_SX38 +FKDE0_SA2 +FKDE0_SI2221 +FKDE0_SX331 +FKDW0_SA1 +FKDW0_SA2 +FKDW0_SI577 +FKDW0_SX127 +FKDW0_SX217 +FKDW0_SX307 +FKDW0_SX37 +FKFB0_SA1 +FKFB0_SI2238 +FKFB0_SI978 +FKFB0_SX168 +FKFB0_SX258 +FKKH0_SI660 +FKKH0_SX210 +FKKH0_SX30 +FKKH0_SX300 +FKLC0_SA1 +FKLC0_SA2 +FKLC0_SI1615 +FKLC0_SI2245 +FKLC0_SX265 +FKLC0_SX445 +FKLC0_SX85 +FKLC1_SA1 +FKLC1_SA2 +FKLC1_SI1678 +FKLC1_SX148 +FKLC1_SX58 +FKLH0_SA1 +FKLH0_SI1887 +FKLH0_SI627 +FKLH0_SX267 +FKLH0_SX357 +FKLH0_SX447 +FKLH0_SX87 +FKSR0_SI1117 +FKSR0_SX161 +FKSR0_SX37 +FKSR0_SX397 +FLAC0_SA1 +FLAC0_SA2 +FLAC0_SI2161 +FLAC0_SI901 +FLAC0_SX181 +FLAC0_SX271 +FLAC0_SX361 +FLAC0_SX91 +FLAG0_SA1 +FLAG0_SI2094 +FLAG0_SX294 +FLEH0_SA1 +FLEH0_SA2 +FLEH0_SX151 +FLEH0_SX241 +FLEH0_SX421 +FLEH0_SX61 +FLET0_SA2 +FLET0_SI1137 +FLET0_SI1767 +FLET0_SX147 +FLET0_SX237 +FLET0_SX277 +FLET0_SX417 +FLET0_SX57 +FLHD0_SA1 +FLHD0_SA2 +FLHD0_SI1344 +FLHD0_SI1974 +FLHD0_SX174 +FLHD0_SX264 +FLHD0_SX444 +FLHD0_SX84 +FLJA0_SA2 +FLJA0_SI1708 +FLJA0_SX268 +FLJA0_SX358 +FLJA0_SX448 +FLJA0_SX88 +FLJD0_SA1 +FLJD0_SA2 +FLJD0_SI2146 +FLJD0_SX166 +FLJD0_SX256 +FLJD0_SX346 +FLJD0_SX436 +FLJG0_SA1 +FLJG0_SI1611 +FLJG0_SI2241 +FLJG0_SX261 +FLJG0_SX441 +FLJG0_SX81 +FLKM0_SI1880 +FLKM0_SX116 +FLMA0_SA2 +FLMA0_SI1243 +FLMA0_SI1873 +FLMA0_SX163 +FLMA0_SX253 +FLMA0_SX343 +FLMC0_SA1 +FLMC0_SA2 +FLMC0_SI2002 +FLMC0_SI742 +FLMC0_SX112 +FLMC0_SX292 +FLMC0_SX336 +FLMC0_SX382 +FLMK0_SA2 +FLMK0_SI2295 +FLMK0_SX135 +FLMK0_SX225 +FLMK0_SX45 +FLOD0_SA1 +FLOD0_SA2 +FLOD0_SI1287 +FLOD0_SI657 +FLOD0_SX207 +FLOD0_SX387 +FLTM0_SA2 +FLTM0_SI1700 +FLTM0_SX260 +FLTM0_SX80 +FMAH1_SA1 +FMAH1_SI1509 
+FMAH1_SI2139 +FMAH1_SX249 +FMAH1_SX339 +FMAH1_SX429 +FMAH1_SX69 +FMBG0_SA1 +FMBG0_SI1790 +FMBG0_SX260 +FMBG0_SX3 +FMBG0_SX350 +FMBG0_SX440 +FMBG0_SX80 +FMEM0_SA2 +FMEM0_SI1377 +FMEM0_SI2007 +FMEM0_SX117 +FMEM0_SX207 +FMEM0_SX297 +FMJB0_SA1 +FMJB0_SA2 +FMJB0_SI1807 +FMJB0_SX187 +FMJB0_SX277 +FMJB0_SX367 +FMJB0_SX7 +FMJF0_SA1 +FMJF0_SI1254 +FMJF0_SI1884 +FMJF0_SX264 +FMJF0_SX354 +FMJF0_SX444 +FMJU0_SA1 +FMJU0_SA2 +FMJU0_SI2019 +FMJU0_SI759 +FMJU0_SX129 +FMJU0_SX219 +FMJU0_SX39 +FMKC0_SA1 +FMKC0_SA2 +FMKC0_SI1072 +FMKC0_SX172 +FMKC0_SX262 +FMKC0_SX352 +FMKF0_SA1 +FMKF0_SA2 +FMKF0_SI1536 +FMKF0_SI906 +FMKF0_SX276 +FMKF0_SX366 +FMKF0_SX6 +FMKF0_SX96 +FMMH0_SA1 +FMMH0_SA2 +FMMH0_SI1537 +FMMH0_SI2167 +FMMH0_SI907 +FMMH0_SX187 +FMMH0_SX367 +FMMH0_SX420 +FMMH0_SX7 +FMMH0_SX97 +FMPG0_SI1602 +FMPG0_SI2232 +FMPG0_SX252 +FMPG0_SX72 +FNKL0_SA1 +FNKL0_SA2 +FNKL0_SI2152 +FNKL0_SX172 +FNKL0_SX196 +FNKL0_SX262 +FNKL0_SX442 +FNKL0_SX82 +FNTB0_SA1 +FNTB0_SA2 +FNTB0_SX123 +FNTB0_SX213 +FNTB0_SX33 +FNTB0_SX393 +FPAB1_SA2 +FPAB1_SX121 +FPAB1_SX301 +FPAB1_SX31 +FPAB1_SX391 +FPAC0_SA1 +FPAC0_SI2011 +FPAC0_SX121 +FPAC0_SX211 +FPAC0_SX301 +FPAC0_SX31 +FPAC0_SX391 +FPAD0_SA1 +FPAD0_SI1346 +FPAD0_SI1976 +FPAD0_SX266 +FPAD0_SX446 +FPAF0_SI1684 +FPAF0_SI2314 +FPAF0_SX244 +FPAF0_SX334 +FPAF0_SX424 +FPAF0_SX64 +FPAZ0_SI1593 +FPAZ0_SX153 +FPAZ0_SX27 +FPAZ0_SX423 +FPAZ0_SX63 +FPJF0_SA2 +FPJF0_SI1046 +FPJF0_SI1676 +FPJF0_SX236 +FPJF0_SX326 +FPLS0_SA1 +FPLS0_SA2 +FPLS0_SI2220 +FPLS0_SX150 +FPLS0_SX240 +FPLS0_SX3 +FPLS0_SX60 +FPMY0_SA2 +FPMY0_SI1783 +FPMY0_SX163 +FPMY0_SX196 +FPMY0_SX253 +FPMY0_SX73 +FREH0_SI1315 +FREH0_SI685 +FREH0_SX145 +FREH0_SX235 +FREH0_SX325 +FREH0_SX55 +FRJB0_SA1 +FRJB0_SA2 +FRJB0_SI1427 +FRJB0_SI1470 +FRJB0_SI1794 +FRJB0_SX167 +FRJB0_SX257 +FRJB0_SX437 +FRJB0_SX77 +FRLL0_SA1 +FRLL0_SA2 +FRLL0_SI1514 +FRLL0_SI884 +FRLL0_SX164 +FRLL0_SX254 +FRLL0_SX344 +FRLL0_SX74 +FSAG0_SA2 +FSAG0_SI1953 +FSAG0_SI693 +FSAG0_SX63 +FSAH0_SI1244 +FSAH0_SI1874 +FSAH0_SX344 +FSAH0_SX74 +FSAK0_SA1 +FSAK0_SA2 +FSAK0_SI1930 +FSAK0_SI670 +FSAK0_SX130 +FSAK0_SX220 +FSAK0_SX310 +FSAK0_SX40 +FSAK0_SX400 +FSBK0_SA1 +FSBK0_SI1699 +FSBK0_SI2329 +FSBK0_SX259 +FSBK0_SX439 +FSBK0_SX79 +FSCN0_SI1886 +FSCN0_SX356 +FSDC0_SA1 +FSDC0_SI1942 +FSDC0_SI2234 +FSDC0_SX232 +FSDC0_SX412 +FSDJ0_SA1 +FSDJ0_SA2 +FSDJ0_SI1745 +FSDJ0_SX125 +FSDJ0_SX35 +FSGF0_SA1 +FSGF0_SA2 +FSGF0_SI1557 +FSGF0_SX207 +FSGF0_SX27 +FSGF0_SX297 +FSGF0_SX387 +FSJG0_SI1570 +FSJG0_SI2200 +FSJG0_SX310 +FSJK1_SA1 +FSJK1_SI1025 +FSJK1_SI2285 +FSJK1_SI696 +FSJK1_SX215 +FSJK1_SX305 +FSJK1_SX395 +FSJS0_SA2 +FSJS0_SI1171 +FSJS0_SI1801 +FSJS0_SI541 +FSJS0_SX271 +FSJS0_SX361 +FSJS0_SX91 +FSJW0_SA1 +FSJW0_SA2 +FSJW0_SI703 +FSJW0_SX163 +FSJW0_SX253 +FSJW0_SX343 +FSJW0_SX73 +FSKC0_SA1 +FSKC0_SA2 +FSKC0_SI2046 +FSKC0_SX156 +FSKC0_SX336 +FSKC0_SX426 +FSKC0_SX66 +FSKL0_SA1 +FSKL0_SA2 +FSKL0_SI2159 +FSKL0_SI899 +FSKL0_SX179 +FSKL0_SX269 +FSKL0_SX359 +FSKL0_SX89 +FSKP0_SA1 +FSKP0_SI1728 +FSKP0_SI468 +FSKP0_SX108 +FSKP0_SX18 +FSKP0_SX198 +FSKP0_SX288 +FSKP0_SX378 +FSLS0_SA1 +FSLS0_SA2 +FSLS0_SI1056 +FSLS0_SI1686 +FSLS0_SI2316 +FSLS0_SX202 +FSLS0_SX246 +FSLS0_SX66 +FSMA0_SA1 +FSMA0_SI1621 +FSMA0_SI2251 +FSMA0_SX271 +FSMA0_SX361 +FSMA0_SX91 +FSMM0_SA1 +FSMM0_SA2 +FSMM0_SI1314 +FSMM0_SI1944 +FSMM0_SI684 +FSMM0_SX414 +FSMM0_SX54 +FSMS1_SA1 +FSMS1_SA2 +FSMS1_SI1504 +FSMS1_SI2134 +FSMS1_SI874 +FSMS1_SX154 +FSMS1_SX334 +FSMS1_SX64 +FSPM0_SA1 +FSPM0_SI1871 +FSPM0_SI611 +FSPM0_SX341 +FSPM0_SX431 +FSRH0_SA1 +FSRH0_SA2 +FSRH0_SI1719 +FSRH0_SX131 +FSRH0_SX41 +FSSB0_SA1 +FSSB0_SA2 +FSSB0_SI1082 
+FSSB0_SI2342 +FSSB0_SX182 +FSSB0_SX272 +FSSB0_SX452 +FSSB0_SX92 +FTAJ0_SA1 +FTAJ0_SA2 +FTAJ0_SI1329 +FTAJ0_SI474 +FTAJ0_SX339 +FTAJ0_SX69 +FTBR0_SA1 +FTBR0_SA2 +FTBR0_SI2181 +FTBR0_SX111 +FTBR0_SX201 +FTBR0_SX291 +FTBR0_SX381 +FTBW0_SA2 +FTBW0_SI1345 +FTBW0_SI1975 +FTBW0_SX265 +FTBW0_SX355 +FTBW0_SX445 +FTBW0_SX85 +FTLG0_SA1 +FTLG0_SA2 +FTLG0_SI840 +FTLG0_SX123 +FTLG0_SX213 +FTLG0_SX303 +FTLG0_SX33 +FTLG0_SX393 +FTMG0_SA1 +FTMG0_SA2 +FTMG0_SX182 +FTMG0_SX272 +FTMG0_SX362 +FTMG0_SX92 +FVFB0_SA1 +FVFB0_SI1032 +FVFB0_SI2292 +FVFB0_SX222 +FVFB0_SX312 +FVFB0_SX402 +FVKB0_SA2 +FVKB0_SI1159 +FVKB0_SI1789 +FVKB0_SI529 +FVKB0_SX169 +FVKB0_SX259 +FVKB0_SX439 +FVKB0_SX79 +FVMH0_SA1 +FVMH0_SI2096 +FVMH0_SX206 +FVMH0_SX296 +FVMH0_SX386 +MABC0_SA1 +MABC0_SA2 +MABC0_SX151 +MABC0_SX241 +MABC0_SX331 +MABC0_SX421 +MABC0_SX61 +MADC0_SA1 +MADC0_SA2 +MADC0_SI1997 +MADC0_SX17 +MADC0_SX197 +MADC0_SX287 +MADD0_SA1 +MADD0_SI1798 +MADD0_SI538 +MADD0_SX358 +MADD0_SX448 +MAEB0_SA1 +MAEB0_SA2 +MAEB0_SI2250 +MAEB0_SI990 +MAEB0_SX180 +MAEB0_SX270 +MAEB0_SX360 +MAEB0_SX90 +MAEO0_SA2 +MAEO0_SI1655 +MAEO0_SI1956 +MAEO0_SX156 +MAEO0_SX246 +MAEO0_SX336 +MAEO0_SX426 +MAEO0_SX66 +MAFM0_SA1 +MAFM0_SA2 +MAFM0_SI1569 +MAFM0_SI2199 +MAFM0_SX219 +MAFM0_SX39 +MAFM0_SX399 +MAJP0_SA1 +MAJP0_SI1074 +MAJP0_SI2334 +MAJP0_SX264 +MAJP0_SX354 +MAJP0_SX444 +MAJP0_SX84 +MAKB0_SA1 +MAKB0_SX206 +MAKB0_SX296 +MAKR0_SA1 +MAKR0_SA2 +MAKR0_SI1352 +MAKR0_SI1982 +MAKR0_SI722 +MAKR0_SX182 +MAKR0_SX272 +MAKR0_SX452 +MAPV0_SA1 +MAPV0_SA2 +MAPV0_SI1923 +MAPV0_SX123 +MAPV0_SX303 +MAPV0_SX33 +MAPV0_SX393 +MARC0_SA1 +MARC0_SI1188 +MARC0_SI1818 +MARC0_SI558 +MARC0_SX288 +MARC0_SX378 +MARW0_SA1 +MARW0_SA2 +MARW0_SI1276 +MARW0_SI646 +MARW0_SX106 +MARW0_SX16 +MARW0_SX376 +MBAR0_SA2 +MBAR0_SI1319 +MBAR0_SI1949 +MBAR0_SI689 +MBAR0_SX149 +MBAR0_SX239 +MBAR0_SX329 +MBBR0_SA1 +MBBR0_SA2 +MBBR0_SI1685 +MBBR0_SX155 +MBBR0_SX245 +MBBR0_SX425 +MBCG0_SA2 +MBCG0_SI2217 +MBCG0_SX147 +MBCG0_SX237 +MBCG0_SX417 +MBCG0_SX57 +MBEF0_SA1 +MBEF0_SA2 +MBEF0_SX111 +MBEF0_SX201 +MBEF0_SX291 +MBGT0_SA1 +MBGT0_SI1341 +MBGT0_SI711 +MBGT0_SX81 +MBJV0_SA2 +MBJV0_SI1247 +MBJV0_SI1877 +MBJV0_SX167 +MBJV0_SX257 +MBJV0_SX437 +MBJV0_SX77 +MBMA0_SA1 +MBMA0_SA2 +MBMA0_SI1852 +MBMA0_SX142 +MBMA0_SX322 +MBMA0_SX412 +MBMA1_SA1 +MBMA1_SA2 +MBMA1_SI2207 +MBMA1_SX144 +MBMA1_SX234 +MBMA1_SX414 +MBML0_SA1 +MBML0_SI1799 +MBML0_SI539 +MBML0_SX179 +MBML0_SX269 +MBML0_SX359 +MBML0_SX449 +MBOM0_SA1 +MBOM0_SI1014 +MBOM0_SI1644 +MBOM0_SX114 +MBOM0_SX204 +MBOM0_SX311 +MBOM0_SX384 +MBSB0_SA2 +MBSB0_SI1353 +MBSB0_SI1983 +MBSB0_SI723 +MBSB0_SX183 +MBSB0_SX273 +MBSB0_SX363 +MBSB0_SX93 +MBTH0_SA1 +MBTH0_SI505 +MBTH0_SI757 +MBTH0_SX212 +MBTH0_SX302 +MBTH0_SX392 +MBWP0_SA1 +MBWP0_SA2 +MBWP0_SI1531 +MBWP0_SI1969 +MBWP0_SI709 +MBWP0_SX169 +MBWP0_SX259 +MBWP0_SX439 +MBWP0_SX79 +MCAE0_SA1 +MCAE0_SA2 +MCAE0_SX187 +MCAE0_SX367 +MCAE0_SX7 +MCAE0_SX97 +MCAL0_SA1 +MCAL0_SI508 +MCAL0_SX148 +MCAL0_SX238 +MCAL0_SX328 +MCAL0_SX418 +MCAL0_SX58 +MCDC0_SA2 +MCDC0_SI1292 +MCDC0_SI1922 +MCDC0_SI662 +MCDC0_SX122 +MCDC0_SX302 +MCDC0_SX32 +MCDC0_SX392 +MCDD0_SA1 +MCDD0_SI1513 +MCDD0_SI2143 +MCDD0_SX163 +MCDD0_SX343 +MCDD0_SX73 +MCDR0_SA1 +MCDR0_SA2 +MCDR0_SX164 +MCDR0_SX254 +MCDR0_SX344 +MCDR0_SX434 +MCDR0_SX74 +MCEF0_SA1 +MCEF0_SA2 +MCEF0_SI1135 +MCEF0_SI1765 +MCEF0_SX145 +MCEF0_SX325 +MCEF0_SX55 +MCEW0_SI1442 +MCEW0_SX182 +MCEW0_SX272 +MCEW0_SX92 +MCHL0_SA1 +MCHL0_SA2 +MCHL0_SI1977 +MCHL0_SX177 +MCHL0_SX267 +MCHL0_SX357 +MCHL0_SX447 +MCLK0_SA1 +MCLK0_SA2 +MCLK0_SI1660 +MCLK0_SX130 +MCLK0_SX220 +MCLK0_SX40 +MCLK0_SX400 +MCLM0_SA2 
+MCLM0_SI1456 +MCLM0_SX106 +MCLM0_SX16 +MCLM0_SX196 +MCLM0_SX286 +MCLM0_SX376 +MCPM0_SA2 +MCPM0_SI1194 +MCPM0_SI564 +MCPM0_SX204 +MCPM0_SX24 +MCRE0_SA1 +MCRE0_SA2 +MCRE0_SI1121 +MCRE0_SI1725 +MCRE0_SI1751 +MCRE0_SX131 +MCRE0_SX221 +MCRE0_SX24 +MCRE0_SX401 +MCRE0_SX41 +MCSS0_SA1 +MCSS0_SA2 +MCSS0_SX120 +MCSS0_SX210 +MCSS0_SX30 +MCSS0_SX300 +MCSS0_SX390 +MCTH0_SA2 +MCTH0_SI1209 +MCTH0_SI1839 +MCTH0_SI579 +MCTH0_SX129 +MCTH0_SX219 +MCTH0_SX309 +MCTH0_SX399 +MCTM0_SA1 +MCTM0_SA2 +MCTM0_SI720 +MCTM0_SX180 +MCTM0_SX270 +MCTM0_SX360 +MCTM0_SX450 +MCTM0_SX90 +MCXM0_SA1 +MCXM0_SA2 +MCXM0_SI1351 +MCXM0_SI1981 +MCXM0_SI721 +MCXM0_SX181 +MCXM0_SX271 +MCXM0_SX361 +MCXM0_SX451 +MDAC0_SA2 +MDAC0_SI1261 +MDAC0_SI1837 +MDAC0_SX271 +MDAC0_SX451 +MDAC0_SX91 +MDAS0_SA1 +MDAS0_SA2 +MDAS0_SI1266 +MDAS0_SX186 +MDAS0_SX21 +MDAS0_SX276 +MDAS0_SX96 +MDBB1_SA1 +MDBB1_SA2 +MDBB1_SI1006 +MDBB1_SI1636 +MDBB1_SI2056 +MDBB1_SX196 +MDBB1_SX286 +MDBP0_SA1 +MDBP0_SA2 +MDBP0_SI1158 +MDBP0_SI1788 +MDBP0_SX258 +MDBP0_SX348 +MDBP0_SX78 +MDCD0_SA1 +MDCD0_SA2 +MDCD0_SI2045 +MDCD0_SX155 +MDCD0_SX65 +MDCM0_SA1 +MDCM0_SA2 +MDCM0_SI2110 +MDCM0_SI850 +MDCM0_SX130 +MDCM0_SX220 +MDCM0_SX310 +MDDC0_SA1 +MDDC0_SA2 +MDDC0_SX249 +MDDC0_SX339 +MDDC0_SX429 +MDED0_SI1170 +MDED0_SI1800 +MDED0_SX180 +MDED0_SX270 +MDED0_SX360 +MDED0_SX450 +MDED0_SX90 +MDEF0_SA1 +MDEF0_SA2 +MDEF0_SI1563 +MDEF0_SI2193 +MDEF0_SX213 +MDEF0_SX33 +MDEF0_SX393 +MDEM0_SA2 +MDEM0_SI1868 +MDEM0_SX158 +MDEM0_SX248 +MDEM0_SX338 +MDEM0_SX68 +MDHL0_SA1 +MDHL0_SA2 +MDHL0_SI2069 +MDHL0_SI809 +MDHL0_SX179 +MDHL0_SX359 +MDHL0_SX89 +MDHS0_SX180 +MDHS0_SX270 +MDHS0_SX360 +MDHS0_SX450 +MDHS0_SX90 +MDJM0_SA1 +MDJM0_SA2 +MDJM0_SI2085 +MDJM0_SI825 +MDJM0_SX195 +MDJM0_SX285 +MDJM0_SX375 +MDKS0_SA1 +MDKS0_SA2 +MDKS0_SI1066 +MDKS0_SI1696 +MDKS0_SI2326 +MDKS0_SX256 +MDKS0_SX76 +MDLB0_SA1 +MDLB0_SI1936 +MDLB0_SI676 +MDLB0_SX226 +MDLB0_SX316 +MDLB0_SX46 +MDLC0_SA1 +MDLC0_SA2 +MDLC0_SI765 +MDLC0_SX135 +MDLC0_SX225 +MDLC0_SX315 +MDLC0_SX45 +MDLC1_SA1 +MDLC1_SX175 +MDLC1_SX265 +MDLC1_SX355 +MDLC1_SX85 +MDLC2_SA1 +MDLC2_SA2 +MDLC2_SI1614 +MDLC2_SI984 +MDLC2_SX174 +MDLC2_SX264 +MDLC2_SX444 +MDLC2_SX84 +MDLH0_SA1 +MDLH0_SI1960 +MDLH0_SI574 +MDLH0_SI700 +MDLH0_SX250 +MDLH0_SX340 +MDLH0_SX70 +MDLM0_SA1 +MDLM0_SA2 +MDLM0_SX244 +MDLM0_SX334 +MDLM0_SX64 +MDLR0_SI1233 +MDLR0_SX243 +MDLR0_SX423 +MDLR0_SX63 +MDLR1_SI1299 +MDLR1_SI1929 +MDLR1_SX129 +MDLR1_SX219 +MDLR1_SX309 +MDLR1_SX39 +MDLR1_SX399 +MDMA0_SA1 +MDMA0_SA2 +MDMA0_SI1238 +MDMA0_SI2060 +MDMT0_SI2341 +MDMT0_SI572 +MDMT0_SX212 +MDMT0_SX302 +MDMT0_SX392 +MDNS0_SA1 +MDNS0_SX111 +MDNS0_SX291 +MDNS0_SX381 +MDPB0_SA1 +MDPB0_SA2 +MDPB0_SI2126 +MDPB0_SX146 +MDPB0_SX236 +MDPB0_SX326 +MDPB0_SX56 +MDPK0_SA1 +MDPK0_SA2 +MDPK0_SI1683 +MDPK0_SI552 +MDPK0_SX153 +MDPK0_SX243 +MDPK0_SX63 +MDPS0_SA1 +MDPS0_SA2 +MDPS0_SI1651 +MDPS0_SI1979 +MDPS0_SX179 +MDPS0_SX269 +MDPS0_SX449 +MDPS0_SX89 +MDRD0_SA2 +MDRD0_SI1382 +MDRD0_SI2012 +MDRD0_SX122 +MDRD0_SX212 +MDRD0_SX302 +MDRD0_SX392 +MDSJ0_SA1 +MDSJ0_SA2 +MDSJ0_SI832 +MDSJ0_SX112 +MDSJ0_SX22 +MDSJ0_SX292 +MDSJ0_SX382 +MDSS0_SA1 +MDSS0_SI1881 +MDSS0_SI2087 +MDSS0_SI621 +MDSS0_SX171 +MDSS0_SX261 +MDSS0_SX351 +MDSS0_SX81 +MDSS1_SA2 +MDSS1_SI1713 +MDSS1_SX247 +MDSS1_SX337 +MDSS1_SX427 +MDTB0_SA1 +MDTB0_SA2 +MDTB0_SI570 +MDTB0_SX210 +MDTB0_SX300 +MDTB0_SX321 +MDTB0_SX390 +MDWD0_SA1 +MDWD0_SI1890 +MDWD0_SI557 +MDWD0_SX180 +MDWD0_SX360 +MDWD0_SX450 +MDWH0_SA2 +MDWH0_SI1925 +MDWH0_SX125 +MDWH0_SX35 +MDWH0_SX395 +MDWM0_SI1546 +MDWM0_SI2176 +MDWM0_SX106 +MDWM0_SX376 +MDWM0_SX433 +MEAL0_SA1 +MEAL0_SI1547 +MEAL0_SI917 +MEAL0_SX197 
+MEAL0_SX287 +MEAL0_SX377 +MEDR0_SI744 +MEDR0_SX114 +MEDR0_SX204 +MEDR0_SX24 +MEDR0_SX294 +MEDR0_SX384 +MEFG0_SA2 +MEFG0_SI465 +MEFG0_SX105 +MEFG0_SX15 +MEFG0_SX195 +MEFG0_SX285 +MEFG0_SX375 +MEGJ0_SI1967 +MEGJ0_SX437 +MEGJ0_SX77 +MEJL0_SA2 +MEJL0_SI1592 +MEJL0_SI1654 +MEJL0_SI962 +MEJL0_SX332 +MEJL0_SX422 +MEJL0_SX62 +MEJS0_SA1 +MEJS0_SA2 +MEJS0_SI1870 +MEJS0_SX250 +MEJS0_SX430 +MEJS0_SX70 +MESG0_SA1 +MESG0_SA2 +MESG0_SI1332 +MESG0_SI1962 +MESG0_SX162 +MESG0_SX252 +MESG0_SX342 +MESG0_SX72 +MESJ0_SA1 +MESJ0_SA2 +MESJ0_SI2257 +MESJ0_SI997 +MESJ0_SX277 +MESJ0_SX367 +MESJ0_SX7 +MEWM0_SA1 +MEWM0_SA2 +MEWM0_SI1348 +MEWM0_SI1978 +MEWM0_SX268 +MEWM0_SX358 +MEWM0_SX448 +MFER0_SA1 +MFER0_SA2 +MFER0_SI1492 +MFER0_SI2122 +MFER0_SX232 +MFER0_SX322 +MFER0_SX412 +MFER0_SX52 +MFMC0_SA1 +MFMC0_SA2 +MFMC0_SI1132 +MFMC0_SI1762 +MFMC0_SI502 +MFMC0_SX142 +MFMC0_SX232 +MFMC0_SX322 +MFMC0_SX412 +MFMC0_SX52 +MFRM0_SA1 +MFRM0_SA2 +MFRM0_SI1155 +MFRM0_SI1717 +MFRM0_SI1785 +MFRM0_SX165 +MFRM0_SX255 +MFRM0_SX75 +MFWK0_SA1 +MFWK0_SA2 +MFWK0_SI1249 +MFWK0_SI619 +MFWK0_SX259 +MFWK0_SX439 +MFWK0_SX79 +MFXS0_SA1 +MFXS0_SA2 +MFXS0_SI1674 +MFXS0_SI2225 +MFXS0_SI2304 +MFXS0_SX144 +MFXS0_SX234 +MFXS0_SX414 +MFXV0_SA1 +MFXV0_SI1635 +MFXV0_SX15 +MFXV0_SX195 +MFXV0_SX285 +MFXV0_SX375 +MGAF0_SA2 +MGAF0_SI1912 +MGAF0_SI652 +MGAF0_SX112 +MGAF0_SX202 +MGAF0_SX292 +MGAG0_SA1 +MGAG0_SI1321 +MGAG0_SI645 +MGAG0_SX151 +MGAG0_SX241 +MGAG0_SX331 +MGAG0_SX421 +MGAG0_SX61 +MGAK0_SA1 +MGAK0_SA2 +MGAK0_SI1666 +MGAK0_SI2296 +MGAK0_SX316 +MGAK0_SX406 +MGAR0_SA1 +MGAR0_SA2 +MGAR0_SI1212 +MGAR0_SI1694 +MGAR0_SI1842 +MGAR0_SX222 +MGAR0_SX402 +MGAR0_SX42 +MGAW0_SA1 +MGAW0_SA2 +MGAW0_SI1802 +MGAW0_SX265 +MGAW0_SX355 +MGAW0_SX445 +MGAW0_SX85 +MGES0_SA2 +MGES0_SI1481 +MGES0_SX131 +MGES0_SX221 +MGES0_SX401 +MGES0_SX41 +MGJC0_SA1 +MGJC0_SI1256 +MGJC0_SI1335 +MGJC0_SI1965 +MGJC0_SX165 +MGJC0_SX255 +MGJC0_SX345 +MGRL0_SA1 +MGRL0_SA2 +MGRL0_SI1497 +MGRL0_SX237 +MGRL0_SX417 +MGRL0_SX57 +MGRP0_SA1 +MGRP0_SI1947 +MGRP0_SI687 +MGRP0_SX147 +MGRP0_SX237 +MGRP0_SX417 +MGRP0_SX57 +MGSH0_SA1 +MGSH0_SX186 +MGSH0_SX96 +MGSL0_SA2 +MGSL0_SI1164 +MGSL0_SX174 +MGSL0_SX354 +MGSL0_SX444 +MGSL0_SX84 +MGXP0_SA1 +MGXP0_SA2 +MGXP0_SI457 +MGXP0_SX277 +MGXP0_SX367 +MGXP0_SX97 +MHBS0_SA1 +MHBS0_SA2 +MHBS0_SI1575 +MHBS0_SI2205 +MHBS0_SX135 +MHBS0_SX225 +MHBS0_SX405 +MHIT0_SA2 +MHIT0_SI1613 +MHIT0_SI2243 +MHIT0_SX173 +MHIT0_SX263 +MHIT0_SX353 +MHIT0_SX443 +MHIT0_SX83 +MHJB0_SA2 +MHJB0_SI1647 +MHJB0_SI2277 +MHJB0_SX117 +MHJB0_SX207 +MHJB0_SX27 +MHJB0_SX297 +MHJB0_SX387 +MHMG0_SA1 +MHMG0_SA2 +MHMG0_SI1365 +MHMG0_SI1995 +MHMG0_SX105 +MHMG0_SX15 +MHMG0_SX285 +MHMG0_SX375 +MHMR0_SA2 +MHMR0_SI1119 +MHMR0_SX129 +MHMR0_SX219 +MHMR0_SX309 +MHMR0_SX39 +MHMR0_SX399 +MHRM0_SA2 +MHRM0_SI1475 +MHRM0_SI2218 +MHRM0_SX238 +MHRM0_SX328 +MHRM0_SX418 +MHXL0_SA1 +MHXL0_SA2 +MHXL0_SI512 +MHXL0_SI612 +MHXL0_SX152 +MHXL0_SX332 +MHXL0_SX422 +MHXL0_SX62 +MILB0_SA1 +MILB0_SI2163 +MILB0_SI807 +MILB0_SX183 +MILB0_SX273 +MILB0_SX3 +MILB0_SX363 +MILB0_SX93 +MJAC0_SA1 +MJAC0_SA2 +MJAC0_SI1331 +MJAC0_SI2148 +MJAC0_SX341 +MJAC0_SX431 +MJAE0_SA1 +MJAE0_SA2 +MJAE0_SI1524 +MJAE0_SI1999 +MJAE0_SI2154 +MJAE0_SX264 +MJAE0_SX354 +MJAE0_SX444 +MJAI0_SI1604 +MJAI0_SX164 +MJAI0_SX254 +MJAI0_SX344 +MJAI0_SX434 +MJAI0_SX74 +MJBG0_SA1 +MJBG0_SA2 +MJBG0_SI1232 +MJBG0_SI1724 +MJBG0_SI1862 +MJBG0_SX152 +MJBG0_SX242 +MJBG0_SX332 +MJBG0_SX422 +MJDA0_SA1 +MJDA0_SA2 +MJDA0_SI1661 +MJDA0_SI2291 +MJDA0_SX131 +MJDA0_SX221 +MJDA0_SX401 +MJDA0_SX41 +MJDC0_SA1 +MJDC0_SA2 +MJDC0_SI1161 +MJDC0_SI2165 +MJDC0_SX171 +MJDC0_SX261 +MJDC0_SX351 
+MJDC0_SX441 +MJDC0_SX81 +MJDE0_SA2 +MJDE0_SX130 +MJDE0_SX310 +MJDE0_SX40 +MJDE0_SX400 +MJDG0_SA1 +MJDG0_SI1672 +MJDG0_SX142 +MJDG0_SX232 +MJDG0_SX322 +MJDG0_SX412 +MJDG0_SX52 +MJDM0_SA2 +MJDM0_SI1937 +MJDM0_SX260 +MJDM0_SX440 +MJDM0_SX80 +MJEB0_SA1 +MJEB0_SA2 +MJEB0_SI1286 +MJEB0_SI1916 +MJEB0_SX206 +MJEB0_SX26 +MJEB0_SX386 +MJEB1_SA1 +MJEB1_SI2097 +MJEB1_SX117 +MJEB1_SX27 +MJEB1_SX297 +MJEE0_SA2 +MJEE0_SI1237 +MJEE0_SI1867 +MJEE0_SI607 +MJEE0_SX157 +MJEE0_SX427 +MJEE0_SX67 +MJFH0_SA1 +MJFH0_SI1737 +MJFH0_SI477 +MJFH0_SX117 +MJFH0_SX207 +MJFH0_SX27 +MJFH0_SX297 +MJFH0_SX387 +MJFR0_SA2 +MJFR0_SI1605 +MJFR0_SI2235 +MJFR0_SI975 +MJFR0_SX165 +MJFR0_SX255 +MJFR0_SX345 +MJHI0_SA2 +MJHI0_SI555 +MJHI0_SI698 +MJHI0_SX248 +MJHI0_SX338 +MJHI0_SX428 +MJHI0_SX68 +MJJB0_SA2 +MJJB0_SI1139 +MJJB0_SI1277 +MJJB0_SI1769 +MJJB0_SX149 +MJJB0_SX329 +MJJB0_SX419 +MJJB0_SX59 +MJJJ0_SA1 +MJJJ0_SA2 +MJJJ0_SI1793 +MJJJ0_SI533 +MJJJ0_SX173 +MJJJ0_SX263 +MJJJ0_SX353 +MJJJ0_SX83 +MJJM0_SA1 +MJJM0_SI1457 +MJJM0_SX17 +MJJM0_SX197 +MJJM0_SX287 +MJJM0_SX377 +MJKR0_SA2 +MJKR0_SI1201 +MJKR0_SI1831 +MJKR0_SX121 +MJKR0_SX211 +MJKR0_SX301 +MJKR0_SX31 +MJKR0_SX391 +MJLB0_SA1 +MJLB0_SA2 +MJLB0_SI2246 +MJLB0_SI986 +MJLB0_SX266 +MJLB0_SX356 +MJLB0_SX446 +MJLB0_SX86 +MJLG1_SA1 +MJLG1_SA2 +MJLG1_SI1012 +MJLG1_SI1642 +MJLG1_SI2272 +MJLG1_SX112 +MJLG1_SX202 +MJLG1_SX22 +MJLG1_SX382 +MJLS0_SA1 +MJLS0_SA2 +MJLS0_SI1096 +MJLS0_SI466 +MJLS0_SX16 +MJLS0_SX196 +MJLS0_SX286 +MJLS0_SX376 +MJMA0_SI1495 +MJMA0_SI865 +MJMA0_SX145 +MJMA0_SX235 +MJMA0_SX325 +MJMA0_SX415 +MJMA0_SX55 +MJMD0_SA1 +MJMD0_SI1028 +MJMD0_SI1658 +MJMD0_SX128 +MJMD0_SX218 +MJMD0_SX398 +MJMM0_SA1 +MJMM0_SA2 +MJMM0_SI1885 +MJMM0_SI625 +MJMM0_SX265 +MJMM0_SX355 +MJMM0_SX445 +MJPG0_SA1 +MJPG0_SA2 +MJPG0_SI561 +MJPG0_SX291 +MJPG0_SX381 +MJPM0_SA1 +MJPM0_SI1998 +MJPM0_SI738 +MJPM0_SX108 +MJPM0_SX18 +MJPM0_SX198 +MJPM0_SX288 +MJPM1_SA1 +MJPM1_SA2 +MJPM1_SI1897 +MJPM1_SI761 +MJPM1_SX131 +MJPM1_SX221 +MJPM1_SX41 +MJRA0_SI606 +MJRA0_SX156 +MJRA0_SX246 +MJRA0_SX66 +MJRG0_SA1 +MJRG0_SA2 +MJRG0_SX106 +MJRG0_SX16 +MJRG0_SX286 +MJRH0_SA1 +MJRH0_SA2 +MJRH0_SI1125 +MJRH0_SI1755 +MJRH0_SX135 +MJRH0_SX315 +MJRH0_SX405 +MJRH0_SX45 +MJRH1_SA2 +MJRH1_SI1774 +MJRH1_SX334 +MJRH1_SX64 +MJRK0_SI2103 +MJRK0_SX340 +MJRK0_SX70 +MJRP0_SI1835 +MJRP0_SI585 +MJRP0_SX135 +MJRP0_SX315 +MJRP0_SX405 +MJRP0_SX45 +MJSR0_SA2 +MJSR0_SX164 +MJSR0_SX254 +MJSR0_SX434 +MJSR0_SX74 +MJWG0_SA2 +MJWG0_SI2155 +MJWG0_SX355 +MJWG0_SX445 +MJWG0_SX85 +MJWS0_SA1 +MJWS0_SA2 +MJWS0_SI1143 +MJWS0_SI1773 +MJWS0_SX243 +MJWS0_SX423 +MJWT0_SA2 +MJWT0_SI751 +MJXA0_SA1 +MJXA0_SA2 +MJXA0_SI1507 +MJXA0_SI2137 +MJXA0_SI877 +MJXA0_SX157 +MJXA0_SX247 +MJXA0_SX337 +MJXA0_SX67 +MJXL0_SA1 +MJXL0_SA2 +MJXL0_SI1795 +MJXL0_SX182 +MJXL0_SX272 +MJXL0_SX362 +MJXL0_SX452 +MJXL0_SX92 +MKAG0_SA2 +MKAG0_SI1609 +MKAG0_SI2239 +MKAG0_SX169 +MKAG0_SX30 +MKAG0_SX439 +MKAG0_SX79 +MKAH0_SA1 +MKAH0_SA2 +MKAH0_SI1528 +MKAH0_SI2158 +MKAH0_SI898 +MKAH0_SX268 +MKAH0_SX358 +MKAH0_SX448 +MKAH0_SX88 +MKAJ0_SA1 +MKAJ0_SI1414 +MKAJ0_SI2044 +MKAJ0_SI784 +MKAJ0_SX244 +MKAJ0_SX334 +MKAJ0_SX424 +MKAJ0_SX64 +MKAM0_SA2 +MKAM0_SI1316 +MKAM0_SX236 +MKAM0_SX416 +MKDB0_SI2132 +MKDB0_SI588 +MKDB0_SI872 +MKDB0_SX242 +MKDB0_SX332 +MKDB0_SX422 +MKDB0_SX62 +MKDD0_SA1 +MKDD0_SX127 +MKDD0_SX217 +MKDD0_SX307 +MKDD0_SX37 +MKDD0_SX397 +MKDT0_SA1 +MKDT0_SA2 +MKDT0_SI2153 +MKDT0_SI893 +MKDT0_SX173 +MKDT0_SX263 +MKDT0_SX353 +MKDT0_SX443 +MKDT0_SX83 +MKES0_SA2 +MKES0_SX263 +MKES0_SX353 +MKES0_SX443 +MKES0_SX83 +MKJO0_SA1 +MKJO0_SA2 +MKJO0_SI2147 +MKJO0_SX167 +MKJO0_SX257 +MKJO0_SX424 
+MKJO0_SX77 +MKLN0_SA1 +MKLN0_SA2 +MKLN0_SI1598 +MKLN0_SI2228 +MKLN0_SX158 +MKLN0_SX338 +MKLN0_SX428 +MKLN0_SX68 +MKLR0_SA1 +MKLR0_SI1059 +MKLR0_SI2319 +MKLR0_SX159 +MKLR0_SX249 +MKLR0_SX339 +MKLR0_SX429 +MKLR0_SX69 +MKLS0_SA2 +MKLS0_SI1533 +MKLS0_SX177 +MKLS0_SX267 +MKLS0_SX447 +MKLS1_SI1545 +MKLS1_SI2175 +MKLS1_SX105 +MKLS1_SX15 +MKLS1_SX195 +MKLS1_SX285 +MKLW0_SA2 +MKLW0_SI1844 +MKLW0_SI2201 +MKLW0_SX131 +MKLW0_SX221 +MKLW0_SX401 +MKLW0_SX41 +MKRG0_SA1 +MKRG0_SA2 +MKRG0_SI1491 +MKRG0_SI2121 +MKRG0_SX141 +MKRG0_SX231 +MKRG0_SX31 +MKRG0_SX51 +MKXL0_SA1 +MKXL0_SI1185 +MKXL0_SX105 +MKXL0_SX195 +MKXL0_SX285 +MLBC0_SA2 +MLBC0_SI609 +MLBC0_SX159 +MLBC0_SX339 +MLBC0_SX429 +MLBC0_SX69 +MLEL0_SI1876 +MLEL0_SX346 +MLEL0_SX76 +MLJC0_SA1 +MLJC0_SA2 +MLJC0_SI1855 +MLJC0_SI595 +MLJC0_SX235 +MLJC0_SX325 +MLJC0_SX55 +MLJH0_SI1324 +MLJH0_SX154 +MLJH0_SX334 +MLJH0_SX424 +MLNS0_SA1 +MLNS0_SA2 +MLNS0_SI1407 +MLNS0_SI777 +MLNS0_SX147 +MLNS0_SX237 +MLNS0_SX327 +MLNS0_SX417 +MLNS0_SX57 +MLSH0_SA1 +MLSH0_SA2 +MLSH0_SI2047 +MLSH0_SI787 +MLSH0_SX157 +MLSH0_SX337 +MLSH0_SX427 +MLSH0_SX67 +MMAA0_SI2105 +MMAA0_SX125 +MMAA0_SX215 +MMAA0_SX305 +MMAA0_SX395 +MMAB1_SA1 +MMAB1_SA2 +MMAB1_SI2124 +MMAB1_SX144 +MMAB1_SX414 +MMAB1_SX54 +MMAG0_SI496 +MMAG0_SX226 +MMAG0_SX406 +MMAG0_SX46 +MMAM0_SA1 +MMAM0_SA2 +MMAM0_SI1597 +MMAM0_SI1668 +MMAM0_SX247 +MMAM0_SX337 +MMAM0_SX67 +MMAR0_SA1 +MMAR0_SA2 +MMAR0_SI1336 +MMAR0_SI706 +MMAR0_SX436 +MMAR0_SX76 +MMBS0_SA1 +MMBS0_SA2 +MMBS0_SI1151 +MMBS0_SX251 +MMBS0_SX341 +MMBS0_SX431 +MMBS0_SX71 +MMCC0_SA1 +MMCC0_SI1968 +MMCC0_SI708 +MMCC0_SX168 +MMCC0_SX258 +MMCC0_SX348 +MMCC0_SX438 +MMCC0_SX78 +MMDB0_SA1 +MMDB0_SA2 +MMDB0_SI1358 +MMDB0_SI1617 +MMDB0_SX267 +MMDB0_SX357 +MMDB0_SX447 +MMDB0_SX87 +MMDG0_SI2035 +MMDG0_SX340 +MMDG0_SX430 +MMDG0_SX70 +MMDM0_SA1 +MMDM0_SA2 +MMDM0_SX231 +MMDM0_SX321 +MMDM0_SX411 +MMDM0_SX51 +MMDM1_SA1 +MMDM1_SI1650 +MMDM1_SI783 +MMDM1_SX243 +MMDS0_SA2 +MMDS0_SI1343 +MMDS0_SI1973 +MMDS0_SI713 +MMDS0_SX173 +MMDS0_SX263 +MMDS0_SX353 +MMDS0_SX443 +MMDS0_SX83 +MMEA0_SA2 +MMEA0_SI1388 +MMEA0_SI2018 +MMEA0_SI758 +MMEA0_SX218 +MMEA0_SX308 +MMEA0_SX38 +MMEB0_SA1 +MMEB0_SI1357 +MMEB0_SI1987 +MMEB0_SI727 +MMEB0_SX7 +MMEB0_SX97 +MMGC0_SA1 +MMGC0_SI1935 +MMGC0_SI2184 +MMGC0_SX315 +MMGC0_SX405 +MMGC0_SX45 +MMGG0_SA1 +MMGG0_SA2 +MMGG0_SI1709 +MMGG0_SI2339 +MMGG0_SX179 +MMGG0_SX359 +MMGG0_SX89 +MMGK0_SA1 +MMGK0_SA2 +MMGK0_SI1322 +MMGK0_SI1952 +MMGK0_SI692 +MMGK0_SX152 +MMGK0_SX242 +MMGK0_SX422 +MMJB1_SA1 +MMJB1_SI1408 +MMJB1_SI2038 +MMJB1_SI778 +MMJB1_SX148 +MMJB1_SX238 +MMJB1_SX328 +MMJB1_SX418 +MMJB1_SX58 +MMLM0_SA1 +MMLM0_SA2 +MMLM0_SI1527 +MMLM0_SI897 +MMLM0_SX177 +MMLM0_SX267 +MMLM0_SX357 +MMLM0_SX447 +MMLM0_SX87 +MMPM0_SA1 +MMPM0_SA2 +MMPM0_SI1061 +MMPM0_SI1691 +MMPM0_SI2321 +MMPM0_SX251 +MMPM0_SX341 +MMPM0_SX431 +MMPM0_SX71 +MMRP0_SA1 +MMRP0_SI2034 +MMRP0_SI717 +MMRP0_SI774 +MMRP0_SX234 +MMRP0_SX414 +MMRP0_SX54 +MMSM0_SA1 +MMSM0_SA2 +MMSM0_SI1736 +MMSM0_SX26 +MMSM0_SX296 +MMSM0_SX386 +MMVP0_SI1284 +MMVP0_SI1914 +MMVP0_SX114 +MMVP0_SX204 +MMVP0_SX294 +MMVP0_SX384 +MMWB0_SA2 +MMWB0_SI1619 +MMWB0_SX179 +MMWB0_SX269 +MMWS0_SA1 +MMWS0_SI1518 +MMWS0_SI559 +MMWS0_SI888 +MMWS0_SX258 +MMWS0_SX78 +MMWS1_SA1 +MMWS1_SA2 +MMWS1_SI1071 +MMWS1_SI2331 +MMWS1_SX261 +MMWS1_SX27 +MMWS1_SX351 +MMWS1_SX441 +MMWS1_SX81 +MMXS0_SA1 +MMXS0_SA2 +MMXS0_SI629 +MMXS0_SI876 +MMXS0_SX156 +MMXS0_SX336 +MMXS0_SX66 +MNET0_SA1 +MNET0_SA2 +MNET0_SI1446 +MNET0_SI2076 +MNET0_SX186 +MNET0_SX276 +MNET0_SX366 +MNET0_SX96 +MNTW0_SA1 +MNTW0_SI2328 +MNTW0_SX202 +MNTW0_SX258 +MNTW0_SX348 +MPAR0_SA1 +MPAR0_SA2 
+MPAR0_SI1576 +MPAR0_SX226 +MPAR0_SX406 +MPAR0_SX46 +MPEB0_SA1 +MPEB0_SA2 +MPEB0_SX150 +MPEB0_SX420 +MPEB0_SX60 +MPFU0_SA1 +MPFU0_SA2 +MPFU0_SI1888 +MPFU0_SX178 +MPFU0_SX268 +MPFU0_SX358 +MPFU0_SX88 +MPGH0_SA1 +MPGH0_SA2 +MPGH0_SI1554 +MPGH0_SI924 +MPGH0_SX204 +MPGH0_SX294 +MPGH0_SX384 +MPGR0_SA1 +MPGR0_SA2 +MPGR0_SI2040 +MPGR0_SI780 +MPGR0_SX150 +MPGR0_SX420 +MPGR0_SX60 +MPGR1_SA1 +MPGR1_SA2 +MPGR1_SI1269 +MPGR1_SI2129 +MPGR1_SX239 +MPGR1_SX329 +MPGR1_SX419 +MPGR1_SX59 +MPMB0_SX241 +MPPC0_SA2 +MPPC0_SI2042 +MPPC0_SI782 +MPPC0_SX152 +MPPC0_SX242 +MPPC0_SX332 +MPPC0_SX422 +MPPC0_SX62 +MPRB0_SA1 +MPRB0_SA2 +MPRB0_SI1205 +MPRB0_SX125 +MPRB0_SX215 +MPRB0_SX305 +MPRB0_SX35 +MPRB0_SX395 +MPRD0_SA2 +MPRD0_SI1431 +MPRD0_SI2061 +MPRK0_SA2 +MPRK0_SX17 +MPRK0_SX197 +MPRT0_SA2 +MPRT0_SI1210 +MPRT0_SI495 +MPRT0_SI580 +MPRT0_SX130 +MPRT0_SX220 +MPRT0_SX40 +MPRT0_SX400 +MPSW0_SA1 +MPSW0_SA2 +MPSW0_SI1697 +MPSW0_SI2327 +MPSW0_SX24 +MPSW0_SX257 +MPSW0_SX77 +MRAB0_SA1 +MRAB0_SA2 +MRAB0_SI1224 +MRAB0_SI594 +MRAB0_SX144 +MRAB0_SX234 +MRAB0_SX324 +MRAB0_SX414 +MRAB0_SX54 +MRAB1_SA1 +MRAB1_SA2 +MRAB1_SI1478 +MRAB1_SI2108 +MRAB1_SX218 +MRAB1_SX38 +MRAB1_SX398 +MRAI0_SI1954 +MRAI0_SX162 +MRAI0_SX252 +MRAI0_SX342 +MRAM0_SI1275 +MRAM0_SI1905 +MRAM0_SX105 +MRAM0_SX195 +MRAM0_SX285 +MRAM0_SX375 +MRAV0_SA1 +MRAV0_SA2 +MRAV0_SI1008 +MRAV0_SI1638 +MRAV0_SI2268 +MRAV0_SX108 +MRAV0_SX18 +MRAV0_SX198 +MRAV0_SX288 +MRAV0_SX378 +MRBC0_SA1 +MRBC0_SA2 +MRBC0_SI1665 +MRBC0_SI599 +MRBC0_SX149 +MRBC0_SX239 +MRBC0_SX59 +MRCG0_SA1 +MRCG0_SI2058 +MRCG0_SX258 +MRCG0_SX78 +MRCW0_SA2 +MRCW0_SI1371 +MRCW0_SI2001 +MRCW0_SX111 +MRCW0_SX201 +MRCW0_SX21 +MRCW0_SX381 +MRDD0_SA1 +MRDD0_SA2 +MRDD0_SI1050 +MRDD0_SI2310 +MRDD0_SX240 +MRDD0_SX330 +MRDM0_SA1 +MRDM0_SA2 +MRDM0_SI965 +MRDM0_SX155 +MRDM0_SX245 +MRDM0_SX425 +MRDS0_SA2 +MRDS0_SI1167 +MRDS0_SI1797 +MRDS0_SI537 +MRDS0_SX177 +MRDS0_SX267 +MRDS0_SX357 +MRDS0_SX447 +MRDS0_SX87 +MREE0_SA1 +MREE0_SA2 +MREE0_SI1734 +MREE0_SX114 +MREE0_SX204 +MREE0_SX294 +MREE0_SX384 +MREH1_SA2 +MREH1_SI2229 +MREH1_SX159 +MREH1_SX339 +MREH1_SX429 +MREM0_SA1 +MREM0_SI1591 +MREM0_SI961 +MREM0_SX151 +MREM0_SX241 +MREM0_SX331 +MREM0_SX421 +MREM0_SX61 +MREW1_SA1 +MREW1_SA2 +MREW1_SI1500 +MREW1_SI2130 +MREW1_SX150 +MREW1_SX240 +MREW1_SX330 +MREW1_SX420 +MREW1_SX60 +MRFK0_SA1 +MRFK0_SA2 +MRFK0_SI1706 +MRFK0_SI2336 +MRFK0_SX176 +MRFK0_SX266 +MRFK0_SX356 +MRFK0_SX86 +MRFL0_SA2 +MRFL0_SI1786 +MRFL0_SX346 +MRGM0_SA1 +MRGM0_SI1162 +MRGM0_SI1792 +MRGM0_SX416 +MRGM0_SX82 +MRGS0_SA1 +MRGS0_SI1986 +MRGS0_SX276 +MRGS0_SX366 +MRGS0_SX96 +MRHL0_SA1 +MRHL0_SA2 +MRHL0_SI1515 +MRHL0_SI2145 +MRHL0_SX165 +MRHL0_SX255 +MRHL0_SX75 +MRJB1_SI1020 +MRJB1_SX300 +MRJH0_SA1 +MRJH0_SI914 +MRJH0_SX259 +MRJH0_SX439 +MRJM0_SA1 +MRJM0_SA2 +MRJM0_SI1095 +MRJM0_SI1228 +MRJM0_SI1858 +MRJM0_SX238 +MRJM0_SX328 +MRJM0_SX418 +MRJM0_SX58 +MRJM1_SA1 +MRJM1_SI668 +MRJM1_SX218 +MRJM1_SX308 +MRJM1_SX38 +MRJM1_SX398 +MRJT0_SA1 +MRJT0_SI1805 +MRJT0_SX148 +MRJT0_SX238 +MRKM0_SA1 +MRKM0_SX187 +MRKM0_SX277 +MRKM0_SX7 +MRKM0_SX97 +MRLD0_SA1 +MRLD0_SI1594 +MRLD0_SI964 +MRLD0_SX244 +MRLD0_SX334 +MRLD0_SX64 +MRLJ0_SA2 +MRLJ0_SI1420 +MRLJ0_SI2050 +MRLJ0_SX160 +MRLJ0_SX430 +MRLJ0_SX70 +MRLJ1_SI1671 +MRLJ1_SI2332 +MRLJ1_SX141 +MRLJ1_SX231 +MRLJ1_SX411 +MRLJ1_SX51 +MRLK0_SA1 +MRLK0_SA2 +MRLK0_SI2140 +MRLK0_SX303 +MRLK0_SX33 +MRLK0_SX393 +MRLR0_SA1 +MRLR0_SA2 +MRLR0_SI1826 +MRLR0_SI566 +MRLR0_SX116 +MRLR0_SX206 +MRLR0_SX26 +MRLR0_SX296 +MRLR0_SX386 +MRMB0_SA1 +MRMB0_SI2211 +MRMB0_SI951 +MRMB0_SX141 +MRMB0_SX231 +MRMB0_SX321 +MRMB0_SX51 +MRMG0_SA2 +MRMG0_SI1710 
+MRMG0_SI2340 +MRMG0_SX180 +MRMG0_SX270 +MRMG0_SX360 +MRMG0_SX90 +MRMH0_SA1 +MRMH0_SA2 +MRMH0_SI1021 +MRMH0_SX211 +MRMH0_SX301 +MRMH0_SX31 +MRMH0_SX391 +MRML0_SI2051 +MRML0_SI791 +MRML0_SX431 +MRML0_SX71 +MRMS0_SA1 +MRMS0_SA2 +MRMS0_SI1113 +MRMS0_SI2100 +MRMS0_SX120 +MRMS0_SX210 +MRMS0_SX30 +MRMS0_SX300 +MRMS0_SX390 +MRPC1_SA1 +MRPC1_SA2 +MRPC1_SI1482 +MRPC1_SI2026 +MRPC1_SX132 +MRPC1_SX222 +MRPC1_SX312 +MRPC1_SX402 +MRPC1_SX42 +MRRE0_SI704 +MRRE0_SX254 +MRRE0_SX434 +MRSO0_SA1 +MRSO0_SA2 +MRSO0_SI1659 +MRSO0_SI2289 +MRSO0_SX219 +MRSO0_SX309 +MRSO0_SX399 +MRSP0_SA1 +MRSP0_SA2 +MRSP0_SI2059 +MRSP0_SI799 +MRSP0_SX169 +MRSP0_SX196 +MRSP0_SX439 +MRSP0_SX79 +MRTC0_SA1 +MRTC0_SA2 +MRTC0_SI2088 +MRTC0_SI828 +MRTC0_SX108 +MRTC0_SX18 +MRTC0_SX198 +MRTC0_SX288 +MRTJ0_SA2 +MRTJ0_SI1551 +MRTJ0_SI2032 +MRTJ0_SX322 +MRTJ0_SX412 +MRVG0_SA1 +MRVG0_SA2 +MRVG0_SI1770 +MRVG0_SI510 +MRVG0_SX150 +MRVG0_SX330 +MRVG0_SX420 +MRVG0_SX60 +MRWA0_SA1 +MRWA0_SA2 +MRWA0_SI1603 +MRWA0_SI2233 +MRWA0_SX253 +MRWA0_SX343 +MRWA0_SX433 +MRWS0_SA1 +MRWS0_SA2 +MRWS0_SX112 +MRWS0_SX202 +MRWS0_SX292 +MRXB0_SA1 +MRXB0_SI1585 +MRXB0_SX145 +MRXB0_SX235 +MRXB0_SX325 +MRXB0_SX55 +MSAH1_SA1 +MSAH1_SA2 +MSAH1_SI1049 +MSAH1_SI2309 +MSAH1_SX149 +MSAH1_SX239 +MSAH1_SX329 +MSAH1_SX419 +MSAH1_SX59 +MSAS0_SA1 +MSAS0_SA2 +MSAS0_SI2006 +MSAS0_SX26 +MSAS0_SX296 +MSAT0_SA2 +MSAT0_SI1526 +MSAT0_SI2156 +MSAT0_SI896 +MSAT0_SX176 +MSAT0_SX266 +MSAT0_SX356 +MSAT0_SX446 +MSAT0_SX86 +MSAT1_SA1 +MSAT1_SA2 +MSAT1_SI1073 +MSAT1_SI1703 +MSAT1_SI2333 +MSAT1_SX173 +MSAT1_SX353 +MSDB0_SA1 +MSDB0_SA2 +MSDB0_SI1007 +MSDB0_SI1637 +MSDB0_SI2267 +MSDB0_SX107 +MSDB0_SX17 +MSDH0_SA1 +MSDH0_SA2 +MSDH0_SI2113 +MSDH0_SX260 +MSDH0_SX350 +MSDS0_SA2 +MSDS0_SI1707 +MSDS0_SI2337 +MSDS0_SX177 +MSDS0_SX447 +MSDS0_SX87 +MSEM1_SA1 +MSEM1_SA2 +MSEM1_SX360 +MSEM1_SX450 +MSEM1_SX90 +MSES0_SA1 +MSES0_SA2 +MSES0_SI2216 +MSES0_SI2219 +MSES0_SX149 +MSES0_SX329 +MSES0_SX59 +MSFH0_SA2 +MSFH0_SI1216 +MSFH0_SI586 +MSFH0_SX226 +MSFH0_SX46 +MSFV0_SA1 +MSFV0_SA2 +MSFV0_SI1262 +MSFV0_SX182 +MSFV0_SX272 +MSFV0_SX452 +MSJK0_SA1 +MSJK0_SA2 +MSJK0_SI2226 +MSJK0_SI966 +MSJK0_SX156 +MSJK0_SX246 +MSJK0_SX426 +MSJK0_SX66 +MSMC0_SA1 +MSMC0_SA2 +MSMC0_SI1907 +MSMC0_SI647 +MSMC0_SX107 +MSMC0_SX17 +MSMC0_SX197 +MSMC0_SX287 +MSMC0_SX377 +MSMR0_SA1 +MSMR0_SA2 +MSMR0_SI1405 +MSMR0_SI775 +MSMR0_SX145 +MSMR0_SX235 +MSMR0_SX325 +MSMR0_SX55 +MSMS0_SA2 +MSMS0_SI2063 +MSMS0_SI803 +MSMS0_SX263 +MSMS0_SX353 +MSMS0_SX443 +MSRG0_SA2 +MSRG0_SI1851 +MSRG0_SI591 +MSRG0_SX141 +MSRG0_SX231 +MSRG0_SX321 +MSRG0_SX411 +MSRG0_SX51 +MSRR0_SA1 +MSRR0_SA2 +MSRR0_SI1131 +MSRR0_SX141 +MSRR0_SX231 +MSRR0_SX30 +MSRR0_SX411 +MSRR0_SX51 +MSTF0_SA1 +MSTF0_SA2 +MSTF0_SI1396 +MSTF0_SX136 +MSTF0_SX226 +MSTF0_SX406 +MSVS0_SA1 +MSVS0_SI1568 +MSVS0_SX128 +MSVS0_SX218 +MSVS0_SX38 +MTAB0_SA1 +MTAB0_SA2 +MTAB0_SI2202 +MTAB0_SI942 +MTAB0_SX132 +MTAB0_SX222 +MTAB0_SX402 +MTAB0_SX42 +MTAS0_SA1 +MTAS0_SA2 +MTAS0_SI1385 +MTAS0_SI2015 +MTAS0_SI755 +MTAS0_SX125 +MTAS0_SX305 +MTAT0_SA2 +MTAT0_SI1740 +MTAT0_SX120 +MTAT0_SX210 +MTAT0_SX30 +MTAT0_SX300 +MTAT1_SA1 +MTAT1_SA2 +MTAT1_SI1409 +MTAT1_SI1627 +MTAT1_SX239 +MTAT1_SX419 +MTBC0_SA1 +MTBC0_SA2 +MTBC0_SI1173 +MTBC0_SX183 +MTBC0_SX273 +MTBC0_SX347 +MTBC0_SX363 +MTBC0_SX93 +MTCS0_SA1 +MTCS0_SI1972 +MTCS0_SX172 +MTCS0_SX262 +MTCS0_SX352 +MTCS0_SX442 +MTDB0_SA1 +MTDB0_SA2 +MTDB0_SI2031 +MTDB0_SX141 +MTDB0_SX231 +MTDB0_SX321 +MTDB0_SX411 +MTDB0_SX51 +MTDP0_SI1274 +MTDP0_SI2151 +MTDP0_SX261 +MTDP0_SX441 +MTDP0_SX81 +MTER0_SI527 +MTER0_SX167 +MTER0_SX17 +MTER0_SX257 +MTER0_SX77 +MTJG0_SA2 +MTJG0_SI1520 
+MTJG0_SI890 +MTJG0_SX350 +MTJG0_SX440 +MTJG0_SX80 +MTJM0_SA1 +MTJM0_SA2 +MTJM0_SI1226 +MTJM0_SI655 +MTJM0_SX236 +MTJM0_SX326 +MTJM0_SX416 +MTJM0_SX56 +MTJS0_SA1 +MTJS0_SI1192 +MTJS0_SX112 +MTJS0_SX202 +MTJS0_SX22 +MTJS0_SX292 +MTJU0_SA1 +MTJU0_SA2 +MTJU0_SI2269 +MTJU0_SI760 +MTJU0_SX220 +MTJU0_SX310 +MTJU0_SX40 +MTKD0_SA1 +MTKD0_SA2 +MTKD0_SI1187 +MTKD0_SI1817 +MTKD0_SX17 +MTKD0_SX197 +MTKD0_SX377 +MTKP0_SA1 +MTKP0_SA2 +MTKP0_SX123 +MTKP0_SX213 +MTKP0_SX303 +MTKP0_SX33 +MTKP0_SX393 +MTLB0_SA2 +MTLB0_SI1764 +MTLB0_SI504 +MTLB0_SX144 +MTLB0_SX414 +MTLB0_SX54 +MTLC0_SA2 +MTLC0_SI847 +MTLC0_SX127 +MTLC0_SX217 +MTLC0_SX307 +MTLC0_SX37 +MTLC0_SX397 +MTML0_SA1 +MTML0_SA2 +MTML0_SI1065 +MTML0_SI1695 +MTML0_SX255 +MTML0_SX345 +MTML0_SX75 +MTMN0_SA1 +MTMN0_SX164 +MTMN0_SX254 +MTMN0_SX344 +MTMN0_SX74 +MTMT0_SA1 +MTMT0_SI1118 +MTMT0_SX128 +MTMT0_SX218 +MTMT0_SX308 +MTMT0_SX38 +MTMT0_SX398 +MTPF0_SA1 +MTPF0_SA2 +MTPF0_SI1235 +MTPF0_SI1865 +MTPF0_SI605 +MTPF0_SX155 +MTPF0_SX245 +MTPF0_SX335 +MTPF0_SX425 +MTPG0_SA1 +MTPG0_SA2 +MTPG0_SI2013 +MTPG0_SX123 +MTPG0_SX213 +MTPG0_SX33 +MTPG0_SX393 +MTPP0_SA1 +MTPP0_SA2 +MTPP0_SI2138 +MTPP0_SI878 +MTPP0_SX158 +MTPP0_SX248 +MTPP0_SX428 +MTPP0_SX68 +MTPR0_SA1 +MTPR0_SA2 +MTPR0_SI1600 +MTPR0_SI506 +MTPR0_SX250 +MTPR0_SX70 +MTQC0_SA2 +MTQC0_SI2071 +MTQC0_SX271 +MTQC0_SX361 +MTRC0_SA1 +MTRC0_SA2 +MTRC0_SI1623 +MTRC0_SI993 +MTRC0_SX170 +MTRC0_SX183 +MTRC0_SX273 +MTRC0_SX363 +MTRC0_SX93 +MTRR0_SA1 +MTRR0_SA2 +MTRR0_SI1548 +MTRR0_SI2178 +MTRR0_SX108 +MTRR0_SX18 +MTRR0_SX378 +MTRT0_SA1 +MTRT0_SI1857 +MTRT0_SI597 +MTRT0_SX147 +MTRT0_SX237 +MTRT0_SX417 +MTWH1_SA1 +MTWH1_SA2 +MTWH1_SI1512 +MTWH1_SI2142 +MTWH1_SI882 +MTWH1_SX162 +MTWH1_SX252 +MTWH1_SX342 +MTWH1_SX432 +MTXS0_SI1690 +MTXS0_SX250 +MTXS0_SX340 +MTXS0_SX70 +MVJH0_SA1 +MVJH0_SA2 +MVJH0_SI2186 +MVJH0_SX116 +MVJH0_SX26 +MVJH0_SX386 +MVLO0_SA2 +MVLO0_SI1147 +MVLO0_SI1777 +MVLO0_SX157 +MVLO0_SX247 +MVLO0_SX337 +MVLO0_SX427 +MVLO0_SX67 +MVRW0_SA1 +MVRW0_SI1485 +MVRW0_SI2115 +MVRW0_SI855 +MVRW0_SX315 +MVRW0_SX405 +MVRW0_SX45 +MWAC0_SA1 +MWAC0_SI2231 +MWAC0_SI971 +MWAC0_SX71 +MWAD0_SA1 +MWAD0_SA2 +MWAD0_SI1062 +MWAD0_SI1749 +MWAD0_SI2322 +MWAD0_SX162 +MWAD0_SX252 +MWAD0_SX342 +MWAR0_SA2 +MWAR0_SI2305 +MWAR0_SX145 +MWAR0_SX235 +MWAR0_SX325 +MWAR0_SX415 +MWAR0_SX55 +MWCH0_SA1 +MWCH0_SA2 +MWCH0_SI1622 +MWCH0_SX272 +MWCH0_SX362 +MWCH0_SX92 +MWDK0_SX266 +MWDK0_SX356 +MWDK0_SX446 +MWEM0_SA1 +MWEM0_SI1950 +MWEM0_SX240 +MWEM0_SX330 +MWEM0_SX60 +MWGR0_SA1 +MWGR0_SA2 +MWGR0_SI1606 +MWGR0_SI2236 +MWGR0_SI976 +MWGR0_SX166 +MWGR0_SX256 +MWGR0_SX436 +MWGR0_SX76 +MWRE0_SA1 +MWRE0_SI1687 +MWRE0_SI2317 +MWRE0_SX157 +MWRP0_SA2 +MWRP0_SI1525 +MWRP0_SI2073 +MWRP0_SX183 +MWRP0_SX3 +MWRP0_SX93 +MWSB0_SA1 +MWSB0_SA2 +MWSB0_SI1626 +MWSB0_SI2256 +MWSB0_SX186 +MWSB0_SX366 +MWSB0_SX6 +MWSB0_SX96 +MWSH0_SA1 +MWSH0_SA2 +MWSH0_SI2266 +MWSH0_SX346 +MWSH0_SX436 +MZMB0_SA2 +MZMB0_SI1166 +MZMB0_SI1796 +MZMB0_SI536 +MZMB0_SX176 +MZMB0_SX266 +MZMB0_SX356 +MZMB0_SX446 +MZMB0_SX86 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train_text.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train_text.uid new file mode 100644 index 0000000..0e0c251 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/train_text.uid @@ -0,0 +1,1000 @@ +FAEM0_SI762 +FAEM0_SX42 +FAJW0_SA1 +FAJW0_SX3 +FAJW0_SX93 +FALK0_SX186 +FALK0_SX6 +FALR0_SI1325 +FBAS0_SA1 +FBAS0_SX217 +FBCG1_SA1 +FBCG1_SX172 +FBCG1_SX442 +FBCH0_SX236 +FBCH0_SX416 +FBLV0_SA1 +FBLV0_SI1058 +FBLV0_SX338 
+FBLV0_SX68 +FBMH0_SA1 +FBMJ0_SI815 +FCAG0_SA1 +FCAG0_SX153 +FCAG0_SX243 +FCAJ0_SI1479 +FCAJ0_SX309 +FCDR1_SX106 +FCDR1_SX196 +FCEG0_SA2 +FCJF0_SA1 +FCJF0_SX127 +FCJS0_SI1607 +FCJS0_SI2237 +FCJS0_SX257 +FCKE0_SA2 +FCKE0_SX121 +FCLT0_SI2068 +FCLT0_SX448 +FCLT0_SX88 +FCMG0_SA2 +FCMG0_SI1872 +FCMG0_SX72 +FCMM0_SA1 +FCMM0_SA2 +FCMM0_SX183 +FCRZ0_SI2053 +FCRZ0_SX433 +FCYL0_SA1 +FCYL0_SX37 +FDAS1_SI2091 +FDAS1_SX201 +FDAS1_SX381 +FDAW0_SI1406 +FDFB0_SA1 +FDFB0_SA2 +FDFB0_SI2010 +FDFB0_SX58 +FDJH0_SX305 +FDML0_SA2 +FDML0_SX159 +FDML0_SX249 +FDML0_SX429 +FDMY0_SA2 +FDMY0_SX27 +FDNC0_SX198 +FDNC0_SX288 +FDTD0_SX211 +FDXW0_SA1 +FDXW0_SX251 +FDXW0_SX341 +FDXW0_SX71 +FEAC0_SX165 +FEAC0_SX75 +FEAR0_SI622 +FECD0_SX68 +FEEH0_SA1 +FEEH0_SI1742 +FEEH0_SI471 +FEEH0_SX122 +FEME0_SA1 +FEME0_SX155 +FEME0_SX65 +FETB0_SA1 +FETB0_SI1148 +FETB0_SX158 +FEXM0_SI1101 +FGCS0_SX136 +FGCS0_SX226 +FGCS0_SX316 +FGCS0_SX406 +FGDP0_SA1 +FGMB0_SI1775 +FGMB0_SX245 +FHLM0_SX390 +FHXS0_SA2 +FHXS0_SX445 +FJDM2_SA1 +FJDM2_SX232 +FJDM2_SX52 +FJHK0_SX302 +FJKL0_SX212 +FJKL0_SX392 +FJLG0_SI2306 +FJLR0_SA1 +FJRP1_SI2062 +FJRP1_SX82 +FJSK0_SA1 +FJSP0_SX264 +FJSP0_SX354 +FJSP0_SX444 +FJWB1_SA1 +FJWB1_SX345 +FJWB1_SX435 +FJXM0_SA1 +FJXM0_SI581 +FJXM0_SX401 +FJXP0_SA1 +FJXP0_SI1122 +FJXP0_SX132 +FKAA0_SX128 +FKAA0_SX398 +FKDE0_SA1 +FKDE0_SX151 +FKDE0_SX241 +FKDE0_SX421 +FKDE0_SX61 +FKDW0_SX397 +FKFB0_SA2 +FKFB0_SX348 +FKFB0_SX78 +FKKH0_SA1 +FKKH0_SA2 +FKKH0_SX120 +FKKH0_SX390 +FKLC0_SX355 +FKLC1_SI2308 +FKLC1_SX238 +FKLC1_SX328 +FKLC1_SX418 +FKLH0_SA2 +FKLH0_SX177 +FKSR0_SA1 +FKSR0_SA2 +FKSR0_SI1747 +FKSR0_SI487 +FKSR0_SX217 +FLAC0_SX451 +FLAG0_SA2 +FLAG0_SX114 +FLAG0_SX204 +FLAG0_SX24 +FLAG0_SX384 +FLEH0_SI1681 +FLEH0_SI2311 +FLEH0_SX331 +FLET0_SA1 +FLHD0_SI1827 +FLHD0_SX354 +FLJA0_SA1 +FLJA0_SI2338 +FLJD0_SI886 +FLJD0_SX76 +FLJG0_SA2 +FLKM0_SA2 +FLKM0_SI686 +FLKM0_SX260 +FLKM0_SX80 +FLMA0_SA1 +FLMA0_SI613 +FLMA0_SX433 +FLMA0_SX73 +FLMC0_SX22 +FLMK0_SI1035 +FLMK0_SX315 +FLMK0_SX405 +FLOD0_SI1917 +FLOD0_SX117 +FLOD0_SX171 +FLOD0_SX297 +FLTM0_SA1 +FLTM0_SI1070 +FLTM0_SI2330 +FMAH1_SA2 +FMAH1_SX159 +FMBG0_SA2 +FMBG0_SI2264 +FMEM0_SI747 +FMEM0_SX387 +FMJB0_SI547 +FMJB0_SX97 +FMJF0_SA2 +FMJU0_SX309 +FMJU0_SX399 +FMKC0_SI1702 +FMKC0_SX442 +FMKC0_SX82 +FMKF0_SX186 +FMPG0_SA2 +FNKL0_SI1522 +FNTB0_SI1203 +FNTB0_SI573 +FNTB0_SX303 +FPAB1_SI1471 +FPAB1_SX211 +FPAC0_SA2 +FPAD0_SA2 +FPAD0_SX356 +FPAD0_SX86 +FPAF0_SA2 +FPAF0_SX154 +FPAZ0_SA1 +FPAZ0_SA2 +FPAZ0_SX243 +FPJF0_SA1 +FPJF0_SX146 +FPJF0_SX56 +FPLS0_SI1590 +FPLS0_SX330 +FPMY0_SA1 +FPMY0_SX343 +FREH0_SA1 +FREH0_SA2 +FREH0_SX415 +FRJB0_SX347 +FRLL0_SX434 +FSAG0_SA1 +FSAG0_SX243 +FSAH0_SA1 +FSAH0_SA2 +FSAH0_SX164 +FSAH0_SX434 +FSBK0_SA2 +FSBK0_SI1069 +FSBK0_SX169 +FSCN0_SA2 +FSCN0_SI626 +FSCN0_SX266 +FSCN0_SX446 +FSCN0_SX86 +FSDC0_SA2 +FSDC0_SX142 +FSDC0_SX322 +FSDC0_SX52 +FSDJ0_SI485 +FSDJ0_SX215 +FSDJ0_SX305 +FSDJ0_SX395 +FSGF0_SX117 +FSJG0_SX130 +FSJK1_SA2 +FSJK1_SX125 +FSJK1_SX35 +FSJS0_SX181 +FSJW0_SI1963 +FSJW0_SX433 +FSKC0_SI1416 +FSKC0_SI786 +FSKC0_SX246 +FSKL0_SI1529 +FSKL0_SX449 +FSKP0_SA2 +FSLS0_SX156 +FSLS0_SX426 +FSMA0_SA2 +FSMA0_SX181 +FSMM0_SX144 +FSMM0_SX234 +FSMS1_SX244 +FSMS1_SX347 +FSPM0_SA2 +FSPM0_SX161 +FSPM0_SX71 +FSRH0_SI1931 +FSRH0_SI671 +FSRH0_SX221 +FSRH0_SX401 +FTAJ0_SI699 +FTAJ0_SX159 +FTAJ0_SX249 +FTAJ0_SX429 +FTBR0_SX21 +FTBW0_SA1 +FTMG0_SI1532 +FTMG0_SI2162 +FTMG0_SX452 +FVFB0_SA2 +FVFB0_SX132 +FVFB0_SX42 +FVKB0_SA1 +FVMH0_SA2 +FVMH0_SX116 +FVMH0_SX26 +MABC0_SI1620 +MABC0_SI2041 +MABC0_SI781 +MADC0_SX107 +MADC0_SX377 +MADD0_SA2 +MADD0_SI1295 +MADD0_SX178 
+MADD0_SX268 +MADD0_SX88 +MAEB0_SX450 +MAEO0_SA1 +MAFM0_SI939 +MAFM0_SX129 +MAFM0_SX309 +MAJP0_SA2 +MAKB0_SI1646 +MAKB0_SX26 +MAKB0_SX386 +MAKR0_SX362 +MAKR0_SX92 +MAPV0_SX213 +MARC0_SA2 +MARC0_SX108 +MARC0_SX18 +MARC0_SX198 +MARW0_SI1906 +MBAR0_SA1 +MBAR0_SX419 +MBAR0_SX59 +MBBR0_SI2315 +MBBR0_SX65 +MBCG0_SA1 +MBCG0_SI486 +MBEF0_SI1281 +MBEF0_SI1911 +MBEF0_SI651 +MBEF0_SX21 +MBEF0_SX381 +MBGT0_SA2 +MBGT0_SX261 +MBGT0_SX351 +MBGT0_SX441 +MBJV0_SA1 +MBJV0_SI617 +MBJV0_SX347 +MBMA0_SI592 +MBMA0_SX232 +MBMA0_SX52 +MBMA1_SI2214 +MBMA1_SX54 +MBML0_SA2 +MBML0_SI1169 +MBML0_SX89 +MBOM0_SA2 +MBOM0_SI2274 +MBOM0_SX294 +MBSB0_SA1 +MBSB0_SX3 +MBTH0_SA2 +MBTH0_SX122 +MBTH0_SX32 +MCAE0_SX277 +MCAL0_SA2 +MCAL0_SI1768 +MCDC0_SA1 +MCDC0_SX212 +MCDD0_SA2 +MCDD0_SI883 +MCDD0_SX253 +MCDD0_SX433 +MCDR0_SI1154 +MCEF0_SX235 +MCEF0_SX415 +MCEW0_SA2 +MCHL0_SX87 +MCLK0_SX310 +MCLM0_SA1 +MCLM0_SI2086 +MCLM0_SI826 +MCPM0_SA1 +MCPM0_SX114 +MCPM0_SX294 +MCPM0_SX384 +MCSS0_SI750 +MCTH0_SA1 +MCTH0_SX39 +MCXM0_SX91 +MDAC0_SA1 +MDAC0_SX181 +MDAC0_SX361 +MDAS0_SX6 +MDBB1_SX106 +MDBB1_SX16 +MDBB1_SX376 +MDBP0_SX168 +MDCD0_SI1415 +MDCD0_SX245 +MDCD0_SX425 +MDCM0_SX40 +MDCM0_SX400 +MDDC0_SI2049 +MDDC0_SI789 +MDDC0_SX159 +MDDC0_SX69 +MDED0_SA1 +MDED0_SA2 +MDEF0_SX123 +MDEF0_SX303 +MDHL0_SI1439 +MDHL0_SX269 +MDHL0_SX449 +MDHS0_SA1 +MDHS0_SA2 +MDHS0_SI1530 +MDHS0_SI2160 +MDJM0_SX105 +MDJM0_SX15 +MDKS0_SX436 +MDLB0_SA2 +MDLC0_SX405 +MDLC1_SA2 +MDLC1_SI2065 +MDLC1_SI2144 +MDLC1_SX445 +MDLC2_SI2244 +MDLC2_SX354 +MDLH0_SA2 +MDLM0_SI1234 +MDLM0_SI1864 +MDLM0_SX154 +MDLM0_SX424 +MDLR0_SA1 +MDLR0_SA2 +MDLR0_SI1863 +MDLR0_SI603 +MDLR0_SX153 +MDLR1_SA1 +MDLR1_SA2 +MDMA0_SI1430 +MDMA0_SX260 +MDMA0_SX80 +MDMT0_SA1 +MDMT0_SA2 +MDMT0_SI1832 +MDMT0_SX122 +MDMT0_SX32 +MDNS0_SA2 +MDNS0_SI2271 +MDNS0_SX201 +MDNS0_SX21 +MDPB0_SX416 +MDPK0_SI1053 +MDPK0_SX333 +MDPK0_SX423 +MDPS0_SI719 +MDPS0_SX359 +MDRD0_SA1 +MDRD0_SX32 +MDSJ0_SI2092 +MDSS0_SA2 +MDSS0_SX441 +MDSS1_SA1 +MDSS1_SI1327 +MDSS1_SI697 +MDSS1_SX157 +MDSS1_SX67 +MDTB0_SI1200 +MDTB0_SI1830 +MDTB0_SX120 +MDWD0_SA2 +MDWD0_SX270 +MDWD0_SX90 +MDWH0_SX215 +MDWH0_SX305 +MDWM0_SA1 +MDWM0_SA2 +MDWM0_SX16 +MDWM0_SX286 +MEAL0_SA2 +MEAL0_SI2177 +MEAL0_SX107 +MEAL0_SX347 +MEDR0_SA1 +MEDR0_SA2 +MEDR0_SI1374 +MEFG0_SA1 +MEGJ0_SA2 +MEGJ0_SX257 +MEGJ0_SX3 +MEJL0_SA1 +MEJL0_SX152 +MEJL0_SX242 +MEJS0_SI610 +MEJS0_SX160 +MEJS0_SX340 +MESG0_SX432 +MESJ0_SX187 +MESJ0_SX97 +MEWM0_SI718 +MEWM0_SX178 +MEWM0_SX88 +MFER0_SI862 +MFER0_SX142 +MFRM0_SX345 +MFRM0_SX435 +MFWK0_SI1879 +MFWK0_SX169 +MFXS0_SX54 +MFXV0_SA2 +MFXV0_SX105 +MGAF0_SA1 +MGAF0_SX22 +MGAF0_SX382 +MGAG0_SA2 +MGAK0_SX226 +MGAK0_SX46 +MGAR0_SX132 +MGAW0_SI535 +MGAW0_SX175 +MGES0_SA1 +MGES0_SI2111 +MGES0_SI851 +MGJC0_SA2 +MGJC0_SX75 +MGRL0_SI2127 +MGRL0_SI867 +MGRL0_SX147 +MGRP0_SA2 +MGSH0_SA2 +MGSH0_SI1806 +MGSH0_SX127 +MGSH0_SX276 +MGSH0_SX6 +MGSL0_SA1 +MGSL0_SI534 +MGSL0_SX264 +MGXP0_SX187 +MGXP0_SX7 +MHBS0_SX315 +MHBS0_SX45 +MHIT0_SA1 +MHJB0_SA1 +MHJB0_SI1017 +MHMG0_SX195 +MHMR0_SA1 +MHMR0_SI489 +MHRM0_SA1 +MHRM0_SI958 +MHRM0_SX148 +MHRM0_SX58 +MHXL0_SI1772 +MHXL0_SX242 +MILB0_SA2 +MJAC0_SX307 +MJAC0_SX71 +MJAE0_SX174 +MJAI0_SA1 +MJAI0_SA2 +MJBG0_SX62 +MJDA0_SI1031 +MJDA0_SX311 +MJDE0_SI463 +MJDG0_SA2 +MJDG0_SI1042 +MJDG0_SI1705 +MJDM0_SA1 +MJDM0_SI974 +MJEB0_SI656 +MJEB0_SX296 +MJEB1_SA2 +MJEB1_SX207 +MJEB1_SX387 +MJEE0_SA1 +MJEE0_SX247 +MJEE0_SX337 +MJFH0_SA2 +MJFH0_SI1107 +MJFR0_SX75 +MJHI0_SA1 +MJHI0_SX158 +MJJB0_SA1 +MJJB0_SX239 +MJJJ0_SX443 +MJJM0_SA2 +MJJM0_SI827 +MJJM0_SX107 +MJKR0_SA1 +MJKR0_SI571 +MJLB0_SX176 +MJLG1_SX292 +MJLS0_SX106 
+MJMA0_SA1 +MJMA0_SA2 +MJMD0_SA2 +MJMD0_SX308 +MJMD0_SX38 +MJMM0_SX85 +MJPG0_SI1191 +MJPG0_SX111 +MJPG0_SX201 +MJPG0_SX21 +MJPM0_SA2 +MJPM0_SX378 +MJPM1_SI2280 +MJPM1_SX401 +MJRA0_SA1 +MJRA0_SA2 +MJRA0_SI1236 +MJRA0_SI1866 +MJRA0_SX426 +MJRG0_SI1366 +MJRG0_SI1996 +MJRG0_SX376 +MJRH0_SX225 +MJRH1_SA1 +MJRH1_SI514 +MJRH1_SX154 +MJRH1_SX244 +MJRH1_SX424 +MJRK0_SA1 +MJRK0_SA2 +MJRK0_SI1662 +MJRK0_SX160 +MJRK0_SX250 +MJRK0_SX430 +MJRP0_SA1 +MJRP0_SA2 +MJRP0_SX225 +MJSR0_SA1 +MJSR0_SI1424 +MJSR0_SX344 +MJWG0_SA1 +MJWG0_SX265 +MJWS0_SI513 +MJWS0_SX153 +MJWS0_SX63 +MJWT0_SA1 +MJWT0_SX121 +MJWT0_SX211 +MJWT0_SX301 +MJWT0_SX31 +MJWT0_SX391 +MJXA0_SX427 +MJXL0_SI542 +MKAG0_SA1 +MKAG0_SX259 +MKAJ0_SA2 +MKAJ0_SX154 +MKAM0_SA1 +MKAM0_SX146 +MKAM0_SX326 +MKAM0_SX56 +MKDB0_SA1 +MKDB0_SA2 +MKDB0_SX152 +MKDD0_SA2 +MKES0_SA1 +MKES0_SI1253 +MKES0_SI1883 +MKES0_SX173 +MKJO0_SI1517 +MKJO0_SI887 +MKJO0_SX437 +MKLN0_SI968 +MKLN0_SX248 +MKLR0_SA2 +MKLR0_SI1689 +MKLS0_SA1 +MKLS0_SX357 +MKLS0_SX87 +MKLS1_SA1 +MKLS1_SA2 +MKLS1_SX375 +MKLW0_SA1 +MKRG0_SX411 +MKXL0_SA2 +MKXL0_SX15 +MKXL0_SX375 +MLBC0_SA1 +MLBC0_SI1869 +MLBC0_SX249 +MLEL0_SA1 +MLEL0_SA2 +MLEL0_SI1246 +MLEL0_SX256 +MLEL0_SX436 +MLJC0_SX145 +MLJC0_SX415 +MLJH0_SX64 +MLNS0_SI2037 +MMAA0_SA1 +MMAA0_SA2 +MMAA0_SX35 +MMAB1_SI1494 +MMAB1_SX234 +MMAG0_SA2 +MMAG0_SI1126 +MMAG0_SX316 +MMAM0_SI2227 +MMAM0_SX157 +MMAM0_SX427 +MMAR0_SX256 +MMBS0_SI1781 +MMCC0_SA2 +MMDB0_SX177 +MMDG0_SA1 +MMDG0_SA2 +MMDG0_SI520 +MMDG0_SX160 +MMDG0_SX250 +MMDM0_SI1941 +MMDM0_SI681 +MMDM0_SX141 +MMDM1_SA2 +MMDM1_SI2043 +MMDM1_SX423 +MMDM1_SX63 +MMDS0_SA1 +MMEA0_SA1 +MMEA0_SX128 +MMEA0_SX398 +MMEB0_SA2 +MMEB0_SX187 +MMEB0_SX367 +MMGC0_SA2 +MMGC0_SX135 +MMGC0_SX225 +MMGG0_SX269 +MMGK0_SX332 +MMGK0_SX62 +MMJB1_SA2 +MMRP0_SA2 +MMRP0_SX144 +MMSM0_SX116 +MMSM0_SX206 +MMVP0_SA1 +MMVP0_SA2 +MMWB0_SI989 +MMWB0_SX89 +MMWS0_SA2 +MMWS0_SX168 +MMWS0_SX348 +MMWS0_SX438 +MMWS1_SI1701 +MMXS0_SI2136 +MMXS0_SX246 +MMXS0_SX426 +MNET0_SI816 +MNET0_SX6 +MNTW0_SA2 +MNTW0_SX168 +MNTW0_SX78 +MPAR0_SI2206 +MPAR0_SI946 +MPAR0_SX136 +MPAR0_SX316 +MPEB0_SI1034 +MPEB0_SI1860 +MPEB0_SX240 +MPEB0_SX330 +MPFU0_SI628 +MPFU0_SX448 +MPGH0_SX114 +MPGH0_SX24 +MPGR0_SX240 +MPGR0_SX330 +MPGR1_SX149 +MPPC0_SA1 +MPRD0_SA1 +MPRD0_SX261 +MPRD0_SX351 +MPRD0_SX441 +MPRD0_SX81 +MPRK0_SI1727 +MPRK0_SX107 +MPRK0_SX377 +MPRT0_SA1 +MPRT0_SX310 +MPSW0_SI1067 +MPSW0_SX167 +MPSW0_SX437 +MRAB1_SX128 +MRAB1_SX308 +MRAI0_SA1 +MRAI0_SA2 +MRAI0_SX72 +MRAM0_SA1 +MRAM0_SA2 +MRAM0_SX15 +MRBC0_SI1859 +MRBC0_SX329 +MRBC0_SX419 +MRCG0_SI798 +MRCG0_SX168 +MRCW0_SA1 +MRCW0_SX291 +MRDD0_SI1680 +MRDD0_SX150 +MRDD0_SX277 +MRDD0_SX60 +MRDM0_SI1595 +MRDM0_SX65 +MRDS0_SA1 +MREE0_SX24 +MREH1_SX249 +MREH1_SX69 +MREM0_SA2 +MREW1_SI870 +MRFK0_SX446 +MRFL0_SA1 +MRFL0_SX256 +MRFL0_SX436 +MRFL0_SX76 +MRGM0_SA2 +MRGM0_SX262 +MRGS0_SA2 +MRGS0_SX186 +MRHL0_SI885 +MRHL0_SX345 +MRHL0_SX435 +MRJB1_SA1 +MRJB1_SA2 +MRJB1_SX210 +MRJB1_SX30 +MRJB1_SX390 +MRJH0_SA2 +MRJH0_SX307 +MRJH0_SX79 +MRJM0_SX148 +MRJM1_SA2 +MRJM1_SI1298 +MRJM1_SI1928 +MRJM1_SX128 +MRJT0_SA2 +MRJT0_SI1498 +MRJT0_SX328 +MRJT0_SX418 +MRKM0_SA2 +MRKM0_SX367 +MRLD0_SA2 +MRLD0_SI2224 +MRLD0_SX154 +MRLD0_SX424 +MRLJ0_SA1 +MRLJ0_SX250 +MRLJ0_SX340 +MRLJ1_SA1 +MRLJ1_SA2 +MRLJ1_SX321 +MRLK0_SI843 +MRLK0_SX123 +MRLK0_SX213 +MRMB0_SA2 +MRMB0_SI1581 +MRMB0_SX411 +MRMG0_SA1 +MRMG0_SI1080 +MRMG0_SX450 +MRMH0_SI1349 +MRMH0_SI2281 +MRMH0_SX121 +MRML0_SA2 +MRML0_SX341 +MRPC1_SI2112 +MRRE0_SA2 +MRRE0_SX164 +MRRE0_SX344 +MRRE0_SX74 +MRSO0_SX129 +MRSO0_SX39 +MRSP0_SX259 +MRTC0_SX378 +MRVG0_SI1140 +MRVG0_SX240 
+MRWA0_SI973 +MRWA0_SX163 +MRWA0_SX73 +MRWS0_SI1732 +MRWS0_SI472 +MRWS0_SX22 +MRWS0_SX382 +MRXB0_SA2 +MRXB0_SX415 +MSAH1_SI1679 +MSAS0_SX116 +MSAS0_SX206 +MSAS0_SX386 +MSAT0_SA1 +MSAT1_SX263 +MSAT1_SX443 +MSAT1_SX83 +MSDB0_SX197 +MSDB0_SX287 +MSDB0_SX377 +MSDH0_SI2240 +MSDH0_SX440 +MSDH0_SX80 +MSDS0_SA1 +MSEM1_SI1440 +MSEM1_SX180 +MSEM1_SX270 +MSES0_SI1589 +MSES0_SX239 +MSES0_SX419 +MSFH0_SX316 +MSFV0_SI1892 +MSFV0_SX362 +MSFV0_SX92 +MSMR0_SX415 +MSMS0_SA1 +MSMS0_SX173 +MSMS0_SX83 +MSRG0_SA1 +MSRG0_SI1221 +MSTF0_SI766 +MSTF0_SX316 +MSTF0_SX46 +MSVS0_SA2 +MSVS0_SX308 +MTAS0_SX215 +MTAS0_SX35 +MTAS0_SX395 +MTAT0_SX390 +MTAT1_SX59 +MTBC0_SI1803 +MTCS0_SA2 +MTCS0_SI2265 +MTCS0_SX82 +MTDP0_SA2 +MTER0_SA2 +MTER0_SI1787 +MTJG0_SA1 +MTJG0_SI2157 +MTJG0_SX260 +MTJM0_SI1856 +MTJM0_SX146 +MTJU0_SX130 +MTJU0_SX400 +MTKD0_SX107 +MTKD0_SX287 +MTKP0_SI1023 +MTLB0_SA1 +MTLB0_SX234 +MTLC0_SA1 +MTML0_SI2325 +MTML0_SX165 +MTMN0_SA2 +MTMN0_SI1064 +MTMN0_SI2324 +MTMN0_SX434 +MTMT0_SA2 +MTMT0_SI1748 +MTPF0_SX65 +MTPG0_SI1383 +MTPG0_SI753 +MTPG0_SX303 +MTPP0_SX338 +MTPR0_SX340 +MTQC0_SI480 +MTQC0_SX91 +MTRR0_SX198 +MTRR0_SX288 +MTRT0_SA2 +MTRT0_SX254 +MTRT0_SX57 +MTWH1_SX72 +MTXS0_SA1 +MTXS0_SA2 +MVJH0_SI926 +MVJH0_SX206 +MVJH0_SX296 +MVLO0_SA1 +MVRW0_SA2 +MVRW0_SX135 +MVRW0_SX225 +MWAC0_SA2 +MWAC0_SX341 +MWAC0_SX431 +MWAD0_SX432 +MWAD0_SX72 +MWAR0_SA1 +MWAR0_SI1675 +MWCH0_SI1895 +MWCH0_SI2252 +MWCH0_SX182 +MWCH0_SX452 +MWDK0_SA1 +MWDK0_SA2 +MWDK0_SI2017 +MWDK0_SI806 +MWDK0_SX176 +MWDK0_SX86 +MWEM0_SA2 +MWEM0_SI1320 +MWEM0_SI1393 +MWEM0_SX150 +MWGR0_SX346 +MWRE0_SX247 +MWRE0_SX337 +MWRE0_SX427 +MWRP0_SA1 +MWRP0_SX273 +MWRP0_SX363 +MWSB0_SX276 +MWSH0_SX256 +MWSH0_SX76 +MZMB0_SA1 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/valid.uid b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/valid.uid new file mode 100644 index 0000000..e99edfe --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/config/timit_unmatched/valid.uid @@ -0,0 +1,620 @@ +FAEM0_SI1392 +FAJW0_SI1263 +FAJW0_SI633 +FALK0_SI658 +FALR0_SX335 +FAPB0_SI1063 +FAPB0_SI2323 +FAPB0_SX433 +FBAS0_SI1472 +FBAS0_SI2066 +FBCG1_SX352 +FBCH0_SI959 +FBJL0_SI922 +FBLV0_SI1688 +FBMH0_SI1136 +FBMH0_SI970 +FBMJ0_SA1 +FBMJ0_SI1776 +FBMJ0_SI516 +FBMJ0_SX336 +FCDR1_SI1186 +FCDR1_SI1816 +FCDR1_SI556 +FCDR1_SX286 +FCKE0_SI1741 +FCKE0_SI481 +FCLT0_SI808 +FCMG0_SI1142 +FCMG0_SX432 +FCMM0_SI1957 +FCMM0_SX420 +FCYL0_SI667 +FCYL0_SX349 +FDAS1_SI1461 +FDAS1_SI831 +FDAW0_SI1271 +FDAW0_SI2036 +FDJH0_SI935 +FDKN0_SI1202 +FDKN0_SX181 +FDKN0_SX451 +FDMY0_SA1 +FDMY0_SI567 +FDMY0_SI714 +FDMY0_SX387 +FDNC0_SI1278 +FDNC0_SI1908 +FDTD0_SA1 +FDTD0_SX321 +FEAC0_SI615 +FEAR0_SX352 +FECD0_SA1 +FECD0_SI1418 +FECD0_SI788 +FEME0_SI875 +FEME0_SX335 +FEXM0_SA1 +FEXM0_SI482 +FEXM0_SX366 +FGDP0_SI988 +FGDP0_SX88 +FGMB0_SI1145 +FGMB0_SX335 +FGRW0_SA1 +FGRW0_SI1152 +FGRW0_SX162 +FGRW0_SX432 +FHLM0_SX120 +FHLM0_SX349 +FHXS0_SA1 +FHXS0_SI1075 +FHXS0_SI2302 +FHXS0_SX175 +FJDM2_SA2 +FJDM2_SX142 +FJEN0_SA1 +FJEN0_SX327 +FJEN0_SX417 +FJHK0_SI2282 +FJKL0_SI932 +FJLG0_SI1889 +FJLR0_SI1231 +FJRB0_SX402 +FJRP1_SA1 +FJRP1_SI1432 +FJRP1_SX262 +FJRP1_SX352 +FJSK0_SI1052 +FJSP0_SI1434 +FJWB1_SI748 +FJXM0_SX311 +FJXM0_SX41 +FJXP0_SI1752 +FKAA0_SA1 +FKDE0_SI1141 +FKDE0_SI1771 +FKDW0_SI1207 +FKDW0_SI1891 +FKFB0_SI1608 +FKFB0_SX438 +FKKH0_SI1290 +FKKH0_SI1920 +FKLC0_SI985 +FKLC0_SX175 +FKLC1_SI1048 +FKLH0_SI1257 +FKSR0_SX366 +FLAC0_SI1339 +FLAG0_SI1464 +FLAG0_SI834 +FLEH0_SI1051 +FLET0_SI507 +FLJA0_SI1078 +FLJA0_SX178 +FLJD0_SI1516 
+FLJG0_SI981 +FLJG0_SX171 +FLJG0_SX351 +FLKM0_SA1 +FLKM0_SI620 +FLKM0_SX350 +FLKM0_SX440 +FLMC0_SI1372 +FLMK0_SA1 +FLMK0_SI1229 +FLTM0_SX170 +FLTM0_SX350 +FLTM0_SX440 +FMAH1_SI879 +FMBG0_SI1160 +FMEM0_SA1 +FMEM0_SX333 +FMJB0_SI1177 +FMJF0_SI624 +FMJF0_SX174 +FMJF0_SX84 +FMJU0_SI1389 +FMKC0_SI1041 +FMKF0_SI1018 +FMPG0_SA1 +FMPG0_SI972 +FMPG0_SX162 +FMPG0_SX342 +FMPG0_SX432 +FNKL0_SI892 +FNTB0_SI679 +FPAB1_SA1 +FPAB1_SI2101 +FPAB1_SI841 +FPAC0_SI1921 +FPAC0_SI661 +FPAD0_SI716 +FPAD0_SX176 +FPAF0_SA1 +FPAF0_SI1054 +FPAZ0_SI2223 +FPAZ0_SI963 +FPJF0_SI1259 +FPJF0_SX352 +FPLS0_SI960 +FPMY0_SI1153 +FPMY0_SI523 +FREH0_SI1945 +FRLL0_SI805 +FSAG0_SI1323 +FSAG0_SX153 +FSAG0_SX333 +FSAG0_SX423 +FSAH0_SI614 +FSAH0_SX327 +FSAK0_SI1300 +FSBK0_SX349 +FSCN0_SA1 +FSCN0_SI705 +FSCN0_SX176 +FSDC0_SI1312 +FSDJ0_SI1115 +FSGF0_SI2187 +FSGF0_SI927 +FSJG0_SA1 +FSJG0_SA2 +FSJG0_SI940 +FSJG0_SX220 +FSJG0_SX40 +FSJG0_SX400 +FSJS0_SA1 +FSJS0_SX451 +FSJW0_SI1333 +FSKP0_SI1098 +FSMA0_SI991 +FSMA0_SX451 +FSMM0_SX324 +FSPM0_SI1241 +FSPM0_SX251 +FSRH0_SX311 +FSSB0_SI1712 +FSSB0_SX362 +FTBR0_SI1402 +FTBR0_SI921 +FTBW0_SI715 +FTBW0_SX175 +FTLG0_SI1743 +FTLG0_SI483 +FTMG0_SI902 +FVFB0_SI1510 +FVKB0_SX349 +FVMH0_SI1466 +FVMH0_SI836 +MADC0_SI1367 +MADC0_SI737 +MAEB0_SI1411 +MAEO0_SI1326 +MAJP0_SI1704 +MAJP0_SX174 +MAKB0_SA2 +MAKB0_SI1016 +MAKB0_SI2276 +MAKB0_SX116 +MAPV0_SI1293 +MAPV0_SI663 +MARW0_SX286 +MARW0_SX349 +MBBR0_SI1055 +MBBR0_SX335 +MBCG0_SI957 +MBCG0_SX327 +MBGT0_SI1841 +MBGT0_SX171 +MBMA0_SI1222 +MBMA1_SI954 +MBMA1_SX324 +MBTH0_SI2102 +MBWP0_SX349 +MCAE0_SI1447 +MCAE0_SI2077 +MCAE0_SI817 +MCAL0_SI1138 +MCDR0_SI1784 +MCDR0_SI524 +MCEF0_SI842 +MCEW0_SA1 +MCEW0_SI2072 +MCEW0_SI812 +MCEW0_SX362 +MCEW0_SX452 +MCHL0_SI1347 +MCHL0_SI1404 +MCLK0_SI2290 +MCLK0_SI650 +MCPM0_SI1824 +MCSS0_SI1380 +MCSS0_SI688 +MCTM0_SI1350 +MCTM0_SI1980 +MDAC0_SI631 +MDAS0_SI1896 +MDAS0_SI636 +MDBP0_SI528 +MDBP0_SX438 +MDCD0_SI785 +MDCD0_SX335 +MDCM0_SI1480 +MDDC0_SI1419 +MDED0_SI540 +MDEF0_SI1123 +MDEM0_SA1 +MDEM0_SI608 +MDEM0_SI800 +MDEM0_SX428 +MDHS0_SI900 +MDJM0_SI1455 +MDKS0_SX166 +MDKS0_SX346 +MDLB0_SI1306 +MDLB0_SX136 +MDLB0_SX406 +MDLC0_SI1395 +MDLC0_SI2025 +MDLC1_SI1435 +MDLH0_SX160 +MDLH0_SX430 +MDLM0_SI604 +MDLR0_SX333 +MDLR1_SI669 +MDMA0_SX170 +MDMA0_SX350 +MDMA0_SX440 +MDNS0_SI1011 +MDNS0_SI873 +MDPB0_SI1760 +MDPB0_SI866 +MDRD0_SI752 +MDSJ0_SI1462 +MDSJ0_SX438 +MDWD0_SI1260 +MDWH0_SA1 +MDWH0_SI1168 +MDWH0_SI665 +MDWM0_SI916 +MEDR0_SI2004 +MEFG0_SI491 +MEFG0_SI598 +MEGJ0_SA1 +MEGJ0_SI1337 +MEGJ0_SI707 +MEGJ0_SX167 +MEJS0_SI1240 +MESG0_SI702 +MESJ0_SI2039 +MFWK0_SX349 +MFXS0_SX324 +MFXV0_SI1005 +MFXV0_SI1342 +MGAF0_SI1282 +MGAG0_SI691 +MGAK0_SI1036 +MGAK0_SX136 +MGAR0_SX312 +MGAW0_SI1165 +MGES0_SX311 +MGJC0_SX435 +MGRL0_SX327 +MGRP0_SI1317 +MGRP0_SX327 +MGSH0_SI1176 +MGSH0_SI546 +MGSL0_SI797 +MGXP0_SI1087 +MGXP0_SI525 +MHBS0_SI945 +MHIT0_SI983 +MHMG0_SI735 +MHMR0_SI1692 +MILB0_SI903 +MJAC0_SI701 +MJAC0_SX251 +MJAE0_SX84 +MJAI0_SI682 +MJAI0_SI710 +MJDC0_SI531 +MJDE0_SA1 +MJDE0_SI1120 +MJDE0_SI490 +MJDE0_SX220 +MJDM0_SI1340 +MJDM0_SX170 +MJDM0_SX350 +MJEB0_SX170 +MJEB1_SI1467 +MJEB1_SI837 +MJFR0_SA1 +MJFR0_SX435 +MJHI0_SI1328 +MJJJ0_SI1163 +MJJM0_SI1251 +MJLB0_SI1616 +MJLS0_SI1726 +MJMA0_SI2125 +MJMD0_SI2288 +MJMM0_SI1255 +MJMM0_SX175 +MJPG0_SI1821 +MJPM0_SI1368 +MJPM1_SX311 +MJRA0_SX336 +MJRG0_SI736 +MJRG0_SX352 +MJRH0_SI1840 +MJRH1_SI1558 +MJRK0_SI880 +MJRP0_SI1845 +MJSR0_SI2054 +MJSR0_SI794 +MJWG0_SI813 +MJWG0_SI895 +MJWG0_SX175 +MJWS0_SX333 +MJWT0_SI1291 +MJWT0_SI1381 +MJXL0_SI1172 +MKAG0_SI979 +MKAH0_SX178 +MKAM0_SI1250 
+MKAM0_SI1465 +MKDD0_SI1567 +MKDD0_SI2197 +MKDD0_SI937 +MKDT0_SI814 +MKES0_SI623 +MKLS0_SI1437 +MKLS0_SI2067 +MKLS1_SI915 +MKLW0_SI1571 +MKLW0_SX311 +MKRG0_SI861 +MKXL0_SI1815 +MKXL0_SI1958 +MLBC0_SI1239 +MLEL0_SI616 +MLEL0_SX166 +MLJC0_SI1225 +MLJH0_SA1 +MLJH0_SA2 +MLJH0_SI1422 +MLJH0_SI694 +MLJH0_SX244 +MLSH0_SI1417 +MLSH0_SX247 +MMAA0_SI1588 +MMAA0_SI845 +MMAB1_SI864 +MMAB1_SX324 +MMAG0_SA1 +MMAG0_SI1756 +MMAG0_SX136 +MMAR0_SI1966 +MMAR0_SX166 +MMAR0_SX346 +MMBS0_SI521 +MMBS0_SX161 +MMCC0_SI1338 +MMDB0_SI987 +MMDG0_SI1780 +MMDM0_SI1311 +MMDM1_SX153 +MMDM1_SX333 +MMEB0_SX327 +MMGC0_SI1305 +MMGG0_SI1079 +MMGG0_SX449 +MMLM0_SI2150 +MMPM0_SX161 +MMRP0_SX324 +MMSM0_SI1106 +MMSM0_SI476 +MMVP0_SI654 +MMVP0_SX347 +MMWB0_SA1 +MMWB0_SI2249 +MMWB0_SX359 +MMWB0_SX449 +MNTW0_SI1068 +MNTW0_SI1698 +MPEB0_SI600 +MPFU0_SI1258 +MPGH0_SI675 +MPGR0_SI1410 +MPGR1_SI1499 +MPMB0_SA1 +MPMB0_SA2 +MPMB0_SI1501 +MPMB0_SI2131 +MPMB0_SI871 +MPMB0_SX151 +MPMB0_SX331 +MPMB0_SX421 +MPMB0_SX61 +MPPC0_SI1412 +MPRB0_SI1215 +MPRB0_SI575 +MPRD0_SI801 +MPRD0_SX171 +MPRK0_SA1 +MPRK0_SI1097 +MPRK0_SI467 +MPRK0_SX287 +MRAB0_SI1854 +MRAB1_SI848 +MRAI0_SI2052 +MRAI0_SI792 +MRAI0_SX432 +MRAM0_SI1951 +MRCG0_SA2 +MRCG0_SI1428 +MRCG0_SX348 +MRCG0_SX438 +MRCW0_SI741 +MRDM0_SI1044 +MRDM0_SX335 +MREE0_SI1104 +MREE0_SI1959 +MREH1_SA1 +MREH1_SI1599 +MREH1_SI969 +MREM0_SI511 +MRFK0_SI1076 +MRFL0_SI1156 +MRFL0_SI526 +MRFL0_SX166 +MRGM0_SI532 +MRGM0_SX172 +MRGM0_SX442 +MRGS0_SI1356 +MRGS0_SI726 +MRGS0_SX6 +MRJB1_SI1413 +MRJB1_SI2021 +MRJB1_SX120 +MRJH0_SI1519 +MRJH0_SI889 +MRJH0_SX169 +MRJT0_SI868 +MRJT0_SX58 +MRKM0_SI1267 +MRKM0_SI1391 +MRKM0_SI637 +MRLJ0_SI790 +MRLJ1_SI2301 +MRLK0_SI1468 +MRLR0_SI1196 +MRML0_SA1 +MRML0_SI1421 +MRML0_SX161 +MRML0_SX251 +MRMS0_SI2057 +MRRE0_SA1 +MRRE0_SI1334 +MRRE0_SI952 +MRSO0_SI1206 +MRSP0_SI1429 +MRTC0_SI1458 +MRTJ0_SA1 +MRTJ0_SI772 +MRTJ0_SX142 +MRTJ0_SX232 +MRTJ0_SX52 +MRWS0_SI1102 +MRXB0_SI2215 +MRXB0_SI955 +MSAS0_SI1376 +MSAS0_SI746 +MSDH0_SI980 +MSDH0_SX170 +MSDS0_SI1077 +MSDS0_SX267 +MSDS0_SX357 +MSEM1_SI2070 +MSEM1_SI810 +MSFH0_SA1 +MSFH0_SI1738 +MSFH0_SX136 +MSFH0_SX406 +MSFV0_SI632 +MSJK0_SI1596 +MSJK0_SX336 +MSMC0_SI509 +MSMR0_SI1150 +MSMS0_SI1433 +MSRR0_SI1761 +MSRR0_SI501 +MSTF0_SI852 +MSVS0_SI2198 +MSVS0_SI938 +MSVS0_SX398 +MTAB0_SI1572 +MTAB0_SX312 +MTAT0_SA1 +MTAT0_SI1110 +MTAT0_SI811 +MTAT1_SI779 +MTAT1_SX149 +MTAT1_SX329 +MTBC0_SI543 +MTCS0_SI712 +MTDB0_SI1401 +MTDB0_SI771 +MTDP0_SA1 +MTDP0_SI1521 +MTDP0_SX171 +MTDP0_SX351 +MTER0_SA1 +MTER0_SI1157 +MTER0_SX437 +MTJG0_SX170 +MTJS0_SA2 +MTJS0_SI1822 +MTJS0_SI562 +MTJS0_SX382 +MTJU0_SI2020 +MTKD0_SI630 +MTKP0_SI2283 +MTKP0_SI454 +MTLB0_SI1134 +MTLB0_SX324 +MTLC0_SI1313 +MTLC0_SI1477 +MTML0_SX435 +MTMN0_SI582 +MTMT0_SI488 +MTPP0_SI1508 +MTPR0_SI2230 +MTPR0_SX160 +MTPR0_SX430 +MTQC0_SA1 +MTQC0_SI1441 +MTQC0_SX181 +MTQC0_SX451 +MTRC0_SI589 +MTRR0_SI918 +MTRT0_SI1227 +MTXS0_SI1060 +MTXS0_SI2320 +MTXS0_SX160 +MTXS0_SX430 +MVJH0_SI1556 +MVLO0_SI517 +MWAC0_SI1601 +MWAC0_SX161 +MWAC0_SX251 +MWAR0_SI1045 +MWDK0_SI1436 +MWEM0_SX420 +MWRE0_SA2 +MWRE0_SI1057 +MWRE0_SX67 +MWRP0_SI1443 +MWSB0_SI996 +MWSH0_SI1426 +MWSH0_SI796 +MWSH0_SX166 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/data/__init__.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/data/__init__.py new file mode 100644 index 0000000..d054562 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/data/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from .extracted_features_dataset import ExtractedFeaturesDataset +from .random_input_dataset import RandomInputDataset + + +__all__ = [ + "ExtractedFeaturesDataset", + "RandomInputDataset", +] diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/data/extracted_features_dataset.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/data/extracted_features_dataset.py new file mode 100644 index 0000000..7f7a58c --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/data/extracted_features_dataset.py @@ -0,0 +1,167 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + + +import logging +import os +import contextlib + +import numpy as np +import torch + +from fairseq.data import FairseqDataset, data_utils + + +logger = logging.getLogger(__name__) + + +class ExtractedFeaturesDataset(FairseqDataset): + def __init__( + self, + path, + split, + min_length=3, + max_length=None, + labels=None, + label_dict=None, + shuffle=True, + sort_by_length=True, + aux_target_postfix=None, + ): + super().__init__() + + self.min_length = min_length + self.max_length = max_length + self.shuffle = shuffle + self.sort_by_length = sort_by_length + self.label_dict = label_dict + + if labels is not None: + assert label_dict is not None + + self.sizes = [] + self.offsets = [] + self.labels = [] + self.aux_tgt = None + + path = os.path.join(path, split) + data_path = path + self.data = np.load(data_path + ".npy", mmap_mode="r") + + offset = 0 + skipped = 0 + + if not os.path.exists(path + f".{labels}"): + labels = None + + with open(data_path + ".lengths", "r") as len_f, open( + path + f".{labels}", "r" + ) if labels is not None else contextlib.ExitStack() as lbl_f: + for line in len_f: + length = int(line.rstrip()) + lbl = None if labels is None else next(lbl_f).rstrip().split() + if length >= min_length and ( + max_length is None or length <= max_length + ): + self.sizes.append(length) + self.offsets.append(offset) + if lbl is not None: + self.labels.append(lbl) + offset += length + + self.sizes = np.asarray(self.sizes) + self.offsets = np.asarray(self.offsets) + + if aux_target_postfix is not None: + if not os.path.exists(path + f".{aux_target_postfix}"): + logger.info(f"auxiliary target for {split} missing") + else: + with open(path + f".{aux_target_postfix}", "r") as t_f: + self.aux_tgt = [ + torch.LongTensor(list(map(int, seg.strip().split())))\ + for seg in t_f] + + logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples") + + def __getitem__(self, index): + offset = self.offsets[index] + end = self.sizes[index] + offset + feats = torch.from_numpy(self.data[offset:end].copy()).float() + + res = {"id": index, "features": feats} + if len(self.labels) > 0: + res["target"] = self.label_dict.encode_line( + self.labels[index], + line_tokenizer=lambda x: x, + append_eos=False, + ) + + if self.aux_tgt: + res["aux_target"] = self.aux_tgt[index] + + return res + + def __len__(self): + return len(self.sizes) + + def collater(self, samples): + if len(samples) == 0: + return {} + + features = [s["features"] for s in samples] + sizes = [len(s) for s in features] + + target_size = max(sizes) + + collated_features = features[0].new_zeros( + len(features), target_size, features[0].size(-1) + ) + padding_mask =
torch.BoolTensor(collated_features.shape[:-1]).fill_(False) + for i, (f, size) in enumerate(zip(features, sizes)): + collated_features[i, :size] = f + padding_mask[i, size:] = True + + res = { + "id": torch.LongTensor([s["id"] for s in samples]), + "net_input": {"features": collated_features, "padding_mask": padding_mask}, + } + + if len(self.labels) > 0: + target = data_utils.collate_tokens( + [s["target"] for s in samples], + pad_idx=self.label_dict.pad(), + left_pad=False, + ) + res["target"] = target + + if self.aux_tgt: + idxs = torch.nn.utils.rnn.pad_sequence( + [s["aux_target"] for s in samples], + batch_first=True, + padding_value=-1, + ) + res["net_input"]["aux_target"] = idxs + + return res + + def num_tokens(self, index): + return self.size(index) + + def size(self, index): + return self.sizes[index] + + def ordered_indices(self): + """Return an ordered list of indices. Batches will be constructed based + on this order.""" + if self.shuffle: + order = [np.random.permutation(len(self))] + else: + order = [np.arange(len(self))] + + if self.sort_by_length: + order.append(self.sizes) + return np.lexsort(order)[::-1] + else: + return order[0] diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/data/random_input_dataset.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/data/random_input_dataset.py new file mode 100644 index 0000000..8865056 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/data/random_input_dataset.py @@ -0,0 +1,62 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import random +from typing import List + +from fairseq.data import BaseWrapperDataset, data_utils + + +class RandomInputDataset(BaseWrapperDataset): + def __init__( + self, + dataset, + random_input_dataset, + input_key_path: List[str], + add_to_input, + pad_idx, + ): + super().__init__(dataset) + self.random_input_dataset = random_input_dataset + if isinstance(input_key_path, str): + input_key_path = [input_key_path] + assert len(input_key_path) > 0 + self.input_key_path = input_key_path + self.add_to_input = add_to_input + self.pad_idx = pad_idx + + def get_target(self, item): + target_loc = item + for p in self.input_key_path[:-1]: + target_loc = target_loc[p] + return self.input_key_path[-1], target_loc + + def get_target_value(self, item): + k, target_loc = self.get_target(item) + return target_loc[k] + + def __getitem__(self, index): + item = self.dataset[index] + k, target_loc = self.get_target(item) + target_loc[k] = random.choice(self.random_input_dataset) + return item + + def collater(self, samples): + collated = self.dataset.collater(samples) + if len(collated) == 0: + return collated + indices = set(collated["id"].tolist()) + + random_inputs = data_utils.collate_tokens( + [self.get_target_value(s) for s in samples if s["id"] in indices], + pad_idx=self.pad_idx, + left_pad=False, + ) + k, target_loc = self.get_target( + collated if not self.add_to_input else collated["net_input"] + ) + target_loc[k] = random_inputs + + return collated diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/README.md b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/README.md new file mode 100644 index 0000000..314984f --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/README.md @@ -0,0 +1,56 @@ +# Self-Training with Kaldi HMM Models +This folder contains recipes for 
self-training on pseudo phone transcripts and
+decoding into phones or words with [kaldi](https://github.com/kaldi-asr/kaldi).
+
+To start, download and install Kaldi following its instructions, and place this
+folder in `path/to/kaldi/egs`.
+
+## Training
+Assuming the following have been prepared:
+- `w2v_dir`: contains features `{train,valid}.{npy,lengths}`, real transcripts `{train,valid}.${label}`, and dict `dict.${label}.txt`
+- `lab_dir`: contains pseudo labels `{train,valid}.txt`
+- `arpa_lm`: Arpa-format n-gram phone LM for decoding
+- `arpa_lm_bin`: n-gram phone LM in KenLM binary format for unsupervised model selection
+
+Set these variables in `train.sh`, as well as `out_dir`, the output directory,
+and then run it.
+
+The output will be:
+```
+==== WER w.r.t. real transcript (select based on unsupervised metric)
+INFO:root:./out/exp/mono/decode_valid/scoring/14.0.0.tra.txt: score 0.9178 wer 28.71% lm_ppl 24.4500 gt_wer 25.57%
+INFO:root:./out/exp/tri1/decode_valid/scoring/17.1.0.tra.txt: score 0.9257 wer 26.99% lm_ppl 30.8494 gt_wer 21.90%
+INFO:root:./out/exp/tri2b/decode_valid/scoring/8.0.0.tra.txt: score 0.7506 wer 23.15% lm_ppl 25.5944 gt_wer 15.78%
+```
+where `wer` is the word error rate with respect to the pseudo label, `gt_wer`
+the word error rate with respect to the ground truth label, `lm_ppl` the
+language model perplexity of the HMM-predicted transcripts, and `score` the
+unsupervised metric for model selection. We select the model and the LM
+parameter with the lowest score. In the example above, it is `tri2b`, `8.0.0`.
+
+
+## Decoding into Phones
+In `decode_phone.sh`, set `out_dir` the same as used in `train.sh`, and set
+`dec_exp` and `dec_lmparam` to the selected model and LM parameter (e.g.
+`tri2b` and `8.0.0` in the above example). `dec_script` needs to be set
+according to `dec_exp`: for mono/tri1/tri2b, use `decode.sh`; for tri3b, use
+`decode_fmllr.sh`.
+
+The output will be saved at `out_dir/dec_data`.
+
+
+## Decoding into Words
+`decode_word_step1.sh` prepares WFSTs for word decoding. Besides the variables
+mentioned above, set
+- `wrd_arpa_lm`: Arpa-format n-gram word LM for decoding
+- `wrd_arpa_lm_bin`: n-gram word LM in KenLM binary format for unsupervised model selection
+
+`decode_word_step1.sh` decodes the `train` and `valid` splits into words and
+runs unsupervised model selection using the `valid` split. The output is like:
+```
+INFO:root:./out/exp/tri2b/decodeword_valid/scoring/17.0.0.tra.txt: score 1.8693 wer 24.97% lm_ppl 1785.5333 gt_wer 31.45%
+```
+
+After determining the LM parameter (`17.0.0` in the example above), set it in
+`decode_word_step2.sh` and run it. The output will be saved at
+`out_dir/dec_data_word`.
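+
+As a sanity check, the `score` values logged above are consistent with
+`ln(lm_ppl) * wer` (the metric implemented in `local/unsup_select.py`); a
+minimal sketch that reproduces the selection from the logged numbers:
+
+```python
+import math
+
+# (wer, lm_ppl) pairs copied from the example output above
+candidates = {
+    "mono/14.0.0": (0.2871, 24.4500),
+    "tri1/17.1.0": (0.2699, 30.8494),
+    "tri2b/8.0.0": (0.2315, 25.5944),
+}
+
+# lower is better: log-domain LM perplexity weighted by pseudo-label WER
+scores = {k: math.log(ppl) * wer for k, (wer, ppl) in candidates.items()}
+print(min(scores, key=scores.get))  # -> tri2b/8.0.0 (score ~0.7506)
+```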
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh
new file mode 100644
index 0000000..e749531
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh
@@ -0,0 +1,15 @@
+# You can change cmd.sh depending on what type of queue you are using.
+# If you have no queueing system and want to run on a local machine, you
+# can change all instances of 'queue.pl' to 'run.pl' (but be careful and run
+# commands one by one: most recipes will exhaust the memory on your
+# machine). queue.pl works with GridEngine (qsub); slurm.pl works
+# with Slurm. Different queues are configured differently, with different
+# queue names and different ways of specifying things like memory;
+# to account for these differences you can create and edit the file
+# conf/queue.conf to match your queue's configuration. Search for
+# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information,
+# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl.
+
+export train_cmd="run.pl --mem 2G"
+export decode_cmd="run.pl --mem 4G"
+export mkgraph_cmd="run.pl --mem 8G"
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_phone.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_phone.sh
new file mode 100644
index 0000000..947342a
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_phone.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# decode into phones (and prepare a new data directory for HMM outputs)
+
+. ./path.sh
+
+set -eu
+
+out_dir=  # same as in train.sh
+dec_lmparam=  # LM hyperparameters (e.g., 7.0.0)
+dec_exp=
+dec_script=
+dec_splits="train valid"
+dec_data_dir=$out_dir/dec_data  # where to write HMM output
+
+data_dir=${out_dir}/data
+
+local/decode.sh --nj 40 --graph_name graph \
+  --val_sets "$dec_splits" --decode_script $dec_script \
+  $out_dir/exp/$dec_exp $data_dir $data_dir/lang_test
+
+if [ ! -z $dec_lmparam ]; then
+  for x in $dec_splits; do
+    mkdir -p $dec_data_dir/$x
+    cp $data_dir/$x/{feats.scp,cmvn.scp,utt2spk,spk2utt} $dec_data_dir/$x/
+
+    tra=$out_dir/exp/$dec_exp/decode_${x}/scoring/${dec_lmparam}.tra
+    cat $tra | utils/int2sym.pl -f 2- $data_dir/lang/words.txt | \
+      sed 's:<UNK>::g' | sed 's:<SIL>::g' > $dec_data_dir/${x}/text
+    utils/fix_data_dir.sh $dec_data_dir/${x}
+    echo "WER on ${x} is" $(compute-wer ark:$data_dir/${x}_gt/text ark:$dec_data_dir/$x/text | cut -d" " -f2-)
+  done
+fi
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step1.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step1.sh
new file mode 100644
index 0000000..c1276bb
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step1.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# prepare word WFSTs, reference data, and decode
+
+set -eu
+
+w2v_dir=  # same as in train.sh
+out_dir=  # same as in train.sh
+lexicon=  # word to phone mapping
+wrd_arpa_lm=  # word LM
+wrd_arpa_lm_bin=  # word LM for KenLM, used in unsupervised selection
+
+dec_exp=  # what HMM stage to decode (e.g., tri3b)
+dec_script=  # what decoding script to use (e.g., steps/decode_fmllr.sh)
+phn_label=phnc
+wrd_label=wrd
+dec_suffix=word
+dec_splits="train valid"
+valid_split="valid"
+
+data_dir=$out_dir/data
+wrd_data_dir=$out_dir/data_word
+
+lexicon_clean=$(mktemp)
+cat $lexicon | sort | uniq > $lexicon_clean
+local/prepare_lang_word.sh $w2v_dir/dict.${phn_label}.txt $data_dir $lexicon_clean && rm $lexicon_clean
+local/prepare_lm.sh --langdir $data_dir/lang_word --lmdir $data_dir/lang_test_word $wrd_arpa_lm $data_dir
+
+for x in $dec_splits; do
+  x_gt=${x}_gt
+  mkdir -p $wrd_data_dir/$x_gt
+  cp $data_dir/$x_gt/{feats.scp,cmvn.scp,utt2spk,spk2utt} $wrd_data_dir/$x_gt/
+  python local/copy_aligned_text.py < $w2v_dir/$x.$wrd_label > $wrd_data_dir/$x_gt/text
+done
+
+local/decode.sh --nj 40 --graph_name graph${dec_suffix} --decode_suffix $dec_suffix \
+  --val_sets "$dec_splits" --decode_script $dec_script \
+  $out_dir/exp/$dec_exp $data_dir $data_dir/lang_test_word
+
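+# Unsupervised model selection on the valid split (below): each LM weight is
+# scored with ln(lm_ppl) * WER against the pseudo transcript (see
+# local/unsup_select.py), so no ground-truth transcript is needed to pick
+# the decoding hyperparameters.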
+local/unsup_select_decode_word.sh \
+  --split $valid_split --kenlm_path $wrd_arpa_lm_bin \
+  --ref_txt $wrd_data_dir/${valid_split}_gt/text \
+  --psd_txt $data_dir/${valid_split}/text \
+  --dec_name decode${dec_suffix} --graph_name graph${dec_suffix} \
+  --phonemize_lexicon $data_dir/local/dict_word/lexicon.txt \
+  $out_dir/exp
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step2.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step2.sh
new file mode 100644
index 0000000..59a6cbb
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_word_step2.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# prepare a new data directory of HMM word output
+
+. ./path.sh
+
+set -eu
+
+out_dir=  # same as in train.sh
+dec_lmparam=  # LM hyperparameters (e.g., 7.0.0)
+
+dec_exp=tri3b  # what HMM stage to decode (e.g., tri3b)
+dec_suffix=word
+dec_splits="train valid"
+dec_data_dir=$out_dir/dec_data_word  # where to write HMM output
+
+data_dir=$out_dir/data
+wrd_data_dir=$out_dir/data_word
+
+for x in $dec_splits; do
+  mkdir -p $dec_data_dir/$x
+  cp $data_dir/$x/{feats.scp,cmvn.scp,utt2spk,spk2utt} $dec_data_dir/$x/
+
+  tra=$out_dir/exp/$dec_exp/decode${dec_suffix}_${x}/scoring/${dec_lmparam}.tra
+  cat $tra | utils/int2sym.pl -f 2- $data_dir/lang_word/words.txt | \
+    sed 's:<UNK>::g' | sed 's:<SIL>::g' > $dec_data_dir/$x/text
+  utils/fix_data_dir.sh $dec_data_dir/$x
+  echo "WER on $x is" $(compute-wer ark:$wrd_data_dir/${x}_gt/text ark:$dec_data_dir/$x/text | cut -d" " -f2-)
+done
+
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py
new file mode 100644
index 0000000..5f4faa9
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py
@@ -0,0 +1,4 @@
+import sys
+
+for idx, line in enumerate(sys.stdin):
+    print(f"utt{idx:010d} {line}", end='')
\ No newline at end of file
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh
new file mode 100755
index 0000000..811cb63
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/decode.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -u
+
+val_sets="dev_other"
+graph_name=graph
+decode_suffix=""
+decode_script="steps/decode_fmllr.sh"
+decode_args=""
+nj=60
+
+. ./cmd.sh
+. ./path.sh
+. parse_options.sh
+
+set -x
+exp_dir=$1
+data_root=$2
+lang_test=$3
+
+graph=$exp_dir/$graph_name
+
+if [ ! -d $graph ]; then
+  utils/mkgraph.sh $lang_test $exp_dir $graph
+fi
+
+for part in $val_sets; do
+  dec_dir=$exp_dir/decode${decode_suffix}_${part}
+  if [ ! -d $dec_dir ]; then
+    echo "decoding $part for $exp_dir"
+    $decode_script --nj $nj --cmd "$decode_cmd" $decode_args \
+      $graph $data_root/$part $dec_dir &
+  else
+    echo "$dec_dir exists. 
skip" + fi +done + +wait diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py new file mode 100644 index 0000000..66954ea --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py @@ -0,0 +1,56 @@ +import kaldi_io +import numpy as np +import os + + +def get_parser(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument("w2v_dir", help="wav2vec feature and text directory") + parser.add_argument("tar_root", help="output data directory in kaldi's format") + parser.add_argument("split", help="name of the subset") + parser.add_argument("--label", default="", help="if specified, copy labels too") + return parser + +def main(): + parser = get_parser() + args = parser.parse_args() + + tar_dir = os.path.join(args.tar_root, args.split) + os.makedirs(tar_dir, exist_ok=True) + + lengths_path = os.path.join(args.w2v_dir, f"{args.split}.lengths") + with open(lengths_path) as f: + lengths = [int(line.rstrip()) for line in f] + offsets = [0] + np.cumsum(lengths[:-1]).tolist() + feats = np.load( + os.path.join(args.w2v_dir, f"{args.split}.npy"), + mmap_mode="r" + ) + assert feats.shape[0] == sum(lengths), \ + f"lengths mismatch {feats.shape[0]} != {sum(lengths)}" + + ark_path = os.path.join(tar_dir, "feats.ark") + scp_path = os.path.join(tar_dir, "feats.scp") + wspec = f"ark:| copy-feats --compress=true ark:- ark,scp:{ark_path},{scp_path}" + with kaldi_io.open_or_fd(wspec, "wb") as f: + for idx, (offset, length) in enumerate(zip(offsets, lengths)): + feat = feats[offset:offset+length] + kaldi_io.write_mat(f, feat, key=f"utt{idx:010d}") + + u2s_path = os.path.join(tar_dir, "utt2spk") + s2u_path = os.path.join(tar_dir, "spk2utt") + with open(u2s_path, "w") as f_u2s, open(s2u_path, "w") as f_s2u: + for idx in range(len(lengths)): + f_u2s.write(f"utt{idx:010d} utt{idx:010d}\n") + f_s2u.write(f"utt{idx:010d} utt{idx:010d}\n") + + if bool(args.label): + lab_path = os.path.join(args.w2v_dir, f"{args.split}.{args.label}") + txt_path = os.path.join(tar_dir, "text") + with open(lab_path) as f_lab, open(txt_path, "w") as f_txt: + for idx, line in enumerate(f_lab): + f_txt.write(f"utt{idx:010d} {line}") + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang.sh new file mode 100755 index 0000000..e9a8000 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +sil_prob=0.5 +num_sil_states=3 +num_nonsil_states=1 + +. ./cmd.sh +. ./path.sh +. 
parse_options.sh
+
+set -eux
+
+dict=$1
+data_dir=$2
+
+dict_dir=$data_dir/local/dict
+tmplm_dir=$data_dir/local/lang_tmp
+lm_dir=$data_dir/lang
+
+mkdir -p $dict_dir $tmplm_dir $lm_dir
+
+# prepare dict
+echo "SIL" > $dict_dir/silence_phones.txt
+echo "SIL" > $dict_dir/optional_silence.txt
+awk '{print $1}' $dict > $dict_dir/nonsilence_phones.txt
+
+echo "SIL SIL" > $dict_dir/lexicon.txt
+echo "<UNK> SIL" >> $dict_dir/lexicon.txt
+awk '{print $1" "$1}' $dict >> $dict_dir/lexicon.txt
+
+echo "SIL" > $dict_dir/extra_questions.txt
+awk '{printf $1" "} END {printf "\n"}' $dict >> $dict_dir/extra_questions.txt
+
+# prepare lang
+utils/prepare_lang.sh --sil-prob $sil_prob --position-dependent-phones false \
+  --num_sil_states $num_sil_states --num_nonsil_states $num_nonsil_states \
+  $dict_dir "<UNK>" $tmplm_dir $lm_dir
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh
new file mode 100755
index 0000000..a7ea387
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+num_sil_states=3
+num_nonsil_states=1
+
+. ./cmd.sh
+. ./path.sh
+. parse_options.sh
+
+set -eux
+
+dict=$1
+data_dir=$2
+lexicon=$3
+
+dict_dir=$data_dir/local/dict_word
+tmplm_dir=$data_dir/local/lang_tmp_word
+lm_dir=$data_dir/lang_word
+
+mkdir -p $dict_dir $tmplm_dir $lm_dir
+
+# prepare dict
+echo "SIL" > $dict_dir/silence_phones.txt
+echo "SIL" > $dict_dir/optional_silence.txt
+awk '{print $1}' $dict > $dict_dir/nonsilence_phones.txt
+
+(echo "!SIL SIL"; echo "<UNK> SIL";) | cat - $lexicon > $dict_dir/lexicon.txt
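+# (illustrative) each lexicon entry maps a word to its phone sequence, e.g.:
+#   water  W AO T ER
+# '!SIL' and '<UNK>' are prepended so silence and unknown words stay decodable.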
run local/prepare_lang.sh first" && exit 1 +fi + +mkdir -p $lmdir +cp -r $langdir/* $lmdir + +if [[ "$arpa_lm" == *.gz ]]; then + gunzip -c $arpa_lm | arpa2fst --disambig-symbol=#0 --read-symbol-table=$lmdir/words.txt - $lmdir/G.fst +else + arpa2fst --disambig-symbol=#0 --read-symbol-table=$lmdir/words.txt $arpa_lm $lmdir/G.fst +fi +fstisstochastic $lmdir/G.fst +utils/validate_lang.pl $lmdir || exit 1 + +echo "done preparing lm ($lmdir)" diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/score.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/score.sh new file mode 100755 index 0000000..cb5bbb7 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/score.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# 2014 Guoguo Chen +# Apache 2.0 + +[ -f ./path.sh ] && . ./path.sh + +# begin configuration section. +cmd=run.pl +stage=0 +decode_mbr=true +word_ins_penalty=0.0,0.5,1.0 +min_lmwt=7 +max_lmwt=17 +iter=final +#end configuration section. + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: local/score.sh [--cmd (run.pl|queue.pl...)] " + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + echo " --stage (0|1|2) # start scoring script from part-way through." + echo " --decode_mbr (true/false) # maximum bayes risk decoding (confusion network)." + echo " --min_lmwt # minumum LM-weight for lattice rescoring " + echo " --max_lmwt # maximum LM-weight for lattice rescoring " + exit 1; +fi + +data=$1 +lang_or_graph=$2 +dir=$3 + +symtab=$lang_or_graph/words.txt + +for f in $symtab $dir/lat.1.gz $data/text; do + [ ! -f $f ] && echo "score.sh: no such file $f" && exit 1; +done + +mkdir -p $dir/scoring/log + +cat $data/text | sed 's:::g' | sed 's:::g' > $dir/scoring/test_filt.txt + +for wip in $(echo $word_ins_penalty | sed 's/,/ /g'); do + $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/best_path.LMWT.$wip.log \ + lattice-scale --inv-acoustic-scale=LMWT "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \ + lattice-add-penalty --word-ins-penalty=$wip ark:- ark:- \| \ + lattice-best-path --word-symbol-table=$symtab \ + ark:- ark,t:$dir/scoring/LMWT.$wip.tra || exit 1; +done + +# Note: the double level of quoting for the sed command +for wip in $(echo $word_ins_penalty | sed 's/,/ /g'); do + $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/score.LMWT.$wip.log \ + cat $dir/scoring/LMWT.$wip.tra \| \ + utils/int2sym.pl -f 2- $symtab \| sed 's:\::g' \| \ + compute-wer --text --mode=present \ + ark:$dir/scoring/test_filt.txt ark,p:- ">&" $dir/wer_LMWT_$wip || exit 1; +done + +exit 0; diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/show_wer.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/show_wer.sh new file mode 100755 index 0000000..9ecf169 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/show_wer.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +split="dev_other" +ref_data="" +get_best_wer=true +dec_name="decode" +graph_name="graph" + +. ./cmd.sh +. ./path.sh +. parse_options.sh + +exp_root=$1 + +set -eu + +echo "==== WER w.r.t. pseudo transcript" +for x in $exp_root/*/${dec_name}_${split}*; do grep WER $x/wer_* 2>/dev/null | utils/best_wer.sh; done + + +if [ ! -z $ref_data ]; then + echo "==== WER w.r.t. 
real transcript (select based on pseudo WER)" + ref_txt=$ref_data/$split/text + for x in $exp_root/*/${dec_name}_${split}*; do + lang=$(dirname $x)/$graph_name + + lmwt=$( + grep WER $x/wer_* 2>/dev/null | utils/best_wer.sh | + sed 's/.*wer_\(.*\)$/\1/g' | sed 's/_/./g' + ) + tra=$x/scoring/$lmwt.tra + cat $tra | utils/int2sym.pl -f 2- $lang/words.txt | sed 's:::g' | sed 's:::g' | \ + compute-wer --text --mode=present \ + ark:$ref_txt ark,p:- 2> /dev/null | grep WER | xargs -I{} echo {} $tra + done +fi + +if [ ! -z $ref_data ] && $get_best_wer; then + echo "==== WER w.r.t. real transcript (select based on true WER)" + ref_txt=$ref_data/$split/text + for x in $exp_root/*/${dec_name}_${split}*; do + lang=$(dirname $x)/$graph_name + + for tra in $x/scoring/*.tra; do + cat $tra | utils/int2sym.pl -f 2- $lang/words.txt | sed 's:::g' | sed 's:::g' | \ + compute-wer --text --mode=present \ + ark:$ref_txt ark,p:- 2> /dev/null | grep WER | xargs -I{} echo {} $tra + done | sort -k2n | head -n1 + done +fi + +exit 0; diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/train_subset_lgbeam.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/train_subset_lgbeam.sh new file mode 100755 index 0000000..913c1d8 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/train_subset_lgbeam.sh @@ -0,0 +1,129 @@ +#!/usr/bin/env bash + +out_root=/tmp +out_name=train_${RANDOM} +num_nonsil_states=1 + +valid="dev_other" +train="train" +mono_size="-1" # 2000 +tri1_size="-1" # 5000 +tri2b_size="-1" # 10000 +tri3b_size="-1" # 10000 + +# Acoustic model parameters +numLeavesTri1=2000 +numGaussTri1=10000 +numLeavesMLLT=2500 +numGaussMLLT=15000 +numLeavesSAT=2500 +numGaussSAT=15000 + +stage=1 +max_stage=1 + +. ./cmd.sh +. ./path.sh +. parse_options.sh + +data=$1 +lang=$2 +lang_test=$3 + +exp_root=$out_root/$out_name + +# you might not want to do this for interactive shells. +set -e + + +if [ $stage -le 1 ] && [ $max_stage -ge 1 ]; then + # train a monophone system + if [ ! $mono_size -eq -1 ]; then + utils/subset_data_dir.sh $data/$train $mono_size $data/${train}_${mono_size} + mono_train=${train}_${mono_size} + else + mono_train=${train} + fi + + steps/train_mono.sh --boost-silence 1.25 --nj 20 --cmd "$train_cmd" \ + --initial-beam 40 --regular-beam 60 --retry-beam 120 \ + $data/$mono_train $lang $exp_root/mono + + utils/mkgraph.sh $lang_test $exp_root/mono $exp_root/mono/graph + steps/decode.sh --nj 20 --cmd "$decode_cmd" \ + $exp_root/mono/graph $data/$valid $exp_root/mono/decode_$valid & +fi + + +if [ $stage -le 2 ] && [ $max_stage -ge 2 ]; then + # train a first delta + delta-delta triphone system on a subset of 5000 utterances + if [ ! 
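+
+# Example invocation (cf. train.sh; paths are placeholders):
+#   local/train_subset_lgbeam.sh --out_root ./out --out_name exp \
+#     --mono_size 2000 --tri1_size 5000 --stage 1 --max_stage 3 \
+#     out/data out/data/lang out/data/lang_test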
+
+data=$1
+lang=$2
+lang_test=$3
+
+exp_root=$out_root/$out_name
+
+# you might not want to do this for interactive shells.
+set -e
+
+
+if [ $stage -le 1 ] && [ $max_stage -ge 1 ]; then
+  # train a monophone system
+  if [ ! $mono_size -eq -1 ]; then
+    utils/subset_data_dir.sh $data/$train $mono_size $data/${train}_${mono_size}
+    mono_train=${train}_${mono_size}
+  else
+    mono_train=${train}
+  fi
+
+  steps/train_mono.sh --boost-silence 1.25 --nj 20 --cmd "$train_cmd" \
+    --initial-beam 40 --regular-beam 60 --retry-beam 120 \
+    $data/$mono_train $lang $exp_root/mono
+
+  utils/mkgraph.sh $lang_test $exp_root/mono $exp_root/mono/graph
+  steps/decode.sh --nj 20 --cmd "$decode_cmd" \
+    $exp_root/mono/graph $data/$valid $exp_root/mono/decode_$valid &
+fi
+
+
+if [ $stage -le 2 ] && [ $max_stage -ge 2 ]; then
+  # train a first delta + delta-delta triphone system on a subset of 5000 utterances
+  if [ ! $tri1_size -eq -1 ]; then
+    utils/subset_data_dir.sh $data/$train $tri1_size $data/${train}_${tri1_size}
+    tri1_train=${train}_${tri1_size}
+  else
+    tri1_train=${train}
+  fi
+
+  steps/align_si.sh --boost-silence 1.25 --nj 10 --cmd "$train_cmd" \
+    $data/$tri1_train $lang \
+    $exp_root/mono $exp_root/mono_ali_${tri1_train}
+
+  steps_gan/train_deltas.sh --boost-silence 1.25 --cmd "$train_cmd" \
+    --num_nonsil_states $num_nonsil_states $numLeavesTri1 $numGaussTri1 \
+    $data/$tri1_train $lang \
+    $exp_root/mono_ali_${tri1_train} $exp_root/tri1
+
+  utils/mkgraph.sh $lang_test $exp_root/tri1 $exp_root/tri1/graph
+  steps/decode.sh --nj 20 --cmd "$decode_cmd" \
+    $exp_root/tri1/graph $data/$valid $exp_root/tri1/decode_$valid &
+fi
+
+if [ $stage -le 3 ] && [ $max_stage -ge 3 ]; then
+  # train an LDA+MLLT system.
+  if [ ! $tri2b_size -eq -1 ]; then
+    utils/subset_data_dir.sh $data/$train $tri2b_size $data/${train}_${tri2b_size}
+    tri2b_train=${train}_${tri2b_size}
+  else
+    tri2b_train=${train}
+  fi
+
+  steps/align_si.sh --nj 10 --cmd "$train_cmd" \
+    $data/$tri2b_train $lang \
+    $exp_root/tri1 $exp_root/tri1_ali_${tri2b_train}
+
+  steps_gan/train_lda_mllt.sh --cmd "$train_cmd" \
+    --num_nonsil_states $num_nonsil_states \
+    --splice-opts "--left-context=3 --right-context=3" $numLeavesMLLT $numGaussMLLT \
+    $data/$tri2b_train $lang \
+    $exp_root/tri1_ali_${tri2b_train} $exp_root/tri2b
+
+  utils/mkgraph.sh $lang_test $exp_root/tri2b $exp_root/tri2b/graph
+  steps/decode.sh --nj 20 --cmd "$decode_cmd" \
+    $exp_root/tri2b/graph $data/$valid $exp_root/tri2b/decode_$valid &
+fi
+
+
+if [ $stage -le 4 ] && [ $max_stage -ge 4 ]; then
+  # Train tri3b, which is LDA+MLLT+SAT on 10k utts
+  if [ ! $tri3b_size -eq -1 ]; then
+    utils/subset_data_dir.sh $data/$train $tri3b_size $data/${train}_${tri3b_size}
+    tri3b_train=${train}_${tri3b_size}
+  else
+    tri3b_train=${train}
+  fi
+
+  steps/align_si.sh --nj 10 --cmd "$train_cmd" --use-graphs true \
+    $data/$tri3b_train $lang \
+    $exp_root/tri2b $exp_root/tri2b_ali_${tri2b_train}
+
+  steps_gan/train_sat.sh --cmd "$train_cmd" \
+    --num_nonsil_states $num_nonsil_states $numLeavesSAT $numGaussSAT \
+    $data/$tri3b_train $lang \
+    $exp_root/tri2b_ali_${tri2b_train} $exp_root/tri3b
+
+  utils/mkgraph.sh $lang_test $exp_root/tri3b $exp_root/tri3b/graph
+  steps/decode_fmllr.sh --nj 20 --cmd "$decode_cmd" \
+    $exp_root/tri3b/graph $data/$valid $exp_root/tri3b/decode_$valid &
+fi
+
+wait
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select.py
new file mode 100644
index 0000000..1122c88
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select.py
@@ -0,0 +1,135 @@
+"""
+Implement the unsupervised metric for decoding hyperparameter selection:
+    score = ln(LM_PPL) * max(ViterbiUER, min_vt_uer)
+where a lower score is better.
+"""
+import argparse
+import logging
+import math
+import sys
+
+import kenlm
+import editdistance
+from g2p_en import G2p
+
+logging.root.setLevel(logging.INFO)
+logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+def get_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("ref_tra", help="reference pseudo labels")
+    parser.add_argument("hyp_tra", help="decoded pseudo labels to be assessed")
+    parser.add_argument("--kenlm_path", default="/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o5.bin", help="")
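+    # NOTE: the default --kenlm_path above points to a private checkpoint
+    # and must be overridden with your own KenLM phone LM binary.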
parser.add_argument("--uppercase", action="store_true", help="") + parser.add_argument("--skipwords", default="", help="") + parser.add_argument("--gt_tra", default="", help="ground truth pseudo labels for computing oracle WER") + parser.add_argument("--min_vt_uer", default=0.0, type=float) + parser.add_argument("--phonemize", action="store_true", help="phonemize word hypotheses, used when reference is phone transcript") + parser.add_argument("--phonemize_lexicon", default="", type=str, help="use a lexicon for phonemizing") + return parser + +def load_tra(tra_path): + with open(tra_path, "r") as f: + uid_to_tra = {} + for line in f: + toks = line.rstrip().split() + uid, tra = toks[0], " ".join(toks[1:]) + uid_to_tra[uid] = tra + logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}") + return uid_to_tra + +def load_lex(lex_path): + with open(lex_path, "r") as f: + w2p = {} + for line in f: + w, p = line.rstrip().split(None, 1) + w2p[w] = p.split() + return w2p + +def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict): + d_cnt = 0 + w_cnt = 0 + w_cnt_h = 0 + for uid in hyp_uid_to_tra: + ref = ref_uid_to_tra[uid].split() + if g2p_dict is not None: + hyp = [] + for word in hyp_uid_to_tra[uid].split(): + if word in g2p_dict: + hyp = hyp + g2p_dict[word] + else: + logger.warning(f"{word} not in g2p_dict") + elif g2p is not None: + hyp = g2p(hyp_uid_to_tra[uid]) + hyp = [p for p in hyp if p != "'" and p != " "] + hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp] + else: + hyp = hyp_uid_to_tra[uid].split() + logger.debug(( + f"======================\n" + f"HYP: {' '.join(hyp)}\n" + f"REF: {' '.join(ref)}" + )) + d_cnt += editdistance.eval(ref, hyp) + w_cnt += len(ref) + w_cnt_h += len(hyp) + wer = float(d_cnt) / w_cnt + logger.debug(( + f"wer = {wer*100:.2f}%; num. of ref words = {w_cnt}; " + f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}" + )) + return wer + +def compute_lm_ppl(hyp_uid_to_tra, score_fn): + lm_score = 0. + w_cnt = 0 + for hyp in hyp_uid_to_tra.values(): + cur_score = score_fn(hyp) + cur_cnt = len(hyp.split()) + 1 # plus one for + lm_score += cur_score + w_cnt += cur_cnt + logger.debug(( + f"======================\n" + f"score sum/avg = {cur_score:.2f}/{cur_score/cur_cnt:.2f}\n" + f"hyp = {hyp}" + )) + lm_ppl = math.pow(10, -lm_score / w_cnt) + logger.debug(f"lm ppl = {lm_ppl:.2f}; num. 
of words = {w_cnt}") + return lm_ppl + +def main(): + args = get_parser().parse_args() + logger.debug(f"Args: {args}") + + ref_uid_to_tra = load_tra(args.ref_tra) + hyp_uid_to_tra = load_tra(args.hyp_tra) + assert not bool(set(hyp_uid_to_tra.keys()) - set(ref_uid_to_tra.keys())) + + lm = kenlm.Model(args.kenlm_path) + skipwords = set(args.skipwords.split(",")) + def compute_lm_score(s): + s = " ".join(w for w in s.split() if w not in skipwords) + s = s.upper() if args.uppercase else s + return lm.score(s) + + g2p, g2p_dict = None, None + if args.phonemize: + if args.phonemize_lexicon: + g2p_dict = load_lex(args.phonemize_lexicon) + else: + g2p = G2p() + + wer = compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict) + lm_ppl = compute_lm_ppl(hyp_uid_to_tra, compute_lm_score) + + gt_wer = -math.inf + if args.gt_tra: + gt_uid_to_tra = load_tra(args.gt_tra) + gt_wer = compute_wer(gt_uid_to_tra, hyp_uid_to_tra, None, None) + + score = math.log(lm_ppl) * max(wer, args.min_vt_uer) + logging.info(f"{args.hyp_tra}: score={score:.4f}; wer={wer*100:.2f}%; lm_ppl={lm_ppl:.4f}; gt_wer={gt_wer*100:.2f}%") + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode.sh new file mode 100755 index 0000000..b34c5b6 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +split="dev_other" +ref_txt="" # ground truth transcript path +psd_txt="" # pseudo transcript path +get_best_wer=true +dec_name="decode" +graph_name="graph" +kenlm_path=/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o6.bin + +. ./cmd.sh +. ./path.sh +. parse_options.sh + +exp_root=$1 +unsup_args="" +if [ $# -ge 2 ]; then + unsup_args=$2 +fi + +set -eu + +if [ ! -z $ref_txt ] && $get_best_wer; then + echo "==== WER w.r.t. real transcript (select based on unsupervised metric)" + for x in $exp_root/*/${dec_name}_${split}*; do + lang=$(dirname $x)/$graph_name + + ( + for tra in $x/scoring/*.tra; do + cat $tra | utils/int2sym.pl -f 2- $lang/words.txt | sed 's:::g' | sed 's:::g' > $tra.txt + python local/unsup_select.py $psd_txt $tra.txt --kenlm_path $kenlm_path --gt_tra $ref_txt $unsup_args + done 2>/dev/null | grep "score=" | sed 's/=/ /g' | sed 's/;//g' | sort -k3n | head -n1 + ) & + done +fi +wait + diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode_word.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode_word.sh new file mode 100755 index 0000000..c10a6b8 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode_word.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +split="dev_other" +ref_txt="" # ground truth transcript path +psd_txt="" # pseudo transcript path +get_best_wer=true +dec_name="decode" +graph_name="graph" +kenlm_path=/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o6.bin +phonemize_lexicon="" + +. ./cmd.sh +. ./path.sh +. parse_options.sh +. /private/home/wnhsu/unsup_asr/fairseq-py-unsup/env.sh + +exp_root=$1 + +set -eu + +if [ ! -z $ref_txt ] && $get_best_wer; then + echo "==== WER w.r.t. 
real transcript (select based on unsupervised metric)" + for x in $exp_root/*/${dec_name}_${split}*; do + lang=$(dirname $x)/$graph_name + + for tra in $x/scoring/*.tra; do + cat $tra | utils/int2sym.pl -f 2- $lang/words.txt | sed 's:\::g' > $tra.txt + python local/unsup_select.py $psd_txt $tra.txt \ + --kenlm_path $kenlm_path --gt_tra $ref_txt --phonemize \ + --phonemize_lexicon "$phonemize_lexicon" + done | grep "score=" | sed 's/=/ /g' | sed 's/;//g' | sort -k3n | head -n1 + done +fi + + diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh new file mode 100755 index 0000000..1a6fb5f --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh @@ -0,0 +1,5 @@ +export KALDI_ROOT=`pwd`/../../.. +export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH +[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1 +. $KALDI_ROOT/tools/config/common_path.sh +export LC_ALL=C diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps new file mode 120000 index 0000000..6e99bf5 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps @@ -0,0 +1 @@ +../../wsj/s5/steps \ No newline at end of file diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_deltas.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_deltas.sh new file mode 100755 index 0000000..af68715 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_deltas.sh @@ -0,0 +1,175 @@ +#!/usr/bin/env bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Begin configuration. +stage=-4 # This allows restarting after partway, when something when wrong. +config= +cmd=run.pl +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +realign_iters="10 20 30"; +num_iters=35 # Number of iterations of training +max_iter_inc=25 # Last iter to increase #Gauss on. +beam=10 +careful=false +retry_beam=40 +boost_silence=1.0 # Factor by which to boost silence likelihoods in alignment +power=0.25 # Exponent for number of gaussians according to occurrence counts +cluster_thresh=-1 # for build-tree control final bottom-up clustering of leaves +norm_vars=false # deprecated. Prefer --cmvn-opts "--norm-vars=true" + # use the option --cmvn-opts "--norm-means=false" +cmvn_opts= +delta_opts= +context_opts= # use"--context-width=5 --central-position=2" for quinphone +num_nonsil_states=3 +# End configuration. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh; +. parse_options.sh || exit 1; + +if [ $# != 6 ]; then + echo "Usage: steps/train_deltas.sh " + echo "e.g.: steps/train_deltas.sh 2000 10000 data/train_si84_half data/lang exp/mono_ali exp/tri1" + echo "main options (for others, see top of script file)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --config # config containing options" + echo " --stage # stage to do partial re-run from." + exit 1; +fi + +numleaves=$1 +totgauss=$2 +data=$3 +lang=$4 +alidir=$5 +dir=$6 + +for f in $alidir/final.mdl $alidir/ali.1.gz $data/feats.scp $lang/phones.txt; do + [ ! 
-f $f ] && echo "train_deltas.sh: no such file $f" && exit 1; +done + +numgauss=$numleaves +incgauss=$[($totgauss-$numgauss)/$max_iter_inc] # per-iter increment for #Gauss +oov=`cat $lang/oov.int` || exit 1; +ciphonelist=`cat $lang/phones/context_indep.csl` || exit 1; +nj=`cat $alidir/num_jobs` || exit 1; +mkdir -p $dir/log +echo $nj > $dir/num_jobs + +utils/lang/check_phones_compatible.sh $lang/phones.txt $alidir/phones.txt || exit 1; +cp $lang/phones.txt $dir || exit 1; + +sdata=$data/split$nj; +split_data.sh $data $nj || exit 1; + + +[ $(cat $alidir/cmvn_opts 2>/dev/null | wc -c) -gt 1 ] && [ -z "$cmvn_opts" ] && \ + echo "$0: warning: ignoring CMVN options from source directory $alidir" +$norm_vars && cmvn_opts="--norm-vars=true $cmvn_opts" +echo $cmvn_opts > $dir/cmvn_opts # keep track of options to CMVN. +[ ! -z $delta_opts ] && echo $delta_opts > $dir/delta_opts + +feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |" + +rm $dir/.error 2>/dev/null + +if [ $stage -le -3 ]; then + echo "$0: accumulating tree stats" + $cmd JOB=1:$nj $dir/log/acc_tree.JOB.log \ + acc-tree-stats $context_opts \ + --ci-phones=$ciphonelist $alidir/final.mdl "$feats" \ + "ark:gunzip -c $alidir/ali.JOB.gz|" $dir/JOB.treeacc || exit 1; + sum-tree-stats $dir/treeacc $dir/*.treeacc 2>$dir/log/sum_tree_acc.log || exit 1; + rm $dir/*.treeacc +fi + +if [ $stage -le -2 ]; then + echo "$0: getting questions for tree-building, via clustering" + # preparing questions, roots file... + cluster-phones --pdf-class-list=$(($num_nonsil_states / 2)) $context_opts \ + $dir/treeacc $lang/phones/sets.int \ + $dir/questions.int 2> $dir/log/questions.log || exit 1; + cat $lang/phones/extra_questions.int >> $dir/questions.int + compile-questions $context_opts $lang/topo $dir/questions.int \ + $dir/questions.qst 2>$dir/log/compile_questions.log || exit 1; + + echo "$0: building the tree" + $cmd $dir/log/build_tree.log \ + build-tree $context_opts --verbose=1 --max-leaves=$numleaves \ + --cluster-thresh=$cluster_thresh $dir/treeacc $lang/phones/roots.int \ + $dir/questions.qst $lang/topo $dir/tree || exit 1; + + $cmd $dir/log/init_model.log \ + gmm-init-model --write-occs=$dir/1.occs \ + $dir/tree $dir/treeacc $lang/topo $dir/1.mdl || exit 1; + if grep 'no stats' $dir/log/init_model.log; then + echo "** The warnings above about 'no stats' generally mean you have phones **" + echo "** (or groups of phones) in your phone set that had no corresponding data. **" + echo "** You should probably figure out whether something went wrong, **" + echo "** or whether your data just doesn't happen to have examples of those **" + echo "** phones. **" + fi + + gmm-mixup --mix-up=$numgauss $dir/1.mdl $dir/1.occs $dir/1.mdl 2>$dir/log/mixup.log || exit 1; + rm $dir/treeacc +fi + +if [ $stage -le -1 ]; then + # Convert the alignments. 
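+  # convert-ali (below) remaps the alignments produced with $alidir's model and
+  # tree onto the newly built tree, so training can resume from those alignments.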
+ echo "$0: converting alignments from $alidir to use current tree" + $cmd JOB=1:$nj $dir/log/convert.JOB.log \ + convert-ali $alidir/final.mdl $dir/1.mdl $dir/tree \ + "ark:gunzip -c $alidir/ali.JOB.gz|" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 0 ]; then + echo "$0: compiling graphs of transcripts" + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs --read-disambig-syms=$lang/phones/disambig.int $dir/tree $dir/1.mdl $lang/L.fst \ + "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $sdata/JOB/text |" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; +fi + +x=1 +while [ $x -lt $num_iters ]; do + echo "$0: training pass $x" + if [ $stage -le $x ]; then + if echo $realign_iters | grep -w $x >/dev/null; then + echo "$0: aligning data" + mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/$x.mdl - |" + $cmd JOB=1:$nj $dir/log/align.$x.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" \ + "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \ + "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; + fi + $cmd JOB=1:$nj $dir/log/acc.$x.JOB.log \ + gmm-acc-stats-ali $dir/$x.mdl "$feats" \ + "ark,s,cs:gunzip -c $dir/ali.JOB.gz|" $dir/$x.JOB.acc || exit 1; + $cmd $dir/log/update.$x.log \ + gmm-est --mix-up=$numgauss --power=$power \ + --write-occs=$dir/$[$x+1].occs $dir/$x.mdl \ + "gmm-sum-accs - $dir/$x.*.acc |" $dir/$[$x+1].mdl || exit 1; + rm $dir/$x.mdl $dir/$x.*.acc + rm $dir/$x.occs + fi + [ $x -le $max_iter_inc ] && numgauss=$[$numgauss+$incgauss]; + x=$[$x+1]; +done + +rm $dir/final.mdl $dir/final.occs 2>/dev/null +ln -s $x.mdl $dir/final.mdl +ln -s $x.occs $dir/final.occs + +steps/diagnostic/analyze_alignments.sh --cmd "$cmd" $lang $dir + +# Summarize warning messages... +utils/summarize_warnings.pl $dir/log + +steps/info/gmm_dir_info.pl $dir + +echo "$0: Done training system with delta+delta-delta features in $dir" + +exit 0 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_lda_mllt.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_lda_mllt.sh new file mode 100755 index 0000000..9d8c319 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_lda_mllt.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# +# LDA+MLLT refers to the way we transform the features after computing +# the MFCCs: we splice across several frames, reduce the dimension (to 40 +# by default) using Linear Discriminant Analysis), and then later estimate, +# over multiple iterations, a diagonalizing transform known as MLLT or STC. +# See http://kaldi-asr.org/doc/transform.html for more explanation. +# +# Apache 2.0. + +# Begin configuration. +cmd=run.pl +config= +stage=-5 +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +realign_iters="10 20 30"; +mllt_iters="2 4 6 12"; +num_iters=35 # Number of iterations of training +max_iter_inc=25 # Last iter to increase #Gauss on. +dim=40 +beam=10 +retry_beam=40 +careful=false +boost_silence=1.0 # Factor by which to boost silence likelihoods in alignment +power=0.25 # Exponent for number of gaussians according to occurrence counts +randprune=4.0 # This is approximately the ratio by which we will speed up the + # LDA and MLLT calculations via randomized pruning. 
+splice_opts=
+cluster_thresh=-1  # for build-tree control final bottom-up clustering of leaves
+norm_vars=false  # deprecated.  Prefer --cmvn-opts "--norm-vars=false"
+cmvn_opts=
+context_opts=  # use "--context-width=5 --central-position=2" for quinphone.
+# End configuration.
+train_tree=true  # if false, don't actually train the tree.
+use_lda_mat=  # If supplied, use this LDA[+MLLT] matrix.
+num_nonsil_states=3
+
+echo "$0 $@"  # Print the command line for logging
+
+[ -f path.sh ] && . ./path.sh
+. parse_options.sh || exit 1;
+
+if [ $# != 6 ]; then
+  echo "Usage: steps/train_lda_mllt.sh [options] <#leaves> <#gauss> <data> <lang> <alignments> <dir>"
+  echo " e.g.: steps/train_lda_mllt.sh 2500 15000 data/train_si84 data/lang exp/tri1_ali_si84 exp/tri2b"
+  echo "Main options (for others, see top of script file)"
+  echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+  echo "  --config <config-file>                           # config containing options"
+  echo "  --stage <stage>                                  # stage to do partial re-run from."
+  exit 1;
+fi
+
+numleaves=$1
+totgauss=$2
+data=$3
+lang=$4
+alidir=$5
+dir=$6
+
+for f in $alidir/final.mdl $alidir/ali.1.gz $data/feats.scp $lang/phones.txt; do
+  [ ! -f $f ] && echo "train_lda_mllt.sh: no such file $f" && exit 1;
+done
+
+numgauss=$numleaves
+incgauss=$[($totgauss-$numgauss)/$max_iter_inc]  # per-iter #gauss increment
+oov=`cat $lang/oov.int` || exit 1;
+nj=`cat $alidir/num_jobs` || exit 1;
+silphonelist=`cat $lang/phones/silence.csl` || exit 1;
+ciphonelist=`cat $lang/phones/context_indep.csl` || exit 1;
+
+mkdir -p $dir/log
+
+utils/lang/check_phones_compatible.sh $lang/phones.txt $alidir/phones.txt || exit 1;
+cp $lang/phones.txt $dir || exit 1;
+
+echo $nj >$dir/num_jobs
+echo "$splice_opts" >$dir/splice_opts  # keep track of frame-splicing options
+                                       # so that later stages of system building can know what they were.
+
+
+[ $(cat $alidir/cmvn_opts 2>/dev/null | wc -c) -gt 1 ] && [ -z "$cmvn_opts" ] && \
+  echo "$0: warning: ignoring CMVN options from source directory $alidir"
+$norm_vars && cmvn_opts="--norm-vars=true $cmvn_opts"
+echo $cmvn_opts > $dir/cmvn_opts  # keep track of options to CMVN.
+
+sdata=$data/split$nj;
+split_data.sh $data $nj || exit 1;
+
+splicedfeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- |"
+# Note: $feats gets overwritten later in the script.
+feats="$splicedfeats transform-feats $dir/0.mat ark:- ark:- |"
+
+
+
+if [ $stage -le -5 ]; then
+  if [ -z "$use_lda_mat" ]; then
+    echo "$0: Accumulating LDA statistics."
+    rm $dir/lda.*.acc 2>/dev/null
+    $cmd JOB=1:$nj $dir/log/lda_acc.JOB.log \
+      ali-to-post "ark:gunzip -c $alidir/ali.JOB.gz|" ark:- \| \
+        weight-silence-post 0.0 $silphonelist $alidir/final.mdl ark:- ark:- \| \
+        acc-lda --rand-prune=$randprune $alidir/final.mdl "$splicedfeats" ark,s,cs:- \
+        $dir/lda.JOB.acc || exit 1;
+    est-lda --write-full-matrix=$dir/full.mat --dim=$dim $dir/0.mat $dir/lda.*.acc \
+      2>$dir/log/lda_est.log || exit 1;
+    rm $dir/lda.*.acc
+  else
+    echo "$0: Using supplied LDA matrix $use_lda_mat"
+    cp $use_lda_mat $dir/0.mat || exit 1;
+    [ ! 
-z "$mllt_iters" ] && \ + echo "$0: Warning: using supplied LDA matrix $use_lda_mat but we will do MLLT," && \ + echo " which you might not want; to disable MLLT, specify --mllt-iters ''" && \ + sleep 5 + fi +fi + +cur_lda_iter=0 + +if [ $stage -le -4 ] && $train_tree; then + echo "$0: Accumulating tree stats" + $cmd JOB=1:$nj $dir/log/acc_tree.JOB.log \ + acc-tree-stats $context_opts \ + --ci-phones=$ciphonelist $alidir/final.mdl "$feats" \ + "ark:gunzip -c $alidir/ali.JOB.gz|" $dir/JOB.treeacc || exit 1; + [ `ls $dir/*.treeacc | wc -w` -ne "$nj" ] && echo "$0: Wrong #tree-accs" && exit 1; + $cmd $dir/log/sum_tree_acc.log \ + sum-tree-stats $dir/treeacc $dir/*.treeacc || exit 1; + rm $dir/*.treeacc +fi + + +if [ $stage -le -3 ] && $train_tree; then + echo "$0: Getting questions for tree clustering." + # preparing questions, roots file... + cluster-phones --pdf-class-list=$(($num_nonsil_states / 2)) $context_opts $dir/treeacc $lang/phones/sets.int \ + $dir/questions.int 2> $dir/log/questions.log || exit 1; + cat $lang/phones/extra_questions.int >> $dir/questions.int + compile-questions $context_opts $lang/topo $dir/questions.int \ + $dir/questions.qst 2>$dir/log/compile_questions.log || exit 1; + + echo "$0: Building the tree" + $cmd $dir/log/build_tree.log \ + build-tree $context_opts --verbose=1 --max-leaves=$numleaves \ + --cluster-thresh=$cluster_thresh $dir/treeacc $lang/phones/roots.int \ + $dir/questions.qst $lang/topo $dir/tree || exit 1; +fi + +if [ $stage -le -2 ]; then + echo "$0: Initializing the model" + if $train_tree; then + gmm-init-model --write-occs=$dir/1.occs \ + $dir/tree $dir/treeacc $lang/topo $dir/1.mdl 2> $dir/log/init_model.log || exit 1; + grep 'no stats' $dir/log/init_model.log && echo "This is a bad warning."; + rm $dir/treeacc + else + cp $alidir/tree $dir/ || exit 1; + $cmd JOB=1 $dir/log/init_model.log \ + gmm-init-model-flat $dir/tree $lang/topo $dir/1.mdl \ + "$feats subset-feats ark:- ark:-|" || exit 1; + fi +fi + + +if [ $stage -le -1 ]; then + # Convert the alignments. 
+ echo "$0: Converting alignments from $alidir to use current tree" + $cmd JOB=1:$nj $dir/log/convert.JOB.log \ + convert-ali $alidir/final.mdl $dir/1.mdl $dir/tree \ + "ark:gunzip -c $alidir/ali.JOB.gz|" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 0 ] && [ "$realign_iters" != "" ]; then + echo "$0: Compiling graphs of transcripts" + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs --read-disambig-syms=$lang/phones/disambig.int $dir/tree $dir/1.mdl $lang/L.fst \ + "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $data/split$nj/JOB/text |" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; +fi + + +x=1 +while [ $x -lt $num_iters ]; do + echo Training pass $x + if echo $realign_iters | grep -w $x >/dev/null && [ $stage -le $x ]; then + echo Aligning data + mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/$x.mdl - |" + $cmd JOB=1:$nj $dir/log/align.$x.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" \ + "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \ + "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; + fi + if echo $mllt_iters | grep -w $x >/dev/null; then + if [ $stage -le $x ]; then + echo "$0: Estimating MLLT" + $cmd JOB=1:$nj $dir/log/macc.$x.JOB.log \ + ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $dir/$x.mdl ark:- ark:- \| \ + gmm-acc-mllt --rand-prune=$randprune $dir/$x.mdl "$feats" ark:- $dir/$x.JOB.macc \ + || exit 1; + est-mllt $dir/$x.mat.new $dir/$x.*.macc 2> $dir/log/mupdate.$x.log || exit 1; + gmm-transform-means $dir/$x.mat.new $dir/$x.mdl $dir/$x.mdl \ + 2> $dir/log/transform_means.$x.log || exit 1; + compose-transforms --print-args=false $dir/$x.mat.new $dir/$cur_lda_iter.mat $dir/$x.mat || exit 1; + rm $dir/$x.*.macc + fi + feats="$splicedfeats transform-feats $dir/$x.mat ark:- ark:- |" + cur_lda_iter=$x + fi + + if [ $stage -le $x ]; then + $cmd JOB=1:$nj $dir/log/acc.$x.JOB.log \ + gmm-acc-stats-ali $dir/$x.mdl "$feats" \ + "ark,s,cs:gunzip -c $dir/ali.JOB.gz|" $dir/$x.JOB.acc || exit 1; + $cmd $dir/log/update.$x.log \ + gmm-est --write-occs=$dir/$[$x+1].occs --mix-up=$numgauss --power=$power \ + $dir/$x.mdl "gmm-sum-accs - $dir/$x.*.acc |" $dir/$[$x+1].mdl || exit 1; + rm $dir/$x.mdl $dir/$x.*.acc $dir/$x.occs + fi + [ $x -le $max_iter_inc ] && numgauss=$[$numgauss+$incgauss]; + x=$[$x+1]; +done + +rm $dir/final.{mdl,mat,occs} 2>/dev/null +ln -s $x.mdl $dir/final.mdl +ln -s $x.occs $dir/final.occs +ln -s $cur_lda_iter.mat $dir/final.mat + +steps/diagnostic/analyze_alignments.sh --cmd "$cmd" $lang $dir + +# Summarize warning messages... +utils/summarize_warnings.pl $dir/log + +steps/info/gmm_dir_info.pl $dir + +echo "$0: Done training system with LDA+MLLT features in $dir" + +exit 0 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh new file mode 100755 index 0000000..f75afaf --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh @@ -0,0 +1,281 @@ +#!/usr/bin/env bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + + +# This does Speaker Adapted Training (SAT), i.e. train on +# fMLLR-adapted features. It can be done on top of either LDA+MLLT, or +# delta and delta-delta features. 
If there are no transforms supplied
+# in the alignment directory, it will estimate transforms itself before
+# building the tree (and in any case, it estimates transforms a number
+# of times during training).
+
+
+# Begin configuration section.
+stage=-5
+exit_stage=-100  # you can use this to require it to exit at the
+                 # beginning of a specific stage.  Not all values are
+                 # supported.
+fmllr_update_type=full
+cmd=run.pl
+scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
+beam=10
+retry_beam=40
+careful=false
+boost_silence=1.0  # Factor by which to boost silence likelihoods in alignment
+context_opts=  # e.g. set this to "--context-width 5 --central-position 2" for quinphone.
+realign_iters="10 20 30";
+fmllr_iters="2 4 6 12";
+silence_weight=0.0  # Weight on silence in fMLLR estimation.
+num_iters=35     # Number of iterations of training
+max_iter_inc=25  # Last iter to increase #Gauss on.
+power=0.2  # Exponent for number of gaussians according to occurrence counts
+cluster_thresh=-1  # for build-tree control final bottom-up clustering of leaves
+phone_map=
+train_tree=true
+tree_stats_opts=
+cluster_phones_opts=
+compile_questions_opts=
+# End configuration section.
+num_nonsil_states=3
+
+echo "$0 $@"  # Print the command line for logging
+
+[ -f path.sh ] && . ./path.sh
+. parse_options.sh || exit 1;
+
+if [ $# != 6 ]; then
+  echo "Usage: steps/train_sat.sh <#leaves> <#gauss> <data> <lang> <ali-dir> <exp-dir>"
+  echo " e.g.: steps/train_sat.sh 2500 15000 data/train_si84 data/lang exp/tri2b_ali_si84 exp/tri3b"
+  echo "Main options (for others, see top of script file)"
+  echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+  echo "  --config <config-file>                           # config containing options"
+  echo "  --stage <stage>                                  # stage to do partial re-run from."
+  exit 1;
+fi
+
+numleaves=$1
+totgauss=$2
+data=$3
+lang=$4
+alidir=$5
+dir=$6
+
+for f in $data/feats.scp $lang/phones.txt $alidir/final.mdl $alidir/ali.1.gz; do
+  [ ! -f $f ] && echo "train_sat.sh: no such file $f" && exit 1;
+done
+
+numgauss=$numleaves
+incgauss=$[($totgauss-$numgauss)/$max_iter_inc]  # per-iter #gauss increment
+oov=`cat $lang/oov.int`
+nj=`cat $alidir/num_jobs` || exit 1;
+silphonelist=`cat $lang/phones/silence.csl`
+ciphonelist=`cat $lang/phones/context_indep.csl` || exit 1;
+sdata=$data/split$nj;
+splice_opts=`cat $alidir/splice_opts 2>/dev/null`  # frame-splicing options.
+cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null`
+delta_opts=`cat $alidir/delta_opts 2>/dev/null`
+phone_map_opt=
+[ ! -z "$phone_map" ] && phone_map_opt="--phone-map='$phone_map'"
+
+mkdir -p $dir/log
+cp $alidir/splice_opts $dir 2>/dev/null  # frame-splicing options.
+cp $alidir/cmvn_opts $dir 2>/dev/null  # cmn/cmvn option.
+cp $alidir/delta_opts $dir 2>/dev/null  # delta option.
+
+utils/lang/check_phones_compatible.sh $lang/phones.txt $alidir/phones.txt || exit 1;
+cp $lang/phones.txt $dir || exit 1;
+
+echo $nj >$dir/num_jobs
+[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
+
+# Set up features.
+
+if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
+echo "$0: feature type is $feat_type"
+
+## Set up speaker-independent features.
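+# (note) Kaldi binaries evaluate these 'ark:...|' pipe specifications lazily
+# per job: features are CMVN-normalized, then either delta-appended
+# (feat_type=delta) or spliced and LDA-transformed (feat_type=lda).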
+case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $alidir/final.mat ark:- ark:- |" + cp $alidir/final.mat $dir + cp $alidir/full.mat $dir 2>/dev/null + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac + +## Get initial fMLLR transforms (possibly from alignment dir) +if [ -f $alidir/trans.1 ]; then + echo "$0: Using transforms from $alidir" + feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$alidir/trans.JOB ark:- ark:- |" + cur_trans_dir=$alidir +else + if [ $stage -le -5 ]; then + echo "$0: obtaining initial fMLLR transforms since not present in $alidir" + # The next line is necessary because of $silphonelist otherwise being incorrect; would require + # old $lang dir which would require another option. Not needed anyway. + [ ! -z "$phone_map" ] && \ + echo "$0: error: you must provide transforms if you use the --phone-map option." && exit 1; + $cmd JOB=1:$nj $dir/log/fmllr.0.JOB.log \ + ali-to-post "ark:gunzip -c $alidir/ali.JOB.gz|" ark:- \| \ + weight-silence-post $silence_weight $silphonelist $alidir/final.mdl ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $alidir/final.mdl "$sifeats" \ + ark:- ark:$dir/trans.JOB || exit 1; + fi + feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" + cur_trans_dir=$dir +fi + +if [ $stage -le -4 ] && $train_tree; then + # Get tree stats. + echo "$0: Accumulating tree stats" + $cmd JOB=1:$nj $dir/log/acc_tree.JOB.log \ + acc-tree-stats $context_opts $tree_stats_opts $phone_map_opt --ci-phones=$ciphonelist $alidir/final.mdl "$feats" \ + "ark:gunzip -c $alidir/ali.JOB.gz|" $dir/JOB.treeacc || exit 1; + [ "`ls $dir/*.treeacc | wc -w`" -ne "$nj" ] && echo "$0: Wrong #tree-accs" && exit 1; + $cmd $dir/log/sum_tree_acc.log \ + sum-tree-stats $dir/treeacc $dir/*.treeacc || exit 1; + rm $dir/*.treeacc +fi + +if [ $stage -le -3 ] && $train_tree; then + echo "$0: Getting questions for tree clustering." + # preparing questions, roots file... 
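+  # cluster-phones automatically derives phone clusters ("questions") from the
+  # tree stats; compile-questions and build-tree below then use them to grow
+  # the phonetic decision tree.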
+ cluster-phones --pdf-class-list=$(($num_nonsil_states / 2)) \ + $cluster_phones_opts $context_opts \ + $dir/treeacc $lang/phones/sets.int $dir/questions.int 2>$dir/log/questions.log || exit 1; + cat $lang/phones/extra_questions.int >> $dir/questions.int + compile-questions $context_opts $compile_questions_opts $lang/topo $dir/questions.int $dir/questions.qst 2>$dir/log/compile_questions.log || exit 1; + + echo "$0: Building the tree" + $cmd $dir/log/build_tree.log \ + build-tree $context_opts --verbose=1 --max-leaves=$numleaves \ + --cluster-thresh=$cluster_thresh $dir/treeacc $lang/phones/roots.int \ + $dir/questions.qst $lang/topo $dir/tree || exit 1; +fi + +if [ $stage -le -2 ]; then + echo "$0: Initializing the model" + if $train_tree; then + gmm-init-model --write-occs=$dir/1.occs \ + $dir/tree $dir/treeacc $lang/topo $dir/1.mdl 2> $dir/log/init_model.log || exit 1; + grep 'no stats' $dir/log/init_model.log && echo "This is a bad warning."; + rm $dir/treeacc + else + cp $alidir/tree $dir/ || exit 1; + $cmd JOB=1 $dir/log/init_model.log \ + gmm-init-model-flat $dir/tree $lang/topo $dir/1.mdl \ + "$feats subset-feats ark:- ark:-|" || exit 1; + fi +fi + +if [ $stage -le -1 ]; then + # Convert the alignments. + echo "$0: Converting alignments from $alidir to use current tree" + $cmd JOB=1:$nj $dir/log/convert.JOB.log \ + convert-ali $phone_map_opt $alidir/final.mdl $dir/1.mdl $dir/tree \ + "ark:gunzip -c $alidir/ali.JOB.gz|" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +[ "$exit_stage" -eq 0 ] && echo "$0: Exiting early: --exit-stage $exit_stage" && exit 0; + +if [ $stage -le 0 ] && [ "$realign_iters" != "" ]; then + echo "$0: Compiling graphs of transcripts" + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs --read-disambig-syms=$lang/phones/disambig.int $dir/tree $dir/1.mdl $lang/L.fst \ + "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $sdata/JOB/text |" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; +fi + +x=1 +while [ $x -lt $num_iters ]; do + echo Pass $x + if echo $realign_iters | grep -w $x >/dev/null && [ $stage -le $x ]; then + echo Aligning data + mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/$x.mdl - |" + $cmd JOB=1:$nj $dir/log/align.$x.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" \ + "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \ + "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; + fi + + if echo $fmllr_iters | grep -w $x >/dev/null; then + if [ $stage -le $x ]; then + echo Estimating fMLLR transforms + # We estimate a transform that's additional to the previous transform; + # we'll compose them. + $cmd JOB=1:$nj $dir/log/fmllr.$x.JOB.log \ + ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ + weight-silence-post $silence_weight $silphonelist $dir/$x.mdl ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $dir/$x.mdl \ + "$feats" ark:- ark:$dir/tmp_trans.JOB || exit 1; + for n in `seq $nj`; do + ! 
( compose-transforms --b-is-affine=true \ + ark:$dir/tmp_trans.$n ark:$cur_trans_dir/trans.$n ark:$dir/composed_trans.$n \ + && mv $dir/composed_trans.$n $dir/trans.$n && \ + rm $dir/tmp_trans.$n ) 2>$dir/log/compose_transforms.$x.log \ + && echo "$0: Error composing transforms" && exit 1; + done + fi + feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + cur_trans_dir=$dir + fi + + if [ $stage -le $x ]; then + $cmd JOB=1:$nj $dir/log/acc.$x.JOB.log \ + gmm-acc-stats-ali $dir/$x.mdl "$feats" \ + "ark,s,cs:gunzip -c $dir/ali.JOB.gz|" $dir/$x.JOB.acc || exit 1; + [ `ls $dir/$x.*.acc | wc -w` -ne "$nj" ] && echo "$0: Wrong #accs" && exit 1; + $cmd $dir/log/update.$x.log \ + gmm-est --power=$power --write-occs=$dir/$[$x+1].occs --mix-up=$numgauss $dir/$x.mdl \ + "gmm-sum-accs - $dir/$x.*.acc |" $dir/$[$x+1].mdl || exit 1; + rm $dir/$x.mdl $dir/$x.*.acc + rm $dir/$x.occs + fi + [ $x -le $max_iter_inc ] && numgauss=$[$numgauss+$incgauss]; + x=$[$x+1]; +done + + +if [ $stage -le $x ]; then + # Accumulate stats for "alignment model"-- this model is + # computed with the speaker-independent features, but matches Gaussian-for-Gaussian + # with the final speaker-adapted model. + $cmd JOB=1:$nj $dir/log/acc_alimdl.JOB.log \ + ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ + gmm-acc-stats-twofeats $dir/$x.mdl "$feats" "$sifeats" \ + ark,s,cs:- $dir/$x.JOB.acc || exit 1; + [ `ls $dir/$x.*.acc | wc -w` -ne "$nj" ] && echo "$0: Wrong #accs" && exit 1; + # Update model. + $cmd $dir/log/est_alimdl.log \ + gmm-est --power=$power --remove-low-count-gaussians=false $dir/$x.mdl \ + "gmm-sum-accs - $dir/$x.*.acc|" $dir/$x.alimdl || exit 1; + rm $dir/$x.*.acc +fi + +rm $dir/final.{mdl,alimdl,occs} 2>/dev/null +ln -s $x.mdl $dir/final.mdl +ln -s $x.occs $dir/final.occs +ln -s $x.alimdl $dir/final.alimdl + + +steps/diagnostic/analyze_alignments.sh --cmd "$cmd" $lang $dir + +utils/summarize_warnings.pl $dir/log +( + echo "$0: Likelihood evolution:" + for x in `seq $[$num_iters-1]`; do + tail -n 30 $dir/log/acc.$x.*.log | awk '/Overall avg like/{l += $(NF-3)*$(NF-1); t += $(NF-1); } + /Overall average logdet/{d += $(NF-3)*$(NF-1); t2 += $(NF-1);} + END{ d /= t2; l /= t; printf("%s ", d+l); } ' + done + echo +) | tee $dir/log/summary.log + + +steps/info/gmm_dir_info.pl $dir + +echo "$0: done training SAT system in $dir" + +exit 0 diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh new file mode 100644 index 0000000..f3a3d3f --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +set -eu + +w2v_dir= # contains features `{train,valid}.{npy,lengths}`, real transcripts `{train,valid}.${label}`, and dict `dict.${label}.txt` +lab_dir= # contains pseudo labels `{train,valid}.txt` +out_dir= # output root +arpa_lm= # phone LM +arpa_lm_bin= # (binary) phone LM for KenLM, used in unsupervised selection + +label=phnc +train_name="train" +valid_name="valid" +data_dir=${out_dir}/data + +mkdir -p ${out_dir}/exp +local/prepare_lang.sh $w2v_dir/dict.${label}.txt $data_dir +local/prepare_lm.sh $arpa_lm $data_dir + +for x in $train_name $valid_name; do + x_gt=${x}_gt + + # prepare pseudo data + python local/prepare_data_from_w2v.py $w2v_dir $data_dir $x + steps/compute_cmvn_stats.sh $data_dir/$x $out_dir/exp/make_feat/$x $out_dir/feats/$x + python local/copy_aligned_text.py < 
$lab_dir/$x.txt > $data_dir/$x/text + + # prepare ground truth data + mkdir $data_dir/$x_gt + cp $data_dir/$x/{feats.scp,cmvn.scp,utt2spk,spk2utt} $data_dir/$x_gt/ + python local/copy_aligned_text.py < $w2v_dir/$x.$label > $data_dir/$x_gt/text +done + +local/train_subset_lgbeam.sh \ + --out_root ${out_dir} --out_name exp --train $train_name --valid $valid_name \ + --mono_size 2000 --tri1_size 5000 --tri2b_size -1 --tri3b_size -1 \ + --stage 1 --max_stage 3 $data_dir $data_dir/lang $data_dir/lang_test + +local/unsup_select_decode.sh \ + --split $valid_name --kenlm_path $arpa_lm_bin \ + --ref_txt $data_dir/${valid_name}_gt/text \ + --psd_txt $data_dir/${valid_name}/text \ + $out_dir/exp diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/utils b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/utils new file mode 120000 index 0000000..b240885 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/kaldi_self_train/st/utils @@ -0,0 +1 @@ +../../wsj/s5/utils \ No newline at end of file diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/models/__init__.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/models/__init__.py new file mode 100644 index 0000000..3e3039b --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/models/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from .wav2vec_u import Wav2vec_U + + +__all__ = [ + "Wav2vec_U", +] diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/models/wav2vec_u.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/models/wav2vec_u.py new file mode 100644 index 0000000..8a1e905 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/models/wav2vec_u.py @@ -0,0 +1,687 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
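+#
+# wav2vec-U in brief: a convolutional Generator maps (optionally segmented)
+# speech representations to phone logits, and a convolutional Discriminator is
+# trained adversarially to distinguish generated phone sequences from real
+# phonemized text; see Wav2vec_U.forward() below.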
+ +from dataclasses import dataclass +from enum import Enum, auto +import math +import numpy as np +from typing import Tuple, List, Optional, Dict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import autograd + +from fairseq import checkpoint_utils, utils +from fairseq.dataclass import FairseqDataclass +from fairseq.models import BaseFairseqModel, register_model +from fairseq.modules import ( + SamePad, + TransposeLast, +) + + +class SegmentationType(Enum): + NONE = auto() + RANDOM = auto() + UNIFORM_RANDOM = auto() + UNIFORM_RANDOM_JOIN = auto() + JOIN = auto() + + +@dataclass +class SegmentationConfig(FairseqDataclass): + type: SegmentationType = SegmentationType.NONE + subsample_rate: float = 0.25 + mean_pool: bool = True + mean_pool_join: bool = False + remove_zeros: bool = False + + +@dataclass +class Wav2vec_UConfig(FairseqDataclass): + discriminator_kernel: int = 3 + discriminator_dilation: int = 1 + discriminator_dim: int = 256 + discriminator_causal: bool = True + discriminator_linear_emb: bool = False + discriminator_depth: int = 1 + discriminator_max_pool: bool = False + discriminator_act_after_linear: bool = False + discriminator_dropout: float = 0.0 + discriminator_spectral_norm: bool = False + discriminator_weight_norm: bool = False + + generator_kernel: int = 4 + generator_dilation: int = 1 + generator_stride: int = 1 + generator_pad: int = -1 + generator_bias: bool = False + generator_dropout: float = 0.0 + generator_batch_norm: int = 0 + generator_residual: bool = False + + blank_weight: float = 0 + blank_mode: str = "add" + blank_is_sil: bool = False + no_softmax: bool = False + + smoothness_weight: float = 0.0 + smoothing: float = 0.0 + smoothing_one_sided: bool = False + gradient_penalty: float = 0.0 + probabilistic_grad_penalty_slicing: bool = False + code_penalty: float = 0.0 + mmi_weight: float = 0.0 + target_dim: int = 64 + target_downsample_rate: int = 2 + gumbel: bool = False + hard_gumbel: bool = True + temp: Tuple[float, float, float] = (2, 0.1, 0.99995) + input_dim: int = 128 + + segmentation: SegmentationConfig = SegmentationConfig() + + +class Segmenter(nn.Module): + cfg: SegmentationConfig + + def __init__(self, cfg: SegmentationConfig): + super().__init__() + self.cfg = cfg + self.subsample_rate = cfg.subsample_rate + + def pre_segment(self, dense_x, dense_padding_mask): + return dense_x, dense_padding_mask + + def logit_segment(self, logits, padding_mask): + return logits, padding_mask + + +class RandomSegmenter(Segmenter): + def pre_segment(self, dense_x, dense_padding_mask): + target_num = math.ceil(dense_x.size(1) * self.subsample_rate) + ones = torch.ones(dense_x.shape[:-1], device=dense_x.device) + indices, _ = ones.multinomial(target_num).sort(dim=-1) + indices_ld = indices.unsqueeze(-1).expand(-1, -1, dense_x.size(-1)) + dense_x = dense_x.gather(1, indices_ld) + dense_padding_mask = dense_padding_mask.gather(1, index=indices) + return dense_x, dense_padding_mask + + +class UniformRandomSegmenter(Segmenter): + def pre_segment(self, dense_x, dense_padding_mask): + bsz, tsz, fsz = dense_x.shape + + target_num = math.ceil(tsz * self.subsample_rate) + + rem = tsz % target_num + + if rem > 0: + dense_x = F.pad(dense_x, [0, 0, 0, target_num - rem]) + dense_padding_mask = F.pad( + dense_padding_mask, [0, target_num - rem], value=True + ) + + dense_x = dense_x.view(bsz, target_num, -1, fsz) + dense_padding_mask = dense_padding_mask.view(bsz, target_num, -1) + + if self.cfg.mean_pool: + dense_x = dense_x.mean(dim=-2) + 
dense_padding_mask = dense_padding_mask.all(dim=-1) + else: + ones = torch.ones((bsz, dense_x.size(2)), device=dense_x.device) + indices = ones.multinomial(1) + indices = indices.unsqueeze(-1).expand(-1, target_num, -1) + indices_ld = indices.unsqueeze(-1).expand(-1, -1, -1, fsz) + dense_x = dense_x.gather(2, indices_ld).reshape(bsz, -1, fsz) + dense_padding_mask = dense_padding_mask.gather(2, index=indices).reshape( + bsz, -1 + ) + return dense_x, dense_padding_mask + + +class JoinSegmenter(Segmenter): + def logit_segment(self, logits, padding_mask): + preds = logits.argmax(dim=-1) + + if padding_mask.any(): + preds[padding_mask] = -1 # mark pad + uniques = [] + + bsz, tsz, csz = logits.shape + + for p in preds: + uniques.append( + p.cpu().unique_consecutive(return_inverse=True, return_counts=True) + ) + + new_tsz = max(u[0].numel() for u in uniques) + new_logits = logits.new_zeros(bsz, new_tsz, csz) + new_pad = padding_mask.new_zeros(bsz, new_tsz) + + for b in range(bsz): + u, idx, c = uniques[b] + keep = u != -1 + + if self.cfg.remove_zeros: + keep.logical_and_(u != 0) + + if self.training and not self.cfg.mean_pool_join: + u[0] = 0 + u[1:] = c.cumsum(0)[:-1] + m = c > 1 + r = torch.rand(m.sum()) + o = (c[m] * r).long() + u[m] += o + new_logits[b, : u.numel()] = logits[b, u] + else: + new_logits[b].index_add_( + dim=0, index=idx.to(new_logits.device), source=logits[b] + ) + new_logits[b, : c.numel()] /= c.unsqueeze(-1).to(new_logits.device) + + new_sz = keep.sum() + if not keep.all(): + kept_logits = new_logits[b, : c.numel()][keep] + new_logits[b, :new_sz] = kept_logits + + if new_sz < new_tsz: + pad = new_tsz - new_sz + new_logits[b, -pad:] = 0 + new_pad[b, -pad:] = True + + return new_logits, new_pad + + +class UniformRandomJoinSegmenter(UniformRandomSegmenter, JoinSegmenter): + pass + + +SEGMENT_FACTORY = { + SegmentationType.NONE: Segmenter, + SegmentationType.RANDOM: RandomSegmenter, + SegmentationType.UNIFORM_RANDOM: UniformRandomSegmenter, + SegmentationType.UNIFORM_RANDOM_JOIN: UniformRandomJoinSegmenter, + SegmentationType.JOIN: JoinSegmenter, +} + + +class Discriminator(nn.Module): + def __init__(self, dim, cfg: Wav2vec_UConfig): + super().__init__() + + inner_dim = cfg.discriminator_dim + kernel = cfg.discriminator_kernel + dilation = cfg.discriminator_dilation + self.max_pool = cfg.discriminator_max_pool + + if cfg.discriminator_causal: + padding = kernel - 1 + else: + padding = kernel // 2 + + def make_conv(in_d, out_d, k, p=0, has_dilation=True): + conv = nn.Conv1d( + in_d, + out_d, + kernel_size=k, + padding=p, + dilation=dilation if has_dilation else 1, + ) + if cfg.discriminator_spectral_norm: + conv = nn.utils.spectral_norm(conv) + elif cfg.discriminator_weight_norm: + conv = nn.utils.weight_norm(conv) + return conv + + inner_net = [ + nn.Sequential( + make_conv(inner_dim, inner_dim, kernel, padding), + SamePad(kernel_size=kernel, causal=cfg.discriminator_causal), + nn.Dropout(cfg.discriminator_dropout), + nn.GELU(), + ) + for _ in range(cfg.discriminator_depth - 1) + ] + [ + make_conv(inner_dim, 1, kernel, padding, has_dilation=False), + SamePad(kernel_size=kernel, causal=cfg.discriminator_causal), + ] + + if cfg.discriminator_linear_emb: + emb_net = [make_conv(dim, inner_dim, 1)] + else: + emb_net = [ + make_conv(dim, inner_dim, kernel, padding), + SamePad(kernel_size=kernel, causal=cfg.discriminator_causal), + ] + + if cfg.discriminator_act_after_linear: + emb_net.append(nn.GELU()) + + self.net = nn.Sequential( + *emb_net, + nn.Dropout(cfg.discriminator_dropout), + 
*inner_net, + ) + + def forward(self, x, padding_mask): + x = x.transpose(1, 2) # BTC -> BCT + x = self.net(x) + x = x.transpose(1, 2) + x_sz = x.size(1) + if padding_mask is not None and padding_mask.any() and padding_mask.dim() > 1: + padding_mask = padding_mask[:, : x.size(1)] + x[padding_mask] = float("-inf") if self.max_pool else 0 + x_sz = x_sz - padding_mask.sum(dim=-1) + x = x.squeeze(-1) + if self.max_pool: + x, _ = x.max(dim=-1) + else: + x = x.sum(dim=-1) + x = x / x_sz + return x + + +class Generator(nn.Module): + def __init__(self, input_dim, output_dim, cfg: Wav2vec_UConfig): + super().__init__() + + self.cfg = cfg + self.output_dim = output_dim + self.stride = cfg.generator_stride + self.dropout = nn.Dropout(cfg.generator_dropout) + self.batch_norm = cfg.generator_batch_norm != 0 + self.residual = cfg.generator_residual + + padding = ( + cfg.generator_kernel // 2 if cfg.generator_pad < 0 else cfg.generator_pad + ) + self.proj = nn.Sequential( + TransposeLast(), + nn.Conv1d( + input_dim, + output_dim, + kernel_size=cfg.generator_kernel, + stride=cfg.generator_stride, + dilation=cfg.generator_dilation, + padding=padding, + bias=cfg.generator_bias, + ), + TransposeLast(), + ) + + if self.batch_norm: + self.bn = nn.BatchNorm1d(input_dim) + self.bn.weight.data.fill_(cfg.generator_batch_norm) + if self.residual: + self.in_proj = nn.Linear(input_dim, input_dim) + + def forward(self, dense_x, tokens, dense_padding_mask): + result = {} + + if self.batch_norm: + dense_x = self.bn_padded_data(dense_x, dense_padding_mask) + if self.residual: + inter_x = self.in_proj(self.dropout(dense_x)) + dense_x = dense_x + inter_x + result["inter_x"] = inter_x + + dense_x = self.dropout(dense_x) + + dense_x = self.proj(dense_x) + if self.stride > 1: + dense_padding_mask = dense_padding_mask[:, :: self.stride] + + if dense_padding_mask.size(1) != dense_x.size(1): + new_padding = dense_padding_mask.new_zeros(dense_x.shape[:-1]) + diff = new_padding.size(1) - dense_padding_mask.size(1) + + if diff > 0: + new_padding[:, diff:] = dense_padding_mask + else: + assert diff < 0 + new_padding = dense_padding_mask[:, :diff] + + dense_padding_mask = new_padding + + token_x = None + if tokens is not None: + token_x = dense_x.new_zeros(tokens.numel(), self.output_dim) + token_x.scatter_(1, tokens.view(-1, 1).long(), 1) + token_x = token_x.view(tokens.shape + (self.output_dim,)) + + result["dense_x"] = dense_x + result["token_x"] = token_x + result["dense_padding_mask"] = dense_padding_mask + + return result + + def bn_padded_data(self, feature, padding_mask): + normed_feature = feature.clone() + normed_feature[~padding_mask] = self.bn( + feature[~padding_mask].unsqueeze(-1) + ).squeeze(-1) + return normed_feature + + +@register_model("wav2vec_u", dataclass=Wav2vec_UConfig) +class Wav2vec_U(BaseFairseqModel): + def calc_gradient_penalty(self, real_data, fake_data): + + b_size = min(real_data.size(0), fake_data.size(0)) + t_size = min(real_data.size(1), fake_data.size(1)) + + if self.cfg.probabilistic_grad_penalty_slicing: + + def get_slice(data, dim, target_size): + + size = data.size(dim) + diff = size - target_size + if diff <= 0: + return data + + start = np.random.randint(0, diff + 1) + return data.narrow(dim=dim, start=start, length=target_size) + + real_data = get_slice(real_data, 0, b_size) + real_data = get_slice(real_data, 1, t_size) + fake_data = get_slice(fake_data, 0, b_size) + fake_data = get_slice(fake_data, 1, t_size) + + else: + real_data = real_data[:b_size, :t_size] + fake_data = 
fake_data[:b_size, :t_size]
+
+        alpha = torch.rand(real_data.size(0), 1, 1)
+        alpha = alpha.expand(real_data.size())
+        alpha = alpha.to(real_data.device)
+
+        interpolates = alpha * real_data + ((1 - alpha) * fake_data)
+
+        disc_interpolates = self.discriminator(interpolates, None)
+
+        gradients = autograd.grad(
+            outputs=disc_interpolates,
+            inputs=interpolates,
+            grad_outputs=torch.ones(disc_interpolates.size(), device=real_data.device),
+            create_graph=True,
+            retain_graph=True,
+            only_inputs=True,
+        )[0]
+
+        gradient_penalty = (gradients.norm(2, dim=1) - 1) ** 2
+        return gradient_penalty
+
+    def set_num_updates(self, num_updates):
+        super().set_num_updates(num_updates)
+        self.update_num = num_updates
+        self.curr_temp = max(
+            self.max_temp * self.temp_decay ** num_updates, self.min_temp
+        )
+
+    def discrim_step(self, num_updates):
+        return num_updates % 2 == 1
+
+    def get_groups_for_update(self, num_updates):
+        return "discriminator" if self.discrim_step(num_updates) else "generator"
+
+    def __init__(self, cfg: Wav2vec_UConfig, target_dict):
+        super().__init__()
+
+        self.cfg = cfg
+        self.zero_index = target_dict.index("<SIL>") if "<SIL>" in target_dict else 0
+        self.smoothness_weight = cfg.smoothness_weight
+
+        output_size = len(target_dict)
+        self.pad = target_dict.pad()
+        self.eos = target_dict.eos()
+        self.smoothing = cfg.smoothing
+        self.smoothing_one_sided = cfg.smoothing_one_sided
+        self.no_softmax = cfg.no_softmax
+        self.gumbel = cfg.gumbel
+        self.hard_gumbel = cfg.hard_gumbel
+        self.last_acc = None
+
+        self.gradient_penalty = cfg.gradient_penalty
+        self.code_penalty = cfg.code_penalty
+        self.mmi_weight = cfg.mmi_weight
+        self.blank_weight = cfg.blank_weight
+        self.blank_mode = cfg.blank_mode
+        self.blank_index = target_dict.index("<SIL>") if cfg.blank_is_sil else 0
+        assert self.blank_index != target_dict.unk()
+
+        self.discriminator = Discriminator(output_size, cfg)
+        for p in self.discriminator.parameters():
+            p.param_group = "discriminator"
+
+        self.pca_A = self.pca_b = None
+        d = cfg.input_dim
+
+        self.segmenter = SEGMENT_FACTORY[cfg.segmentation.type](cfg.segmentation)
+
+        self.generator = Generator(d, output_size, cfg)
+
+        for p in self.generator.parameters():
+            p.param_group = "generator"
+
+        for p in self.segmenter.parameters():
+            p.param_group = "generator"
+
+        self.max_temp, self.min_temp, self.temp_decay = cfg.temp
+        self.curr_temp = self.max_temp
+        self.update_num = 0
+
+        if self.mmi_weight > 0:
+            self.target_downsample_rate = cfg.target_downsample_rate
+            self.decoder = nn.Linear(d, cfg.target_dim)
+            for p in self.decoder.parameters():
+                p.param_group = "generator"
+
+    @classmethod
+    def build_model(cls, cfg, task):
+        return cls(cfg, task.target_dictionary)
+
+    def get_logits(
+        self,
+        net_output: Optional[Dict[str, List[Optional[torch.Tensor]]]],
+        normalize: bool = False,
+    ):
+        logits = net_output["logits"]
+
+        if self.blank_weight != 0:
+            if self.blank_mode == "add":
+                logits[..., self.blank_index] += self.blank_weight
+            elif self.blank_mode == "set":
+                logits[..., self.blank_index] = self.blank_weight
+            else:
+                raise Exception(f"invalid blank mode {self.blank_mode}")
+
+        padding = net_output["padding_mask"]
+        if padding.any():
+            logits[padding] = float("-inf")
+            logits[padding][..., self.blank_index] = float("inf")
+
+        if normalize:
+            logits = utils.log_softmax(logits.float(), dim=-1)
+
+        return logits.transpose(0, 1)
+
+    def get_normalized_probs(
+        self,
+        net_output: Tuple[
+            torch.Tensor, Optional[Dict[str, List[Optional[torch.Tensor]]]]
+        ],
+        log_probs: bool,
+        sample: 
Optional[Dict[str, torch.Tensor]] = None, + ): + logits = self.get_logits(net_output) + + probs = super().get_normalized_probs(logits, log_probs, sample) + # BTC -> TBC for ctc + probs = probs.transpose(0, 1) + return probs + + def normalize(self, dense_x): + + bsz, tsz, csz = dense_x.shape + + if dense_x.numel() == 0: + raise Exception(dense_x.shape) + _, k = dense_x.max(-1) + hard_x = ( + dense_x.new_zeros(bsz * tsz, csz) + .scatter_(-1, k.view(-1, 1), 1.0) + .view(-1, csz) + ) + hard_probs = torch.mean(hard_x.float(), dim=0) + code_perplexity = torch.exp( + -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1) + ) + + avg_probs = torch.softmax(dense_x.reshape(-1, csz).float(), dim=-1).mean(dim=0) + prob_perplexity = torch.exp( + -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1) + ) + + if not self.no_softmax: + if self.training and self.gumbel: + dense_x = F.gumbel_softmax( + dense_x.float(), tau=self.curr_temp, hard=self.hard_gumbel + ).type_as(dense_x) + else: + dense_x = dense_x.softmax(-1) + + return dense_x, code_perplexity, prob_perplexity + + def forward( + self, + features, + padding_mask, + random_label=None, + dense_x_only=False, + segment=True, + aux_target=None, + ): + if segment: + features, padding_mask = self.segmenter.pre_segment(features, padding_mask) + + orig_size = features.size(0) * features.size(1) - padding_mask.sum() + + gen_result = self.generator(features, random_label, padding_mask) + + orig_dense_x, token_x = gen_result["dense_x"], gen_result["token_x"] + orig_dense_padding_mask = gen_result["dense_padding_mask"] + + if segment: + dense_x, dense_padding_mask = self.segmenter.logit_segment( + orig_dense_x, orig_dense_padding_mask + ) + else: + dense_x = orig_dense_x + dense_padding_mask = orig_dense_padding_mask + + dense_logits = dense_x + prob_perplexity = None + code_perplexity = None + + if not (self.no_softmax and dense_x_only): + dense_x, code_perplexity, prob_perplexity = self.normalize(dense_logits) + + if dense_x_only or self.discriminator is None: + return { + "logits": dense_x, + "padding_mask": dense_padding_mask, + } + + token_padding_mask = random_label == self.pad + + dense_y = self.discriminator(dense_x, dense_padding_mask) + token_y = self.discriminator(token_x, token_padding_mask) + + sample_size = features.size(0) + + d_step = self.discrim_step(self.update_num) + + fake_smooth = self.smoothing + real_smooth = self.smoothing + if self.smoothing_one_sided: + fake_smooth = 0 + + zero_loss = None + smoothness_loss = None + code_pen = None + mmi_loss = None + + if d_step: + loss_dense = F.binary_cross_entropy_with_logits( + dense_y, + dense_y.new_ones(dense_y.shape) - fake_smooth, + reduction="sum", + ) + loss_token = F.binary_cross_entropy_with_logits( + token_y, + token_y.new_zeros(token_y.shape) + real_smooth, + reduction="sum", + ) + if self.training and self.gradient_penalty > 0: + grad_pen = self.calc_gradient_penalty(token_x, dense_x) + grad_pen = grad_pen.sum() * self.gradient_penalty + else: + grad_pen = None + else: + grad_pen = None + loss_token = None + loss_dense = F.binary_cross_entropy_with_logits( + dense_y, + dense_y.new_zeros(dense_y.shape) + fake_smooth, + reduction="sum", + ) + num_vars = dense_x.size(-1) + if prob_perplexity is not None: + code_pen = (num_vars - prob_perplexity) / num_vars + code_pen = code_pen * sample_size * self.code_penalty + + if self.smoothness_weight > 0: + smoothness_loss = F.mse_loss( + dense_logits[:, :-1], dense_logits[:, 1:], reduction="none" + ) + 
smoothness_loss[dense_padding_mask[:, 1:]] = 0
+            smoothness_loss = (
+                smoothness_loss.mean() * sample_size * self.smoothness_weight
+            )
+
+        if (self.mmi_weight > 0) and (aux_target is not None):
+            inter_x = self.decoder(gen_result["inter_x"])
+            if self.target_downsample_rate > 1:
+                aux_target = aux_target[:, :: self.target_downsample_rate]
+            max_t_len = min(aux_target.shape[1], inter_x.shape[1])
+            mmi_loss = F.cross_entropy(
+                inter_x[:, :max_t_len].transpose(1, 2),
+                aux_target[:, :max_t_len],
+                ignore_index=-1,
+                reduction="none",
+            )
+            mmi_loss = mmi_loss.mean() * mmi_loss.shape[0] * self.mmi_weight
+
+        result = {
+            "losses": {
+                "grad_pen": grad_pen,
+                "code_pen": code_pen,
+                "smoothness": smoothness_loss,
+                "mmi": mmi_loss,
+            },
+            "temp": self.curr_temp,
+            "code_ppl": code_perplexity,
+            "prob_ppl": prob_perplexity,
+            "d_steps": int(d_step),
+            "sample_size": sample_size,
+        }
+
+        suff = "_d" if d_step else "_g"
+        result["losses"]["dense" + suff] = loss_dense
+        result["losses"]["token" + suff] = loss_token
+
+        return result
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/apply_pca.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/apply_pca.py
new file mode 100644
index 0000000..10ad6ce
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/apply_pca.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import argparse
+import os
+import os.path as osp
+import math
+import numpy as np
+import tqdm
+import torch
+from shutil import copyfile
+
+from npy_append_array import NpyAppendArray
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="transforms features via a given pca and stores them in the target dir"
+    )
+    # fmt: off
+    parser.add_argument('source', help='directory with features')
+    parser.add_argument('--split', help='which split to read', required=True)
+    parser.add_argument('--save-dir', help='where to save the output', required=True)
+    parser.add_argument('--pca-path', type=str, help='pca location. 
will append _A.npy and _b.npy', required=True)
+    parser.add_argument('--batch-size', type=int, default=2048000, help='batch size')
+    parser.add_argument('--unfiltered', action='store_true', help='process the unfiltered version')
+    # fmt: on
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    source_path = osp.join(args.source, args.split)
+    data_path = source_path + "_unfiltered" if args.unfiltered else source_path
+
+    print(f"data path: {data_path}")
+
+    features = np.load(data_path + ".npy", mmap_mode="r")
+    pca_A = torch.from_numpy(np.load(args.pca_path + "_A.npy")).cuda()
+    pca_b = torch.from_numpy(np.load(args.pca_path + "_b.npy")).cuda()
+
+    os.makedirs(args.save_dir, exist_ok=True)
+    save_path = osp.join(args.save_dir, args.split)
+
+    copyfile(source_path + ".tsv", save_path + ".tsv")
+    copyfile(data_path + ".lengths", save_path + ".lengths")
+
+    if osp.exists(source_path + ".phn"):
+        copyfile(source_path + ".phn", save_path + ".phn")
+
+    if osp.exists(source_path + ".wrd"):
+        copyfile(source_path + ".wrd", save_path + ".wrd")
+
+    if osp.exists(save_path + ".npy"):
+        os.remove(save_path + ".npy")
+    npaa = NpyAppendArray(save_path + ".npy")
+
+    batches = math.ceil(features.shape[0] / args.batch_size)
+
+    with torch.no_grad():
+        for b in tqdm.trange(batches):
+            start = b * args.batch_size
+            end = start + args.batch_size
+            x = torch.from_numpy(features[start:end]).cuda()
+            x = torch.matmul(x, pca_A) + pca_b
+            npaa.append(x.cpu().numpy())
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/copy_labels.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/copy_labels.py
new file mode 100644
index 0000000..9898683
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/copy_labels.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import sys
+
+for idx, line in enumerate(sys.stdin):
+    print(f"utt{idx:010d} {line}", end="")
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_lexicon.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_lexicon.py
new file mode 100644
index 0000000..5bf3e51
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_lexicon.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
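+#
+# Reads lexicon entries ("<word> <unit> <unit> ...") from stdin and keeps only
+# those whose units all appear in the given unit dictionary.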
+ +import argparse +import sys + +from fairseq.data import Dictionary + + +def get_parser(): + parser = argparse.ArgumentParser( + description="filters a lexicon given a unit dictionary" + ) + parser.add_argument("-d", "--unit-dict", help="unit dictionary", required=True) + return parser + + +def main(): + parser = get_parser() + args = parser.parse_args() + + d = Dictionary.load(args.unit_dict) + symbols = set(d.symbols) + + for line in sys.stdin: + items = line.rstrip().split() + skip = len(items) < 2 + for x in items[1:]: + if x not in symbols: + skip = True + break + if not skip: + print(line, end="") + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_tsv.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_tsv.py new file mode 100644 index 0000000..a09d79a --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/filter_tsv.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import os +import argparse +import sys + + +parser = argparse.ArgumentParser() +parser.add_argument("--tsv", required=True, type=str) +parser.add_argument("--no-skip", action="store_true") +parser.add_argument("--keep", action="store_true") +params = parser.parse_args() + + +def get_fname(line): + p = os.path.basename(line.split("\t")[0]) + p = os.path.splitext(p)[0] + return p + + +# filenames to exclude +seen = set() +with open(params.tsv) as f: + if not params.no_skip: + root = next(f).rstrip() + for line in f: + seen.add(get_fname(line)) + +for i, line in enumerate(sys.stdin): + exists = get_fname(line) in seen + keep = (exists and params.keep) or (not exists and not params.keep) + if i == 0 or keep: + print(line, end="") diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py new file mode 100644 index 0000000..2e31c30 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import sys + +from g2p_en import G2p + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--compact", + action="store_true", + help="if set, compacts phones", + ) + args = parser.parse_args() + + compact = args.compact + + wrd_to_phn = {} + g2p = G2p() + for line in sys.stdin: + words = line.strip().split() + phones = [] + for w in words: + if w not in wrd_to_phn: + wrd_to_phn[w] = g2p(w) + if compact: + wrd_to_phn[w] = [ + p[:-1] if p[-1].isnumeric() else p for p in wrd_to_phn[w] + ] + phones.extend(wrd_to_phn[w]) + try: + print(" ".join(phones)) + except: + print(wrd_to_phn, words, phones, file=sys.stderr) + raise + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py new file mode 100644 index 0000000..36c85d1 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. 
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import sys
+
+
+def main():
+    for line in sys.stdin:
+        print(line.replace(" ", "").replace("|", " ").strip())
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/mean_pool.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/mean_pool.py
new file mode 100644
index 0000000..4eea048
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/mean_pool.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import argparse
+import os
+import os.path as osp
+import math
+import numpy as np
+import tqdm
+import torch
+import torch.nn.functional as F
+from shutil import copyfile
+
+from npy_append_array import NpyAppendArray
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="mean pools representations by compressing uniform splits of the data"
+    )
+    # fmt: off
+    parser.add_argument('source', help='directory with features')
+    parser.add_argument('--split', help='which split to read', required=True)
+    parser.add_argument('--save-dir', help='where to save the output', required=True)
+    parser.add_argument('--subsample-rate', type=float, default=0.5, help='rate to subsample data to')
+
+    parser.add_argument('--remove-extra', action='store_true', help="if true, removes extra states that can't be pooled, otherwise pads with 0s")
+    # fmt: on
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    source_path = osp.join(args.source, args.split)
+
+    print(f"data path: {source_path}")
+
+    features = np.load(source_path + ".npy", mmap_mode="r")
+
+    os.makedirs(args.save_dir, exist_ok=True)
+    save_path = osp.join(args.save_dir, args.split)
+
+    copyfile(source_path + ".tsv", save_path + ".tsv")
+
+    if os.path.exists(source_path + ".phn"):
+        copyfile(source_path + ".phn", save_path + ".phn")
+    if os.path.exists(source_path + ".wrd"):
+        copyfile(source_path + ".wrd", save_path + ".wrd")
+
+    if os.path.exists(osp.join(args.source, "dict.phn.txt")):
+        copyfile(
+            osp.join(args.source, "dict.phn.txt"),
+            osp.join(args.save_dir, "dict.phn.txt"),
+        )
+
+    if osp.exists(save_path + ".npy"):
+        os.remove(save_path + ".npy")
+    npaa = NpyAppendArray(save_path + ".npy")
+
+    with open(source_path + ".lengths", "r") as lf:
+        lengths = lf.readlines()
+
+    fsz = features.shape[-1]
+    start = 0
+    with torch.no_grad():
+        with open(save_path + ".lengths", "w") as lengths_out:
+            for length in tqdm.tqdm(lengths):
+                length = int(length)
+                end = start + length
+                feats = features[start:end]
+                start += length
+                x = torch.from_numpy(feats).cuda()
+                target_num = math.ceil(length * args.subsample_rate)
+                rem = length % target_num
+
+                if rem > 0:
+                    if args.remove_extra:
+                        to_rem = target_num - rem
+                        target_num -= 1
+                        x = x[:-to_rem]
+                    else:
+                        to_add = target_num - rem
+                        x = F.pad(x, [0, 0, 0, to_add])
+                        x[-to_add:] = x[-to_add - 1]
+
+                x = x.view(target_num, -1, fsz)
+                x = x.mean(dim=-2)
+                print(target_num, file=lengths_out)
+                npaa.append(x.cpu().numpy())
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/merge_clusters.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/merge_clusters.py
new file mode 100644
index 0000000..2780f9d
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/merge_clusters.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import argparse
+import os
+import os.path as osp
+import numpy as np
+import tqdm
+import torch
+import random
+from shutil import copyfile
+
+from npy_append_array import NpyAppendArray
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="merges consecutive frames that share a cluster id by mean- or sample-pooling their features"
+    )
+    # fmt: off
+    parser.add_argument('source', help='directory with features')
+    parser.add_argument('--split', help='which split to read', required=True)
+    parser.add_argument('--save-dir', help='where to save the output', required=True)
+    parser.add_argument('--cluster-dir', help='where the clusters are')
+    parser.add_argument('--pooling', type=str, default='mean', choices=['mean', 'sample'], help='how to pool')
+    # fmt: on
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    source_path = osp.join(args.source, args.split)
+    cluster_path = osp.join(args.cluster_dir, args.split + ".src")
+    print(f"data path: {source_path}")
+
+    features = np.load(source_path + ".npy", mmap_mode="r")
+    sizes = []
+    offsets = []
+    offset = 0
+    with open(source_path + ".lengths", "r") as len_f:
+        for line in len_f:
+            length = int(line.rstrip())
+            sizes.append(length)
+            offsets.append(offset)
+            offset += length
+
+    clusters = []
+    with open(cluster_path, "r") as cf:
+        for line in cf:
+            line = line.rstrip()
+            items = line.split()
+            items = list(map(int, items))
+            clusters.append(items)
+
+    os.makedirs(args.save_dir, exist_ok=True)
+    save_path = osp.join(args.save_dir, args.split)
+
+    copyfile(source_path + ".tsv", save_path + ".tsv")
+
+    if os.path.exists(source_path + ".phn"):
+        copyfile(source_path + ".phn", save_path + ".phn")
+    if os.path.exists(osp.join(args.source, "dict.phn.txt")):
+        copyfile(
+            osp.join(args.source, "dict.phn.txt"),
+            osp.join(args.save_dir, "dict.phn.txt"),
+        )
+    if os.path.exists(source_path + ".wrd"):
+        copyfile(source_path + ".wrd", save_path + ".wrd")
+
+    if osp.exists(save_path + ".npy"):
+        os.remove(save_path + ".npy")
+    npaa = NpyAppendArray(save_path + ".npy")
+
+    def merge(feats, clust):
+        feats = torch.from_numpy(feats.copy())
+        clust = torch.LongTensor(clust)
+        _, counts = clust.unique_consecutive(return_counts=True)
+        curr = 0
+
+        merged = []
+        for c in counts:
+            c = c.item()
+            start = curr
+            end = curr + c
+            curr += c
+            if args.pooling == "mean":
+                new_x = feats[start:end].mean(dim=0)
+            elif args.pooling == "sample":
+                new_x = feats[start + int(random.random() * c)]
+            else:
+                raise NotImplementedError()
+            merged.append(new_x)
+
+        return torch.stack(merged, dim=0).numpy()
+
+    with open(save_path + ".lengths", "w") as l_f:
+        for size, offset, clust in tqdm.tqdm(
+            zip(sizes, offsets, clusters), total=len(sizes)
+        ):
+            end = size + offset
+            feats = features[offset:end]
+            feats = merge(feats, clust)
+            print(len(feats), file=l_f)
+            npaa.append(feats)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py
new file mode 100644
index 0000000..c2bd16e
--- /dev/null
+++ 
b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import fasttext as ft +import os +import regex +import sys + + +def get_parser(): + parser = argparse.ArgumentParser( + description="reads text from stdin and outputs normalized, lid-filtered version to stdout" + ) + parser.add_argument( + "--fasttext-model", + help="path to fasttext model", + default="lid.187.bin", + ) + parser.add_argument("--lang", help="language id", required=True) + parser.add_argument( + "--lid-threshold", + type=float, + help="threshold for this lang id probability", + default=0.4, + ) + + return parser + + +def main(): + parser = get_parser() + args = parser.parse_args() + filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]") + + lg = args.lang.lower() + lg_label = f"__label__{lg}" + thresh = args.lid_threshold + + if os.path.exists(args.fasttext_model): + model = ft.load_model(args.fasttext_model) + else: + print( + f"fasttext language id model {args.fasttext_model} not found. Proceeding without language filtering. " + f"To enable language filtering, please download the latest language id model " + f"from https://fasttext.cc/docs/en/language-identification.html", + file=sys.stderr, + ) + model = None + + for line in sys.stdin: + line = line.strip() + line = filter_r.sub(" ", line) + line = " ".join(line.split()) + + if model is not None: + lid, prob = model.predict(line, k=100) + try: + target_idx = lid.index(lg_label) + except ValueError: + continue + if target_idx == 0 or prob[target_idx] >= thresh: + print(line) + else: + print(line) + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_text.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_text.py new file mode 100644 index 0000000..9d0ffeb --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/normalize_text.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import regex +import sys + + +def main(): + filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]") + + for line in sys.stdin: + line = line.strip() + line = filter_r.sub(" ", line) + line = " ".join(line.split()) + print(line) + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/pca.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/pca.py new file mode 100644 index 0000000..948cf53 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/pca.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
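+#
+# Fits a faiss PCA transform to the given feature matrix and saves it as
+# <dim>_pca_A.npy / <dim>_pca_b.npy; apply_pca.py later applies it as x @ A + b.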
+
+import argparse
+import os
+import os.path as osp
+import numpy as np
+
+import faiss
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="compute a pca matrix given an array of numpy features"
+    )
+    # fmt: off
+    parser.add_argument('data', help='numpy file containing features')
+    parser.add_argument('--output', help='where to save the pca matrix', required=True)
+    parser.add_argument('--dim', type=int, help='dim for pca reduction', required=True)
+    parser.add_argument('--eigen-power', type=float, default=0, help='eigen power, -0.5 for whitening')
+    # fmt: on
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    print("Reading features")
+    x = np.load(args.data, mmap_mode="r")
+
+    print("Computing PCA")
+    pca = faiss.PCAMatrix(x.shape[-1], args.dim, args.eigen_power)
+    pca.train(x)
+    b = faiss.vector_to_array(pca.b)
+    A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
+
+    os.makedirs(args.output, exist_ok=True)
+
+    prefix = str(args.dim)
+    if args.eigen_power != 0:
+        prefix += f"_{args.eigen_power}"
+
+    np.save(osp.join(args.output, f"{prefix}_pca_A"), A.T)
+    np.save(osp.join(args.output, f"{prefix}_pca_b"), b)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py
new file mode 100644
index 0000000..c6512d7
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import argparse
+import numpy as np
+import sys
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="converts words to phones, optionally inserting silences between and around words"
+    )
+    parser.add_argument(
+        "--sil-prob",
+        "-s",
+        type=float,
+        default=0,
+        help="probability of inserting silence between each word",
+    )
+    parser.add_argument(
+        "--surround",
+        action="store_true",
+        help="if set, surrounds each example with silence",
+    )
+    parser.add_argument(
+        "--lexicon",
+        help="lexicon to convert to phones",
+        required=True,
+    )
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    sil_prob = args.sil_prob
+    surround = args.surround
+    sil = "<SIL>"
+
+    wrd_to_phn = {}
+
+    with open(args.lexicon, "r") as lf:
+        for line in lf:
+            items = line.rstrip().split()
+            assert len(items) > 1, line
+            assert items[0] not in wrd_to_phn, items
+            wrd_to_phn[items[0]] = items[1:]
+
+    for line in sys.stdin:
+        words = line.strip().split()
+
+        if not all(w in wrd_to_phn for w in words):
+            continue
+
+        phones = []
+        if surround:
+            phones.append(sil)
+
+        sample_sil_probs = None
+        if sil_prob > 0 and len(words) > 1:
+            sample_sil_probs = np.random.random(len(words) - 1)
+
+        for i, w in enumerate(words):
+            phones.extend(wrd_to_phn[w])
+            if (
+                sample_sil_probs is not None
+                and i < len(sample_sil_probs)
+                and sample_sil_probs[i] < sil_prob
+            ):
+                phones.append(sil)
+
+        if surround:
+            phones.append(sil)
+        print(" ".join(phones))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio.sh
new file mode 100644
index 0000000..013f7a9
--- /dev/null
+++ 
b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env zsh +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +source_dir=$1 +tgt_dir=$2 +model=$3 + +if [ -z "$4" ] + then + dim=512 + else + dim=$4 +fi + +echo "using $dim dim for PCA" + +if [ -z "$5" ] + then + layer=14 + else + layer=$5 +fi + +echo "extracting from layer $layer" + +train_split=train +valid_split=valid +test_split=test + +all_splits=($train_split) + +if [[ -f "$source_dir/valid.tsv" ]]; then + all_splits+=('valid') +fi + +if [[ -f "$source_dir/test.tsv" ]]; then + all_splits+=('test') +fi + +echo "processing splits: $all_splits" + +mkdir -p $tgt_dir + +cp $source_dir/*.tsv $tgt_dir +cp $source_dir/*.wrd $tgt_dir +cp $source_dir/*.ltr $tgt_dir +cp $source_dir/*.phn $tgt_dir +cp $source_dir/dict* $tgt_dir + +setopt shwordsplit + +for split in $all_splits; do + python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py $source_dir --split $split \ + --save-dir $tgt_dir --checkpoint $model --layer $layer +done + +python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py $tgt_dir/${train_split}.tsv \ +--checkpoint $model --save-dir $tgt_dir -f "CLUS128" --sample-pct 1.0 + +for split in $all_splits; do + python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py $tgt_dir \ + --checkpoint $model --path $tgt_dir/CLUS128 --split $split +done + +python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/pca.py $tgt_dir/${train_split}.npy --output $tgt_dir/pca --dim $dim + +for split in $all_splits; do + python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/apply_pca.py $tgt_dir --split $split --save-dir $tgt_dir/precompute_pca$dim --pca-path $tgt_dir/pca/${dim}_pca --batch-size 1048000 + + python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/merge_clusters.py $tgt_dir/precompute_pca$dim --cluster-dir $tgt_dir/CLUS128 \ + --split $split --save-dir $tgt_dir/precompute_pca${dim}_cls128_mean --pooling mean + + python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/mean_pool.py $tgt_dir/precompute_pca${dim}_cls128_mean \ + --save-dir $tgt_dir/precompute_pca${dim}_cls128_mean_pooled --split $split +done diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio_v2.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio_v2.sh new file mode 100644 index 0000000..96a52c5 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_audio_v2.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env zsh +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
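+#
+# Same feature-extraction front end as prepare_audio.sh, but instead of PCA and
+# cluster pooling it dumps MFCC-based k-means labels for the training split,
+# which can serve as the auxiliary MMI target (see mmi_weight in wav2vec_u.py).
+# usage: prepare_audio_v2.sh <source_dir> <tgt_dir> <model.pt> [n_clusters=64] [layer=14]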
+
+source_dir=$1
+tgt_dir=$2
+model=$3
+
+if [ -z "$4" ]
+  then
+    dim=64
+  else
+    dim=$4
+fi
+
+echo "using $dim clusters for auxiliary target"
+
+if [ -z "$5" ]
+  then
+    layer=14
+  else
+    layer=$5
+fi
+
+echo "extracting from layer $layer"
+
+train_split=train
+valid_split=valid
+test_split=test
+
+all_splits=($train_split)
+
+if [[ -f "$source_dir/valid.tsv" ]]; then
+    all_splits+=('valid')
+fi
+
+if [[ -f "$source_dir/test.tsv" ]]; then
+    all_splits+=('test')
+fi
+
+echo "processing splits: $all_splits"
+
+mkdir -p $tgt_dir
+
+cp $source_dir/*.tsv $tgt_dir
+cp $source_dir/*.wrd $tgt_dir
+cp $source_dir/*.ltr $tgt_dir
+cp $source_dir/*.phn $tgt_dir
+cp $source_dir/dict* $tgt_dir
+
+setopt shwordsplit
+
+for split in $all_splits; do
+  python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py $source_dir --split $split \
+  --save-dir $tgt_dir --checkpoint $model --layer $layer
+done
+
+
+mkdir -p $tgt_dir/mfcc
+
+# Consider splitting the corpus into chunks for a large corpus; see HuBERT preprocessing for more details
+python $FAIRSEQ_ROOT/examples/hubert/simple_kmeans/dump_mfcc_feature.py \
+    $tgt_dir $train_split 1 0 $tgt_dir/mfcc
+python $FAIRSEQ_ROOT/examples/hubert/simple_kmeans/dump_km_label.py \
+    $tgt_dir/mfcc $train_split $tgt_dir/mfcc/cls$dim 1 0 $tgt_dir/mfcc/cls${dim}_idx
+cp $tgt_dir/mfcc/cls${dim}_idx/${train_split}_0_1.km $tgt_dir/$train_split.km
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_text.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_text.sh
new file mode 100644
index 0000000..dbd17a2
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_text.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env zsh
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+lg=$1
+text_path=$2
+target_dir=$3
+min_phones=$4
+phonemizer=$5
+lid_path=$6
+sil_prob=$7
+
+if [ -z "$lid_path" ]; then
+  lid_path="lid.187.bin"
+fi
+
+ph_lg=${lg:l}
+if test "$lg" = 'fr'; then
+  ph_lg='fr-fr'
+elif test "$lg" = 'en'; then
+  ph_lg='en-us'
+elif test "$lg" = 'pt'; then
+  ph_lg='pt-br'
+fi
+
+ESPEAK_PATH=''
+if test "$phonemizer" = 'espeak'; then
+  ESPEAK_PATH=$(which espeak)
+elif test "$phonemizer" = 'espeak-ng'; then
+  ESPEAK_PATH=$(which espeak-ng)
+elif test "$phonemizer" = 'G2P'; then
+  ESPEAK_PATH=''
+else
+  echo "Unknown phonemizer $phonemizer. Valid options are espeak, espeak-ng and G2P"
+  exit 1
+fi
+
+echo $lg
+echo $ph_lg
+echo $text_path
+echo $target_dir
+echo "min phone seen threshold is $min_phones"
+
+mkdir -p $target_dir
+python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py --lang $lg --fasttext-model $lid_path < $text_path | grep -v '\-\-\-' >! $target_dir/lm.upper.lid.txt
+python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/lm.upper.lid.txt --only-source --destdir $target_dir --thresholdsrc 2 --padding-factor 1 --dict-only
+cut -f1 -d' ' $target_dir/dict.txt | grep -v -x '[[:punct:]]*' | grep -Pv '\d\d\d\d\d+' >! 
$target_dir/words.txt
+
+
+if [ -z "$ESPEAK_PATH" ]; then
+  python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py --compact < $target_dir/words.txt > $target_dir/phones.txt
+else
+  # echoing 1 into corpus will prevent the mismatch lines between lexicon and phones in case the phonemizer fails
+  one=$(echo "1" | PHONEMIZER_ESPEAK_PATH=$ESPEAK_PATH phonemize -p ' ' -w '' -l $ph_lg --language-switch remove-flags)
+  sed 's/$/ 1/' $target_dir/words.txt | PHONEMIZER_ESPEAK_PATH=$ESPEAK_PATH phonemize -o $target_dir/phones.txt -p ' ' -w '' -l $ph_lg -j 70 --language-switch remove-flags
+  echo "one is ${one}"
+  sed -i "s/${one}$//" $target_dir/phones.txt
+fi
+
+paste $target_dir/words.txt $target_dir/phones.txt >! $target_dir/lexicon.lst
+
+python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/phones.txt --only-source --destdir $target_dir/phones --thresholdsrc $min_phones --padding-factor 1 --dict-only
+
+python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/filter_lexicon.py -d $target_dir/phones/dict.txt < $target_dir/lexicon.lst >! $target_dir/lexicon_filtered.lst
+python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py -s $sil_prob --surround --lexicon $target_dir/lexicon_filtered.lst < $target_dir/lm.upper.lid.txt >! $target_dir/phones/lm.phones.filtered.txt
+cp $target_dir/phones/dict.txt $target_dir/phones/dict.phn.txt
+echo "<SIL> 0" >> $target_dir/phones/dict.phn.txt
+python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/phones/lm.phones.filtered.txt --workers 70 --only-source --destdir $target_dir/phones --srcdict $target_dir/phones/dict.phn.txt
+
+$KENLM_ROOT/lmplz -o 4 < $target_dir/lm.upper.lid.txt --discount_fallback --prune 0 0 0 3 >! $target_dir/kenlm.wrd.o40003.arpa
+$KENLM_ROOT/build_binary $target_dir/kenlm.wrd.o40003.arpa $target_dir/kenlm.wrd.o40003.bin
+
+lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_words_sil lm_arpa=$target_dir/kenlm.wrd.o40003.arpa wav2letter_lexicon=$target_dir/lexicon_filtered.lst data_dir=$target_dir/phones in_labels=phn "blank_symbol='<SIL>'"
+lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_words lm_arpa=$target_dir/kenlm.wrd.o40003.arpa wav2letter_lexicon=$target_dir/lexicon_filtered.lst data_dir=$target_dir/phones in_labels=phn
+
+$KENLM_ROOT/lmplz -o 4 < $target_dir/phones/lm.phones.filtered.txt --discount_fallback >! $target_dir/phones/lm.phones.filtered.04.arpa
+$KENLM_ROOT/build_binary $target_dir/phones/lm.phones.filtered.04.arpa $target_dir/phones/lm.phones.filtered.04.bin
+$KENLM_ROOT/lmplz -o 6 < $target_dir/phones/lm.phones.filtered.txt --discount_fallback >! 
$target_dir/phones/lm.phones.filtered.06.arpa
+$KENLM_ROOT/build_binary $target_dir/phones/lm.phones.filtered.06.arpa $target_dir/phones/lm.phones.filtered.06.bin
+
+lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_phn_sil lm_arpa=$target_dir/phones/lm.phones.filtered.06.arpa data_dir=$target_dir/phones in_labels=phn "blank_symbol='<SIL>'"
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_timit.sh b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_timit.sh
new file mode 100644
index 0000000..d8f5d59
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/prepare_timit.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+timit_root=$1  # assume it is the upper-cased version
+tgt_dir=$2
+model=$3
+
+set -eu
+
+setups="matched unmatched"
+splits="test valid train train_text"
+
+tgt_dir=$(realpath $tgt_dir)
+sph2wav=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
+wav_dir=$tgt_dir/wav
+
+
+mkdir -p $tgt_dir $wav_dir
+find $timit_root/{TRAIN,TEST} -iname "*.WAV" > $tgt_dir/all_sph.flist
+cat $tgt_dir/all_sph.flist | sed -e 's#//*#/#g' -e 's#.*/\([^/]*\)/\([^/]*\).WAV#\1_\2#g' > $tgt_dir/all.uid
+paste -d' ' $tgt_dir/{all_sph.flist,all.uid} | \
+  awk -v sph2wav=$sph2wav -v wav_dir=$wav_dir '{print sph2wav " -f wav " $1 " > " wav_dir "/" $2 ".wav"}' \
+  > $tgt_dir/sph2wav.sh
+bash $tgt_dir/sph2wav.sh
+cat $tgt_dir/all.uid | awk -v wav_dir=$(pwd)/$wav_dir '{print $1" "wav_dir"/"$1".wav"}' | sort > $tgt_dir/all_wav.scp
+cut -d' ' -f2 $tgt_dir/all_wav.scp | xargs -I{} soxi -s {} > $tgt_dir/all.dur
+paste -d' ' $tgt_dir/{all_wav.scp,all.dur} > $tgt_dir/all_wav_dur.scp
+rm $tgt_dir/{all.uid,all_sph.flist,sph2wav.sh}
+
+find $timit_root/{TRAIN,TEST} -iname "*.PHN" > $tgt_dir/all_phn60.flist
+while read line; do
+  if [ ! 
-f $line ]; then + >&2 echo "Cannot find transcription file '$line'" && exit 1; + fi + cut -f3 -d' ' "$line" | tr '\n' ' ' | perl -ape 's: *$:\n:;' +done < $tgt_dir/all_phn60.flist > $tgt_dir/all.phn60 +cat $tgt_dir/all_phn60.flist | sed -e 's#//*#/#g' -e 's#.*/\([^/]*\)/\([^/]*\).PHN#\1_\2#g' | \ + paste -d' ' - $tgt_dir/all.phn60 | \ + $KALDI_ROOT/egs/timit/s5/local/timit_norm_trans.pl -i - -m $KALDI_ROOT/egs/timit/s5/conf/phones.60-48-39.map -to 39 | \ + sort > $tgt_dir/all.phn +echo "done preparing wav and 39-phone transcripts" + + +for s in $setups; do + mkdir -p $tgt_dir/$s + for x in $splits; do + uid_path=config/timit_${s}/${x}.uid + grep -w -f $uid_path $tgt_dir/all.phn | cut -d' ' -f2- > $tgt_dir/$s/$x.phn + ln -sf $(realpath $tgt_dir/$s/$x.phn) $tgt_dir/$s/$x.wrd + + echo "/" > $tgt_dir/$s/$x.tsv && grep -w -f $uid_path $tgt_dir/all_wav_dur.scp | cut -d' ' -f2- | sed 's# #\t#' >> $tgt_dir/$s/$x.tsv + done + + for x in $splits; do + cat $tgt_dir/$s/$x.phn + done | tr ' ' '\n' | sort -u | awk '{print $1" "1}' > $tgt_dir/$s/dict.phn.txt + ln -sf $(realpath $tgt_dir/$s/dict.phn.txt) $tgt_dir/$s/dict.wrd.txt +done +echo "done preparing unmatched and matched setups for TIMIT" + + +for s in $setups; do + zsh scripts/prepare_audio.sh $tgt_dir/$s $tgt_dir/$s/feat $model + + lm_dir=$tgt_dir/$s/phones + fst_dir=$tgt_dir/$s/fst/phn_to_phn + + python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $tgt_dir/$s/train_text.phn --workers 10 --only-source --destdir $lm_dir --srcdict $tgt_dir/$s/dict.phn.txt + $KENLM_ROOT/lmplz -o 3 < $tgt_dir/$s/train_text.phn --discount_fallback >$lm_dir/train_text_phn.03.arpa + $KENLM_ROOT/build_binary $lm_dir/train_text_phn.03.arpa $lm_dir/train_text_phn.03.bin + $KENLM_ROOT/lmplz -o 4 < $tgt_dir/$s/train_text.phn --discount_fallback >$lm_dir/train_text_phn.04.arpa + $KENLM_ROOT/build_binary $lm_dir/train_text_phn.04.arpa $lm_dir/train_text_phn.04.bin + + python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$fst_dir lm_arpa=$lm_dir/train_text_phn.03.arpa data_dir=$tgt_dir/$s in_labels=phn +done +echo "done preprocessing audio and text for wav2vec-U" diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/remove_silence.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/remove_silence.py new file mode 100644 index 0000000..fac88b9 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/remove_silence.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
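+#
+# Keeps only the voiced intervals computed by vads.py (one "start:end" sample
+# range per segment), i.e. removes silences; see the usage notes below.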
+ +""" +get intervals from .vads file, specify output data, and this script removes silences and saves the audio data in out path folder +paths=shards/train.tsv +vads=shards/train.vads +python remove_silence.py --paths $paths --vads $vads +""" + +import os +import argparse +import torch +import torchaudio +import tqdm + + +parser = argparse.ArgumentParser() +parser.add_argument("--tsv", default="", type=str) +parser.add_argument("--vads", default="", type=str) +parser.add_argument("--out", type=str) +params = parser.parse_args() + +# load paths +paths = [] +with open(params.tsv) as f: + root = next(f).rstrip() + for line in f: + paths.append(os.path.join(root, line.rstrip().split("\t")[0])) + +# load vads +list_intervals = [] +with open(params.vads) as f: + for line in f: + interval = [ + [int(w.split(":")[0]), int(w.split(":")[1])] for w in line.rstrip().split() + ] + list_intervals.append(interval) + + +# load audio and keep only intervals (i.e. remove silences) +for i in tqdm.trange(len(paths)): + data, _ = torchaudio.load(paths[i]) + if len(list_intervals[i]) > 0: + data_filtered = torch.cat( + [data[0][int(it[0]) : int(it[1])] for it in list_intervals[i]] + ).unsqueeze(0) + else: + data_filtered = data + + # YOU MAY NEED TO MODIFY THIS TO GET THE RIGHT SUBPATH + # outpath = params.out + '/'.join(paths[i].split('/')[-1]) + outpath = params.out + "/" + "/".join(paths[i].split("/")[-2:]) + + if not os.path.isdir("/".join(outpath.split("/")[:-1])): + os.makedirs("/".join(outpath.split("/")[:-1])) + if not os.path.exists(outpath): + torchaudio.save(outpath, data_filtered, sample_rate=16000) + else: + print(outpath, "exists!") diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/vads.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/vads.py new file mode 100644 index 0000000..2398da9 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/vads.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import argparse +import sys + +from copy import deepcopy +from scipy.signal import lfilter + +import numpy as np +from tqdm import tqdm +import soundfile as sf +import os.path as osp + + +def get_parser(): + parser = argparse.ArgumentParser(description="compute vad segments") + parser.add_argument( + "--rvad-home", + "-r", + help="path to rvad home (see https://github.com/zhenghuatan/rVADfast)", + required=True, + ) + + return parser + + +def rvad(speechproc, path): + winlen, ovrlen, pre_coef, nfilter, nftt = 0.025, 0.01, 0.97, 20, 512 + ftThres = 0.5 + vadThres = 0.4 + opts = 1 + + data, fs = sf.read(path) + assert fs == 16_000, "sample rate must be 16khz" + ft, flen, fsh10, nfr10 = speechproc.sflux(data, fs, winlen, ovrlen, nftt) + + # --spectral flatness -- + pv01 = np.zeros(ft.shape[0]) + pv01[np.less_equal(ft, ftThres)] = 1 + pitch = deepcopy(ft) + + pvblk = speechproc.pitchblockdetect(pv01, pitch, nfr10, opts) + + # --filtering-- + ENERGYFLOOR = np.exp(-50) + b = np.array([0.9770, -0.9770]) + a = np.array([1.0000, -0.9540]) + fdata = lfilter(b, a, data, axis=0) + + # --pass 1-- + noise_samp, noise_seg, n_noise_samp = speechproc.snre_highenergy( + fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk + ) + + # sets noisy segments to zero + for j in range(n_noise_samp): + fdata[range(int(noise_samp[j, 0]), int(noise_samp[j, 1]) + 1)] = 0 + + vad_seg = speechproc.snre_vad( + fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk, vadThres + ) + return vad_seg, data + + +def main(): + parser = get_parser() + args = parser.parse_args() + + sys.path.append(args.rvad_home) + import speechproc + + stride = 160 + lines = sys.stdin.readlines() + root = lines[0].rstrip() + for fpath in tqdm(lines[1:]): + path = osp.join(root, fpath.split()[0]) + vads, wav = rvad(speechproc, path) + + start = None + vad_segs = [] + for i, v in enumerate(vads): + if start is None and v == 1: + start = i * stride + elif start is not None and v == 0: + vad_segs.append((start, i * stride)) + start = None + if start is not None: + vad_segs.append((start, len(wav))) + + print(" ".join(f"{v[0]}:{v[1]}" for v in vad_segs)) + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py new file mode 100644 index 0000000..a5dd7ae --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
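vads.py (below) emits one line per utterance of space-separated start:end voiced intervals measured in samples; the hard-coded stride of 160 samples corresponds to rVADfast's 10 ms frame shift at 16 kHz. A small helper sketch for turning such a line into seconds:

def segs_to_seconds(vads_line, sr=16000):
    # "1600:48000 64000:80000" -> [(0.1, 3.0), (4.0, 5.0)]
    return [
        (int(start) / sr, int(end) / sr)
        for start, end in (seg.split(":") for seg in vads_line.split())
    ]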
+ +import argparse +import os +import os.path as osp +import numpy as np +import tqdm +import torch +import sys + +import faiss +import torch.nn.functional as F + +from wav2vec_cluster_faiss import parse_faiss_specs, Wav2VecFeatureReader + + +def get_parser(): + parser = argparse.ArgumentParser(description="apply clusters") + # fmt: off + parser.add_argument('data', help='location of tsv files') + parser.add_argument('--split', help='split to process', required=True) + parser.add_argument('--labels', help='split to process', default="phn") + parser.add_argument('--path', help='path to pca and centroids', required=True) + parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True) + parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14) + parser.add_argument('--max-tsz', type=int, help='batch kmeans up to this much', default=14) + # fmt: on + + return parser + + +def get_iterator(args): + label_path = osp.join(args.data, f"{args.split}.{args.labels}") + if osp.exists(label_path): + lp = open(label_path, "r") + else: + lp = None + + with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp: + lines = fp.read().split("\n") + root = lines.pop(0).strip() + files = [line.rstrip() for line in lines if len(line) > 0] + + if lp is not None: + lbls = [line.rstrip() for line in lp] + else: + lbls = [None] * len(files) + + num = len(files) + reader = Wav2VecFeatureReader(args.checkpoint, args.layer) + + def iterate(): + for fname, lbl in zip(files, lbls): + file = osp.join(root, fname.split("\t")[0]) + feats = reader.get_feats(file) + yield feats.data, fname, lbl + + return iterate, num, root + + +def main(): + parser = get_parser() + args = parser.parse_args() + + spec = osp.basename(args.path) + + try: + faiss_spec = parse_faiss_specs(spec.rstrip("/"))[0] + except: + print(spec) + raise + + print("Faiss Spec:", faiss_spec, file=sys.stderr) + + if faiss_spec.pca: + A = torch.from_numpy(np.load(osp.join(args.path, "pca_A.npy"))).cuda() + b = torch.from_numpy(np.load(osp.join(args.path, "pca_b.npy"))).cuda() + print("Loaded PCA", file=sys.stderr) + + centroids = np.load(osp.join(args.path, "centroids.npy")) + print("Loaded centroids", centroids.shape, file=sys.stderr) + + res = faiss.StandardGpuResources() + index_flat = ( + faiss.IndexFlatL2(centroids.shape[1]) + if not faiss_spec.sphere + else faiss.IndexFlatIP(centroids.shape[1]) + ) + faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat) + faiss_index.add(centroids) + + generator, num, root = get_iterator(args) + iterator = generator() + + had_labels = False + label_path = osp.join(args.path, f"{args.split}.{args.labels}") + + with torch.no_grad(): + with open(osp.join(args.path, f"{args.split}.src"), "w") as fp, open( + osp.join(args.path, f"{args.split}.tsv"), "w" + ) as pp, open(label_path, "w") as lp: + print(root, file=pp) + for f, fname, lbl in tqdm.tqdm(iterator, total=num): + if faiss_spec.pca: + f = torch.mm(f, A) + b + if faiss_spec.norm: + f = F.normalize(f, p=2, dim=-1) + + f = f.cpu().numpy() + + _, z = faiss_index.search(f, 1) + + print(" ".join(str(x.item()) for x in z), file=fp) + print(fname, file=pp) + + if lbl is not None: + print(lbl, file=lp) + had_labels = True + if not had_labels: + os.remove(label_path) + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py 
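For reference, the PCA-then-nearest-centroid assignment that wav2vec_apply_cluster_faiss.py above runs on the GPU can be sketched in plain NumPy (file names follow the script; the random features are stand-ins):

import numpy as np

A = np.load("pca_A.npy")              # (d_in, d_pca) projection saved by the clustering script
b = np.load("pca_b.npy")              # (d_pca,) offset
centroids = np.load("centroids.npy")  # (n_clus, d_pca)

feats = np.random.randn(100, A.shape[0]).astype(np.float32)  # stand-in wav2vec features
x = feats @ A + b                     # same as torch.mm(f, A) + b above
# squared L2 distance to every centroid; argmin gives the cluster id
d2 = ((x[:, None, :] - centroids[None, :, :]) ** 2).sum(-1)
ids = d2.argmin(1)                    # matches faiss_index.search(x, 1) for an IndexFlatL2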
new file mode 100644
index 0000000..632a69e
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import argparse
+import gc
+import os
+import os.path as osp
+import random
+import numpy as np
+import tqdm
+import torch
+
+from collections import namedtuple
+
+import faiss
+
+import fairseq
+import soundfile as sf
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="compute kmeans codebook from wav2vec features"
+    )
+    # fmt: off
+    parser.add_argument('data', help='location of tsv files')
+    parser.add_argument('--save-dir', help='where to save the output', required=True)
+    parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
+    parser.add_argument('--sample-pct', '-r', type=float, help='fraction of files to sample (0 means use all)', default=0)
+    parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
+    parser.add_argument('--faiss-specs', '-f', type=str,
+                        help='faiss index specs; separated by space '
+                             'format is: PCAx_NORM_CLUSx_SPHERICAL -> '
+                             'PCAx if exists first apply PCA '
+                             'NORM if exists, normalize the vector by L2 norm '
+                             'CLUSx must exist, cluster to x clusters '
+                             'SPHERICAL if exists, apply spherical kmeans',
+                        default='l2')
+    # fmt: on
+
+    return parser
+
+
+faiss_spec = namedtuple("faiss_spec", ["pca", "norm", "n_clus", "sphere", "spec_str"])
+
+
+def parse_faiss_specs(specs_str):
+    specs = []
+    for ss in specs_str.split():
+        comps = ss.split("_")
+        pca = 0
+        norm = False
+        n_clus = 0
+        sphere = False
+        for c in comps:
+            if c.startswith("PCA"):
+                pca = int(c[3:])
+            elif c == "NORM":
+                norm = True
+            elif c.startswith("CLUS"):
+                n_clus = int(c[4:])
+            elif c == "SPHERICAL":
+                sphere = True
+        assert n_clus > 0
+        specs.append(
+            faiss_spec(pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=ss)
+        )
+    return specs
+
+
+class Wav2VecFeatureReader(object):
+    def __init__(self, cp_file, layer):
+        state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(cp_file)
+
+        self.layer = layer
+
+        if "cfg" in state:
+            w2v_args = state["cfg"]
+            task = fairseq.tasks.setup_task(w2v_args.task)
+            model = task.build_model(w2v_args.model)
+        else:
+            w2v_args = state["args"]
+            task = fairseq.tasks.setup_task(w2v_args)
+            model = task.build_model(w2v_args)
+        model.load_state_dict(state["model"], strict=True)
+        model.eval()
+        model.cuda()
+        self.model = model
+
+    def read_audio(self, fname):
+        """Load an audio file and return PCM along with the sample rate"""
+        wav, sr = sf.read(fname)
+        assert sr == 16e3
+
+        return wav
+
+    def get_feats(self, loc):
+        x = self.read_audio(loc)
+        with torch.no_grad():
+            source = torch.from_numpy(x).view(1, -1).float().cuda()
+            res = self.model(
+                source=source, mask=False, features_only=True, layer=self.layer
+            )
+            return res["layer_results"][self.layer][0].squeeze(1)
+
+
+def get_iterator(args):
+    with open(args.data, "r") as fp:
+        lines = fp.read().split("\n")
+        root = lines.pop(0).strip()
+        files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
+
+        if getattr(args, "sample_pct", 0) > 0:
+            files = random.sample(files, int(args.sample_pct * len(files)))
+        num = len(files)
+        reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
+
+        def iterate():
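+            # yields one (time, channels) numpy feature array per manifest entry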
for fname in files: + feats = reader.get_feats(fname) + yield feats.cpu().numpy() + + return iterate, num + + +def main(): + parser = get_parser() + args = parser.parse_args() + + faiss_specs = parse_faiss_specs(args.faiss_specs) + print("Faiss Specs:", faiss_specs) + + feat_path = osp.join(args.save_dir, "features") + if osp.exists(feat_path + ".npy"): + feats = np.load(feat_path + ".npy") + else: + generator, num = get_iterator(args) + iterator = generator() + + feats = [] + for f in tqdm.tqdm(iterator, total=num): + feats.append(f) + + del iterator + del generator + + feats = np.concatenate(feats) + + print(feats.shape) + + os.makedirs(args.save_dir, exist_ok=True) + # np.save(feat_path, feats) + + gc.collect() + torch.cuda.empty_cache() + + reload = False + for spec in faiss_specs: + print("Processing spec", spec) + + if reload: + print("Reloading...") + del feats + gc.collect() + feats = np.load(feat_path + ".npy") + + save_path = osp.join(args.save_dir, spec.spec_str) + os.makedirs(save_path, exist_ok=True) + d = feats.shape[-1] + x = feats + if spec.pca > 0: + print("Computing PCA") + pca = faiss.PCAMatrix(d, spec.pca) + pca.train(x) + d = spec.pca + b = faiss.vector_to_array(pca.b) + A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in) + np.save(osp.join(save_path, "pca_A"), A.T) + np.save(osp.join(save_path, "pca_b"), b) + print("Applying PCA") + x = pca.apply_py(x) + + if spec.norm: + reload = spec.pca <= 0 + print("Normalizing") + faiss.normalize_L2(x) + + print("Computing kmeans") + kmeans = faiss.Kmeans( + d, + spec.n_clus, + niter=50, + verbose=True, + spherical=spec.sphere, + max_points_per_centroid=feats.shape[0], + gpu=True, + nredo=3, + ) + kmeans.train(x) + np.save(osp.join(save_path, "centroids"), kmeans.centroids) + del kmeans + del x + gc.collect() + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py new file mode 100644 index 0000000..b07e274 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
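The --faiss-specs string is compact; as an illustration (spec values here are hypothetical, not project defaults), parse_faiss_specs from the script above expands it like this:

specs = parse_faiss_specs("PCA512_NORM_CLUS128 CLUS64_SPHERICAL")
# -> [faiss_spec(pca=512, norm=True, n_clus=128, sphere=False, spec_str="PCA512_NORM_CLUS128"),
#     faiss_spec(pca=0, norm=False, n_clus=64, sphere=True, spec_str="CLUS64_SPHERICAL")]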
+ +import argparse +import os +import os.path as osp +import tqdm +import torch +import torch.nn.functional as F +from shutil import copyfile + +from npy_append_array import NpyAppendArray + +import fairseq +import soundfile as sf + + +def get_parser(): + parser = argparse.ArgumentParser( + description="compute kmeans codebook from kaldi-computed feats" + ) + # fmt: off + parser.add_argument('data', help='location of tsv files') + parser.add_argument('--split', help='which split to read', required=True) + parser.add_argument('--save-dir', help='where to save the output', required=True) + parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True) + parser.add_argument('--layer', type=int, default=14, help='which layer to use') + # fmt: on + + return parser + + +class Wav2VecFeatureReader(object): + def __init__(self, cp_file, layer): + model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task( + [cp_file] + ) + model = model[0] + model.eval() + model.cuda() + self.model = model + self.task = task + self.layer = layer + + def read_audio(self, fname): + """Load an audio file and return PCM along with the sample rate""" + wav, sr = sf.read(fname) + assert sr == 16e3 + + return wav + + def get_feats(self, loc): + x = self.read_audio(loc) + with torch.no_grad(): + source = torch.from_numpy(x).float().cuda() + if self.task.cfg.normalize: + assert source.dim() == 1, source.dim() + with torch.no_grad(): + source = F.layer_norm(source, source.shape) + source = source.view(1, -1) + + m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer) + return m_res["x"].squeeze(0).cpu() + + +def get_iterator(args): + with open(osp.join(args.data, args.split) + ".tsv", "r") as fp: + lines = fp.read().split("\n") + root = lines.pop(0).strip() + files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0] + + num = len(files) + reader = Wav2VecFeatureReader(args.checkpoint, args.layer) + + def iterate(): + for fname in files: + w2v_feats = reader.get_feats(fname) + yield w2v_feats + + return iterate, num + + +def main(): + parser = get_parser() + args = parser.parse_args() + + os.makedirs(args.save_dir, exist_ok=True) + + def create_files(dest): + copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv") + if osp.exists(osp.join(args.data, args.split) + ".wrd"): + copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd") + if osp.exists(osp.join(args.data, args.split) + ".phn"): + copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn") + + if osp.exists(dest + ".npy"): + os.remove(dest + ".npy") + npaa = NpyAppendArray(dest + ".npy") + return npaa + + save_path = osp.join(args.save_dir, args.split) + npaa = create_files(save_path) + + generator, num = get_iterator(args) + iterator = generator() + + with open(save_path + ".lengths", "w") as l_f: + for w2v_feats in tqdm.tqdm(iterator, total=num): + print(len(w2v_feats), file=l_f) + + if len(w2v_feats) > 0: + npaa.append(w2v_feats.numpy()) + + +if __name__ == "__main__": + main() diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wer.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wer.py new file mode 100644 index 0000000..613ab50 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wer.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. 
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Implement unsupervised metric for decoding hyperparameter selection:
+    $$ alpha * LM_PPL + ViterbiUER(%) * 100 $$
+"""
+import argparse
+import logging
+import sys
+
+import editdistance
+
+logging.root.setLevel(logging.INFO)
+logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+def get_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-s", "--hypo", help="hypo transcription", required=True)
+    parser.add_argument(
+        "-r", "--reference", help="reference transcription", required=True
+    )
+    return parser
+
+
+def load_tra(tra_path):
+    with open(tra_path, "r") as f:
+        uid_to_tra = {}
+        for line in f:
+            uid, tra = line.split(None, 1)
+            uid_to_tra[uid] = tra
+    logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}")
+    return uid_to_tra
+
+
+def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p):
+    d_cnt = 0
+    w_cnt = 0
+    w_cnt_h = 0
+    for uid in hyp_uid_to_tra:
+        ref = ref_uid_to_tra[uid].split()
+        if g2p is not None:
+            hyp = g2p(hyp_uid_to_tra[uid])
+            hyp = [p for p in hyp if p != "'" and p != " "]
+            hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp]
+        else:
+            hyp = hyp_uid_to_tra[uid].split()
+        d_cnt += editdistance.eval(ref, hyp)
+        w_cnt += len(ref)
+        w_cnt_h += len(hyp)
+    wer = float(d_cnt) / w_cnt
+    logger.debug(
+        (
+            f"wer = {wer * 100:.2f}%; num. of ref words = {w_cnt}; "
+            f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}"
+        )
+    )
+    return wer
+
+
+def main():
+    args = get_parser().parse_args()
+
+    errs = 0
+    count = 0
+    with open(args.hypo, "r") as hf, open(args.reference, "r") as rf:
+        for h, r in zip(hf, rf):
+            h = h.rstrip().split()
+            r = r.rstrip().split()
+            errs += editdistance.eval(r, h)
+            count += len(r)
+
+    logger.info(f"UER: {errs / count * 100:.2f}%")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py
new file mode 100644
index 0000000..f834714
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3 -u
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import sys
+
+
+def main():
+    for line in sys.stdin:
+        print(" ".join(list(line.strip().replace(" ", "|"))) + " |")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/__init__.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/__init__.py
new file mode 100644
index 0000000..6d7dd62
--- /dev/null
+++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
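wav2vec_extract_features.py earlier stores all utterances concatenated in one flat .npy array plus a .lengths sidecar; consumers such as ExtractedFeaturesDataset (used by the task below) slice it back apart. A minimal reader sketch, with an assumed feat/train prefix:

import numpy as np

feats = np.load("feat/train.npy", mmap_mode="r")  # (sum of lengths, C)
with open("feat/train.lengths") as f:
    lengths = [int(line) for line in f]

offsets = np.cumsum([0] + lengths)
first_utt = feats[offsets[0] : offsets[1]]  # (T0, C) features of the first utterance
assert offsets[-1] == len(feats)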
+ +from .unpaired_audio_text import UnpairedAudioText + + +__all__ = [ + "UnpairedAudioText", +] diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py new file mode 100644 index 0000000..b6b65d5 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py @@ -0,0 +1,452 @@ +# Copyright (c) 2017-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the license found in the LICENSE file in +# the root directory of this source tree. An additional grant of patent rights +# can be found in the PATENTS file in the same directory. + +from dataclasses import dataclass, field +import logging +import math +import os +from typing import Optional +import torch + +from fairseq.logging import metrics +from fairseq.tasks import FairseqTask, register_task +from ..data import ExtractedFeaturesDataset, RandomInputDataset + +from fairseq.data import ( + Dictionary, + data_utils, + StripTokenDataset, +) +from fairseq.dataclass import FairseqDataclass +from fairseq.distributed.utils import get_data_parallel_world_size +from omegaconf import MISSING + +from examples.speech_recognition.kaldi.kaldi_decoder import ( + KaldiDecoder, + KaldiDecoderConfig, +) + + +logger = logging.getLogger(__name__) + + +@dataclass +class DecodingConfig(FairseqDataclass): + kenlm_path: Optional[str] = None + lm_weight: float = 0 + blank_weight: float = 0 + + +@dataclass +class UnpairedAudioTextConfig(FairseqDataclass): + data: str = field( + default=MISSING, metadata={"help": "path to data directory containing audio"} + ) + text_data: str = field( + default=MISSING, metadata={"help": "path to data directory containing text"} + ) + max_length: Optional[int] = None + labels: Optional[str] = field( + default=None, + metadata={"help": "extension of the label file to load, used for fine-tuning"}, + ) + aux_target_postfix: Optional[str] = field( + default=None, + metadata={"help": "auxaliry target filename extension"}, + ) + unfiltered: bool = field( + default=False, metadata={"help": "load data with _unfiltered suffix"} + ) + ctc_eval: bool = field( + default=False, metadata={"help": "eval UER as if computed by CTC"} + ) + sort_by_length: bool = field( + default=True, metadata={"help": "sort examples by length of audio timesteps"} + ) + shuffle: bool = field(default=True, metadata={"help": "shuffle examples"}) + append_eos: bool = field(default=False, metadata={"help": "append eos"}) + uppercase: Optional[bool] = field( + default=False, metadata={"help": "uppercase for LM score computation"} + ) + skipwords: Optional[str] = field( + default="", + metadata={ + "help": "comma-separated words to be removed for LM score computation" + }, + ) + kenlm_path: Optional[str] = None + vocab_usage_power: float = 2 + + word_decoder_config: Optional[KaldiDecoderConfig] = None + word_kenlm_path: Optional[str] = None + + decoding_config: DecodingConfig = DecodingConfig() + + +@register_task("unpaired_audio_text", dataclass=UnpairedAudioTextConfig) +class UnpairedAudioText(FairseqTask): + """ """ + + cfg: UnpairedAudioTextConfig + + def __init__( + self, + cfg: UnpairedAudioTextConfig, + source_dictionary=None, + target_dictionary=None, + ): + super().__init__(cfg) + + self._target_dictionary = target_dictionary + self._source_dictionary = source_dictionary + self.num_symbols = ( + len([s for s in target_dictionary.symbols if not s.startswith("madeup")]) + - 
target_dictionary.nspecial
+        )
+        self.sil_id = (
+            target_dictionary.index("<SIL>") if "<SIL>" in target_dictionary else -1
+        )
+        self.kenlm = None
+        if cfg.kenlm_path is not None:
+            import kenlm
+
+            self.kenlm = kenlm.Model(cfg.kenlm_path)
+
+        self.word_kenlm = None
+        if cfg.word_kenlm_path is not None:
+            import kenlm
+
+            self.word_kenlm = kenlm.Model(cfg.word_kenlm_path)
+
+        self.uppercase = cfg.uppercase
+        self.skipwords = set(cfg.skipwords.split(","))
+
+        def str_postprocess(s):
+            s = " ".join(w for w in s.split() if w not in self.skipwords)
+            s = s.upper() if self.uppercase else s
+            return s
+
+        self.str_postprocess = str_postprocess
+        self.compute_lm_score = lambda s: self.kenlm.score(self.str_postprocess(s))
+
+        self.compute_word_score = None
+        if cfg.word_decoder_config is not None:
+            self.kaldi_decoder = KaldiDecoder(cfg.word_decoder_config, beam=10)
+
+            def compute_word_score(logits, padding):
+                res = self.kaldi_decoder.decode(logits, padding)
+                for r in res:
+                    r = r.result()
+                    assert len(r) == 1
+                    r = r[0]
+                    yield r["score"], r["words"]
+
+            self.compute_word_score = compute_word_score
+
+    @classmethod
+    def setup_task(cls, cfg: UnpairedAudioTextConfig, **kwargs):
+        """Setup the task (e.g., load dictionaries).
+
+        Args:
+            cfg (AudioPretrainingConfig): configuration of this task
+        """
+
+        dict_path = os.path.join(cfg.text_data, "dict.txt")
+        if os.path.exists(dict_path):
+            target_dictionary = Dictionary.load(dict_path)
+        else:
+            dict_path = os.path.join(cfg.data, f"dict.{cfg.labels}.txt")
+            target_dictionary = Dictionary.load(dict_path)
+
+        return cls(cfg, target_dictionary=target_dictionary)
+
+    def optimizer_step(self, optimizer, model, update_num):
+        if hasattr(model, "get_groups_for_update"):
+            groups = model.get_groups_for_update(update_num)
+            optimizer.step(groups={groups})
+        else:
+            optimizer.step()
+
+    def valid_step(self, sample, model, criterion):
+        res = model(
+            **sample["net_input"],
+            dense_x_only=True,
+        )
+
+        dense_x = res["logits"]
+        padding_mask = res["padding_mask"]
+
+        word_scores = None
+        if self.compute_word_score is not None:
+            word_scores = self.compute_word_score(dense_x.cpu(), padding_mask.cpu())
+
+        z = dense_x.argmax(-1)
+        z[padding_mask] = self.target_dictionary.pad()
+
+        vocab_seen = torch.zeros(self.num_symbols, dtype=torch.bool)
+
+        import editdistance
+
+        c_err = 0
+        c_len = 0
+        pred_c_len = 0
+        lm_score_sum = 0
+        for i, (x, t, id) in enumerate(
+            zip(
+                z,
+                sample["target"] if "target" in sample else [None] * len(z),
+                sample["id"],
+            )
+        ):
+
+            if t is not None:
+                t = t[(t >= self.target_dictionary.nspecial)]
+            x = x[
+                (x >= self.target_dictionary.nspecial)
+                & (x < (self.num_symbols + self.target_dictionary.nspecial))
+            ]
+            if self.sil_id >= 0:
+                x = x[x != self.sil_id]
+
+            vocab_seen[x - self.target_dictionary.nspecial] = True
+
+            pred_units_arr = x
+            if self.cfg.ctc_eval:
+                pred_units_arr = pred_units_arr.unique_consecutive()
+                pred_units_arr = pred_units_arr[pred_units_arr != 0]
+
+            if id == 0:
+                if t is not None:
+                    logger.info(f"REF: {self.target_dictionary.string(t)}")
+                logger.info(f"HYP: {self.target_dictionary.string(pred_units_arr)}")
+
+                if self.kenlm is not None:
+                    if t is not None:
+                        ref_lm_s = self.compute_lm_score(
+                            self.target_dictionary.string(t)
+                        )
+                        logger.info(
+                            f"LM [REF]: {ref_lm_s}, {math.pow(10, -ref_lm_s / (len(t) + 1))}"
+                        )
+
+                    hyp_lm_s = self.compute_lm_score(
+                        self.target_dictionary.string(pred_units_arr)
+                    )
+                    logger.info(
+                        f"LM [HYP]: {hyp_lm_s}, {math.pow(10, -hyp_lm_s / (len(pred_units_arr) + 1))}"
+                    )
+
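+            # kenlm.score() returns a log10 probability that already includes </s>,
+            # hence the "+ 1" length normalizer when converting scores to perplexity above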
pred_units_arr = pred_units_arr.tolist() + + pred_c_len += len(pred_units_arr) + + if t is not None: + t = t.tolist() + c_err += editdistance.eval(pred_units_arr, t) + c_len += len(t) + else: + c_len = pred_c_len + + if self.kenlm is not None: + pred_str = self.target_dictionary.string(pred_units_arr) + lm_score = self.compute_lm_score(pred_str) + lm_score_sum += lm_score + + kaldi_score_sum = 0 + word_lm_sum = 0 + num_words = 0 + if word_scores is not None: + for score, words in word_scores: + kaldi_score_sum += score + num_words += len(words) + if self.word_kenlm is not None: + word_lm_sum += self.kenlm.score(" ".join(words)) + + try: + world_size = get_data_parallel_world_size() + except: + world_size = 1 + + logging_output = { + "loss": c_err, + "_num_char_errors": c_err, + "_num_chars": c_len, + "_num_pred_chars": pred_c_len, + "ntokens": c_len, + "nsentences": z.size(0), + "sample_size": c_len, + "_world_size": world_size, + "_lm_score_sum": lm_score_sum, + "_kaldi_score_sum": kaldi_score_sum, + "_word_lm_sum": word_lm_sum, + "_num_words": num_words, + "_vocab_seen": vocab_seen, + } + + return c_err, c_len, logging_output + + def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs): + data_path = self.cfg.data + task_cfg = task_cfg or self.cfg + + has_unpaired_text = os.path.exists( + os.path.join(self.cfg.text_data, f"{split}.idx") + ) + + self.datasets[split] = ExtractedFeaturesDataset( + path=data_path, + split=split, + min_length=3, + max_length=task_cfg.max_length, + labels=None if has_unpaired_text else task_cfg.labels, + label_dict=self.target_dictionary, + shuffle=getattr(task_cfg, "shuffle", True), + sort_by_length=task_cfg.sort_by_length, + aux_target_postfix=task_cfg.aux_target_postfix, + ) + + logger.info(f"split {split} has unpaired text? 
{has_unpaired_text}") + if has_unpaired_text: + text_dataset = data_utils.load_indexed_dataset( + os.path.join(self.cfg.text_data, split), self.target_dictionary + ) + text_dataset = StripTokenDataset(text_dataset, self.target_dictionary.eos()) + self.datasets[split] = RandomInputDataset( + self.datasets[split], + text_dataset, + ["random_label"], + add_to_input=True, + pad_idx=self.target_dictionary.pad(), + ) + + @property + def source_dictionary(self): + return self._source_dictionary + + @property + def target_dictionary(self): + """Return the :class:`~fairseq.data.Dictionary` for the language + model.""" + return self._target_dictionary + + def max_positions(self): + """Maximum input length supported by the encoder.""" + return None + + def reduce_metrics(self, logging_outputs, criterion): + super().reduce_metrics(logging_outputs, criterion) + + zero = torch.scalar_tensor(0.0) + num_char_errors = sum( + log.get("_num_char_errors", zero) for log in logging_outputs + ) + num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs) + num_word_errors = sum( + log.get("_num_word_errors", zero) for log in logging_outputs + ) + num_words = sum(log.get("_num_words", zero) for log in logging_outputs) + num_pred_chars = sum( + log.get("_num_pred_chars", zero) for log in logging_outputs + ) + + lm_score_sum = sum(log.get("_lm_score_sum", zero) for log in logging_outputs) + vocab_seen = ( + sum(log.get("_vocab_seen", zero) for log in logging_outputs) + .bool() + .sum() + .item() + ) + kaldi_score_sum = sum( + log.get("_kaldi_score_sum", zero) for log in logging_outputs + ) + word_lm_sum = sum(log.get("_word_lm_sum", zero) for log in logging_outputs) + + metrics.log_scalar_sum("_num_char_errors", num_char_errors) + metrics.log_scalar_sum("_num_chars", num_chars) + metrics.log_scalar_sum("_num_word_errors", num_word_errors) + metrics.log_scalar_sum("_num_words", num_words) + + metrics.log_scalar_sum("lm_score_sum", lm_score_sum) + metrics.log_scalar_sum("num_pred_chars", num_pred_chars) + + if self.cfg.word_kenlm_path is not None: + metrics.log_scalar_sum("kaldi_score_sum", kaldi_score_sum) + metrics.log_scalar_sum("word_lm_sum", word_lm_sum) + + if num_chars > 0: + metrics.log_derived( + "uer", + lambda meters: meters["_num_char_errors"].sum + * 100.0 + / meters["_num_chars"].sum + if meters["_num_chars"].sum > 0 + else float("nan"), + ) + + if lm_score_sum < 0 and vocab_seen > 0: + metrics.log_scalar("vocab_seen_pct", vocab_seen / self.num_symbols) + + metrics.log_derived( + "weighted_lm_ppl", + lambda meters: math.pow( + 10, + -meters["lm_score_sum"].sum + / ( + meters["num_pred_chars"].sum + meters["nsentences"].sum + ), # account for + ) + / meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power, + ) + + metrics.log_derived( + "lm_ppl", + lambda meters: math.pow( + 10, + -meters["lm_score_sum"].sum + / ( + meters["num_pred_chars"].sum + meters["nsentences"].sum + ), # account for + ), + ) + else: + metrics.log_derived("weighted_lm_ppl", lambda meters: float("inf")) + + if num_words > 0: + if word_lm_sum != 0: + metrics.log_derived( + "word_lm_ppl", + lambda meters: math.pow( + 10, + -meters["word_lm_sum"].sum + / ( + meters["_num_words"].sum + meters["nsentences"].sum + ), # account for + ), + ) + metrics.log_derived( + "weighted_word_lm_ppl", + lambda meters: math.pow( + 10, + -meters["word_lm_sum"].sum + / ( + meters["_num_words"].sum + meters["nsentences"].sum + ), # account for + ) + / meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power, + ) + + if 
self.cfg.word_kenlm_path is not None: + metrics.log_derived( + "kaldi_score", + lambda meters: meters["kaldi_score_sum"].sum + / meters["nsentences"].sum, + ) + + def build_model(self, cfg: FairseqDataclass, from_checkpoint=False): + model = super().build_model(cfg) + + return model diff --git a/triton_model/1/aqc/examples/wav2vec/unsupervised/w2vu_generate.py b/triton_model/1/aqc/examples/wav2vec/unsupervised/w2vu_generate.py new file mode 100644 index 0000000..0611297 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/unsupervised/w2vu_generate.py @@ -0,0 +1,714 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Run inference for pre-processed data with a trained model. +""" + +import ast +from collections import namedtuple +from dataclasses import dataclass, field +from enum import Enum, auto +import hydra +from hydra.core.config_store import ConfigStore +import logging +import math +import os +from omegaconf import OmegaConf +from typing import Optional +import sys + +import editdistance +import torch + +from hydra.core.hydra_config import HydraConfig + +from fairseq import checkpoint_utils, progress_bar, tasks, utils +from fairseq.data.data_utils import post_process +from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig +from fairseq.logging.meters import StopwatchMeter +from omegaconf import open_dict + +from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig + +logging.root.setLevel(logging.INFO) +logging.basicConfig(stream=sys.stdout, level=logging.INFO) +logger = logging.getLogger(__name__) + + +class DecoderType(Enum): + VITERBI = auto() + KENLM = auto() + FAIRSEQ = auto() + KALDI = auto() + + +@dataclass +class UnsupGenerateConfig(FairseqDataclass): + fairseq: FairseqConfig = FairseqConfig() + lm_weight: float = field( + default=2.0, + metadata={"help": "language model weight"}, + ) + w2l_decoder: DecoderType = field( + default=DecoderType.VITERBI, + metadata={"help": "type of decoder to use"}, + ) + kaldi_decoder_config: Optional[KaldiDecoderConfig] = None + lexicon: Optional[str] = field( + default=None, + metadata={ + "help": "path to lexicon. 
This is also used to 'phonemize' for unsupvised param tuning" + }, + ) + lm_model: Optional[str] = field( + default=None, + metadata={"help": "path to language model (kenlm or fairseq)"}, + ) + decode_stride: Optional[float] = field( + default=None, + metadata={"help": "changing the decoding frequency of the generator"}, + ) + unit_lm: bool = field( + default=False, + metadata={"help": "whether to use unit lm"}, + ) + beam_threshold: float = field( + default=50.0, + metadata={"help": "beam score threshold"}, + ) + beam_size_token: float = field( + default=100.0, + metadata={"help": "max tokens per beam"}, + ) + beam: int = field( + default=5, + metadata={"help": "decoder beam size"}, + ) + nbest: int = field( + default=1, + metadata={"help": "number of results to return"}, + ) + word_score: float = field( + default=1.0, + metadata={"help": "word score to add at end of word"}, + ) + unk_weight: float = field( + default=-math.inf, + metadata={"help": "unknown token weight"}, + ) + sil_weight: float = field( + default=0.0, + metadata={"help": "silence token weight"}, + ) + targets: Optional[str] = field( + default=None, + metadata={"help": "extension of ground truth labels to compute UER"}, + ) + results_path: Optional[str] = field( + default=None, + metadata={"help": "where to store results"}, + ) + post_process: Optional[str] = field( + default=None, + metadata={"help": "how to post process results"}, + ) + vocab_usage_power: float = field( + default=2, + metadata={"help": "for unsupervised param tuning"}, + ) + + viterbi_transcript: Optional[str] = field( + default=None, + metadata={"help": "for unsupervised param tuning"}, + ) + min_lm_ppl: float = field( + default=0, + metadata={"help": "for unsupervised param tuning"}, + ) + min_vt_uer: float = field( + default=0, + metadata={"help": "for unsupervised param tuning"}, + ) + + blank_weight: float = field( + default=0, + metadata={"help": "value to add or set for blank emission"}, + ) + blank_mode: str = field( + default="set", + metadata={ + "help": "can be add or set, how to modify blank emission with blank weight" + }, + ) + sil_is_blank: bool = field( + default=False, + metadata={"help": "if true, token is same as blank token"}, + ) + + unsupervised_tuning: bool = field( + default=False, + metadata={ + "help": "if true, returns a score based on unsupervised param selection metric instead of UER" + }, + ) + is_ax: bool = field( + default=False, + metadata={ + "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume" + }, + ) + + +def get_dataset_itr(cfg, task): + return task.get_batch_iterator( + dataset=task.dataset(cfg.fairseq.dataset.gen_subset), + max_tokens=cfg.fairseq.dataset.max_tokens, + max_sentences=cfg.fairseq.dataset.batch_size, + max_positions=(sys.maxsize, sys.maxsize), + ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test, + required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple, + num_shards=cfg.fairseq.dataset.num_shards, + shard_id=cfg.fairseq.dataset.shard_id, + num_workers=cfg.fairseq.dataset.num_workers, + data_buffer_size=cfg.fairseq.dataset.data_buffer_size, + ).next_epoch_itr(shuffle=False) + + +def process_predictions( + cfg: UnsupGenerateConfig, + hypos, + tgt_dict, + target_tokens, + res_files, +): + retval = [] + word_preds = [] + transcriptions = [] + dec_scores = [] + + for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]): + if torch.is_tensor(hypo["tokens"]): + tokens = hypo["tokens"].int().cpu() + tokens = 
tokens[tokens >= tgt_dict.nspecial] + hyp_pieces = tgt_dict.string(tokens) + else: + hyp_pieces = " ".join(hypo["tokens"]) + + if "words" in hypo and len(hypo["words"]) > 0: + hyp_words = " ".join(hypo["words"]) + else: + hyp_words = post_process(hyp_pieces, cfg.post_process) + + to_write = {} + if res_files is not None: + to_write[res_files["hypo.units"]] = hyp_pieces + to_write[res_files["hypo.words"]] = hyp_words + + tgt_words = "" + if target_tokens is not None: + if isinstance(target_tokens, str): + tgt_pieces = tgt_words = target_tokens + else: + tgt_pieces = tgt_dict.string(target_tokens) + tgt_words = post_process(tgt_pieces, cfg.post_process) + + if res_files is not None: + to_write[res_files["ref.units"]] = tgt_pieces + to_write[res_files["ref.words"]] = tgt_words + + if not cfg.fairseq.common_eval.quiet: + logger.info(f"HYPO {i}:" + hyp_words) + if tgt_words: + logger.info("TARGET:" + tgt_words) + + if "am_score" in hypo and "lm_score" in hypo: + logger.info( + f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}" + ) + elif "score" in hypo: + logger.info(f"DECODER SCORE: {hypo['score']}") + + logger.info("___________________") + + hyp_words_arr = hyp_words.split() + tgt_words_arr = tgt_words.split() + + retval.append( + ( + editdistance.eval(hyp_words_arr, tgt_words_arr), + len(hyp_words_arr), + len(tgt_words_arr), + hyp_pieces, + hyp_words, + ) + ) + word_preds.append(hyp_words_arr) + transcriptions.append(to_write) + dec_scores.append(-hypo.get("score", 0)) # negate cuz kaldi returns NLL + + if len(retval) > 1: + best = None + for r, t in zip(retval, transcriptions): + if best is None or r[0] < best[0][0]: + best = r, t + for dest, tran in best[1].items(): + print(tran, file=dest) + dest.flush() + return best[0] + + assert len(transcriptions) == 1 + for dest, tran in transcriptions[0].items(): + print(tran, file=dest) + + return retval[0] + + +def prepare_result_files(cfg: UnsupGenerateConfig): + def get_res_file(file_prefix): + if cfg.fairseq.dataset.num_shards > 1: + file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}" + path = os.path.join( + cfg.results_path, + "{}{}.txt".format( + cfg.fairseq.dataset.gen_subset, + file_prefix, + ), + ) + return open(path, "w", buffering=1) + + if not cfg.results_path: + return None + + return { + "hypo.words": get_res_file(""), + "hypo.units": get_res_file("_units"), + "ref.words": get_res_file("_ref"), + "ref.units": get_res_file("_ref_units"), + "hypo.nbest.words": get_res_file("_nbest_words"), + } + + +def optimize_models(cfg: UnsupGenerateConfig, use_cuda, models): + """Optimize ensemble for generation""" + for model in models: + model.eval() + if cfg.fairseq.common.fp16: + model.half() + if use_cuda: + model.cuda() + + +GenResult = namedtuple( + "GenResult", + [ + "count", + "errs_t", + "gen_timer", + "lengths_hyp_unit_t", + "lengths_hyp_t", + "lengths_t", + "lm_score_t", + "num_feats", + "num_sentences", + "num_symbols", + "vt_err_t", + "vt_length_t", + ], +) + + +def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda): + task = tasks.setup_task(cfg.fairseq.task) + saved_cfg.task.labels = cfg.fairseq.task.labels + task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task) + # Set dictionary + tgt_dict = task.target_dictionary + logger.info( + "| {} {} {} examples".format( + cfg.fairseq.task.data, + cfg.fairseq.dataset.gen_subset, + len(task.dataset(cfg.fairseq.dataset.gen_subset)), + ) + ) + # Load dataset (possibly sharded) + itr = 
get_dataset_itr(cfg, task) + # Initialize generator + gen_timer = StopwatchMeter() + + def build_generator(cfg: UnsupGenerateConfig): + w2l_decoder = cfg.w2l_decoder + if w2l_decoder == DecoderType.VITERBI: + from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder + + return W2lViterbiDecoder(cfg, task.target_dictionary) + elif w2l_decoder == DecoderType.KENLM: + from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder + + return W2lKenLMDecoder(cfg, task.target_dictionary) + elif w2l_decoder == DecoderType.FAIRSEQ: + from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder + + return W2lFairseqLMDecoder(cfg, task.target_dictionary) + elif w2l_decoder == DecoderType.KALDI: + from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder + + assert cfg.kaldi_decoder_config is not None + + return KaldiDecoder( + cfg.kaldi_decoder_config, + cfg.beam, + ) + else: + raise NotImplementedError( + "only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found " + + str(w2l_decoder) + ) + + generator = build_generator(cfg) + + kenlm = None + fairseq_lm = None + if cfg.lm_model is not None: + import kenlm + + kenlm = kenlm.Model(cfg.lm_model) + + num_sentences = 0 + if cfg.results_path is not None and not os.path.exists(cfg.results_path): + os.makedirs(cfg.results_path) + + res_files = prepare_result_files(cfg) + errs_t = 0 + lengths_hyp_t = 0 + lengths_hyp_unit_t = 0 + lengths_t = 0 + count = 0 + num_feats = 0 + all_hyp_pieces = [] + all_hyp_words = [] + + num_symbols = ( + len([s for s in tgt_dict.symbols if not s.startswith("madeup")]) + - tgt_dict.nspecial + ) + targets = None + if cfg.targets is not None: + tgt_path = os.path.join( + cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." 
+ cfg.targets + ) + if os.path.exists(tgt_path): + with open(tgt_path, "r") as f: + targets = f.read().splitlines() + viterbi_transcript = None + if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0: + logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}") + with open(cfg.viterbi_transcript, "r") as vf: + viterbi_transcript = vf.readlines() + viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript] + + gen_timer.start() + + start = 0 + end = len(itr) + + hypo_futures = None + if cfg.w2l_decoder == DecoderType.KALDI: + logger.info("Extracting features") + hypo_futures = [] + samples = [] + with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t: + for i, sample in enumerate(t): + if "net_input" not in sample or i < start or i >= end: + continue + if "padding_mask" not in sample["net_input"]: + sample["net_input"]["padding_mask"] = None + + hypos, num_feats = gen_hypos( + generator, models, num_feats, sample, task, use_cuda + ) + hypo_futures.append(hypos) + samples.append(sample) + itr = list(zip(hypo_futures, samples)) + start = 0 + end = len(itr) + logger.info("Finished extracting features") + + with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t: + for i, sample in enumerate(t): + if i < start or i >= end: + continue + + if hypo_futures is not None: + hypos, sample = sample + hypos = [h.result() for h in hypos] + else: + if "net_input" not in sample: + continue + + hypos, num_feats = gen_hypos( + generator, models, num_feats, sample, task, use_cuda + ) + + for i, sample_id in enumerate(sample["id"].tolist()): + if targets is not None: + target_tokens = targets[sample_id] + elif "target" in sample or "target_label" in sample: + toks = ( + sample["target"][i, :] + if "target_label" not in sample + else sample["target_label"][i, :] + ) + + target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu() + else: + target_tokens = None + + # Process top predictions + ( + errs, + length_hyp, + length, + hyp_pieces, + hyp_words, + ) = process_predictions( + cfg, + hypos[i], + tgt_dict, + target_tokens, + res_files, + ) + errs_t += errs + lengths_hyp_t += length_hyp + lengths_hyp_unit_t += ( + len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words) + ) + lengths_t += length + count += 1 + all_hyp_pieces.append(hyp_pieces) + all_hyp_words.append(hyp_words) + + num_sentences += ( + sample["nsentences"] if "nsentences" in sample else sample["id"].numel() + ) + + lm_score_sum = 0 + if kenlm is not None: + + if cfg.unit_lm: + lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces) + else: + lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words) + elif fairseq_lm is not None: + lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0]) + + vt_err_t = 0 + vt_length_t = 0 + if viterbi_transcript is not None: + unit_hyps = [] + if cfg.targets is not None and cfg.lexicon is not None: + lex = {} + with open(cfg.lexicon, "r") as lf: + for line in lf: + items = line.rstrip().split() + lex[items[0]] = items[1:] + for h in all_hyp_pieces: + hyp_ws = [] + for w in h.split(): + assert w in lex, w + hyp_ws.extend(lex[w]) + unit_hyps.append(hyp_ws) + + else: + unit_hyps.extend([h.split() for h in all_hyp_words]) + + vt_err_t = sum( + editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps) + ) + + vt_length_t = sum(len(h) for h in viterbi_transcript) + + if res_files is not None: + for r in res_files.values(): + r.close() + + gen_timer.stop(lengths_hyp_t) + + return GenResult( + count, + errs_t, 
+ gen_timer, + lengths_hyp_unit_t, + lengths_hyp_t, + lengths_t, + lm_score_sum, + num_feats, + num_sentences, + num_symbols, + vt_err_t, + vt_length_t, + ) + + +def gen_hypos(generator, models, num_feats, sample, task, use_cuda): + sample = utils.move_to_cuda(sample) if use_cuda else sample + + if "features" in sample["net_input"]: + sample["net_input"]["dense_x_only"] = True + num_feats += ( + sample["net_input"]["features"].shape[0] + * sample["net_input"]["features"].shape[1] + ) + hypos = task.inference_step(generator, models, sample, None) + return hypos, num_feats + + +def main(cfg: UnsupGenerateConfig, model=None): + if ( + cfg.fairseq.dataset.max_tokens is None + and cfg.fairseq.dataset.batch_size is None + ): + cfg.fairseq.dataset.max_tokens = 1024000 + + use_cuda = torch.cuda.is_available() and not cfg.fairseq.common.cpu + + task = tasks.setup_task(cfg.fairseq.task) + + overrides = ast.literal_eval(cfg.fairseq.common_eval.model_overrides) + + if cfg.fairseq.task._name == "unpaired_audio_text": + overrides["model"] = { + "blank_weight": cfg.blank_weight, + "blank_mode": cfg.blank_mode, + "blank_is_sil": cfg.sil_is_blank, + "no_softmax": True, + "segmentation": { + "type": "NONE", + }, + } + else: + overrides["model"] = { + "blank_weight": cfg.blank_weight, + "blank_mode": cfg.blank_mode, + } + + if cfg.decode_stride: + overrides["model"]["generator_stride"] = cfg.decode_stride + + if model is None: + # Load ensemble + logger.info("| loading model(s) from {}".format(cfg.fairseq.common_eval.path)) + models, saved_cfg = checkpoint_utils.load_model_ensemble( + cfg.fairseq.common_eval.path.split("\\"), + arg_overrides=overrides, + task=task, + suffix=cfg.fairseq.checkpoint.checkpoint_suffix, + strict=(cfg.fairseq.checkpoint.checkpoint_shard_count == 1), + num_shards=cfg.fairseq.checkpoint.checkpoint_shard_count, + ) + optimize_models(cfg, use_cuda, models) + else: + models = [model] + saved_cfg = cfg.fairseq + + with open_dict(saved_cfg.task): + saved_cfg.task.shuffle = False + saved_cfg.task.sort_by_length = False + + gen_result = generate(cfg, models, saved_cfg, use_cuda) + + wer = None + if gen_result.lengths_t > 0: + wer = gen_result.errs_t * 100.0 / gen_result.lengths_t + logger.info(f"WER: {wer}") + + lm_ppl = float("inf") + + if gen_result.lm_score_t != 0 and gen_result.lengths_hyp_t > 0: + hyp_len = gen_result.lengths_hyp_t + lm_ppl = math.pow( + 10, -gen_result.lm_score_t / (hyp_len + gen_result.num_sentences) + ) + logger.info(f"LM PPL: {lm_ppl}") + + logger.info( + "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}" + " sentences/s, {:.2f} tokens/s)".format( + gen_result.num_sentences, + gen_result.gen_timer.n, + gen_result.gen_timer.sum, + gen_result.num_sentences / gen_result.gen_timer.sum, + 1.0 / gen_result.gen_timer.avg, + ) + ) + + vt_diff = None + if gen_result.vt_length_t > 0: + vt_diff = gen_result.vt_err_t / gen_result.vt_length_t + vt_diff = max(cfg.min_vt_uer, vt_diff) + + lm_ppl = max(cfg.min_lm_ppl, lm_ppl) + + if not cfg.unsupervised_tuning: + weighted_score = wer + else: + weighted_score = math.log(lm_ppl) * (vt_diff or 1.0) + + res = ( + f"| Generate {cfg.fairseq.dataset.gen_subset} with beam={cfg.beam}, " + f"lm_weight={cfg.kaldi_decoder_config.acoustic_scale if cfg.kaldi_decoder_config else cfg.lm_weight}, " + f"word_score={cfg.word_score}, sil_weight={cfg.sil_weight}, blank_weight={cfg.blank_weight}, " + f"WER: {wer}, LM_PPL: {lm_ppl}, num feats: {gen_result.num_feats}, " + f"length: {gen_result.lengths_hyp_t}, UER to viterbi: {(vt_diff or 0) * 
100}, score: {weighted_score}" + ) + + logger.info(res) + # print(res) + + return task, weighted_score + + +@hydra.main( + config_path=os.path.join("../../..", "fairseq", "config"), config_name="config" +) +def hydra_main(cfg): + with open_dict(cfg): + # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) + cfg.job_logging_cfg = OmegaConf.to_container( + HydraConfig.get().job_logging, resolve=True + ) + + cfg = OmegaConf.create( + OmegaConf.to_container(cfg, resolve=False, enum_to_str=False) + ) + OmegaConf.set_struct(cfg, True) + logger.info(cfg) + + utils.import_user_module(cfg.fairseq.common) + + _, score = main(cfg) + + if cfg.is_ax: + return score, None + return score + + +def cli_main(): + try: + from hydra._internal.utils import get_args + + cfg_name = get_args().config_name or "config" + except: + logger.warning("Failed to get config name from hydra args") + cfg_name = "config" + + cs = ConfigStore.instance() + cs.store(name=cfg_name, node=UnsupGenerateConfig) + hydra_main() + + +if __name__ == "__main__": + cli_main() diff --git a/triton_model/1/aqc/examples/wav2vec/vq-wav2vec_featurize.py b/triton_model/1/aqc/examples/wav2vec/vq-wav2vec_featurize.py new file mode 100644 index 0000000..627072e --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/vq-wav2vec_featurize.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset +""" + +import argparse +import glob +import os +import os.path as osp +import pprint + +import soundfile as sf +import torch +import fairseq +from torch import nn +from torch.utils.data import DataLoader + + +try: + import tqdm +except: + print("Install tqdm to use --log-format=tqdm") + + +class FilesDataset: + def __init__(self, files, labels): + self.files = files + if labels and osp.exists(labels): + with open(labels, "r") as lbl_f: + self.labels = [line.rstrip() for line in lbl_f] + else: + self.labels = labels + + def __len__(self): + return len(self.files) + + def __getitem__(self, index): + fname = self.files[index] + + wav, sr = sf.read(fname) + assert sr == 16000 + + wav = torch.from_numpy(wav).float() + lbls = None + if self.labels: + if isinstance(self.labels, str): + lbl_file = osp.splitext(fname)[0] + "." 
+ self.labels + with open(lbl_file, "r") as lblf: + lbls = lblf.readline() + assert lbls is not None + else: + lbls = self.labels[index] + return wav, lbls + + def collate(self, batch): + return batch + + +class ArgTypes: + @staticmethod + def existing_path(arg): + arg = str(arg) + assert osp.exists(arg), f"File {arg} does not exist" + return arg + + @staticmethod + def mkdir(arg): + arg = str(arg) + os.makedirs(arg, exist_ok=True) + return arg + + +class DatasetWriter: + def __init__(self): + + self.args = self.load_config() + pprint.pprint(self.args.__dict__) + + self.model = self.load_model() + + def __getattr__(self, attr): + return getattr(self.args, attr) + + def read_manifest(self, fname): + + with open(fname, "r") as fp: + lines = fp.read().split("\n") + root = lines.pop(0).strip() + fnames = [ + osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0 + ] + + return fnames + + def process_splits(self): + + if self.args.shard is not None or self.args.num_shards is not None: + assert self.args.shard is not None and self.args.num_shards is not None + + for split in self.splits: + print(split) + + if self.extension == "tsv": + datadir = osp.join(self.data_dir, f"{split}.{self.extension}") + print("Reading manifest file: ", datadir) + files = self.read_manifest(datadir) + else: + datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}") + files = glob.glob(datadir, recursive=True) + + assert len(files) > 0 + + if self.args.shard is not None: + files = files[self.args.shard :: self.args.num_shards] + + lbls = [] + with open(self.data_file(split), "w") as srcf: + for line, lbl in self.iterate(files): + print(line, file=srcf) + if self.args.labels: + lbls.append(lbl + "\n") + + if self.args.labels: + assert all(a is not None for a in lbls) + with open(self.lbl_file(split), "w") as lblf: + lblf.writelines(lbls) + + def iterate(self, files): + + data = self.load_data(files) + for samples in tqdm.tqdm(data, total=len(files) // 32): + + for wav, lbl in samples: + x = wav.unsqueeze(0).float().cuda() + + div = 1 + while x.size(-1) // div > self.args.max_size: + div += 1 + + xs = x.chunk(div, dim=-1) + + result = [] + for x in xs: + torch.cuda.empty_cache() + x = self.model.feature_extractor(x) + if self.quantize_location == "encoder": + with torch.no_grad(): + _, idx = self.model.vector_quantizer.forward_idx(x) + idx = idx.squeeze(0).cpu() + else: + with torch.no_grad(): + z = self.model.feature_aggregator(x) + _, idx = self.model.vector_quantizer.forward_idx(z) + idx = idx.squeeze(0).cpu() + result.append(idx) + + idx = torch.cat(result, dim=0) + yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl + + def lbl_file(self, name): + shard_part = "" if self.args.shard is None else f".{self.args.shard}" + return osp.join(self.output_dir, f"{name}.lbl{shard_part}") + + def data_file(self, name): + shard_part = "" if self.args.shard is None else f".{self.args.shard}" + return osp.join(self.output_dir, f"{name}.src{shard_part}") + + def var_file(self): + return osp.join(self.output_dir, f"vars.pt") + + def load_config(self): + + parser = argparse.ArgumentParser("Vector Quantized wav2vec features") + + # Model Arguments + parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True) + parser.add_argument("--data-parallel", action="store_true") + + # Output Arguments + parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True) + + # Data Arguments + parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True) + 
parser.add_argument("--splits", type=str, nargs="+", required=True) + parser.add_argument("--extension", type=str, required=True) + parser.add_argument("--labels", type=str, required=False) + + parser.add_argument("--shard", type=int, default=None) + parser.add_argument("--num-shards", type=int, default=None) + parser.add_argument("--max-size", type=int, default=1300000) + + # Logger Arguments + parser.add_argument( + "--log-format", type=str, choices=["none", "simple", "tqdm"] + ) + + return parser.parse_args() + + def load_data(self, fnames): + + dataset = FilesDataset(fnames, self.args.labels) + loader = DataLoader( + dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8 + ) + return loader + + def load_model(self): + model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint]) + model = model[0] + + self.quantize_location = getattr(cfg.model, "vq", "encoder") + + model.eval().float() + model.cuda() + + if self.data_parallel: + model = nn.DataParallel(model) + + return model + + def __call__(self): + + self.process_splits() + + if hasattr(self.model.feature_extractor, "vars") and ( + self.args.shard is None or self.args.shard == 0 + ): + vars = ( + self.model.feature_extractor.vars.view( + self.model.feature_extractor.banks, + self.model.feature_extractor.num_vars, + -1, + ) + .cpu() + .detach() + ) + print("writing learned latent variable embeddings: ", vars.shape) + torch.save(vars, self.var_file()) + + +if __name__ == "__main__": + write_data = DatasetWriter() + + write_data() + print("Done.") diff --git a/triton_model/1/aqc/examples/wav2vec/wav2vec_featurize.py b/triton_model/1/aqc/examples/wav2vec/wav2vec_featurize.py new file mode 100644 index 0000000..588268b --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/wav2vec_featurize.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +""" +Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset +""" + +import argparse +import glob +import os +from shutil import copy + +import h5py +import numpy as np +import soundfile as sf +import torch +import tqdm +import fairseq +from torch import nn + + +def read_audio(fname): + """ Load an audio file and return PCM along with the sample rate """ + + wav, sr = sf.read(fname) + assert sr == 16e3 + + return wav, 16e3 + + +class PretrainedWav2VecModel(nn.Module): + def __init__(self, fname): + super().__init__() + + model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname]) + model = model[0] + model.eval() + + self.model = model + + def forward(self, x): + with torch.no_grad(): + z = self.model.feature_extractor(x) + if isinstance(z, tuple): + z = z[0] + c = self.model.feature_aggregator(z) + return z, c + + +class EmbeddingWriterConfig(argparse.ArgumentParser): + def __init__(self): + super().__init__("Pre-compute embeddings for flashlight datasets") + + kwargs = {"action": "store", "type": str, "required": True} + + self.add_argument("--input", "-i", help="Input Directory", **kwargs) + self.add_argument("--output", "-o", help="Output Directory", **kwargs) + self.add_argument("--model", help="Path to model checkpoint", **kwargs) + self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs) + self.add_argument( + "--ext", default="wav", required=False, help="Audio file extension" + ) + + self.add_argument( + "--no-copy-labels", + action="store_true", + help="Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.", + ) + self.add_argument( + "--use-feat", + action="store_true", + help="Use the feature vector ('z') instead of context vector ('c') for features", + ) + self.add_argument("--gpu", help="GPU to use", default=0, type=int) + + +class Prediction: + """ Lightweight wrapper around a fairspeech embedding model """ + + def __init__(self, fname, gpu=0): + self.gpu = gpu + self.model = PretrainedWav2VecModel(fname).cuda(gpu) + + def __call__(self, x): + x = torch.from_numpy(x).float().cuda(self.gpu) + with torch.no_grad(): + z, c = self.model(x.unsqueeze(0)) + + return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy() + + +class H5Writer: + """ Write features as hdf5 file in flashlight compatible format """ + + def __init__(self, fname): + self.fname = fname + os.makedirs(os.path.dirname(self.fname), exist_ok=True) + + def write(self, data): + channel, T = data.shape + + with h5py.File(self.fname, "w") as out_ds: + data = data.T.flatten() + out_ds["features"] = data + out_ds["info"] = np.array([16e3 // 160, T, channel]) + + +class EmbeddingDatasetWriter(object): + """Given a model and a flashlight dataset, pre-compute and store embeddings + + Args: + input_root, str : + Path to the flashlight dataset + output_root, str : + Desired output directory. 
Will be created if non-existent + split, str : + Dataset split + """ + + def __init__( + self, + input_root, + output_root, + split, + model_fname, + extension="wav", + gpu=0, + verbose=False, + use_feat=False, + ): + + assert os.path.exists(model_fname) + + self.model_fname = model_fname + self.model = Prediction(self.model_fname, gpu) + + self.input_root = input_root + self.output_root = output_root + self.split = split + self.verbose = verbose + self.extension = extension + self.use_feat = use_feat + + assert os.path.exists(self.input_path), "Input path '{}' does not exist".format( + self.input_path + ) + + def _progress(self, iterable, **kwargs): + if self.verbose: + return tqdm.tqdm(iterable, **kwargs) + return iterable + + def require_output_path(self, fname=None): + path = self.get_output_path(fname) + os.makedirs(path, exist_ok=True) + + @property + def input_path(self): + return self.get_input_path() + + @property + def output_path(self): + return self.get_output_path() + + def get_input_path(self, fname=None): + if fname is None: + return os.path.join(self.input_root, self.split) + return os.path.join(self.get_input_path(), fname) + + def get_output_path(self, fname=None): + if fname is None: + return os.path.join(self.output_root, self.split) + return os.path.join(self.get_output_path(), fname) + + def copy_labels(self): + self.require_output_path() + + labels = list( + filter( + lambda x: self.extension not in x, glob.glob(self.get_input_path("*")) + ) + ) + for fname in tqdm.tqdm(labels): + copy(fname, self.output_path) + + @property + def input_fnames(self): + return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension)))) + + def __len__(self): + return len(self.input_fnames) + + def write_features(self): + + paths = self.input_fnames + + fnames_context = map( + lambda x: os.path.join( + self.output_path, x.replace("." + self.extension, ".h5context") + ), + map(os.path.basename, paths), + ) + + for name, target_fname in self._progress( + zip(paths, fnames_context), total=len(self) + ): + wav, sr = read_audio(name) + z, c = self.model(wav) + feat = z if self.use_feat else c + writer = H5Writer(target_fname) + writer.write(feat) + + def __repr__(self): + + return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format( + n_files=len(self), **self.__dict__ + ) + + +if __name__ == "__main__": + + args = EmbeddingWriterConfig().parse_args() + + for split in args.split: + + writer = EmbeddingDatasetWriter( + input_root=args.input, + output_root=args.output, + split=split, + model_fname=args.model, + gpu=args.gpu, + extension=args.ext, + use_feat=args.use_feat, + ) + + print(writer) + writer.require_output_path() + + print("Writing Features...") + writer.write_features() + print("Done.") + + if not args.no_copy_labels: + print("Copying label data...") + writer.copy_labels() + print("Done.") diff --git a/triton_model/1/aqc/examples/wav2vec/wav2vec_manifest.py b/triton_model/1/aqc/examples/wav2vec/wav2vec_manifest.py new file mode 100644 index 0000000..9b8aa18 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/wav2vec_manifest.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +""" +Data pre-processing: build vocabularies and binarize training data. 
+""" + +import argparse +import glob +import os +import random + +import soundfile + + +def get_parser(): + parser = argparse.ArgumentParser() + parser.add_argument( + "root", metavar="DIR", help="root directory containing flac files to index" + ) + parser.add_argument( + "--valid-percent", + default=0.01, + type=float, + metavar="D", + help="percentage of data to use as validation set (between 0 and 1)", + ) + parser.add_argument( + "--dest", default=".", type=str, metavar="DIR", help="output directory" + ) + parser.add_argument( + "--ext", default="flac", type=str, metavar="EXT", help="extension to look for" + ) + parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed") + parser.add_argument( + "--path-must-contain", + default=None, + type=str, + metavar="FRAG", + help="if set, path must contain this substring for a file to be included in the manifest", + ) + return parser + + +def main(args): + assert args.valid_percent >= 0 and args.valid_percent <= 1.0 + + if not os.path.exists(args.dest): + os.makedirs(args.dest) + + dir_path = os.path.realpath(args.root) + search_path = os.path.join(dir_path, "**/*." + args.ext) + rand = random.Random(args.seed) + + valid_f = ( + open(os.path.join(args.dest, "valid.tsv"), "w") + if args.valid_percent > 0 + else None + ) + + with open(os.path.join(args.dest, "train.tsv"), "w") as train_f: + print(dir_path, file=train_f) + + if valid_f is not None: + print(dir_path, file=valid_f) + + for fname in glob.iglob(search_path, recursive=True): + file_path = os.path.realpath(fname) + + if args.path_must_contain and args.path_must_contain not in file_path: + continue + + frames = soundfile.info(fname).frames + dest = train_f if rand.random() > args.valid_percent else valid_f + print( + "{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest + ) + if valid_f is not None: + valid_f.close() + + +if __name__ == "__main__": + parser = get_parser() + args = parser.parse_args() + main(args) diff --git a/triton_model/1/aqc/examples/wav2vec/xlsr/README.md b/triton_model/1/aqc/examples/wav2vec/xlsr/README.md new file mode 100644 index 0000000..84e9de3 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/xlsr/README.md @@ -0,0 +1,68 @@ +# XLS-R + +XLS-R is a set of large-scale models for self-supervised cross-lingual speech representation learning based on wav2vec 2.0. It was pretrained on 128 languages and approximately 436K hours of unlabeled speech data. With finetuning, these models achieve state of the art performance in speech translation, speech recognition and language identification. We evaluate the model across multiple benchmarks such as CoVoST-2 for speech translation, BABEL / MLS / CommonVoice / VoxPopuli for automatic speech recognition, and VoxLingua107 for language identification as we llas VoxCeleb1 for speaker identification. More details about this work can be found in our [paper](https://arxiv.org/pdf/2111.09296.pdf) and download links can be found below. + +Model | Link +|------|------ +XLS-R 300M | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_300m.pt) +XLS-R 1B | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_960m_1000k.pt) +XLS-R 2B | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_2B_1000k.pt) + +You can also download these models [here](https://huggingface.co/models?other=xls_r) and read more about it in the [blogpost](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) from Hugging Face. 
+
+## Speech Translation Finetuned Models
+
+We multilingually finetune XLS-R models on [CoVoST 2](https://github.com/facebookresearch/covost), which has 21
+into-English and 15 out-of-English directions.
+
+Model | Directions | Link
+|------|------|------
+XLS-R 300M | 21 langs → En | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xls_r_300m_21_en.pt)
+XLS-R 300M | En → 15 langs | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xls_r_300m_en_15.pt)
+XLS-R 1B | 21 langs → En | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xls_r_1b_21_en.pt)
+XLS-R 1B | En → 15 langs | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xls_r_1b_en_15.pt)
+XLS-R 2B | 21 langs → En | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xls_r_2b_21_en.pt)
+XLS-R 2B | En → 15 langs | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xls_r_2b_en_15.pt)
+XLS-R 2B | 21 langs → En + En → 15 langs | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xls_r_2b_22_16.pt)
+
+## ASR Finetuning
+
+Refer to the original wav2vec documentation [here](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec#fine-tune-a-pre-trained-model-with-ctc) for detailed instructions on finetuning a pretrained model with CTC. Below is an example command; the hyperparameter values needed to reproduce the results in our paper are given in the rest of this section.
+
+```shell script
+$ fairseq-hydra-train \
+    distributed_training.distributed_port=$PORT \
+    task.data=/path/to/data \
+    model.w2v_path=/path/to/model.pt \
+    --config-dir /path/to/fairseq-py/examples/wav2vec/xlsr/config \
+    --config-name finetune
+```
+
+For finetuning the 300M and 1B models, we use the same hyperparameter settings defined in `finetune.yaml`. We vary `optimization.max_update` as described in the table below, and `optimization.lr` is picked from the interval [2e-5, 3e-4] based on dev word error rate.
+
+Benchmark | Total Number of Updates
+|------|------
+Babel | 26000
+Common Voice | 13000
+VoxPopuli | 50000
+MLS 10h | 20000
+
+For finetuning the 2B model, we make some additional changes to `finetune.yaml`. We use the fully_sharded `distributed_training.ddp_backend` provided by the [fairscale](https://github.com/facebookresearch/fairscale) library and set `model.activation_checkpoint` to true. We also increase `dataset.max_tokens` to 2560000 and use a total effective batch size of 2560000 * 24 tokens. We sweep for the best `optimization.lr` within the interval [3e-6, 3e-5] using dev error rate. For the Common Voice dataset, we pick `model.mask_prob` per language from {0.30, 0.40} based on best dev error rate. A small helper for scripting such sweeps is sketched below.
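+
+The sweep described above is easy to script. The sketch below is illustrative only, not from the paper: the LR grid, the paths, and the chosen `max_update` are placeholder assumptions; it simply prints one `fairseq-hydra-train` command per candidate learning rate, mirroring the example command above.
+
+```python
+# Hypothetical sweep helper: prints one finetuning command per candidate LR.
+# The LR grid and all paths are placeholders, not values from the paper.
+lrs = [2e-5, 5e-5, 1e-4, 3e-4]
+max_update = 13000  # e.g. Common Voice, per the table above
+
+for lr in lrs:
+    print(
+        "fairseq-hydra-train "
+        "distributed_training.distributed_port=$PORT "
+        "task.data=/path/to/data "
+        "model.w2v_path=/path/to/model.pt "
+        f"optimization.lr=[{lr}] "
+        f"optimization.max_update={max_update} "
+        "--config-dir /path/to/fairseq-py/examples/wav2vec/xlsr/config "
+        "--config-name finetune"
+    )
+```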
+ + + +## Citation + +Please cite as: + +``` bibtex +@article{babu2021xlsr, + title={XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale}, + author={Arun Babu and Changhan Wang and Andros Tjandra and Kushal Lakhotia and Qiantong Xu and Naman Goyal and Kritika Singh and Patrick von Platen and Yatharth Saraf and Juan Pino and Alexei Baevski and Alexis Conneau and Michael Auli}, + year={2021}, + volume={abs/2111.09296}, + journal={arXiv}, +} +``` + + diff --git a/triton_model/1/aqc/examples/wav2vec/xlsr/config/finetune.yaml b/triton_model/1/aqc/examples/wav2vec/xlsr/config/finetune.yaml new file mode 100644 index 0000000..8736e10 --- /dev/null +++ b/triton_model/1/aqc/examples/wav2vec/xlsr/config/finetune.yaml @@ -0,0 +1,66 @@ +# @package _group_ + +common: + fp16: true + log_format: json + log_interval: 200 + tensorboard_logdir: tb + +checkpoint: + save_interval: 1000 + save_interval_updates: 1000 + keep_interval_updates: 1 + no_epoch_checkpoints: true + best_checkpoint_metric: wer + +task: + _name: audio_finetuning + data: ??? + normalize: true + labels: ltr + +dataset: + num_workers: 6 + max_tokens: 1280000 + skip_invalid_size_inputs_valid_test: true + validate_after_updates: 10000 + validate_interval_updates: 1000 + valid_subset: valid + +distributed_training: + ddp_backend: legacy_ddp + distributed_world_size: 4 + +criterion: + _name: ctc + zero_infinity: true + +optimization: + max_update: ??? + lr: [0.0003] + sentence_avg: true + update_freq: [5] + +optimizer: + _name: adam + adam_betas: (0.9,0.98) + adam_eps: 1e-08 + +lr_scheduler: + _name: tri_stage + phase_ratio: [0.1, 0.4, 0.5] + final_lr_scale: 0.05 + +model: + _name: wav2vec_ctc + w2v_path: ??? + apply_mask: true + mask_prob: 0.75 + mask_channel_prob: 0.25 + mask_channel_length: 64 + layerdrop: 0.1 + activation_dropout: 0.1 + feature_grad_mult: 0.0 + freeze_finetune_updates: 10000 + + checkpoint_activations: false diff --git a/triton_model/1/model.py b/triton_model/1/model.py new file mode 100644 index 0000000..f444f43 --- /dev/null +++ b/triton_model/1/model.py @@ -0,0 +1,224 @@ +import gc +import os +import re +import sys +import json +import time +import yaml +import numpy +import torch +import codecs +import fairseq +import soundfile +import torchaudio + +sys.path.append("aqc") +from random import choice +from threading import Timer +import torch.nn.functional as F +from types import SimpleNamespace + +os.chdir(os.path.dirname(__file__)) +import torchaudio.sox_effects as ta_sox +from faster_whisper import WhisperModel +import triton_python_backend_utils as pb_utils +from examples import speech_recognition, data2vec +from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder + +SAMPLING_RATE = 16000 +LANGMAP = { + "bn": "Bengali", + "bh": "Bhojpuri", + "gu": "Gujarati", + "hi": "Hindi", + "kn": "Kannada", + "ml": "Malayalam", + "mr": "Marathi", + "or": "Odia", + "pa": "Punjabi", + "ta": "Tamil", + "te": "Telugu", + "ur": "Urdu", + 'en': 'English' +} + + +class ASRModel(object): + def __init__( + self, + lang, + device, + ): + self.lang = lang + self.timeout = 60 * 30 + self.language = LANGMAP[lang] + self.sampling_rate = SAMPLING_RATE + self.device = device + self.effects = [["gain", "-n"]] + self.decoder_args = SimpleNamespace( + post_process="letter", + sil_weight=0, + max_tokens=4000000, + beam=100, + nbest=1, + criterion="ctc", + ) + self.space_remover = re.compile(' +') + self.replacer = re.compile("\|+") + self.load() + self.last_accessed = time.time() + + def load(self): + self.model, 
self.cfg, self.task, self.token, self.decoder = ( + None, + None, + None, + None, + None, + ) + print(f"loading {self.language} asr....", end="") + if self.language == 'English': + self.model = WhisperModel('medium', device=self.device.split(':')[0], compute_type="float32", device_index=int(self.device.split(':')[1]), download_root='./whisper_models') + else: + model, self.cfg, self.task = ( + fairseq.checkpoint_utils.load_model_ensemble_and_task( + [f"models/SPRING_INX_data2vec_aqc_{self.language}.pt"], + arg_overrides={"data": f"models/SPRING_INX_{self.language}_dict.txt"}, + ) + ) + self.model = model[0].to(self.device).eval() + self.token = [ + x.strip().split(maxsplit=1)[0] + for x in open( + f"models/SPRING_INX_{self.language}_dict.txt", "rt" + ).readlines() + ] + self.decoder = W2lViterbiDecoder(self.decoder_args, self.task.target_dictionary) + self.loaded = True + self.timer = Timer(self.timeout, self.unload) + self.timer.start() + print(" ....loaded!") + + def unload(self): + print(f"unloading {self.language} asr....", end="") + self.model, self.cfg, self.task, self.token, self.decoder = ( + None, + None, + None, + None, + None, + ) + gc.collect() + torch.cuda.empty_cache() + self.loaded = False + print(" ....unloaded!") + + def recognize(self, audio_frames): + if self.loaded == False: + self.load() + self.timer.cancel() + self.last_accessed = time.time() + if self.language == 'English': + segments, _ = self.model.transcribe(audio_frames, beam_size=5, language='en') + output_text = self.space_remover.sub(' ', ' '.join(segment.text for segment in segments)) + self.timer = Timer(self.timeout, self.unload) + self.timer.start() + return output_text + else: + input_sample, _ = ta_sox.apply_effects_tensor( + torch.tensor(audio_frames).unsqueeze(0), self.sampling_rate, self.effects + ) + input_sample = input_sample.float().to(self.device) + with torch.no_grad(): + input_sample = F.layer_norm(input_sample, input_sample.shape) + logits = self.model(source=input_sample.unsqueeze(0), padding_mask=None)[ + "encoder_out" + ] + predicted_ids = [ + self.token[x - 4] + for x in torch.unique_consecutive( + torch.argmax(logits[:, 0], axis=-1) + ).tolist() + if x != 0 + ] + text = self.decoder.decode(logits.to("cpu")) + recognized_text = self.replacer.sub( + " ", + "".join( + self.task.target_dictionary.string(tstep[0]["tokens"]) for tstep in text + ), + ) + self.timer = Timer(self.timeout, self.unload) + self.timer.start() + return recognized_text + + +class TritonPythonModel: + def initialize(self, args): + self.device_id = int(json.loads(args["model_instance_device_id"])) + self.target_dtype = pb_utils.triton_string_to_numpy( + pb_utils.get_output_config_by_name( + json.loads(args["model_config"]), "OUTPUT_RECOGNIZED_TEXT" + )["data_type"] + ) + self.models = {} + + def execute(self, requests): + responses = [] + for request in requests: + input_audio = pb_utils.get_input_tensor_by_name( + request, "INPUT_AUDIO" + ).as_numpy() + input_language_id = ( + pb_utils.get_input_tensor_by_name(request, "INPUT_LANGUAGE_ID") + .as_numpy()[0] + .decode("utf-8") + ) + success_flag = True + for i in range(100): + success_flag = False + try: + if input_language_id not in self.models: + self.models[input_language_id] = ASRModel( + input_language_id, + f"cuda:{str(self.device_id)}", + ) + recognized_text = self.models[input_language_id].recognize( + input_audio + ) + success_flag = True + except RuntimeError: + if i == 0: + print( + f"GPU {self.device_id} full, releasing cuda cache..." 
+                    )
+                    gc.collect()
+                    torch.cuda.empty_cache()
+                else:
+                    print(
+                        f"GPU {self.device_id} full, unloading the least recently used model..."
+                    )
+                    recency_list = sorted(
+                        filter(lambda x: x.loaded, self.models.values()),
+                        key=lambda x: x.last_accessed,
+                    )
+                    if len(recency_list) == 0:
+                        time.sleep(5)
+                    else:
+                        recency_list[0].unload()
+                if success_flag:
+                    break
+            if not success_flag:
+                # All retries failed; return an explicit error for this request
+                # instead of crashing on an unbound `recognized_text`.
+                responses.append(
+                    pb_utils.InferenceResponse(
+                        output_tensors=[],
+                        error=pb_utils.TritonError(
+                            "ASR failed after repeated attempts"
+                        ),
+                    )
+                )
+                continue
+            responses.append(
+                pb_utils.InferenceResponse(
+                    output_tensors=[
+                        pb_utils.Tensor(
+                            "OUTPUT_RECOGNIZED_TEXT",
+                            numpy.array([recognized_text], dtype="object"),
+                        )
+                    ]
+                )
+            )
+        return responses
+
+    def finalize(self):
+        pass
diff --git a/triton_model/config.pbtxt b/triton_model/config.pbtxt
new file mode 100644
index 0000000..46e5714
--- /dev/null
+++ b/triton_model/config.pbtxt
@@ -0,0 +1,37 @@
+name: "asr"
+backend: "python"
+max_batch_size: 0
+input [
+  {
+    name: "INPUT_AUDIO"
+    data_type: TYPE_FP32
+    dims: -1
+  },
+  {
+    name: "INPUT_LANGUAGE_ID"
+    data_type: TYPE_STRING
+    dims: 1
+  }
+]
+output [
+  {
+    name: "OUTPUT_RECOGNIZED_TEXT"
+    data_type: TYPE_STRING
+    dims: 1
+  }
+]
+parameters: {
+  key: "EXECUTION_ENV_PATH",
+  value: {string_value: "$$TRITON_MODEL_DIRECTORY/iitm-asr-env"}
+}
+instance_group [
+  {
+    count: 1
+    kind: KIND_GPU
+  }
+]
+response_cache {
+  enable: true
+}
-- 
GitLab
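
For reference, a minimal Python client for the `asr` model defined by this `config.pbtxt` might look like the sketch below. It is an illustration, not part of the patch: it assumes a Triton server listening on localhost:8000, the `tritonclient[http]` package, and a 16 kHz mono `sample.wav` (matching `SAMPLING_RATE` in `model.py`); the tensor and model names come from the config above.

```python
import numpy as np
import soundfile as sf
import tritonclient.http as httpclient

# Read a 16 kHz mono wav file as float32 samples (sample.wav is a placeholder).
audio, sr = sf.read("sample.wav", dtype="float32")
assert sr == 16000

client = httpclient.InferenceServerClient(url="localhost:8000")

# INPUT_AUDIO is a variable-length FP32 vector; INPUT_LANGUAGE_ID is one string.
audio_in = httpclient.InferInput("INPUT_AUDIO", list(audio.shape), "FP32")
audio_in.set_data_from_numpy(audio)
lang_in = httpclient.InferInput("INPUT_LANGUAGE_ID", [1], "BYTES")
lang_in.set_data_from_numpy(np.array(["hi"], dtype=object))  # "hi" = Hindi

result = client.infer("asr", inputs=[audio_in, lang_in])
print(result.as_numpy("OUTPUT_RECOGNIZED_TEXT")[0].decode("utf-8"))
```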