#!/usr/bin/env bash
# Train/evaluate a SmolLM-based speech LM on WavLM-derived codec tokens.
#
# Set bash to 'strict' mode, it will exit on:
# -e 'error', -u 'undefined variable', -o pipefail 'error in pipeline'.
set -e
set -u
set -o pipefail

# start command:
# bash run_wavlm_smollm.sh --stage 8 --stop_stage 8 --ngpu 8 --tag tts_smollm_whole_rinit --train_args "--codec_token_per_frame 8 --codec_token_in_use 8 --loss_region whole"

# Space-separated lists of dataset manifest JSONs consumed by speechlm.sh.
train_jsons=""
valid_jsons=""
test_jsons=""

# TTS training data: LibriSpeech 960h + MLS English.
train_jsons+="dump_wavlm/raw_tts_librispeech/train_960/data.json dump_wavlm/raw_tts_mls_en/mls_en_train/data.json"
valid_jsons+="dump_wavlm/raw_tts_librispeech/dev/data.json "

# Test sets: ASR and TTS directions on LibriSpeech test-clean.
test_jsons+="dump_wavlm/raw_codec_ssl_asr_librispeech/test_clean/data.json "
test_jsons+="dump_wavlm/raw_codec_ssl_tts_librispeech/test_clean/data.json "

train_config=conf/train_multiscale_smollm_360m.yaml
inference_config=conf/decode_asr.yaml

token_list_dir=data/token_list/llm_vocab # use llm vocab
bpe_opts="--subword_choice huggingface --subword_model HuggingFaceTB/SmolLM-1.7B"
codec_opts="--codec_choice inhouse"

# NOTE: ${bpe_opts} and ${codec_opts} are intentionally unquoted so they
# word-split into separate flag/value arguments. Trailing "$@" lets callers
# override any of the defaults below (e.g. --ngpu, --stage).
./speechlm.sh \
    --skip_data_prep true \
    --data_combo_name ls960_mlsen \
    --fs 16000 \
    --ngpu 4 \
    --nj 16 \
    --inference_nj 16 \
    --nbest 10 \
    --gpu_inference true \
    --g2p g2p_en_no_space \
    --cleaner tacotron \
    --token_list_dir "${token_list_dir}" \
    --train_config "${train_config}" \
    --inference_config "${inference_config}" \
    --audio_format "flac.ark" \
    --train_jsons "${train_jsons}" \
    --valid_jsons "${valid_jsons}" \
    --test_jsons "${test_jsons}" \
    --dumpdir dump_wavlm \
    ${bpe_opts} ${codec_opts} \
    "$@"