python run_speech_recognition_seq2seq_streaming.py \
    --model_name_or_path="openai/whisper-medium" \
    --dataset_train_name="mozilla-foundation/common_voice_11_0,mozilla-foundation/common_voice_11_0,mozilla-foundation/common_voice_11_0,babelbox/babelbox_voice,NbAiLab/NST,NbAiLab/NPSC,google/fleurs,google/fleurs,google/fleurs" \
    --dataset_train_config_name="sv-SE,da,nn-NO,nst,no-distant,16K_mp3_nynorsk,sv_se,da_dk,nb_no" \
    --language_train="sv,da,no,sv,no,no,sv,da,no" \
    --train_split_name="train+validation,train+validation,train+validation,train,train+test,train+validation,train+validation,train+validation,train+validation" \
    --dataset_eval_name="mozilla-foundation/common_voice_11_0" \
    --dataset_eval_config_name="sv-SE" \
    --language_eval="sv" \
    --eval_split_name="test" \
    --model_index_name="Whisper Medium Nordic" \
    --max_steps="10000" \
    --output_dir="./" \
    --per_device_train_batch_size="32" \
    --per_device_eval_batch_size="16" \
    --logging_steps="25" \
    --learning_rate="3e-6" \
    --warmup_ratio="0.1" \
    --evaluation_strategy="steps" \
    --eval_steps="1000" \
    --save_strategy="steps" \
    --save_steps="1000" \
    --generation_max_length="225" \
    --length_column_name="input_length" \
    --max_duration_in_seconds="30" \
    --text_column_name="sentence,text,raw_transcription" \
    --freeze_feature_encoder="False" \
    --report_to="wandb" \
    --save_total_limit="2" \
    --metric_for_best_model="wer" \
    --greater_is_better="False" \
    --load_best_model_at_end \
    --gradient_checkpointing \
    --overwrite_output_dir \
    --do_train \
    --do_eval \
    --fp16 \
    --predict_with_generate \
    --do_normalize_eval \
    --streaming \
    --use_auth_token
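
The comma-separated arguments define one training source per position: dataset name, config, language, and split are matched index by index, and with `--streaming` the sources are read on the fly rather than downloaded up front. As a rough illustration of how a `train+validation`-style split string can be handled in streaming mode with the `datasets` library (a sketch only, not the training script itself; the helper name `load_streaming_splits` is hypothetical):

```python
from datasets import load_dataset, interleave_datasets

def load_streaming_splits(name, config, split, **kwargs):
    """Load a dataset in streaming mode, accepting 'train+validation'-style split strings."""
    if "+" in split:
        # Streaming mode cannot combine splits with "+" directly,
        # so load each split separately and interleave the streams.
        parts = [
            load_dataset(name, config, split=s, streaming=True, **kwargs)
            for s in split.split("+")
        ]
        return interleave_datasets(parts)
    return load_dataset(name, config, split=split, streaming=True, **kwargs)

# e.g. the Swedish Common Voice portion of the training mix above
cv_sv = load_streaming_splits(
    "mozilla-foundation/common_voice_11_0",
    "sv-SE",
    "train+validation",
    use_auth_token=True,  # Common Voice 11 is gated; requires a logged-in Hugging Face token
)
print(next(iter(cv_sv))["sentence"])
```

Because streaming datasets have no known length, training duration is controlled by `--max_steps` rather than a number of epochs.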