#!/usr/bin/env bash
python run_flax_speech_recognition_seq2seq.py \
    --dataset_name="esc-benchmark/esc-datasets" \
    --model_name_or_path="esc-benchmark/wav2vec2-aed-pretrained" \
    --dataset_config_name="librispeech" \
    --output_dir="./" \
    --wandb_name="wav2vec2-aed-librispeech" \
    --wandb_project="wav2vec2-aed" \
    --per_device_train_batch_size="8" \
    --per_device_eval_batch_size="2" \
    --learning_rate="1e-4" \
    --warmup_steps="500" \
    --logging_steps="25" \
    --max_steps="50001" \
    --eval_steps="10000" \
    --save_steps="10000" \
    --generation_max_length="40" \
    --generation_num_beams="1" \
    --final_generation_max_length="300" \
    --final_generation_num_beams="12" \
    --generation_length_penalty="1.6" \
    --hidden_dropout="0.2" \
    --activation_dropout="0.2" \
    --feat_proj_dropout="0.2" \
    --overwrite_output_dir \
    --gradient_checkpointing \
    --freeze_feature_encoder \
    --predict_with_generate \
    --do_eval \
    --do_train \
    --do_predict \
    --push_to_hub \
    --use_auth_token