#!/usr/bin/env bash
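# Grid search over learning rate, batch size, and gradient accumulation
# steps for seq2seq speech recognition fine-tuning on LibriSpeech
# (clean, train.100). One training run is launched per combination.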
declare -a learning_rates=("1e-5" "3e-5" "1e-4" "3e-4" "1e-3")
declare -a batch_sizes=("8" "12" "14" "16")
declare -a gradient_accumulation_step_sizes=("2" "4" "8")

for learning_rate in "${learning_rates[@]}"; do
  for batch_size in "${batch_sizes[@]}"; do
    for gradient_accumulation_steps in "${gradient_accumulation_step_sizes[@]}"; do
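      # create_model.py is assumed to (re)initialize a fresh model in the
      # current directory, which --model_name_or_path points at below.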
      python create_model.py
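      # Launch a single fine-tuning run on GPU 1 with the current
      # hyperparameter combination.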
      CUDA_VISIBLE_DEVICES=1 python run_speech_recognition_seq2seq.py \
--dataset_name="librispeech_asr" \
--model_name_or_path="./" \
--tokenizer_name="./" \
--dataset_config_name="clean" \
--train_split_name="train.100" \
--eval_split_name="validation" \
--output_dir="./" \
--preprocessing_num_workers="1" \
--length_column_name="input_length" \
--overwrite_output_dir \
--num_train_epochs="1" \
--per_device_train_batch_size=$batch_size \
--per_device_eval_batch_size=$batch_size \
--gradient_accumulation_steps=$gradient_accumulation_steps \
--generation_max_length="40" \
--generation_num_beams="1" \
--learning_rate=$learning_rate \
--warmup_steps="500" \
--evaluation_strategy="steps" \
--text_column_name="text" \
--save_steps="500" \
--eval_steps="500" \
--logging_steps="1" \
--save_total_limit="1" \
--freeze_feature_encoder \
--gradient_checkpointing \
--fp16 \
--group_by_length \
--predict_with_generate \
--do_lower_case \
--do_train \
--do_eval \
--push_to_hub \
--use_auth_token
    done
  done
done