#!/bin/bash
#
# Launch causal-LM (GPT-2) fine-tuning with the Hugging Face Flax example
# script. Expects src/run_clm_flax.py relative to the current directory;
# model, data, and output locations are set via the exported variables below.

# Abort on the first failing command, on use of an unset variable, and on
# failures inside pipelines.
set -euo pipefail

# Force a UTF-8, locale-independent environment (consistent tokenization
# and text handling regardless of the host locale).
export LC_ALL=C.UTF-8
export LANG=C.UTF-8

# Checkpoint to fine-tune and directory for outputs (same path here: the
# run continues from and writes back into the model repo).
export MODEL_NAME_OR_PATH=/home/m3hrdadfi/code/gpt2-medium-persian
export OUTPUT_DIR=/home/m3hrdadfi/code/gpt2-medium-persian

# Training / validation data (CSV files).
export TRAIN_FILE=/home/m3hrdadfi/data/train.csv
export VALIDATION_FILE=/home/m3hrdadfi/data/test.csv

# Sequence length the corpus is chunked into (passed as --block_size).
export MAX_SEQUENCE_LENGTH=512

# Optimization hyper-parameters.
export PER_DEVICE_TRAIN_BATCH_SIZE=16
export PER_DEVICE_EVAL_BATCH_SIZE=16
export NUM_TRAIN_EPOCHS=9.0
export LEARNING_RATE=8e-4
export WARMUP_STEPS=5000
export LOGGING_STEPS=500
export EVAL_STEPS=2500
export SAVE_STEPS=2500

# NOTE: the flag is --logging_steps (plural); the previous --logging_step
# did not match the script's argument name.
python src/run_clm_flax.py \
  --output_dir="$OUTPUT_DIR" \
  --model_name_or_path="$MODEL_NAME_OR_PATH" \
  --train_file="$TRAIN_FILE" \
  --validation_file="$VALIDATION_FILE" \
  --block_size="$MAX_SEQUENCE_LENGTH" \
  --per_device_train_batch_size="$PER_DEVICE_TRAIN_BATCH_SIZE" \
  --per_device_eval_batch_size="$PER_DEVICE_EVAL_BATCH_SIZE" \
  --num_train_epochs="$NUM_TRAIN_EPOCHS" \
  --learning_rate="$LEARNING_RATE" \
  --warmup_steps="$WARMUP_STEPS" \
  --logging_steps="$LOGGING_STEPS" \
  --eval_steps="$EVAL_STEPS" \
  --save_steps="$SAVE_STEPS" \
  --do_train \
  --do_eval \
  --overwrite_output_dir \
  --push_to_hub
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|