#!/bin/bash
# Environment setup for training the Persian GPT-2 (medium) causal LM.
# Every value exported here is consumed by the `python src/run_clm_flax.py`
# invocation at the bottom of this script.

set -euo pipefail  # fail fast on command errors, unset variables, and pipeline failures

export LC_ALL=C.UTF-8
export LANG=C.UTF-8

# Model checkpoint and output location (training writes back into the same dir).
export MODEL_NAME_OR_PATH=/home/m3hrdadfi/code/gpt2-medium-persian
export OUTPUT_DIR=/home/m3hrdadfi/code/gpt2-medium-persian
# export MODEL_TYPE=gpt2
# export CONFIG_NAME=/home/m3hrdadfi/code/gpt2-medium-persian
# export TOKENIZER_NAME=/home/m3hrdadfi/code/gpt2-medium-persian

# Dataset splits (CSV files).
export TRAIN_FILE=/home/m3hrdadfi/data/train-fixed.csv
export VALIDATION_FILE=/home/m3hrdadfi/data/test-fixed.csv
# NOTE(review): TEST_FILE lives under /home/m3hrdadfi/code/data/ while the
# other splits are under /home/m3hrdadfi/data/ — confirm which path is
# intended (it is currently unused by the command below).
export TEST_FILE=/home/m3hrdadfi/code/data/test-fixed.csv
# export DATASET_NAME=oscar
# export DATASET_CONFIG_NAME=unshuffled_deduplicated_fa

# Training hyperparameters.
export MAX_SEQUENCE_LENGTH=512
# export MAX_TRAIN_SAMPLES=5000   # NOTE(review): was misspelled MAX_TRAIN_SAMPLE
# export MAX_EVAL_SAMPLES=5000
export PER_DEVICE_TRAIN_BATCH_SIZE=16
export PER_DEVICE_EVAL_BATCH_SIZE=16
export NUM_TRAIN_EPOCHS=9.0
export LEARNING_RATE=8e-4
export WARMUP_STEPS=5000
export LOGGING_STEPS=500
export EVAL_STEPS=2500
export SAVE_STEPS=2500
# Launch Flax causal-LM training.
# BUG FIX: the argument is --logging_steps (plural); the previous
# --logging_step is not a recognized flag of the run_clm_flax argument
# parser, so the configured logging interval was never applied.
# All numeric expansions are quoted defensively (values here are plain
# numbers, but quoting keeps the command safe under `set -u` and spaces).
python src/run_clm_flax.py \
  --output_dir="$OUTPUT_DIR" \
  --model_name_or_path="$MODEL_NAME_OR_PATH" \
  --train_file="$TRAIN_FILE" \
  --validation_file="$VALIDATION_FILE" \
  --block_size="$MAX_SEQUENCE_LENGTH" \
  --per_device_train_batch_size="$PER_DEVICE_TRAIN_BATCH_SIZE" \
  --per_device_eval_batch_size="$PER_DEVICE_EVAL_BATCH_SIZE" \
  --num_train_epochs="$NUM_TRAIN_EPOCHS" \
  --learning_rate="$LEARNING_RATE" \
  --warmup_steps="$WARMUP_STEPS" \
  --logging_steps="$LOGGING_STEPS" \
  --eval_steps="$EVAL_STEPS" \
  --save_steps="$SAVE_STEPS" \
  --do_train \
  --do_eval \
  --overwrite_output_dir \
  --push_to_hub
# python src/run_clm_flax.py \
# --output_dir="$OUTPUT_DIR" \
# --model_type="$MODEL_TYPE" \
# --config_name="$CONFIG_NAME" \
# --tokenizer_name="$TOKENIZER_NAME" \
# --dataset_name="$DATASET_NAME" \
# --dataset_config_name="$DATASET_CONFIG_NAME" \
# --block_size=$MAX_SEQUENCE_LENGTH \
# --per_device_train_batch_size=$PER_DEVICE_TRAIN_BATCH_SIZE \
# --per_device_eval_batch_size=$PER_DEVICE_EVAL_BATCH_SIZE \
# --num_train_epochs=$NUM_TRAIN_EPOCHS \
# --learning_rate=$LEARNING_RATE \
# --warmup_steps=$WARMUP_STEPS \
# --logging_steps=$LOGGING_STEPS \
# --eval_steps=$EVAL_STEPS \
# --save_steps=$SAVE_STEPS \
# --do_train \
# --do_eval \
# --overwrite_output_dir \
# --push_to_hub