#!/bin/bash
# run_gpt.sh: training script for the gpt2-large-dutch model (Flax/JAX)
export HF_PROJECT="gpt2-large-dutch-2"
# Variables for training the tokenizer and creating the config
export VOCAB_SIZE="50257"                       # GPT-2's standard vocabulary size
export DATASET="yhavinga/mc4_nl_cleaned"        # Name of the dataset on the Hugging Face Hub
export DATASET_CONFIG="full"                    # Config of the dataset on the Hugging Face Hub
export DATASET_SPLIT="train"                    # Split used for training the tokenizer and the model
export TEXT_FIELD="text"                        # Field containing the text used for training
export CONFIG_TYPE="gpt2-large"                 # Model config our model will use
export MODEL_PATH="${HOME}/data/${HF_PROJECT}"  # Path to the model directory, here inside the mount
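
# NOTE: the tokenizer/config variables above are not consumed by the training
# command below; presumably they are used by separate companion scripts. Below
# is a minimal sketch of that step, assuming the Hugging Face `datasets`,
# `tokenizers`, and `transformers` libraries: it streams the corpus, trains a
# byte-level BPE tokenizer, and writes a stock gpt2-large config into
# MODEL_PATH so that --config_name/--tokenizer_name below resolve. The
# TRAIN_TOKENIZER guard and the inline Python are illustrative assumptions,
# not the author's actual companion script.
if [ "${TRAIN_TOKENIZER:-0}" = "1" ]; then
  python - <<'PY'
import os
from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
from transformers import AutoConfig

model_path = os.environ["MODEL_PATH"]
os.makedirs(model_path, exist_ok=True)

# Stream the split so the full corpus never has to fit in memory.
dataset = load_dataset(
    os.environ["DATASET"],
    os.environ["DATASET_CONFIG"],
    split=os.environ["DATASET_SPLIT"],
    streaming=True,
)

def batch_iterator(batch_size=1000):
    batch = []
    for example in dataset:
        batch.append(example[os.environ["TEXT_FIELD"]])
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch

# Byte-level BPE as used by GPT-2; <|endoftext|> is GPT-2's only special token.
tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=int(os.environ["VOCAB_SIZE"]),
    special_tokens=["<|endoftext|>"],
)
tokenizer.save(os.path.join(model_path, "tokenizer.json"))

# Save a fresh gpt2-large config into MODEL_PATH for run_clm_flax.py.
config = AutoConfig.from_pretrained(os.environ["CONFIG_TYPE"])
config.save_pretrained(model_path)
PY
fi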

# Launch the Flax causal language-model training run.
python run_clm_flax.py \
    --output_dir="${MODEL_PATH}" \
    --model_type="gpt2" \
    --config_name="${MODEL_PATH}" \
    --model_name_or_path="${MODEL_PATH}" \
    --tokenizer_name="${MODEL_PATH}" \
    --preprocessing_num_workers="96" \
    --do_train --do_eval \
    --dataset_name="${DATASET}" \
    --dataset_config_name="${DATASET_CONFIG}" \
    --block_size="512" \
    --per_device_train_batch_size="4" \
    --per_device_eval_batch_size="4" \
    --learning_rate="0.000033" --warmup_steps="5000" \
    --adafactor \
    --overwrite_output_dir \
    --num_train_epochs="1" \
    --logging_steps="500" \
    --save_steps="20000" \
    --eval_steps="2500"
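
# Rough batch-size arithmetic, assuming a TPU v3-8 (8 devices), which these
# Flax community scripts typically ran on; the device count is an assumption,
# not stated in this file:
#   4 sequences/device * 8 devices = 32 sequences per step
#   32 sequences * 512 tokens (block_size) = 16,384 tokens per step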

# Optional flags, currently disabled:
#   --push_to_hub
#   --adam_beta1="0.9" --adam_beta2="0.98" --weight_decay="0.01"