#!/bin/bash
# TinyLlama-CPT: multilinguality_megatron/continue_pretraining.sh
# This script will try to run a task *outside* any specified submitter
# Note: This script is for archival; it is not actually run by ducttape
# unset CUDA_VISIBLE_DEVICES
echo $CUDA_VISIBLE_DEVICES
data_path="1 spgi_vox_mls_text_1b/data/data_text_document"
megatron_model="spgi_vox_mls_text_1b/shards"
model_dir="spgi_vox_mls_text_1b/ckpt"
tokenizer_path="spgi_vox_mls_text_1b/new_extended_tokenizer/tokenizer.model"
tp="2"
pp="1"
# --wandb_logger \
# --wandb_id "hajmola" \
# --wandb_project "Megatron" \
# --wandb_entity "hajmola" \
# --wandb_api_key "<WANDB_API_KEY>" \
# Optimization arguments. All intervals and step counts below are measured in training optimizer steps.
grad_accum_steps="12"
micro_batch_size="12"
warmup_steps="13"
eval_interval="500"
lr="3e-5" #lr="3e-5"
log_interval="10"
lr_min="3e-6" #lr_min="3e-6"
lr_scheduler="cosine"
# infra arguments
save_interval="250"
n_gpus="2"
repo="multilinguality_megatron"
gpu_ids="4,5"
train_steps="1000"
# Parse command-line arguments
for arg in "$@"
do
case $arg in
--help)
echo "Usage: ./script.sh [OPTIONS]"
echo "Options:"
echo " --data_path=PATH Path to dataset. Should have the form of <integer_0> <PATH_TO_DATA_TEXT_DOCUMENT_0> <integer_1> <PATH_TO_DATA_TEXT_DOCUMENT_1> ..., where the integers determine the data's relative weight in the training set. If every integer is equal, then the data is uniformly sampled."
echo " --megatron_model=PATH Path to sharded megatron model"
echo " --model_dir=PATH folder to save model checkpoints; if this has a checkpoint, it will be used to continue training"
echo " --tokenizer_path=PATH Path to tokenizer.model of original HF model"
echo " --tp=NUMBER Number of shards model is divided in"
echo " --pp=NUMBER Pipeline parallel (default is 1)"
echo " --grad_accum_steps=NUMBER"
echo " Number of gradient accumulation steps"
echo " --micro_batch_size=NUMBER"
echo " Micro batch size"
echo " --warmup_steps=NUMBER Number of warmup steps"
echo " --eval_interval=NUMBER Number of steps between validations"
echo " --lr=NUMBER Learning rate"
echo " --log_interval=NUMBER Number of steps between logging"
echo " --lr_min=NUMBER Minimum learning rate of scheduler"
echo " --lr_scheduler=STRING Learning rate scheduler"
echo " --save_interval=NUMBER Number of steps between saves"
echo " --n_gpus=NUMBER Number of GPUs to use"
echo " --repo=PATH Path to repo"
echo " --gpu_ids=STRING GPU IDs to use"
echo " --train_steps=NUMBER Number of training steps"
exit 0
;;
--data_path=*)
data_path="${arg#*=}"
shift
;;
--megatron_model=*)
megatron_model="${arg#*=}"
shift
;;
--model_dir=*)
model_dir="${arg#*=}"
shift
;;
--tokenizer_path=*)
tokenizer_path="${arg#*=}"
shift
;;
--tp=*)
tp="${arg#*=}"
shift
;;
--pp=*)
pp="${arg#*=}"
shift
;;
--grad_accum_steps=*)
grad_accum_steps="${arg#*=}"
shift
;;
--micro_batch_size=*)
micro_batch_size="${arg#*=}"
shift
;;
--warmup_steps=*)
warmup_steps="${arg#*=}"
shift
;;
--eval_interval=*)
eval_interval="${arg#*=}"
shift
;;
--lr=*)
lr="${arg#*=}"
shift
;;
--log_interval=*)
log_interval="${arg#*=}"
shift
;;
--lr_min=*)
lr_min="${arg#*=}"
shift
;;
--lr_scheduler=*)
lr_scheduler="${arg#*=}"
shift
;;
--save_interval=*)
save_interval="${arg#*=}"
shift
;;
--n_gpus=*)
n_gpus="${arg#*=}"
shift
;;
--repo=*)
repo="${arg#*=}"
shift
;;
--gpu_ids=*)
gpu_ids="${arg#*=}"
shift
;;
--train_steps=*)
train_steps="${arg#*=}"
shift
;;
esac
done
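# Note: gpu_ids is parsed above but only restricts visible GPUs if the line below is
# uncommented and CUDA_VISIBLE_DEVICES is exported (e.g. `export CUDA_VISIBLE_DEVICES=$gpu_ids`),
# or is already exported by the calling shell; a bare assignment is not inherited by torchrun.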
# CUDA_VISIBLE_DEVICES=$gpu_ids
if [ "$model_dir" != "" ]; then
mkdir -p $model_dir
mkdir -p $model_dir/runs
fi
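# Resume logic: Megatron writes latest_checkpointed_iteration.txt into the save directory,
# so if that file is present we load from model_dir instead of the base sharded model.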
ckpt_flag="$model_dir/latest_checkpointed_iteration.txt"
if [ -f "$ckpt_flag" ]; then
megatron_model=$model_dir
echo "Loading from previously saved checkpoint."
fi
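# Effective batch size as computed here assumes pure data parallelism. Note that Megatron
# derives the number of gradient-accumulation microbatches from
# global_batch_size / (micro_batch_size * data_parallel_size), and with tp > 1 the
# data-parallel size is n_gpus / (tp * pp), not n_gpus.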
global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps))
LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval"
TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min"
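# Single-node torchrun launch; change --master_port if 50000 is already in use on the host.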
DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 50000"
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
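# Llama-family architecture flags: RMSNorm, SwiGLU activation, and untied input/output embeddings.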
LLAMA_ARGS="--use_rms_norm --glu_activation swiglu --no_tie_embed_logits --no_new_tokens --layernorm_epsilon 1e-5"
CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \
--tensor_model_parallel_size $tp \
--pipeline_model_parallel_size $pp \
--load $megatron_model \
--save $model_dir \
--tensorboard_dir $model_dir/runs \
--data_path $data_path \
--model_name llama \
--tokenizer_type SentencePieceTokenizer \
--vocab_file=$tokenizer_path \
--bf16 \
--use_flash_attn \
--micro_batch_size $micro_batch_size \
--global_batch_size $global_batch_size \
--sequence_parallel \
--recompute_granularity selective \
--use_checkpoint_args \
--seq_length 2048 \
--split 99,1,1 \
$COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS
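# Example invocation (hypothetical paths; defaults defined above are used for anything not overridden):
#   bash continue_pretraining.sh \
#     --data_path="1 my_corpus/data_text_document" \
#     --megatron_model=my_model/shards \
#     --model_dir=my_model/ckpt \
#     --tokenizer_path=my_model/tokenizer.model \
#     --tp=2 --pp=1 --n_gpus=2 --gpu_ids=0,1 \
#     --train_steps=1000 --save_interval=250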