|
# Binarized corpora produced by the PreprocessDataset step. Not read by this
# script directly; exported for the training pipeline. Built from a single
# root plus per-dataset subdirectories instead of repeating the long prefix
# 57 times. Order is preserved from the original hand-written list (the
# original separated entries with spaces/newlines; a single space is
# equivalent for any word-splitting consumer).
_pp_root="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset"
_pp_sets="
Dataset.ru_synth Dataset.ru_en Dataset.en_fr_pre_annealing Dataset.en_fr
Dataset.fr_synth Dataset.ko_en_pre_annealing Dataset.es_synth
Dataset.zh_en_pre_annealing Baseline.baseline Dataset.en_synth
Dataset.pt_en_pre_annealing Dataset.en_pt Dataset.pt_synth Dataset.en_es
Dataset.zh_en Dataset.it_en Dataset.zh Dataset.en_de Dataset.ko_synth
Dataset.ko_en Dataset.en_de_pre_annealing Dataset.nl_en_pre_annealing
Dataset.en_ko_pre_annealing Dataset.it Dataset.pt Dataset.ru Dataset.en_zh
Dataset.es Dataset.de_en Dataset.de_en_pre_annealing Dataset.en_it
Dataset.it_en_pre_annealing Dataset.en_nl Dataset.en_zh_pre_annealing
Dataset.ru_en_pre_annealing Dataset.zh_synth Dataset.it_synth
Dataset.instructions Dataset.de Dataset.en_pt_pre_annealing Dataset.nl_en
Dataset.en_ru Dataset.de_synth Dataset.fr Dataset.ko Dataset.nl
Dataset.es_en Dataset.en_it_pre_annealing Dataset.en_ru_pre_annealing
Dataset.en_es_pre_annealing Dataset.fr_en_pre_annealing Dataset.nl_synth
Dataset.en_nl_pre_annealing Dataset.en_ko Dataset.es_en_pre_annealing
Dataset.fr_en Dataset.pt_en
"
dataset_bin=""
for _pp_name in $_pp_sets; do
    dataset_bin="${dataset_bin:+$dataset_bin }$_pp_root/$_pp_name/data_bin"
done
export dataset_bin
unset _pp_root _pp_sets _pp_name
|
# Datamix descriptor files produced by the MakeDataMix step; each file is
# later `cat`-ed into the --data_path argument. Same dataset list and order
# as dataset_bin above, built from one root to avoid 57 copies of the prefix
# (the original separated entries with spaces/newlines; a single space is
# equivalent for any word-splitting consumer).
_mix_root="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix"
_mix_sets="
Dataset.ru_synth Dataset.ru_en Dataset.en_fr_pre_annealing Dataset.en_fr
Dataset.fr_synth Dataset.ko_en_pre_annealing Dataset.es_synth
Dataset.zh_en_pre_annealing Baseline.baseline Dataset.en_synth
Dataset.pt_en_pre_annealing Dataset.en_pt Dataset.pt_synth Dataset.en_es
Dataset.zh_en Dataset.it_en Dataset.zh Dataset.en_de Dataset.ko_synth
Dataset.ko_en Dataset.en_de_pre_annealing Dataset.nl_en_pre_annealing
Dataset.en_ko_pre_annealing Dataset.it Dataset.pt Dataset.ru Dataset.en_zh
Dataset.es Dataset.de_en Dataset.de_en_pre_annealing Dataset.en_it
Dataset.it_en_pre_annealing Dataset.en_nl Dataset.en_zh_pre_annealing
Dataset.ru_en_pre_annealing Dataset.zh_synth Dataset.it_synth
Dataset.instructions Dataset.de Dataset.en_pt_pre_annealing Dataset.nl_en
Dataset.en_ru Dataset.de_synth Dataset.fr Dataset.ko Dataset.nl
Dataset.es_en Dataset.en_it_pre_annealing Dataset.en_ru_pre_annealing
Dataset.en_es_pre_annealing Dataset.fr_en_pre_annealing Dataset.nl_synth
Dataset.en_nl_pre_annealing Dataset.en_ko Dataset.es_en_pre_annealing
Dataset.fr_en Dataset.pt_en
"
datamix_file=""
for _mix_name in $_mix_sets; do
    datamix_file="${datamix_file:+$datamix_file }$_mix_root/$_mix_name/datamix_file"
done
export datamix_file
unset _mix_root _mix_sets _mix_name
|
# Sharded Megatron checkpoint to start from (passed to --load; replaced by
# external_model_dir below when a saved checkpoint is detected).
export megatron_model="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/ModelSharding/PP.1+Size.1+TP.1/sharded_model"

# Where checkpoints are saved (--save); also the symlink created below.
export model_dir="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/doc_attn_tests"

# Training sequence length in tokens (--seq_length).
export seq_length="2048"

# Tensor-model-parallel degree (--tensor_model_parallel_size).
export tp="1"

# Linear LR warmup iterations (--lr_warmup_iters).
export warmup_steps="32"

# Per-GPU batch size; multiplied into global_batch_size below.
export micro_batch_size="24"

# Gradient-accumulation factor; multiplied into global_batch_size below.
export grad_accum_steps="4"

# Optional --kv_channels override; empty means "omit the flag".
export kv_channels=""

# AdamW-style weight decay (--weight_decay).
export weight_decay="0.1"

# Checkpoint/tensorboard directory; "_${lr}" is appended below so runs with
# different learning rates do not collide. Same base path as model_dir here.
export external_model_dir="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/doc_attn_tests"

# Peak learning rate (--lr).
export lr="3e-5"

# Iterations between validation runs (--eval_interval).
export eval_interval="635"

# RMSNorm/LayerNorm epsilon (--layernorm_epsilon).
export layernorm_epsilon="1e-5"

# Iterations between log lines (--log_interval).
export log_interval="1"

# Layer-freezing mode; only "not_embeddings" enables --freeze_layers below.
export freeze_layers=""

# Gated-linear-unit activation variant (--glu_activation).
export glu_activation="swiglu"

# Batches per validation run (--eval_iters).
export eval_iters="1"

# Floor learning rate (--min_lr); only relevant if a decaying scheduler is used.
export lr_min="3e-6"

# Pipeline-model-parallel degree (--pipeline_model_parallel_size).
export pp="1"

# Model family name (--model_name); also selects embedding tying below.
export model_type="llama2"

# LR decay style (--lr_decay_style).
export lr_scheduler="constant"

# HF snapshot directory of the TinyLlama tokenizer (--vocab_file).
export tokenizer_path="/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574"

# Iterations between checkpoint saves (--save_interval).
export save_interval="635"

# GPUs per node (torchrun --nproc_per_node; also scales global_batch_size).
export n_gpus="1"

# Checkout of the multilinguality_megatron repo containing finetune.py.
export repo="/mnt/data/jpombal/multilinguality_megatron"

# Comma-separated GPU ids for CUDA_VISIBLE_DEVICES.
export gpu_ids="0"

# Megatron tokenizer backend (--tokenizer_type).
export tokenizer_type="PretrainedFromHF"

# Total training iterations (--train_iters).
export train_steps="11430"
|
|
|
# Suffix the checkpoint dir with the learning rate so runs with different
# lr values do not overwrite each other's checkpoints.
external_model_dir="${external_model_dir}_${lr}"

# NOTE(review): after the suffix above this is non-empty whenever lr is set,
# so the guard only protects against both variables being unset.
if [ -n "$external_model_dir" ]; then
    # -p creates parents, so one call covers both the dir and its runs/ subdir.
    mkdir -p "$external_model_dir/runs"
    # A bare `ln -s` fails with "File exists" on re-runs; only create the
    # symlink when model_dir is not already present.
    if [ ! -e "$model_dir" ]; then
        ln -s "$external_model_dir" "$model_dir"
    fi
fi
|
|
|
# Concatenate the contents of every datamix file into one string for
# --data_path. $datamix_file is deliberately unquoted: it is a
# space-separated list of file paths and must word-split here.
data_path=""
for f in $datamix_file; do
    # $( ) replaces the legacy backticks; quote the path passed to cat.
    data_path="$data_path $(cat "$f")"
done
echo "Running with data_path=$data_path"
|
|
|
# Optional layer-freezing flag; only the sentinel value "not_embeddings"
# enables it, any other value (including empty) leaves it off.
FREEZE_ARGS=""
case "$freeze_layers" in
    not_embeddings) FREEZE_ARGS="--freeze_layers" ;;
esac
echo "$FREEZE_ARGS"
|
|
|
# Restrict the run to the GPUs selected via gpu_ids above.
export CUDA_VISIBLE_DEVICES="${gpu_ids}"
|
|
|
|
|
# Resume support: Megatron writes latest_checkpointed_iteration.txt next to
# its checkpoints, so its presence means a previous run saved state in
# external_model_dir — load from there instead of the initial sharded model.
ckpt_flag="$external_model_dir/latest_checkpointed_iteration.txt"
if [ -f "$ckpt_flag" ]; then
    megatron_model="$external_model_dir"
    echo "Loading from previously saved checkpoint."
fi
|
|
|
# Forward the kv_channels override only when one was provided; an empty
# value keeps the flag out of the command line entirely.
KV_CHANNELS_ARGS=""
if [ -n "$kv_channels" ]; then
    KV_CHANNELS_ARGS="--kv_channels $kv_channels"
fi
|
|
|
# Every supported model except gemma gets untied input/output embeddings.
TIE_ARGS=""
# Quoting $model_type matters: unquoted, an empty value made `[` fail with
# "unary operator expected" instead of evaluating the comparison.
if [ "$model_type" != 'gemma' ]; then
    TIE_ARGS+="--no_tie_embed_logits"
fi
echo "$TIE_ARGS"
|
|
|
# Effective batch size = per-GPU micro batch * data-parallel GPUs * grad
# accumulation. Bare names inside $(( )) are the idiomatic form and treat an
# unset variable as 0, where the old `$(($micro_batch_size * ...))` spelling
# died with a syntax error if any variable was missing.
global_batch_size=$(( micro_batch_size * n_gpus * grad_accum_steps ))
|
|
|
# Logging and checkpoint cadence; all intervals are in training iterations.
LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval --eval_iters $eval_iters --log_validation_ppl_to_tensorboard --log_memory_to_tensorboard --log_batch_size_to_tensorboard"
# Optimizer schedule: total iterations, decay style, warmup, LR bounds,
# weight decay. With lr_scheduler="constant", min_lr has no effect.
TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min --weight_decay $weight_decay"
# Single-node torchrun topology. NOTE(review): master_port is hard-coded, so
# two concurrent runs on the same host would collide — confirm acceptable.
DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8134"
# Dropout disabled for continued pretraining; bias-GeLU fusion off.
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
# Llama-family architecture switches (RMSNorm, GLU activation, norm epsilon).
LLAMA_ARGS="--use_rms_norm --glu_activation $glu_activation --no_new_tokens --layernorm_epsilon $layernorm_epsilon"
|
# Launch continued pretraining of finetune.py under torchrun.
# CUDA_DEVICE_MAX_CONNECTIONS=1 is set per Megatron's requirement when
# --sequence_parallel is enabled.
# The *_ARGS variables and $data_path are deliberately unquoted so they
# word-split into individual flags.
# --reset_attention_mask / --reset_position_ids restart attention/positions
# at document boundaries (document-level attention).
# NOTE(review): --sliding_window_size 4096 exceeds seq_length 2048, so the
# window never truncates attention — confirm this is intentional.
# NOTE(review): --use_checkpoint_args lets values stored in the loaded
# checkpoint override some CLI args on resume — verify against finetune.py.
# The trailing backslash after the final arg group continues the command;
# further arguments may follow beyond this view of the file.
CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \
--tensor_model_parallel_size $tp \
--pipeline_model_parallel_size $pp \
--load $megatron_model \
--save $model_dir \
--tensorboard_dir $external_model_dir/runs \
--data_path $data_path \
--model_name $model_type \
--tokenizer_type $tokenizer_type \
--vocab_file=$tokenizer_path \
--bf16 \
--use_flash_attn \
--micro_batch_size $micro_batch_size \
--global_batch_size $global_batch_size \
--sequence_parallel \
--recompute_granularity selective \
--use_checkpoint_args \
--seq_length $seq_length \
--split 9990,5,5 \
--sliding_window_size 4096 \
--reset_attention_mask \
--reset_position_ids \
$COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS $FREEZE_ARGS $KV_CHANNELS_ARGS $TIE_ARGS \