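# Ducttape workflow for continued pretraining with a Megatron-style training repo ($repo):
# dataset dumping and tokenization, HF-to-Megatron checkpoint conversion and sharding,
# continued pretraining, and a final annealing phase.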
task DumpHFDataset
> dataset_json=dataset.json
:: repo=@
:: dataset_path=@
:: n_tokens=@
:: threshold=@
:: min_perplexity=@
:: is_hf_dataset=@
:: wikipedia=@
:: posterior_tokens=@
:: n_posterior_tokens=@
:: is_parallel=@
:: lp=@
{
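# Dump the source dataset (HF hub or local path) to a single JSON file,
# keeping up to n_tokens tokens and applying the threshold/perplexity filters.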
python $repo/prepare_data.py \
--output $dataset_json \
--n_tokens $n_tokens \
--dataset_path $dataset_path \
--threshold $threshold \
--min_perplexity $min_perplexity \
--is_hf_dataset $is_hf_dataset \
--wikipedia $wikipedia \
--posterior_tokens $posterior_tokens \
--n_posterior_tokens $n_posterior_tokens \
--is_parallel $is_parallel \
--lp $lp
}
task PreprocessDataset
< dataset_json=$dataset_json@DumpHFDataset
> dataset_bin=data_bin
:: dataset=@
:: repo=@
:: tokenizer_path=@
:: tokenizer_type=@
:: cpu_workers=@
{
set -euo pipefail
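# Tokenize the JSON dump with the repo's preprocess_data.py, writing Megatron
# indexed binaries under $dataset_bin (output prefix "data").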
mkdir -p $dataset_bin
python $repo/tools/preprocess_data.py \
--input=$dataset_json \
--output_prefix=$dataset_bin/data \
--tokenizer_type=$tokenizer_type \
--vocab_file=$tokenizer_path \
--chunk_size=32 \
--workers=$cpu_workers \
--no_new_tokens \
--append_eod
}
task Convert2Megatron
> megatron_model
:: repo=@
:: size=@
:: model_path=@
:: model_type=@
{
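# Convert the Hugging Face checkpoint at $model_path into an (unsharded) Megatron checkpoint.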
python $repo/weights_conversion/hf_to_megatron.py $model_type \
--size=$size \
--out=$megatron_model \
--cache-dir=$model_path \
--model-path=$model_path
}
task ModelSharding
< megatron_model=$megatron_model@Convert2Megatron
> sharded_model
:: repo=@
:: tp=@
:: pp=@
:: vocab_size=@
:: model_type=@
:: kv_channels=@
{
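# Re-shard the Megatron checkpoint to the target tensor-parallel (tp) and
# pipeline-parallel (pp) sizes; kv_channels is only passed when explicitly set.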
KV_CHANNELS_ARGS=""
if [ "$kv_channels" != "" ]; then
KV_CHANNELS_ARGS="--kv_channels $kv_channels"
fi
python $repo/tools/checkpoint_util.py \
--target_tensor_parallel_size $tp \
--target_pipeline_parallel_size $pp \
--load_dir $megatron_model \
--save_dir $sharded_model \
--model_type $model_type \
--true_vocab_size $vocab_size \
--bf16 \
$KV_CHANNELS_ARGS
}
task MakeDataMix
< dataset_bin=@PreprocessDataset
> datamix_file
:: datamix_weights=@
{
# Write the datamix weight and the path to the preprocessed data (in dataset_bin) to a file, separated by a space.
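# The file ends up containing a single line of the form: <weight> <dataset_bin>/data_text_document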
echo "$datamix_weights $dataset_bin/data_text_document" > $datamix_file
}
task MakeDataMixAnnealing
< dataset_bin=@PreprocessDataset
> datamix_file_annealing
:: datamix_weights_annealing=@
{
# Same as MakeDataMix, but with the annealing-phase weights.
echo "$datamix_weights_annealing $dataset_bin/data_text_document" > $datamix_file_annealing
}
task ContinuePretraining
< megatron_model=$sharded_model@ModelSharding
< dataset_bin=$dataset_bin@PreprocessDataset[Dataset:*]
< datamix_file=$datamix_file@MakeDataMix[Dataset:*]
> model_dir=checkpoints
:: repo=@
:: log_interval=@
:: save_interval=@
:: eval_interval=@
:: train_steps=@
:: lr_scheduler=@
:: warmup_steps=@
:: lr=@
:: lr_min=@
:: n_gpus=@
:: gpu_ids=@
:: tp=@
:: pp=@
:: external_model_dir=@
:: tokenizer_path=@
:: micro_batch_size=@
:: grad_accum_steps=@
:: weight_decay=@
:: freeze_layers=@
:: eval_iters=@
:: model_type=@
:: seq_length=@
:: glu_activation=@
:: kv_channels=@
:: layernorm_epsilon=@
:: tokenizer_type=@
{
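# Continued pretraining of the sharded checkpoint on the combined data mix.
# Checkpoints go to $model_dir (symlinked to $external_model_dir when set), and
# training resumes automatically if a checkpoint already exists there.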
#external_model_dir="${external_model_dir}_${lr}"
if [ "$external_model_dir" != "" ]; then
mkdir -p $external_model_dir
mkdir -p $external_model_dir/runs
ln -s $external_model_dir $model_dir
fi
data_path=""
for f in $datamix_file; do
# read file
data_path="$data_path `cat $f`"
done
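# data_path is now an alternating "weight path weight path ..." list, as consumed by --data_path below.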
echo "Running with data_path=$data_path"
FREEZE_ARGS=""
if [ "$freeze_layers" == "not_embeddings" ]; then
FREEZE_ARGS="--freeze_layers"
fi
echo $FREEZE_ARGS
export CUDA_VISIBLE_DEVICES=$gpu_ids
# if load_from_checkpoint, then set megatron_model to external_model_dir
ckpt_flag=$external_model_dir/latest_checkpointed_iteration.txt
if [ -f $ckpt_flag ]; then
megatron_model=$external_model_dir
echo Loading from previously saved checkpoint.
fi
KV_CHANNELS_ARGS=""
if [ "$kv_channels" != "" ]; then
KV_CHANNELS_ARGS="--kv_channels $kv_channels"
fi
TIE_ARGS=""
if [ "$model_type" != "gemma" ]; then
TIE_ARGS+="--no_tie_embed_logits"
fi
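# Effective (global) batch size = micro batch size * data-parallel GPUs * gradient accumulation steps.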
global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps))
LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval --eval_iters $eval_iters --log_validation_ppl_to_tensorboard --log_memory_to_tensorboard --log_batch_size_to_tensorboard"
TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min --weight_decay $weight_decay"
DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8134"
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
LLAMA_ARGS="--use_rms_norm --glu_activation $glu_activation --no_new_tokens --layernorm_epsilon $layernorm_epsilon"
CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \
--tensor_model_parallel_size $tp \
--pipeline_model_parallel_size $pp \
--load $megatron_model \
--save $model_dir \
--tensorboard_dir $external_model_dir/runs \
--data_path $data_path \
--model_name $model_type \
--tokenizer_type $tokenizer_type \
--vocab_file=$tokenizer_path \
--bf16 \
--use_flash_attn \
--micro_batch_size $micro_batch_size \
--global_batch_size $global_batch_size \
--sequence_parallel \
--recompute_granularity selective \
--use_checkpoint_args \
--seq_length $seq_length \
--split 9995,3,2 \
--sliding_window_size 4096 \
--reset_attention_mask \
$COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS $FREEZE_ARGS $KV_CHANNELS_ARGS $TIE_ARGS
}
task Annealing
< megatron_model=$model_dir@ContinuePretraining
< dataset_bin=$dataset_bin@PreprocessDataset[Dataset:*]
< datamix_file=$datamix_file_annealing@MakeDataMixAnnealing[Dataset:*]
> model_dir=checkpoints
:: repo=@
:: log_interval=@
:: save_interval=@
:: eval_interval=@
:: train_steps_annealing=@
:: lr_scheduler_annealing=@
:: lr_annealing=@
:: lr_min_annealing=@
:: warmup_steps_annealing=@
:: n_gpus=@
:: gpu_ids=@
:: tp=@
:: pp=@
:: external_model_dir=@
:: external_model_dir_annealing=@
:: tokenizer_path=@
:: micro_batch_size=@
:: grad_accum_steps=@
:: weight_decay=@
:: freeze_layers=@
:: eval_iters=@
:: model_type=@
:: seq_length=@
:: glu_activation=@
:: kv_channels=@
:: layernorm_epsilon=@
:: tokenizer_type=@
{
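# Annealing phase: load the checkpoint from ContinuePretraining and train for a short
# schedule (train_steps_annealing) with its own LR settings and the annealing data mix.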
#external_model_dir="${external_model_dir}_${lr_annealing}"
#external_model_dir_annealing="${external_model_dir_annealing}_${lr_annealing}"
if [ "$external_model_dir" != "" ]; then
mkdir -p $external_model_dir_annealing
mkdir -p $external_model_dir/runs/annealing
ln -s $external_model_dir_annealing $model_dir
fi
data_path=""
for f in $datamix_file; do
# read file
data_path="$data_path `cat $f`"
done
echo "Running with data_path=$data_path"
FREEZE_ARGS=""
if [ "$freeze_layers" == "not_embeddings" ]; then
FREEZE_ARGS="--freeze_layers"
fi
echo $FREEZE_ARGS
KV_CHANNELS_ARGS=""
if [ "$kv_channels" != "" ]; then
KV_CHANNELS_ARGS="--kv_channels $kv_channels"
fi
TIE_ARGS=""
if [ "$model_type" != "gemma" ]; then
TIE_ARGS+="--no_tie_embed_logits"
fi
export CUDA_VISIBLE_DEVICES=$gpu_ids
global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps))
LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval --eval_iters $eval_iters --log_validation_ppl_to_tensorboard --log_memory_to_tensorboard --log_batch_size_to_tensorboard"
TRAIN_ARGS="--train_iters $train_steps_annealing --lr_decay_style $lr_scheduler_annealing --lr_warmup_iters $warmup_steps_annealing --lr $lr_annealing --min_lr $lr_min_annealing --weight_decay $weight_decay"
DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8135"
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
LLAMA_ARGS="--use_rms_norm --glu_activation $glu_activation --no_new_tokens --layernorm_epsilon $layernorm_epsilon"
CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \
--tensor_model_parallel_size $tp \
--pipeline_model_parallel_size $pp \
--load $megatron_model \
--save $model_dir \
--tensorboard_dir $external_model_dir/runs/annealing \
--data_path $data_path \
--model_name $model_type \
--tokenizer_type $tokenizer_type \
--vocab_file=$tokenizer_path \
--bf16 \
--use_flash_attn \
--micro_batch_size $micro_batch_size \
--global_batch_size $global_batch_size \
--sequence_parallel \
--recompute_granularity selective \
--use_checkpoint_args \
--seq_length $seq_length \
--split 9990,5,5 \
--sliding_window_size 4096 \
--annealing \
$COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS $FREEZE_ARGS $KV_CHANNELS_ARGS $TIE_ARGS
}
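# Plans: each plan selects a target task to reach and the branch-point values
# (Size, TP, PP, Dataset, DataMix) over which it is realized.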
plan preprocess_mc4 {
reach PreprocessDataset via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko) * (DataMix: mc4_uniform)
}
plan preprocess_inst {
reach PreprocessDataset via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko pl sv en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_de_wmt en_ru_wmt en_zh_wmt)
}
plan preprocess_data {
reach PreprocessDataset via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko pl sv en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_pl pl_en en_sv sv_en instructions) * (DataMix: mc4_wiki_uniform)
}
plan train_mc4_wiki {
reach ContinuePretraining via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_wiki de_wiki fr_wiki es_wiki it_wiki nl_wiki pt_wiki ru_wiki zh_wiki ko_wiki) * (DataMix: mc4_wiki_uniform)
}
plan train_mc4 {
reach ContinuePretraining via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko)
}
plan prepare_data {
reach DumpHFDataset via (Size: 1) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko)
}
plan preprocess_data_parallel {
reach PreprocessDataset via (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_ko ko_en en_zh zh_en)
}
plan train_mc4_parallel_instructions {
reach ContinuePretraining via (Size: 1) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions)
}
plan train_mc4_parallel_instructions_annealing {
reach Annealing via (Size: 1) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions)
}
plan train_mc4_parallel {
reach ContinuePretraining via (Size: 8) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
}
plan train_mc4_parallel_13B {
reach ContinuePretraining via (Size: 13) * (TP: 8) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
}
plan warmed_up_train {
reach ContinuePretrainingWarmedUp via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_wiki de_wiki fr_wiki es_wiki it_wiki nl_wiki pt_wiki ru_wiki zh_wiki ko_wiki) * (DataMix: mc4_wiki_uniform)
}
plan train_parallel {
reach ContinuePretraining via (Size: 8) * (TP: 4) * (PP: 1) * (Dataset: en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
}
plan gemma_test {
reach ContinuePretraining via (Size: 1) * (TP: 2) * (PP: 1) * (Dataset: en es)
}
plan train_mc4_parallel_gemma {
reach ContinuePretraining via (Size: 2) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
}
plan annealing {
reach Annealing via (Size: 1) * (TP: 4) * (PP: 1) * (Dataset: *)
}
plan cpt {
reach ContinuePretraining via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
}