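# Ducttape config for continued pretraining of Llama 2 on a 10-language mix
# (en, de, fr, es, it, nl, pt, ru, zh, ko) using the multilinguality_megatron repo.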
global {
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_test
repo=/mnt/data/jpombal/multilinguality_megatron
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_test/checkpoints
model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model
train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)
threshold=(TrainLanguage:
    en=516
    es=275
    de=611
    fr=322
    nl=649
    pt=257
    it=332
    ru=334
    zh=2041
    ko=198
)
# fewer tokens for zh (inefficient tokenizer)
n_tokens=(TrainLanguage:
    en=250000000
    es=83333333
    de=83333333
    fr=83333333
    nl=83333333
    pt=83333333
    it=83333333
    ru=83333333
    zh=8333333
    ko=83333333
)
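# Per-language token budgets sum to roughly 0.92B tokens: 250M for en, ~83M each for
# the other languages, and ~8M for zh (see the note above).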
dataset_path=(TrainLanguage:
    en=/mnt/data_2/shared/tower_llm_data/en/data
    es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
    de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
    fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
    nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
    pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
    it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
    ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
    zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
    ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
)
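# Sampling weights for the training mixture, presumably one per language in
# TrainLanguage order; all equal here, so languages are mixed uniformly.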
mix="10 10 10 10 10 10 10 10 10 10"
min_perplexity=50
size=(Size: 7 13)
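# Checkpoint/eval cadence: 1272 total steps gives 4 saves (4 * 318 = 1272) and an
# eval roughly every eighth of training (158 steps).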
log_interval=10
save_interval=318
eval_interval=158
train_steps=1272
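# Cosine decay from 3e-5 down to 3e-6, with a short warmup (13 steps, ~1% of training).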
lr_scheduler=cosine
warmup_steps=13
lr=3e-5
lr_min=3e-6
weight_decay=0.1
n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
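# Tensor/pipeline parallel degrees are branch points; in Megatron the data-parallel
# size is n_gpus / (tp * pp), e.g. tp=2, pp=1 leaves 4 data-parallel replicas on 8 GPUs.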
tp=(TP: 1 2 3 4)
pp=(PP: 1 2 3 4)
micro_batch_size=4
grad_accum_steps=6
cpu_workers=16
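# Effective batch: micro_batch_size * grad_accum_steps * data-parallel size; with tp=pp=1
# that is 4 * 6 * 8 = 192 sequences per step, or ~786K tokens/step assuming Llama 2's
# 4096-token context, so 1272 steps covers ~1.0B tokens, in line with the ~0.92B-token
# corpus above.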
}