|
global {
model_type="llama2"
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_annealing_baseline
repo=/mnt/data/jpombal/multilinguality_megatron

external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_annealing_baseline/mc4_parallel_checkpoints
external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_annealing_baseline/mc4_parallel_checkpoints_annealed
model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/
tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model

dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)

dataset_path=(Dataset:
en=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/en/filtered_en_2023-06_head_documents.jsonl.gz
es=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/es/filtered_es_2023-06_head_documents.jsonl.gz
de=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/de/filtered_de_2023-06_head_documents.jsonl.gz
fr=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/fr/filtered_fr_2023-06_head_documents.jsonl.gz
nl=/mnt/data/shared/tower_llm_data/webcorpus/nl/0000.json.gz
pt=/mnt/data/shared/tower_llm_data/webcorpus/pt/0000.json.gz
it=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/it/filtered_it_2023-06_head_documents.jsonl.gz
ru=/mnt/data/shared/tower_llm_data/webcorpus/ru/0000.json.gz
zh=/mnt/data/shared/tower_llm_data/webcorpus/zh/0000.json.gz
ko=/mnt/data/shared/tower_llm_data/webcorpus/ko/0000.json.gz
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
)

is_hf_dataset=(Dataset:
en=False
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=False
de_en=False
en_fr=False
fr_en=False
en_es=False
es_en=False
en_it=False
it_en=False
en_nl=False
nl_en=False
en_pt=False
pt_en=False
en_ru=False
ru_en=False
en_zh=False
zh_en=False
en_ko=False
ko_en=False
)

threshold=(Dataset:
en=10000000
es=10000000
de=10000000
fr=10000000
nl=10000000
pt=10000000
it=10000000
ru=10000000
zh=10000000
ko=10000000
en_de=100000
de_en=100000
en_fr=100000
fr_en=100000
en_es=100000
es_en=100000
en_it=100000
it_en=100000
en_nl=100000
nl_en=100000
en_pt=100000
pt_en=100000
en_ru=100000
ru_en=100000
en_zh=100000
zh_en=100000
en_ko=100000
ko_en=100000
)

# roughly 67% for mc4 (monolingual), 33% for total parallel data
datamix_weights=(
DataMix:
mc4_parallel_instructions=(
Dataset:
en=670
es=670
de=670
fr=670
nl=670
pt=670
it=670
ru=670
zh=670
ko=670
en_de=183
de_en=183
en_fr=183
fr_en=183
en_es=183
es_en=183
en_it=183
it_en=183
en_nl=183
nl_en=183
en_pt=183
pt_en=183
en_ru=183
ru_en=183
en_zh=183
zh_en=183
en_ko=183
ko_en=183
)
)
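# Sanity check of that split, assuming the weights act as relative
# sampling proportions: 10 monolingual sets x 670 = 6700 and 18 parallel
# sets x 183 = 3294, so monolingual data gets 6700 / (6700 + 3294) ~ 67%
# of the mix and parallel data ~ 33%, matching the comment above.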
|
|
|
datamix_weights_annealing=(
DataMix:
mc4_parallel_instructions=(
Dataset:
en=670
es=670
de=670
fr=670
nl=670
pt=670
it=670
ru=670
zh=670
ko=670
en_de=183
de_en=183
en_fr=183
fr_en=183
en_es=183
es_en=183
en_it=183
it_en=183
en_nl=183
nl_en=183
en_pt=183
pt_en=183
en_ru=183
ru_en=183
en_zh=183
zh_en=183
en_ko=183
ko_en=183
)
)
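# Note: the annealing mix is identical to the main-phase mix; the
# annealing run differs only in its step count and learning-rate
# schedule (see train_steps_annealing and lr_scheduler_annealing below).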
|
|
|
# numbers chosen so that the final token count for each language is around 1B
n_tokens=(Dataset:
en=1000000000
es=833333330
de=833333330
fr=833333330
nl=833333330
pt=833333330
it=833333330
ru=500000000
zh=13888888
ko=250000000
en_de=20000000
de_en=20000000
en_fr=20000000
fr_en=20000000
en_es=20000000
es_en=20000000
en_it=20000000
it_en=20000000
en_nl=20000000
nl_en=20000000
en_pt=20000000
pt_en=20000000
en_ru=20000000
ru_en=20000000
en_zh=20000000
zh_en=20000000
en_ko=20000000
ko_en=20000000
)
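# One plausible reading (an assumption, not stated in this config):
# these are unique-token budgets, and the ~1B-per-language target is
# reached by repeating data during sampling, e.g. 1e9 / 833333330 ~ 1.2
# epochs for es/de/fr/it/nl/pt, 2 epochs for ru (5e8), 4 for ko (2.5e8),
# and ~72 for zh (1e9 / 13888888).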
|
|
|
is_parallel=(Dataset:
en=False
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=True
de_en=True
en_fr=True
fr_en=True
en_es=True
es_en=True
en_it=True
it_en=True
en_nl=True
nl_en=True
en_pt=True
pt_en=True
en_ru=True
ru_en=True
en_zh=True
zh_en=True
en_ko=True
ko_en=True
)

lp=(Dataset:
en="none"
es="none"
de="none"
fr="none"
nl="none"
pt="none"
it="none"
ru="none"
zh="none"
ko="none"
en_de="en-de"
de_en="de-en"
en_fr="en-fr"
fr_en="fr-en"
en_es="en-es"
es_en="es-en"
en_it="en-it"
it_en="it-en"
en_nl="en-nl"
nl_en="nl-en"
en_pt="en-pt"
pt_en="pt-en"
en_ru="en-ru"
ru_en="ru-en"
en_zh="en-zh"
zh_en="zh-en"
en_ko="en-ko"
ko_en="ko-en"
)

min_perplexity=0

size=(Size: 1 7 13)

log_interval=1
save_interval=635
eval_interval=635
train_steps=5556
train_steps_annealing=794

lr_scheduler=constant
warmup_steps=32
lr=3e-5
lr_min=3e-6
weight_decay=0.1

lr_scheduler_annealing=linear
warmup_steps_annealing=0
lr_annealing=3e-5
lr_min_annealing=3e-6
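# Presumably (an inference from the values above, not stated in the
# config): the main run holds the learning rate constant at 3e-5 after
# 32 warmup steps, and the annealing run then decays it linearly from
# 3e-5 towards 3e-6 over the 794 annealing steps, with no re-warmup.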
|
|
|
n_gpus=7
gpu_ids=1,2,3,4,5,6,7
tp=(TP: 1 2 3 4 5 6 7 8)
pp=(PP: 1 2 3 4)
micro_batch_size=4
grad_accum_steps=26
vocab_size=32000
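# Rough throughput check (assuming tp=1 and pp=1, so all 7 GPUs are
# data-parallel replicas): global batch = 4 micro-batch x 26 grad-accum
# x 7 replicas = 728 sequences/step; with seq_length=2048 (below) that
# is ~1.49M tokens/step, so train_steps=5556 covers ~8.3B tokens and
# train_steps_annealing=794 adds ~1.2B more.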
|
|
|
cpu_workers=16
wikipedia=False
freeze_layers=""
posterior_tokens=False
n_posterior_tokens=0
eval_iters=1

seq_length=2048

glu_activation=swiglu
kv_channels=""
layernorm_epsilon=1e-5
}