TinyLlama-CPT / multilinguality_megatron / ducttape / 20B_all_cleaned_mc4_wiki.tconf
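# Ducttape configuration for continued pretraining of Llama-2 7B on roughly 20B
# tokens of perplexity-filtered ("cleaned") mC4 plus Wikipedia data across ten
# languages (en, de, fr, es, it, nl, pt, ru, zh, ko). Output and checkpoint
# paths below refer to the shared cluster storage used for these experiments.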
global {
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
repo=/mnt/data/jpombal/multilinguality_megatron
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_wiki_checkpoints
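# Base model: meta-llama/Llama-2-7b-hf, referenced through its local Hugging
# Face cache snapshot; the SentencePiece tokenizer.model comes from the same
# snapshot.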
model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model
dataset=(Dataset: en de fr es it nl pt ru zh ko en_wiki de_wiki fr_wiki es_wiki it_wiki nl_wiki pt_wiki ru_wiki zh_wiki ko_wiki)
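# Per-language data locations. The English split points at a Hugging Face
# dataset directory (hence is_hf_dataset=True below); the remaining mC4 splits
# point at single json.gz shards. The *_wiki paths are left empty here and are
# presumably resolved by the Wikipedia-related tasks of the pipeline.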
dataset_path=(Dataset:
en=/mnt/data_2/shared/tower_llm_data/en/data
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
en_wiki=""
es_wiki=""
de_wiki=""
fr_wiki=""
nl_wiki=""
pt_wiki=""
it_wiki=""
ru_wiki=""
zh_wiki=""
ko_wiki=""
)
is_hf_dataset=(Dataset:
en=True
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_wiki=False
es_wiki=False
de_wiki=False
fr_wiki=False
nl_wiki=False
pt_wiki=False
it_wiki=False
ru_wiki=False
zh_wiki=False
ko_wiki=False
)
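# Per-language perplexity thresholds used for the "cleaned" mC4 filtering
# (together with min_perplexity further down); the same values appear in the
# wandb_run_id. The *_wiki entries are left empty, i.e. no threshold is applied
# to the Wikipedia splits.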
threshold=(Dataset:
en=516 en_wiki=""
es=275 es_wiki=""
de=611 de_wiki=""
fr=322 fr_wiki=""
nl=649 nl_wiki=""
pt=257 pt_wiki=""
it=332 it_wiki=""
ru=334 ru_wiki=""
zh=2041 zh_wiki=""
ko=198 ko_wiki=""
)
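# Relative sampling weights of the data mixture, one set per DataMix branch:
# mc4_wiki_uniform draws roughly 67% of each language's tokens from cleaned mC4
# and 33% from Wikipedia; mc4_uniform uses cleaned mC4 only.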
datamix_weights=(
DataMix:
mc4_wiki_uniform=(
Dataset:
en=67
es=67
de=67
fr=67
nl=67
pt=67
it=67
ru=67
zh=67
ko=67
en_wiki=33
es_wiki=33
de_wiki=33
fr_wiki=33
nl_wiki=33
pt_wiki=33
it_wiki=33
ru_wiki=33
zh_wiki=33
ko_wiki=33
)
mc4_uniform=(
Dataset:
en=100
es=100
de=100
fr=100
nl=100
pt=100
it=100
ru=100
zh=100
ko=100
en_wiki=0
es_wiki=0
de_wiki=0
fr_wiki=0
nl_wiki=0
pt_wiki=0
it_wiki=0
ru_wiki=0
zh_wiki=0
ko_wiki=0
)
)
# per-language token budgets, chosen so that the final token count for each language is around 1B
n_tokens=(Dataset:
en=1000000000
es=833333330
de=833333330
fr=833333330
nl=833333330
pt=833333330
it=833333330
ru=500000000
zh=13888888
ko=250000000
en_wiki=""
es_wiki=""
de_wiki=""
fr_wiki=""
nl_wiki=""
pt_wiki=""
it_wiki=""
ru_wiki=""
zh_wiki=""
ko_wiki=""
)
min_perplexity=50
size=(Size: 7 13)
log_interval=1
save_interval=635
eval_interval=635
train_steps=12700
lr_scheduler=cosine
warmup_steps=127
lr=3e-5
lr_min=3e-6
weight_decay=0.1
n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
tp=(TP: 1 2 3 4)
pp=(PP: 1 2 3 4)
micro_batch_size=4
grad_accum_steps=12
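# Effective batch size (assuming the TP=1, PP=1 branch, so the data-parallel
# size equals n_gpus=8): 4 micro-batch x 12 accumulation steps x 8 GPUs = 384
# sequences per optimizer step. Assuming Llama-2's 4096-token sequence length,
# that is ~1.57M tokens per step, and 12700 train_steps gives ~20B tokens,
# matching the 20B target in the experiment name. Saving/evaluating every 635
# steps yields 20 checkpoints over the run, and 127 warmup steps is 1% of
# training.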
vocab_size=32000
cpu_workers=16
wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33"
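# Optional training modes, all disabled or left empty for this run: Wikipedia-
# specific preprocessing, layer freezing, posterior-token training, and
# parallel-data (language-pair) settings. Their exact behavior is presumably
# defined by the corresponding tasks in the multilinguality_megatron repo.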
wikipedia=False
freeze_layers=""
posterior_tokens=False
n_posterior_tokens=False
eval_iters=1
is_parallel=False
lp=""
}