global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/dirty_mc4_checkpoints
    model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852
    tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model

    dataset=(Dataset: en de fr es it nl pt ru zh ko)
    dataset_path=(Dataset:
        en=/mnt/data_2/shared/pre-training/tower_llm_data/en/data
        es=/mnt/data_2/shared/pre-training/tower_llm_data/es/0/0000.json.gz
        de=/mnt/data_2/shared/pre-training/tower_llm_data/de/0/0000.json.gz
        fr=/mnt/data_2/shared/pre-training/tower_llm_data/fr/1/0000.json.gz
        nl=/mnt/data_2/shared/pre-training/tower_llm_data/nl/0000.json.gz
        pt=/mnt/data_2/shared/pre-training/tower_llm_data/pt/0000.json.gz
        it=/mnt/data_2/shared/pre-training/tower_llm_data/it/0000.json.gz
        ru=/mnt/data_2/shared/pre-training/tower_llm_data/ru/0/0000.json.gz
        zh=/mnt/data_2/shared/pre-training/tower_llm_data/zh/0000.json.gz
        ko=/mnt/data_2/shared/pre-training/tower_llm_data/ko/0000.json.gz
    )
    is_hf_dataset=(Dataset:
        en=True
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
    )
    threshold=(Dataset:
        en=10000000 es=10000000 de=10000000 fr=10000000 nl=10000000
        pt=10000000 it=10000000 ru=10000000 zh=10000000 ko=10000000
    )

    datamix_weights=(
        DataMix:
            mc4_uniform=(
                Dataset: en=100 es=100 de=100 fr=100 nl=100 pt=100 it=100 ru=100 zh=100 ko=100
            )
    )

    # numbers chosen so that the final token count for each language is around 1B
    n_tokens=(Dataset:
        en=1000000000
        es=833333330
        de=833333330
        fr=833333330
        nl=833333330
        pt=833333330
        it=833333330
        ru=500000000
        zh=1388888800
        ko=250000000
    )

    min_perplexity=0

    size=(Size: 7 13)

    log_interval=1
    save_interval=635
    eval_interval=635
    train_steps=12700

    lr_scheduler=cosine
    warmup_steps=127
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=32000

    cpu_workers=16
    wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33"

    wikipedia=False
    freeze_layers=""
    posterior_tokens=False
    n_posterior_tokens=0
    eval_iters=1

    is_parallel=False
    lp=(Dataset: en="en" es="es" de="de" fr="fr" nl="nl" pt="pt" it="it" ru="ru" zh="zh" ko="ko")
}
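
# Sanity-check arithmetic for the token budget (illustrative only; assumes the Llama-2
# default sequence length of 4096, which is not set in this block, and the tp=1, pp=1 branch):
#   sequences per step = micro_batch_size * grad_accum_steps * n_gpus = 4 * 12 * 8 = 384
#   tokens per step    ≈ 384 * 4096 ≈ 1.57M
#   total tokens       ≈ 1.57M * train_steps (12700) ≈ 20B, consistent with the "20B" in ducttape_output
#   warmup_steps (127) is 1% of train_steps; save_interval (635) = train_steps / 20, i.e. roughly 20 checkpoints.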