global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32/warmed_up_checkpoints
    # for warmed up models, the model path points to the sharded megatron checkpoint
    model_path=/mnt/data/shared/multilingual_llm/experiments_megatron/warmup_embeddings_llama2_all_1B_extend32/checkpoints
    tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model 

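    # TrainLanguage is a branch point: the workflow runs one branch per language listed below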
    train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)

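    # optional data/training switches, all disabled or empty for this run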
    wikipedia=False
    posterior_tokens=False
    n_posterior_tokens=False
    freeze_layers=""
    
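    # per-language perplexity thresholds used when cleaning the data
    # (the same values appear in the ppl_thresh suffix of wandb_run_id below)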
    threshold=(TrainLanguage:
                en=516
                es=275
                de=611
                fr=322
                nl=649
                pt=257
                it=332
                ru=334
                zh=2041
                ko=198
            )

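    # token budget to draw from each language's corpus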
    n_tokens=(TrainLanguage:
                en=900000000
                es=900000000
                de=900000000
                fr=900000000
                nl=900000000
                pt=900000000
                it=900000000
                ru=550000000
                zh=20000000
                ko=450000000
            )
    
    dataset_path=(TrainLanguage: 
                en=/mnt/data_2/shared/tower_llm_data/en/data
                es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
                de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
                fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
                nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
                pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz             
                it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
                ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
                zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
                ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
            )

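    # weights for mixing the language datasets: uniform across the 10 languages,
    # matching the "uniform" tag in wandb_run_id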
    mix="10 10 10 10 10 10 10 10 10 10"

    min_perplexity=50

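    # model size in billions of parameters (Llama 2 7B or 13B)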
    size=(Size: 7 13)

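    # training schedule in optimizer steps; with micro_batch_size=4, grad_accum_steps=12 and
    # 8 data-parallel GPUs (tp=pp=1) the global batch is 384 sequences, so 6358 steps at an
    # assumed 4096-token sequence length come to roughly 10B tokens, as in the experiment name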
    log_interval=10
    save_interval=127
    eval_interval=635
    train_steps=6358
    eval_iters=1
    
    lr_scheduler=cosine
    warmup_steps=63
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1
    
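    # hardware and parallelism; TP and PP are tensor- and pipeline-parallel sizes exposed as branch points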
    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=52620

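    # data-loading workers and the Weights & Biases run name for this experiment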
    cpu_workers=16
    wandb_run_id="NEW_warmed_up_llama2_7B_10b_extend32_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198"
}