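# ducttape global config: continued pretraining of Llama 2 13B on a ~20B-token
# mix of monolingual mc4 data and English-centric parallel data
# (10 languages, 18 directed translation directions).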
global {
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_13B_all_20B
repo=/mnt/data/jpombal/multilinguality_megatron
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_13B_all_20B/mc4_parallel_checkpoints
model_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/dc1d3b3bfdb69df26f8fc966c16353274b138c55
tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/dc1d3b3bfdb69df26f8fc966c16353274b138c55/tokenizer.model
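    # Dataset branch: 10 monolingual mc4 sets plus 18 directed translation
    # directions; both directions of a pair share the same filtered bitext path.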
dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
dataset_path=(Dataset:
en=/mnt/data_2/shared/tower_llm_data/en/data
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
)
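    # Only en points at a directory loaded as a HuggingFace dataset; all other
    # sets are pre-sharded json.gz files.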
is_hf_dataset=(Dataset:
en=True
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=False
de_en=False
en_fr=False
fr_en=False
en_es=False
es_en=False
en_it=False
it_en=False
en_nl=False
nl_en=False
en_pt=False
pt_en=False
en_ru=False
ru_en=False
en_zh=False
zh_en=False
en_ko=False
ko_en=False
)
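    # Per-dataset perplexity filter thresholds; the monolingual values
    # (516, 275, 611, ...) are the ppl_thresh figures encoded in wandb_run_id below.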
threshold=(Dataset:
en=516
es=275
de=611
fr=322
nl=649
pt=257
it=332
ru=334
zh=2041
ko=198
en_de=100000
de_en=100000
en_fr=100000
fr_en=100000
en_es=100000
es_en=100000
en_it=100000
it_en=100000
en_nl=100000
nl_en=100000
en_pt=100000
pt_en=100000
en_ru=100000
ru_en=100000
en_zh=100000
zh_en=100000
en_ko=100000
ko_en=100000
)
    # roughly 67% of the mix for mc4 monolingual data, 33% for parallel data overall
datamix_weights=(
DataMix:
mc4_parallel_uniform=(
Dataset:
en=670
es=670
de=670
fr=670
nl=670
pt=670
it=670
ru=670
zh=670
ko=670
en_de=183
de_en=183
en_fr=183
fr_en=183
en_es=183
es_en=183
en_it=183
it_en=183
en_nl=183
nl_en=183
en_pt=183
pt_en=183
en_ru=183
ru_en=183
en_zh=183
zh_en=183
en_ko=183
ko_en=183
)
)
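    # Sanity check of the 67/33 split above:
    #   10 monolingual sets x 670 = 6700
    #   18 parallel sets    x 183 = 3294
    #   6700 / (6700 + 3294) ~= 0.67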
    # per-language token budgets chosen so that each language ends up with roughly 1B final tokens
n_tokens=(Dataset:
en=1000000000
es=833333330
de=833333330
fr=833333330
nl=833333330
pt=833333330
it=833333330
ru=500000000
zh=13888888
ko=250000000
en_de=20000000
de_en=20000000
en_fr=20000000
fr_en=20000000
en_es=20000000
es_en=20000000
en_it=20000000
it_en=20000000
en_nl=20000000
nl_en=20000000
en_pt=20000000
pt_en=20000000
en_ru=20000000
ru_en=20000000
en_zh=20000000
zh_en=20000000
en_ko=20000000
ko_en=20000000
)
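    # Marks the directed bitext sets as parallel data (used together with lp below).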
is_parallel=(Dataset:
en=False
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=True
de_en=True
en_fr=True
fr_en=True
en_es=True
es_en=True
en_it=True
it_en=True
en_nl=True
nl_en=True
en_pt=True
pt_en=True
en_ru=True
ru_en=True
en_zh=True
zh_en=True
en_ko=True
ko_en=True
)
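    # Language-pair tag for the parallel sets; empty for monolingual data.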
lp=(Dataset:
en=""
es=""
de=""
fr=""
nl=""
pt=""
it=""
ru=""
zh=""
ko=""
en_de="en-de"
de_en="de-en"
en_fr="en-fr"
fr_en="fr-en"
en_es="en-es"
es_en="es-en"
en_it="en-it"
it_en="it-en"
en_nl="en-nl"
nl_en="nl-en"
en_pt="en-pt"
pt_en="pt-en"
en_ru="en-ru"
ru_en="ru-en"
en_zh="en-zh"
zh_en="zh-en"
en_ko="en-ko"
ko_en="ko-en"
)
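    # Filtering floor: documents below this perplexity are presumably dropped as
    # degenerate/boilerplate text, complementing the per-language upper thresholds above.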
min_perplexity=50
size=(Size: 7 13)
log_interval=1
save_interval=635
eval_interval=635
train_steps=12700
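    # save/eval every 635 of 12700 steps => exactly 20 evenly spaced checkpoints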
lr_scheduler=cosine
warmup_steps=127
lr=3e-5
lr_min=3e-6
weight_decay=0.1
n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
tp=(TP: 1 2 3 4 5 6 7 8)
pp=(PP: 1 2 3 4)
micro_batch_size=4
grad_accum_steps=12
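    # Effective batch sketch, assuming Megatron-style DP = n_gpus/(tp*pp) and
    # Llama 2's 4096-token sequences: with tp=1, pp=1, the global batch is
    # 4 micro * 12 accum * 8 DP = 384 sequences ~= 1.57M tokens/step,
    # so 12700 steps ~= 20B tokens (matching the _20B output directory name).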
vocab_size=32000
cpu_workers=16
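    # Note: the run id below still says 7B although model_path points at the
    # Llama-2-13b snapshot.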
wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33"
wikipedia=False
freeze_layers=""
posterior_tokens=False
n_posterior_tokens=0
eval_iters=1
}