KshitijAmbilduke committed
Commit 9ef89a4
1 Parent(s): b5cafa1

Upload 382 files

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set of changes.
- .gitattributes +2 -0
- multilinguality_megatron/.gitignore +4 -0
- multilinguality_megatron/LICENSE +376 -0
- multilinguality_megatron/README.md +43 -0
- multilinguality_megatron/__pycache__/finetune.cpython-39.pyc +0 -0
- multilinguality_megatron/ablation_eval_pipeline.sh +18 -0
- multilinguality_megatron/continue_pretraining.sh +185 -0
- multilinguality_megatron/convert2megatron.sh +45 -0
- multilinguality_megatron/cp.sh +10 -0
- multilinguality_megatron/debug.sh +101 -0
- multilinguality_megatron/deploy.sh +53 -0
- multilinguality_megatron/docs/Makefile +20 -0
- multilinguality_megatron/docs/_templates/autosummary/base.rst +5 -0
- multilinguality_megatron/docs/_templates/autosummary/class.rst +9 -0
- multilinguality_megatron/docs/_templates/autosummary/module.rst +29 -0
- multilinguality_megatron/docs/api/index.rst +130 -0
- multilinguality_megatron/docs/conf.py +64 -0
- multilinguality_megatron/docs/guide/faq.md +170 -0
- multilinguality_megatron/docs/guide/getting_started.md +276 -0
- multilinguality_megatron/docs/guide/index.md +10 -0
- multilinguality_megatron/docs/guide/instruction_tuning.md +92 -0
- multilinguality_megatron/docs/guide/tokenization.md +76 -0
- multilinguality_megatron/docs/guide/weights_conversion.md +87 -0
- multilinguality_megatron/docs/imgs/llama-falcon.png +3 -0
- multilinguality_megatron/docs/index.rst +75 -0
- multilinguality_megatron/docs/make.bat +35 -0
- multilinguality_megatron/docs/requirements.txt +11 -0
- multilinguality_megatron/ducttape/10B_all_cleaned.tconf +80 -0
- multilinguality_megatron/ducttape/10B_all_cleaned_13B.tconf +80 -0
- multilinguality_megatron/ducttape/10B_all_cleaned_extend32.tconf +83 -0
- multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmed_up.tconf +84 -0
- multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmup.tconf +93 -0
- multilinguality_megatron/ducttape/10B_all_wikipedia.tconf +83 -0
- multilinguality_megatron/ducttape/20B_all_cleaned_mc4.tconf +113 -0
- multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel.tconf +264 -0
- multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_13b.tconf +264 -0
- multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_concat.tconf +264 -0
- multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_instructions.tconf +271 -0
- multilinguality_megatron/ducttape/20B_all_cleaned_mc4_wiki.tconf +176 -0
- multilinguality_megatron/ducttape/20B_all_cleaned_parallel.tconf +194 -0
- multilinguality_megatron/ducttape/20B_all_dirty_mc4.tconf +124 -0
- multilinguality_megatron/ducttape/40B_all_cleaned_mc4_parallel.tconf +305 -0
- multilinguality_megatron/ducttape/continue_pretraining.tconf +77 -0
- multilinguality_megatron/ducttape/data_test.tconf +79 -0
- multilinguality_megatron/ducttape/data_test_extend32.tconf +79 -0
- multilinguality_megatron/ducttape/gemma_2B_20B_all_cleaned_mc4_parallel.tconf +280 -0
- multilinguality_megatron/ducttape/gemma_2b_flavio.tconf +546 -0
- multilinguality_megatron/ducttape/gemma_7B_20B_all_cleaned_mc4_parallel.tconf +280 -0
- multilinguality_megatron/ducttape/llama_3_flavio.tconf +546 -0
- multilinguality_megatron/ducttape/llama_3_flavio_wmt_annealing.tconf +570 -0
.gitattributes
CHANGED
@@ -54,3 +54,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
 data_text_document.idx filter=lfs diff=lfs merge=lfs -text
+multilinguality_megatron/megatron/fused_kernels/build/fused_mix_prec_layer_norm_cuda.so filter=lfs diff=lfs merge=lfs -text
+multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda_kernel.cuda.o filter=lfs diff=lfs merge=lfs -text
multilinguality_megatron/.gitignore
ADDED
@@ -0,0 +1,4 @@
__pycache__
build
.vscode
perplexity_texts
multilinguality_megatron/LICENSE
ADDED
@@ -0,0 +1,376 @@
The following applies to all files unless otherwise noted:

# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of NVIDIA CORPORATION nor the names of its
#      contributors may be used to endorse or promote products derived
#      from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

--

This repository also contains code from Hugging Face Inc., Google Research,
Facebook (from their Fairseq and Dino projects), Microsoft (from their
Swin-Transformer project), and Philip Popien. Files from these organizations
have notices at the top of each file. Below are the licenses used in those
files, as indicated.

------------- LICENSE FOR Facebook, Hugging Face and Google Research code --------------
[Full text of the Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/]

------------- LICENSE FOR Facebook Fairseq code --------------
[Full text of the MIT License, Copyright (c) Facebook, Inc. and its affiliates.]

------------- LICENSE FOR Microsoft Swin Transformer code --------------
[Full text of the MIT License, Copyright (c) Microsoft Corporation.]

--------------- NVIDIA Source Code License for SegFormer -----------------
[Full text of the NVIDIA Source Code License: a non-commercial license under which
the Work and derivative works may be used for research or evaluation purposes only,
while NVIDIA and its affiliates may use the Work commercially; it includes
redistribution, derivative-work, patent, trademark, and termination conditions,
a disclaimer of warranty, and a limitation of liability.]
multilinguality_megatron/README.md
ADDED
@@ -0,0 +1,43 @@
## Installation Instructions

As a pre-requisite, make sure you have [ducttape](https://github.com/CoderPat/ducttape) and [(mini)conda](https://docs.conda.io/en/latest/miniconda.html) installed.

First, clone this repository.

Then, to create a new conda environment with all the necessary dependencies, run the following command:

```bash
export CONDA_HOME="/path/to/(mini)conda3"
bash setup/conda.sh
```

# Training

## Data format

Before training, you must preprocess the training data. The input should be a `json` file with the following format:

```json
{"text": "<instance_0_text>"}
{"text": "<instance_1_text>"}
```

Note that the preprocessing script packs observations together into vectors of a specified length and separates each instance (JSON line) with the tokenizer's EOS token.

Then, run the bash scripts in this order:

```bash
./preprocess_data.sh [OPTIONS]
./convert2megatron.sh [OPTIONS]
./model_sharding.sh [OPTIONS]
./continue_pretraining.sh [OPTIONS]
```

> NOTE: each of these commands may be run with the `--help` flag, which explains how to use each argument.

For example, for a continued pretraining run with Llama 2 7B on datasets `d1` and `d2` and 8 GPUs, run the following:

```bash
./preprocess_data.sh --dataset_json=<path_to_d1> --dataset_bin=<d1_output_path> --vocab_file=<path_to_hf_model>/tokenizer.model --repo=<path_to_repo>
./preprocess_data.sh --dataset_json=<path_to_d2> --dataset_bin=<d2_output_path> --vocab_file=<path_to_hf_model>/tokenizer.model --repo=<path_to_repo>
./convert2megatron.sh --megatron_model=<megatron_model_path> --model_path=<path_to_hf_model> --size=7 --repo=<path_to_repo>
./model_sharding.sh --megatron_model=<megatron_model_path> --sharded_model=<sharded_model_path> --tp=8 --pp=1 --vocab_size=32000 --repo=<path_to_repo>
./continue_pretraining.sh --data_path="1 d1 1 d2" --megatron_model=<sharded_model_path> --model_dir=<checkpoint_save_dir> --tokenizer_path=<path_to_hf_model>/tokenizer.model --tp=8 --pp=1 [TRAINING_ARGS]
```
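For illustration, here is one way to produce the expected JSONL format from a plain-text corpus with one document per line (a minimal sketch; `corpus_d1.txt` and `d1.json` are hypothetical file names, and `jq` is assumed to be installed):

```bash
# Wrap each raw line into {"text": "..."} so preprocess_data.sh can consume it.
# jq -R reads each input line as a raw string; -c emits one compact JSON object per line.
jq -Rc '{text: .}' corpus_d1.txt > d1.json

# Quick sanity check: the first lines should each be a JSON object with a "text" field.
head -n 2 d1.json
```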
multilinguality_megatron/__pycache__/finetune.cpython-39.pyc
ADDED
Binary file (6.74 kB).
multilinguality_megatron/ablation_eval_pipeline.sh
ADDED
@@ -0,0 +1,18 @@
# bash script to evaluate a given model on wmt23, flores, ape, gec, standard benchmarks, and perplexity

# wmt23, flores, ape, gec, standard benchmarks use tower-eval
TOWER_EVAL_DIR=/mnt/data/jpombal/tower-eval
cd $TOWER_EVAL_DIR
source $TOWER_EVAL_DIR/tower-eval-env/bin/activate

CUDA_VISIBLE_DEVICES=0 python $TOWER_EVAL_DIR/tower_eval/cli.py lm_eval --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/std_bench.yaml &
CUDA_VISIBLE_DEVICES=1 python $TOWER_EVAL_DIR/tower_eval/cli.py gen-eval --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/mt.yaml &
CUDA_VISIBLE_DEVICES=2 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_flavio_final.yaml &
#CUDA_VISIBLE_DEVICES=3 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_flaviarlos_sft.yaml &
#CUDA_VISIBLE_DEVICES=4 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_carlos_no_mt_annealed_sft.yaml
# CUDA_VISIBLE_DEVICES=2 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_porfirio_pre_annealing.yaml &
# CUDA_VISIBLE_DEVICES=3 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_porfirio_sft.yaml &
# CUDA_VISIBLE_DEVICES=4 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_carlos_sft.yaml &
# CUDA_VISIBLE_DEVICES=5 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_carlos_annealed_sft.yaml &
# CUDA_VISIBLE_DEVICES=6 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_flavio_sft.yaml &
# CUDA_VISIBLE_DEVICES=7 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_porfirio_annealed.yaml &
multilinguality_megatron/continue_pretraining.sh
ADDED
@@ -0,0 +1,185 @@
# This script will try to run a task *outside* any specified submitter
# Note: This script is for archival; it is not actually run by ducttape
# unset CUDA_VISIBLE_DEVICES
echo $CUDA_VISIBLE_DEVICES

data_path="1 spgi_vox_mls_text_1b/data/data_text_document"
megatron_model="spgi_vox_mls_text_1b/shards"
model_dir="spgi_vox_mls_text_1b/ckpt"
tokenizer_path="spgi_vox_mls_text_1b/new_extended_tokenizer/tokenizer.model"
tp="2"
pp="1"

# --wandb_logger \
# --wandb_id "hajmola" \
# --wandb_project "Megatron" \
# --wandb_entity "hajmola" \
# --wandb_api_key "c4a95af43e910d14b0eca23fbb8165f94944d5af" \

# optimization arguments; self-explanatory. Intervals and steps are in terms of training optimizer steps
grad_accum_steps="12"
micro_batch_size="12"
warmup_steps="13"
eval_interval="500"
lr="3e-5" #lr="3e-5"
log_interval="10"
lr_min="3e-6" #lr_min="3e-6"
lr_scheduler="cosine"

# infra arguments
save_interval="250"
n_gpus="2"
repo="multilinguality_megatron"
gpu_ids="4,5"
train_steps="1000"

# Parse command-line arguments
for arg in "$@"
do
    case $arg in
        --help)
            echo "Usage: ./script.sh [OPTIONS]"
            echo "Options:"
            echo "  --data_path=PATH        Path to dataset. Should have the form of <integer_0> <PATH_TO_DATA_TEXT_DOCUMENT_0> <integer_1> <PATH_TO_DATA_TEXT_DOCUMENT_1> ..., where the integers determine the data's relative weight in the training set. If every integer is equal, then the data is uniformly sampled."
            echo "  --megatron_model=PATH   Path to sharded megatron model"
            echo "  --model_dir=PATH        folder to save model checkpoints; if this has a checkpoint, it will be used to continue training"
            echo "  --tokenizer_path=PATH   Path to tokenizer.model of original HF model"
            echo "  --tp=NUMBER             Number of shards model is divided in"
            echo "  --pp=NUMBER             Pipeline parallel (default is 1)"
            echo "  --grad_accum_steps=NUMBER"
            echo "                          Number of gradient accumulation steps"
            echo "  --micro_batch_size=NUMBER"
            echo "                          Micro batch size"
            echo "  --warmup_steps=NUMBER   Number of warmup steps"
            echo "  --eval_interval=NUMBER  Number of steps between validations"
            echo "  --lr=NUMBER             Learning rate"
            echo "  --log_interval=NUMBER   Number of steps between logging"
            echo "  --lr_min=NUMBER         Minimum learning rate of scheduler"
            echo "  --lr_scheduler=STRING   Learning rate scheduler"
            echo "  --save_interval=NUMBER  Number of steps between saves"
            echo "  --n_gpus=NUMBER         Number of GPUs to use"
            echo "  --repo=PATH             Path to repo"
            echo "  --gpu_ids=STRING        GPU IDs to use"
            echo "  --train_steps=NUMBER    Number of training steps"
            exit 0
            ;;
        --data_path=*)
            data_path="${arg#*=}"
            shift
            ;;
        --megatron_model=*)
            megatron_model="${arg#*=}"
            shift
            ;;
        --model_dir=*)
            model_dir="${arg#*=}"
            shift
            ;;
        --tokenizer_path=*)
            tokenizer_path="${arg#*=}"
            shift
            ;;
        --tp=*)
            tp="${arg#*=}"
            shift
            ;;
        --pp=*)
            pp="${arg#*=}"
            shift
            ;;
        --grad_accum_steps=*)
            grad_accum_steps="${arg#*=}"
            shift
            ;;
        --micro_batch_size=*)
            micro_batch_size="${arg#*=}"
            shift
            ;;
        --warmup_steps=*)
            warmup_steps="${arg#*=}"
            shift
            ;;
        --eval_interval=*)
            eval_interval="${arg#*=}"
            shift
            ;;
        --lr=*)
            lr="${arg#*=}"
            shift
            ;;
        --log_interval=*)
            log_interval="${arg#*=}"
            shift
            ;;
        --lr_min=*)
            lr_min="${arg#*=}"
            shift
            ;;
        --lr_scheduler=*)
            lr_scheduler="${arg#*=}"
            shift
            ;;
        --save_interval=*)
            save_interval="${arg#*=}"
            shift
            ;;
        --n_gpus=*)
            n_gpus="${arg#*=}"
            shift
            ;;
        --repo=*)
            repo="${arg#*=}"
            shift
            ;;
        --gpu_ids=*)
            gpu_ids="${arg#*=}"
            shift
            ;;
        --train_steps=*)
            train_steps="${arg#*=}"
            shift
            ;;
    esac
done

# CUDA_VISIBLE_DEVICES=$gpu_ids

if [ "$model_dir" != "" ]; then
    mkdir -p $model_dir
    mkdir -p $model_dir/runs
fi

ckpt_flag=$model_dir/latest_checkpointed_iteration.txt
if [ -f $ckpt_flag ]; then
    megatron_model=$model_dir
    echo Loading from previously saved checkpoint.
fi

global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps))

LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval"
TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min"
DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 50000"
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
LLAMA_ARGS="--use_rms_norm --glu_activation swiglu --no_tie_embed_logits --no_new_tokens --layernorm_epsilon 1e-5"
CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \
    --tensor_model_parallel_size $tp \
    --pipeline_model_parallel_size $pp \
    --load $megatron_model \
    --save $model_dir \
    --tensorboard_dir $model_dir/runs \
    --data_path $data_path \
    --model_name llama \
    --tokenizer_type SentencePieceTokenizer \
    --vocab_file=$tokenizer_path \
    --bf16 \
    --use_flash_attn \
    --micro_batch_size $micro_batch_size \
    --global_batch_size $global_batch_size \
    --sequence_parallel \
    --recompute_granularity selective \
    --use_checkpoint_args \
    --seq_length 2048 \
    --split 99,1,1 \
    $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS
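continue_pretraining.sh derives the effective batch size rather than taking it as an argument: `global_batch_size = micro_batch_size * n_gpus * grad_accum_steps`. A quick sanity check of that arithmetic with the script's defaults (a standalone sketch, not part of the repository):

```bash
micro_batch_size=12
n_gpus=2
grad_accum_steps=12
seq_length=2048

global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps))   # 288 sequences per optimizer step
tokens_per_step=$(($global_batch_size * $seq_length))                    # 589824 tokens per optimizer step
echo "global_batch_size=$global_batch_size tokens_per_step=$tokens_per_step"
```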
multilinguality_megatron/convert2megatron.sh
ADDED
@@ -0,0 +1,45 @@
#!/bin/bash

megatron_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/megatron_model"
model_path="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/extended_non_uniform_model"
size="1"
repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron"

# Parse command-line arguments
for arg in "$@"
do
    case $arg in
        --help)
            echo "Usage: ./script.sh [OPTIONS]"
            echo "Options:"
            echo "  --megatron_model=PATH   Path to save converted model."
            echo "  --model_path=PATH       Path of HF directory of model to be converted."
            echo "  --size=NUMBER           Billion parameters of model."
            echo "  --repo=PATH             Path to repo."
            exit 0
            ;;
        --megatron_model=*)
            megatron_model="${arg#*=}"
            shift
            ;;
        --model_path=*)
            model_path="${arg#*=}"
            shift
            ;;
        --size=*)
            size="${arg#*=}"
            shift
            ;;
        --repo=*)
            repo="${arg#*=}"
            shift
            ;;
    esac
done

# Run the Python script
python $repo/weights_conversion/hf_to_megatron.py llama \
    --size=$size \
    --out=$megatron_model \
    --cache-dir=$model_path \
    --model-path=$model_path
multilinguality_megatron/cp.sh
ADDED
@@ -0,0 +1,10 @@
langs=(en de es fr it pt nl ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)

for lang in ${langs[@]}; do
    mkdir -p /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}
    echo "0" > /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_exit_code.txt
    touch /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_stderr.txt
    touch /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_stdout.txt
    touch /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_task.sh
    cp /mnt/cephfs-nvme/shared/tower-base-training-data/${lang}/dataset.json /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ &
done
multilinguality_megatron/debug.sh
ADDED
@@ -0,0 +1,101 @@
export dataset_bin="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_fr_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_fr/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Baseline.baseline/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_pt/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_es/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_de/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_de_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ko_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt/data_bin 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_zh/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_it/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_nl/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_zh_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.instructions/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_pt_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ru/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_it_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ru_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_es_pre_annealing/data_bin 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_nl_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ko/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt_en/data_bin"
export datamix_file="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_fr_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_fr/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Baseline.baseline/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_pt/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_es/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_de/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_de_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ko_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt/datamix_file 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_zh/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_it/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_nl/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_zh_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.instructions/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_pt_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ru/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_it_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ru_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_es_pre_annealing/datamix_file 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_nl_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ko/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt_en/datamix_file"
export megatron_model="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/ModelSharding/PP.1+Size.1+TP.1/sharded_model"
export model_dir="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/doc_attn_tests"
export seq_length="2048"
export tp="1"
export warmup_steps="32"
export micro_batch_size="24"
export grad_accum_steps="4"
export kv_channels=""
export weight_decay="0.1"
export external_model_dir="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/doc_attn_tests"
export lr="3e-5"
export eval_interval="635"
export layernorm_epsilon="1e-5"
export log_interval="1"
export freeze_layers=""
export glu_activation="swiglu"
export eval_iters="1"
export lr_min="3e-6"
export pp="1"
export model_type="llama2"
export lr_scheduler="constant"
export tokenizer_path="/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574"
export save_interval="635"
export n_gpus="1"
export repo="/mnt/data/jpombal/multilinguality_megatron"
export gpu_ids="0"
export tokenizer_type="PretrainedFromHF"
export train_steps="11430"

external_model_dir="${external_model_dir}_${lr}"
if [ "$external_model_dir" != "" ]; then
    mkdir -p $external_model_dir
    mkdir -p $external_model_dir/runs
    ln -s $external_model_dir $model_dir
fi

data_path=""
for f in $datamix_file; do
    # read file
    data_path="$data_path `cat $f`"
done
echo "Running with data_path=$data_path"

FREEZE_ARGS=""
if [ "$freeze_layers" == "not_embeddings" ]; then
    FREEZE_ARGS="--freeze_layers"
fi
echo $FREEZE_ARGS

export CUDA_VISIBLE_DEVICES=$gpu_ids

# if load_from_checkpoint, then set megatron_model to external_model_dir
ckpt_flag=$external_model_dir/latest_checkpointed_iteration.txt
if [ -f $ckpt_flag ]; then
    megatron_model=$external_model_dir
    echo Loading from previously saved checkpoint.
fi

KV_CHANNELS_ARGS=""
if [ "$kv_channels" != "" ]; then
    KV_CHANNELS_ARGS="--kv_channels $kv_channels"
fi

TIE_ARGS=""
if [ $model_type != 'gemma' ]; then
    TIE_ARGS+="--no_tie_embed_logits"
fi
echo $TIE_ARGS

global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps))

LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval --eval_iters $eval_iters --log_validation_ppl_to_tensorboard --log_memory_to_tensorboard --log_batch_size_to_tensorboard"
TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min --weight_decay $weight_decay"
DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8134"
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
LLAMA_ARGS="--use_rms_norm --glu_activation $glu_activation --no_new_tokens --layernorm_epsilon $layernorm_epsilon"
CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \
    --tensor_model_parallel_size $tp \
    --pipeline_model_parallel_size $pp \
    --load $megatron_model \
    --save $model_dir \
    --tensorboard_dir $external_model_dir/runs \
    --data_path $data_path \
    --model_name $model_type \
    --tokenizer_type $tokenizer_type \
    --vocab_file=$tokenizer_path \
    --bf16 \
    --use_flash_attn \
    --micro_batch_size $micro_batch_size \
    --global_batch_size $global_batch_size \
    --sequence_parallel \
    --recompute_granularity selective \
    --use_checkpoint_args \
    --seq_length $seq_length \
    --split 9990,5,5 \
    --sliding_window_size 4096 \
    --reset_attention_mask \
    --reset_position_ids \
    $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS $FREEZE_ARGS $KV_CHANNELS_ARGS $TIE_ARGS
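As a quick sanity check of the values exported above (this snippet is not part of the script; the numbers are simply copied from the exports, so adjust them if you change the config), the effective global batch size and the rough token budget of the run can be estimated as follows:

```python
# Back-of-the-envelope token budget for the configuration above (illustrative only).
micro_batch_size = 24    # micro_batch_size export
n_gpus = 1               # n_gpus export (data-parallel degree with tp=1, pp=1)
grad_accum_steps = 4     # grad_accum_steps export
seq_length = 2048        # seq_length export
train_steps = 11430      # train_steps export

global_batch_size = micro_batch_size * n_gpus * grad_accum_steps   # 96 sequences per step
tokens_per_step = global_batch_size * seq_length                   # 196,608 tokens per step
total_tokens = tokens_per_step * train_steps                       # ~2.25B tokens overall

print(f"global_batch_size={global_batch_size}, "
      f"tokens/step={tokens_per_step:,}, total≈{total_tokens / 1e9:.2f}B")
```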
multilinguality_megatron/deploy.sh
ADDED
@@ -0,0 +1,53 @@
while getopts ":p:v:m:f:t:k:" opt; do
    case ${opt} in
        p )
            path_to_weights=$OPTARG
            ;;
        v )
            vocab_size=$OPTARG
            ;;
        m )
            model_name=$OPTARG
            ;;
        f )
            vocab_file=$OPTARG
            ;;
        t )
            model_type=$OPTARG
            ;;
        k )
            kv_channels=$OPTARG
            ;;
        \? )
            echo "Invalid option: $OPTARG" 1>&2
            exit 1
            ;;
        : )
            echo "Invalid option: $OPTARG requires an argument" 1>&2
            exit 1
            ;;
    esac
done
shift $((OPTIND -1))

KV_CHANNELS_ARGS=""
if [ "$kv_channels" != "" ]; then
    KV_CHANNELS_ARGS="--kv_channels $kv_channels"
fi

# path_to_weights is where the latest_checkpointed_iteration.txt file is located
# script creates a folder with respective iteration in unsharded_dir, so no need to specify iteration
python tools/checkpoint_util.py \
    --target_tensor_parallel_size 1 \
    --target_pipeline_parallel_size 1 \
    --load_dir $path_to_weights \
    --save_dir "${path_to_weights}/unsharded" \
    --model_type $model_type \
    --true_vocab_size $vocab_size \
    --bf16 \
    $KV_CHANNELS_ARGS

python weights_conversion/megatron_to_hf.py \
    --input_dir "${path_to_weights}/unsharded" \
    --output_dir "${path_to_weights}/hf/${model_name}" \
    --vocab_file "${vocab_file}" \
    --model $model_type

# remove intermediate step
rm -r "${path_to_weights}/unsharded"
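Once `deploy.sh` finishes, the Hugging Face checkpoint lives under `${path_to_weights}/hf/${model_name}` (and, since `megatron_to_hf.py` is given `--vocab_file`, the tokenizer is converted alongside it). A minimal loading sketch with `transformers`; the checkpoint path is a placeholder for whatever was passed via `-p` and `-m`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder: <path_to_weights>/hf/<model_name> as produced by deploy.sh
ckpt = "/path/to/weights/hf/my_model"

tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype="auto", device_map="auto")

inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```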
multilinguality_megatron/docs/Makefile
ADDED
@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
multilinguality_megatron/docs/_templates/autosummary/base.rst
ADDED
@@ -0,0 +1,5 @@
{{ fullname | escape | underline}}

.. currentmodule:: {{ module }}

.. auto{{ objtype }}:: {{ objname }}
multilinguality_megatron/docs/_templates/autosummary/class.rst
ADDED
@@ -0,0 +1,9 @@
{{ fullname | escape | underline}}

.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}
    :members:
    :special-members:
    :show-inheritance:
    :exclude-members: __weakref__, __init__
multilinguality_megatron/docs/_templates/autosummary/module.rst
ADDED
@@ -0,0 +1,29 @@
{{ fullname | escape | underline }}

.. rubric:: Description

.. automodule:: {{ fullname }}

.. currentmodule:: {{ fullname }}

{% if classes %}
.. rubric:: Classes

.. autosummary::
    :toctree: .
    {% for class in classes %}
    {{ class }}
    {% endfor %}

{% endif %}

{% if functions %}
.. rubric:: Functions

.. autosummary::
    :toctree: .
    {% for function in functions %}
    {{ function }}
    {% endfor %}

{% endif %}
multilinguality_megatron/docs/api/index.rst
ADDED
@@ -0,0 +1,130 @@
1 |
+
API
|
2 |
+
===
|
3 |
+
|
4 |
+
megatron
|
5 |
+
--------
|
6 |
+
|
7 |
+
.. autosummary::
|
8 |
+
:toctree: megatron
|
9 |
+
|
10 |
+
megatron.arguments
|
11 |
+
megatron.checkpointing
|
12 |
+
megatron.dist_signal_handler
|
13 |
+
megatron.global_vars
|
14 |
+
megatron.indexer
|
15 |
+
megatron.initialize
|
16 |
+
megatron.memory
|
17 |
+
megatron.microbatches
|
18 |
+
megatron.optimizer_param_scheduler
|
19 |
+
megatron.p2p_communication
|
20 |
+
megatron.schedules
|
21 |
+
megatron.text_generation_server
|
22 |
+
megatron.timers
|
23 |
+
megatron.training
|
24 |
+
megatron.utils
|
25 |
+
megatron.wandb_logger
|
26 |
+
|
27 |
+
megatron.core
|
28 |
+
-------------
|
29 |
+
|
30 |
+
.. autosummary::
|
31 |
+
:toctree: megatron/core
|
32 |
+
|
33 |
+
megatron.core.parallel_state
|
34 |
+
megatron.core.utils
|
35 |
+
|
36 |
+
|
37 |
+
megatron.core.tensor_parallel
|
38 |
+
-----------------------------
|
39 |
+
|
40 |
+
.. autosummary::
|
41 |
+
:toctree: megatron/core/tensor_parallel
|
42 |
+
|
43 |
+
megatron.core.tensor_parallel.cross_entropy
|
44 |
+
megatron.core.tensor_parallel.data
|
45 |
+
megatron.core.tensor_parallel.layers
|
46 |
+
megatron.core.tensor_parallel.mappings
|
47 |
+
megatron.core.tensor_parallel.random
|
48 |
+
megatron.core.tensor_parallel.utils
|
49 |
+
|
50 |
+
megatron.data
|
51 |
+
-------------
|
52 |
+
|
53 |
+
.. autosummary::
|
54 |
+
:toctree: megatron/data
|
55 |
+
|
56 |
+
megatron.data.autoaugment
|
57 |
+
megatron.data.blendable_dataset
|
58 |
+
megatron.data.gpt_dataset
|
59 |
+
megatron.data.image_folder
|
60 |
+
megatron.data.realm_dataset_utils
|
61 |
+
megatron.data.bert_dataset
|
62 |
+
megatron.data.data_samplers
|
63 |
+
megatron.data.indexed_dataset
|
64 |
+
megatron.data.orqa_wiki_dataset
|
65 |
+
megatron.data.realm_index
|
66 |
+
megatron.data.biencoder_dataset_utils
|
67 |
+
megatron.data.dataset_utils
|
68 |
+
megatron.data.ict_dataset
|
69 |
+
megatron.data.t5_dataset
|
70 |
+
|
71 |
+
megatron.model
|
72 |
+
--------------
|
73 |
+
|
74 |
+
.. autosummary::
|
75 |
+
:toctree: megatron/model
|
76 |
+
|
77 |
+
megatron.model.bert_model
|
78 |
+
megatron.model.biencoder_model
|
79 |
+
megatron.model.classification
|
80 |
+
megatron.model.distributed
|
81 |
+
megatron.model.enums
|
82 |
+
megatron.model.falcon_model
|
83 |
+
megatron.model.fused_bias_gelu
|
84 |
+
megatron.model.fused_layer_norm
|
85 |
+
megatron.model.fused_softmax
|
86 |
+
megatron.model.glu_activations
|
87 |
+
megatron.model.gpt_model
|
88 |
+
megatron.model.language_model
|
89 |
+
megatron.model.llama_model
|
90 |
+
megatron.model.module
|
91 |
+
megatron.model.multiple_choice
|
92 |
+
megatron.model.positional_embeddings
|
93 |
+
megatron.model.t5_model
|
94 |
+
megatron.model.transformer
|
95 |
+
megatron.model.utils
|
96 |
+
|
97 |
+
megatron.optimizer
|
98 |
+
------------------
|
99 |
+
|
100 |
+
.. autosummary::
|
101 |
+
:toctree: megatron/optimizer
|
102 |
+
|
103 |
+
megatron.optimizer.clip_grads
|
104 |
+
megatron.optimizer.distrib_optimizer
|
105 |
+
megatron.optimizer.grad_scaler
|
106 |
+
megatron.optimizer.optimizer
|
107 |
+
|
108 |
+
megatron.text_generation
|
109 |
+
------------------------
|
110 |
+
|
111 |
+
.. autosummary::
|
112 |
+
:toctree: megatron/text_generation
|
113 |
+
|
114 |
+
megatron.text_generation.api
|
115 |
+
megatron.text_generation.beam_utils
|
116 |
+
megatron.text_generation.communication
|
117 |
+
megatron.text_generation.forward_step
|
118 |
+
megatron.text_generation.generation
|
119 |
+
megatron.text_generation.sampling
|
120 |
+
megatron.text_generation.tokenization
|
121 |
+
|
122 |
+
megatron.tokenizer
|
123 |
+
------------------
|
124 |
+
|
125 |
+
.. autosummary::
|
126 |
+
:toctree: megatron/tokenizer
|
127 |
+
|
128 |
+
megatron.tokenizer.bert_tokenization
|
129 |
+
megatron.tokenizer.gpt2_tokenization
|
130 |
+
megatron.tokenizer.tokenizer
|
multilinguality_megatron/docs/conf.py
ADDED
@@ -0,0 +1,64 @@
1 |
+
# Configuration file for the Sphinx documentation builder.
|
2 |
+
#
|
3 |
+
# For the full list of built-in configuration values, see the documentation:
|
4 |
+
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
5 |
+
|
6 |
+
# -- Path setup --------------------------------------------------------------
|
7 |
+
|
8 |
+
# If extensions (or modules to document with autodoc) are in another directory,
|
9 |
+
# add these directories to sys.path here. If the directory is relative to the
|
10 |
+
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
11 |
+
#
|
12 |
+
import os
|
13 |
+
import sys
|
14 |
+
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
|
15 |
+
|
16 |
+
|
17 |
+
|
18 |
+
# -- Project information -----------------------------------------------------
|
19 |
+
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
20 |
+
|
21 |
+
project = 'Megatron-LLM'
|
22 |
+
copyright = '2023, Alejandro Hernández Cano, Matteo Pagliardini, Kyle Matoba, Amirkeivan Mohtashami, Olivia Simin Fan, Axel Marmet, Deniz Bayazit, Igor Krawczuk, Zeming Chen, Francesco Salvi, Antoine Bosselut, Martin Jaggi'
|
23 |
+
author = 'Alejandro Hernández Cano, Matteo Pagliardini, Kyle Matoba, Amirkeivan Mohtashami, Olivia Simin Fan, Axel Marmet, Deniz Bayazit, Igor Krawczuk, Zeming Chen, Francesco Salvi, Antoine Bosselut, Martin Jaggi'
|
24 |
+
release = '0.1.0'
|
25 |
+
|
26 |
+
# -- General configuration ---------------------------------------------------
|
27 |
+
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
|
28 |
+
|
29 |
+
extensions = [
|
30 |
+
'sphinx.ext.autodoc',
|
31 |
+
'sphinx.ext.intersphinx',
|
32 |
+
'sphinx.ext.autosummary',
|
33 |
+
'sphinx.ext.napoleon',
|
34 |
+
'sphinx.ext.mathjax',
|
35 |
+
'myst_parser'
|
36 |
+
]
|
37 |
+
|
38 |
+
# autosummary
|
39 |
+
autosummary_generate = True
|
40 |
+
|
41 |
+
# napoleon
|
42 |
+
napoleon_google_docstring = True
|
43 |
+
|
44 |
+
# myst
|
45 |
+
myst_enable_extensions = ["colon_fence"]
|
46 |
+
|
47 |
+
# autodoc
|
48 |
+
autodoc_mock_imports = ['amp_C', 'torchvision', 'flash_attn', 'apex']
|
49 |
+
|
50 |
+
templates_path = ['_templates']
|
51 |
+
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
52 |
+
|
53 |
+
intersphinx_mapping = {
|
54 |
+
'python': ('https://docs.python.org/3', None)
|
55 |
+
}
|
56 |
+
|
57 |
+
master_doc = 'index'
|
58 |
+
|
59 |
+
# -- Options for HTML output -------------------------------------------------
|
60 |
+
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
|
61 |
+
|
62 |
+
html_theme = 'pydata_sphinx_theme'
|
63 |
+
# html_theme = 'sphinx_rtd_theme'
|
64 |
+
html_static_path = ['_static']
|
multilinguality_megatron/docs/guide/faq.md
ADDED
@@ -0,0 +1,170 @@
1 |
+
# Frequently Asked Questions
|
2 |
+
|
3 |
+
## How to add special tokens?
|
4 |
+
|
5 |
+
When defining a new task, it is often needed to introduce tokens with special meanings.
|
6 |
+
For instance, let's say we want to add two tokens `[formula]` and `[/formula]` to indicate the start and end of a formula in mathematics textbooks.
|
7 |
+
In order to include these new tokens, you need to indicate them in three different places:
|
8 |
+
|
9 |
+
1. When tokenizing (`tools/preprocess_data.py`), using the flag `--vocab_extra_ids_list` with the new tokens:
|
10 |
+
```
|
11 |
+
python tools/preprocess_data.py --vocab_extra_ids_list "[formula],[/formula]" # ...
|
12 |
+
```
|
13 |
+
|
14 |
+
1. When sharding the model (`tools/checkpoint_util.py`), using `--true_vocab_size`.
|
15 |
+
For instance, Falcon has 65024 tokens by default.
|
16 |
+
Including these two extra tokens will result in
|
17 |
+
```
|
18 |
+
python tools/checkpoint_util.py --true_vocab_size 65026 # ...
|
19 |
+
```
|
20 |
+
|
21 |
+
1. When training (`finetune.py`) using `--vocab_extra_ids_list`.
|
22 |
+
Same as before:
|
23 |
+
```
|
24 |
+
python finetune.py --vocab_extra_ids_list "[formula],[/formula]" # ...
|
25 |
+
```
|
26 |
+
|
27 |
+
(tp-pp)=
|
28 |
+
## How to set TP and PP?
|
29 |
+
|
30 |
+
General strategies:
|
31 |
+
- It is recommended to use data parallelism as much as possible, only use model parallelism if the model cannot fit in the GPU or the micro batch size is very small.
|
32 |
+
- It is preferable to use tensor parallelism before pipeline parallelism, when working on a single machine.
|
33 |
+
- When a model does not fit in a single node, use a tensor parallelism level of as many GPUs each node has, and pipeline parallelism level as small as possible to allow the model to fit in memory, and maintain a micro batch size large enough (of at least 5).
|
34 |
+
|
35 |
+
In the codebase, you won't set data parallelism explicitly.
|
36 |
+
Rather, the data parallelism will be inferred automatically to be as high as possible, depending in your available hardware and TP, PP levels.
|
37 |
+
In general, the number of GPUs you need is:
|
38 |
+
```
|
39 |
+
GPUs = DP * TP * PP
|
40 |
+
```
|
41 |
+
For instance, if you have two nodes with 8 GPUs each, TP=4 and PP=2, then DP will be automatically set to 2 as `4 x 2 x 2 = 16`.
|
42 |
+
|
43 |
+
```{seealso}
|
44 |
+
- For more information on data and model parallelism see: https://huggingface.co/docs/transformers/v4.15.0/parallelism.
|
45 |
+
- Detailed information on how TP and PP works: https://arxiv.org/abs/2104.04473.
|
46 |
+
```
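As a small illustration of the formula above (not part of the codebase; the function and variable names here are just for the example), the implied data-parallel degree can be computed from the world size and the chosen TP/PP levels:

```python
def data_parallel_degree(num_nodes: int, gpus_per_node: int, tp: int, pp: int) -> int:
    """Return the data-parallel degree implied by GPUs = DP * TP * PP."""
    world_size = num_nodes * gpus_per_node
    assert world_size % (tp * pp) == 0, "world size must be divisible by TP * PP"
    return world_size // (tp * pp)

# Two nodes with 8 GPUs each, TP=4, PP=2  ->  DP = 2
print(data_parallel_degree(num_nodes=2, gpus_per_node=8, tp=4, pp=2))
```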
|
47 |
+
|
48 |
+
## How to launch training on multiple nodes?
|
49 |
+
|
50 |
+
In order to launch training on multiple nodes, you will set the appropriate arguments to the `torchrun` program.
|
51 |
+
|
52 |
+
1. Select a "master" or main node and take note of its IP address.
|
53 |
+
1. Launch the `finetune.py` script in the main node using `torchrun` with the following arguments:
|
54 |
+
```
|
55 |
+
torchrun --nproc_per_node NUMBER_OF_GPUS_PER_NODE \
|
56 |
+
--nnodes NUMBER_OF_NODES \
|
57 |
+
--node_rank 0 \
|
58 |
+
--master_addr ADDRESS_OF_THE_MAIN_NODE \
|
59 |
+
--master_port PORT \
|
60 |
+
finetune.py # ...
|
61 |
+
```
|
62 |
+
1. In the rest of the nodes, launch `finetune.py` with the same arguments, modifying `--node_rank` to a different value per node.
|
63 |
+
|
64 |
+
```{seealso}
|
65 |
+
- Take a look at the example script `examples/finetune.sh` for more information.
|
66 |
+
- Look at the [How to set TP and PP?](#tp-pp) section for more information.
|
67 |
+
```
|
68 |
+
|
69 |
+
## What are the basic hardware requirements?
|
70 |
+
|
71 |
+
In this section we give a brief overview on the minimal hardware requirements we observed during our experiments.
|
72 |
+
|
73 |
+
| Model | min VRAM | tp | pp |
|
74 |
+
| :--------- | :------: | :-: | :-: |
|
75 |
+
| LLaMa2-7B | 2x 80GB | 2 | 1 |
|
76 |
+
| Falcon-40B | 16x 80GB | 8 | 2 |
|
77 |
+
| LLaMa2-70B | 32x 80GB | 8 | 4 |
|
78 |
+
|
79 |
+
|
80 |
+
(shard)=
|
81 |
+
## How to shard and merge models?
|
82 |
+
|
83 |
+
Use `tools/checkpoint_util.py` to set the desired tensor and pipeline parallelism levels.
|
84 |
+
|
85 |
+
```
|
86 |
+
python tools/checkpoint_util.py \
|
87 |
+
--target_tensor_parallel_size TP \
|
88 |
+
--target_pipeline_parallel_size PP \
|
89 |
+
--load_dir /path/to/original/weights/ \
|
90 |
+
--save_dir /path/to/new/weights/ \
|
91 |
+
--model_type MODEL \
|
92 |
+
--bf16
|
93 |
+
```
|
94 |
+
Where MODEL can be either llama, llama2, falcon, gpt or bert, and TP and PP are the model parallelism levels desired.
|
95 |
+
Note that you can convert sharded weights (i.e. TP, PP > 1) to unsharded weights (TP = PP = 1) or vice versa.
|
96 |
+
|
97 |
+
## What arguments are used to train LLaMa 2?
|
98 |
+
|
99 |
+
We set the same hyperparameters specified by Meta during finetuning (see [their paper for more information](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/)).
|
100 |
+
This means, that training LLaMa 2 7B can be done with the following arguments:
|
101 |
+
|
102 |
+
```bash
|
103 |
+
torchrun \
|
104 |
+
# torchrun arguments # \
|
105 |
+
--nproc_per_node <GPUs per node> \
|
106 |
+
--nnodes <number of nodes> \
|
107 |
+
--node_rank <0,1,2,etc a different number per node> \
|
108 |
+
--master_addr <address of main node> \
|
109 |
+
--master_port <port> \
|
110 |
+
finetune.py --model_name llama2 \
|
111 |
+
# hardware/distributed arguments # \
|
112 |
+
--tensor_model_parallel_size <tp size> \
|
113 |
+
--pipeline_model_parallel_size <pp> \
|
114 |
+
--bf16 \
|
115 |
+
# training arguments # \
|
116 |
+
--train_iters <train iters> \
|
117 |
+
--adam_beta1 0.9 \
|
118 |
+
--adam_beta2 0.95 \
|
119 |
+
--adam_eps 1e-5 \
|
120 |
+
--lr_decay_style cosine \
|
121 |
+
--lr_warmup_iters <warmup iters> \
|
122 |
+
--lr 3e-4 \
|
123 |
+
--min_lr 1e-6 \
|
124 |
+
--weight_decay 0.1 \
|
125 |
+
--micro_batch_size 5 \
|
126 |
+
--global_batch_size 1000 \
|
127 |
+
# additional optimization arguments # \
|
128 |
+
--use_flash_attn \
|
129 |
+
--sequence_parallel \
|
130 |
+
--recompute_granularity selective \
|
131 |
+
# logging/pathing arguments # \
|
132 |
+
--load <path to megatron-llama> \
|
133 |
+
--use_checkpoint_args \
|
134 |
+
--vocab_file <path to tokenizer.model provided by Meta> \
|
135 |
+
--log_interval 1 \
|
136 |
+
--data_path <path to tokenized data> \
|
137 |
+
--tokenizer_type SentencePieceTokenizer
|
138 |
+
```
|
139 |
+
|
140 |
+
```{seealso}
|
141 |
+
The file `examples/finetune.sh` gives the full picture of the arguments used to train either LLaMa.
|
142 |
+
```
|
143 |
+
|
144 |
+
## How to convert a LLaMa or Falcon architecture from a non-official checkpoint?
|
145 |
+
|
146 |
+
If you want to convert weights from a checkpoint other than the checkpoints provided by `llama-meta` or `tiiuae`, you might use `--model-path` during conversion.
|
147 |
+
For instance, to convert the [OpenAssistant llama2 70B](https://huggingface.co/OpenAssistant/llama2-70b-oasst-sft-v10) weights, run:
|
148 |
+
|
149 |
+
```
|
150 |
+
python weights_conversion/hf_to_megatron.py llama2 --size=70 \
|
151 |
+
--out=/path/to/megatron/weights/ --cache-dir=/path/to/llama-2-7b/ \
|
152 |
+
--model-path=OpenAssistant/llama2-70b-oasst-sft-v10
|
153 |
+
```
|
154 |
+
|
155 |
+
The `--model-path` argument should be either a local folder or the name of a model hosted on huggingface.
|
156 |
+
|
157 |
+
## I'm getting a `17300 Bus error (core dumped)` error!
|
158 |
+
|
159 |
+
If you are using a docker container and you get this error when sharding a large model, you might need to increase the shared memory size.
|
160 |
+
This is done via the command line option `--shm-size=128gb`.
|
161 |
+
|
162 |
+
## I'm getting a `ImportError: cannot import name 'helpers' from 'megatron.data'` error!
|
163 |
+
|
164 |
+
You need to compile the `helpers` module:
|
165 |
+
|
166 |
+
```
|
167 |
+
cd megatron/data
|
168 |
+
make
|
169 |
+
cd ../../
|
170 |
+
```
|
multilinguality_megatron/docs/guide/getting_started.md
ADDED
@@ -0,0 +1,276 @@
1 |
+
# Getting started
|
2 |
+
|
3 |
+
This tutorial will guide you through the basic usage of Megatron-LLM.
|
4 |
+
In this guide we will fine tune a [LLaMa 2 7B](https://ai.meta.com/llama/) LLM on [code data](https://huggingface.co/datasets/bigcode/starcoderdata).
|
5 |
+
It is recommended to have at least 160GB VRAM available (e.g. two 80GB A100 GPUs).
|
6 |
+
|
7 |
+
```{note}
|
8 |
+
This tutorial can also be followed to train a Falcon architecture, using `falcon` instead of `llama2` throughout the guide.
|
9 |
+
```
|
10 |
+
|
11 |
+
## Setup
|
12 |
+
|
13 |
+
First we need to install the dependencies.
|
14 |
+
|
15 |
+
|
16 |
+
1. Clone our repo:
|
17 |
+
```
|
18 |
+
git clone git@github.com:epfLLM/Megatron-LLM.git
|
19 |
+
```
|
20 |
+
|
21 |
+
1. Run the [nvcr docker image](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch), mounting the source code to your desired path, e.g. `/mpt/Megatron-LLM`:
|
22 |
+
```
|
23 |
+
sudo docker run --gpus all -it --rm \
|
24 |
+
-v /path/to/Megatron-LLM/:/mpt/Megatron-LLM \
|
25 |
+
nvcr.io/nvidia/pytorch:23.07-py3
|
26 |
+
```
|
27 |
+
|
28 |
+
1. Enter the repository:
|
29 |
+
```
|
30 |
+
cd /mpt/Megatron-LLM/
|
31 |
+
```
|
32 |
+
|
33 |
+
1. Install the additional dependencies not included in the `nvcr` image:
|
34 |
+
```
|
35 |
+
pip install -r requirements.txt
|
36 |
+
```
|
37 |
+
|
38 |
+
1. Install the `megatron/data/helpers` binary:
|
39 |
+
```
|
40 |
+
cd megatron/data/
|
41 |
+
make
|
42 |
+
cd ../../
|
43 |
+
```
|
44 |
+
|
45 |
+
(download_weights)=
|
46 |
+
## Downloading LLaMa2 weights
|
47 |
+
|
48 |
+
1. Request access to the weights directly from Meta: https://ai.meta.com/resources/models-and-libraries/llama-downloads/.
|
49 |
+
1. Request access to the LLaMa2 huggingface model: https://huggingface.co/meta-llama/Llama-2-7b-hf.
|
50 |
+
1. Create a new huggingface token (or use an existing one): https://huggingface.co/settings/tokens.
|
51 |
+
1. Run the huggingface login CLI, and enter the token created on the previous step when asked:
|
52 |
+
```
|
53 |
+
huggingface-cli login
|
54 |
+
```
|
55 |
+
|
56 |
+
## Preparing the raw data
|
57 |
+
|
58 |
+
:::{note}
|
59 |
+
|
60 |
+
This tutorial will use code data to fine tune the LLM.
|
61 |
+
Feel free to use any other dataset, as long as the raw data is saved in `.jsonl` format, i.e. one `json` dictionary with the key `"text"` per line:
|
62 |
+
|
63 |
+
```json
|
64 |
+
{"text": "The blue cat is big."}
|
65 |
+
{"text": "This is another document."}
|
66 |
+
```
|
67 |
+
|
68 |
+
In this case, skip to the [data preprocessing](#data-preprocessing) section.
|
69 |
+
|
70 |
+
:::
|
71 |
+
|
72 |
+
1. Accept starcoder's terms of use via the huggingface portal: https://huggingface.co/datasets/bigcode/starcoderdata
|
73 |
+
1. Create a huggingface token (or use an existing one) and login using `huggingface-cli` (see [Downloading LLaMa2 weights](#download_weights) for more information).
|
74 |
+
1. Download and save the starcoder dataset.
|
75 |
+
In this tutorial we will use the `julia` data, but feel free to use any other subset.
|
76 |
+
This data contains around 500M tokens.
|
77 |
+
```python
|
78 |
+
import json
|
79 |
+
from datasets import load_dataset
|
80 |
+
|
81 |
+
# the `cache_dir` argument is optional
|
82 |
+
dataset = load_dataset("bigcode/starcoderdata", data_dir="julia",
|
83 |
+
split="train", cache_dir="/path/to/cache/")
|
84 |
+
with open("/path/to/raw.jsonl", "w+") as f:
|
85 |
+
for document in dataset:
|
86 |
+
document = {"id": document["id"], "text": document["content"]}
|
87 |
+
f.write(json.dumps(document) + "\n")
|
88 |
+
```
|
89 |
+
|
90 |
+
At this point, the raw data will be available at `/path/to/raw.jsonl`.
|
91 |
+
|
92 |
+
|
93 |
+
(data-preprocessing)=
|
94 |
+
## Data preprocessing
|
95 |
+
|
96 |
+
In this step we will tokenize the raw data to binary files for optimized data loading during training.
|
97 |
+
Run:
|
98 |
+
```
|
99 |
+
python tools/preprocess_data.py --input=/path/to/raw.jsonl \
|
100 |
+
--output_prefix=/path/to/tokenized/starcoder \
|
101 |
+
--tokenizer_type=SentencePieceTokenizer \
|
102 |
+
--vocab_file=/path/to/tokenizer.model \
|
103 |
+
--chunk_size=32 \
|
104 |
+
--workers=16 \
|
105 |
+
--no_new_tokens
|
106 |
+
```
|
107 |
+
|
108 |
+
```{note}
|
109 |
+
In this guide we use a sequence length of 1024 to accelerate training.
|
110 |
+
Note that the official sequence length of LLaMa2 is 4096.
|
111 |
+
```
|
112 |
+
|
113 |
+
```{note}
|
114 |
+
If you are using falcon, use `FalconTokenizer` instead of `SentencePieceTokenizer`, don't supply any `--vocab_file` and ignore the `--no_new_tokens` flag.
|
115 |
+
```
|
116 |
+
|
117 |
+
|
118 |
+
(weight-conversion)=
|
119 |
+
## Weight conversion
|
120 |
+
|
121 |
+
In order to use pretrained weights in the Megatron-LLM codebase, we will need to convert the official weights provided to be compatible with Megatron.
|
122 |
+
To do so, run:
|
123 |
+
```
|
124 |
+
python weights_conversion/hf_to_megatron.py llama2 --size=7 \
|
125 |
+
--out=/path/to/megatron/weights/ --cache-dir=/path/to/llama-2-7b/
|
126 |
+
```
|
127 |
+
|
128 |
+
(correctness-verification)=
|
129 |
+
## Correctness verification (optional)
|
130 |
+
|
131 |
+
To make sure the weight conversion ran successfully we run the `verify_correctness.py` script.
|
132 |
+
This will run simultaneously the official LLaMa 2 implementation and the Megatron codebase.
|
133 |
+
Make sure to adjust the arguments to your convenience:
|
134 |
+
```bash
|
135 |
+
# arguments required by `torchrun`
|
136 |
+
DISTRIBUTED_ARGS="--nproc_per_node 1 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
|
137 |
+
LLAMA_ARGS="--use_rms_norm --glu_activation swiglu --no_tie_embed_logits --no_new_tokens --layernorm_epsilon 1e-5"
|
138 |
+
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
|
139 |
+
torchrun $DISTRIBUTED_ARGS verify_correctness.py \
|
140 |
+
--model_name=llama2 \
|
141 |
+
--model_size=7 \
|
142 |
+
--load=/path/to/megatron/weights/ \
|
143 |
+
--data_path=/path/to/tokenized/starcoder \
|
144 |
+
--tokenizer_type=SentencePieceTokenizer \
|
145 |
+
--vocab_file=/path/to/megatron/weights/tokenizer.model \
|
146 |
+
--huggingface_cache=/path/to/meta/llama-2-7b/ \
|
147 |
+
--huggingface_device=cuda:1 \
|
148 |
+
$COMMON_ARGS $LLAMA_ARGS # don't include LLAMA_ARGS if using Falcon
|
149 |
+
```
|
150 |
+
|
151 |
+
This script will compare the logits output of Megatron model and the official implementation.
|
152 |
+
Expected outputs will yield average absolute error smaller than `0.01` when using 32-bit precision and `0.1` when using 16-bit precision.
|
153 |
+
|
154 |
+
## Model sharding
|
155 |
+
|
156 |
+
In order to use model parallelism you need to split the previously converted weights into multiple files, before you start training.
|
157 |
+
To do this, use `tools/checkpoint_util.py`.
|
158 |
+
Feel free to use different tensor parallel (tp) and pipeline (pp) sizes.
|
159 |
+
```
|
160 |
+
python tools/checkpoint_util.py \
|
161 |
+
--target_tensor_parallel_size 2 \
|
162 |
+
--target_pipeline_parallel_size 1 \
|
163 |
+
--load_dir /path/to/megatron/weights/ \
|
164 |
+
--save_dir /path/to/sharded/weights/ \
|
165 |
+
--model_type llama2 \
|
166 |
+
--true_vocab_size 32000 \
|
167 |
+
--bf16
|
168 |
+
```
|
169 |
+
|
170 |
+
Feel free to set `--target_tensor_parallel_size` to 4 if you have 4 or more GPUs available.
|
171 |
+
|
172 |
+
## Training
|
173 |
+
|
174 |
+
Use the `finetune.py`.
|
175 |
+
Example usage:
|
176 |
+
```bash
|
177 |
+
LOG_ARGS="--log_interval 1 --save_interval 100 --eval_interval 50"
|
178 |
+
TRAIN_ARGS="--train_iters 500 --lr_decay_style cosine --lr_warmup_iters 50 --lr 3e-4 --min_lr 1e-6"
|
179 |
+
DISTRIBUTED_ARGS="--nproc_per_node NUMBER_OF_GPUS --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
|
180 |
+
torchrun $DISTRIBUTED_ARGS finetune.py \
|
181 |
+
--tensor_model_parallel_size 4 \
|
182 |
+
--pipeline_model_parallel_size 1 \
|
183 |
+
--load /path/to/sharded/weights/ \
|
184 |
+
--save /path/to/sharded/weights/ \
|
185 |
+
--tensorboard_dir /path/to/sharded/weights/tensorboard/ \
|
186 |
+
--data_path /path/to/tokenized/starcoder \
|
187 |
+
--model_name llama2 \
|
188 |
+
--tokenizer_type SentencePieceTokenizer \
|
189 |
+
--vocab_file=/path/to/megatron/weights/tokenizer.model \
|
190 |
+
--bf16 \
|
191 |
+
--use_flash_attn \
|
192 |
+
--micro_batch_size 5 \
|
193 |
+
--global_batch_size 1000 \
|
194 |
+
--sequence_parallel \
|
195 |
+
--recompute_granularity selective \
|
196 |
+
--use_checkpoint_args \
|
197 |
+
$COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS
|
198 |
+
```
|
199 |
+
|
200 |
+
With the selected global batch size of 1000, and the total number of training tokens around 500M, in 500 iterations the trainer will perform approximately one epoch.
|
201 |
+
This will take approximately 20 hours to run on an 8x 80GB A100 cluster (DP=2, TP=4, PP=1).
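To see where the "approximately one epoch" figure comes from, here is a back-of-the-envelope check (not output of the codebase; the token count is the rough 500M mentioned earlier and the sequence length is the 1024 used in this guide):

```python
total_tokens = 500_000_000   # approximate size of the julia starcoder subset
global_batch_size = 1000     # sequences per optimizer step
seq_length = 1024            # sequence length used in this guide

steps_per_epoch = total_tokens / (global_batch_size * seq_length)
print(f"~{steps_per_epoch:.0f} steps per epoch")   # ~488, close to the 500 train_iters above
```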
|
202 |
+
|
203 |
+
:::{note}
|
204 |
+
|
205 |
+
To use distributed training, make sure to set `nproc_per_node` to the number of GPUs per node, `nnodes` to the number of nodes in your training, and `master_addr` to the address of your main node in the `DISTRIBUTED_ARGS` variable.
|
206 |
+
For instance, to train a two node cluster, with 8 GPUs each:
|
207 |
+
```
|
208 |
+
DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 2 --node_rank 0 --master_addr <address of the main node> --master_port 8000"
|
209 |
+
```
|
210 |
+
|
211 |
+
Then, run the `finetune.py` script in all your nodes with the same parameters, just setting a different `node_rank` at every node.
|
212 |
+
|
213 |
+
:::
|
214 |
+
|
215 |
+
```{seealso}
|
216 |
+
Take a look at `examples/finetune.sh` for more information on the recommended hyperparameters.
|
217 |
+
```
|
218 |
+
|
219 |
+
## Model Deployment
|
220 |
+
|
221 |
+
After training, merge your distributed weights again into a single model:
|
222 |
+
```
|
223 |
+
python tools/checkpoint_util.py \
|
224 |
+
--target_tensor_parallel_size 1 \
|
225 |
+
--target_pipeline_parallel_size 1 \
|
226 |
+
--load_dir /path/to/sharded/weights/ \
|
227 |
+
--save_dir /path/to/unsharded/trained/weights/ \
|
228 |
+
--model_type llama2 \
|
229 |
+
--true_vocab_size 32000 \
|
230 |
+
--bf16
|
231 |
+
```
|
232 |
+
|
233 |
+
We provide a Megatron to Huggingface conversion utility for easier deployment: `weights_conversion/megatron_to_hf.py`.
|
234 |
+
Run:
|
235 |
+
```
|
236 |
+
python weights_conversion/megatron_to_hf.py --input_dir=/path/to/unsharded/trained/weights/ \
|
237 |
+
--output_dir=/path/to/hf/weights/
|
238 |
+
```
|
239 |
+
|
240 |
+
Once the conversion is done, you can load the fine tuned weights using huggingface:
|
241 |
+
```python
|
242 |
+
import torch
|
243 |
+
import transformers
|
244 |
+
from transformers import LlamaForCausalLM, LlamaTokenizer
|
245 |
+
|
246 |
+
pipeline = transformers.pipeline(
|
247 |
+
"text-generation",
|
248 |
+
model=LlamaForCausalLM.from_pretrained("/path/to/hf/weights/"),
|
249 |
+
tokenizer=LlamaTokenizer.from_pretrained("/path/to/hf/weights/"),
|
250 |
+
torch_dtype=torch.bfloat16,
|
251 |
+
device_map="auto"
|
252 |
+
)
|
253 |
+
prompt = """#= a function that returns the fibonacci number of its argument =#
|
254 |
+
function fibonacci(n::Int)::Int
|
255 |
+
"""
|
256 |
+
sequences = pipeline(prompt, max_new_tokens=100, do_sample=True, top_k=20,
|
257 |
+
num_return_sequences=1)
|
258 |
+
for sequence in sequences:
|
259 |
+
print(sequence["generated_text"])
|
260 |
+
```
|
261 |
+
|
262 |
+
Once you are happy with your model performance, you might publish it to the huggingface hub using the `tools/push_to_hub.py` utility:
|
263 |
+
|
264 |
+
```
|
265 |
+
python tools/push_to_hub.py /path/to/hf/weights --hf_repo_name=MyRepoName/MyAwesomeModel --auth_token=MyAuthToken
|
266 |
+
```
|
267 |
+
|
268 |
+
## What's next?
|
269 |
+
|
270 |
+
1. Take a look at our example scripts to familiarize yourself with some other capabilities and hyperparameters used in the codebase, such as to train (pretrain or finetune) larger models:
|
271 |
+
- `examples/parallelize.sh`
|
272 |
+
- `examples/finetune.sh`
|
273 |
+
- `examples/verify.sh`
|
274 |
+
1. See the [instruction finetuning](instruction_tuning) guide for more information on how to finetune a pretrained model to follow instructions.
|
275 |
+
1. Take a look at our [FAQ](faq) section.
|
276 |
+
1. See [Weights conversion](weights_conversion) for more information on the `hf_to_megatron.py` and `megatron_to_hf.py` scripts.
|
multilinguality_megatron/docs/guide/index.md
ADDED
@@ -0,0 +1,10 @@
# User guide

```{toctree}

getting_started
instruction_tuning
faq
tokenization
weights_conversion
```
multilinguality_megatron/docs/guide/instruction_tuning.md
ADDED
@@ -0,0 +1,92 @@
1 |
+
# Instruction finetuning
|
2 |
+
|
3 |
+
This tutorial will guide you through the basics of instruction finetuning using the Megatron-LLM codebase, using LLaMa 2 as the base network.
|
4 |
+
See also the [getting started](getting_started) guide for information regarding installation of dependencies, pretraining, and weight preparation.
|
5 |
+
Following that tutorial, you will be able to finetune a 7B model in this guide, but feel free to use a different size.
|
6 |
+
In order to use Falcon, see the comments specified in the [getting started](getting_started) guide to learn more about the differences when using either model.
|
7 |
+
|
8 |
+
## Preparing raw data
|
9 |
+
|
10 |
+
The dataset used in this guide will be a subset of the [orca](https://huggingface.co/datasets/Open-Orca/OpenOrca) dataset, a general purpose instruction dataset.
|
11 |
+
We choose to only include the chain of thought instructions from the orca dataset in order to shrink the size of the data.
|
12 |
+
Feel free to use any other dataset, as long as the raw data is saved in `.jsonl` format, i.e. one `json` dictionary per line.
|
13 |
+
The dictionaries must include at least two keys (one for the "instruction" and another one for the expected "answer"), plus an optional "system" key.
|
14 |
+
In order to retrieve the CoT subset of the orca dataset, use the following code:
|
15 |
+
|
16 |
+
```python
|
17 |
+
import json
|
18 |
+
|
19 |
+
from datasets import load_dataset
from tqdm import tqdm  # needed for the progress bar in the loop below
|
20 |
+
|
21 |
+
# the `cache_dir` is optional
|
22 |
+
dataset = load_dataset("Open-Orca/OpenOrca", cache_dir="/path/to/cache", split="train")
|
23 |
+
with open("/path/to/raw/data.jsonl", "w+") as f:
|
24 |
+
for document in tqdm(dataset):
|
25 |
+
if document["id"].startswith("cot."):
|
26 |
+
f.write(json.dumps(document) + "\n")
|
27 |
+
```
|
28 |
+
|
29 |
+
## Data preprocessing
|
30 |
+
|
31 |
+
In this step we will tokenize the raw data to binary files for optimized data loading during training.
|
32 |
+
Run:
|
33 |
+
```
|
34 |
+
python instruct/preprocess_instruct_data.py \
|
35 |
+
--input=/path/to/raw/data.jsonl \
|
36 |
+
--output_prefix=/path/to/tokenized/orca \
|
37 |
+
--tokenizer_type=SentencePieceTokenizer \
|
38 |
+
--vocab_file=/path/to/llama/tokenizer.model \
|
39 |
+
--chunk_size=32 \
|
40 |
+
--workers=32 \
|
41 |
+
--vocab_extra_ids_list "<|im_start|>,<|im_end|>" \
|
42 |
+
--question_key=question \
|
43 |
+
--answer_key=response \
|
44 |
+
--system_key=system_prompt # Optional
|
45 |
+
```
|
46 |
+
|
47 |
+
## Training
|
48 |
+
|
49 |
+
At this point, you should come up with a Megatron checkpoint ready to be trained (i.e. sharded with the desired parallelism levels).
|
50 |
+
Take a look at the [getting started](getting_started) guide to look how to transform LLaMa 2 checkpoints in the huggingface format to Megatron, and shard the weights.
|
51 |
+
|
52 |
+
To start training, use the `finetune.py`.
|
53 |
+
Example usage:
|
54 |
+
```bash
|
55 |
+
LOG_ARGS="--log_interval 1 --save_interval 100 --eval_interval 50"
|
56 |
+
TRAIN_ARGS="--train_iters 6500 --lr_decay_style cosine --lr_warmup_iters 650 --lr 2e-5 --min_lr 2e-6"
|
57 |
+
DISTRIBUTED_ARGS="--nproc_per_node NUMBER_OF_GPUS --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
|
58 |
+
torchrun $DISTRIBUTED_ARGS finetune.py \
|
59 |
+
--tensor_model_parallel_size 4 \
|
60 |
+
--pipeline_model_parallel_size 1 \
|
61 |
+
--load /path/to/sharded/weights/ \
|
62 |
+
--save /path/to/sharded/weights/ \
|
63 |
+
--tensorboard_dir /path/to/sharded/weights/tensorboard/ \
|
64 |
+
--data_path /path/to/tokenized/orca \
|
65 |
+
--model_name llama2 \
|
66 |
+
--tokenizer_type SentencePieceTokenizer \
|
67 |
+
--vocab_file=/path/to/megatron/weights/tokenizer.model \
|
68 |
+
--bf16 \
|
69 |
+
--use_flash_attn \
|
70 |
+
--micro_batch_size 8 \
|
71 |
+
--global_batch_size 64 \
|
72 |
+
--sequence_parallel \
|
73 |
+
--recompute_granularity selective \
|
74 |
+
--use_checkpoint_args \
|
75 |
+
--data_type instruction \
|
76 |
+
--variable_seq_lengths \
|
77 |
+
--vocab_extra_ids_list "<|im_start|>,<|im_end|>" \
|
78 |
+
$COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS
|
79 |
+
```
|
80 |
+
|
81 |
+
The arguments given for pretraining and instruction finetuning are very similar, with the key differences being the batch sizes, learning rates, and the inclusion of `--data_type instruction`, `--variable_seq_lengths` and `--vocab_extra_ids_list`.
|
82 |
+
With the selected global batch size of 64, in 6500 iterations the trainer will perform approximately three epochs.
|
83 |
+
This will take approximately 3 hours to run on an 8x 80GB A100 device (DP=2, TP=4, PP=1).
|
84 |
+
|
85 |
+
```{note}
|
86 |
+
If your `--load` checkpoint corresponds to a checkpoint already trained with the Megatron-LLM codebase (and not a checkpoint obtained by directly converting from the huggingface format, for instance), you might want to define a `--save` directory that points somewhere else, to avoid overwriting previous checkpoints.
|
87 |
+
You might also want to include the `--finetune` argument to ignore the previous optimizer and RNG states.
|
88 |
+
```
|
89 |
+
|
90 |
+
## Model Deployment
|
91 |
+
|
92 |
+
Once the finetuning is over, you can follow the [getting started](getting_started) guide steps to unshard your weights and convert them to huggingface, in order to do specific evaluations and deployment.
|
multilinguality_megatron/docs/guide/tokenization.md
ADDED
@@ -0,0 +1,76 @@
1 |
+
# How to tokenize a dataset?
|
2 |
+
|
3 |
+
## Step 1: get the right json format
|
4 |
+
|
5 |
+
The training data requires preprocessing. First, place your training data in a loose json format, with one json containing a text sample per line. For example:
|
6 |
+
<pre>
|
7 |
+
{"src": "www.nvidia.com", "text": "The quick brown fox", "type": "Eng", "id": "0", "title": "First Part"}
|
8 |
+
{"src": "The Internet", "text": "jumps over the lazy dog", "type": "Eng", "id": "42", "title": "Second Part"}
|
9 |
+
</pre>
|
10 |
+
|
11 |
+
The name of the `text` field of the json can be changed by using the `--json_keys` flag in `preprocess_data.py`.
|
12 |
+
The other metadata are optional and are not used in training.
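For reference, such a loose-json file can be produced with a few lines of Python (paths and field values here are purely illustrative):

```python
import json

documents = [
    {"src": "www.nvidia.com", "text": "The quick brown fox", "type": "Eng", "id": "0", "title": "First Part"},
    {"src": "The Internet", "text": "jumps over the lazy dog", "type": "Eng", "id": "42", "title": "Second Part"},
]

# One JSON object per line; only the "text" key is required for training.
with open("train.json", "w") as f:
    for doc in documents:
        f.write(json.dumps(doc) + "\n")
```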
|
13 |
+
|
14 |
+
## Step 2: Tokenize
|
15 |
+
|
16 |
+
The loose json is then processed into a binary format for training. To convert the json into mmap, cached index file, or the lazy loader format use `preprocess_data.py`. Set the `--dataset_impl` flag to `mmap`, `cached`, or `lazy`, respectively (default is `mmap`). An example script to prepare data for Falcon training is:
|
17 |
+
<pre>
|
18 |
+
python3 tools/preprocess_data.py --input /scratch/dummy-data/train.json
|
19 |
+
--output_prefix wiki-train
|
20 |
+
--dataset_impl mmap
|
21 |
+
--tokenizer_type FalconTokenizer
|
22 |
+
--workers 2
|
23 |
+
--chunk_size 32
|
24 |
+
--append_eod
|
25 |
+
</pre>
|
26 |
+
|
27 |
+
The output will be two files whose names start with the `--output_prefix` given above (here `wiki-train`), one with a `.bin` and one with an `.idx` extension. The `--data_path` specified in later training is the full path and filename, but without the file extension.
|
28 |
+
|
29 |
+
Other options of `preprocess_data.py`:
|
30 |
+
|
31 |
+
```
|
32 |
+
input data:
|
33 |
+
--input INPUT Path to input JSON
|
34 |
+
--json_keys JSON_KEYS [JSON_KEYS ...]
|
35 |
+
space separate listed of keys to extract from json
|
36 |
+
--split_sentences Split documents into sentences.
|
37 |
+
--keep_newlines Keep newlines between sentences when splitting.
|
38 |
+
|
39 |
+
tokenizer:
|
40 |
+
--tokenizer_type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,FalconTokenizer}
|
41 |
+
What type of tokenizer to use.
|
42 |
+
--vocab_file VOCAB_FILE
|
43 |
+
Path to the vocab file
|
44 |
+
--merge_file MERGE_FILE
|
45 |
+
Path to the BPE merge file (if necessary).
|
46 |
+
--append_eod Append an <eod> token to the end of a document.
|
47 |
+
--lang LANG Language to use for NLTK-powered sentence splitting.
|
48 |
+
|
49 |
+
output data:
|
50 |
+
--output_prefix OUTPUT_PREFIX
|
51 |
+
Path to binary output file without suffix
|
52 |
+
--dataset_impl {lazy,cached,mmap}
|
53 |
+
|
54 |
+
runtime:
|
55 |
+
--workers WORKERS Number of worker processes to launch
|
56 |
+
--chunk_size CHUNK_SIZE
|
57 |
+
Chunk size assigned to each worker process
|
58 |
+
--log_interval LOG_INTERVAL
|
59 |
+
Interval between progress updates
|
60 |
+
--vocab_extra_ids VOCAB_EXTRA_IDS
|
61 |
+
--vocab_extra_ids_list VOCAB_EXTRA_IDS_LIST
|
62 |
+
comma separated list of special vocab ids to add to the tokenizer
|
63 |
+
--no_new_tokens Whether to add special tokens (e.g. CLS, MASK, etc) in the sentenciepiece tokenizer or not
|
64 |
+
```
|
65 |
+
|
66 |
+
If you want to tokenize using llama tokenizer:
|
67 |
+
```
|
68 |
+
python tools/preprocess_data.py \
|
69 |
+
--input=/path/to/data.json \
|
70 |
+
--output_prefix=wiki-train \
|
71 |
+
--dataset_impl=mmap \
|
72 |
+
--tokenizer_type=SentencePieceTokenizer \
|
73 |
+
--vocab_file=/path/to/tokenizer.model \
|
74 |
+
--workers=2 \
|
75 |
+
--chunk_size=32
|
76 |
+
```
|
multilinguality_megatron/docs/guide/weights_conversion.md
ADDED
@@ -0,0 +1,87 @@
1 |
+
# Weights conversion
|
2 |
+
|
3 |
+
## Huggingface to megatron: `hf_to_megatron.py`
|
4 |
+
|
5 |
+
Convert weights from models in other formats (primarily huggingface) to megatron checkpoints.
|
6 |
+
|
7 |
+
This script supports converting Falcon, LLaMa and LLaMa 2 weights to megatron checkpoints.
|
8 |
+
Depending on the model to convert, the inputs might differ.
|
9 |
+
|
10 |
+
- **Falcon**:
|
11 |
+
Weights are automatically retrieved from the official implementation hosted in huggingface.
|
12 |
+
Thus, the `--cache-dir` argument is optional, if specified it should point to
|
13 |
+
the huggingface cache directory where the huggingface Falcon weights will be stored.
|
14 |
+
You will need to specify the `--size` argument to determine which version to download
|
15 |
+
(i.e. Falcon 7B or 40B).
|
16 |
+
|
17 |
+
- **LLaMa**, **LLaMa 2** and **CodeLlama**:
|
18 |
+
Converting llama weights can be done either fetching the weights hosted
|
19 |
+
in huggingface (recommended as it is the easier method) or directly from the
|
20 |
+
weights provided by Meta.
|
21 |
+
|
22 |
+
- From Meta weights (only available for LLaMa and LLaMa 2):
|
23 |
+
You will need to specify the `--cache-dir` to the directory where the
|
24 |
+
llama weights are stored.
|
25 |
+
This will by default have the form `xB` (e.g. 7B or 70B) for llama v1,
|
26 |
+
or `llama-2-xb` (e.g. llama-2-7b) for llama v2.
|
27 |
+
|
28 |
+
- From huggingface weights:
|
29 |
+
If `--cache-dir` is not specified or the directory specified does not
|
30 |
+
contain the format expected from Meta weights, the converter will automatically
|
31 |
+
retrieve the weights from huggingface, in which case the `--cache-dir` will
|
32 |
+
have the same semantics as with Falcon.
|
33 |
+
|
34 |
+
Note that to download llama v2 weights from huggingface, you will need to
|
35 |
+
login using `huggingface-cli login` with a huggingface account which has been
|
36 |
+
granted access to the `meta-llama/Llama-2-7b-hf` model.
|
37 |
+
|
38 |
+
|
39 |
+
In all cases, the megatron checkpoint will be stored in the `--out` argument.
|
40 |
+
If a huggingface cache directory is specified, the intermediate weights (i.e. the huggingface weights)
|
41 |
+
stored therein will not be removed when the conversion succeeds.
|
42 |
+
|
43 |
+
More information about the arguments:
|
44 |
+
|
45 |
+
```
|
46 |
+
positional arguments:
|
47 |
+
{llama2,falcon,codellama,llama}
|
48 |
+
|
49 |
+
options:
|
50 |
+
-h, --help show this help message and exit
|
51 |
+
--size {65,34,70,7,40,13,30}
|
52 |
+
The size of the model
|
53 |
+
--out OUT Directory to store the megatron weights (as checkpoint)
|
54 |
+
--cache-dir CACHE_DIR
|
55 |
+
Directory to use as cache for the huggingface weights, or in case of the llama model, the path of the weights provided by Meta
|
56 |
+
```
|
57 |
+
|
58 |
+
## Megatron to huggingface: `megatron_to_hf.py`
|
59 |
+
|
60 |
+
Convert megatron checkpoints to huggingface weights.
|
61 |
+
|
62 |
+
This script will also convert the tokenizer configured.
|
63 |
+
Set the `--input_dir` to the megatron checkpoint root (i.e. where the
|
64 |
+
`latest_checkpointed_iteration.txt` file is located) and `--output_dir` to
|
65 |
+
the directory where the huggingface weights should be stored.
|
66 |
+
|
67 |
+
More information about the arguments:
|
68 |
+
|
69 |
+
```
|
70 |
+
options:
|
71 |
+
-h, --help show this help message and exit
|
72 |
+
--input_dir INPUT_DIR
|
73 |
+
Location of Megatron weights
|
74 |
+
--num_output_shards NUM_OUTPUT_SHARDS
|
75 |
+
--model {llama2,falcon,llama,codellama}
|
76 |
+
--output_dir OUTPUT_DIR
|
77 |
+
Location to write HF model and tokenizer
|
78 |
+
--cache_dir CACHE_DIR
|
79 |
+
Huggingface cache_dir (optional)
|
80 |
+
--vocab_file VOCAB_FILE
|
81 |
+
Path to the vocab file
|
82 |
+
--vocab_extra_ids_list VOCAB_EXTRA_IDS_LIST
|
83 |
+
comma separated list of special vocab ids to add to the tokenizer
|
84 |
+
--override_special_tokens [OVERRIDE_SPECIAL_TOKENS ...]
|
85 |
+
One or more arguments to override special tokens. Syntax set as `key=value`, e.g. `eos=<|im_end|>`. Overrides available only bos,
|
86 |
+
cls, eos, mask, pad, sep, unk.
|
87 |
+
```
|
multilinguality_megatron/docs/imgs/llama-falcon.png
ADDED
Git LFS Details
|
multilinguality_megatron/docs/index.rst
ADDED
@@ -0,0 +1,75 @@
1 |
+
Welcome to Megatron-LLM's documentation!
|
2 |
+
========================================
|
3 |
+
|
4 |
+
.. image:: imgs/llama-falcon.png
|
5 |
+
|
6 |
+
The `Megatron-LLM <https://github.com/epfLLM/Megatron-LLM/>`_ library enables pre-training and fine-tuning of large language models (LLMs) at scale.
|
7 |
+
Our repository is a modification of the `original Megatron-LM codebase <https://github.com/NVIDIA/Megatron-LM>`_ by Nvidia.
|
8 |
+
|
9 |
+
Added key features include:
|
10 |
+
|
11 |
+
- `LLaMa <https://arxiv.org/abs/2302.13971>`_, `LLaMa 2 <https://arxiv.org/abs/2307.09288>`_, `Falcon <https://huggingface.co/tiiuae>`_, and `Code Llama <https://together.ai/blog/llama-2-7b-32k>`_ support.
|
12 |
+
- support training of large models (70B Llama 2, 65B Llama 1, 34B Code Llama, and 40B Falcon) on commodity hardware on multiple nodes
|
13 |
+
- 3-way parallelism: tensor parallel, pipeline parallel and data parallel training (inherited from Megatron)
|
14 |
+
- full pretraining, finetuning and instruct tuning support
|
15 |
+
- Support for special tokens & tokenizers
|
16 |
+
- grouped-query attention (GQA) and multi-query attention (MQA)
|
17 |
+
- Rotary Position Embeddings (RoPE), RMS layer norm, Lima dropout
|
18 |
+
- `ROPE scaling <https://together.ai/blog/llama-2-7b-32k>`_ for longer attention context support
|
19 |
+
- FlashAttention 2
|
20 |
+
- BF16 / FP16 training
|
21 |
+
- WandB integration
|
22 |
+
- Metrics support: Ease to add custom metrics to evaluate on the validation set while training
|
23 |
+
- Conversion to and from Hugging Face hub
|
24 |
+
|
25 |
+
Example models trained with `Megatron-LLM <https://github.com/epfLLM/Megatron-LLM/>`_: See `README <https://github.com/epfLLM/Megatron-LLM/>`_.
|
26 |
+
|
27 |
+
User guide
|
28 |
+
----------
|
29 |
+
|
30 |
+
For information on installation and usage, take a look at our user guide.
|
31 |
+
|
32 |
+
.. toctree::
|
33 |
+
:maxdepth: 2
|
34 |
+
|
35 |
+
guide/index
|
36 |
+
|
37 |
+
|
38 |
+
API
|
39 |
+
---
|
40 |
+
|
41 |
+
Detailed information about Megatron-LLM components:
|
42 |
+
|
43 |
+
.. toctree::
|
44 |
+
:maxdepth: 2
|
45 |
+
|
46 |
+
api/index
|
47 |
+
|
48 |
+
|
49 |
+
|
50 |
+
|
51 |
+
Citation
|
52 |
+
--------
|
53 |
+
|
54 |
+
If you use this software please cite it:
|
55 |
+
|
56 |
+
.. code-block:: bib
|
57 |
+
|
58 |
+
@software{epfmgtrn,
|
59 |
+
author = {Alejandro Hernández Cano and
|
60 |
+
Matteo Pagliardini and
|
61 |
+
Andreas Köpf and
|
62 |
+
Kyle Matoba and
|
63 |
+
Amirkeivan Mohtashami and
|
64 |
+
Olivia Simin Fan and
|
65 |
+
Axel Marmet and
|
66 |
+
Deniz Bayazit and
|
67 |
+
Igor Krawczuk and
|
68 |
+
Zeming Chen and
|
69 |
+
Francesco Salvi and
|
70 |
+
Antoine Bosselut and
|
71 |
+
Martin Jaggi},
|
72 |
+
title = {epfLLM Megatron-LM},
|
73 |
+
year = 2023,
|
74 |
+
url = {https://github.com/epfLLM/Megatron-LLM}
|
75 |
+
}
|
multilinguality_megatron/docs/make.bat
ADDED
@@ -0,0 +1,35 @@
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
    set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
    echo.
    echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
    echo.installed, then set the SPHINXBUILD environment variable to point
    echo.to the full path of the 'sphinx-build' executable. Alternatively you
    echo.may add the Sphinx directory to PATH.
    echo.
    echo.If you don't have Sphinx installed, grab it from
    echo.https://www.sphinx-doc.org/
    exit /b 1
)

if "%1" == "" goto help

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
multilinguality_megatron/docs/requirements.txt
ADDED
@@ -0,0 +1,11 @@
sphinx == 7.1.0
pydata-sphinx-theme >= 0.13.0
myst-parser >= 2.0.0
flask >= 2.3.0
flask_restful >= 0.3.0
wandb >= 0.15.0
torch >= 2.0.0
regex >= 2023.6.0
numpy >= 1.25
pillow >= 10.0.0
einops >= 0.6.1
multilinguality_megatron/ducttape/10B_all_cleaned.tconf
ADDED
@@ -0,0 +1,80 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B/checkpoints
|
6 |
+
model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf
|
7 |
+
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model
|
8 |
+
|
9 |
+
train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)
|
10 |
+
|
11 |
+
threshold=(TrainLanguage:
|
12 |
+
en=516
|
13 |
+
es=275
|
14 |
+
de=611
|
15 |
+
fr=322
|
16 |
+
nl=649
|
17 |
+
pt=257
|
18 |
+
it=332
|
19 |
+
ru=334
|
20 |
+
zh=2041
|
21 |
+
ko=198
|
22 |
+
)
|
23 |
+
|
24 |
+
# number such that final tokens for each language are around 1B
|
25 |
+
n_tokens=(TrainLanguage:
|
26 |
+
en=1000000000
|
27 |
+
es=833333330
|
28 |
+
de=833333330
|
29 |
+
fr=833333330
|
30 |
+
nl=833333330
|
31 |
+
pt=833333330
|
32 |
+
it=833333330
|
33 |
+
ru=500000000
|
34 |
+
zh=13888888
|
35 |
+
ko=250000000
|
36 |
+
)
|
37 |
+
|
38 |
+
dataset_path=(TrainLanguage:
|
39 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
40 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
41 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
42 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
43 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
44 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
45 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
46 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
47 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
48 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
49 |
+
)
|
50 |
+
|
51 |
+
mix="10 10 10 10 10 10 10 10 10 10"
|
52 |
+
|
53 |
+
min_perplexity=50
|
54 |
+
|
55 |
+
size=(Size: 7 13)
|
56 |
+
|
57 |
+
log_interval=10
|
58 |
+
save_interval=635
|
59 |
+
eval_interval=635
|
60 |
+
train_steps=6358
|
61 |
+
|
62 |
+
lr_scheduler=cosine
|
63 |
+
warmup_steps=63
|
64 |
+
lr=3e-5
|
65 |
+
lr_min=3e-6
|
66 |
+
weight_decay=0.1
|
67 |
+
|
68 |
+
n_gpus=8
|
69 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
70 |
+
tp=(TP: 1 2 3 4)
|
71 |
+
pp=(PP: 1 2 3 4)
|
72 |
+
micro_batch_size=4
|
73 |
+
grad_accum_steps=12
|
74 |
+
vocab_size=32000
|
75 |
+
|
76 |
+
cpu_workers=16
|
77 |
+
wandb_run_id="llama2_7B_10b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198"
|
78 |
+
wikipedia=False
|
79 |
+
freeze_layers=""
|
80 |
+
}
|
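A quick sanity check of the `_10B` in `ducttape_output` above: multiplying the batch settings by the step count gives roughly ten billion training tokens, which also matches the per-language `n_tokens` budget of about 1B for each of the ten languages. This is only a back-of-the-envelope sketch; it assumes Llama 2's 4096-token sequence length and the TP=1, PP=1 branch point, neither of which is fixed by the config itself.

```python
# Rough token budget implied by 10B_all_cleaned.tconf
# (sequence length and the TP/PP branch are assumptions, not config values).
micro_batch_size = 4
grad_accum_steps = 12
n_gpus, tp, pp = 8, 1, 1      # assuming the TP=1, PP=1 branch point
seq_len = 4096                # assumed Llama 2 context length
train_steps = 6358

data_parallel = n_gpus // (tp * pp)
tokens_per_step = micro_batch_size * grad_accum_steps * data_parallel * seq_len
total = tokens_per_step * train_steps
print(f"{tokens_per_step:,} tokens/step, ~{total / 1e9:.1f}B tokens total")
# -> 1,572,864 tokens/step, ~10.0B tokens total
```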
multilinguality_megatron/ducttape/10B_all_cleaned_13B.tconf
ADDED
@@ -0,0 +1,80 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/llama2_13B_all
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/llama2_13B_all/checkpoints
|
6 |
+
model_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/db6b8eb1feabb38985fdf785a89895959e944936
|
7 |
+
tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/db6b8eb1feabb38985fdf785a89895959e944936/tokenizer.model
|
8 |
+
|
9 |
+
train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)
|
10 |
+
|
11 |
+
threshold=(TrainLanguage:
|
12 |
+
en=516
|
13 |
+
es=275
|
14 |
+
de=611
|
15 |
+
fr=322
|
16 |
+
nl=649
|
17 |
+
pt=257
|
18 |
+
it=332
|
19 |
+
ru=334
|
20 |
+
zh=2041
|
21 |
+
ko=198
|
22 |
+
)
|
23 |
+
|
24 |
+
# number such that final tokens for each language are around 1B
|
25 |
+
n_tokens=(TrainLanguage:
|
26 |
+
en=1000000000
|
27 |
+
es=833333330
|
28 |
+
de=833333330
|
29 |
+
fr=833333330
|
30 |
+
nl=833333330
|
31 |
+
pt=833333330
|
32 |
+
it=833333330
|
33 |
+
ru=500000000
|
34 |
+
zh=13888888
|
35 |
+
ko=250000000
|
36 |
+
)
|
37 |
+
|
38 |
+
dataset_path=(TrainLanguage:
|
39 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
40 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
41 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
42 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
43 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
44 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
45 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
46 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
47 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
48 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
49 |
+
)
|
50 |
+
|
51 |
+
mix="10 10 10 10 10 10 10 10 10 10"
|
52 |
+
|
53 |
+
min_perplexity=50
|
54 |
+
|
55 |
+
size=(Size: 7 13)
|
56 |
+
|
57 |
+
log_interval=1
|
58 |
+
save_interval=10
|
59 |
+
eval_interval=635
|
60 |
+
train_steps=10
|
61 |
+
|
62 |
+
lr_scheduler=cosine
|
63 |
+
warmup_steps=0
|
64 |
+
lr=3e-5
|
65 |
+
lr_min=3e-6
|
66 |
+
weight_decay=0.1
|
67 |
+
|
68 |
+
n_gpus=8
|
69 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
70 |
+
tp=(TP: 1 2 3 4 5 6 7 8)
|
71 |
+
pp=(PP: 1 2 3 4)
|
72 |
+
micro_batch_size=4
|
73 |
+
grad_accum_steps=12
|
74 |
+
vocab_size=32000
|
75 |
+
|
76 |
+
cpu_workers=16
|
77 |
+
wandb_run_id="test_llama_13B"
|
78 |
+
wikipedia=False
|
79 |
+
freeze_layers=""
|
80 |
+
}
|
multilinguality_megatron/ducttape/10B_all_cleaned_extend32.tconf
ADDED
@@ -0,0 +1,83 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32/checkpoints
|
6 |
+
model_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit
|
7 |
+
tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model
|
8 |
+
|
9 |
+
train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)
|
10 |
+
|
11 |
+
posterior_tokens=False
|
12 |
+
n_posterior_tokens=False
|
13 |
+
|
14 |
+
threshold=(TrainLanguage:
|
15 |
+
en=516
|
16 |
+
es=275
|
17 |
+
de=611
|
18 |
+
fr=322
|
19 |
+
nl=649
|
20 |
+
pt=257
|
21 |
+
it=332
|
22 |
+
ru=334
|
23 |
+
zh=2041
|
24 |
+
ko=198
|
25 |
+
)
|
26 |
+
|
27 |
+
n_tokens=(TrainLanguage:
|
28 |
+
en=900000000
|
29 |
+
es=900000000
|
30 |
+
de=900000000
|
31 |
+
fr=900000000
|
32 |
+
nl=900000000
|
33 |
+
pt=900000000
|
34 |
+
it=900000000
|
35 |
+
ru=550000000
|
36 |
+
zh=20000000
|
37 |
+
ko=450000000
|
38 |
+
)
|
39 |
+
|
40 |
+
dataset_path=(TrainLanguage:
|
41 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
42 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
43 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
44 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
45 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
46 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
47 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
48 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
49 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
50 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
51 |
+
)
|
52 |
+
|
53 |
+
mix="10 10 10 10 10 10 10 10 10 10"
|
54 |
+
|
55 |
+
min_perplexity=50
|
56 |
+
|
57 |
+
size=(Size: 7 13)
|
58 |
+
|
59 |
+
log_interval=10
|
60 |
+
save_interval=635
|
61 |
+
eval_interval=635
|
62 |
+
train_steps=6358
|
63 |
+
|
64 |
+
lr_scheduler=cosine
|
65 |
+
warmup_steps=63
|
66 |
+
lr=3e-5
|
67 |
+
lr_min=3e-6
|
68 |
+
weight_decay=0.1
|
69 |
+
|
70 |
+
n_gpus=8
|
71 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
72 |
+
tp=(TP: 1 2 3 4)
|
73 |
+
pp=(PP: 1 2 3 4)
|
74 |
+
micro_batch_size=4
|
75 |
+
grad_accum_steps=12
|
76 |
+
vocab_size=52620
|
77 |
+
eval_iters=1
|
78 |
+
|
79 |
+
cpu_workers=16
|
80 |
+
wandb_run_id="NEW_llama2_7B_10b_extend32_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198"
|
81 |
+
wikipedia=False
|
82 |
+
freeze_layers=""
|
83 |
+
}
|
multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmed_up.tconf
ADDED
@@ -0,0 +1,84 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32/warmed_up_checkpoints
|
6 |
+
# for warmed up models, the model path points to the sharded megatron checkpoint
|
7 |
+
model_path=/mnt/data/shared/multilingual_llm/experiments_megatron/warmup_embeddings_llama2_all_1B_extend32/checkpoints
|
8 |
+
tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model
|
9 |
+
|
10 |
+
train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)
|
11 |
+
|
12 |
+
wikipedia=False
|
13 |
+
posterior_tokens=False
|
14 |
+
n_posterior_tokens=False
|
15 |
+
freeze_layers=""
|
16 |
+
|
17 |
+
threshold=(TrainLanguage:
|
18 |
+
en=516
|
19 |
+
es=275
|
20 |
+
de=611
|
21 |
+
fr=322
|
22 |
+
nl=649
|
23 |
+
pt=257
|
24 |
+
it=332
|
25 |
+
ru=334
|
26 |
+
zh=2041
|
27 |
+
ko=198
|
28 |
+
)
|
29 |
+
|
30 |
+
n_tokens=(TrainLanguage:
|
31 |
+
en=900000000
|
32 |
+
es=900000000
|
33 |
+
de=900000000
|
34 |
+
fr=900000000
|
35 |
+
nl=900000000
|
36 |
+
pt=900000000
|
37 |
+
it=900000000
|
38 |
+
ru=550000000
|
39 |
+
zh=20000000
|
40 |
+
ko=450000000
|
41 |
+
)
|
42 |
+
|
43 |
+
dataset_path=(TrainLanguage:
|
44 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
45 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
46 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
47 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
48 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
49 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
50 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
51 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
52 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
53 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
54 |
+
)
|
55 |
+
|
56 |
+
mix="10 10 10 10 10 10 10 10 10 10"
|
57 |
+
|
58 |
+
min_perplexity=50
|
59 |
+
|
60 |
+
size=(Size: 7 13)
|
61 |
+
|
62 |
+
log_interval=10
|
63 |
+
save_interval=127
|
64 |
+
eval_interval=635
|
65 |
+
train_steps=6358
|
66 |
+
eval_iters=1
|
67 |
+
|
68 |
+
lr_scheduler=cosine
|
69 |
+
warmup_steps=63
|
70 |
+
lr=3e-5
|
71 |
+
lr_min=3e-6
|
72 |
+
weight_decay=0.1
|
73 |
+
|
74 |
+
n_gpus=8
|
75 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
76 |
+
tp=(TP: 1 2 3 4)
|
77 |
+
pp=(PP: 1 2 3 4)
|
78 |
+
micro_batch_size=4
|
79 |
+
grad_accum_steps=12
|
80 |
+
vocab_size=52620
|
81 |
+
|
82 |
+
cpu_workers=16
|
83 |
+
wandb_run_id="NEW_warmed_up_llama2_7B_10b_extend32_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198"
|
84 |
+
}
|
multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmup.tconf
ADDED
@@ -0,0 +1,93 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/warmup_embeddings_llama2_all_1B_extend32/
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/warmup_embeddings_llama2_all_1B_extend32/checkpoints
|
6 |
+
model_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit
|
7 |
+
tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model
|
8 |
+
|
9 |
+
train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)
|
10 |
+
|
11 |
+
threshold=(TrainLanguage:
|
12 |
+
en=516
|
13 |
+
es=275
|
14 |
+
de=611
|
15 |
+
fr=322
|
16 |
+
nl=649
|
17 |
+
pt=257
|
18 |
+
it=332
|
19 |
+
ru=334
|
20 |
+
zh=2041
|
21 |
+
ko=198
|
22 |
+
)
|
23 |
+
|
24 |
+
posterior_tokens=True
|
25 |
+
n_tokens=(TrainLanguage:
|
26 |
+
en=900000000
|
27 |
+
es=900000000
|
28 |
+
de=900000000
|
29 |
+
fr=900000000
|
30 |
+
nl=900000000
|
31 |
+
pt=900000000
|
32 |
+
it=900000000
|
33 |
+
ru=550000000
|
34 |
+
zh=20000000
|
35 |
+
ko=450000000
|
36 |
+
)
|
37 |
+
n_posterior_tokens=(TrainLanguage:
|
38 |
+
en=180000000
|
39 |
+
es=180000000
|
40 |
+
de=180000000
|
41 |
+
fr=180000000
|
42 |
+
nl=180000000
|
43 |
+
pt=180000000
|
44 |
+
it=180000000
|
45 |
+
ru=100000000
|
46 |
+
zh=4000000
|
47 |
+
ko=90000000
|
48 |
+
)
|
49 |
+
|
50 |
+
dataset_path=(TrainLanguage:
|
51 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
52 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
53 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
54 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
55 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
56 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
57 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
58 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
59 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
60 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
61 |
+
)
|
62 |
+
|
63 |
+
mix="10 10 10 10 10 10 10 10 10 10"
|
64 |
+
|
65 |
+
min_perplexity=50
|
66 |
+
|
67 |
+
size=(Size: 7 13)
|
68 |
+
|
69 |
+
log_interval=5
|
70 |
+
save_interval=635
|
71 |
+
eval_interval=635
|
72 |
+
train_steps=635
|
73 |
+
eval_iters=0
|
74 |
+
|
75 |
+
lr_scheduler=constant
|
76 |
+
warmup_steps=0
|
77 |
+
lr=3e-4
|
78 |
+
lr_min=3e-4
|
79 |
+
weight_decay=0.1
|
80 |
+
|
81 |
+
n_gpus=8
|
82 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
83 |
+
tp=(TP: 1 2 3 4)
|
84 |
+
pp=(PP: 1 2 3 4)
|
85 |
+
micro_batch_size=4
|
86 |
+
grad_accum_steps=12
|
87 |
+
vocab_size=52620
|
88 |
+
|
89 |
+
cpu_workers=16
|
90 |
+
wandb_run_id="NEW_EMBEDDINGS_ONLY_llama2_7B_10b_extend32_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198"
|
91 |
+
wikipedia=False
|
92 |
+
freeze_layers="not_embeddings"
|
93 |
+
}
|
multilinguality_megatron/ducttape/10B_all_wikipedia.tconf
ADDED
@@ -0,0 +1,83 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B/
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B/checkpoints
|
6 |
+
model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf
|
7 |
+
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model
|
8 |
+
|
9 |
+
train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)
|
10 |
+
|
11 |
+
threshold=(TrainLanguage:
|
12 |
+
en=516
|
13 |
+
es=275
|
14 |
+
de=611
|
15 |
+
fr=322
|
16 |
+
nl=649
|
17 |
+
pt=257
|
18 |
+
it=332
|
19 |
+
ru=334
|
20 |
+
zh=2041
|
21 |
+
ko=198
|
22 |
+
)
|
23 |
+
|
24 |
+
# number such that final tokens for each language are around 1B
|
25 |
+
n_tokens=(TrainLanguage:
|
26 |
+
en=1000000000
|
27 |
+
es=833333330
|
28 |
+
de=833333330
|
29 |
+
fr=833333330
|
30 |
+
nl=833333330
|
31 |
+
pt=833333330
|
32 |
+
it=833333330
|
33 |
+
ru=500000000
|
34 |
+
zh=13888888
|
35 |
+
ko=250000000
|
36 |
+
)
|
37 |
+
|
38 |
+
dataset_path=(TrainLanguage:
|
39 |
+
en=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/en
|
40 |
+
es=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/es
|
41 |
+
de=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/de
|
42 |
+
fr=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/fr
|
43 |
+
nl=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/nl
|
44 |
+
pt=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/pt
|
45 |
+
it=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/it
|
46 |
+
ru=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ru
|
47 |
+
zh=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/zh
|
48 |
+
ko=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ko
|
49 |
+
)
|
50 |
+
|
51 |
+
mix="10 10 10 10 10 10 10 10 10 10"
|
52 |
+
|
53 |
+
min_perplexity=50
|
54 |
+
|
55 |
+
size=(Size: 7 13)
|
56 |
+
|
57 |
+
log_interval=10
|
58 |
+
save_interval=127
|
59 |
+
eval_interval=635
|
60 |
+
train_steps=6358
|
61 |
+
|
62 |
+
lr_scheduler=cosine
|
63 |
+
warmup_steps=63
|
64 |
+
lr=3e-5
|
65 |
+
lr_min=3e-6
|
66 |
+
weight_decay=0.1
|
67 |
+
|
68 |
+
n_gpus=4
|
69 |
+
gpu_ids=0,1,2,3
|
70 |
+
tp=(TP: 1 2 3 4)
|
71 |
+
pp=(PP: 1 2 3 4)
|
72 |
+
micro_batch_size=4
|
73 |
+
grad_accum_steps=24
|
74 |
+
vocab_size=32000
|
75 |
+
|
76 |
+
cpu_workers=16
|
77 |
+
wandb_run_id="WIKIPEDIA_llama2_7B_10b_base_vocab_uniform"
|
78 |
+
wikipedia=True
|
79 |
+
freeze_layers=""
|
80 |
+
posterior_tokens=False
|
81 |
+
n_posterior_tokens=False
|
82 |
+
eval_iters=0
|
83 |
+
}
|
multilinguality_megatron/ducttape/20B_all_cleaned_mc4.tconf
ADDED
@@ -0,0 +1,113 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_checkpoints
|
6 |
+
model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9
|
7 |
+
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model
|
8 |
+
|
9 |
+
dataset=(Dataset: en de fr es it nl pt ru zh ko)
|
10 |
+
|
11 |
+
dataset_path=(Dataset:
|
12 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
13 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
14 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
15 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
16 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
17 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
18 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
19 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
20 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
21 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
22 |
+
)
|
23 |
+
|
24 |
+
is_hf_dataset=(Dataset:
|
25 |
+
en=True
|
26 |
+
es=False
|
27 |
+
de=False
|
28 |
+
fr=False
|
29 |
+
nl=False
|
30 |
+
pt=False
|
31 |
+
it=False
|
32 |
+
ru=False
|
33 |
+
zh=False
|
34 |
+
ko=False
|
35 |
+
)
|
36 |
+
|
37 |
+
threshold=(Dataset:
|
38 |
+
en=516
|
39 |
+
es=275
|
40 |
+
de=611
|
41 |
+
fr=322
|
42 |
+
nl=649
|
43 |
+
pt=257
|
44 |
+
it=332
|
45 |
+
ru=334
|
46 |
+
zh=2041
|
47 |
+
ko=198
|
48 |
+
)
|
49 |
+
|
50 |
+
datamix_weights=(
|
51 |
+
DataMix:
|
52 |
+
mc4_uniform=(
|
53 |
+
Dataset:
|
54 |
+
en=100
|
55 |
+
es=100
|
56 |
+
de=100
|
57 |
+
fr=100
|
58 |
+
nl=100
|
59 |
+
pt=100
|
60 |
+
it=100
|
61 |
+
ru=100
|
62 |
+
zh=100
|
63 |
+
ko=100
|
64 |
+
)
|
65 |
+
)
|
66 |
+
|
67 |
+
# number such that final tokens for each language are around 1B
|
68 |
+
n_tokens=(Dataset:
|
69 |
+
en=1000000000
|
70 |
+
es=833333330
|
71 |
+
de=833333330
|
72 |
+
fr=833333330
|
73 |
+
nl=833333330
|
74 |
+
pt=833333330
|
75 |
+
it=833333330
|
76 |
+
ru=500000000
|
77 |
+
zh=13888888
|
78 |
+
ko=250000000
|
79 |
+
)
|
80 |
+
|
81 |
+
min_perplexity=50
|
82 |
+
|
83 |
+
size=(Size: 7 13)
|
84 |
+
|
85 |
+
log_interval=1
|
86 |
+
save_interval=635
|
87 |
+
eval_interval=635
|
88 |
+
train_steps=12700
|
89 |
+
|
90 |
+
lr_scheduler=cosine
|
91 |
+
warmup_steps=127
|
92 |
+
lr=3e-5
|
93 |
+
lr_min=3e-6
|
94 |
+
weight_decay=0.1
|
95 |
+
|
96 |
+
n_gpus=8
|
97 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
98 |
+
tp=(TP: 1 2 3 4)
|
99 |
+
pp=(PP: 1 2 3 4)
|
100 |
+
micro_batch_size=4
|
101 |
+
grad_accum_steps=12
|
102 |
+
vocab_size=32000
|
103 |
+
|
104 |
+
cpu_workers=16
|
105 |
+
wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33"
|
106 |
+
wikipedia=False
|
107 |
+
freeze_layers=""
|
108 |
+
posterior_tokens=False
|
109 |
+
n_posterior_tokens=False
|
110 |
+
eval_iters=1
|
111 |
+
is_parallel=False
|
112 |
+
lp=""
|
113 |
+
}
|
multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel.tconf
ADDED
@@ -0,0 +1,264 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_parallel_checkpoints
|
6 |
+
model_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852
|
7 |
+
tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model
|
8 |
+
|
9 |
+
dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
|
10 |
+
|
11 |
+
dataset_path=(Dataset:
|
12 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
13 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
14 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
15 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
16 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
17 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
18 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
19 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
20 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
21 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
22 |
+
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
23 |
+
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
24 |
+
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
25 |
+
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
26 |
+
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
27 |
+
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
28 |
+
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
29 |
+
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
30 |
+
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
31 |
+
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
32 |
+
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
33 |
+
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
34 |
+
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
35 |
+
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
36 |
+
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
37 |
+
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
38 |
+
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
39 |
+
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
40 |
+
)
|
41 |
+
|
42 |
+
is_hf_dataset=(Dataset:
|
43 |
+
en=True
|
44 |
+
es=False
|
45 |
+
de=False
|
46 |
+
fr=False
|
47 |
+
nl=False
|
48 |
+
pt=False
|
49 |
+
it=False
|
50 |
+
ru=False
|
51 |
+
zh=False
|
52 |
+
ko=False
|
53 |
+
en_de=False
|
54 |
+
de_en=False
|
55 |
+
en_fr=False
|
56 |
+
fr_en=False
|
57 |
+
en_es=False
|
58 |
+
es_en=False
|
59 |
+
en_it=False
|
60 |
+
it_en=False
|
61 |
+
en_nl=False
|
62 |
+
nl_en=False
|
63 |
+
en_pt=False
|
64 |
+
pt_en=False
|
65 |
+
en_ru=False
|
66 |
+
ru_en=False
|
67 |
+
en_zh=False
|
68 |
+
zh_en=False
|
69 |
+
en_ko=False
|
70 |
+
ko_en=False
|
71 |
+
)
|
72 |
+
|
73 |
+
threshold=(Dataset:
|
74 |
+
en=516
|
75 |
+
es=275
|
76 |
+
de=611
|
77 |
+
fr=322
|
78 |
+
nl=649
|
79 |
+
pt=257
|
80 |
+
it=332
|
81 |
+
ru=334
|
82 |
+
zh=2041
|
83 |
+
ko=198
|
84 |
+
en_de=100000
|
85 |
+
de_en=100000
|
86 |
+
en_fr=100000
|
87 |
+
fr_en=100000
|
88 |
+
en_es=100000
|
89 |
+
es_en=100000
|
90 |
+
en_it=100000
|
91 |
+
it_en=100000
|
92 |
+
en_nl=100000
|
93 |
+
nl_en=100000
|
94 |
+
en_pt=100000
|
95 |
+
pt_en=100000
|
96 |
+
en_ru=100000
|
97 |
+
ru_en=100000
|
98 |
+
en_zh=100000
|
99 |
+
zh_en=100000
|
100 |
+
en_ko=100000
|
101 |
+
ko_en=100000
|
102 |
+
)
|
103 |
+
|
104 |
+
# roughly 67% for mc4, 33% for total parallel data
|
105 |
+
datamix_weights=(
|
106 |
+
DataMix:
|
107 |
+
mc4_parallel_uniform=(
|
108 |
+
Dataset:
|
109 |
+
en=670
|
110 |
+
es=670
|
111 |
+
de=670
|
112 |
+
fr=670
|
113 |
+
nl=670
|
114 |
+
pt=670
|
115 |
+
it=670
|
116 |
+
ru=670
|
117 |
+
zh=670
|
118 |
+
ko=670
|
119 |
+
en_de=183
|
120 |
+
de_en=183
|
121 |
+
en_fr=183
|
122 |
+
fr_en=183
|
123 |
+
en_es=183
|
124 |
+
es_en=183
|
125 |
+
en_it=183
|
126 |
+
it_en=183
|
127 |
+
en_nl=183
|
128 |
+
nl_en=183
|
129 |
+
en_pt=183
|
130 |
+
pt_en=183
|
131 |
+
en_ru=183
|
132 |
+
ru_en=183
|
133 |
+
en_zh=183
|
134 |
+
zh_en=183
|
135 |
+
en_ko=183
|
136 |
+
ko_en=183
|
137 |
+
)
|
138 |
+
)
|
139 |
+
|
140 |
+
# number such that final tokens for each language are around 1B
|
141 |
+
n_tokens=(Dataset:
|
142 |
+
en=1000000000
|
143 |
+
es=833333330
|
144 |
+
de=833333330
|
145 |
+
fr=833333330
|
146 |
+
nl=833333330
|
147 |
+
pt=833333330
|
148 |
+
it=833333330
|
149 |
+
ru=500000000
|
150 |
+
zh=13888888
|
151 |
+
ko=250000000
|
152 |
+
en_de=20000000
|
153 |
+
de_en=20000000
|
154 |
+
en_fr=20000000
|
155 |
+
fr_en=20000000
|
156 |
+
en_es=20000000
|
157 |
+
es_en=20000000
|
158 |
+
en_it=20000000
|
159 |
+
it_en=20000000
|
160 |
+
en_nl=20000000
|
161 |
+
nl_en=20000000
|
162 |
+
en_pt=20000000
|
163 |
+
pt_en=20000000
|
164 |
+
en_ru=20000000
|
165 |
+
ru_en=20000000
|
166 |
+
en_zh=20000000
|
167 |
+
zh_en=20000000
|
168 |
+
en_ko=20000000
|
169 |
+
ko_en=20000000
|
170 |
+
)
|
171 |
+
|
172 |
+
is_parallel=(Dataset:
|
173 |
+
en=False
|
174 |
+
es=False
|
175 |
+
de=False
|
176 |
+
fr=False
|
177 |
+
nl=False
|
178 |
+
pt=False
|
179 |
+
it=False
|
180 |
+
ru=False
|
181 |
+
zh=False
|
182 |
+
ko=False
|
183 |
+
en_de=True
|
184 |
+
de_en=True
|
185 |
+
en_fr=True
|
186 |
+
fr_en=True
|
187 |
+
en_es=True
|
188 |
+
es_en=True
|
189 |
+
en_it=True
|
190 |
+
it_en=True
|
191 |
+
en_nl=True
|
192 |
+
nl_en=True
|
193 |
+
en_pt=True
|
194 |
+
pt_en=True
|
195 |
+
en_ru=True
|
196 |
+
ru_en=True
|
197 |
+
en_zh=True
|
198 |
+
zh_en=True
|
199 |
+
en_ko=True
|
200 |
+
ko_en=True
|
201 |
+
)
|
202 |
+
|
203 |
+
lp=(Dataset:
|
204 |
+
en=""
|
205 |
+
es=""
|
206 |
+
de=""
|
207 |
+
fr=""
|
208 |
+
nl=""
|
209 |
+
pt=""
|
210 |
+
it=""
|
211 |
+
ru=""
|
212 |
+
zh=""
|
213 |
+
ko=""
|
214 |
+
en_de="en-de"
|
215 |
+
de_en="de-en"
|
216 |
+
en_fr="en-fr"
|
217 |
+
fr_en="fr-en"
|
218 |
+
en_es="en-es"
|
219 |
+
es_en="es-en"
|
220 |
+
en_it="en-it"
|
221 |
+
it_en="it-en"
|
222 |
+
en_nl="en-nl"
|
223 |
+
nl_en="nl-en"
|
224 |
+
en_pt="en-pt"
|
225 |
+
pt_en="pt-en"
|
226 |
+
en_ru="en-ru"
|
227 |
+
ru_en="ru-en"
|
228 |
+
en_zh="en-zh"
|
229 |
+
zh_en="zh-en"
|
230 |
+
en_ko="en-ko"
|
231 |
+
ko_en="ko-en"
|
232 |
+
)
|
233 |
+
|
234 |
+
min_perplexity=50
|
235 |
+
|
236 |
+
size=(Size: 7 13)
|
237 |
+
|
238 |
+
log_interval=1
|
239 |
+
save_interval=635
|
240 |
+
eval_interval=635
|
241 |
+
train_steps=12700
|
242 |
+
|
243 |
+
lr_scheduler=cosine
|
244 |
+
warmup_steps=127
|
245 |
+
lr=3e-5
|
246 |
+
lr_min=3e-6
|
247 |
+
weight_decay=0.1
|
248 |
+
|
249 |
+
n_gpus=8
|
250 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
251 |
+
tp=(TP: 1 2 3 4)
|
252 |
+
pp=(PP: 1 2 3 4)
|
253 |
+
micro_batch_size=4
|
254 |
+
grad_accum_steps=12
|
255 |
+
vocab_size=32000
|
256 |
+
|
257 |
+
cpu_workers=16
|
258 |
+
wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33"
|
259 |
+
wikipedia=False
|
260 |
+
freeze_layers=""
|
261 |
+
posterior_tokens=False
|
262 |
+
n_posterior_tokens=0
|
263 |
+
eval_iters=1
|
264 |
+
}
|
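The `datamix_weights` block in the file above is commented as roughly 67% mC4 and 33% parallel data. A small sketch confirms that the listed weights (ten monolingual datasets at 670 and eighteen translation directions at 183) do produce that split:

```python
# Check the 67/33 split implied by datamix_weights in 20B_all_cleaned_mc4_parallel.tconf.
monolingual = 10 * 670   # en, es, de, fr, nl, pt, it, ru, zh, ko
parallel = 18 * 183      # 9 language pairs x 2 directions
total = monolingual + parallel
print(f"monolingual: {monolingual / total:.1%}, parallel: {parallel / total:.1%}")
# -> monolingual: 67.0%, parallel: 33.0%
```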
multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_13b.tconf
ADDED
@@ -0,0 +1,264 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_13B_all_20B
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_13B_all_20B/mc4_parallel_checkpoints
|
6 |
+
model_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/dc1d3b3bfdb69df26f8fc966c16353274b138c55
|
7 |
+
tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/dc1d3b3bfdb69df26f8fc966c16353274b138c55/tokenizer.model
|
8 |
+
|
9 |
+
dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
|
10 |
+
|
11 |
+
dataset_path=(Dataset:
|
12 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
13 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
14 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
15 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
16 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
17 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
18 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
19 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
20 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
21 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
22 |
+
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
23 |
+
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
24 |
+
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
25 |
+
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
26 |
+
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
27 |
+
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
28 |
+
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
29 |
+
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
30 |
+
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
31 |
+
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
32 |
+
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
33 |
+
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
34 |
+
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
35 |
+
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
36 |
+
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
37 |
+
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
38 |
+
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
39 |
+
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
40 |
+
)
|
41 |
+
|
42 |
+
is_hf_dataset=(Dataset:
|
43 |
+
en=True
|
44 |
+
es=False
|
45 |
+
de=False
|
46 |
+
fr=False
|
47 |
+
nl=False
|
48 |
+
pt=False
|
49 |
+
it=False
|
50 |
+
ru=False
|
51 |
+
zh=False
|
52 |
+
ko=False
|
53 |
+
en_de=False
|
54 |
+
de_en=False
|
55 |
+
en_fr=False
|
56 |
+
fr_en=False
|
57 |
+
en_es=False
|
58 |
+
es_en=False
|
59 |
+
en_it=False
|
60 |
+
it_en=False
|
61 |
+
en_nl=False
|
62 |
+
nl_en=False
|
63 |
+
en_pt=False
|
64 |
+
pt_en=False
|
65 |
+
en_ru=False
|
66 |
+
ru_en=False
|
67 |
+
en_zh=False
|
68 |
+
zh_en=False
|
69 |
+
en_ko=False
|
70 |
+
ko_en=False
|
71 |
+
)
|
72 |
+
|
73 |
+
threshold=(Dataset:
|
74 |
+
en=516
|
75 |
+
es=275
|
76 |
+
de=611
|
77 |
+
fr=322
|
78 |
+
nl=649
|
79 |
+
pt=257
|
80 |
+
it=332
|
81 |
+
ru=334
|
82 |
+
zh=2041
|
83 |
+
ko=198
|
84 |
+
en_de=100000
|
85 |
+
de_en=100000
|
86 |
+
en_fr=100000
|
87 |
+
fr_en=100000
|
88 |
+
en_es=100000
|
89 |
+
es_en=100000
|
90 |
+
en_it=100000
|
91 |
+
it_en=100000
|
92 |
+
en_nl=100000
|
93 |
+
nl_en=100000
|
94 |
+
en_pt=100000
|
95 |
+
pt_en=100000
|
96 |
+
en_ru=100000
|
97 |
+
ru_en=100000
|
98 |
+
en_zh=100000
|
99 |
+
zh_en=100000
|
100 |
+
en_ko=100000
|
101 |
+
ko_en=100000
|
102 |
+
)
|
103 |
+
|
104 |
+
# roughly 67% for mc4, 33% for total parallel data
|
105 |
+
datamix_weights=(
|
106 |
+
DataMix:
|
107 |
+
mc4_parallel_uniform=(
|
108 |
+
Dataset:
|
109 |
+
en=670
|
110 |
+
es=670
|
111 |
+
de=670
|
112 |
+
fr=670
|
113 |
+
nl=670
|
114 |
+
pt=670
|
115 |
+
it=670
|
116 |
+
ru=670
|
117 |
+
zh=670
|
118 |
+
ko=670
|
119 |
+
en_de=183
|
120 |
+
de_en=183
|
121 |
+
en_fr=183
|
122 |
+
fr_en=183
|
123 |
+
en_es=183
|
124 |
+
es_en=183
|
125 |
+
en_it=183
|
126 |
+
it_en=183
|
127 |
+
en_nl=183
|
128 |
+
nl_en=183
|
129 |
+
en_pt=183
|
130 |
+
pt_en=183
|
131 |
+
en_ru=183
|
132 |
+
ru_en=183
|
133 |
+
en_zh=183
|
134 |
+
zh_en=183
|
135 |
+
en_ko=183
|
136 |
+
ko_en=183
|
137 |
+
)
|
138 |
+
)
|
139 |
+
|
140 |
+
# number such that final tokens for each language are around 1B
|
141 |
+
n_tokens=(Dataset:
|
142 |
+
en=1000000000
|
143 |
+
es=833333330
|
144 |
+
de=833333330
|
145 |
+
fr=833333330
|
146 |
+
nl=833333330
|
147 |
+
pt=833333330
|
148 |
+
it=833333330
|
149 |
+
ru=500000000
|
150 |
+
zh=13888888
|
151 |
+
ko=250000000
|
152 |
+
en_de=20000000
|
153 |
+
de_en=20000000
|
154 |
+
en_fr=20000000
|
155 |
+
fr_en=20000000
|
156 |
+
en_es=20000000
|
157 |
+
es_en=20000000
|
158 |
+
en_it=20000000
|
159 |
+
it_en=20000000
|
160 |
+
en_nl=20000000
|
161 |
+
nl_en=20000000
|
162 |
+
en_pt=20000000
|
163 |
+
pt_en=20000000
|
164 |
+
en_ru=20000000
|
165 |
+
ru_en=20000000
|
166 |
+
en_zh=20000000
|
167 |
+
zh_en=20000000
|
168 |
+
en_ko=20000000
|
169 |
+
ko_en=20000000
|
170 |
+
)
|
171 |
+
|
172 |
+
is_parallel=(Dataset:
|
173 |
+
en=False
|
174 |
+
es=False
|
175 |
+
de=False
|
176 |
+
fr=False
|
177 |
+
nl=False
|
178 |
+
pt=False
|
179 |
+
it=False
|
180 |
+
ru=False
|
181 |
+
zh=False
|
182 |
+
ko=False
|
183 |
+
en_de=True
|
184 |
+
de_en=True
|
185 |
+
en_fr=True
|
186 |
+
fr_en=True
|
187 |
+
en_es=True
|
188 |
+
es_en=True
|
189 |
+
en_it=True
|
190 |
+
it_en=True
|
191 |
+
en_nl=True
|
192 |
+
nl_en=True
|
193 |
+
en_pt=True
|
194 |
+
pt_en=True
|
195 |
+
en_ru=True
|
196 |
+
ru_en=True
|
197 |
+
en_zh=True
|
198 |
+
zh_en=True
|
199 |
+
en_ko=True
|
200 |
+
ko_en=True
|
201 |
+
)
|
202 |
+
|
203 |
+
lp=(Dataset:
|
204 |
+
en=""
|
205 |
+
es=""
|
206 |
+
de=""
|
207 |
+
fr=""
|
208 |
+
nl=""
|
209 |
+
pt=""
|
210 |
+
it=""
|
211 |
+
ru=""
|
212 |
+
zh=""
|
213 |
+
ko=""
|
214 |
+
en_de="en-de"
|
215 |
+
de_en="de-en"
|
216 |
+
en_fr="en-fr"
|
217 |
+
fr_en="fr-en"
|
218 |
+
en_es="en-es"
|
219 |
+
es_en="es-en"
|
220 |
+
en_it="en-it"
|
221 |
+
it_en="it-en"
|
222 |
+
en_nl="en-nl"
|
223 |
+
nl_en="nl-en"
|
224 |
+
en_pt="en-pt"
|
225 |
+
pt_en="pt-en"
|
226 |
+
en_ru="en-ru"
|
227 |
+
ru_en="ru-en"
|
228 |
+
en_zh="en-zh"
|
229 |
+
zh_en="zh-en"
|
230 |
+
en_ko="en-ko"
|
231 |
+
ko_en="ko-en"
|
232 |
+
)
|
233 |
+
|
234 |
+
min_perplexity=50
|
235 |
+
|
236 |
+
size=(Size: 7 13)
|
237 |
+
|
238 |
+
log_interval=1
|
239 |
+
save_interval=635
|
240 |
+
eval_interval=635
|
241 |
+
train_steps=12700
|
242 |
+
|
243 |
+
lr_scheduler=cosine
|
244 |
+
warmup_steps=127
|
245 |
+
lr=3e-5
|
246 |
+
lr_min=3e-6
|
247 |
+
weight_decay=0.1
|
248 |
+
|
249 |
+
n_gpus=8
|
250 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
251 |
+
tp=(TP: 1 2 3 4 5 6 7 8)
|
252 |
+
pp=(PP: 1 2 3 4)
|
253 |
+
micro_batch_size=4
|
254 |
+
grad_accum_steps=12
|
255 |
+
vocab_size=32000
|
256 |
+
|
257 |
+
cpu_workers=16
|
258 |
+
wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33"
|
259 |
+
wikipedia=False
|
260 |
+
freeze_layers=""
|
261 |
+
posterior_tokens=False
|
262 |
+
n_posterior_tokens=0
|
263 |
+
eval_iters=1
|
264 |
+
}
|
multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_concat.tconf
ADDED
@@ -0,0 +1,264 @@
1 |
+
global {
|
2 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
|
3 |
+
repo=/mnt/data/jpombal/multilinguality_megatron
|
4 |
+
|
5 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_parallel_concat_checkpoints
|
6 |
+
model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852
|
7 |
+
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model
|
8 |
+
|
9 |
+
dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
|
10 |
+
|
11 |
+
dataset_path=(Dataset:
|
12 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
13 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
14 |
+
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
|
15 |
+
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
|
16 |
+
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
|
17 |
+
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
|
18 |
+
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
|
19 |
+
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
|
20 |
+
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
|
21 |
+
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
|
22 |
+
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
23 |
+
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
24 |
+
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
25 |
+
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
26 |
+
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
27 |
+
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
28 |
+
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
29 |
+
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
30 |
+
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
31 |
+
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
32 |
+
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
33 |
+
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
34 |
+
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
35 |
+
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
36 |
+
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
37 |
+
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
38 |
+
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
39 |
+
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
|
40 |
+
)
|
41 |
+
|
42 |
+
is_hf_dataset=(Dataset:
|
43 |
+
en=True
|
44 |
+
es=False
|
45 |
+
de=False
|
46 |
+
fr=False
|
47 |
+
nl=False
|
48 |
+
pt=False
|
49 |
+
it=False
|
50 |
+
ru=False
|
51 |
+
zh=False
|
52 |
+
ko=False
|
53 |
+
en_de=False
|
54 |
+
de_en=False
|
55 |
+
en_fr=False
|
56 |
+
fr_en=False
|
57 |
+
en_es=False
|
58 |
+
es_en=False
|
59 |
+
en_it=False
|
60 |
+
it_en=False
|
61 |
+
en_nl=False
|
62 |
+
nl_en=False
|
63 |
+
en_pt=False
|
64 |
+
pt_en=False
|
65 |
+
en_ru=False
|
66 |
+
ru_en=False
|
67 |
+
en_zh=False
|
68 |
+
zh_en=False
|
69 |
+
en_ko=False
|
70 |
+
ko_en=False
|
71 |
+
)
|
72 |
+
|
73 |
+
threshold=(Dataset:
|
74 |
+
en=516
|
75 |
+
es=275
|
76 |
+
de=611
|
77 |
+
fr=322
|
78 |
+
nl=649
|
79 |
+
pt=257
|
80 |
+
it=332
|
81 |
+
ru=334
|
82 |
+
zh=2041
|
83 |
+
ko=198
|
84 |
+
en_de=100000
|
85 |
+
de_en=100000
|
86 |
+
en_fr=100000
|
87 |
+
fr_en=100000
|
88 |
+
en_es=100000
|
89 |
+
es_en=100000
|
90 |
+
en_it=100000
|
91 |
+
it_en=100000
|
92 |
+
en_nl=100000
|
93 |
+
nl_en=100000
|
94 |
+
en_pt=100000
|
95 |
+
pt_en=100000
|
96 |
+
en_ru=100000
|
97 |
+
ru_en=100000
|
98 |
+
en_zh=100000
|
99 |
+
zh_en=100000
|
100 |
+
en_ko=100000
|
101 |
+
ko_en=100000
|
102 |
+
)
|
103 |
+
|
104 |
+
# roughly 67% for mc4, 33% for total parallel data
|
105 |
+
datamix_weights=(
|
106 |
+
DataMix:
|
107 |
+
mc4_parallel_uniform=(
|
108 |
+
Dataset:
|
109 |
+
en=670
|
110 |
+
es=670
|
111 |
+
de=670
|
112 |
+
fr=670
|
113 |
+
nl=670
|
114 |
+
pt=670
|
115 |
+
it=670
|
116 |
+
ru=670
|
117 |
+
zh=670
|
118 |
+
ko=670
|
119 |
+
en_de=183
|
120 |
+
de_en=183
|
121 |
+
en_fr=183
|
122 |
+
fr_en=183
|
123 |
+
en_es=183
|
124 |
+
es_en=183
|
125 |
+
en_it=183
|
126 |
+
it_en=183
|
127 |
+
en_nl=183
|
128 |
+
nl_en=183
|
129 |
+
en_pt=183
|
130 |
+
pt_en=183
|
131 |
+
en_ru=183
|
132 |
+
ru_en=183
|
133 |
+
en_zh=183
|
134 |
+
zh_en=183
|
135 |
+
en_ko=183
|
136 |
+
ko_en=183
|
137 |
+
)
|
138 |
+
)
|
139 |
+
|
140 |
+
# number such that final tokens for each language are around 1B
|
141 |
+
n_tokens=(Dataset:
|
142 |
+
en=1000000000
|
143 |
+
es=833333330
|
144 |
+
de=833333330
|
145 |
+
fr=833333330
|
146 |
+
nl=833333330
|
147 |
+
pt=833333330
|
148 |
+
it=833333330
|
149 |
+
ru=500000000
|
150 |
+
zh=13888888
|
151 |
+
ko=250000000
|
152 |
+
en_de=20000000
|
153 |
+
de_en=20000000
|
154 |
+
en_fr=20000000
|
155 |
+
fr_en=20000000
|
156 |
+
en_es=20000000
|
157 |
+
es_en=20000000
|
158 |
+
en_it=20000000
|
159 |
+
it_en=20000000
|
160 |
+
en_nl=20000000
|
161 |
+
nl_en=20000000
|
162 |
+
en_pt=20000000
|
163 |
+
pt_en=20000000
|
164 |
+
en_ru=20000000
|
165 |
+
ru_en=20000000
|
166 |
+
en_zh=20000000
|
167 |
+
zh_en=20000000
|
168 |
+
en_ko=20000000
|
169 |
+
ko_en=20000000
|
170 |
+
)
|
171 |
+
|
172 |
+
is_parallel=(Dataset:
|
173 |
+
en=False
|
174 |
+
es=False
|
175 |
+
de=False
|
176 |
+
fr=False
|
177 |
+
nl=False
|
178 |
+
pt=False
|
179 |
+
it=False
|
180 |
+
ru=False
|
181 |
+
zh=False
|
182 |
+
ko=False
|
183 |
+
en_de=True
|
184 |
+
de_en=True
|
185 |
+
en_fr=True
|
186 |
+
fr_en=True
|
187 |
+
en_es=True
|
188 |
+
es_en=True
|
189 |
+
en_it=True
|
190 |
+
it_en=True
|
191 |
+
en_nl=True
|
192 |
+
nl_en=True
|
193 |
+
en_pt=True
|
194 |
+
pt_en=True
|
195 |
+
en_ru=True
|
196 |
+
ru_en=True
|
197 |
+
en_zh=True
|
198 |
+
zh_en=True
|
199 |
+
en_ko=True
|
200 |
+
ko_en=True
|
201 |
+
)
|
202 |
+
|
203 |
+
lp=(Dataset:
|
204 |
+
en=""
|
205 |
+
es=""
|
206 |
+
de=""
|
207 |
+
fr=""
|
208 |
+
nl=""
|
209 |
+
pt=""
|
210 |
+
it=""
|
211 |
+
ru=""
|
212 |
+
zh=""
|
213 |
+
ko=""
|
214 |
+
en_de="en-de"
|
215 |
+
de_en="de-en"
|
216 |
+
en_fr="en-fr"
|
217 |
+
fr_en="fr-en"
|
218 |
+
en_es="en-es"
|
219 |
+
es_en="es-en"
|
220 |
+
en_it="en-it"
|
221 |
+
it_en="it-en"
|
222 |
+
en_nl="en-nl"
|
223 |
+
nl_en="nl-en"
|
224 |
+
en_pt="en-pt"
|
225 |
+
pt_en="pt-en"
|
226 |
+
en_ru="en-ru"
|
227 |
+
ru_en="ru-en"
|
228 |
+
en_zh="en-zh"
|
229 |
+
zh_en="zh-en"
|
230 |
+
en_ko="en-ko"
|
231 |
+
ko_en="ko-en"
|
232 |
+
)
|
233 |
+
|
234 |
+
min_perplexity=50
|
235 |
+
|
236 |
+
size=(Size: 7 13)
|
237 |
+
|
238 |
+
log_interval=1
|
239 |
+
save_interval=635
|
240 |
+
eval_interval=635
|
241 |
+
train_steps=12700
|
242 |
+
|
243 |
+
lr_scheduler=cosine
|
244 |
+
warmup_steps=127
|
245 |
+
lr=3e-5
|
246 |
+
lr_min=3e-6
|
247 |
+
weight_decay=0.1
|
248 |
+
|
249 |
+
n_gpus=8
|
250 |
+
gpu_ids=0,1,2,3,4,5,6,7
|
251 |
+
tp=(TP: 1 2 3 4)
|
252 |
+
pp=(PP: 1 2 3 4)
|
253 |
+
micro_batch_size=4
|
254 |
+
grad_accum_steps=12
|
255 |
+
vocab_size=32000
|
256 |
+
|
257 |
+
cpu_workers=16
|
258 |
+
wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33"
|
259 |
+
wikipedia=False
|
260 |
+
freeze_layers=""
|
261 |
+
posterior_tokens=False
|
262 |
+
n_posterior_tokens=0
|
263 |
+
eval_iters=1
|
264 |
+
}
|
multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_instructions.tconf
ADDED
@@ -0,0 +1,271 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B_w_instructions
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B_w_instructions/mc4_parallel_towerblocksv0.1_checkpoints
    model_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852
    tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model

    dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en tower_blocks)

    dataset_path=(Dataset:
        en=/mnt/data_2/shared/tower_llm_data/en/data
        es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
        de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
        fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
        nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
        pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
        it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
        ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
        zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
        ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
        en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        tower_blocks="Unbabel/TowerBlocks-v0.1"
    )

    is_hf_dataset=(Dataset:
        en=True
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
        en_de=False
        de_en=False
        en_fr=False
        fr_en=False
        en_es=False
        es_en=False
        en_it=False
        it_en=False
        en_nl=False
        nl_en=False
        en_pt=False
        pt_en=False
        en_ru=False
        ru_en=False
        en_zh=False
        zh_en=False
        en_ko=False
        ko_en=False
        tower_blocks=True
    )

    threshold=(Dataset:
        en=516
        es=275
        de=611
        fr=322
        nl=649
        pt=257
        it=332
        ru=334
        zh=2041
        ko=198
        en_de=100000
        de_en=100000
        en_fr=100000
        fr_en=100000
        en_es=100000
        es_en=100000
        en_it=100000
        it_en=100000
        en_nl=100000
        nl_en=100000
        en_pt=100000
        pt_en=100000
        en_ru=100000
        ru_en=100000
        en_zh=100000
        zh_en=100000
        en_ko=100000
        ko_en=100000
        tower_blocks=100000
    )

    # roughly 67% for mc4, 33% for total parallel data
    datamix_weights=(
        DataMix:
        mc4_parallel_uniform=(
            Dataset:
            en=670
            es=670
            de=670
            fr=670
            nl=670
            pt=670
            it=670
            ru=670
            zh=670
            ko=670
            en_de=183
            de_en=183
            en_fr=183
            fr_en=183
            en_es=183
            es_en=183
            en_it=183
            it_en=183
            en_nl=183
            nl_en=183
            en_pt=183
            pt_en=183
            en_ru=183
            ru_en=183
            en_zh=183
            zh_en=183
            en_ko=183
            ko_en=183
            tower_blocks=183
        )
    )

    # number such that final tokens for each language are around 1B
    n_tokens=(Dataset:
        en=1000000000
        es=833333330
        de=833333330
        fr=833333330
        nl=833333330
        pt=833333330
        it=833333330
        ru=500000000
        zh=13888888
        ko=250000000
        en_de=20000000
        de_en=20000000
        en_fr=20000000
        fr_en=20000000
        en_es=20000000
        es_en=20000000
        en_it=20000000
        it_en=20000000
        en_nl=20000000
        nl_en=20000000
        en_pt=20000000
        pt_en=20000000
        en_ru=20000000
        ru_en=20000000
        en_zh=20000000
        zh_en=20000000
        en_ko=20000000
        ko_en=20000000
        tower_blocks=20000000
    )

    is_parallel=(Dataset:
        en=False
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
        en_de=True
        de_en=True
        en_fr=True
        fr_en=True
        en_es=True
        es_en=True
        en_it=True
        it_en=True
        en_nl=True
        nl_en=True
        en_pt=True
        pt_en=True
        en_ru=True
        ru_en=True
        en_zh=True
        zh_en=True
        en_ko=True
        ko_en=True
        tower_blocks=False
    )

    lp=(Dataset:
        en=""
        es=""
        de=""
        fr=""
        nl=""
        pt=""
        it=""
        ru=""
        zh=""
        ko=""
        en_de="en-de"
        de_en="de-en"
        en_fr="en-fr"
        fr_en="fr-en"
        en_es="en-es"
        es_en="es-en"
        en_it="en-it"
        it_en="it-en"
        en_nl="en-nl"
        nl_en="nl-en"
        en_pt="en-pt"
        pt_en="pt-en"
        en_ru="en-ru"
        ru_en="ru-en"
        en_zh="en-zh"
        zh_en="zh-en"
        en_ko="en-ko"
        ko_en="ko-en"
        tower_blocks="oi"
    )

    min_perplexity=50

    size=(Size: 7 13)

    log_interval=1
    save_interval=635
    eval_interval=635
    train_steps=12700

    lr_scheduler=cosine
    warmup_steps=127
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=32000

    cpu_workers=16
    wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33"
    wikipedia=False
    freeze_layers=""
    posterior_tokens=False
    n_posterior_tokens=0
    eval_iters=1
}
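In the config above, `datamix_weights` gives each dataset an integer weight (670 per monolingual mC4 split, 183 per parallel direction and for TowerBlocks), and the "roughly 67% / 33%" comment refers to the normalized shares. A minimal sketch of that normalization, assuming the training side simply samples datasets in proportion to these weights (a simplification, not the repo's exact blending code):

```python
# Weights copied from the mc4_parallel_uniform mix above.
mono_langs = ["en", "de", "fr", "es", "it", "nl", "pt", "ru", "zh", "ko"]
pair_dirs = [f"en_{x}" for x in ["de", "fr", "es", "it", "nl", "pt", "ru", "zh", "ko"]]
pair_dirs += [d.split("_")[1] + "_en" for d in pair_dirs]   # reverse directions

weights = {**{lang: 670 for lang in mono_langs},
           **{d: 183 for d in pair_dirs},
           "tower_blocks": 183}

total = sum(weights.values())
mc4_share = sum(v for k, v in weights.items() if k in mono_langs) / total
print(round(mc4_share, 3))  # ~0.658 -> "roughly 67% mc4, 33% parallel + instructions"
```

With these numbers the monolingual share comes out to about 66%, which is where the comment's approximate split comes from.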
multilinguality_megatron/ducttape/20B_all_cleaned_mc4_wiki.tconf
ADDED
@@ -0,0 +1,176 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_wiki_checkpoints
    model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9
    tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model

    dataset=(Dataset: en de fr es it nl pt ru zh ko en_wiki de_wiki fr_wiki es_wiki it_wiki nl_wiki pt_wiki ru_wiki zh_wiki ko_wiki)

    dataset_path=(Dataset:
        en=/mnt/data_2/shared/tower_llm_data/en/data
        es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
        de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
        fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
        nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
        pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
        it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
        ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
        zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
        ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
        en_wiki=""
        es_wiki=""
        de_wiki=""
        fr_wiki=""
        nl_wiki=""
        pt_wiki=""
        it_wiki=""
        ru_wiki=""
        zh_wiki=""
        ko_wiki=""
    )

    is_hf_dataset=(Dataset:
        en=True
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
        en_wiki=False
        es_wiki=False
        de_wiki=False
        fr_wiki=False
        nl_wiki=False
        pt_wiki=False
        it_wiki=False
        ru_wiki=False
        zh_wiki=False
        ko_wiki=False
    )

    threshold=(Dataset:
        en=516 en_wiki=""
        es=275 es_wiki=""
        de=611 de_wiki=""
        fr=322 fr_wiki=""
        nl=649 nl_wiki=""
        pt=257 pt_wiki=""
        it=332 it_wiki=""
        ru=334 ru_wiki=""
        zh=2041 zh_wiki=""
        ko=198 ko_wiki=""
    )

    datamix_weights=(
        DataMix:
        mc4_wiki_uniform=(
            Dataset:
            en=67
            es=67
            de=67
            fr=67
            nl=67
            pt=67
            it=67
            ru=67
            zh=67
            ko=67
            en_wiki=33
            es_wiki=33
            de_wiki=33
            fr_wiki=33
            nl_wiki=33
            pt_wiki=33
            it_wiki=33
            ru_wiki=33
            zh_wiki=33
            ko_wiki=33
        )
        mc4_uniform=(
            Dataset:
            en=100
            es=100
            de=100
            fr=100
            nl=100
            pt=100
            it=100
            ru=100
            zh=100
            ko=100
            en_wiki=0
            es_wiki=0
            de_wiki=0
            fr_wiki=0
            nl_wiki=0
            pt_wiki=0
            it_wiki=0
            ru_wiki=0
            zh_wiki=0
            ko_wiki=0
        )
    )

    # number such that final tokens for each language are around 1B
    n_tokens=(Dataset:
        en=1000000000
        es=833333330
        de=833333330
        fr=833333330
        nl=833333330
        pt=833333330
        it=833333330
        ru=500000000
        zh=13888888
        ko=250000000
        en_wiki=""
        es_wiki=""
        de_wiki=""
        fr_wiki=""
        nl_wiki=""
        pt_wiki=""
        it_wiki=""
        ru_wiki=""
        zh_wiki=""
        ko_wiki=""
    )

    min_perplexity=50

    size=(Size: 7 13)

    log_interval=1
    save_interval=635
    eval_interval=635
    train_steps=12700

    lr_scheduler=cosine
    warmup_steps=127
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=32000

    cpu_workers=16
    wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33"
    wikipedia=False
    freeze_layers=""
    posterior_tokens=False
    n_posterior_tokens=False
    eval_iters=1
    is_parallel=False
    lp=""
}
multilinguality_megatron/ducttape/20B_all_cleaned_parallel.tconf
ADDED
@@ -0,0 +1,194 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/parallel_checkpoints
    model_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852
    tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model

    dataset=(Dataset: en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)

    dataset_path=(Dataset:
        en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    )

    is_hf_dataset=(Dataset:
        en_de=False
        de_en=False
        en_fr=False
        fr_en=False
        en_es=False
        es_en=False
        en_it=False
        it_en=False
        en_nl=False
        nl_en=False
        en_pt=False
        pt_en=False
        en_ru=False
        ru_en=False
        en_zh=False
        zh_en=False
        en_ko=False
        ko_en=False
    )

    threshold=(Dataset:
        en_de=100000
        de_en=100000
        en_fr=100000
        fr_en=100000
        en_es=100000
        es_en=100000
        en_it=100000
        it_en=100000
        en_nl=100000
        nl_en=100000
        en_pt=100000
        pt_en=100000
        en_ru=100000
        ru_en=100000
        en_zh=100000
        zh_en=100000
        en_ko=100000
        ko_en=100000
    )

    # roughly 67% for mc4, 33% for total parallel data
    datamix_weights=(
        DataMix:
        mc4_parallel_uniform=(
            Dataset:
            en_de=1
            de_en=1
            en_fr=1
            fr_en=1
            en_es=1
            es_en=1
            en_it=1
            it_en=1
            en_nl=1
            nl_en=1
            en_pt=1
            pt_en=1
            en_ru=1
            ru_en=1
            en_zh=1
            zh_en=1
            en_ko=1
            ko_en=1
        )
    )

    # number such that final tokens for each language are around 1B
    n_tokens=(Dataset:
        en_de=20000000
        de_en=20000000
        en_fr=20000000
        fr_en=20000000
        en_es=20000000
        es_en=20000000
        en_it=20000000
        it_en=20000000
        en_nl=20000000
        nl_en=20000000
        en_pt=20000000
        pt_en=20000000
        en_ru=20000000
        ru_en=20000000
        en_zh=20000000
        zh_en=20000000
        en_ko=20000000
        ko_en=20000000
    )

    is_parallel=(Dataset:
        en_de=True
        de_en=True
        en_fr=True
        fr_en=True
        en_es=True
        es_en=True
        en_it=True
        it_en=True
        en_nl=True
        nl_en=True
        en_pt=True
        pt_en=True
        en_ru=True
        ru_en=True
        en_zh=True
        zh_en=True
        en_ko=True
        ko_en=True
    )

    lp=(Dataset:
        en_de="en-de"
        de_en="de-en"
        en_fr="en-fr"
        fr_en="fr-en"
        en_es="en-es"
        es_en="es-en"
        en_it="en-it"
        it_en="it-en"
        en_nl="en-nl"
        nl_en="nl-en"
        en_pt="en-pt"
        pt_en="pt-en"
        en_ru="en-ru"
        ru_en="ru-en"
        en_zh="en-zh"
        zh_en="zh-en"
        en_ko="en-ko"
        ko_en="ko-en"
    )

    min_perplexity=50

    size=(Size: 7 13)

    log_interval=1
    save_interval=635
    eval_interval=635
    train_steps=12700

    lr_scheduler=cosine
    warmup_steps=127
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=32000

    cpu_workers=16
    wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33"
    wikipedia=False
    freeze_layers=""
    posterior_tokens=False
    n_posterior_tokens=0
    eval_iters=1
}
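Across these 20B-token configs the schedule is the same: `train_steps=12700`, `micro_batch_size=4`, `grad_accum_steps=12`, `n_gpus=8`. Under the assumption of a 4096-token Llama-2 context and data parallelism across all eight GPUs (neither is stated in the file itself), a quick back-of-the-envelope check recovers the "20B" in the experiment names:

```python
# All values except seq_length come from the config above.
micro_batch_size = 4
grad_accum_steps = 12
n_gpus = 8               # treated as pure data-parallel ranks (assumption)
seq_length = 4096        # assumed Llama-2 context window, not set in the file
train_steps = 12_700

global_batch_tokens = micro_batch_size * grad_accum_steps * n_gpus * seq_length
total_tokens = global_batch_tokens * train_steps
print(f"{global_batch_tokens:,} tokens/step, {total_tokens / 1e9:.1f}B tokens total")
# 1,572,864 tokens/step, 20.0B tokens total
```

The same arithmetic explains `warmup_steps=127` (1% of the steps) and the 635-step save/eval interval (5% of the steps).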
multilinguality_megatron/ducttape/20B_all_dirty_mc4.tconf
ADDED
@@ -0,0 +1,124 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/dirty_mc4_checkpoints
    model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852
    tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model

    dataset=(Dataset: en de fr es it nl pt ru zh ko)

    dataset_path=(Dataset:
        en=/mnt/data_2/shared/pre-training/tower_llm_data/en/data
        es=/mnt/data_2/shared/pre-training/tower_llm_data/es/0/0000.json.gz
        de=/mnt/data_2/shared/pre-training/tower_llm_data/de/0/0000.json.gz
        fr=/mnt/data_2/shared/pre-training/tower_llm_data/fr/1/0000.json.gz
        nl=/mnt/data_2/shared/pre-training/tower_llm_data/nl/0000.json.gz
        pt=/mnt/data_2/shared/pre-training/tower_llm_data/pt/0000.json.gz
        it=/mnt/data_2/shared/pre-training/tower_llm_data/it/0000.json.gz
        ru=/mnt/data_2/shared/pre-training/tower_llm_data/ru/0/0000.json.gz
        zh=/mnt/data_2/shared/pre-training/tower_llm_data/zh/0000.json.gz
        ko=/mnt/data_2/shared/pre-training/tower_llm_data/ko/0000.json.gz
    )

    is_hf_dataset=(Dataset:
        en=True
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
    )

    threshold=(Dataset:
        en=10000000
        es=10000000
        de=10000000
        fr=10000000
        nl=10000000
        pt=10000000
        it=10000000
        ru=10000000
        zh=10000000
        ko=10000000
    )

    datamix_weights=(
        DataMix:
        mc4_uniform=(
            Dataset:
            en=100
            es=100
            de=100
            fr=100
            nl=100
            pt=100
            it=100
            ru=100
            zh=100
            ko=100
        )
    )

    # number such that final tokens for each language are around 1B
    n_tokens=(Dataset:
        en=1000000000
        es=833333330
        de=833333330
        fr=833333330
        nl=833333330
        pt=833333330
        it=833333330
        ru=500000000
        zh=1388888800
        ko=250000000
    )

    min_perplexity=0

    size=(Size: 7 13)

    log_interval=1
    save_interval=635
    eval_interval=635
    train_steps=12700

    lr_scheduler=cosine
    warmup_steps=127
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=32000

    cpu_workers=16
    wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33"
    wikipedia=False
    freeze_layers=""
    posterior_tokens=False
    n_posterior_tokens=0
    eval_iters=1
    is_parallel=False
    lp=(Dataset:
        en="en"
        es="es"
        de="de"
        fr="fr"
        nl="nl"
        pt="pt"
        it="it"
        ru="ru"
        zh="zh"
        ko="ko"
    )
}
multilinguality_megatron/ducttape/40B_all_cleaned_mc4_parallel.tconf
ADDED
@@ -0,0 +1,305 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_40B
    repo=/mnt/data/pmartins/code/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_40B/mc4_parallel_checkpoints
    model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9
    tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model

    dataset=(Dataset: en de fr es it nl pt ru zh ko pl sv en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_pl pl_en en_sv sv_en)

    dataset_path=(Dataset:
        en=/mnt/data_2/shared/tower_llm_data/en/data
        es=/mnt/data_2/shared/tower_llm_data/es/0/0000.json.gz
        de=/mnt/data_2/shared/tower_llm_data/de/0/0000.json.gz
        fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
        nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
        pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
        it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
        ru=/mnt/data_2/shared/tower_llm_data/ru/0/0000.json.gz
        zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
        ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
        pl=/mnt/data_2/shared/tower_llm_data/pl/0000.json.gz
        sv=/mnt/data_2/shared/tower_llm_data/sv/0000.json.gz
        en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_pl="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-pl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        pl_en="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-pl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75/"
        en_sv="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-sv/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75/"
        sv_en="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-sv/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75/"
    )

    is_hf_dataset=(Dataset:
        en=True
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
        pl=False
        sv=False
        en_de=False
        de_en=False
        en_fr=False
        fr_en=False
        en_es=False
        es_en=False
        en_it=False
        it_en=False
        en_nl=False
        nl_en=False
        en_pt=False
        pt_en=False
        en_ru=False
        ru_en=False
        en_zh=False
        zh_en=False
        en_ko=False
        ko_en=False
        en_pl=False
        pl_en=False
        en_sv=False
        sv_en=False
    )

    threshold=(Dataset:
        en=516
        es=275
        de=611
        fr=322
        nl=649
        pt=257
        it=332
        ru=334
        zh=2041
        ko=198
        pl=261
        sv=699
        en_de=100000
        de_en=100000
        en_fr=100000
        fr_en=100000
        en_es=100000
        es_en=100000
        en_it=100000
        it_en=100000
        en_nl=100000
        nl_en=100000
        en_pt=100000
        pt_en=100000
        en_ru=100000
        ru_en=100000
        en_zh=100000
        zh_en=100000
        en_ko=100000
        ko_en=100000
        en_pl=100000
        pl_en=100000
        en_sv=100000
        sv_en=100000
    )

    # roughly 67% for mc4, 33% for total parallel data
    datamix_weights=(
        DataMix:
        mc4_parallel_uniform=(
            Dataset:
            en=670
            es=670
            de=670
            fr=670
            nl=670
            pt=670
            it=670
            ru=670
            zh=670
            ko=670
            pl=0
            sv=0
            en_de=183
            de_en=183
            en_fr=183
            fr_en=183
            en_es=183
            es_en=183
            en_it=183
            it_en=183
            en_nl=183
            nl_en=183
            en_pt=183
            pt_en=183
            en_ru=183
            ru_en=183
            en_zh=183
            zh_en=183
            en_ko=183
            ko_en=183
            en_pl=0
            pl_en=0
            en_sv=0
            sv_en=0
        )
    )

    n_tokens=(Dataset:
        en=4000000000
        es=4000000000
        de=4000000000
        fr=4000000000
        nl=4000000000
        pt=4000000000
        it=4000000000
        ru=4000000000
        zh=10000000000
        ko=4000000000
        pl=4000000000
        sv=4000000000
        en_de=200000000
        de_en=200000000
        en_fr=200000000
        fr_en=200000000
        en_es=200000000
        es_en=200000000
        en_it=200000000
        it_en=200000000
        en_nl=200000000
        nl_en=200000000
        en_pt=200000000
        pt_en=200000000
        en_ru=200000000
        ru_en=200000000
        en_zh=200000000
        zh_en=200000000
        en_ko=200000000
        ko_en=200000000
        en_pl=200000000
        pl_en=200000000
        en_sv=200000000
        sv_en=200000000
    )

    is_parallel=(Dataset:
        en=False
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
        pl=False
        sv=False
        en_de=True
        de_en=True
        en_fr=True
        fr_en=True
        en_es=True
        es_en=True
        en_it=True
        it_en=True
        en_nl=True
        nl_en=True
        en_pt=True
        pt_en=True
        en_ru=True
        ru_en=True
        en_zh=True
        zh_en=True
        en_ko=True
        ko_en=True
        en_pl=True
        pl_en=True
        en_sv=True
        sv_en=True
    )

    lp=(Dataset:
        en="en"
        es="es"
        de="de"
        fr="fr"
        nl="nl"
        pt="pt"
        it="it"
        ru="ru"
        zh="zh"
        ko="ko"
        pl="pl"
        sv="sv"
        en_de="en-de"
        de_en="de-en"
        en_fr="en-fr"
        fr_en="fr-en"
        en_es="en-es"
        es_en="es-en"
        en_it="en-it"
        it_en="it-en"
        en_nl="en-nl"
        nl_en="nl-en"
        en_pt="en-pt"
        pt_en="pt-en"
        en_ru="en-ru"
        ru_en="ru-en"
        en_zh="en-zh"
        zh_en="zh-en"
        en_ko="en-ko"
        ko_en="ko-en"
        en_pl="en-pl"
        pl_en="pl-en"
        en_sv="en-sv"
        sv_en="sv-en"
    )

    min_perplexity=50

    size=(Size: 7 13)

    log_interval=1
    save_interval=635
    eval_interval=635
    train_steps=12700

    lr_scheduler=cosine
    warmup_steps=127
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=32000

    cpu_workers=16
    wandb_run_id="llama2_7B_40b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33"
    wikipedia=False
    freeze_layers=""
    posterior_tokens=False
    n_posterior_tokens=0
    eval_iters=1
}
multilinguality_megatron/ducttape/continue_pretraining.tconf
ADDED
@@ -0,0 +1,77 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_test
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_test/checkpoints
    model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf
    tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model

    train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)

    threshold=(TrainLanguage:
        en=516
        es=275
        de=611
        fr=322
        nl=649
        pt=257
        it=332
        ru=334
        zh=2041
        ko=198
    )

    # less for zh (inefficient tokenizer)
    n_tokens=(TrainLanguage:
        en=250000000
        es=83333333
        de=83333333
        fr=83333333
        nl=83333333
        pt=83333333
        it=83333333
        ru=83333333
        zh=8333333
        ko=83333333
    )

    dataset_path=(TrainLanguage:
        en=/mnt/data_2/shared/tower_llm_data/en/data
        es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
        de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
        fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
        nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
        pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
        it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
        ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
        zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
        ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
    )

    mix="10 10 10 10 10 10 10 10 10 10"

    min_perplexity=50

    size=(Size: 7 13)

    log_interval=10
    save_interval=318
    eval_interval=158
    train_steps=1272

    lr_scheduler=cosine
    warmup_steps=13
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=6

    cpu_workers=16

}
multilinguality_megatron/ducttape/data_test.tconf
ADDED
@@ -0,0 +1,79 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B/checkpoints
    model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf
    tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model

    train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)

    threshold=(TrainLanguage:
        en=516
        es=275
        de=611
        fr=322
        nl=649
        pt=257
        it=332
        ru=334
        zh=2041
        ko=198
    )

    # number such that final tokens for each language are around 1B
    n_tokens=(TrainLanguage:
        en=1000000000
        es=833333330
        de=833333330
        fr=833333330
        nl=833333330
        pt=833333330
        it=833333330
        ru=500000000
        zh=13888888
        ko=250000000
    )

    dataset_path=(TrainLanguage:
        en=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/en
        es=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/es
        de=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/de
        fr=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/fr
        nl=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/nl
        pt=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/pt
        it=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/it
        ru=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ru
        zh=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/zh
        ko=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ko
    )

    mix="10 10 10 10 10 10 10 10 10 10"

    min_perplexity=50

    size=(Size: 7 13)

    log_interval=10
    save_interval=635
    eval_interval=635
    train_steps=6358

    lr_scheduler=cosine
    warmup_steps=63
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=32000

    cpu_workers=16
    wandb_run_id="wikipedia"
    wikipedia=True
}
multilinguality_megatron/ducttape/data_test_extend32.tconf
ADDED
@@ -0,0 +1,79 @@
global {
    ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B_extend32
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32/checkpoints
    model_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit
    tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model

    train_language=(TrainLanguage: en de fr es it nl pt ru zh ko)

    threshold=(TrainLanguage:
        en=516
        es=275
        de=611
        fr=322
        nl=649
        pt=257
        it=332
        ru=334
        zh=2041
        ko=198
    )

    # number such that final tokens for each language are around 1B
    n_tokens=(TrainLanguage:
        en=1000000000
        es=833333330
        de=833333330
        fr=833333330
        nl=833333330
        pt=833333330
        it=833333330
        ru=500000000
        zh=13888888
        ko=250000000
    )

    dataset_path=(TrainLanguage:
        en=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/en
        es=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/es
        de=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/de
        fr=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/fr
        nl=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/nl
        pt=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/pt
        it=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/it
        ru=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ru
        zh=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/zh
        ko=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ko
    )

    mix="10 10 10 10 10 10 10 10 10 10"

    min_perplexity=50

    size=(Size: 7 13)

    log_interval=10
    save_interval=635
    eval_interval=635
    train_steps=6358

    lr_scheduler=cosine
    warmup_steps=63
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4)
    pp=(PP: 1 2 3 4)
    micro_batch_size=4
    grad_accum_steps=12
    vocab_size=52672

    cpu_workers=16
    wandb_run_id="wikipedia_extend32"
    wikipedia=True
}
multilinguality_megatron/ducttape/gemma_2B_20B_all_cleaned_mc4_parallel.tconf
ADDED
@@ -0,0 +1,280 @@
global {
    model_type="gemma"
    ducttape_output=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B
    repo=/mnt/data/jpombal/multilinguality_megatron

    external_model_dir=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/test
    external_model_dir_annealing=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/test
    model_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b/
    tokenizer_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b

    tokenizer_type=PretrainedFromHF

    dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
    datamix_weights_annealing=""

    dataset_path=(Dataset:
        en=/mnt/data_2/shared/pre-training/tower_llm_data/en/data
        es=/mnt/data_2/shared/pre-training/tower_llm_data/es/3/0000.json.gz
        de=/mnt/data_2/shared/pre-training/tower_llm_data/de/2/0000.json.gz
        fr=/mnt/data_2/shared/pre-training/tower_llm_data/fr/1/0000.json.gz
        nl=/mnt/data_2/shared/pre-training/tower_llm_data/nl/0000.json.gz
        pt=/mnt/data_2/shared/pre-training/tower_llm_data/pt/0000.json.gz
        it=/mnt/data_2/shared/pre-training/tower_llm_data/it/0000.json.gz
        ru=/mnt/data_2/shared/pre-training/tower_llm_data/ru/6/0000.json.gz
        zh=/mnt/data_2/shared/pre-training/tower_llm_data/zh/0000.json.gz
        ko=/mnt/data_2/shared/pre-training/tower_llm_data/ko/0000.json.gz
        en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
        ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    )

    is_hf_dataset=(Dataset:
        en=True
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
        en_de=False
        de_en=False
        en_fr=False
        fr_en=False
        en_es=False
        es_en=False
        en_it=False
        it_en=False
        en_nl=False
        nl_en=False
        en_pt=False
        pt_en=False
        en_ru=False
        ru_en=False
        en_zh=False
        zh_en=False
        en_ko=False
        ko_en=False
    )

    threshold=(Dataset:
        en=516
        es=275
        de=611
        fr=322
        nl=649
        pt=257
        it=332
        ru=334
        zh=2041
        ko=198
        en_de=100000
        de_en=100000
        en_fr=100000
        fr_en=100000
        en_es=100000
        es_en=100000
        en_it=100000
        it_en=100000
        en_nl=100000
        nl_en=100000
        en_pt=100000
        pt_en=100000
        en_ru=100000
        ru_en=100000
        en_zh=100000
        zh_en=100000
        en_ko=100000
        ko_en=100000
    )

    # roughly 67% for mc4, 33% for total parallel data
    datamix_weights=(
        DataMix:
        mc4_parallel_uniform=(
            Dataset:
            en=670
            es=670
            de=670
            fr=670
            nl=670
            pt=670
            it=670
            ru=670
            zh=670
            ko=670
            en_de=183
            de_en=183
            en_fr=183
            fr_en=183
            en_es=183
            es_en=183
            en_it=183
            it_en=183
            en_nl=183
            nl_en=183
            en_pt=183
            pt_en=183
            en_ru=183
            ru_en=183
            en_zh=183
            zh_en=183
            en_ko=183
            ko_en=183
        )
    )

    # number such that final tokens for each language are around 1B
    n_tokens=(Dataset:
        en=1000000000
        es=833333330
        de=833333330
        fr=833333330
        nl=833333330
        pt=833333330
        it=833333330
        ru=500000000
        zh=13888888
        ko=250000000
        en_de=20000000
        de_en=20000000
        en_fr=20000000
        fr_en=20000000
        en_es=20000000
        es_en=20000000
        en_it=20000000
        it_en=20000000
        en_nl=20000000
        nl_en=20000000
        en_pt=20000000
        pt_en=20000000
        en_ru=20000000
        ru_en=20000000
        en_zh=20000000
        zh_en=20000000
        en_ko=20000000
        ko_en=20000000
    )

    is_parallel=(Dataset:
        en=False
        es=False
        de=False
        fr=False
        nl=False
        pt=False
        it=False
        ru=False
        zh=False
        ko=False
        en_de=True
        de_en=True
        en_fr=True
        fr_en=True
        en_es=True
        es_en=True
        en_it=True
        it_en=True
        en_nl=True
        nl_en=True
        en_pt=True
        pt_en=True
        en_ru=True
        ru_en=True
        en_zh=True
        zh_en=True
        en_ko=True
        ko_en=True
    )

    lp=(Dataset:
        en="none"
        es="none"
        de="none"
        fr="none"
        nl="none"
        pt="none"
        it="none"
        ru="none"
        zh="none"
        ko="none"
        en_de="en-de"
        de_en="de-en"
        en_fr="en-fr"
        fr_en="fr-en"
        en_es="en-es"
        es_en="es-en"
        en_it="en-it"
        it_en="it-en"
        en_nl="en-nl"
        nl_en="nl-en"
        en_pt="en-pt"
        pt_en="pt-en"
        en_ru="en-ru"
        ru_en="ru-en"
        en_zh="en-zh"
        zh_en="zh-en"
        en_ko="en-ko"
        ko_en="ko-en"
    )

    min_perplexity=0

    size=(Size: 2 7)

    log_interval=1
    save_interval=635
    eval_interval=635
    train_steps=12700
    train_steps_annealing=0

    lr_scheduler=cosine
    warmup_steps=127
    lr=3e-5
    lr_min=3e-6
    weight_decay=0.1

    lr_scheduler_annealing=linear
    warmup_steps_annealing=0
    lr_annealing=3e-5
    lr_min_annealing=3e-6

    n_gpus=8
    gpu_ids=0,1,2,3,4,5,6,7
    tp=(TP: 1 2 3 4 5 6 7 8)
    pp=(PP: 1 2 3 4)
    micro_batch_size=2
    grad_accum_steps=48
    vocab_size=256000

    cpu_workers=16
    wikipedia=False
    freeze_layers=""
    posterior_tokens=False
    n_posterior_tokens=0
    eval_iters=1

    glu_activation=geglu
    kv_channels=256
    layernorm_epsilon=1e-6

    seq_length=2048
}
multilinguality_megatron/ducttape/gemma_2b_flavio.tconf
ADDED
@@ -0,0 +1,546 @@
global {
model_type="gemma"
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_gemma_2_20B
repo=/mnt/data/jpombal/multilinguality_megatron

external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_gemma_2_20B/flavio_checkpoints
external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_gemma_2_20B/flavio_checkpoints_annealed
model_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b/
tokenizer_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b

tokenizer_type=PretrainedFromHF

dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions)
dataset_path=(Dataset:
en=/mnt/data_2/shared/tower_llm_data/en/data
en_synth=""
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
es_synth=""
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
de_synth=""
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
fr_synth=""
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
nl_synth=""
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
pt_synth=""
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
it_synth=""
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
ru_synth=""
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
zh_synth=""
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
ko_synth=""
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

is_hf_dataset=(Dataset:
en=True
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=False
de_en=False
en_fr=False
fr_en=False
en_es=False
es_en=False
en_it=False
it_en=False
en_nl=False
nl_en=False
en_pt=False
pt_en=False
en_ru=False
ru_en=False
en_zh=False
zh_en=False
en_ko=False
ko_en=False
en_synth=False
es_synth=False
de_synth=False
fr_synth=False
nl_synth=False
pt_synth=False
it_synth=False
ru_synth=False
zh_synth=False
ko_synth=False
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

threshold=(Dataset:
en=516
es=275
de=611
fr=322
nl=649
pt=257
it=332
ru=334
zh=2041
ko=198
en_de=100000
de_en=100000
en_fr=100000
fr_en=100000
en_es=100000
es_en=100000
en_it=100000
it_en=100000
en_nl=100000
nl_en=100000
en_pt=100000
pt_en=100000
en_ru=100000
ru_en=100000
en_zh=100000
zh_en=100000
en_ko=100000
ko_en=100000
en_synth=100000
es_synth=100000
de_synth=100000
fr_synth=100000
nl_synth=100000
pt_synth=100000
it_synth=100000
ru_synth=100000
zh_synth=100000
ko_synth=100000
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

# roughly 67% for mc4, 33% for total parallel data
datamix_weights=(
DataMix:
mc4_parallel_uniform=(
Dataset:
en=603
es=603
de=603
fr=603
nl=603
pt=603
it=603
ru=603
zh=603
ko=603
en_de=0
de_en=0
en_fr=0
fr_en=0
en_es=0
es_en=0
en_it=0
it_en=0
en_nl=0
nl_en=0
en_pt=0
pt_en=0
en_ru=0
ru_en=0
en_zh=0
zh_en=0
en_ko=0
ko_en=0
en_synth=67
es_synth=67
de_synth=67
fr_synth=67
nl_synth=67
pt_synth=67
it_synth=67
ru_synth=67
zh_synth=67
ko_synth=67
instructions=0
en_de_pre_annealing=183
de_en_pre_annealing=183
en_fr_pre_annealing=183
fr_en_pre_annealing=183
en_es_pre_annealing=183
es_en_pre_annealing=183
en_it_pre_annealing=183
it_en_pre_annealing=183
en_nl_pre_annealing=183
nl_en_pre_annealing=183
en_pt_pre_annealing=183
pt_en_pre_annealing=183
en_ru_pre_annealing=183
ru_en_pre_annealing=183
en_zh_pre_annealing=183
zh_en_pre_annealing=183
en_ko_pre_annealing=183
ko_en_pre_annealing=183
)
)

datamix_weights_annealing=(
DataMix:
mc4_parallel_uniform=(
Dataset:
en=0
es=0
de=0
fr=0
nl=0
pt=0
it=0
ru=0
zh=0
ko=0
en_de=833
de_en=833
en_fr=833
fr_en=833
en_es=833
es_en=833
en_it=833
it_en=833
en_nl=833
nl_en=833
en_pt=833
pt_en=833
en_ru=833
ru_en=833
en_zh=833
zh_en=833
en_ko=833
ko_en=833
en_synth=0
es_synth=0
de_synth=0
fr_synth=0
nl_synth=0
pt_synth=0
it_synth=0
ru_synth=0
zh_synth=0
ko_synth=0
instructions=85000
en_de_pre_annealing=0
de_en_pre_annealing=0
en_fr_pre_annealing=0
fr_en_pre_annealing=0
en_es_pre_annealing=0
es_en_pre_annealing=0
en_it_pre_annealing=0
it_en_pre_annealing=0
en_nl_pre_annealing=0
nl_en_pre_annealing=0
en_pt_pre_annealing=0
pt_en_pre_annealing=0
en_ru_pre_annealing=0
ru_en_pre_annealing=0
en_zh_pre_annealing=0
zh_en_pre_annealing=0
en_ko_pre_annealing=0
ko_en_pre_annealing=0
)
)


# number such that final tokens for each language are around 1B
n_tokens=(Dataset:
en=1000000000
es=833333330
de=833333330
fr=833333330
nl=833333330
pt=833333330
it=833333330
ru=500000000
zh=13888888
ko=250000000
en_de=20000000
de_en=20000000
en_fr=20000000
fr_en=20000000
en_es=20000000
es_en=20000000
en_it=20000000
it_en=20000000
en_nl=20000000
nl_en=20000000
en_pt=20000000
pt_en=20000000
en_ru=20000000
ru_en=20000000
en_zh=20000000
zh_en=20000000
en_ko=20000000
ko_en=20000000
en_synth=20000000
es_synth=20000000
de_synth=20000000
fr_synth=20000000
nl_synth=20000000
pt_synth=20000000
it_synth=20000000
ru_synth=20000000
zh_synth=20000000
ko_synth=20000000
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

is_parallel=(Dataset:
en=False
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=True
de_en=True
en_fr=True
fr_en=True
en_es=True
es_en=True
en_it=True
it_en=True
en_nl=True
nl_en=True
en_pt=True
pt_en=True
en_ru=True
ru_en=True
en_zh=True
zh_en=True
en_ko=True
ko_en=True
en_synth=False
es_synth=False
de_synth=False
fr_synth=False
nl_synth=False
pt_synth=False
it_synth=False
ru_synth=False
zh_synth=False
ko_synth=False
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

lp=(Dataset:
en=""
es=""
de=""
fr=""
nl=""
pt=""
it=""
ru=""
zh=""
ko=""
en_de="en-de"
de_en="de-en"
en_fr="en-fr"
fr_en="fr-en"
en_es="en-es"
es_en="es-en"
en_it="en-it"
it_en="it-en"
en_nl="en-nl"
nl_en="nl-en"
en_pt="en-pt"
pt_en="pt-en"
en_ru="en-ru"
ru_en="ru-en"
en_zh="en-zh"
zh_en="zh-en"
en_ko="en-ko"
ko_en="ko-en"
en_synth=""
es_synth=""
de_synth=""
fr_synth=""
nl_synth=""
pt_synth=""
it_synth=""
ru_synth=""
zh_synth=""
ko_synth=""
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

min_perplexity=0

size=(Size: 2)

log_interval=1
save_interval=635
eval_interval=635
train_steps=11430
train_steps_annealing=1270

lr_scheduler=constant
warmup_steps=32
lr=3e-5
lr_min=3e-6
weight_decay=0.1

lr_scheduler_annealing=linear
warmup_steps_annealing=0
lr_annealing=3e-5
lr_min_annealing=3e-6

n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
tp=(TP: 1 2 3 4 5 6 7 8)
pp=(PP: 1 2 3 4)
micro_batch_size=24
grad_accum_steps=4
vocab_size=256000

cpu_workers=16
wikipedia=False
freeze_layers=""
posterior_tokens=False
n_posterior_tokens=0
eval_iters=1

glu_activation=geglu
kv_channels=256
layernorm_epsilon=1e-6

seq_length=2048
}
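One way to sanity-check the batch settings at the end of gemma_2b_flavio.tconf is as a token budget. The sketch below assumes pure data parallelism over the 8 GPUs (the tp=1, pp=1 branch) and fully packed sequences, both of which are assumptions rather than anything the config guarantees; under them, the pre-annealing and annealing steps together land close to the 20B tokens referenced in the experiment directory name.

# Back-of-envelope token budget for the gemma_2b_flavio run.
# Assumes dp=8 (tp=1, pp=1) and fully packed 2048-token sequences.
micro_batch_size = 24
grad_accum_steps = 4
n_gpus = 8
seq_length = 2048
train_steps = 11430
train_steps_annealing = 1270

tokens_per_step = micro_batch_size * grad_accum_steps * n_gpus * seq_length
total_tokens = tokens_per_step * (train_steps + train_steps_annealing)
print(f"{tokens_per_step:,} tokens per optimizer step")   # 1,572,864
print(f"{total_tokens / 1e9:.1f}B tokens in total")        # ~20.0B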
multilinguality_megatron/ducttape/gemma_7B_20B_all_cleaned_mc4_parallel.tconf
ADDED
@@ -0,0 +1,280 @@
global {
model_type="gemma"
ducttape_output=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B
repo=/mnt/data/jpombal/multilinguality_megatron

external_model_dir=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/old_recipe_checkpoints
external_model_dir_annealing=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/old_recipe_checkpoints
model_path=/mnt/data_2/cache/models--google--gemma-7b/snapshots/bc0790ce8e02c6b2240e2b94bf01fb0453dc90f6
tokenizer_path=/mnt/data_2/cache/models--google--gemma-7b/snapshots/bc0790ce8e02c6b2240e2b94bf01fb0453dc90f6

tokenizer_type=PretrainedFromHF

dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en)
datamix_weights_annealing=""

dataset_path=(Dataset:
en=/mnt/data_2/shared/pre-training/tower_llm_data/en/data
es=/mnt/data_2/shared/pre-training/tower_llm_data/es/3/0000.json.gz
de=/mnt/data_2/shared/pre-training/tower_llm_data/de/2/0000.json.gz
fr=/mnt/data_2/shared/pre-training/tower_llm_data/fr/1/0000.json.gz
nl=/mnt/data_2/shared/pre-training/tower_llm_data/nl/0000.json.gz
pt=/mnt/data_2/shared/pre-training/tower_llm_data/pt/0000.json.gz
it=/mnt/data_2/shared/pre-training/tower_llm_data/it/0000.json.gz
ru=/mnt/data_2/shared/pre-training/tower_llm_data/ru/6/0000.json.gz
zh=/mnt/data_2/shared/pre-training/tower_llm_data/zh/0000.json.gz
ko=/mnt/data_2/shared/pre-training/tower_llm_data/ko/0000.json.gz
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
)

is_hf_dataset=(Dataset:
en=True
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=False
de_en=False
en_fr=False
fr_en=False
en_es=False
es_en=False
en_it=False
it_en=False
en_nl=False
nl_en=False
en_pt=False
pt_en=False
en_ru=False
ru_en=False
en_zh=False
zh_en=False
en_ko=False
ko_en=False
)

threshold=(Dataset:
en=516
es=275
de=611
fr=322
nl=649
pt=257
it=332
ru=334
zh=2041
ko=198
en_de=100000
de_en=100000
en_fr=100000
fr_en=100000
en_es=100000
es_en=100000
en_it=100000
it_en=100000
en_nl=100000
nl_en=100000
en_pt=100000
pt_en=100000
en_ru=100000
ru_en=100000
en_zh=100000
zh_en=100000
en_ko=100000
ko_en=100000
)

# roughly 67% for mc4, 33% for total parallel data
datamix_weights=(
DataMix:
mc4_parallel_uniform=(
Dataset:
en=670
es=670
de=670
fr=670
nl=670
pt=670
it=670
ru=670
zh=670
ko=670
en_de=183
de_en=183
en_fr=183
fr_en=183
en_es=183
es_en=183
en_it=183
it_en=183
en_nl=183
nl_en=183
en_pt=183
pt_en=183
en_ru=183
ru_en=183
en_zh=183
zh_en=183
en_ko=183
ko_en=183
)
)

# number such that final tokens for each language are around 1B
n_tokens=(Dataset:
en=1000000000
es=833333330
de=833333330
fr=833333330
nl=833333330
pt=833333330
it=833333330
ru=500000000
zh=13888888
ko=250000000
en_de=20000000
de_en=20000000
en_fr=20000000
fr_en=20000000
en_es=20000000
es_en=20000000
en_it=20000000
it_en=20000000
en_nl=20000000
nl_en=20000000
en_pt=20000000
pt_en=20000000
en_ru=20000000
ru_en=20000000
en_zh=20000000
zh_en=20000000
en_ko=20000000
ko_en=20000000
)

is_parallel=(Dataset:
en=False
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=True
de_en=True
en_fr=True
fr_en=True
en_es=True
es_en=True
en_it=True
it_en=True
en_nl=True
nl_en=True
en_pt=True
pt_en=True
en_ru=True
ru_en=True
en_zh=True
zh_en=True
en_ko=True
ko_en=True
)

lp=(Dataset:
en="none"
es="none"
de="none"
fr="none"
nl="none"
pt="none"
it="none"
ru="none"
zh="none"
ko="none"
en_de="en-de"
de_en="de-en"
en_fr="en-fr"
fr_en="fr-en"
en_es="en-es"
es_en="es-en"
en_it="en-it"
it_en="it-en"
en_nl="en-nl"
nl_en="nl-en"
en_pt="en-pt"
pt_en="pt-en"
en_ru="en-ru"
ru_en="ru-en"
en_zh="en-zh"
zh_en="zh-en"
en_ko="en-ko"
ko_en="ko-en"
)

min_perplexity=0

size=(Size: 2 7)

log_interval=1
save_interval=635
eval_interval=635
train_steps=12700
train_steps_annealing=0

lr_scheduler=cosine
warmup_steps=127
lr=3e-5
lr_min=3e-6
weight_decay=0.1

lr_scheduler_annealing=linear
warmup_steps_annealing=0
lr_annealing=3e-5
lr_min_annealing=3e-6

n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
tp=(TP: 1 2 3 4 5 6 7 8)
pp=(PP: 1 2 3 4)
micro_batch_size=2
grad_accum_steps=24
vocab_size=256000

cpu_workers=16
wikipedia=False
freeze_layers=""
posterior_tokens=False
n_posterior_tokens=0
eval_iters=1

glu_activation=geglu
kv_channels=256
layernorm_epsilon=1e-6

seq_length=4096
}
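The datamix_weights in this file are relative weights, so only their ratios matter. A minimal sketch of the normalization, assuming the loader simply divides each weight by the sum (the real mixing is done by the repo's data pipeline, so this is only an illustration): ten monolingual sources at 670 against eighteen translation directions at 183 gives roughly the 67% mC4 / 33% parallel split that the comment in the file aims for.

# Check the mc4_parallel_uniform weights: 10 monolingual x 670, 18 pairs x 183.
# Assumes weights are normalized by their sum; illustration only.
mono_total = 10 * 670
parallel_total = 18 * 183
total = mono_total + parallel_total
print(f"monolingual share: {mono_total / total:.1%}")   # ~67.0%
print(f"parallel share:    {parallel_total / total:.1%}")  # ~33.0%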
multilinguality_megatron/ducttape/llama_3_flavio.tconf
ADDED
@@ -0,0 +1,546 @@
global {
model_type="llama3"
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio
repo=/mnt/data/pmartins/code/multilinguality_megatron

external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/pre_annealing_checkpoints
external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/checkpoints_annealed
model_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/

tokenizer_type=PretrainedFromHF

dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions)
dataset_path=(Dataset:
en=/mnt/data_2/shared/tower_llm_data/en/data
en_synth=""
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
es_synth=""
de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
de_synth=""
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
fr_synth=""
nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
nl_synth=""
pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
pt_synth=""
it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
it_synth=""
ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
ru_synth=""
zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
zh_synth=""
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
ko_synth=""
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

is_hf_dataset=(Dataset:
en=True
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=False
de_en=False
en_fr=False
fr_en=False
en_es=False
es_en=False
en_it=False
it_en=False
en_nl=False
nl_en=False
en_pt=False
pt_en=False
en_ru=False
ru_en=False
en_zh=False
zh_en=False
en_ko=False
ko_en=False
en_synth=False
es_synth=False
de_synth=False
fr_synth=False
nl_synth=False
pt_synth=False
it_synth=False
ru_synth=False
zh_synth=False
ko_synth=False
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

threshold=(Dataset:
en=516
es=275
de=611
fr=322
nl=649
pt=257
it=332
ru=334
zh=2041
ko=198
en_de=100000
de_en=100000
en_fr=100000
fr_en=100000
en_es=100000
es_en=100000
en_it=100000
it_en=100000
en_nl=100000
nl_en=100000
en_pt=100000
pt_en=100000
en_ru=100000
ru_en=100000
en_zh=100000
zh_en=100000
en_ko=100000
ko_en=100000
en_synth=100000
es_synth=100000
de_synth=100000
fr_synth=100000
nl_synth=100000
pt_synth=100000
it_synth=100000
ru_synth=100000
zh_synth=100000
ko_synth=100000
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

# roughly 67% for mc4, 33% for total parallel data
datamix_weights=(
DataMix:
mc4_parallel_uniform=(
Dataset:
en=603
es=603
de=603
fr=603
nl=603
pt=603
it=603
ru=603
zh=603
ko=603
en_de=0
de_en=0
en_fr=0
fr_en=0
en_es=0
es_en=0
en_it=0
it_en=0
en_nl=0
nl_en=0
en_pt=0
pt_en=0
en_ru=0
ru_en=0
en_zh=0
zh_en=0
en_ko=0
ko_en=0
en_synth=67
es_synth=67
de_synth=67
fr_synth=67
nl_synth=67
pt_synth=67
it_synth=67
ru_synth=67
zh_synth=67
ko_synth=67
instructions=0
en_de_pre_annealing=183
de_en_pre_annealing=183
en_fr_pre_annealing=183
fr_en_pre_annealing=183
en_es_pre_annealing=183
es_en_pre_annealing=183
en_it_pre_annealing=183
it_en_pre_annealing=183
en_nl_pre_annealing=183
nl_en_pre_annealing=183
en_pt_pre_annealing=183
pt_en_pre_annealing=183
en_ru_pre_annealing=183
ru_en_pre_annealing=183
en_zh_pre_annealing=183
zh_en_pre_annealing=183
en_ko_pre_annealing=183
ko_en_pre_annealing=183
)
)

datamix_weights_annealing=(
DataMix:
mc4_parallel_uniform=(
Dataset:
en=0
es=0
de=0
fr=0
nl=0
pt=0
it=0
ru=0
zh=0
ko=0
en_de=833
de_en=833
en_fr=833
fr_en=833
en_es=833
es_en=833
en_it=833
it_en=833
en_nl=833
nl_en=833
en_pt=833
pt_en=833
en_ru=833
ru_en=833
en_zh=833
zh_en=833
en_ko=833
ko_en=833
en_synth=0
es_synth=0
de_synth=0
fr_synth=0
nl_synth=0
pt_synth=0
it_synth=0
ru_synth=0
zh_synth=0
ko_synth=0
instructions=85000
en_de_pre_annealing=0
de_en_pre_annealing=0
en_fr_pre_annealing=0
fr_en_pre_annealing=0
en_es_pre_annealing=0
es_en_pre_annealing=0
en_it_pre_annealing=0
it_en_pre_annealing=0
en_nl_pre_annealing=0
nl_en_pre_annealing=0
en_pt_pre_annealing=0
pt_en_pre_annealing=0
en_ru_pre_annealing=0
ru_en_pre_annealing=0
en_zh_pre_annealing=0
zh_en_pre_annealing=0
en_ko_pre_annealing=0
ko_en_pre_annealing=0
)
)


# number such that final tokens for each language are around 1B
n_tokens=(Dataset:
en=1000000000
es=833333330
de=833333330
fr=833333330
nl=833333330
pt=833333330
it=833333330
ru=500000000
zh=13888888
ko=250000000
en_de=20000000
de_en=20000000
en_fr=20000000
fr_en=20000000
en_es=20000000
es_en=20000000
en_it=20000000
it_en=20000000
en_nl=20000000
nl_en=20000000
en_pt=20000000
pt_en=20000000
en_ru=20000000
ru_en=20000000
en_zh=20000000
zh_en=20000000
en_ko=20000000
ko_en=20000000
en_synth=20000000
es_synth=20000000
de_synth=20000000
fr_synth=20000000
nl_synth=20000000
pt_synth=20000000
it_synth=20000000
ru_synth=20000000
zh_synth=20000000
ko_synth=20000000
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

is_parallel=(Dataset:
en=False
es=False
de=False
fr=False
nl=False
pt=False
it=False
ru=False
zh=False
ko=False
en_de=True
de_en=True
en_fr=True
fr_en=True
en_es=True
es_en=True
en_it=True
it_en=True
en_nl=True
nl_en=True
en_pt=True
pt_en=True
en_ru=True
ru_en=True
en_zh=True
zh_en=True
en_ko=True
ko_en=True
en_synth=False
es_synth=False
de_synth=False
fr_synth=False
nl_synth=False
pt_synth=False
it_synth=False
ru_synth=False
zh_synth=False
ko_synth=False
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

lp=(Dataset:
en=""
es=""
de=""
fr=""
nl=""
pt=""
it=""
ru=""
zh=""
ko=""
en_de="en-de"
de_en="de-en"
en_fr="en-fr"
fr_en="fr-en"
en_es="en-es"
es_en="es-en"
en_it="en-it"
it_en="it-en"
en_nl="en-nl"
nl_en="nl-en"
en_pt="en-pt"
pt_en="pt-en"
en_ru="en-ru"
ru_en="ru-en"
en_zh="en-zh"
zh_en="zh-en"
en_ko="en-ko"
ko_en="ko-en"
en_synth=""
es_synth=""
de_synth=""
fr_synth=""
nl_synth=""
pt_synth=""
it_synth=""
ru_synth=""
zh_synth=""
ko_synth=""
instructions="oi"
en_de_pre_annealing="oi"
de_en_pre_annealing="oi"
en_fr_pre_annealing="oi"
fr_en_pre_annealing="oi"
en_es_pre_annealing="oi"
es_en_pre_annealing="oi"
en_it_pre_annealing="oi"
it_en_pre_annealing="oi"
en_nl_pre_annealing="oi"
nl_en_pre_annealing="oi"
en_pt_pre_annealing="oi"
pt_en_pre_annealing="oi"
en_ru_pre_annealing="oi"
ru_en_pre_annealing="oi"
en_zh_pre_annealing="oi"
zh_en_pre_annealing="oi"
en_ko_pre_annealing="oi"
ko_en_pre_annealing="oi"
)

min_perplexity=50

size=(Size: 8)

log_interval=1
save_interval=635
eval_interval=635
train_steps=11430
train_steps_annealing=1270

lr_scheduler=constant
warmup_steps=32
lr=3e-5
lr_min=3e-6
weight_decay=0.1

lr_scheduler_annealing=linear
warmup_steps_annealing=0
lr_annealing=3e-5
lr_min_annealing=3e-6

n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
tp=(TP: 1 2 3 4 5 6 7 8)
pp=(PP: 1 2 3 4)
micro_batch_size=4
grad_accum_steps=12
vocab_size=128256

cpu_workers=16
wikipedia=False
freeze_layers=""
posterior_tokens=False
n_posterior_tokens=0
eval_iters=1

seq_length=4096

glu_activation=swiglu
kv_channels=""
layernorm_epsilon=1e-5
}
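llama_3_flavio.tconf describes a two-phase run: a constant 3e-5 learning rate (after a 32-step warmup) for the 11430 pre-annealing steps, then a separate 1270-step annealing phase whose scheduler is linear and whose datamix shifts to parallel data and instructions. The sketch below traces the implied learning-rate curve under the assumption that the annealing phase decays linearly from lr_annealing to lr_min_annealing; the exact shape is whatever Megatron's scheduler implements, so treat this as a reading aid rather than the implementation.

# Implied LR curve for the two-phase llama_3_flavio schedule (assumption:
# linear warmup, constant plateau, then linear decay in the annealing run).
warmup_steps, train_steps, train_steps_annealing = 32, 11430, 1270
lr, lr_annealing, lr_min_annealing = 3e-5, 3e-5, 3e-6

def learning_rate(step: int) -> float:
    if step < warmup_steps:          # linear warmup
        return lr * (step + 1) / warmup_steps
    if step < train_steps:           # constant pre-annealing phase
        return lr
    frac = (step - train_steps) / train_steps_annealing
    return lr_annealing - frac * (lr_annealing - lr_min_annealing)

for step in (0, 31, 6000, 11430, 12065, 12699):
    print(step, f"{learning_rate(step):.2e}")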
multilinguality_megatron/ducttape/llama_3_flavio_wmt_annealing.tconf
ADDED
@@ -0,0 +1,570 @@
1 |
+
global {
|
2 |
+
model_type="llama3"
|
3 |
+
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio
|
4 |
+
repo=/mnt/data/pmartins/code/multilinguality_megatron
|
5 |
+
|
6 |
+
external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/pre_annealing_checkpoints
|
7 |
+
external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/checkpoints_annealed_wmt
|
8 |
+
model_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/
|
9 |
+
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/
|
10 |
+
|
11 |
+
tokenizer_type=PretrainedFromHF
|
12 |
+
|
13 |
+
dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions en_de_wmt en_ru_wmt en_zh_wmt)
|
14 |
+
dataset_path=(Dataset:
|
15 |
+
en=/mnt/data_2/shared/tower_llm_data/en/data
|
16 |
+
en_synth=""
|
17 |
+
es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz
|
18 |
+
es_synth=""
|
19 |
+
    de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz
    de_synth=""
    fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz
    fr_synth=""
    nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz
    nl_synth=""
    pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz
    pt_synth=""
    it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz
    it_synth=""
    ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz
    ru_synth=""
    zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz
    zh_synth=""
    ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz
    ko_synth=""
    en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75"
    instructions="oi"
    en_de_pre_annealing="oi"
    de_en_pre_annealing="oi"
    en_fr_pre_annealing="oi"
    fr_en_pre_annealing="oi"
    en_es_pre_annealing="oi"
    es_en_pre_annealing="oi"
    en_it_pre_annealing="oi"
    it_en_pre_annealing="oi"
    en_nl_pre_annealing="oi"
    nl_en_pre_annealing="oi"
    en_pt_pre_annealing="oi"
    pt_en_pre_annealing="oi"
    en_ru_pre_annealing="oi"
    ru_en_pre_annealing="oi"
    en_zh_pre_annealing="oi"
    zh_en_pre_annealing="oi"
    en_ko_pre_annealing="oi"
    ko_en_pre_annealing="oi"
    en_de_wmt="oi"
    en_ru_wmt="oi"
    en_zh_wmt="oi"
)

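The monolingual entries above point at gzipped JSON-lines shards on local disk. As a quick sanity check one can peek at a shard as below; this is a minimal sketch only, and it assumes each record carries a "text" field, which the config itself does not guarantee.

# Minimal sketch (not part of the repo): peek at one monolingual shard listed above.
# Assumes JSON-lines records with a "text" field; that field name is an assumption.
import gzip
import json

path = "/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz"  # path taken from the config above

with gzip.open(path, "rt", encoding="utf-8") as f:
    for i, line in enumerate(f):
        record = json.loads(line)
        print(sorted(record.keys()), str(record.get("text", ""))[:80])
        if i == 2:  # the first few records are enough for a sanity check
            break
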
is_hf_dataset=(Dataset:
    en=True
    es=False
    de=False
    fr=False
    nl=False
    pt=False
    it=False
    ru=False
    zh=False
    ko=False
    en_de=False
    de_en=False
    en_fr=False
    fr_en=False
    en_es=False
    es_en=False
    en_it=False
    it_en=False
    en_nl=False
    nl_en=False
    en_pt=False
    pt_en=False
    en_ru=False
    ru_en=False
    en_zh=False
    zh_en=False
    en_ko=False
    ko_en=False
    en_synth=False
    es_synth=False
    de_synth=False
    fr_synth=False
    nl_synth=False
    pt_synth=False
    it_synth=False
    ru_synth=False
    zh_synth=False
    ko_synth=False
    instructions="oi"
    en_de_pre_annealing="oi"
    de_en_pre_annealing="oi"
    en_fr_pre_annealing="oi"
    fr_en_pre_annealing="oi"
    en_es_pre_annealing="oi"
    es_en_pre_annealing="oi"
    en_it_pre_annealing="oi"
    it_en_pre_annealing="oi"
    en_nl_pre_annealing="oi"
    nl_en_pre_annealing="oi"
    en_pt_pre_annealing="oi"
    pt_en_pre_annealing="oi"
    en_ru_pre_annealing="oi"
    ru_en_pre_annealing="oi"
    en_zh_pre_annealing="oi"
    zh_en_pre_annealing="oi"
    en_ko_pre_annealing="oi"
    ko_en_pre_annealing="oi"
    en_de_wmt="oi"
    en_ru_wmt="oi"
    en_zh_wmt="oi"
)

threshold=(Dataset:
    en=516
    es=275
    de=611
    fr=322
    nl=649
    pt=257
    it=332
    ru=334
    zh=2041
    ko=198
    en_de=100000
    de_en=100000
    en_fr=100000
    fr_en=100000
    en_es=100000
    es_en=100000
    en_it=100000
    it_en=100000
    en_nl=100000
    nl_en=100000
    en_pt=100000
    pt_en=100000
    en_ru=100000
    ru_en=100000
    en_zh=100000
    zh_en=100000
    en_ko=100000
    ko_en=100000
    en_synth=100000
    es_synth=100000
    de_synth=100000
    fr_synth=100000
    nl_synth=100000
    pt_synth=100000
    it_synth=100000
    ru_synth=100000
    zh_synth=100000
    ko_synth=100000
    instructions="oi"
    en_de_pre_annealing="oi"
    de_en_pre_annealing="oi"
    en_fr_pre_annealing="oi"
    fr_en_pre_annealing="oi"
    en_es_pre_annealing="oi"
    es_en_pre_annealing="oi"
    en_it_pre_annealing="oi"
    it_en_pre_annealing="oi"
    en_nl_pre_annealing="oi"
    nl_en_pre_annealing="oi"
    en_pt_pre_annealing="oi"
    pt_en_pre_annealing="oi"
    en_ru_pre_annealing="oi"
    ru_en_pre_annealing="oi"
    en_zh_pre_annealing="oi"
    zh_en_pre_annealing="oi"
    en_ko_pre_annealing="oi"
    ko_en_pre_annealing="oi"
    en_de_wmt="oi"
    en_ru_wmt="oi"
    en_zh_wmt="oi"
)

# roughly 67% for mc4, 33% for total parallel data
datamix_weights=(
    DataMix:
    mc4_parallel_uniform=(
        Dataset:
        en=603
        es=603
        de=603
        fr=603
        nl=603
        pt=603
        it=603
        ru=603
        zh=603
        ko=603
        en_de=0
        de_en=0
        en_fr=0
        fr_en=0
        en_es=0
        es_en=0
        en_it=0
        it_en=0
        en_nl=0
        nl_en=0
        en_pt=0
        pt_en=0
        en_ru=0
        ru_en=0
        en_zh=0
        zh_en=0
        en_ko=0
        ko_en=0
        en_synth=67
        es_synth=67
        de_synth=67
        fr_synth=67
        nl_synth=67
        pt_synth=67
        it_synth=67
        ru_synth=67
        zh_synth=67
        ko_synth=67
        instructions=0
        en_de_pre_annealing=183
        de_en_pre_annealing=183
        en_fr_pre_annealing=183
        fr_en_pre_annealing=183
        en_es_pre_annealing=183
        es_en_pre_annealing=183
        en_it_pre_annealing=183
        it_en_pre_annealing=183
        en_nl_pre_annealing=183
        nl_en_pre_annealing=183
        en_pt_pre_annealing=183
        pt_en_pre_annealing=183
        en_ru_pre_annealing=183
        ru_en_pre_annealing=183
        en_zh_pre_annealing=183
        zh_en_pre_annealing=183
        en_ko_pre_annealing=183
        ko_en_pre_annealing=183
        en_de_wmt=0
        en_ru_wmt=0
        en_zh_wmt=0
    )
)

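The comment above can be checked by treating the weights as relative sampling proportions (an assumption about how the training code consumes them): ten monolingual web streams at 603, ten synthetic streams at 67, and eighteen pre-annealing parallel directions at 183.

# Back-of-the-envelope check of the 67% / 33% split claimed above.
mono = 10 * 603        # en..ko web data
synth = 10 * 67        # *_synth monolingual data
parallel = 18 * 183    # *_pre_annealing parallel directions
total = mono + synth + parallel
print(f"monolingual + synthetic: {(mono + synth) / total:.1%}")  # ~67.0%
print(f"parallel:                {parallel / total:.1%}")        # ~33.0%
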
datamix_weights_annealing=(
    DataMix:
    mc4_parallel_uniform=(
        Dataset:
        en=0
        es=0
        de=0
        fr=0
        nl=0
        pt=0
        it=0
        ru=0
        zh=0
        ko=0
        en_de_wmt=833
        en_ru_wmt=833
        en_zh_wmt=833
        en_de=0
        de_en=833
        en_fr=833
        fr_en=833
        en_es=833
        es_en=833
        en_it=833
        it_en=833
        en_nl=833
        nl_en=833
        en_pt=833
        pt_en=833
        en_ru=0
        ru_en=833
        en_zh=0
        zh_en=833
        en_ko=833
        ko_en=833
        en_synth=0
        es_synth=0
        de_synth=0
        fr_synth=0
        nl_synth=0
        pt_synth=0
        it_synth=0
        ru_synth=0
        zh_synth=0
        ko_synth=0
        instructions=85000
        en_de_pre_annealing=0
        de_en_pre_annealing=0
        en_fr_pre_annealing=0
        fr_en_pre_annealing=0
        en_es_pre_annealing=0
        es_en_pre_annealing=0
        en_it_pre_annealing=0
        it_en_pre_annealing=0
        en_nl_pre_annealing=0
        nl_en_pre_annealing=0
        en_pt_pre_annealing=0
        pt_en_pre_annealing=0
        en_ru_pre_annealing=0
        ru_en_pre_annealing=0
        en_zh_pre_annealing=0
        zh_en_pre_annealing=0
        en_ko_pre_annealing=0
        ko_en_pre_annealing=0
    )
)

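In the annealing mix the web and synthetic streams are zeroed out and weight shifts to instruction data plus a reduced set of translation directions (en_de, en_ru and en_zh are zeroed, presumably because their WMT variants take their place). Reading the weights as relative proportions again, a rough composition is:

# Rough composition of the annealing mix (same caveat: weights read as proportions).
wmt = 3 * 833            # en_de_wmt, en_ru_wmt, en_zh_wmt
parallel = 15 * 833      # remaining translation directions left at 833
instructions = 85000
total = wmt + parallel + instructions
print(f"instructions: {instructions / total:.1%}")      # ~85.0%
print(f"translation:  {(wmt + parallel) / total:.1%}")  # ~15.0%
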
# numbers chosen so that the final token count for each language is around 1B
n_tokens=(Dataset:
    en=1000000000
    es=833333330
    de=833333330
    fr=833333330
    nl=833333330
    pt=833333330
    it=833333330
    ru=500000000
    zh=13888888
    ko=250000000
    en_de_wmt="oi"
    en_ru_wmt="oi"
    en_zh_wmt="oi"
    en_de=20000000
    de_en=20000000
    en_fr=20000000
    fr_en=20000000
    en_es=20000000
    es_en=20000000
    en_it=20000000
    it_en=20000000
    en_nl=20000000
    nl_en=20000000
    en_pt=20000000
    pt_en=20000000
    en_ru=20000000
    ru_en=20000000
    en_zh=20000000
    zh_en=20000000
    en_ko=20000000
    ko_en=20000000
    en_synth=20000000
    es_synth=20000000
    de_synth=20000000
    fr_synth=20000000
    nl_synth=20000000
    pt_synth=20000000
    it_synth=20000000
    ru_synth=20000000
    zh_synth=20000000
    ko_synth=20000000
    instructions="oi"
    en_de_pre_annealing="oi"
    de_en_pre_annealing="oi"
    en_fr_pre_annealing="oi"
    fr_en_pre_annealing="oi"
    en_es_pre_annealing="oi"
    es_en_pre_annealing="oi"
    en_it_pre_annealing="oi"
    it_en_pre_annealing="oi"
    en_nl_pre_annealing="oi"
    nl_en_pre_annealing="oi"
    en_pt_pre_annealing="oi"
    pt_en_pre_annealing="oi"
    en_ru_pre_annealing="oi"
    ru_en_pre_annealing="oi"
    en_zh_pre_annealing="oi"
    zh_en_pre_annealing="oi"
    en_ko_pre_annealing="oi"
    ko_en_pre_annealing="oi"
)

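The n_tokens values cap how much of each dataset is drawn. Purely as an illustration of the control flow, a cap like this can be enforced while streaming documents; the whitespace token count below is a crude stand-in for the model tokenizer (it will undercount for zh and ko) and the helper is hypothetical, not part of the repo.

# Hypothetical sketch of enforcing a per-dataset token budget such as n_tokens above.
from typing import Iterable, Iterator

def take_n_tokens(docs: Iterable[str], budget: int) -> Iterator[str]:
    used = 0
    for doc in docs:
        n = len(doc.split())  # crude stand-in for real tokenization
        if used + n > budget:
            break
        used += n
        yield doc

# usage idea: keep roughly the first 1B (approximate) tokens of an English stream
# capped = take_n_tokens(english_docs, 1_000_000_000)
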
is_parallel=(Dataset:
    en=False
    es=False
    de=False
    fr=False
    nl=False
    pt=False
    it=False
    ru=False
    zh=False
    ko=False
    en_de_wmt="oi"
    en_ru_wmt="oi"
    en_zh_wmt="oi"
    en_de=True
    de_en=True
    en_fr=True
    fr_en=True
    en_es=True
    es_en=True
    en_it=True
    it_en=True
    en_nl=True
    nl_en=True
    en_pt=True
    pt_en=True
    en_ru=True
    ru_en=True
    en_zh=True
    zh_en=True
    en_ko=True
    ko_en=True
    en_synth=False
    es_synth=False
    de_synth=False
    fr_synth=False
    nl_synth=False
    pt_synth=False
    it_synth=False
    ru_synth=False
    zh_synth=False
    ko_synth=False
    instructions="oi"
    en_de_pre_annealing="oi"
    de_en_pre_annealing="oi"
    en_fr_pre_annealing="oi"
    fr_en_pre_annealing="oi"
    en_es_pre_annealing="oi"
    es_en_pre_annealing="oi"
    en_it_pre_annealing="oi"
    it_en_pre_annealing="oi"
    en_nl_pre_annealing="oi"
    nl_en_pre_annealing="oi"
    en_pt_pre_annealing="oi"
    pt_en_pre_annealing="oi"
    en_ru_pre_annealing="oi"
    ru_en_pre_annealing="oi"
    en_zh_pre_annealing="oi"
    zh_en_pre_annealing="oi"
    en_ko_pre_annealing="oi"
    ko_en_pre_annealing="oi"
)

lp=(Dataset:
    en=""
    es=""
    de=""
    fr=""
    nl=""
    pt=""
    it=""
    ru=""
    zh=""
    ko=""
    en_de_wmt="oi"
    en_ru_wmt="oi"
    en_zh_wmt="oi"
    en_de="en-de"
    de_en="de-en"
    en_fr="en-fr"
    fr_en="fr-en"
    en_es="en-es"
    es_en="es-en"
    en_it="en-it"
    it_en="it-en"
    en_nl="en-nl"
    nl_en="nl-en"
    en_pt="en-pt"
    pt_en="pt-en"
    en_ru="en-ru"
    ru_en="ru-en"
    en_zh="en-zh"
    zh_en="zh-en"
    en_ko="en-ko"
    ko_en="ko-en"
    en_synth=""
    es_synth=""
    de_synth=""
    fr_synth=""
    nl_synth=""
    pt_synth=""
    it_synth=""
    ru_synth=""
    zh_synth=""
    ko_synth=""
    instructions="oi"
    en_de_pre_annealing="oi"
    de_en_pre_annealing="oi"
    en_fr_pre_annealing="oi"
    fr_en_pre_annealing="oi"
    en_es_pre_annealing="oi"
    es_en_pre_annealing="oi"
    en_it_pre_annealing="oi"
    it_en_pre_annealing="oi"
    en_nl_pre_annealing="oi"
    nl_en_pre_annealing="oi"
    en_pt_pre_annealing="oi"
    pt_en_pre_annealing="oi"
    en_ru_pre_annealing="oi"
    ru_en_pre_annealing="oi"
    en_zh_pre_annealing="oi"
    zh_en_pre_annealing="oi"
    en_ko_pre_annealing="oi"
    ko_en_pre_annealing="oi"
)

min_perplexity=50

size=(Size: 8)

log_interval=1
save_interval=635
eval_interval=635
train_steps=11430
train_steps_annealing=1270

lr_scheduler=constant
warmup_steps=32
lr=3e-5
lr_min=3e-6
weight_decay=0.1

lr_scheduler_annealing=linear
warmup_steps_annealing=0
lr_annealing=3e-5
lr_min_annealing=3e-6

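The main run keeps the learning rate flat at 3e-5 after a 32-step warmup, while the annealing run decays it linearly towards 3e-6 over its 1270 steps. A small sketch of those shapes, assuming linear warmup from zero and that the scheduler names behave as advertised (both are assumptions about how these flags are consumed):

# Illustrative learning-rate shapes implied by the settings above.
def lr_main(step: int, peak: float = 3e-5, warmup: int = 32) -> float:
    if step < warmup:
        return peak * (step + 1) / warmup  # assumed linear warmup from 0
    return peak                            # lr_scheduler=constant

def lr_annealing(step: int, total: int = 1270, peak: float = 3e-5, floor: float = 3e-6) -> float:
    frac = min(step, total) / total
    return peak - (peak - floor) * frac    # lr_scheduler_annealing=linear, down to lr_min_annealing
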
n_gpus=8
gpu_ids=0,1,2,3,4,5,6,7
tp=(TP: 1 2 3 4 5 6 7 8)
pp=(PP: 1 2 3 4)
micro_batch_size=4
grad_accum_steps=12
vocab_size=128256

cpu_workers=16
wikipedia=False
freeze_layers=""
posterior_tokens=False
n_posterior_tokens=0
eval_iters=1

seq_length=4096

glu_activation=swiglu
kv_channels=""
layernorm_epsilon=1e-5
}
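For orientation, the token budget implied by these settings can be estimated under the usual Megatron-style accounting, where the global batch is micro_batch_size × grad_accum_steps × data-parallel size. The tp=2, pp=1 choice below is just one point from the TP/PP grids above, not something the config fixes, so treat this as a back-of-the-envelope sketch rather than a statement of what the run consumed.

# Back-of-the-envelope token budget (illustrative; the tp/pp choice is an example).
n_gpus, tp, pp = 8, 2, 1
dp = n_gpus // (tp * pp)                 # data-parallel replicas
global_batch = 4 * 12 * dp               # micro_batch_size * grad_accum_steps * dp
tokens_per_step = global_batch * 4096    # seq_length
print(f"main run:  ~{tokens_per_step * 11430 / 1e9:.1f}B tokens")  # ~9.0B at dp=4
print(f"annealing: ~{tokens_per_step * 1270 / 1e9:.1f}B tokens")   # ~1.0B at dp=4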