diff --git a/.gitattributes b/.gitattributes index 4160148ff4e6b806773224280f455916dd3d5497..e70488afdc0be0a5e05c761fd955b8e30d64026d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -54,3 +54,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.jpeg filter=lfs diff=lfs merge=lfs -text *.webp filter=lfs diff=lfs merge=lfs -text data_text_document.idx filter=lfs diff=lfs merge=lfs -text +multilinguality_megatron/megatron/fused_kernels/build/fused_mix_prec_layer_norm_cuda.so filter=lfs diff=lfs merge=lfs -text +multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda_kernel.cuda.o filter=lfs diff=lfs merge=lfs -text diff --git a/multilinguality_megatron/.gitignore b/multilinguality_megatron/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..bf15bfda50ca3ec466ee45f171ace1ad9c52e027 --- /dev/null +++ b/multilinguality_megatron/.gitignore @@ -0,0 +1,4 @@ +__pycache__ +build +.vscode +perplexity_texts \ No newline at end of file diff --git a/multilinguality_megatron/LICENSE b/multilinguality_megatron/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..281fde95a677d5faceb5e2cf6a69fcdf67cc0c33 --- /dev/null +++ b/multilinguality_megatron/LICENSE @@ -0,0 +1,376 @@ +The following applies to all files unless otherwise noted: + +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-- + +This repository also contains code from Hugging Face Inc., Google Research, +Facebook (from their Fairseq and Dino projects), Microsoft(from their +Swin-Transformer project)and Philip Popien. Files from these +organizations have notices at the top of each file. Below are +licenses used in those files, as indicated. + + +------------- LICENSE FOR Facebook, huggingface and Google Research code -------------- + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------- LICENSE FOR Facebook Fairseq code -------------- + +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------- LICENSE FOR Mircrosoft Swin transformer code -------------- + +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE + + +--------------- NVIDIA Source Code License for SegFormer ----------------- +1. Definitions + +“Licensor” means any person or entity that distributes its Work. + +“Software” means the original work of authorship made available under this +License. + +“Work” means the Software and any additions to or derivative works of the +Software that are made available under this License. + +The terms “reproduce,” “reproduction,” “derivative works,” and +“distribution” have the meaning as provided under U.S. copyright law; +provided, however, that for the purposes of this License, derivative works +shall not include works that remain separable from, or merely link +(or bind by name) to the interfaces of, the Work. + +Works, including the Software, are “made available” under this License by +including in or with the Work either (a) a copyright notice referencing +the applicability of this License to the Work, or (b) a copy of this License. + +2. License Grant + +2.1 Copyright Grant. 
Subject to the terms and conditions of this License, +each Licensor grants to you a perpetual, worldwide, non-exclusive, +royalty-free, copyright license to reproduce, prepare derivative works of, +publicly display, publicly perform, sublicense and distribute its Work +and any resulting derivative works in any form. + +3. Limitations + +3.1 Redistribution. You may reproduce or distribute the Work only if +(a) you do so under this License, (b) you include a complete copy of this +License with your distribution, and (c) you retain without modification any +copyright, patent, trademark, or attribution notices that are present +in the Work. + +3.2 Derivative Works. You may specify that additional or different terms +apply to the use, reproduction, and distribution of your derivative works +of the Work (“Your Terms”) only if (a) Your Terms provide that the use +limitation in Section 3.3 applies to your derivative works, and (b) you +identify the specific derivative works that are subject to Your Terms. +Notwithstanding Your Terms, this License (including the redistribution +requirements in Section 3.1) will continue to apply to the Work itself. + +3.3 Use Limitation. The Work and any derivative works thereof only may +be used or intended for use non-commercially. Notwithstanding the +foregoing, NVIDIA and its affiliates may use the Work and any derivative +works commercially. As used herein, “non-commercially” means for research +or evaluation purposes only. + +3.4 Patent Claims. If you bring or threaten to bring a patent claim against +any Licensor (including any claim, cross-claim or counterclaim in a lawsuit) +to enforce any patents that you allege are infringed by any Work, then +your rights under this License from such Licensor (including the grant +in Section 2.1) will terminate immediately. + +3.5 Trademarks. This License does not grant any rights to use any Licensor’s +or its affiliates’ names, logos, or trademarks, except as necessary to +reproduce the notices described in this License. + +3.6 Termination. If you violate any term of this License, then your rights +under this License (including the grant in Section 2.1) will terminate +immediately. + +4. Disclaimer of Warranty. + +THE WORK IS PROVIDED “AS IS” WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT. +YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE. + +5. Limitation of Liability. + +EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL +THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE +SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, +INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT +OF OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK +(INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, +LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER +COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN +ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
+ + diff --git a/multilinguality_megatron/README.md b/multilinguality_megatron/README.md new file mode 100644 index 0000000000000000000000000000000000000000..499c96be47cd57e5288042a993f1d0a99cef4da9 --- /dev/null +++ b/multilinguality_megatron/README.md @@ -0,0 +1,43 @@ +# Installation Instructions + +As a prerequisite, make sure you have [ducttape](https://github.com/CoderPat/ducttape) and [(mini)conda](https://docs.conda.io/en/latest/miniconda.html) installed. + +First, clone this repository. + +Then, to create a new conda environment with all the necessary dependencies, run the following command: + +```bash +export CONDA_HOME="/path/to/(mini)conda3" +bash setup/conda.sh +``` + +# Training + +## Data format + +Before training, you must preprocess the training data. The data to be preprocessed should be a `json` file with one object per line, in the following format: +```json +{"text": ""} +{"text": ""} +``` +Note that the preprocessing script packs observations together into sequences of a specified length and separates each instance (json line) with the tokenizer's EOS token. + +Then, run the bash scripts in this order: + +```bash +./preprocess_data.sh [OPTIONS] +./convert2megatron.sh [OPTIONS] +./model_sharding.sh [OPTIONS] +./continue_pretraining.sh [OPTIONS] +``` +>NOTE: each of these commands may be run with the `--help` flag, which explains how to use each argument. + +For example, for a continued pretraining run with Llama 2 7B on datasets `d1` and `d2` and 8 GPUs, run the following: + +```bash +> ./preprocess_data.sh --dataset_json= --dataset_bin= --vocab_file=/tokenizer.model --repo= +> ./preprocess_data.sh --dataset_json= --dataset_bin= --vocab_file=/tokenizer.model --repo= +> ./convert2megatron.sh --megatron_model= --model_path= --size=7 --repo= +> ./model_sharding.sh --megatron_model= --sharded_model= --tp=8 --pp=1 --vocab_size=32000 --repo= +> ./continue_pretraining.sh --data_path="1 d1 1 d2" --megatron_model= --model_dir= --tokenizer_path=/tokenizer.model --tp=8 --pp=1 [TRAINING_ARGS] +``` \ No newline at end of file diff --git a/multilinguality_megatron/__pycache__/finetune.cpython-39.pyc b/multilinguality_megatron/__pycache__/finetune.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..315cb066df4493f88e020bbc764a78ccdd87dbbd Binary files /dev/null and b/multilinguality_megatron/__pycache__/finetune.cpython-39.pyc differ diff --git a/multilinguality_megatron/ablation_eval_pipeline.sh b/multilinguality_megatron/ablation_eval_pipeline.sh new file mode 100644 index 0000000000000000000000000000000000000000..d7bdd44caa61993219d2f7f1c44c0f9ab4ad66fc --- /dev/null +++ b/multilinguality_megatron/ablation_eval_pipeline.sh @@ -0,0 +1,18 @@ +# bash script to evaluate a given model on wmt23, flores, ape, gec, standard benchmarks, and perplexity, each evaluation running in parallel on its own GPU + +# wmt23, flores, ape, gec, standard benchmarks use tower-eval +TOWER_EVAL_DIR=/mnt/data/jpombal/tower-eval +cd $TOWER_EVAL_DIR +source $TOWER_EVAL_DIR/tower-eval-env/bin/activate + +CUDA_VISIBLE_DEVICES=0 python $TOWER_EVAL_DIR/tower_eval/cli.py lm_eval --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/std_bench.yaml & +CUDA_VISIBLE_DEVICES=1 python $TOWER_EVAL_DIR/tower_eval/cli.py gen-eval --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/mt.yaml & +CUDA_VISIBLE_DEVICES=2 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_flavio_final.yaml & +#CUDA_VISIBLE_DEVICES=3 python 
$TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_flaviarlos_sft.yaml & +#CUDA_VISIBLE_DEVICES=4 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_carlos_no_mt_annealed_sft.yaml +# CUDA_VISIBLE_DEVICES=2 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_porfirio_pre_annealing.yaml & +# CUDA_VISIBLE_DEVICES=3 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_porfirio_sft.yaml & +# CUDA_VISIBLE_DEVICES=4 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_carlos_sft.yaml & +# CUDA_VISIBLE_DEVICES=5 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_carlos_annealed_sft.yaml & +# CUDA_VISIBLE_DEVICES=6 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_flavio_sft.yaml & +# CUDA_VISIBLE_DEVICES=7 python $TOWER_EVAL_DIR/tower_eval/cli.py evaluate --config /mnt/data/jpombal/tower-eval/local_configs/cp_ablations/perplexity_porfirio_annealed.yaml & \ No newline at end of file diff --git a/multilinguality_megatron/continue_pretraining.sh b/multilinguality_megatron/continue_pretraining.sh new file mode 100644 index 0000000000000000000000000000000000000000..d3af3c66b3f830b94e859113fa738b4ac0214ea5 --- /dev/null +++ b/multilinguality_megatron/continue_pretraining.sh @@ -0,0 +1,185 @@ +# This script will try to run a task *outside* any specified submitter +# Note: This script is for archival; it is not actually run by ducttape +# unset CUDA_VISIBLE_DEVICES +echo $CUDA_VISIBLE_DEVICES + +data_path="1 spgi_vox_mls_text_1b/data/data_text_document" +megatron_model="spgi_vox_mls_text_1b/shards" +model_dir="spgi_vox_mls_text_1b/ckpt" +tokenizer_path="spgi_vox_mls_text_1b/new_extended_tokenizer/tokenizer.model" +tp="2" +pp="1" + +# --wandb_logger \ +# --wandb_id "hajmola" \ +# --wandb_project "Megatron" \ +# --wandb_entity "hajmola" \ +# --wandb_api_key "c4a95af43e910d14b0eca23fbb8165f94944d5af" \ + +# optimization arguments; self-explanatory. Intervals and steps are in terms of training optimizer steps +grad_accum_steps="12" +micro_batch_size="12" +warmup_steps="13" +eval_interval="500" +lr="3e-5" #lr="3e-5" +log_interval="10" +lr_min="3e-6" #lr_min="3e-6" +lr_scheduler="cosine" + +# infra arguments +save_interval="250" +n_gpus="2" +repo="multilinguality_megatron" +gpu_ids="4,5" +train_steps="1000" + + +# Parse command-line arguments +for arg in "$@" +do + case $arg in + --help) + echo "Usage: ./script.sh [OPTIONS]" + echo "Options:" + echo " --data_path=PATH Path to dataset. Should have the form of ..., where the integers determine the data's relative weight in the training set. If every integer is equal, then the data is uniformly sampled." 
+ echo " --megatron_model=PATH Path to sharded megatron model" + echo " --model_dir=PATH folder to save model checkpoints; if this has a checkpoint, it will be used to continue training" + echo " --tokenizer_path=PATH Path to tokenizer.model of original HF model" + echo " --tp=NUMBER Number of shards model is divided in" + echo " --pp=NUMBER Pipeline parallel (default is 1)" + echo " --grad_accum_steps=NUMBER" + echo " Number of gradient accumulation steps" + echo " --micro_batch_size=NUMBER" + echo " Micro batch size" + echo " --warmup_steps=NUMBER Number of warmup steps" + echo " --eval_interval=NUMBER Number of steps between validations" + echo " --lr=NUMBER Learning rate" + echo " --log_interval=NUMBER Number of steps between logging" + echo " --lr_min=NUMBER Minimum learning rate of scheduler" + echo " --lr_scheduler=STRING Learning rate scheduler" + echo " --save_interval=NUMBER Number of steps between saves" + echo " --n_gpus=NUMBER Number of GPUs to use" + echo " --repo=PATH Path to repo" + echo " --gpu_ids=STRING GPU IDs to use" + echo " --train_steps=NUMBER Number of training steps" + exit 0 + ;; + --data_path=*) + data_path="${arg#*=}" + shift + ;; + --megatron_model=*) + megatron_model="${arg#*=}" + shift + ;; + --model_dir=*) + model_dir="${arg#*=}" + shift + ;; + --tokenizer_path=*) + tokenizer_path="${arg#*=}" + shift + ;; + --tp=*) + tp="${arg#*=}" + shift + ;; + --pp=*) + pp="${arg#*=}" + shift + ;; + --grad_accum_steps=*) + grad_accum_steps="${arg#*=}" + shift + ;; + --micro_batch_size=*) + micro_batch_size="${arg#*=}" + shift + ;; + --warmup_steps=*) + warmup_steps="${arg#*=}" + shift + ;; + --eval_interval=*) + eval_interval="${arg#*=}" + shift + ;; + --lr=*) + lr="${arg#*=}" + shift + ;; + --log_interval=*) + log_interval="${arg#*=}" + shift + ;; + --lr_min=*) + lr_min="${arg#*=}" + shift + ;; + --lr_scheduler=*) + lr_scheduler="${arg#*=}" + shift + ;; + --save_interval=*) + save_interval="${arg#*=}" + shift + ;; + --n_gpus=*) + n_gpus="${arg#*=}" + shift + ;; + --repo=*) + repo="${arg#*=}" + shift + ;; + --gpu_ids=*) + gpu_ids="${arg#*=}" + shift + ;; + --train_steps=*) + train_steps="${arg#*=}" + shift + ;; + esac +done + +# CUDA_VISIBLE_DEVICES=$gpu_ids + +if [ "$model_dir" != "" ]; then + mkdir -p $model_dir + mkdir -p $model_dir/runs +fi + +ckpt_flag=$model_dir/latest_checkpointed_iteration.txt +if [ -f $ckpt_flag ]; then + megatron_model=$model_dir + echo Loading from previously saved checkpoint. 
+fi + +global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps)) + +LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval" +TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min" +DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 50000" +COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion" +LLAMA_ARGS="--use_rms_norm --glu_activation swiglu --no_tie_embed_logits --no_new_tokens --layernorm_epsilon 1e-5" +CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \ + --tensor_model_parallel_size $tp \ + --pipeline_model_parallel_size $pp \ + --load $megatron_model \ + --save $model_dir \ + --tensorboard_dir $model_dir/runs \ + --data_path $data_path \ + --model_name llama \ + --tokenizer_type SentencePieceTokenizer \ + --vocab_file=$tokenizer_path \ + --bf16 \ + --use_flash_attn \ + --micro_batch_size $micro_batch_size \ + --global_batch_size $global_batch_size \ + --sequence_parallel \ + --recompute_granularity selective \ + --use_checkpoint_args \ + --seq_length 2048 \ + --split 99,1,1 \ + $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS diff --git a/multilinguality_megatron/convert2megatron.sh b/multilinguality_megatron/convert2megatron.sh new file mode 100644 index 0000000000000000000000000000000000000000..08a80580604cb8e47ef2e52b7b3e2348323ccaf5 --- /dev/null +++ b/multilinguality_megatron/convert2megatron.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +megatron_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/megatron_model" +model_path="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/extended_non_uniform_model" +size="1" +repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron" + +# Parse command-line arguments +for arg in "$@" +do + case $arg in + --help) + echo "Usage: ./script.sh [OPTIONS]" + echo "Options:" + echo " --megatron_model=PATH Path to save converted model." + echo " --model_path=PATH Path of HF directory of model to be converted." + echo " --size=NUMBER Billion parameters of model." + echo " --repo=PATH Path to repo." 
+ exit 0 + ;; + --megatron_model=*) + megatron_model="${arg#*=}" + shift + ;; + --model_path=*) + model_path="${arg#*=}" + shift + ;; + --size=*) + size="${arg#*=}" + shift + ;; + --repo=*) + repo="${arg#*=}" + shift + ;; + esac +done + +# Run the Python script +python $repo/weights_conversion/hf_to_megatron.py llama \ + --size=$size \ + --out=$megatron_model \ + --cache-dir=$model_path \ + --model-path=$model_path diff --git a/multilinguality_megatron/cp.sh b/multilinguality_megatron/cp.sh new file mode 100644 index 0000000000000000000000000000000000000000..4f9be5bb6ded1f1a575863ac84fbcc2e08a03850 --- /dev/null +++ b/multilinguality_megatron/cp.sh @@ -0,0 +1,10 @@ +langs=(en de es fr it pt nl ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + +for lang in ${langs[@]}; do + mkdir -p /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang} + echo "0" > /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_exit_code.txt + touch /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_stderr.txt + touch /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_stdout.txt + touch /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ducttape_task.sh + cp /mnt/cephfs-nvme/shared/tower-base-training-data/${lang}/dataset.json /mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/DumpHFDataset/Dataset.${lang}/ & +done \ No newline at end of file diff --git a/multilinguality_megatron/debug.sh b/multilinguality_megatron/debug.sh new file mode 100644 index 0000000000000000000000000000000000000000..0030413293d7c6e4badefd3167c8f96a847f77e4 --- /dev/null +++ b/multilinguality_megatron/debug.sh @@ -0,0 +1,101 @@ +export dataset_bin="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_fr_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_fr/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Baseline.baseline/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt_en_pre_annealing/data_bin 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_pt/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_es/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_de/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_de_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ko_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_zh/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_it/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_nl/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_zh_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ru_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.zh_synth/data_bin 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.it_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.instructions/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_pt_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ru/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.de_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.ko/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_it_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ru_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_es_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.nl_synth/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_nl_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.en_ko/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.es_en_pre_annealing/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.fr_en/data_bin /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/PreprocessDataset/Dataset.pt_en/data_bin" +export datamix_file="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_fr_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_fr/datamix_file 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Baseline.baseline/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_pt/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_es/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_de/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_de_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ko_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_zh/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de_en/datamix_file 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_it/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_nl/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_zh_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ru_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.zh_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.it_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.instructions/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_pt_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ru/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.de_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.ko/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_it_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ru_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_es_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.nl_synth/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_nl_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.en_ko/datamix_file 
/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.es_en_pre_annealing/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.fr_en/datamix_file /mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/MakeDataMix/Dataset.pt_en/datamix_file" +export megatron_model="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/ModelSharding/PP.1+Size.1+TP.1/sharded_model" +export model_dir="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/doc_attn_tests" +export seq_length="2048" +export tp="1" +export warmup_steps="32" +export micro_batch_size="24" +export grad_accum_steps="4" +export kv_channels="" +export weight_decay="0.1" +export external_model_dir="/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/doc_attn_tests" +export lr="3e-5" +export eval_interval="635" +export layernorm_epsilon="1e-5" +export log_interval="1" +export freeze_layers="" +export glu_activation="swiglu" +export eval_iters="1" +export lr_min="3e-6" +export pp="1" +export model_type="llama2" +export lr_scheduler="constant" +export tokenizer_path="/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574" +export save_interval="635" +export n_gpus="1" +export repo="/mnt/data/jpombal/multilinguality_megatron" +export gpu_ids="0" +export tokenizer_type="PretrainedFromHF" +export train_steps="11430" + + external_model_dir="${external_model_dir}_${lr}" + if [ "$external_model_dir" != "" ]; then + mkdir -p $external_model_dir + mkdir -p $external_model_dir/runs + ln -s $external_model_dir $model_dir + fi + + data_path="" + for f in $datamix_file; do + # read file + data_path="$data_path `cat $f`" + done + echo "Running with data_path=$data_path" + + FREEZE_ARGS="" + if [ "$freeze_layers" == "not_embeddings" ]; then + FREEZE_ARGS="--freeze_layers" + fi + echo $FREEZE_ARGS + + export CUDA_VISIBLE_DEVICES=$gpu_ids + + # if load_from_checkpoint, then set megatron_model to external_model_dir + ckpt_flag=$external_model_dir/latest_checkpointed_iteration.txt + if [ -f $ckpt_flag ]; then + megatron_model=$external_model_dir + echo Loading from previously saved checkpoint. 
+ fi + + KV_CHANNELS_ARGS="" + if [ "$kv_channels" != "" ]; then + KV_CHANNELS_ARGS="--kv_channels $kv_channels" + fi + + TIE_ARGS="" + if [ $model_type != 'gemma' ]; then + TIE_ARGS+="--no_tie_embed_logits" + fi + echo $TIE_ARGS + + global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps)) + + LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval --eval_iters $eval_iters --log_validation_ppl_to_tensorboard --log_memory_to_tensorboard --log_batch_size_to_tensorboard" + TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min --weight_decay $weight_decay" + DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8134" + COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion" + LLAMA_ARGS="--use_rms_norm --glu_activation $glu_activation --no_new_tokens --layernorm_epsilon $layernorm_epsilon" + CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \ + --tensor_model_parallel_size $tp \ + --pipeline_model_parallel_size $pp \ + --load $megatron_model \ + --save $model_dir \ + --tensorboard_dir $external_model_dir/runs \ + --data_path $data_path \ + --model_name $model_type \ + --tokenizer_type $tokenizer_type \ + --vocab_file=$tokenizer_path \ + --bf16 \ + --use_flash_attn \ + --micro_batch_size $micro_batch_size \ + --global_batch_size $global_batch_size \ + --sequence_parallel \ + --recompute_granularity selective \ + --use_checkpoint_args \ + --seq_length $seq_length \ + --split 9990,5,5 \ + --sliding_window_size 4096 \ + --reset_attention_mask \ + --reset_position_ids \ + $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS $FREEZE_ARGS $KV_CHANNELS_ARGS $TIE_ARGS \ \ No newline at end of file diff --git a/multilinguality_megatron/deploy.sh b/multilinguality_megatron/deploy.sh new file mode 100644 index 0000000000000000000000000000000000000000..2f30acd5a590829eba8b932f911764971c028619 --- /dev/null +++ b/multilinguality_megatron/deploy.sh @@ -0,0 +1,53 @@ +while getopts ":p:v:m:f:t:k:" opt; do + case ${opt} in + p ) + path_to_weights=$OPTARG + ;; + v ) + vocab_size=$OPTARG + ;; + m ) model_name=$OPTARG + ;; + f ) vocab_file=$OPTARG + ;; + t ) model_type=$OPTARG + ;; + k ) kv_channels=$OPTARG + ;; + \? 
) + echo "Invalid option: $OPTARG" 1>&2 + exit 1 + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" 1>&2 + exit 1 + ;; + esac +done +shift $((OPTIND -1)) + +KV_CHANNELS_ARGS="" +if [ "$kv_channels" != "" ]; then + KV_CHANNELS_ARGS="--kv_channels $kv_channels" +fi + +# path_to_weights is where the latest_checkpointed_iteration.txt file is located +# script creates a folder with respective iteration in unsharded_dir, so no need to specify iteration +python tools/checkpoint_util.py \ + --target_tensor_parallel_size 1 \ + --target_pipeline_parallel_size 1 \ + --load_dir $path_to_weights \ + --save_dir "${path_to_weights}/unsharded" \ + --model_type $model_type \ + --true_vocab_size $vocab_size \ + --bf16 \ + $KV_CHANNELS_ARGS + +python weights_conversion/megatron_to_hf.py \ + --input_dir "${path_to_weights}/unsharded" \ + --output_dir "${path_to_weights}/hf/${model_name}" \ + --vocab_file "${vocab_file}" \ + --model $model_type + +# remove intermediate step +rm -r "${path_to_weights}/unsharded" \ No newline at end of file diff --git a/multilinguality_megatron/docs/Makefile b/multilinguality_megatron/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d4bb2cbb9eddb1bb1b4f366623044af8e4830919 --- /dev/null +++ b/multilinguality_megatron/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/multilinguality_megatron/docs/_templates/autosummary/base.rst b/multilinguality_megatron/docs/_templates/autosummary/base.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7556ebf7b06631c6d12c823ccaa7ca3c50a1d5a --- /dev/null +++ b/multilinguality_megatron/docs/_templates/autosummary/base.rst @@ -0,0 +1,5 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. auto{{ objtype }}:: {{ objname }} diff --git a/multilinguality_megatron/docs/_templates/autosummary/class.rst b/multilinguality_megatron/docs/_templates/autosummary/class.rst new file mode 100644 index 0000000000000000000000000000000000000000..9a7b57db805651ad67113c2caf6cf9a2db2d0301 --- /dev/null +++ b/multilinguality_megatron/docs/_templates/autosummary/class.rst @@ -0,0 +1,9 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :members: + :special-members: + :show-inheritance: + :exclude-members: __weakref__, __init__ diff --git a/multilinguality_megatron/docs/_templates/autosummary/module.rst b/multilinguality_megatron/docs/_templates/autosummary/module.rst new file mode 100644 index 0000000000000000000000000000000000000000..0957773d08c899574bbe9e96024a3fc6244d11ca --- /dev/null +++ b/multilinguality_megatron/docs/_templates/autosummary/module.rst @@ -0,0 +1,29 @@ +{{ fullname | escape | underline }} + +.. rubric:: Description + +.. automodule:: {{ fullname }} + +.. currentmodule:: {{ fullname }} + +{% if classes %} +.. rubric:: Classes + +.. autosummary:: + :toctree: . 
+ {% for class in classes %} + {{ class }} + {% endfor %} + +{% endif %} + +{% if functions %} +.. rubric:: Functions + +.. autosummary:: + :toctree: . + {% for function in functions %} + {{ function }} + {% endfor %} + +{% endif %} diff --git a/multilinguality_megatron/docs/api/index.rst b/multilinguality_megatron/docs/api/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..f6f624bbf2ffbdef21595933f77e808c2e56df9f --- /dev/null +++ b/multilinguality_megatron/docs/api/index.rst @@ -0,0 +1,130 @@ +API +=== + +megatron +-------- + +.. autosummary:: + :toctree: megatron + + megatron.arguments + megatron.checkpointing + megatron.dist_signal_handler + megatron.global_vars + megatron.indexer + megatron.initialize + megatron.memory + megatron.microbatches + megatron.optimizer_param_scheduler + megatron.p2p_communication + megatron.schedules + megatron.text_generation_server + megatron.timers + megatron.training + megatron.utils + megatron.wandb_logger + +megatron.core +------------- + +.. autosummary:: + :toctree: megatron/core + + megatron.core.parallel_state + megatron.core.utils + + +megatron.core.tensor_parallel +----------------------------- + +.. autosummary:: + :toctree: megatron/core/tensor_parallel + + megatron.core.tensor_parallel.cross_entropy + megatron.core.tensor_parallel.data + megatron.core.tensor_parallel.layers + megatron.core.tensor_parallel.mappings + megatron.core.tensor_parallel.random + megatron.core.tensor_parallel.utils + +megatron.data +------------- + +.. autosummary:: + :toctree: megatron/data + + megatron.data.autoaugment + megatron.data.blendable_dataset + megatron.data.gpt_dataset + megatron.data.image_folder + megatron.data.realm_dataset_utils + megatron.data.bert_dataset + megatron.data.data_samplers + megatron.data.indexed_dataset + megatron.data.orqa_wiki_dataset + megatron.data.realm_index + megatron.data.biencoder_dataset_utils + megatron.data.dataset_utils + megatron.data.ict_dataset + megatron.data.t5_dataset + +megatron.model +-------------- + +.. autosummary:: + :toctree: megatron/model + + megatron.model.bert_model + megatron.model.biencoder_model + megatron.model.classification + megatron.model.distributed + megatron.model.enums + megatron.model.falcon_model + megatron.model.fused_bias_gelu + megatron.model.fused_layer_norm + megatron.model.fused_softmax + megatron.model.glu_activations + megatron.model.gpt_model + megatron.model.language_model + megatron.model.llama_model + megatron.model.module + megatron.model.multiple_choice + megatron.model.positional_embeddings + megatron.model.t5_model + megatron.model.transformer + megatron.model.utils + +megatron.optimizer +------------------ + +.. autosummary:: + :toctree: megatron/optimizer + + megatron.optimizer.clip_grads + megatron.optimizer.distrib_optimizer + megatron.optimizer.grad_scaler + megatron.optimizer.optimizer + +megatron.text_generation +------------------------ + +.. autosummary:: + :toctree: megatron/text_generation + + megatron.text_generation.api + megatron.text_generation.beam_utils + megatron.text_generation.communication + megatron.text_generation.forward_step + megatron.text_generation.generation + megatron.text_generation.sampling + megatron.text_generation.tokenization + +megatron.tokenizer +------------------ + +.. 
autosummary:: + :toctree: megatron/tokenizer + + megatron.tokenizer.bert_tokenization + megatron.tokenizer.gpt2_tokenization + megatron.tokenizer.tokenizer diff --git a/multilinguality_megatron/docs/conf.py b/multilinguality_megatron/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..05b51273acb01bb6f0075e19ae609c0447923d2d --- /dev/null +++ b/multilinguality_megatron/docs/conf.py @@ -0,0 +1,64 @@ +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) + + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = 'Megatron-LLM' +copyright = '2023, Alejandro Hernández Cano, Matteo Pagliardini, Kyle Matoba, Amirkeivan Mohtashami, Olivia Simin Fan, Axel Marmet, Deniz Bayazit, Igor Krawczuk, Zeming Chen, Francesco Salvi, Antoine Bosselut, Martin Jaggi' +author = 'Alejandro Hernández Cano, Matteo Pagliardini, Kyle Matoba, Amirkeivan Mohtashami, Olivia Simin Fan, Axel Marmet, Deniz Bayazit, Igor Krawczuk, Zeming Chen, Francesco Salvi, Antoine Bosselut, Martin Jaggi' +release = '0.1.0' + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', + 'sphinx.ext.autosummary', + 'sphinx.ext.napoleon', + 'sphinx.ext.mathjax', + 'myst_parser' +] + +# autosummary +autosummary_generate = True + +# napoleon +napoleon_google_docstring = True + +# myst +myst_enable_extensions = ["colon_fence"] + +# autodoc +autodoc_mock_imports = ['amp_C', 'torchvision', 'flash_attn', 'apex'] + +templates_path = ['_templates'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None) +} + +master_doc = 'index' + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = 'pydata_sphinx_theme' +# html_theme = 'sphinx_rtd_theme' +html_static_path = ['_static'] diff --git a/multilinguality_megatron/docs/guide/faq.md b/multilinguality_megatron/docs/guide/faq.md new file mode 100644 index 0000000000000000000000000000000000000000..d4d871accebbd46c417ed4d9f79fd4d19a9e0d1f --- /dev/null +++ b/multilinguality_megatron/docs/guide/faq.md @@ -0,0 +1,170 @@ +# Frequently Asked Questions + +## How to add special tokens? + +When defining a new task, it is often needed to introduce tokens with special meanings. +For instance, let's say we want to add two tokens `[formula]` and `[/formula]` to indicate the start and end of a formula in mathematics textbooks. +In order to include these new tokens, you need to indicate them in three different places: + +1. 
When tokenizing (`tools/preprocess_data.py`), using the flag `--vocab_extra_ids_list` with the new tokens:
+   ```
+   python tools/preprocess_data.py --vocab_extra_ids_list "[formula],[/formula]" # ...
+   ```
+
+1. When sharding the model (`tools/checkpoint_util.py`), using `--true_vocab_size`.
+   For instance, Falcon has 65024 tokens by default.
+   Including these two extra tokens will result in
+   ```
+   python tools/checkpoint_util.py --true_vocab_size 65026 # ...
+   ```
+
+1. When training (`finetune.py`) using `--vocab_extra_ids_list`.
+   Same as before:
+   ```
+   python finetune.py --vocab_extra_ids_list "[formula],[/formula]" # ...
+   ```
+
+(tp-pp)=
+## How to set TP and PP?
+
+General strategies:
+- It is recommended to use data parallelism as much as possible; only use model parallelism if the model cannot fit on the GPU or the micro batch size is very small.
+- It is preferable to use tensor parallelism over pipeline parallelism when working on a single machine.
+- When a model does not fit in a single node, use a tensor parallelism level equal to the number of GPUs per node, and the smallest pipeline parallelism level that lets the model fit in memory while keeping a large enough micro batch size (at least 5).
+
+In the codebase, you won't set data parallelism explicitly.
+Rather, the data parallelism will be inferred automatically to be as high as possible, depending on your available hardware and the TP and PP levels.
+In general, the number of GPUs you need is:
+```
+GPUs = DP * TP * PP
+```
+For instance, if you have two nodes with 8 GPUs each, TP=4 and PP=2, then DP will be automatically set to 2 as `4 x 2 x 2 = 16`.
+
+```{seealso}
+- For more information on data and model parallelism see: https://huggingface.co/docs/transformers/v4.15.0/parallelism.
+- Detailed information on how TP and PP work: https://arxiv.org/abs/2104.04473.
+```
+
+## How to launch training on multiple nodes?
+
+In order to launch training on multiple nodes, you will set the appropriate arguments to the `torchrun` program.
+
+1. Select a "master" or main node and take note of its IP address.
+1. Launch the `finetune.py` script in the main node using `torchrun` with the following arguments:
+   ```
+   torchrun --nproc_per_node NUMBER_OF_GPUS_PER_NODE \
+       --nnodes NUMBER_OF_NODES \
+       --node_rank 0 \
+       --master_addr ADDRESS_OF_THE_MAIN_NODE \
+       --master_port PORT \
+       finetune.py # ...
+   ```
+1. In the rest of the nodes, launch `finetune.py` with the same arguments, modifying `--node_rank` to a different value per node.
+
+```{seealso}
+- Take a look at the example script `examples/finetune.sh` for more information.
+- Look at the [How to set TP and PP?](#tp-pp) section for more information.
+```
+
+## What are the basic hardware requirements?
+
+In this section we give a brief overview of the minimal hardware requirements we observed during our experiments.
+
+| Model      | min VRAM | tp  | pp  |
+| :--------- | :------: | :-: | :-: |
+| LLaMa2-7B  | 2x 80GB  | 2   | 1   |
+| Falcon-40B | 16x 80GB | 8   | 2   |
+| LLaMa2-70B | 32x 80GB | 8   | 4   |
+
+
+(shard)=
+## How to shard and merge models?
+
+Use `tools/checkpoint_util.py` to set the desired tensor and pipeline parallelism levels.
+
+```
+python tools/checkpoint_util.py \
+    --target_tensor_parallel_size TP \
+    --target_pipeline_parallel_size PP \
+    --load_dir /path/to/original/weights/ \
+    --save_dir /path/to/new/weights/ \
+    --model_type MODEL \
+    --bf16
+```
+Where MODEL can be either llama, llama2, falcon, gpt or bert, and TP and PP are the model parallelism levels desired.
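+
+For example, a minimal sketch (all paths are placeholders) that merges a llama2 checkpoint back into a single unsharded checkpoint, i.e. TP = PP = 1, could look like:
+
+```
+python tools/checkpoint_util.py \
+    --target_tensor_parallel_size 1 \
+    --target_pipeline_parallel_size 1 \
+    --load_dir /path/to/sharded/weights/ \
+    --save_dir /path/to/merged/weights/ \
+    --model_type llama2 \
+    --bf16
+```
+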
+Note that you can convert sharded weights (i.e. TP, PP > 1) to unsharded weights (TP = PP = 1) or vice versa.
+
+## What arguments are used to train LLaMa 2?
+
+We set the same hyperparameters specified by Meta during finetuning (see [their paper for more information](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/)).
+This means that training LLaMa 2 7B can be done with the following arguments:
+
+```bash
+torchrun \
+    # torchrun arguments # \
+    --nproc_per_node <number of GPUs per node> \
+    --nnodes <number of nodes> \
+    --node_rank <0,1,2,etc a different number per node> \
+    --master_addr <address of the main node> \
+    --master_port <port> \
+    finetune.py --model_name llama2 \
+    # hardware/distributed arguments # \
+    --tensor_model_parallel_size <tp> \
+    --pipeline_model_parallel_size <pp> \
+    --bf16 \
+    # training arguments # \
+    --train_iters <train iters> \
+    --adam_beta1 0.9 \
+    --adam_beta2 0.95 \
+    --adam_eps 1e-5 \
+    --lr_decay_style cosine \
+    --lr_warmup_iters <warmup iters> \
+    --lr 3e-4 \
+    --min_lr 1e-6 \
+    --weight_decay 0.1 \
+    --micro_batch_size 5 \
+    --global_batch_size 1000 \
+    # additional optimization arguments # \
+    --use_flash_attn \
+    --sequence_parallel \
+    --recompute_granularity selective \
+    # logging/pathing arguments # \
+    --load <path to megatron weights> \
+    --use_checkpoint_args \
+    --vocab_file <path to tokenizer.model> \
+    --log_interval 1 \
+    --data_path <path to tokenized data> \
+    --tokenizer_type SentencePieceTokenizer
+```
+
+```{seealso}
+The file `examples/finetune.sh` gives the full picture of the arguments used to train either LLaMa model.
+```
+
+## How to convert a LLaMa or Falcon architecture from a non-official checkpoint?
+
+If you want to convert weights from a checkpoint other than the checkpoints provided by `llama-meta` or `tiiuae`, you might use `--model-path` during conversion.
+For instance, to convert the [OpenAssistant llama2 70B](https://huggingface.co/OpenAssistant/llama2-70b-oasst-sft-v10) weights, run:
+
+```
+python weights_conversion/hf_to_megatron.py llama2 --size=70 \
+    --out=/path/to/megatron/weights/ --cache-dir=/path/to/llama-2-7b/ \
+    --model-path=OpenAssistant/llama2-70b-oasst-sft-v10
+```
+
+The `--model-path` argument should be either a local folder or the name of a model hosted on huggingface.
+
+## I'm getting a `17300 Bus error (core dumped)` error!
+
+If you are using a docker container and you get this error when sharding a large model, you might need to increase the shared memory size.
+This is done via the command line option `--shm-size=128gb`.
+
+## I'm getting an `ImportError: cannot import name 'helpers' from 'megatron.data'` error!
+
+You need to compile the `helpers` module:
+
+```
+cd megatron/data
+make
+cd ../../
+```
diff --git a/multilinguality_megatron/docs/guide/getting_started.md b/multilinguality_megatron/docs/guide/getting_started.md
new file mode 100644
index 0000000000000000000000000000000000000000..5189e58f27a922d0e02d703652c828e9ffe63f02
--- /dev/null
+++ b/multilinguality_megatron/docs/guide/getting_started.md
@@ -0,0 +1,276 @@
+# Getting started
+
+This tutorial will guide you on the basic usage of Megatron-LLM.
+In this guide we will fine-tune a [LLaMa 2 7B](https://ai.meta.com/llama/) LLM on [code data](https://huggingface.co/datasets/bigcode/starcoderdata).
+It is recommended to have at least 160GB VRAM available (e.g. two 80GB A100 GPUs).
+
+```{note}
+This tutorial can also be followed to train a Falcon architecture, using `falcon` instead of `llama2` throughout the guide.
+```
+
+## Setup
+
+First we need to install the dependencies.
+
+
+1. Clone our repo:
+   ```
+   git clone git@github.com:epfLLM/Megatron-LLM.git
+   ```
+
+1. Run the [nvcr docker image](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch), mounting the source code to your desired path, e.g. `/mpt/Megatron-LLM`:
+   ```
+   sudo docker run --gpus all -it --rm \
+       -v /path/to/Megatron-LLM/:/mpt/Megatron-LLM \
+       nvcr.io/nvidia/pytorch:23.07-py3
+   ```
+
+1. Enter the repository:
+   ```
+   cd /mpt/Megatron-LLM/
+   ```
+
+1. Install the additional dependencies not included in the `nvcr` image:
+   ```
+   pip install -r requirements.txt
+   ```
+
+1.
Install the `megatron/data/helpers` binary: + ``` + cd megatron/data/ + make + cd ../../ + ``` + +(download_weights)= +## Downloading LLaMa2 weights + +1. Request access to the weights directly to meta: https://ai.meta.com/resources/models-and-libraries/llama-downloads/. +1. Request access to the LLaMa2 huggingface model: https://huggingface.co/meta-llama/Llama-2-7b-hf. +1. Create a new huggingface token (or use an existing one): https://huggingface.co/settings/tokens. +1. Run the huggingface login CLI, and enter the token created on the previous step when asked: + ``` + huggingface-cli login + ``` + +## Preparing the raw data + +:::{note} + +This tutorial will use code data to fine tune the LLM. +Feel free to use any other dataset, as long as the raw data is saved in `.jsonl` format, i.e. one `json` dictionary with the key `"text"` per line: + +```json +{"text": "The blue cat is big."} +{"text": "This is another document."} +``` + +In this case, skip to the [data preprocessing](#data-preprocessing) section. + +::: + +1. Accept starcoder's terms of use via the huggingface portal: https://huggingface.co/datasets/bigcode/starcoderdata +1. Create a huggingface token (or use an existing one) and login using `huggingface-cli` (see [Downloading LLaMa2 weights](#download_weights) for more information). +1. Download and save the starcoder dataset. + In this tutorial we will use the `julia` data, but feel free to use any other subset. + This data contains around 500M tokens. + ```python + import json + from datasets import load_dataset + + # the `cache_dir` argument is optional + dataset = load_dataset("bigcode/starcoderdata", data_dir="julia", + split="train", cache_dir="/path/to/cache/") + with open("/path/to/raw.jsonl", "w+") as f: + for document in dataset: + document = {"id": document["id"], "text": document["content"]} + f.write(json.dumps(document) + "\n") + ``` + +At this point, the raw data will be available at `/path/to/raw.jsonl`. + + +(data-preprocessing)= +## Data preprocessing + +In this step we will tokenize the raw data to binary files for optimized data loading during training. +Run: +``` +python tools/preprocess_data.py --input=/path/to/raw.jsonl \ + --output_prefix=/path/to/tokenized/starcoder \ + --tokenizer_type=SentencePieceTokenizer \ + --vocab_file=/path/to/tokenizer.model \ + --chunk_size=32 \ + --workers=16 \ + --no_new_tokens +``` + +```{note} +In this guide we use a sequence length of 1024 to accelerate training. +Note that the official sequence length of LLaMa2 is 4096. +``` + +```{note} +If you are using falcon, use `FalconTokenizer` instead of `SentencePieceTokenizer`, don't supply any `--vocab_file` and ignore the `--no_new_tokens` flag. +``` + + +(weight-conversion)= +## Weight conversion + +In order to use pretrained weights in the Megatron-LLM codebase, we will need to convert the official weights provided to be compatible with Megatron. +To do so, run: +``` +python weights_conversion/hf_to_megatron.py llama2 --size=7 \ + --out=/path/to/megatron/weights/ --cache-dir=/path/to/llama-2-7b/ +``` + +(correctness-verification)= +## Correctness verification (optional) + +To make sure the weight conversion ran successfully we run the `verify_correctness.py` script. +This will run simultaneously the official LLaMa 2 implementation and the Megatron codebase. 
+Make sure to adjust the arguments to your convenience:
+```bash
+# arguments required by `torchrun`
+DISTRIBUTED_ARGS="--nproc_per_node 1 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
+LLAMA_ARGS="--use_rms_norm --glu_activation swiglu --no_tie_embed_logits --no_new_tokens --layernorm_epsilon 1e-5"
+COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
+torchrun $DISTRIBUTED_ARGS verify_correctness.py \
+    --model_name=llama2 \
+    --model_size=7 \
+    --load=/path/to/megatron/weights/ \
+    --data_path=/path/to/tokenized/starcoder \
+    --tokenizer_type=SentencePieceTokenizer \
+    --vocab_file=/path/to/megatron/weights/tokenizer.model \
+    --huggingface_cache=/path/to/meta/llama-2-7b/ \
+    --huggingface_device=cuda:1 \
+    $COMMON_ARGS $LLAMA_ARGS # don't include LLAMA_ARGS if using Falcon
+```
+
+This script will compare the logits output of the Megatron model and the official implementation.
+Expected outputs will yield average absolute error smaller than `0.01` when using 32-bit precision and `0.1` when using 16-bit precision.
+
+## Model sharding
+
+In order to use model parallelism you need to split the previously converted weights into multiple files before you start training.
+To do this, use `tools/checkpoint_util.py`.
+Feel free to use different tensor parallel (tp) and pipeline (pp) sizes.
+```
+python tools/checkpoint_util.py \
+    --target_tensor_parallel_size 2 \
+    --target_pipeline_parallel_size 1 \
+    --load_dir /path/to/megatron/weights/ \
+    --save_dir /path/to/sharded/weights/ \
+    --model_type llama2 \
+    --true_vocab_size 32000 \
+    --bf16
+```
+
+Feel free to set `--target_tensor_parallel_size` to 4 if you have 4 or more GPUs available.
+
+## Training
+
+Use the `finetune.py` script.
+Example usage:
+```bash
+LOG_ARGS="--log_interval 1 --save_interval 100 --eval_interval 50"
+TRAIN_ARGS="--train_iters 500 --lr_decay_style cosine --lr_warmup_iters 50 --lr 3e-4 --min_lr 1e-6"
+DISTRIBUTED_ARGS="--nproc_per_node NUMBER_OF_GPUS --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
+torchrun $DISTRIBUTED_ARGS finetune.py \
+    --tensor_model_parallel_size 4 \
+    --pipeline_model_parallel_size 1 \
+    --load /path/to/sharded/weights/ \
+    --save /path/to/sharded/weights/ \
+    --tensorboard_dir /path/to/sharded/weights/tensorboard/ \
+    --data_path /path/to/tokenized/starcoder \
+    --model_name llama2 \
+    --tokenizer_type SentencePieceTokenizer \
+    --vocab_file=/path/to/megatron/weights/tokenizer.model \
+    --bf16 \
+    --use_flash_attn \
+    --micro_batch_size 5 \
+    --global_batch_size 1000 \
+    --sequence_parallel \
+    --recompute_granularity selective \
+    --use_checkpoint_args \
+    $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS
+```
+
+With the selected global batch size of 1000, and the total number of training tokens around 500M, in 500 iterations the trainer will perform approximately one epoch (1000 sequences x 1024 tokens x 500 iterations ≈ 500M tokens).
+This will take approximately 20 hours to run on an 8x 80GB A100 cluster (DP=2, TP=4, PP=1).
+
+:::{note}
+
+To use distributed training make sure to set `nproc_per_node` to the number of GPUs per node, `nnodes` to the number of nodes in your training and `master_addr` to the address of your master node in the `DISTRIBUTED_ARGS` variable.
+For instance, to train on a two node cluster with 8 GPUs each:
+```
+DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 2 --node_rank 0 --master_addr ADDRESS_OF_THE_MAIN_NODE --master_port 8000"
+```
+
+Then, run the `finetune.py` script in all your nodes with the same parameters, just setting a different `node_rank` at every node.
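+
+For instance, the second node of that same cluster would keep every argument identical and only change the rank, e.g.:
+```
+DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 2 --node_rank 1 --master_addr ADDRESS_OF_THE_MAIN_NODE --master_port 8000"
+```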
+ +::: + +```{seealso} +Take a look at `examples/finetune.sh for more information on the recommended hyperparameters +``` + +## Model Deployment + +After training, merge your distributed weights again into a single model: +``` +python tools/checkpoint_util.py \ + --target_tensor_parallel_size 1 \ + --target_pipeline_parallel_size 1 \ + --load_dir /path/to/sharded/weights/ \ + --save_dir /path/to/unsharded/trained/weights/ \ + --model_type llama2 \ + --true_vocab_size 32000 \ + --bf16 +``` + +We provide a Megatron to Huggingface conversion utility for easier deployment: `weights_conversion/megatron_to_hf.py`. +Run: +``` +python weights_conversion/megatron_to_hf.py --input_dir=/path/to/unsharded/trained/weights/ \ + --output_dir=/path/to/hf/weights/ +``` + +Once the conversion is done, you can load the fine tuned weights using huggingface: +```python +import torch +import transformers +from transformers import LlamaForCausalLM, LlamaTokenizer + +pipeline = transformers.pipeline( + "text-generation", + model=LlamaForCausalLM.from_pretrained("/path/to/hf/weights/"), + tokenizer=LlamaTokenizer.from_pretrained("/path/to/hf/weights/"), + torch_dtype=torch.bfloat16, + device_map="auto" +) +prompt = """#= a function that returns the fibonacci number of its argument =# +function fibonacci(n::Int)::Int +""" +sequences = pipeline(prompt, max_new_tokens=100, do_sample=True, top_k=20, + num_return_sequences=1) +for sequence in sequences: + print(sequence["generated_text"]) +``` + +Once you are happy with your model performance, you might publish it to the huggingface hub using the `tools/push_to_hub.py` utility: + +``` +python tools/push_to_hub.py /path/to/hf/weights --hf_repo_name=MyRepoName/MyAwesomeModel --auth_token=MyAuthToken +``` + +## What's next? + +1. Take a look at our example scripts to familiarize yourself with some other capabilities and hyperparameters used in the codebase, such as to train (pretrain or finetune) larger models: + - `examples/parallelize.sh` + - `examples/finetune.sh` + - `examples/verify.sh` +1. See the [intruction finetuning](instruction_tuning) guide for more information on how to finetune a pretrained model to follow instructions. +1. Take a look at our [FAQ](faq) section. +1. See [Weights conversion](weights_conversion) for more information on the `hf_to_megatron.py` and `megatron_to_hf.py` scripts. diff --git a/multilinguality_megatron/docs/guide/index.md b/multilinguality_megatron/docs/guide/index.md new file mode 100644 index 0000000000000000000000000000000000000000..66e9d4496837c636a1c4efaf8ee37b845ae3a287 --- /dev/null +++ b/multilinguality_megatron/docs/guide/index.md @@ -0,0 +1,10 @@ +# User guide + +```{toctree} + +getting_started +instruction_tuning +faq +tokenization +weights_conversion +``` diff --git a/multilinguality_megatron/docs/guide/instruction_tuning.md b/multilinguality_megatron/docs/guide/instruction_tuning.md new file mode 100644 index 0000000000000000000000000000000000000000..20da7add6c34ac961a699a9af11c33b388a89517 --- /dev/null +++ b/multilinguality_megatron/docs/guide/instruction_tuning.md @@ -0,0 +1,92 @@ +# Instruction finetuning + +This tutorial will guide you through the basics of instruction finetuning using the Megatron-LLM codebase, using LLaMa 2 as the base network. +See also the [getting started](getting_started) guide for information regarding installation of dependencies, pretraining, and weight preparation. +Following said tutorial, you would be able to finetune a 7B model in this guide, but feel free to use a different size. 
+In order to use Falcon, see the comments specified in the [getting started](getting_started) guide to learn more about the differences when using either model.
+
+## Preparing raw data
+
+The dataset used in this guide will be a subset of the [orca](https://huggingface.co/datasets/Open-Orca/OpenOrca) dataset, a general purpose instruction dataset.
+We choose to only include the chain of thought instructions from the orca dataset in order to shrink the size of the data.
+Feel free to use any other dataset, as long as the raw data is saved in `.jsonl` format, i.e. one `json` dictionary per line.
+The dictionaries must include at least two keys (one for the "instruction" and another one for the expected "answer"), plus an optional "system" key.
+In order to retrieve the CoT subset of the orca dataset, use the following code:
+
+```python
+import json
+
+from datasets import load_dataset
+from tqdm import tqdm
+
+# the `cache_dir` is optional
+dataset = load_dataset("Open-Orca/OpenOrca", cache_dir="/path/to/cache", split="train")
+with open("/path/to/raw/data.jsonl", "w+") as f:
+    for document in tqdm(dataset):
+        if document["id"].startswith("cot."):
+            f.write(json.dumps(document) + "\n")
+```
+
+## Data preprocessing
+
+In this step we will tokenize the raw data to binary files for optimized data loading during training.
+Run:
+```
+python instruct/preprocess_instruct_data.py \
+    --input=/path/to/raw/data.jsonl \
+    --output_prefix=/path/to/tokenized/orca \
+    --tokenizer_type=SentencePieceTokenizer \
+    --vocab_file=/path/to/llama/tokenizer.model \
+    --chunk_size=32 \
+    --workers=32 \
+    --vocab_extra_ids_list "<|im_start|>,<|im_end|>" \
+    --question_key=question \
+    --answer_key=response \
+    --system_key=system_prompt # Optional
+```
+
+## Training
+
+At this point, you should have a Megatron checkpoint ready to be trained (i.e. sharded with the desired parallelism levels).
+Take a look at the [getting started](getting_started) guide to see how to transform LLaMa 2 checkpoints in the huggingface format to Megatron, and how to shard the weights.
+
+To start training, use the `finetune.py` script.
+Example usage:
+```bash
+LOG_ARGS="--log_interval 1 --save_interval 100 --eval_interval 50"
+TRAIN_ARGS="--train_iters 6500 --lr_decay_style cosine --lr_warmup_iters 650 --lr 2e-5 --min_lr 2e-6"
+DISTRIBUTED_ARGS="--nproc_per_node NUMBER_OF_GPUS --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
+torchrun $DISTRIBUTED_ARGS finetune.py \
+    --tensor_model_parallel_size 4 \
+    --pipeline_model_parallel_size 1 \
+    --load /path/to/sharded/weights/ \
+    --save /path/to/sharded/weights/ \
+    --tensorboard_dir /path/to/sharded/weights/tensorboard/ \
+    --data_path /path/to/tokenized/orca \
+    --model_name llama2 \
+    --tokenizer_type SentencePieceTokenizer \
+    --vocab_file=/path/to/megatron/weights/tokenizer.model \
+    --bf16 \
+    --use_flash_attn \
+    --micro_batch_size 8 \
+    --global_batch_size 64 \
+    --sequence_parallel \
+    --recompute_granularity selective \
+    --use_checkpoint_args \
+    --data_type instruction \
+    --variable_seq_lengths \
+    --vocab_extra_ids_list "<|im_start|>,<|im_end|>" \
+    $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS
+```
+
+The arguments given for pretraining and instruction finetuning are very similar, with the key differences being the batch sizes, learning rates, and the inclusion of `--data_type instruction`, `--variable_seq_lengths` and `--vocab_extra_ids_list`.
+With the selected global batch size of 64, in 6500 iterations the trainer will perform approximately three epochs.
+This will take approximately 3 hours to run on an 8x 80GB A100 device (DP=2, TP=4, PP=1).
+
+```{note}
+If your `--load` checkpoint corresponds to a checkpoint already trained with the Megatron-LLM codebase (and not a checkpoint obtained by directly converting from the huggingface format, for instance), you might want to define a `--save` directory that points somewhere else, to avoid overwriting previous checkpoints.
+You might also want to include the `--finetune` argument to ignore the previous optimizer and RNG states.
+```
+
+## Model Deployment
+
+Once the finetuning is over, you can follow the [getting started](getting_started) guide steps to unshard your weights and convert them to huggingface, in order to do specific evaluations and deployment.
diff --git a/multilinguality_megatron/docs/guide/tokenization.md b/multilinguality_megatron/docs/guide/tokenization.md
new file mode 100644
index 0000000000000000000000000000000000000000..28281701afa6d8e7cf52b4cd4cf18d5d0cf6e770
--- /dev/null
+++ b/multilinguality_megatron/docs/guide/tokenization.md
@@ -0,0 +1,76 @@
+# How to tokenize a dataset?
+
+## Step 1: get the right json format
+
+The training data requires preprocessing. First, place your training data in a loose json format, with one json containing a text sample per line. For example:
+```
+{"src": "www.nvidia.com", "text": "The quick brown fox", "type": "Eng", "id": "0", "title": "First Part"}
+{"src": "The Internet", "text": "jumps over the lazy dog", "type": "Eng", "id": "42", "title": "Second Part"}
+```
+
+The name of the `text` field of the json can be changed by using the `--json-key` flag in `preprocess_data.py`.
+The other metadata are optional and are not used in training.
+
+## Step 2: Tokenize
+
+The loose json is then processed into a binary format for training. To convert the json into mmap, cached index file, or the lazy loader format, use `preprocess_data.py`. Set the `--dataset_impl` flag to `mmap`, `cached`, or `lazy`, respectively (default is `mmap`). An example script to prepare data for Falcon training is:
+```
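+# Note: the individual flags are documented in the option listing further down this page;
+# --dataset_impl mmap writes a memory-mapped .bin/.idx pair, and --append_eod appends an
+# end-of-document token after each document.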
+python3 tools/preprocess_data.py --input /scratch/dummy-data/train.json \
+       --output_prefix wiki-train \
+       --dataset_impl mmap \
+       --tokenizer_type FalconTokenizer \
+       --workers 2 \
+       --chunk_size 32 \
+       --append_eod
+```
+
+The output will be two files named after the given `--output_prefix`, in this case `wiki-train_text_document.bin` and `wiki-train_text_document.idx`. The `--data_path` specified in later training is the full path and new filename, but without the file extension.
+
+Other options of `preprocess_data.py`:
+
+```
+input data:
+  --input INPUT         Path to input JSON
+  --json_keys JSON_KEYS [JSON_KEYS ...]
+                        space separated list of keys to extract from json
+  --split_sentences     Split documents into sentences.
+  --keep_newlines       Keep newlines between sentences when splitting.
+
+tokenizer:
+  --tokenizer_type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,FalconTokenizer}
+                        What type of tokenizer to use.
+  --vocab_file VOCAB_FILE
+                        Path to the vocab file
+  --merge_file MERGE_FILE
+                        Path to the BPE merge file (if necessary).
+  --append_eod          Append an <eod> token to the end of a document.
+  --lang LANG           Language to use for NLTK-powered sentence splitting.
+
+output data:
+  --output_prefix OUTPUT_PREFIX
+                        Path to binary output file without suffix
+  --dataset_impl {lazy,cached,mmap}
+
+runtime:
+  --workers WORKERS     Number of worker processes to launch
+  --chunk_size CHUNK_SIZE
+                        Chunk size assigned to each worker process
+  --log_interval LOG_INTERVAL
+                        Interval between progress updates
+  --vocab_extra_ids VOCAB_EXTRA_IDS
+  --vocab_extra_ids_list VOCAB_EXTRA_IDS_LIST
+                        comma separated list of special vocab ids to add to the tokenizer
+  --no_new_tokens       Whether to add special tokens (e.g. CLS, MASK, etc) in the sentencepiece tokenizer or not
+```
+
+If you want to tokenize using the llama tokenizer:
+```
+python tools/preprocess_data.py \
+    --input=/path/to/data.json \
+    --output_prefix=wiki-train \
+    --dataset_impl=mmap \
+    --tokenizer_type=SentencePieceTokenizer \
+    --vocab_file=/path/to/tokenizer.model \
+    --workers=2 \
+    --chunk_size=32
+```
diff --git a/multilinguality_megatron/docs/guide/weights_conversion.md b/multilinguality_megatron/docs/guide/weights_conversion.md
new file mode 100644
index 0000000000000000000000000000000000000000..abf18d2a8fbbdbbd9fdc6e15c3182ea21febeb93
--- /dev/null
+++ b/multilinguality_megatron/docs/guide/weights_conversion.md
@@ -0,0 +1,87 @@
+# Weights conversion
+
+## Huggingface to megatron: `hf_to_megatron.py`
+
+Convert weights from models in other formats (primarily huggingface) to megatron checkpoints.
+
+This script supports converting Falcon, LLaMa and LLaMa 2 weights to megatron checkpoints.
+Depending on the model to convert, the inputs might differ.
+
+- **Falcon**:
+  Weights are automatically retrieved from the official implementation hosted in huggingface.
+  Thus, the `--cache-dir` argument is optional; if specified, it should point to
+  the huggingface cache directory where the huggingface Falcon weights will be stored.
+  You will need to specify the `--size` argument to determine which version to download
+  (i.e. Falcon 7B or 40B).
+
+- **LLaMa**, **LLaMa 2** and **CodeLlama**:
+  Converting llama weights can be done either by fetching the weights hosted
+  in huggingface (recommended as it is the easier method) or directly from the
+  weights provided by Meta.
+
+  - From Meta weights (only available for LLaMa and LLaMa 2):
+    You will need to specify the `--cache-dir` to the directory where the
+    llama weights are stored.
+    This will by default have the form `xB` (e.g. 7B or 70B) for llama v1,
+    or `llama-2-xb` (e.g. llama-2-7b) for llama v2.
+ + - From huggingface weights: + If `--cache-dir` is not specified or the directory specified does not + contain the format expected from Meta weights, the converter will automatically + retrieve the weights from huggingface, in which case the `--cache-dir` will + have the same semantics as with Falcon. + + Note that to download llama v2 weights from huggingface, you will need to + login using `huggingface-cli login` with a huggingface account which has been + granted access to the `meta-llama/Llama-2-7b-hf` model. + + +In all cases, the megatron checkpoint will be stored in the `--out` argument. +If a huggingface is specified, the intermediate weights (i.e. the huggingface weights) +stored therein will not be removed when the conversion succeeds. + +More information about the arguments: + +``` +positional arguments: + {llama2,falcon,codellama,llama} + +options: + -h, --help show this help message and exit + --size {65,34,70,7,40,13,30} + The size of the model + --out OUT Directory to store the megatron weights (as checkpoint) + --cache-dir CACHE_DIR + Directory to use as cache for the huggingface weights, or in case of the llama model, the path of the weights privided Meta +``` + +## Megatron to huggingface: `megatron_to_hf.py` + +Convert megatron checkpoints to huggingface weights. + +This script will also convert the tokenizer configured. +Set the `--input_dir` to the megatron checkpoint root (i.e. where the +`latest_checkpointed_iteration.txt` file is located) and `--output_dir` to +the directory where the huggingface weights should be stored. + +More information about the arguments: + +``` +options: + -h, --help show this help message and exit + --input_dir INPUT_DIR + Location of Megatron weights + --num_output_shards NUM_OUTPUT_SHARDS + --model {llama2,falcon,llama,codellama} + --output_dir OUTPUT_DIR + Location to write HF model and tokenizer + --cache_dir CACHE_DIR + Huggingface cache_dir (optional) + --vocab_file VOCAB_FILE + Path to the vocab file + --vocab_extra_ids_list VOCAB_EXTRA_IDS_LIST + comma separated list of special vocab ids to add to the tokenizer + --override_special_tokens [OVERRIDE_SPECIAL_TOKENS ...] + One or more arguments to override special tokens. Syntax set as `key=value`, e.g. `eos=<|im_end|>`. Overrides available only bos, + cls, eos, mask, pad, sep, unk. +``` diff --git a/multilinguality_megatron/docs/imgs/llama-falcon.png b/multilinguality_megatron/docs/imgs/llama-falcon.png new file mode 100644 index 0000000000000000000000000000000000000000..88036a04b6aa52aa514ad3ca9eb17a3235ba8fea --- /dev/null +++ b/multilinguality_megatron/docs/imgs/llama-falcon.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2f649e67f8b9867bd941109ad79f4459226f50f1b66e8a49e23922aaf0bff12 +size 2466055 diff --git a/multilinguality_megatron/docs/index.rst b/multilinguality_megatron/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..decfefdd1013764fefac0e59b1b3407650e81e53 --- /dev/null +++ b/multilinguality_megatron/docs/index.rst @@ -0,0 +1,75 @@ +Welcome to Megatron-LLM's documentation! +======================================== + +.. image:: imgs/llama-falcon.png + +The `Megatron-LLM `_ library enables pre-training and fine-tuning of large language models (LLMs) at scale. +Our repository is a modification of the `original Megatron-LM codebase `_ by Nvidia. + +Added key features include: + +- `LLaMa `_, `LLaMa 2 `_, `Falcon `_, and `Code Llama `_ support. 
+- support training of large models (70B Llama 2, 65B Llama 1, 34B Code Llama, and 40B Falcon) on commodity hardware on multiple nodes +- 3-way parallelism: tensor parallel, pipeline parallel and data parallel training (inherited from Megatron) +- full pretraining, finetuning and instruct tuning support +- Support for special tokens & tokenizers +- grouped-query attention (GQA) and multi-query attention (MQA) +- Rotary Position Embeddings (RoPE), RMS layer norm, Lima dropout +- `ROPE scaling `_ for longer attention context support +- FlashAttention 2 +- BF16 / FP16 training +- WandB integration +- Metrics support: Ease to add custom metrics to evaluate on the validation set while training +- Conversion to and from Hugging Face hub + +Example models trained with `Megatron-LLM `_: See `README `_. + +User guide +---------- + +For information on installation and usage, take a look at our user guide. + +.. toctree:: + :maxdepth: 2 + + guide/index + + +API +--- + +Detailed information about Megatron-LLM components: + +.. toctree:: + :maxdepth: 2 + + api/index + + + + +Citation +-------- + +If you use this software please cite it: + +.. code-block:: bib + + @software{epfmgtrn, + author = {Alejandro Hernández Cano and + Matteo Pagliardini and + Andreas Köpf and + Kyle Matoba and + Amirkeivan Mohtashami and + Olivia Simin Fan and + Axel Marmet and + Deniz Bayazit and + Igor Krawczuk and + Zeming Chen and + Francesco Salvi and + Antoine Bosselut and + Martin Jaggi}, + title = {epfLLM Megatron-LM}, + year = 2023, + url = {https://github.com/epfLLM/Megatron-LLM} + } diff --git a/multilinguality_megatron/docs/make.bat b/multilinguality_megatron/docs/make.bat new file mode 100644 index 0000000000000000000000000000000000000000..32bb24529f92346af26219baed295b7488b77534 --- /dev/null +++ b/multilinguality_megatron/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/multilinguality_megatron/docs/requirements.txt b/multilinguality_megatron/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8236ea39b0821a2fa8ba2bc7bea5bb4b712b47c1 --- /dev/null +++ b/multilinguality_megatron/docs/requirements.txt @@ -0,0 +1,11 @@ +sphinx == 7.1.0 +pydata-sphinx-theme >= 0.13.0 +myst-parser >= 2.0.0 +flask >= 2.3.0 +flask_restful >= 0.3.0 +wandb >= 0.15.0 +torch >= 2.0.0 +regex >= 2023.6.0 +numpy >= 1.25 +pillow >= 10.0.0 +einops >= 0.6.1 diff --git a/multilinguality_megatron/ducttape/10B_all_cleaned.tconf b/multilinguality_megatron/ducttape/10B_all_cleaned.tconf new file mode 100644 index 0000000000000000000000000000000000000000..7f08ca07b81a9468d90c38d0a85e8e61cf6d9b57 --- /dev/null +++ b/multilinguality_megatron/ducttape/10B_all_cleaned.tconf @@ -0,0 +1,80 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B/checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + # number such that final tokens for each language are around 1B + n_tokens=(TrainLanguage: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=10 + save_interval=635 + eval_interval=635 + train_steps=6358 + + lr_scheduler=cosine + warmup_steps=63 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_10b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198" + wikipedia=False + freeze_layers="" +} diff --git a/multilinguality_megatron/ducttape/10B_all_cleaned_13B.tconf b/multilinguality_megatron/ducttape/10B_all_cleaned_13B.tconf new file mode 100644 index 0000000000000000000000000000000000000000..f82ae2c0c7a2936497e51db1b8583c79843a3015 --- /dev/null +++ b/multilinguality_megatron/ducttape/10B_all_cleaned_13B.tconf @@ -0,0 +1,80 
@@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/llama2_13B_all + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/llama2_13B_all/checkpoints + model_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/db6b8eb1feabb38985fdf785a89895959e944936 + tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/db6b8eb1feabb38985fdf785a89895959e944936/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + # number such that final tokens for each language are around 1B + n_tokens=(TrainLanguage: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=10 + eval_interval=635 + train_steps=10 + + lr_scheduler=cosine + warmup_steps=0 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="test_llama_13B" + wikipedia=False + freeze_layers="" +} diff --git a/multilinguality_megatron/ducttape/10B_all_cleaned_extend32.tconf b/multilinguality_megatron/ducttape/10B_all_cleaned_extend32.tconf new file mode 100644 index 0000000000000000000000000000000000000000..e6b9f3f4dda9a0ab3594b576406ca8d08dd1f1ea --- /dev/null +++ b/multilinguality_megatron/ducttape/10B_all_cleaned_extend32.tconf @@ -0,0 +1,83 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32 + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32/checkpoints + model_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit + tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + posterior_tokens=False + n_posterior_tokens=False + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + n_tokens=(TrainLanguage: + en=900000000 + es=900000000 + de=900000000 + fr=900000000 + nl=900000000 + pt=900000000 + it=900000000 + ru=550000000 + zh=20000000 + ko=450000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + 
fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=10 + save_interval=635 + eval_interval=635 + train_steps=6358 + + lr_scheduler=cosine + warmup_steps=63 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=52620 + eval_iters=1 + + cpu_workers=16 + wandb_run_id="NEW_llama2_7B_10b_extend32_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198" + wikipedia=False + freeze_layers="" +} diff --git a/multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmed_up.tconf b/multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmed_up.tconf new file mode 100644 index 0000000000000000000000000000000000000000..5f66c69129e1243d390e2f15c38172f5e3db29a7 --- /dev/null +++ b/multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmed_up.tconf @@ -0,0 +1,84 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32 + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32/warmed_up_checkpoints + # for warmed up models, the model path points to the sharded megatron checkpoint + model_path=/mnt/data/shared/multilingual_llm/experiments_megatron/warmup_embeddings_llama2_all_1B_extend32/checkpoints + tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + wikipedia=False + posterior_tokens=False + n_posterior_tokens=False + freeze_layers="" + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + n_tokens=(TrainLanguage: + en=900000000 + es=900000000 + de=900000000 + fr=900000000 + nl=900000000 + pt=900000000 + it=900000000 + ru=550000000 + zh=20000000 + ko=450000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=10 + save_interval=127 + eval_interval=635 + train_steps=6358 + eval_iters=1 + + lr_scheduler=cosine + warmup_steps=63 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=52620 + + cpu_workers=16 + 
wandb_run_id="NEW_warmed_up_llama2_7B_10b_extend32_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198" +} diff --git a/multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmup.tconf b/multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmup.tconf new file mode 100644 index 0000000000000000000000000000000000000000..fd65202a10396aede0c0784225787c941c5a281e --- /dev/null +++ b/multilinguality_megatron/ducttape/10B_all_cleaned_extend32_warmup.tconf @@ -0,0 +1,93 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/warmup_embeddings_llama2_all_1B_extend32/ + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/warmup_embeddings_llama2_all_1B_extend32/checkpoints + model_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit + tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + posterior_tokens=True + n_tokens=(TrainLanguage: + en=900000000 + es=900000000 + de=900000000 + fr=900000000 + nl=900000000 + pt=900000000 + it=900000000 + ru=550000000 + zh=20000000 + ko=450000000 + ) + n_posterior_tokens=(TrainLanguage: + en=180000000 + es=180000000 + de=180000000 + fr=180000000 + nl=180000000 + pt=180000000 + it=180000000 + ru=100000000 + zh=4000000 + ko=90000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=5 + save_interval=635 + eval_interval=635 + train_steps=635 + eval_iters=0 + + lr_scheduler=constant + warmup_steps=0 + lr=3e-4 + lr_min=3e-4 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=52620 + + cpu_workers=16 + wandb_run_id="NEW_EMBEDDINGS_ONLY_llama2_7B_10b_extend32_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198" + wikipedia=False + freeze_layers="not_embeddings" +} diff --git a/multilinguality_megatron/ducttape/10B_all_wikipedia.tconf b/multilinguality_megatron/ducttape/10B_all_wikipedia.tconf new file mode 100644 index 0000000000000000000000000000000000000000..fd6db70bf290d6a066374c8aace7aaba985144fa --- /dev/null +++ b/multilinguality_megatron/ducttape/10B_all_wikipedia.tconf @@ -0,0 +1,83 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B/ + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B/checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf + 
tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + # number such that final tokens for each language are around 1B + n_tokens=(TrainLanguage: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/en + es=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/es + de=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/de + fr=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/fr + nl=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/nl + pt=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/pt + it=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/it + ru=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ru + zh=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/zh + ko=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ko + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=10 + save_interval=127 + eval_interval=635 + train_steps=6358 + + lr_scheduler=cosine + warmup_steps=63 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=4 + gpu_ids=0,1,2,3 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=24 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="WIKIPEDIA_llama2_7B_10b_base_vocab_uniform" + wikipedia=True + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=False + eval_iters=0 +} diff --git a/multilinguality_megatron/ducttape/20B_all_cleaned_mc4.tconf b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4.tconf new file mode 100644 index 0000000000000000000000000000000000000000..09f55d06395382a628edf7b3917ccd09594683c1 --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4.tconf @@ -0,0 +1,113 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9 + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + 
ru=334 + zh=2041 + ko=198 + ) + + datamix_weights=( + DataMix: + mc4_uniform=( + Dataset: + en=100 + es=100 + de=100 + fr=100 + nl=100 + pt=100 + it=100 + ru=100 + zh=100 + ko=100 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=False + eval_iters=1 + is_parallel=False + lp="" +} diff --git a/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel.tconf b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel.tconf new file mode 100644 index 0000000000000000000000000000000000000000..ee73bb975e65f4bd33d9762f364ed1f694e227e5 --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel.tconf @@ -0,0 +1,264 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_parallel_checkpoints + model_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852 + tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + 
en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 +} diff --git a/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_13b.tconf b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_13b.tconf new file mode 100644 index 0000000000000000000000000000000000000000..377931394f7d6b6e82f600ee25e7bd24de34e25a --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_13b.tconf @@ -0,0 +1,264 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_13B_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_13B_all_20B/mc4_parallel_checkpoints + model_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/dc1d3b3bfdb69df26f8fc966c16353274b138c55 + tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-13b-hf/snapshots/dc1d3b3bfdb69df26f8fc966c16353274b138c55/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + 
en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 +} diff --git a/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_concat.tconf b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_concat.tconf new file mode 100644 index 0000000000000000000000000000000000000000..96b7dc8b916a823df88abf5bbdc6e2a5888d1f3e --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_concat.tconf @@ -0,0 +1,264 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_parallel_concat_checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852 + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + 
en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 +} diff --git a/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_instructions.tconf b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_instructions.tconf new file mode 100644 index 0000000000000000000000000000000000000000..cca3ac2e464fa51e2e158e52a786d4b68e84c152 --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_parallel_instructions.tconf @@ -0,0 +1,271 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B_w_instructions + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B_w_instructions/mc4_parallel_towerblocksv0.1_checkpoints + model_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852 + tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en tower_blocks) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + tower_blocks="Unbabel/TowerBlocks-v0.1" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + tower_blocks=True + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + tower_blocks=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + tower_blocks=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + tower_blocks=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + 
de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + tower_blocks=False + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + tower_blocks="oi" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 +} diff --git a/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_wiki.tconf b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_wiki.tconf new file mode 100644 index 0000000000000000000000000000000000000000..9283bd18db896805b549eb2a931e5407442b9ec6 --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_cleaned_mc4_wiki.tconf @@ -0,0 +1,176 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/mc4_wiki_checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9 + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_wiki de_wiki fr_wiki es_wiki it_wiki nl_wiki pt_wiki ru_wiki zh_wiki ko_wiki) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_wiki="" + es_wiki="" + de_wiki="" + fr_wiki="" + nl_wiki="" + pt_wiki="" + it_wiki="" + ru_wiki="" + zh_wiki="" + ko_wiki="" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_wiki=False + es_wiki=False + de_wiki=False + fr_wiki=False + nl_wiki=False + pt_wiki=False + it_wiki=False + ru_wiki=False + zh_wiki=False + ko_wiki=False + ) + + threshold=(Dataset: + en=516 en_wiki="" + es=275 es_wiki="" + de=611 de_wiki="" + fr=322 fr_wiki="" + nl=649 nl_wiki="" + pt=257 pt_wiki="" + 
it=332 it_wiki="" + ru=334 ru_wiki="" + zh=2041 zh_wiki="" + ko=198 ko_wiki="" + ) + + datamix_weights=( + DataMix: + mc4_wiki_uniform=( + Dataset: + en=67 + es=67 + de=67 + fr=67 + nl=67 + pt=67 + it=67 + ru=67 + zh=67 + ko=67 + en_wiki=33 + es_wiki=33 + de_wiki=33 + fr_wiki=33 + nl_wiki=33 + pt_wiki=33 + it_wiki=33 + ru_wiki=33 + zh_wiki=33 + ko_wiki=33 + ) + mc4_uniform=( + Dataset: + en=100 + es=100 + de=100 + fr=100 + nl=100 + pt=100 + it=100 + ru=100 + zh=100 + ko=100 + en_wiki=0 + es_wiki=0 + de_wiki=0 + fr_wiki=0 + nl_wiki=0 + pt_wiki=0 + it_wiki=0 + ru_wiki=0 + zh_wiki=0 + ko_wiki=0 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_wiki="" + es_wiki="" + de_wiki="" + fr_wiki="" + nl_wiki="" + pt_wiki="" + it_wiki="" + ru_wiki="" + zh_wiki="" + ko_wiki="" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=False + eval_iters=1 + is_parallel=False + lp="" +} diff --git a/multilinguality_megatron/ducttape/20B_all_cleaned_parallel.tconf b/multilinguality_megatron/ducttape/20B_all_cleaned_parallel.tconf new file mode 100644 index 0000000000000000000000000000000000000000..0a065a4f304f149fcf4fb1973ccb379d119f43c2 --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_cleaned_parallel.tconf @@ -0,0 +1,194 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/parallel_checkpoints + model_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852 + tokenizer_path=/mnt/data/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model + + dataset=(Dataset: en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + + dataset_path=(Dataset: + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en_de=1 + de_en=1 + en_fr=1 + fr_en=1 + en_es=1 + es_en=1 + en_it=1 + it_en=1 + en_nl=1 + nl_en=1 + en_pt=1 + pt_en=1 + en_ru=1 + ru_en=1 + en_zh=1 + zh_en=1 + en_ko=1 + ko_en=1 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + 
grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 +} diff --git a/multilinguality_megatron/ducttape/20B_all_dirty_mc4.tconf b/multilinguality_megatron/ducttape/20B_all_dirty_mc4.tconf new file mode 100644 index 0000000000000000000000000000000000000000..58910c9cdc7b3794ed67dc87a3b0a1d41232cd17 --- /dev/null +++ b/multilinguality_megatron/ducttape/20B_all_dirty_mc4.tconf @@ -0,0 +1,124 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_20B/dirty_mc4_checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852 + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/8cca527612d856d7d32bd94f8103728d614eb852/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/pre-training/tower_llm_data/en/data + es=/mnt/data_2/shared/pre-training/tower_llm_data/es/0/0000.json.gz + de=/mnt/data_2/shared/pre-training/tower_llm_data/de/0/0000.json.gz + fr=/mnt/data_2/shared/pre-training/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/pre-training/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/pre-training/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/pre-training/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/pre-training/tower_llm_data/ru/0/0000.json.gz + zh=/mnt/data_2/shared/pre-training/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/pre-training/tower_llm_data/ko/0000.json.gz + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + ) + + threshold=(Dataset: + en=10000000 + es=10000000 + de=10000000 + fr=10000000 + nl=10000000 + pt=10000000 + it=10000000 + ru=10000000 + zh=10000000 + ko=10000000 + ) + + datamix_weights=( + DataMix: + mc4_uniform=( + Dataset: + en=100 + es=100 + de=100 + fr=100 + nl=100 + pt=100 + it=100 + ru=100 + zh=100 + ko=100 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=1388888800 + ko=250000000 + ) + + min_perplexity=0 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_20b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_wiki_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + is_parallel=False + lp=(Dataset: + en="en" + es="es" + de="de" + fr="fr" + nl="nl" + pt="pt" + it="it" + ru="ru" + zh="zh" + ko="ko" + ) +} diff --git a/multilinguality_megatron/ducttape/40B_all_cleaned_mc4_parallel.tconf b/multilinguality_megatron/ducttape/40B_all_cleaned_mc4_parallel.tconf new file mode 
100644 index 0000000000000000000000000000000000000000..8dd7bf758d592bf2d689d2770ae1f2ace46da172 --- /dev/null +++ b/multilinguality_megatron/ducttape/40B_all_cleaned_mc4_parallel.tconf @@ -0,0 +1,305 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_40B + repo=/mnt/data/pmartins/code/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_40B/mc4_parallel_checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9 + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko pl sv en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_pl pl_en en_sv sv_en) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/0/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/0/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/0/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + pl=/mnt/data_2/shared/tower_llm_data/pl/0000.json.gz + sv=/mnt/data_2/shared/tower_llm_data/sv/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pl="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-pl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pl_en="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-pl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75/" + en_sv="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-sv/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75/" + sv_en="/mnt/data_2/shared/tower_llm_data/bilingual_data/en-sv/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75/" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + pl=False + sv=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_pl=False + pl_en=False + en_sv=False + sv_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + pl=261 + sv=699 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_pl=100000 + pl_en=100000 + en_sv=100000 + sv_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + pl=0 + sv=0 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + en_pl=0 + pl_en=0 + en_sv=0 + sv_en=0 + ) + ) + + n_tokens=(Dataset: + en=4000000000 + es=4000000000 + de=4000000000 + fr=4000000000 + nl=4000000000 + pt=4000000000 + it=4000000000 + ru=4000000000 + zh=10000000000 + ko=4000000000 + pl=4000000000 + sv=4000000000 + en_de=200000000 + de_en=200000000 + en_fr=200000000 + fr_en=200000000 + en_es=200000000 + es_en=200000000 + en_it=200000000 + it_en=200000000 + en_nl=200000000 + nl_en=200000000 + en_pt=200000000 + pt_en=200000000 + en_ru=200000000 + ru_en=200000000 + en_zh=200000000 + zh_en=200000000 + en_ko=200000000 + ko_en=200000000 + en_pl=200000000 + pl_en=200000000 + en_sv=200000000 + sv_en=200000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + pl=False + sv=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_pl=True + pl_en=True + en_sv=True + sv_en=True + ) + + lp=(Dataset: + en="en" + es="es" + de="de" + fr="fr" + nl="nl" + pt="pt" + it="it" + ru="ru" + zh="zh" + ko="ko" + pl="pl" + sv="sv" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + 
fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_pl="en-pl" + pl_en="pl-en" + en_sv="en-sv" + sv_en="sv-en" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="llama2_7B_40b_base_vocab_uniform_cleaned_ppl_thresh_516_275_611_322_649_257_332_334_2041_198_and_parallel_33" + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 +} diff --git a/multilinguality_megatron/ducttape/continue_pretraining.tconf b/multilinguality_megatron/ducttape/continue_pretraining.tconf new file mode 100644 index 0000000000000000000000000000000000000000..d163233bc7893766f0e86809f96e4fe933212131 --- /dev/null +++ b/multilinguality_megatron/ducttape/continue_pretraining.tconf @@ -0,0 +1,77 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_test + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_test/checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + # less for zh (inefficient tokenizer) + n_tokens=(TrainLanguage: + en=250000000 + es=83333333 + de=83333333 + fr=83333333 + nl=83333333 + pt=83333333 + it=83333333 + ru=83333333 + zh=8333333 + ko=83333333 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=10 + save_interval=318 + eval_interval=158 + train_steps=1272 + + lr_scheduler=cosine + warmup_steps=13 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=6 + + cpu_workers=16 + +} diff --git a/multilinguality_megatron/ducttape/data_test.tconf b/multilinguality_megatron/ducttape/data_test.tconf new file mode 100644 index 0000000000000000000000000000000000000000..a6caf4fec33d8c232691fdf2ed0f0d52e0636269 --- /dev/null +++ b/multilinguality_megatron/ducttape/data_test.tconf @@ -0,0 +1,79 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B 
+ repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B/checkpoints + model_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + # number such that final tokens for each language are around 1B + n_tokens=(TrainLanguage: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/en + es=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/es + de=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/de + fr=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/fr + nl=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/nl + pt=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/pt + it=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/it + ru=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ru + zh=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/zh + ko=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ko + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=10 + save_interval=635 + eval_interval=635 + train_steps=6358 + + lr_scheduler=cosine + warmup_steps=63 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wandb_run_id="wikipedia" + wikipedia=True +} diff --git a/multilinguality_megatron/ducttape/data_test_extend32.tconf b/multilinguality_megatron/ducttape/data_test_extend32.tconf new file mode 100644 index 0000000000000000000000000000000000000000..5f4c9146003d557c89edb0c08bae6f46d6077186 --- /dev/null +++ b/multilinguality_megatron/ducttape/data_test_extend32.tconf @@ -0,0 +1,79 @@ +global { + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/wikipedia_llama2_all_10B_extend32 + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama2_all_10B_extend32/checkpoints + model_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit + tokenizer_path=/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit/tokenizer.model + + train_language=(TrainLanguage: en de fr es it nl pt ru zh ko) + + threshold=(TrainLanguage: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + ) + + # number such that final tokens for each language are around 1B + n_tokens=(TrainLanguage: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + ) + + dataset_path=(TrainLanguage: + en=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/en + es=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/es + de=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/de + fr=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/fr + 
nl=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/nl + pt=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/pt + it=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/it + ru=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ru + zh=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/zh + ko=/mnt/data/shared/multilingual_llm/tower_llm_wikipedia/ko + ) + + mix="10 10 10 10 10 10 10 10 10 10" + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=10 + save_interval=635 + eval_interval=635 + train_steps=6358 + + lr_scheduler=cosine + warmup_steps=63 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=52672 + + cpu_workers=16 + wandb_run_id="wikipedia_extend32" + wikipedia=True +} diff --git a/multilinguality_megatron/ducttape/gemma_2B_20B_all_cleaned_mc4_parallel.tconf b/multilinguality_megatron/ducttape/gemma_2B_20B_all_cleaned_mc4_parallel.tconf new file mode 100644 index 0000000000000000000000000000000000000000..605bebbe21c15965dd04e694ea5f77b42a82652e --- /dev/null +++ b/multilinguality_megatron/ducttape/gemma_2B_20B_all_cleaned_mc4_parallel.tconf @@ -0,0 +1,280 @@ +global { + model_type="gemma" + ducttape_output=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/test + external_model_dir_annealing=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/test + model_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b/ + tokenizer_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b + + tokenizer_type=PretrainedFromHF + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data_2/shared/pre-training/tower_llm_data/en/data + es=/mnt/data_2/shared/pre-training/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/pre-training/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/pre-training/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/pre-training/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/pre-training/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/pre-training/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/pre-training/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/pre-training/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/pre-training/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + 
en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=0 + + size=(Size: 2 7) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + train_steps_annealing=0 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=2 + grad_accum_steps=48 + vocab_size=256000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + glu_activation=geglu + kv_channels=256 + layernorm_epsilon=1e-6 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/gemma_2b_flavio.tconf b/multilinguality_megatron/ducttape/gemma_2b_flavio.tconf new file mode 100644 index 0000000000000000000000000000000000000000..597eed35fd9c39f2ff00c81a440b3d8750cc4d63 --- /dev/null +++ b/multilinguality_megatron/ducttape/gemma_2b_flavio.tconf @@ -0,0 +1,546 @@ +global { + model_type="gemma" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_gemma_2_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_gemma_2_20B/flavio_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_gemma_2_20B/flavio_checkpoints_annealed + model_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b/ + tokenizer_path=/mnt/data_2/cache/models--google--gemma-2b/snapshots/9d067f00def958594aaa16b39a65b07d69ca655b + + tokenizer_type=PretrainedFromHF + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + 
ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + 
en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=0 + de_en=0 + en_fr=0 + fr_en=0 + en_es=0 + es_en=0 + en_it=0 + it_en=0 + en_nl=0 + nl_en=0 + en_pt=0 + pt_en=0 + en_ru=0 + ru_en=0 + en_zh=0 + zh_en=0 + en_ko=0 + ko_en=0 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions=0 + en_de_pre_annealing=183 + de_en_pre_annealing=183 + en_fr_pre_annealing=183 + fr_en_pre_annealing=183 + en_es_pre_annealing=183 + es_en_pre_annealing=183 + en_it_pre_annealing=183 + it_en_pre_annealing=183 + en_nl_pre_annealing=183 + nl_en_pre_annealing=183 + en_pt_pre_annealing=183 + pt_en_pre_annealing=183 + en_ru_pre_annealing=183 + ru_en_pre_annealing=183 + en_zh_pre_annealing=183 + zh_en_pre_annealing=183 + en_ko_pre_annealing=183 + ko_en_pre_annealing=183 + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=833 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=833 + ru_en=833 + en_zh=833 + zh_en=833 + en_ko=833 + ko_en=833 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=85000 + en_de_pre_annealing=0 + de_en_pre_annealing=0 + en_fr_pre_annealing=0 + fr_en_pre_annealing=0 + en_es_pre_annealing=0 + es_en_pre_annealing=0 + en_it_pre_annealing=0 + it_en_pre_annealing=0 + en_nl_pre_annealing=0 + nl_en_pre_annealing=0 + en_pt_pre_annealing=0 + pt_en_pre_annealing=0 + en_ru_pre_annealing=0 + ru_en_pre_annealing=0 + en_zh_pre_annealing=0 + zh_en_pre_annealing=0 + en_ko_pre_annealing=0 + ko_en_pre_annealing=0 + ) + ) + + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + 
pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20000000 + es_synth=20000000 + de_synth=20000000 + fr_synth=20000000 + nl_synth=20000000 + pt_synth=20000000 + it_synth=20000000 + ru_synth=20000000 + zh_synth=20000000 + ko_synth=20000000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth="" + es_synth="" + de_synth="" + fr_synth="" + nl_synth="" + pt_synth="" + it_synth="" + ru_synth="" + zh_synth="" + ko_synth="" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + min_perplexity=0 + + size=(Size: 2) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=11430 + train_steps_annealing=1270 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + 
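+    # Illustrative arithmetic (editor's note, not part of the original recipe), assuming
+    # main.tape computes global_batch_size = micro_batch_size * n_gpus * grad_accum_steps:
+    # 24 * 8 * 4 = 768 sequences of seq_length 2048 is ~1.57M tokens per step, so the
+    # 11430 + 1270 = 12700 total steps above come to roughly 20B tokens, consistent with
+    # the "_20B" suffix of ducttape_output.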
tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=24 + grad_accum_steps=4 + vocab_size=256000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + glu_activation=geglu + kv_channels=256 + layernorm_epsilon=1e-6 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/gemma_7B_20B_all_cleaned_mc4_parallel.tconf b/multilinguality_megatron/ducttape/gemma_7B_20B_all_cleaned_mc4_parallel.tconf new file mode 100644 index 0000000000000000000000000000000000000000..1d8a99ee0daae3d4c662e39520d4c89471cac659 --- /dev/null +++ b/multilinguality_megatron/ducttape/gemma_7B_20B_all_cleaned_mc4_parallel.tconf @@ -0,0 +1,280 @@ +global { + model_type="gemma" + ducttape_output=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/old_recipe_checkpoints + external_model_dir_annealing=/mnt/data_2/shared/experiments_megatron/continue_pretraining_gemma_7B/old_recipe_checkpoints + model_path=/mnt/data_2/cache/models--google--gemma-7b/snapshots/bc0790ce8e02c6b2240e2b94bf01fb0453dc90f6 + tokenizer_path=/mnt/data_2/cache/models--google--gemma-7b/snapshots/bc0790ce8e02c6b2240e2b94bf01fb0453dc90f6 + + tokenizer_type=PretrainedFromHF + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data_2/shared/pre-training/tower_llm_data/en/data + es=/mnt/data_2/shared/pre-training/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/pre-training/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/pre-training/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/pre-training/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/pre-training/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/pre-training/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/pre-training/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/pre-training/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/pre-training/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + 
ko_en="ko-en" + ) + + min_perplexity=0 + + size=(Size: 2 7) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + train_steps_annealing=0 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=2 + grad_accum_steps=24 + vocab_size=256000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + glu_activation=geglu + kv_channels=256 + layernorm_epsilon=1e-6 + + seq_length=4096 +} diff --git a/multilinguality_megatron/ducttape/llama_3_flavio.tconf b/multilinguality_megatron/ducttape/llama_3_flavio.tconf new file mode 100644 index 0000000000000000000000000000000000000000..50a875bcaf85ff531409af3d6f7108108572a90d --- /dev/null +++ b/multilinguality_megatron/ducttape/llama_3_flavio.tconf @@ -0,0 +1,546 @@ +global { + model_type="llama3" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio + repo=/mnt/data/pmartins/code/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/pre_annealing_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/checkpoints_annealed + model_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/ + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/ + + tokenizer_type=PretrainedFromHF + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + 
en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=0 + de_en=0 + en_fr=0 + fr_en=0 + en_es=0 + es_en=0 + en_it=0 + it_en=0 + en_nl=0 + nl_en=0 + en_pt=0 + pt_en=0 + en_ru=0 + ru_en=0 + en_zh=0 + zh_en=0 + en_ko=0 + ko_en=0 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions=0 + en_de_pre_annealing=183 + de_en_pre_annealing=183 + en_fr_pre_annealing=183 + fr_en_pre_annealing=183 + en_es_pre_annealing=183 + es_en_pre_annealing=183 + en_it_pre_annealing=183 + it_en_pre_annealing=183 + en_nl_pre_annealing=183 + nl_en_pre_annealing=183 + en_pt_pre_annealing=183 + pt_en_pre_annealing=183 + en_ru_pre_annealing=183 + ru_en_pre_annealing=183 + en_zh_pre_annealing=183 + zh_en_pre_annealing=183 + en_ko_pre_annealing=183 + ko_en_pre_annealing=183 + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=833 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=833 + ru_en=833 + en_zh=833 + zh_en=833 + en_ko=833 + ko_en=833 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=85000 + en_de_pre_annealing=0 + de_en_pre_annealing=0 + en_fr_pre_annealing=0 + fr_en_pre_annealing=0 + en_es_pre_annealing=0 + es_en_pre_annealing=0 + en_it_pre_annealing=0 + it_en_pre_annealing=0 + en_nl_pre_annealing=0 + nl_en_pre_annealing=0 + en_pt_pre_annealing=0 + pt_en_pre_annealing=0 + en_ru_pre_annealing=0 + ru_en_pre_annealing=0 + en_zh_pre_annealing=0 + zh_en_pre_annealing=0 + en_ko_pre_annealing=0 + ko_en_pre_annealing=0 + ) + ) + + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20000000 + es_synth=20000000 + de_synth=20000000 + 
fr_synth=20000000 + nl_synth=20000000 + pt_synth=20000000 + it_synth=20000000 + ru_synth=20000000 + zh_synth=20000000 + ko_synth=20000000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth="" + es_synth="" + de_synth="" + fr_synth="" + nl_synth="" + pt_synth="" + it_synth="" + ru_synth="" + zh_synth="" + ko_synth="" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + min_perplexity=50 + + size=(Size: 8) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=11430 + train_steps_annealing=1270 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=128256 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=4096 + + glu_activation=swiglu + kv_channels="" + layernorm_epsilon=1e-5 +} diff --git a/multilinguality_megatron/ducttape/llama_3_flavio_wmt_annealing.tconf 
b/multilinguality_megatron/ducttape/llama_3_flavio_wmt_annealing.tconf new file mode 100644 index 0000000000000000000000000000000000000000..10f68dd56905cfc77559b9afca36b7372c6c0118 --- /dev/null +++ b/multilinguality_megatron/ducttape/llama_3_flavio_wmt_annealing.tconf @@ -0,0 +1,570 @@ +global { + model_type="llama3" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio + repo=/mnt/data/pmartins/code/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/pre_annealing_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_llama3_flavio/checkpoints_annealed_wmt + model_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/ + tokenizer_path=/mnt/data_2/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/cd892e8f4da1043d4b01d5ea182a2e8412bf658f/ + + tokenizer_type=PretrainedFromHF + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions en_de_wmt en_ru_wmt en_zh_wmt) + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + en_de_wmt="oi" + en_ru_wmt="oi" + en_zh_wmt="oi" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + en_de_wmt="oi" + en_ru_wmt="oi" + en_zh_wmt="oi" + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + 
en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + en_de_wmt="oi" + en_ru_wmt="oi" + en_zh_wmt="oi" + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=0 + de_en=0 + en_fr=0 + fr_en=0 + en_es=0 + es_en=0 + en_it=0 + it_en=0 + en_nl=0 + nl_en=0 + en_pt=0 + pt_en=0 + en_ru=0 + ru_en=0 + en_zh=0 + zh_en=0 + en_ko=0 + ko_en=0 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions=0 + en_de_pre_annealing=183 + de_en_pre_annealing=183 + en_fr_pre_annealing=183 + fr_en_pre_annealing=183 + en_es_pre_annealing=183 + es_en_pre_annealing=183 + en_it_pre_annealing=183 + it_en_pre_annealing=183 + en_nl_pre_annealing=183 + nl_en_pre_annealing=183 + en_pt_pre_annealing=183 + pt_en_pre_annealing=183 + en_ru_pre_annealing=183 + ru_en_pre_annealing=183 + en_zh_pre_annealing=183 + zh_en_pre_annealing=183 + en_ko_pre_annealing=183 + ko_en_pre_annealing=183 + en_de_wmt=0 + en_ru_wmt=0 + en_zh_wmt=0 + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de_wmt=833 + en_ru_wmt=833 + en_zh_wmt=833 + en_de=0 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=0 + ru_en=833 + en_zh=0 + zh_en=833 + en_ko=833 + ko_en=833 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=85000 + en_de_pre_annealing=0 + de_en_pre_annealing=0 + en_fr_pre_annealing=0 + fr_en_pre_annealing=0 + en_es_pre_annealing=0 + es_en_pre_annealing=0 + en_it_pre_annealing=0 + it_en_pre_annealing=0 + en_nl_pre_annealing=0 + nl_en_pre_annealing=0 + en_pt_pre_annealing=0 + pt_en_pre_annealing=0 + en_ru_pre_annealing=0 + ru_en_pre_annealing=0 + en_zh_pre_annealing=0 + zh_en_pre_annealing=0 + en_ko_pre_annealing=0 + ko_en_pre_annealing=0 + ) + ) + + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de_wmt="oi" + en_ru_wmt="oi" + en_zh_wmt="oi" + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20000000 + es_synth=20000000 + de_synth=20000000 + fr_synth=20000000 + nl_synth=20000000 + pt_synth=20000000 + it_synth=20000000 + ru_synth=20000000 + zh_synth=20000000 + ko_synth=20000000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + 
en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de_wmt="oi" + en_ru_wmt="oi" + en_zh_wmt="oi" + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de_wmt="oi" + en_ru_wmt="oi" + en_zh_wmt="oi" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth="" + es_synth="" + de_synth="" + fr_synth="" + nl_synth="" + pt_synth="" + it_synth="" + ru_synth="" + zh_synth="" + ko_synth="" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + min_perplexity=50 + + size=(Size: 8) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=11430 + train_steps_annealing=1270 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=128256 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=4096 + + glu_activation=swiglu + kv_channels="" + layernorm_epsilon=1e-5 +} diff --git a/multilinguality_megatron/ducttape/llama_3_tower_mix.tconf b/multilinguality_megatron/ducttape/llama_3_tower_mix.tconf new file mode 100644 index 0000000000000000000000000000000000000000..a1d7db9e609d9455ccc8a10e120aa2282e6fae38 --- /dev/null +++ b/multilinguality_megatron/ducttape/llama_3_tower_mix.tconf @@ -0,0 +1,314 @@ +global { + model_type="llama3" + ducttape_output=/mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3 + 
repo=/mnt/cephfs-nvme/jpombal/multilinguality_megatron + + external_model_dir=/mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/tower_mix_checkpoints + external_model_dir_annealing=/mnt/cephfs-nvme/shared/experiments_megatron/cpt_llama_3/tower_mix_checkpoints_annealed + model_path=/mnt/cephfs-nvme/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/1460c22666392e470910ce3d44ffeb2ab7dbd4df/ + tokenizer_path=/mnt/cephfs-nvme/cache/models--meta-llama--Meta-Llama-3-8B/snapshots/1460c22666392e470910ce3d44ffeb2ab7dbd4df/ + + tokenizer_type=PretrainedFromHF + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + 
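+    # Illustrative note (editor's addition, not part of the original config): the
+    # datamix_weights below appear to be relative sampling weights; 10 monolingual mc4 sets
+    # at 670 plus 18 parallel directions at 183 give 6700 + 3294 = 9994, i.e. ~67% mc4 and
+    # ~33% parallel data, which is the split the comment on that block refers to.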
is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=833 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=833 + ru_en=833 + en_zh=833 + zh_en=833 + en_ko=833 + ko_en=833 + ) + ) + + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=50 + + size=(Size: 8) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + train_steps_annealing=0 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=128256 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=4096 + + glu_activation=swiglu + 
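+    # Illustrative arithmetic (editor's note), assuming the global_batch_size formula in
+    # main.tape: with micro_batch_size=4, n_gpus=8 and grad_accum_steps=12 above, each step
+    # covers 4 * 8 * 12 = 384 sequences of 4096 tokens (~1.57M tokens), so save_interval=635
+    # checkpoints roughly every 1B tokens and train_steps=12700 amounts to about 20B tokens.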
kv_channels="" + layernorm_epsilon=1e-5 +} diff --git a/multilinguality_megatron/ducttape/main.tape b/multilinguality_megatron/ducttape/main.tape new file mode 100644 index 0000000000000000000000000000000000000000..69d2c88dfd5699f01e856a5001511c17a7c35f26 --- /dev/null +++ b/multilinguality_megatron/ducttape/main.tape @@ -0,0 +1,378 @@ +task DumpHFDataset + > dataset_json=dataset.json + :: repo=@ + :: dataset_path=@ + :: n_tokens=@ + :: threshold=@ + :: min_perplexity=@ + :: is_hf_dataset=@ + :: wikipedia=@ + :: posterior_tokens=@ + :: n_posterior_tokens=@ + :: is_parallel=@ + :: lp=@ +{ + python $repo/prepare_data.py \ + --output $dataset_json \ + --n_tokens $n_tokens \ + --dataset_path $dataset_path \ + --threshold $threshold \ + --min_perplexity $min_perplexity \ + --is_hf_dataset $is_hf_dataset \ + --wikipedia $wikipedia \ + --posterior_tokens $posterior_tokens \ + --n_posterior_tokens $n_posterior_tokens \ + --is_parallel $is_parallel \ + --lp $lp +} + +task PreprocessDataset + < dataset_json=$dataset_json@DumpHFDataset + > dataset_bin=data_bin + :: dataset=@ + :: repo=@ + :: tokenizer_path=@ + :: tokenizer_type=@ + :: cpu_workers=@ +{ +set -euo pipefail +mkdir -p $dataset_bin + python $repo/tools/preprocess_data.py \ + --input=$dataset_json \ + --output_prefix=$dataset_bin/data \ + --tokenizer_type=$tokenizer_type \ + --vocab_file=$tokenizer_path \ + --chunk_size=32 \ + --workers=16 \ + --no_new_tokens \ + --append_eod +} + +task Convert2Megatron + > megatron_model + :: repo=@ + :: size=@ + :: model_path=@ + :: model_type=@ +{ + python $repo/weights_conversion/hf_to_megatron.py $model_type \ + --size=$size \ + --out=$megatron_model \ + --cache-dir=$model_path \ + --model-path=$model_path +} + +task ModelSharding + < megatron_model=$megatron_model@Convert2Megatron + > sharded_model + :: repo=@ + :: tp=@ + :: pp=@ + :: vocab_size=@ + :: model_type=@ + :: kv_channels=@ +{ + KV_CHANNELS_ARGS="" + if [ "$kv_channels" != "" ]; then + KV_CHANNELS_ARGS="--kv_channels $kv_channels" + fi + + python $repo/tools/checkpoint_util.py \ + --target_tensor_parallel_size $tp \ + --target_pipeline_parallel_size $pp \ + --load_dir $megatron_model \ + --save_dir $sharded_model \ + --model_type $model_type \ + --true_vocab_size $vocab_size \ + --bf16 \ + $KV_CHANNELS_ARGS +} + +task MakeDataMix + < dataset_bin=@PreprocessDataset + > datamix_file + :: datamix_weights=@ +{ + # simply write datamix weight and path in dataset_bin to a file, separated by a space + echo "$datamix_weights $dataset_bin/data_text_document" > $datamix_file +} + +task MakeDataMixAnnealing + < dataset_bin=@PreprocessDataset + > datamix_file_annealing + :: datamix_weights_annealing=@ +{ + # simply write datamix weight and path in dataset_bin to a file, separated by a space + echo "$datamix_weights_annealing $dataset_bin/data_text_document" > $datamix_file_annealing +} + +task ContinuePretraining + < megatron_model=$sharded_model@ModelSharding + < dataset_bin=$dataset_bin@PreprocessDataset[Dataset:*] + < datamix_file=$datamix_file@MakeDataMix[Dataset:*] + > model_dir=checkpoints + :: repo=@ + :: log_interval=@ + :: save_interval=@ + :: eval_interval=@ + :: train_steps=@ + :: lr_scheduler=@ + :: warmup_steps=@ + :: lr=@ + :: lr_min=@ + :: n_gpus=@ + :: gpu_ids=@ + :: tp=@ + :: pp=@ + :: external_model_dir=@ + :: tokenizer_path=@ + :: micro_batch_size=@ + :: grad_accum_steps=@ + :: weight_decay=@ + :: freeze_layers=@ + :: eval_iters=@ + :: model_type=@ + :: seq_length=@ + :: glu_activation=@ + :: kv_channels=@ + :: layernorm_epsilon=@ + 
:: tokenizer_type=@ +{ + #external_model_dir="${external_model_dir}_${lr}" + if [ "$external_model_dir" != "" ]; then + mkdir -p $external_model_dir + mkdir -p $external_model_dir/runs + ln -s $external_model_dir $model_dir + fi + + data_path="" + for f in $datamix_file; do + # read file + data_path="$data_path `cat $f`" + done + echo "Running with data_path=$data_path" + + FREEZE_ARGS="" + if [ "$freeze_layers" == "not_embeddings" ]; then + FREEZE_ARGS="--freeze_layers" + fi + echo $FREEZE_ARGS + + export CUDA_VISIBLE_DEVICES=$gpu_ids + + # if load_from_checkpoint, then set megatron_model to external_model_dir + ckpt_flag=$external_model_dir/latest_checkpointed_iteration.txt + if [ -f $ckpt_flag ]; then + megatron_model=$external_model_dir + echo Loading from previously saved checkpoint. + fi + + KV_CHANNELS_ARGS="" + if [ "$kv_channels" != "" ]; then + KV_CHANNELS_ARGS="--kv_channels $kv_channels" + fi + + TIE_ARGS="" + if [ $model_type != 'gemma' ]; then + TIE_ARGS+="--no_tie_embed_logits" + fi + + + global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps)) + + LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval --eval_iters $eval_iters --log_validation_ppl_to_tensorboard --log_memory_to_tensorboard --log_batch_size_to_tensorboard" + TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min --weight_decay $weight_decay" + DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8134" + COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion" + LLAMA_ARGS="--use_rms_norm --glu_activation $glu_activation --no_new_tokens --layernorm_epsilon $layernorm_epsilon" + CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \ + --tensor_model_parallel_size $tp \ + --pipeline_model_parallel_size $pp \ + --load $megatron_model \ + --save $model_dir \ + --tensorboard_dir $external_model_dir/runs \ + --data_path $data_path \ + --model_name $model_type \ + --tokenizer_type $tokenizer_type \ + --vocab_file=$tokenizer_path \ + --bf16 \ + --use_flash_attn \ + --micro_batch_size $micro_batch_size \ + --global_batch_size $global_batch_size \ + --sequence_parallel \ + --recompute_granularity selective \ + --use_checkpoint_args \ + --seq_length $seq_length \ + --split 9995,3,2 \ + --sliding_window_size 4096 \ + --reset_attention_mask \ + --no_bias_gelu_fusion \ + $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS $FREEZE_ARGS $KV_CHANNELS_ARGS $TIE_ARGS +} + + +task Annealing + < megatron_model=$model_dir@ContinuePretraining + < dataset_bin=$dataset_bin@PreprocessDataset[Dataset:*] + < datamix_file=$datamix_file_annealing@MakeDataMixAnnealing[Dataset:*] + > model_dir=checkpoints + :: repo=@ + :: log_interval=@ + :: save_interval=@ + :: eval_interval=@ + :: train_steps_annealing=@ + :: lr_scheduler_annealing=@ + :: lr_annealing=@ + :: lr_min_annealing=@ + :: warmup_steps_annealing=@ + :: n_gpus=@ + :: gpu_ids=@ + :: tp=@ + :: pp=@ + :: external_model_dir=@ + :: external_model_dir_annealing=@ + :: tokenizer_path=@ + :: micro_batch_size=@ + :: grad_accum_steps=@ + :: weight_decay=@ + :: freeze_layers=@ + :: eval_iters=@ + :: model_type=@ + :: seq_length=@ + :: glu_activation=@ + :: kv_channels=@ + :: layernorm_epsilon=@ + :: tokenizer_type=@ +{ + #external_model_dir="${external_model_dir}_${lr_annealing}" + #external_model_dir_annealing="${external_model_dir_annealing}_${lr_annealing}" + if [ 
"$external_model_dir" != "" ]; then + mkdir -p $external_model_dir_annealing + mkdir -p $external_model_dir/runs/annealing + ln -s $external_model_dir_annealing $model_dir + fi + + data_path="" + for f in $datamix_file; do + # read file + data_path="$data_path `cat $f`" + done + echo "Running with data_path=$data_path" + + FREEZE_ARGS="" + if [ "$freeze_layers" == "not_embeddings" ]; then + FREEZE_ARGS="--freeze_layers" + fi + echo $FREEZE_ARGS + + KV_CHANNELS_ARGS="" + if [ "$kv_channels" != "" ]; then + KV_CHANNELS_ARGS="--kv_channels $kv_channels" + fi + + TIE_ARGS="" + if [ $model_type != 'gemma' ]; then + TIE_ARGS+="--no_tie_embed_logits" + fi + + export CUDA_VISIBLE_DEVICES=$gpu_ids + + + global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps)) + + LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval --eval_iters $eval_iters --log_validation_ppl_to_tensorboard --log_memory_to_tensorboard --log_batch_size_to_tensorboard" + TRAIN_ARGS="--train_iters $train_steps_annealing --lr_decay_style $lr_scheduler_annealing --lr_warmup_iters $warmup_steps_annealing --lr $lr_annealing --min_lr $lr_min_annealing --weight_decay $weight_decay" + DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8135" + COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion" + LLAMA_ARGS="--use_rms_norm --glu_activation $glu_activation --no_new_tokens --layernorm_epsilon $layernorm_epsilon" + CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \ + --tensor_model_parallel_size $tp \ + --pipeline_model_parallel_size $pp \ + --load $megatron_model \ + --save $model_dir \ + --tensorboard_dir $external_model_dir/runs/annealing \ + --data_path $data_path \ + --model_name $model_type \ + --tokenizer_type $tokenizer_type \ + --vocab_file=$tokenizer_path \ + --bf16 \ + --use_flash_attn \ + --micro_batch_size $micro_batch_size \ + --global_batch_size $global_batch_size \ + --sequence_parallel \ + --recompute_granularity selective \ + --use_checkpoint_args \ + --seq_length $seq_length \ + --split 9990,5,5 \ + --sliding_window_size 4096 \ + --annealing \ + $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS $FREEZE_ARGS $KV_CHANNELS_ARGS $TIE_ARGS +} + +plan preprocess_mc4 { + reach PreprocessDataset via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko) * (DataMix: mc4_uniform) +} + +plan preprocess_inst { + reach PreprocessDataset via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko pl sv en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_de_wmt en_ru_wmt en_zh_wmt) +} + +plan preprocess_data { + reach PreprocessDataset via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko pl sv en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_pl pl_en en_sv sv_en instructions) * (DataMix: mc4_wiki_uniform) +} + +plan train_mc4_wiki { + reach ContinuePretraining via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko 
en_wiki de_wiki fr_wiki es_wiki it_wiki nl_wiki pt_wiki ru_wiki zh_wiki ko_wiki) * (DataMix: mc4_wiki_uniform) +} + +plan train_mc4 { + reach ContinuePretraining via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko) +} + +plan prepare_data { + reach DumpHFDataset via (Size: 1) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko) +} + +plan preprocess_data_parallel { + reach PreprocessDataset via (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_ko ko_en en_zh zh_en) +} + +plan train_mc4_parallel_instructions { + reach ContinuePretraining via (Size: 1) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions) +} + +plan train_mc4_parallel_instructions_annealing { + reach Annealing via (Size: 1) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions) +} + +plan train_mc4_parallel { + reach ContinuePretraining via (Size: 8) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) +} + +plan train_mc4_parallel_13B { + reach ContinuePretraining via (Size: 13) * (TP: 8) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) +} + +plan warmed_up_train { + reach ContinuePretrainingWarmedUp via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_wiki de_wiki fr_wiki es_wiki it_wiki nl_wiki pt_wiki ru_wiki zh_wiki ko_wiki) * (DataMix: mc4_wiki_uniform) +} + +plan train_parallel { + reach ContinuePretraining via (Size: 8) * (TP: 4) * (PP: 1) * (Dataset: en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) +} + +plan gemma_test { + reach ContinuePretraining via (Size: 1) * (TP: 2) * (PP: 1) * (Dataset: en es) +} + +plan train_mc4_parallel_gemma { + reach ContinuePretraining via (Size: 2) * (TP: 1) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) +} + +plan annealing { + reach Annealing via (Size: 1) * (TP: 4) * (PP: 1) * (Dataset: *) +} + +plan cpt { + reach ContinuePretraining via (Size: 7) * (TP: 4) * (PP: 1) * (Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) +} \ No newline at end of file diff --git a/multilinguality_megatron/ducttape/mistral_20B_all_cleaned_mc4_parallel.tconf b/multilinguality_megatron/ducttape/mistral_20B_all_cleaned_mc4_parallel.tconf new file mode 100644 index 0000000000000000000000000000000000000000..26414b83c61aeb9c38a914e3a51eecdb681ea126 --- /dev/null +++ b/multilinguality_megatron/ducttape/mistral_20B_all_cleaned_mc4_parallel.tconf @@ -0,0 +1,264 @@ +global { + model_type="mistral" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_mistral_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_mistral_all_20B/mc4_parallel_checkpoints + 
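+    # ContinuePretraining symlinks its checkpoints output to this directory; if a
+    # latest_checkpointed_iteration.txt is already present here, training resumes
+    # from it instead of the freshly sharded base model (see main.tape).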
model_path=/mnt/data/cache/models--mistralai--Mistral-7B-v0.1/snapshots/26bca36bde8333b5d7f72e9ed20ccda6a618af24 + tokenizer_path=/mnt/data/cache/models--mistralai--Mistral-7B-v0.1/snapshots/26bca36bde8333b5d7f72e9ed20ccda6a618af24/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + 
en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 +} diff --git a/multilinguality_megatron/ducttape/mistral_20B_all_cleaned_mc4_parallel_instructions.tconf b/multilinguality_megatron/ducttape/mistral_20B_all_cleaned_mc4_parallel_instructions.tconf new file mode 100644 index 0000000000000000000000000000000000000000..4ea3016aacd74efcfc377008435aa8f4d0a4f285 --- /dev/null +++ b/multilinguality_megatron/ducttape/mistral_20B_all_cleaned_mc4_parallel_instructions.tconf @@ -0,0 +1,281 @@ +global { + model_type="mistral" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_mistral_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_mistral_all_20B/mc4_parallel_instructions_checkpoints + 
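+    # Rough token budget, using the values set further below: global batch =
+    # micro_batch_size 4 * n_gpus 8 * grad_accum_steps 12 = 384 sequences/step;
+    # at seq_length 4096 over 12700 steps that is ~20B tokens, presumably what
+    # the _20B suffix in these paths refers to.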
model_path=/mnt/data/cache/models--mistralai--Mistral-7B-v0.1/snapshots/26bca36bde8333b5d7f72e9ed20ccda6a618af24 + tokenizer_path=/mnt/data/cache/models--mistralai--Mistral-7B-v0.1/snapshots/26bca36bde8333b5d7f72e9ed20ccda6a618af24/tokenizer.model + external_model_dir_annealing="" + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="none" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False 
+ it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + instructions=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + instructions=10000000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_instructions=( + Dataset: + en=1350 + es=1350 + de=1350 + fr=1350 + nl=1350 + pt=1350 + it=1350 + ru=1350 + zh=1350 + ko=1350 + en_de=222 + de_en=222 + en_fr=222 + fr_en=222 + en_es=222 + es_en=222 + en_it=222 + it_en=222 + en_nl=222 + nl_en=222 + en_pt=222 + pt_en=222 + en_ru=222 + ru_en=222 + en_zh=222 + zh_en=222 + en_ko=222 + ko_en=222 + instructions=2500 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + instructions=100000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + instructions=False + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + instructions="" + ) + + min_perplexity=50 + + size=(Size: 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=12700 + + lr_scheduler=cosine + warmup_steps=127 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=12 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + seq_length=4096 + + # annealing + train_steps_annealing=20 + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel.tconf b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel.tconf new file mode 100644 index 0000000000000000000000000000000000000000..495bbca32c887ff05849348eb2be77966e3c5735 --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel.tconf @@ -0,0 +1,274 @@ +global { + model_type="llama2" + 
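+    # TinyLlama-1.1B follows the Llama 2 architecture, so the llama2 conversion and
+    # training code paths are reused; the (Size: 1) branch below is assumed to map
+    # to the ~1B variant in hf_to_megatron.py.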
ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=50 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=6350 + train_steps_annealing=20 + + lr_scheduler=cosine + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=24 + grad_accum_steps=4 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_instructions.tconf 
b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_instructions.tconf new file mode 100644 index 0000000000000000000000000000000000000000..5084e389a19529d48e77fbab5cc014e3cc3ec227 --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_instructions.tconf @@ -0,0 +1,281 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_instructions_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_instructions_20B/mc4_parallel_instructions_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_instructions_20B/mc4_parallel_instructions_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="none" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + instructions=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + instructions=10000000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_instructions=( + Dataset: + en=1350 + es=1350 + de=1350 + fr=1350 + nl=1350 + pt=1350 + it=1350 + ru=1350 + zh=1350 + ko=1350 + en_de=222 + de_en=222 + en_fr=222 + fr_en=222 + en_es=222 + es_en=222 + en_it=222 + it_en=222 + en_nl=222 + nl_en=222 + en_pt=222 + pt_en=222 + en_ru=222 + ru_en=222 + en_zh=222 + zh_en=222 + en_ko=222 + ko_en=222 + instructions=2500 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + instructions=100000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + instructions=False + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + instructions="" + ) + + min_perplexity=50 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=670 + eval_interval=670 + 
train_steps=6700 + train_steps_annealing=20 + + lr_scheduler=cosine + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=0,1,2,3,4,5,6 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth.tconf b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth.tconf new file mode 100644 index 0000000000000000000000000000000000000000..f749fd17e8ff0c6eeb3c556e892437a410877423 --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth.tconf @@ -0,0 +1,344 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_synth_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + 
it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20000000 + es_synth=20000000 + de_synth=20000000 + fr_synth=20000000 + nl_synth=20000000 + pt_synth=20000000 + it_synth=20000000 + ru_synth=20000000 + zh_synth=20000000 + ko_synth=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth="" + es_synth="" + de_synth="" + fr_synth="" + nl_synth="" + pt_synth="" + it_synth="" + ru_synth="" + zh_synth="" + ko_synth="" + ) + + min_perplexity=50 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=670 + eval_interval=670 + train_steps=6700 + train_steps_annealing=20 + + lr_scheduler=cosine + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=0,1,2,3,4,5,6 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth_annealing.tconf b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth_annealing.tconf new file mode 100644 index 0000000000000000000000000000000000000000..77c549cf18209b139fe7912288859f2b0e7c56f3 --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth_annealing.tconf @@ -0,0 +1,397 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_synth_pre_annealing_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_synth 
es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="oi" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + 
pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions="oi" + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions="oi" + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=833 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=833 + ru_en=833 + en_zh=833 + zh_en=833 + en_ko=833 + ko_en=833 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=85000 + ) + ) + + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20000000 + es_synth=20000000 + de_synth=20000000 + fr_synth=20000000 + nl_synth=20000000 + pt_synth=20000000 + it_synth=20000000 + ru_synth=20000000 + zh_synth=20000000 + ko_synth=20000000 + instructions="oi" + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth="" + 
es_synth="" + de_synth="" + fr_synth="" + nl_synth="" + pt_synth="" + it_synth="" + ru_synth="" + zh_synth="" + ko_synth="" + instructions="oi" + ) + + min_perplexity=50 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=5556 + train_steps_annealing=794 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=24 + grad_accum_steps=4 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth_annealing_lr_exps.tconf b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth_annealing_lr_exps.tconf new file mode 100644 index 0000000000000000000000000000000000000000..6ba3a296d70e0f983029dc73188a46e1159e01da --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_mc4_parallel_with_synth_annealing_lr_exps.tconf @@ -0,0 +1,540 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_synth_pre_annealing_lr_exps_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_lr_exps_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + 
en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + 
en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=0 + de_en=0 + en_fr=0 + fr_en=0 + en_es=0 + es_en=0 + en_it=0 + it_en=0 + en_nl=0 + nl_en=0 + en_pt=0 + pt_en=0 + en_ru=0 + ru_en=0 + en_zh=0 + zh_en=0 + en_ko=0 + ko_en=0 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions=0 + en_de_pre_annealing=183 + de_en_pre_annealing=183 + en_fr_pre_annealing=183 + fr_en_pre_annealing=183 + en_es_pre_annealing=183 + es_en_pre_annealing=183 + en_it_pre_annealing=183 + it_en_pre_annealing=183 + en_nl_pre_annealing=183 + nl_en_pre_annealing=183 + en_pt_pre_annealing=183 + pt_en_pre_annealing=183 + en_ru_pre_annealing=183 + ru_en_pre_annealing=183 + en_zh_pre_annealing=183 + zh_en_pre_annealing=183 + en_ko_pre_annealing=183 + ko_en_pre_annealing=183 + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=833 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=833 + ru_en=833 + en_zh=833 + zh_en=833 + en_ko=833 + ko_en=833 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=85000 + en_de_pre_annealing=0 + de_en_pre_annealing=0 + en_fr_pre_annealing=0 + fr_en_pre_annealing=0 + en_es_pre_annealing=0 + es_en_pre_annealing=0 + en_it_pre_annealing=0 + it_en_pre_annealing=0 + en_nl_pre_annealing=0 + nl_en_pre_annealing=0 + en_pt_pre_annealing=0 + pt_en_pre_annealing=0 + en_ru_pre_annealing=0 + ru_en_pre_annealing=0 + en_zh_pre_annealing=0 + zh_en_pre_annealing=0 + en_ko_pre_annealing=0 + ko_en_pre_annealing=0 + ) + ) + + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + 
ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20000000 + es_synth=20000000 + de_synth=20000000 + fr_synth=20000000 + nl_synth=20000000 + pt_synth=20000000 + it_synth=20000000 + ru_synth=20000000 + zh_synth=20000000 + ko_synth=20000000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth="" + es_synth="" + de_synth="" + fr_synth="" + nl_synth="" + pt_synth="" + it_synth="" + ru_synth="" + zh_synth="" + ko_synth="" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + min_perplexity=50 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=5556 + train_steps_annealing=794 + + lr_scheduler=constant + warmup_steps=32 + lr=(lr: placeholder 3e-4 1.5e-4 6e-5) + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=(lr_annealing: placeholder 3e-4 1.5e-4 6e-5) + lr_min_annealing=(lr_min_annealing: placeholder 
3e-5 1.5e-5 6e-6) + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=24 + grad_accum_steps=4 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_new_monolingual_parallel.tconf b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_new_monolingual_parallel.tconf new file mode 100644 index 0000000000000000000000000000000000000000..1b086aec0a6f06e069a0080c49663ae300ba84c5 --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_new_monolingual_parallel.tconf @@ -0,0 +1,274 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_20B/mc4_parallel_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_20B/mc4_parallel_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/en/filtered_en_2023-06_head_documents.jsonl.gz + es=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/es/filtered_es_2023-06_head_documents.jsonl.gz + de=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/de/filtered_de_2023-06_head_documents.jsonl.gz + fr=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/fr/filtered_fr_2023-06_head_documents.jsonl.gz + nl=/mnt/data/shared/tower_llm_data/webcorpus/nl/0000.json.gz + pt=/mnt/data/shared/tower_llm_data/webcorpus/pt/0000.json.gz + it=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/it/filtered_it_2023-06_head_documents.jsonl.gz + ru=/mnt/data/shared/tower_llm_data/webcorpus/ru/0000.json.gz + zh=/mnt/data/shared/tower_llm_data/webcorpus/zh/0000.json.gz + ko=/mnt/data/shared/tower_llm_data/webcorpus/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=10000000 + es=10000000 + de=10000000 + fr=10000000 + nl=10000000 + pt=10000000 + it=10000000 + ru=10000000 + zh=10000000 + ko=10000000 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + 
en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=0 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=670 + eval_interval=670 + train_steps=6700 + train_steps_annealing=20 + + lr_scheduler=cosine + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=0,1,2,3,4,5,6 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_new_monolingual_parallel_instructions.tconf b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_new_monolingual_parallel_instructions.tconf new file mode 100644 index 0000000000000000000000000000000000000000..4457ebe60689b17a1d8cf97d8b86c544022395af --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_20B_all_cleaned_new_monolingual_parallel_instructions.tconf @@ -0,0 +1,281 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_20B/mc4_parallel_instructions_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_20B/mc4_parallel_instructions_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en instructions) + datamix_weights_annealing="" + + dataset_path=(Dataset: + en=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/en/filtered_en_2023-06_head_documents.jsonl.gz + es=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/es/filtered_es_2023-06_head_documents.jsonl.gz + de=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/de/filtered_de_2023-06_head_documents.jsonl.gz + fr=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/fr/filtered_fr_2023-06_head_documents.jsonl.gz + nl=/mnt/data/shared/tower_llm_data/webcorpus/nl/0000.json.gz + pt=/mnt/data/shared/tower_llm_data/webcorpus/pt/0000.json.gz + it=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/it/filtered_it_2023-06_head_documents.jsonl.gz + ru=/mnt/data/shared/tower_llm_data/webcorpus/ru/0000.json.gz + 
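For the `continue_pretraining_tinyllama_all_new_mono_20B` configs above (n_gpus=7, micro_batch_size=4, grad_accum_steps=26, seq_length=2048, train_steps=6700), the per-run token budget follows the usual Megatron batch arithmetic. A back-of-the-envelope sketch, assuming TP=PP=1 so that all seven GPUs act as data-parallel replicas (other TP/PP branches change the data-parallel size and thus the totals):

```python
# Token budget estimate, assuming pure data parallelism (TP = PP = 1).
# global_batch = micro_batch_size * grad_accum_steps * data_parallel_size
n_gpus = 7
micro_batch_size = 4
grad_accum_steps = 26
seq_length = 2048
train_steps = 6700

data_parallel_size = n_gpus  # holds only when TP = PP = 1
global_batch = micro_batch_size * grad_accum_steps * data_parallel_size
tokens_per_step = global_batch * seq_length
total_tokens = tokens_per_step * train_steps

print(f"sequences per step: {global_batch}")              # 728
print(f"tokens per step:    {tokens_per_step:,}")         # 1,490,944
print(f"total tokens:       {total_tokens / 1e9:.1f}B")   # ~10.0B under these assumptions
```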
zh=/mnt/data/shared/tower_llm_data/webcorpus/zh/0000.json.gz + ko=/mnt/data/shared/tower_llm_data/webcorpus/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="none" + ) + + is_hf_dataset=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + instructions=False + ) + + threshold=(Dataset: + en=10000000 + es=10000000 + de=10000000 + fr=10000000 + nl=10000000 + pt=10000000 + it=10000000 + ru=10000000 + zh=10000000 + ko=10000000 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + instructions=10000000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_instructions=( + Dataset: + en=1350 + es=1350 + de=1350 + fr=1350 + nl=1350 + pt=1350 + it=1350 + ru=1350 + zh=1350 + ko=1350 + en_de=222 + de_en=222 + en_fr=222 + fr_en=222 + en_es=222 + es_en=222 
+ en_it=222 + it_en=222 + en_nl=222 + nl_en=222 + en_pt=222 + pt_en=222 + en_ru=222 + ru_en=222 + en_zh=222 + zh_en=222 + en_ko=222 + ko_en=222 + instructions=2500 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + instructions=100000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + instructions=False + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + instructions="" + ) + + min_perplexity=0 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=670 + eval_interval=670 + train_steps=6700 + train_steps_annealing=20 + + lr_scheduler=cosine + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=0,1,2,3,4,5,6 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 +} \ No newline at end of file diff --git a/multilinguality_megatron/ducttape/tiny_llama_annealing_no_mt.tconf b/multilinguality_megatron/ducttape/tiny_llama_annealing_no_mt.tconf new file mode 100644 index 0000000000000000000000000000000000000000..e3a18a42c631d01b25cc05658442cab38467e53b --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_annealing_no_mt.tconf @@ -0,0 +1,401 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B/mc4_parallel_instructions_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B/mc4_parallel_instructions_no_mt_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko 
en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + 
en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions=False + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_instructions_synth=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions=False + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_instructions_synth=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=0 + de_en=0 + en_fr=0 + fr_en=0 + en_es=0 + es_en=0 + en_it=0 + it_en=0 + en_nl=0 + nl_en=0 + en_pt=0 + pt_en=0 + en_ru=0 + ru_en=0 + en_zh=0 + zh_en=0 + en_ko=0 + ko_en=0 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=1 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + 
en_ko="en-ko" + ko_en="ko-en" + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + min_perplexity=0 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=5556 + train_steps_annealing=794 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 + + glu_activation=swiglu + kv_channels="" + layernorm_epsilon=1e-5 +} \ No newline at end of file diff --git a/multilinguality_megatron/ducttape/tiny_llama_annealing_no_mt_but_parallel_data.tconf b/multilinguality_megatron/ducttape/tiny_llama_annealing_no_mt_but_parallel_data.tconf new file mode 100644 index 0000000000000000000000000000000000000000..046fb02476cf5aa61a6856deb53ae988bd54344f --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_annealing_no_mt_but_parallel_data.tconf @@ -0,0 +1,401 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B/mc4_parallel_instructions_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B/mc4_parallel_instructions_no_mt_but_parallel_data_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions=False + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_instructions_synth=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 
+ ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions=False + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_instructions_synth=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=833 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=833 + ru_en=833 + en_zh=833 + zh_en=833 + en_ko=833 + ko_en=833 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=85000 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + min_perplexity=0 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=5556 + train_steps_annealing=794 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 + + glu_activation=swiglu + kv_channels="" + layernorm_epsilon=1e-5 +} \ No newline at end of file diff --git a/multilinguality_megatron/ducttape/tiny_llama_annealing_synth.tconf b/multilinguality_megatron/ducttape/tiny_llama_annealing_synth.tconf new file mode 100644 index 
0000000000000000000000000000000000000000..7f59b97ef4539c51438d297c75d46d5b9b26764d --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_annealing_synth.tconf @@ -0,0 +1,401 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B/mc4_parallel_instructions_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_new_mono_annealing_20B/mc4_parallel_instructions_synth_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + + + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + 
en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions=False + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions=False + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_instructions_synth=( + Dataset: + en=603 + es=603 + de=603 + fr=603 + nl=603 + pt=603 + it=603 + ru=603 + zh=603 + ko=603 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + en_synth=67 + es_synth=67 + de_synth=67 + fr_synth=67 + nl_synth=67 + pt_synth=67 + it_synth=67 + ru_synth=67 + zh_synth=67 + ko_synth=67 + instructions=False + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_instructions_synth=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 + pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=0 + de_en=0 + en_fr=0 + fr_en=0 + en_es=0 + es_en=0 + en_it=0 + it_en=0 + en_nl=0 + nl_en=0 + en_pt=0 + pt_en=0 + en_ru=0 + ru_en=0 + en_zh=0 + zh_en=0 + en_ko=0 + ko_en=0 + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + 
zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth=20 + es_synth=20 + de_synth=20 + fr_synth=20 + nl_synth=20 + pt_synth=20 + it_synth=20 + ru_synth=20 + zh_synth=20 + ko_synth=20 + instructions=800 + ) + + min_perplexity=0 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=5556 + train_steps_annealing=794 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 + + glu_activation=swiglu + kv_channels="" + layernorm_epsilon=1e-5 +} \ No newline at end of file diff --git a/multilinguality_megatron/ducttape/tiny_llama_flavio_20b.tconf b/multilinguality_megatron/ducttape/tiny_llama_flavio_20b.tconf new file mode 100644 index 0000000000000000000000000000000000000000..4414673aeeead30f879c69b41ce41f4871b2f02e --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_flavio_20b.tconf @@ -0,0 +1,545 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_synth_pre_annealing_20B_checkpoints_doc_attn + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_20B_checkpoints_annealed_doc_attn + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574 + + tokenizer_type=PretrainedFromHF + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en en_de_pre_annealing de_en_pre_annealing en_fr_pre_annealing fr_en_pre_annealing en_es_pre_annealing 
es_en_pre_annealing en_it_pre_annealing it_en_pre_annealing en_nl_pre_annealing nl_en_pre_annealing en_pt_pre_annealing pt_en_pre_annealing en_ru_pre_annealing ru_en_pre_annealing en_zh_pre_annealing zh_en_pre_annealing en_ko_pre_annealing ko_en_pre_annealing en_synth es_synth de_synth fr_synth nl_synth pt_synth it_synth ru_synth zh_synth ko_synth instructions) + dataset_path=(Dataset: + en=/mnt/data_2/shared/tower_llm_data/en/data + en_synth="" + es=/mnt/data_2/shared/tower_llm_data/es/3/0000.json.gz + es_synth="" + de=/mnt/data_2/shared/tower_llm_data/de/2/0000.json.gz + de_synth="" + fr=/mnt/data_2/shared/tower_llm_data/fr/1/0000.json.gz + fr_synth="" + nl=/mnt/data_2/shared/tower_llm_data/nl/0000.json.gz + nl_synth="" + pt=/mnt/data_2/shared/tower_llm_data/pt/0000.json.gz + pt_synth="" + it=/mnt/data_2/shared/tower_llm_data/it/0000.json.gz + it_synth="" + ru=/mnt/data_2/shared/tower_llm_data/ru/6/0000.json.gz + ru_synth="" + zh=/mnt/data_2/shared/tower_llm_data/zh/0000.json.gz + zh_synth="" + ko=/mnt/data_2/shared/tower_llm_data/ko/0000.json.gz + ko_synth="" + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + 
en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_hf_dataset=(Dataset: + en=True + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + threshold=(Dataset: + en=516 + es=275 + de=611 + fr=322 + nl=649 + pt=257 + it=332 + ru=334 + zh=2041 + ko=198 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + en_synth=100000 + es_synth=100000 + de_synth=100000 + fr_synth=100000 + nl_synth=100000 + pt_synth=100000 + it_synth=100000 + ru_synth=100000 + zh_synth=100000 + ko_synth=100000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=637 + es=637 + de=637 + fr=637 + nl=637 + pt=637 + it=637 + ru=637 + zh=637 + ko=637 + en_de=0 + de_en=0 + en_fr=0 + fr_en=0 + en_es=0 + es_en=0 + en_it=0 + it_en=0 + en_nl=0 + nl_en=0 + en_pt=0 + pt_en=0 + en_ru=0 + ru_en=0 + en_zh=0 + zh_en=0 + en_ko=0 + ko_en=0 + en_synth=34 + es_synth=34 + de_synth=34 + fr_synth=34 + nl_synth=34 + pt_synth=34 + it_synth=34 + ru_synth=34 + zh_synth=34 + ko_synth=34 + instructions=0 + en_de_pre_annealing=183 + de_en_pre_annealing=183 + en_fr_pre_annealing=183 + fr_en_pre_annealing=183 + en_es_pre_annealing=183 + es_en_pre_annealing=183 + en_it_pre_annealing=183 + it_en_pre_annealing=183 + en_nl_pre_annealing=183 + nl_en_pre_annealing=183 + en_pt_pre_annealing=183 + pt_en_pre_annealing=183 + en_ru_pre_annealing=183 + ru_en_pre_annealing=183 + en_zh_pre_annealing=183 + zh_en_pre_annealing=183 + en_ko_pre_annealing=183 + ko_en_pre_annealing=183 + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_uniform=( + Dataset: + en=0 + es=0 + de=0 + fr=0 + nl=0 
+ pt=0 + it=0 + ru=0 + zh=0 + ko=0 + en_de=833 + de_en=833 + en_fr=833 + fr_en=833 + en_es=833 + es_en=833 + en_it=833 + it_en=833 + en_nl=833 + nl_en=833 + en_pt=833 + pt_en=833 + en_ru=833 + ru_en=833 + en_zh=833 + zh_en=833 + en_ko=833 + ko_en=833 + en_synth=0 + es_synth=0 + de_synth=0 + fr_synth=0 + nl_synth=0 + pt_synth=0 + it_synth=0 + ru_synth=0 + zh_synth=0 + ko_synth=0 + instructions=85000 + en_de_pre_annealing=0 + de_en_pre_annealing=0 + en_fr_pre_annealing=0 + fr_en_pre_annealing=0 + en_es_pre_annealing=0 + es_en_pre_annealing=0 + en_it_pre_annealing=0 + it_en_pre_annealing=0 + en_nl_pre_annealing=0 + nl_en_pre_annealing=0 + en_pt_pre_annealing=0 + pt_en_pre_annealing=0 + en_ru_pre_annealing=0 + ru_en_pre_annealing=0 + en_zh_pre_annealing=0 + zh_en_pre_annealing=0 + en_ko_pre_annealing=0 + ko_en_pre_annealing=0 + ) + ) + + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + en_synth=20000000 + es_synth=20000000 + de_synth=20000000 + fr_synth=20000000 + nl_synth=20000000 + pt_synth=20000000 + it_synth=20000000 + ru_synth=20000000 + zh_synth=20000000 + ko_synth=20000000 + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + en_synth=False + es_synth=False + de_synth=False + fr_synth=False + nl_synth=False + pt_synth=False + it_synth=False + ru_synth=False + zh_synth=False + ko_synth=False + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + lp=(Dataset: + en="" + es="" + de="" + fr="" + nl="" + pt="" + it="" + ru="" + zh="" + ko="" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + en_synth="" + 
es_synth="" + de_synth="" + fr_synth="" + nl_synth="" + pt_synth="" + it_synth="" + ru_synth="" + zh_synth="" + ko_synth="" + instructions="oi" + en_de_pre_annealing="oi" + de_en_pre_annealing="oi" + en_fr_pre_annealing="oi" + fr_en_pre_annealing="oi" + en_es_pre_annealing="oi" + es_en_pre_annealing="oi" + en_it_pre_annealing="oi" + it_en_pre_annealing="oi" + en_nl_pre_annealing="oi" + nl_en_pre_annealing="oi" + en_pt_pre_annealing="oi" + pt_en_pre_annealing="oi" + en_ru_pre_annealing="oi" + ru_en_pre_annealing="oi" + en_zh_pre_annealing="oi" + zh_en_pre_annealing="oi" + en_ko_pre_annealing="oi" + ko_en_pre_annealing="oi" + ) + + min_perplexity=50 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=11430 + train_steps_annealing=1270 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=8 + gpu_ids=0,1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=24 + grad_accum_steps=4 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + kv_channels="" + glu_activation=swiglu + layernorm_epsilon=1e-5 + + seq_length=2048 +} diff --git a/multilinguality_megatron/ducttape/tiny_llama_porfirio_annealing.tconf b/multilinguality_megatron/ducttape/tiny_llama_porfirio_annealing.tconf new file mode 100644 index 0000000000000000000000000000000000000000..759bb7e80d92e786565a2ad10053d2a9febe05a9 --- /dev/null +++ b/multilinguality_megatron/ducttape/tiny_llama_porfirio_annealing.tconf @@ -0,0 +1,313 @@ +global { + model_type="llama2" + ducttape_output=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_annealing_baseline + repo=/mnt/data/jpombal/multilinguality_megatron + + external_model_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_annealing_baseline/mc4_parallel_checkpoints + external_model_dir_annealing=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_annealing_baseline/mc4_parallel_checkpoints_annealed + model_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/ + tokenizer_path=/mnt/data_2/cache/models--TinyLlama--TinyLlama-1.1B-intermediate-step-1431k-3T/snapshots/036fa4651240b9a1487f709833b9e4b96b4c1574/tokenizer.model + + dataset=(Dataset: en de fr es it nl pt ru zh ko en_de de_en en_fr fr_en en_es es_en en_it it_en en_nl nl_en en_pt pt_en en_ru ru_en en_zh zh_en en_ko ko_en) + + + dataset_path=(Dataset: + en=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/en/filtered_en_2023-06_head_documents.jsonl.gz + es=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/es/filtered_es_2023-06_head_documents.jsonl.gz + de=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/de/filtered_de_2023-06_head_documents.jsonl.gz + fr=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/fr/filtered_fr_2023-06_head_documents.jsonl.gz + nl=/mnt/data/shared/tower_llm_data/webcorpus/nl/0000.json.gz + pt=/mnt/data/shared/tower_llm_data/webcorpus/pt/0000.json.gz + it=/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered/it/filtered_it_2023-06_head_documents.jsonl.gz + ru=/mnt/data/shared/tower_llm_data/webcorpus/ru/0000.json.gz + 
zh=/mnt/data/shared/tower_llm_data/webcorpus/zh/0000.json.gz + ko=/mnt/data/shared/tower_llm_data/webcorpus/ko/0000.json.gz + en_de="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + de_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-de/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_fr="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + fr_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-fr/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_es="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + es_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-es/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_it="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + it_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-it/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_nl="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + nl_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-nl/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_pt="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + pt_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-pt/bicleaner_0.6_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ru="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ru_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ru/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_zh="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + zh_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-zh/no_bicleaner_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + en_ko="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ko_en="/mnt/data/shared/tower_llm_data/bilingual_data/v1/en-ko/bicleaner_0.5_cometkiwi-wmt22-cometkiwi-da/threshold_0.75" + ) + + is_hf_dataset=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=False + de_en=False + en_fr=False + fr_en=False + en_es=False + es_en=False + en_it=False + it_en=False + en_nl=False + nl_en=False + en_pt=False + pt_en=False + en_ru=False + ru_en=False + en_zh=False + zh_en=False + en_ko=False + ko_en=False + ) + + threshold=(Dataset: + en=10000000 + es=10000000 + de=10000000 + fr=10000000 + nl=10000000 + pt=10000000 + it=10000000 + ru=10000000 + zh=10000000 + ko=10000000 + en_de=100000 + de_en=100000 + en_fr=100000 + fr_en=100000 + en_es=100000 + es_en=100000 + en_it=100000 + it_en=100000 + en_nl=100000 + nl_en=100000 + en_pt=100000 + pt_en=100000 + en_ru=100000 + ru_en=100000 + en_zh=100000 + zh_en=100000 + en_ko=100000 + ko_en=100000 + ) + + # rougly 67% for mc4, 33% for total parallel data + datamix_weights=( + DataMix: + mc4_parallel_instructions=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + 
en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + datamix_weights_annealing=( + DataMix: + mc4_parallel_instructions=( + Dataset: + en=670 + es=670 + de=670 + fr=670 + nl=670 + pt=670 + it=670 + ru=670 + zh=670 + ko=670 + en_de=183 + de_en=183 + en_fr=183 + fr_en=183 + en_es=183 + es_en=183 + en_it=183 + it_en=183 + en_nl=183 + nl_en=183 + en_pt=183 + pt_en=183 + en_ru=183 + ru_en=183 + en_zh=183 + zh_en=183 + en_ko=183 + ko_en=183 + ) + ) + + # number such that final tokens for each language are around 1B + n_tokens=(Dataset: + en=1000000000 + es=833333330 + de=833333330 + fr=833333330 + nl=833333330 + pt=833333330 + it=833333330 + ru=500000000 + zh=13888888 + ko=250000000 + en_de=20000000 + de_en=20000000 + en_fr=20000000 + fr_en=20000000 + en_es=20000000 + es_en=20000000 + en_it=20000000 + it_en=20000000 + en_nl=20000000 + nl_en=20000000 + en_pt=20000000 + pt_en=20000000 + en_ru=20000000 + ru_en=20000000 + en_zh=20000000 + zh_en=20000000 + en_ko=20000000 + ko_en=20000000 + ) + + is_parallel=(Dataset: + en=False + es=False + de=False + fr=False + nl=False + pt=False + it=False + ru=False + zh=False + ko=False + en_de=True + de_en=True + en_fr=True + fr_en=True + en_es=True + es_en=True + en_it=True + it_en=True + en_nl=True + nl_en=True + en_pt=True + pt_en=True + en_ru=True + ru_en=True + en_zh=True + zh_en=True + en_ko=True + ko_en=True + ) + + lp=(Dataset: + en="none" + es="none" + de="none" + fr="none" + nl="none" + pt="none" + it="none" + ru="none" + zh="none" + ko="none" + en_de="en-de" + de_en="de-en" + en_fr="en-fr" + fr_en="fr-en" + en_es="en-es" + es_en="es-en" + en_it="en-it" + it_en="it-en" + en_nl="en-nl" + nl_en="nl-en" + en_pt="en-pt" + pt_en="pt-en" + en_ru="en-ru" + ru_en="ru-en" + en_zh="en-zh" + zh_en="zh-en" + en_ko="en-ko" + ko_en="ko-en" + ) + + min_perplexity=0 + + size=(Size: 1 7 13) + + log_interval=1 + save_interval=635 + eval_interval=635 + train_steps=5556 + train_steps_annealing=794 + + lr_scheduler=constant + warmup_steps=32 + lr=3e-5 + lr_min=3e-6 + weight_decay=0.1 + + lr_scheduler_annealing=linear + warmup_steps_annealing=0 + lr_annealing=3e-5 + lr_min_annealing=3e-6 + + n_gpus=7 + gpu_ids=1,2,3,4,5,6,7 + tp=(TP: 1 2 3 4 5 6 7 8) + pp=(PP: 1 2 3 4) + micro_batch_size=4 + grad_accum_steps=26 + vocab_size=32000 + + cpu_workers=16 + wikipedia=False + freeze_layers="" + posterior_tokens=False + n_posterior_tokens=0 + eval_iters=1 + + seq_length=2048 + + glu_activation=swiglu + kv_channels="" + layernorm_epsilon=1e-5 +} \ No newline at end of file diff --git a/multilinguality_megatron/examples/evaluate_retriever_nq.sh b/multilinguality_megatron/examples/evaluate_retriever_nq.sh new file mode 100644 index 0000000000000000000000000000000000000000..b9c40e73738437a80f41b036dfd8e3f433ea26c9 --- /dev/null +++ b/multilinguality_megatron/examples/evaluate_retriever_nq.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Evaluate natural question test data given Wikipedia embeddings and pretrained +# ICT model or a finetuned model for Natural Question task + +# Datasets can be downloaded from the following link: +# https://github.com/facebookresearch/DPR/blob/master/data/download_data.py + +EVIDENCE_DATA_DIR= +EMBEDDING_PATH= +CHECKPOINT_PATH= + +QA_FILE= + +python tasks/main.py \ + --task RETRIEVER-EVAL \ + --tokenizer_type BertWordPieceLowerCase \ + --num_layers 12 \ + --hidden_size 768 \ + --num_attention_heads 12 \ + --tensor_model_parallel_size 1 \ + --micro_batch_size 128 \ + --activations_checkpoint_method uniform \ + --seq_length 512 \ + 
--max_position_embeddings 512 \ + --load ${CHECKPOINT_PATH} \ + --evidence_data_path ${EVIDENCE_DATA_DIR} \ + --embedding_path ${EMBEDDING_PATH} \ + --retriever_seq_length 256 \ + --vocab_file bert-vocab.txt\ + --qa_data_test ${QA_FILE} \ + --faiss_use_gpu \ + --retriever_report_topk_accuracies 1 5 20 100 \ + --fp16 \ + --indexer_log_interval 1000 \ + --indexer_batch_size 128 + + diff --git a/multilinguality_megatron/examples/evaluate_zeroshot_gpt.sh b/multilinguality_megatron/examples/evaluate_zeroshot_gpt.sh new file mode 100644 index 0000000000000000000000000000000000000000..7bc1f4c75f2215eea068cc27d91c05cf8640d473 --- /dev/null +++ b/multilinguality_megatron/examples/evaluate_zeroshot_gpt.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +TASK="LAMBADA" + +VALID_DATA= +VOCAB_FILE=gpt2-vocab.json +MERGE_FILE=gpt2-merges.txt +CHECKPOINT=checkpoints/gpt2_345m + + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ + --task $TASK \ + --valid_data $VALID_DATA \ + --tokenizer_type GPT2BPETokenizer \ + --strict_lambada \ + --vocab_file $VOCAB_FILE \ + --merge_file $MERGE_FILE \ + --load $CHECKPOINT \ + --tensor_model_parallel_size 1 \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --batch_size 8 \ + --activations_checkpoint_method uniform \ + --seq_length 1024 \ + --max_position_embeddings 1024 \ + --log_interval 10 \ + --fp16 \ + --no_load_optim \ + --no_load_rng diff --git a/multilinguality_megatron/examples/finetune.sh b/multilinguality_megatron/examples/finetune.sh new file mode 100644 index 0000000000000000000000000000000000000000..9387500404c2e9e864119411711a1632039f3932 --- /dev/null +++ b/multilinguality_megatron/examples/finetune.sh @@ -0,0 +1,245 @@ +#! /bin/bash + + +# default arguments +SIZE=7 +TP=8 +PP=1 +GPUS_PER_NODE=8 +MICRO_BATCH=1 +GLOBAL_BATCH=12 +RANK=0 +N_NODES=1 +ADDR=localhost +WANDB=0 +INSTRUCT=0 +CHECKPOINT_PATH=none +DATA=none +WANDB_PROJ=none +WANDB_ID=none +WANDB_ENTITY=none +ITERS=1000 +SEQ_LEN=none +DATA_PATH=none +TRAINED_PATH=none +VAL_PATH=none +USR_LR=none +USR_MIN_LR=none +LOSS_MASK=0.0 +HELP_STR="[--rank=$RANK] [--size=$SIZE] [--tp=$TP] [--pp=$PP] [--gpus=$GPUS_PER_NODE] \ +[--micro-batch=$MICRO_BATCH] [--global-batch=$GLOBAL_BATCH] [--nodes=$N_NODES] \ +[--addr=$ADDR] [--wandb] [--instruct] [--checkpoint=...] [--data=...] [--iters=$ITERS] \ +[--wandb-proj=none] [--wandb-id=none] [--wandb-entity=none] [--seq-len=...] \ +[--val-path=none] [--out=...] 
[--lr=lr minlr] [--loss-mask=$LOSS_MASK] --help]" + + +# define help function +help () { + echo "Usage: $0 $HELP_STR" +} + + +# parse arguments, three modes +# mode1 = -h or --help requested +if [[ $# = 1 ]] && [[ $1 = "-h" ]] || [[ $1 = "--help" ]]; then + help + exit 0 +# mode2 = not arguments given +elif [[ $# = 0 ]]; then + help + exit 1 +fi +# mode3 = correct usage, read model +MODEL=$1 +shift +while [[ $# -gt 0 ]]; do + case $1 in + --tp) TP="$2"; shift; shift;; + --pp) PP="$2"; shift; shift;; + --size) SIZE="$2"; shift; shift;; + --gpus) GPUS_PER_NODE="$2"; shift; shift;; + --micro-batch) MICRO_BATCH="$2"; shift; shift;; + --global-batch) GLOBAL_BATCH="$2"; shift; shift;; + --rank) RANK=$2; shift; shift;; + --nodes) N_NODES=$2; shift; shift;; + --addr) ADDR=$2; shift; shift;; + --wandb) WANDB=1; shift;; + --wandb-project) WANDB_PROJ=$2; shift; shift;; + --wandb-id) WANDB_ID=$2; shift; shift;; + --wandb-entity) WANDB_ENTITY=$2; shift; shift;; + --instruct) INSTRUCT=1; shift;; + --checkpoint) CHECKPOINT_PATH=$2; shift; shift;; + --data) DATA_PATH=$2; shift; shift;; + --iters) ITERS=$2; shift; shift;; + --seq-len) SEQ_LEN=$2; shift; shift;; + --out) TRAINED_PATH=$2; shift; shift;; + --val-path) VAL_PATH=$2; shift; shift;; + --lr) USR_LR=$2; USR_MIN_LR=$3; shift; shift; shift;; + --loss-mask) LOSS_MASK=$2; shift; shift;; + *) echo unknown argument $1; help; exit 1;; + esac +done + + +# set args +if [[ $CHECKPOINT_PATH = none ]]; then + CHECKPOINT_PATH=/pure-mlo-scratch/alhernan/megatron-data/checkpoints/${MODEL}-${SIZE}b-tp$TP-pp$PP +fi + +if [[ $INSTRUCT = 1 ]]; then + LR="2e-5" + MIN_LR="2e-6" + if [[ $TRAINED_PATH = none ]]; then + TRAINED_PATH=$CHECKPOINT_PATH-instructed + fi +else + LR="3e-4" + MIN_LR="3e-4" + if [[ $TRAINED_PATH = none ]]; then + TRAINED_PATH=$CHECKPOINT_PATH-pretrained + fi +fi + +TENSORBOARD_PATH=$TRAINED_PATH/logging +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $N_NODES --node_rank + $RANK --master_addr $ADDR --master_port 6000" + +if [[ $MODEL = falcon ]]; then + if [[ $DATA_PATH = none ]]; then + DATA_PATH=/pure-mlo-scratch/pagliard/data/wikitext-falcon/wiki-train_text_document + fi + TOKENIZER=FalconTokenizer + EXTRA_ARGS="--parallel_attn" + if [[ $SEQ_LEN = none ]]; then + SEQ_LEN=2048 + fi +elif [[ $MODEL = llama ]] || [[ $MODEL = llama2 ]] || [[ $MODEL = codellama ]]; then + EXTRA_IDS="[bib_ref],[/bib_ref],[fig_ref],[/fig_ref],[bib],[/bib],[fig],[/fig],[table],[/table],[formula],[/formula]" + EXTRA_ARGS="--vocab_file=/pure-mlo-scratch/llama/tokenizer.model --use_rms_norm + --glu_activation swiglu --no_tie_embed_logits" + if [[ $INSTRUCT = 1 ]]; then + if [[ $DATA_PATH = none ]]; then + DATA_PATH=/pure-mlo-scratch/alhernan/data/orca/orca + fi + EXTRA_IDS="$EXTRA_IDS,<|im_start|>,<|im_end|>" + else + if [[ $DATA_PATH = none ]]; then + DATA_PATH=/pure-mlo-scratch/data/tokenized/pubmed-all/pubmed-all-llama_text_document + fi + fi + TOKENIZER=SentencePieceTokenizer + EXTRA_ARGS="$EXTRA_ARGS --vocab_extra_ids_list $EXTRA_IDS" + if [[ $MODEL == llama ]]; then + if [[ $SEQ_LEN = none ]]; then + SEQ_LEN=2048 + fi + EXTRA_ARGS="$EXTRA_ARGS --vocab_file=/pure-mlo-scratch/llama2/Llama-2-7b-hf/tokenizer.model" + EXTRA_ARGS="$EXTRA_ARGS --layernorm_epsilon 1e-6" + elif [[ $MODEL == llama2 ]]; then + if [[ $SEQ_LEN = none ]]; then + SEQ_LEN=4096 + fi + EXTRA_ARGS="$EXTRA_ARGS --vocab_file=/pure-mlo-scratch/llama2/Llama-2-7b-hf/tokenizer.model" + EXTRA_ARGS="$EXTRA_ARGS --layernorm_epsilon 1e-5" + if (( $SIZE > 13 )); then # llama 2, 34B and 70B + 
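            # Llama 2's 34B/70B pretraining reportedly used a lower peak LR (1.5e-4) than the
+            # 3e-4 used for 7B/13B, hence the override for sizes above 13B here (explanatory note only).
+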
LR="1.5e-4" + fi + else # codellama + if [[ $SEQ_LEN = none ]]; then + SEQ_LEN=16384 + fi + EXTRA_ARGS="$EXTRA_ARGS --vocab_file=/pure-mlo-scratch/codellama/CodeLlama-7b/tokenizer.model --rope_theta 1e6" + fi +elif [[ $MODEL = gpt ]]; then + if [[ $DATA_PATH = none ]]; then + DATA_PATH=/scratch/wikitext-megatron/wikitext-train_text_document + fi + TOKENIZER=FalconTokenizer + EXTRA_ARGS="--num_layers 4 --hidden_size 512 --num_attention_heads 8" + if [[ $SEQ_LEN = none ]]; then + SEQ_LEN=2048 + fi +else + echo "Model should be either gpt, llama or falcon, not $MODEL" + help + exit 1 +fi +COMMON_ARGS="--use_flash_attn --no_bias_gelu_fusion + --seq_length $SEQ_LEN --max_position_embeddings $SEQ_LEN + --log_interval 1 --save_interval 800 --eval_interval 200 + --eval_iters 10 --hidden_dropout 0.0 --position_embedding_type rotary + --no_bias_dropout_fusion --use_checkpoint_args + --attention_dropout 0.0 --adam_beta1 0.9 --adam_beta2 0.95 --adam_eps 1e-5 + --lr_decay_style cosine --lr_warmup_fraction 0.1 --lr $LR --min_lr $MIN_LR + --weight_decay 0.1 --sequence_parallel --recompute_granularity selective + --log_timers_to_tensorboard --scalar_loss_mask=$LOSS_MASK + --rope_scaling_factor 1.0" + +if [[ $INSTRUCT = 1 ]]; then + COMMON_ARGS="$COMMON_ARGS --variable_seq_lengths --data_type instruction --metrics all" + if [[ $CHECKPOINT_PATH != $TRAINED_PATH ]]; then + COMMON_ARGS="$COMMON_ARGS --finetune" + fi +else + COMMON_ARGS="$COMMON_ARGS --metrics perplexity accuracy count_loss_mask" +fi + +if [[ $CHECKPOINT_PATH != $TRAINED_PATH ]]; then + COMMON_ARGS="$COMMON_ARGS --train_iters $ITERS" +fi + +if [[ $WANDB = 1 ]]; then + COMMON_ARGS="$COMMON_ARGS --wandb_logger" + if [[ $WANDB_PROJ != none ]]; then + COMMON_ARGS="$COMMON_ARGS --wandb_project $WANDB_PROJ" + fi + if [[ $WANDB_ID != none ]]; then + COMMON_ARGS="$COMMON_ARGS --wandb_id $WANDB_ID" + fi + if [[ $WANDB_ENTITY != none ]]; then + COMMON_ARGS="$COMMON_ARGS --wandb_entity $WANDB_ENTITY" + fi +fi + +if [[ $VAL_PATH = none ]]; then + DATA_ARGS="--data_path $DATA_PATH" +else + DATA_ARGS="--train_data_path $DATA_PATH --valid_data_path $VAL_PATH" +fi + +# print some args +echo +echo Settings: +echo RANK=$RANK +echo ADDR=$ADDR +echo N_NODES=$N_NODES +echo DATA_ARGS=$DATA_ARGS +echo CHECKPOINT_PATH=$CHECKPOINT_PATH +echo TRAINED_PATH=$TRAINED_PATH +echo MODEL=$MODEL +echo TP=$TP +echo PP=$PP +echo MICRO_BATCH=$MICRO_BATCH +echo GLOBAL_BATCH=$GLOBAL_BATCH +echo INSTRUCT=$INSTRUCT +echo COMMON_ARGS=$COMMON_ARGS +echo EXTRA_ARGS=$EXTRA_ARGS +echo + + +# finally, call finetune.py +CUDA_DEVICE_MAX_CONNECTIONS=1 OMP_NUM_THREADS=16 torchrun $DISTRIBUTED_ARGS finetune.py \ + --tensor_model_parallel_size $TP \ + --pipeline_model_parallel_size $PP \ + --load $CHECKPOINT_PATH \ + --save $TRAINED_PATH \ + --tensorboard_dir $TENSORBOARD_PATH \ + $DATA_ARGS \ + --model_name $MODEL \ + --tokenizer_type $TOKENIZER \ + --bf16 \ + --global_batch_size $GLOBAL_BATCH \ + --micro_batch_size $MICRO_BATCH \ + --num_workers=2 \ + $EXTRA_ARGS \ + $COMMON_ARGS diff --git a/multilinguality_megatron/examples/finetune_mnli_distributed.sh b/multilinguality_megatron/examples/finetune_mnli_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..9671a6bf4081dbfe18886c2fdc7c434519af12ce --- /dev/null +++ b/multilinguality_megatron/examples/finetune_mnli_distributed.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + 
+TRAIN_DATA="data/glue_data/MNLI/train.tsv" +VALID_DATA="data/glue_data/MNLI/dev_matched.tsv \ + data/glue_data/MNLI/dev_mismatched.tsv" +PRETRAINED_CHECKPOINT=checkpoints/bert_345m +VOCAB_FILE=bert-vocab.txt +CHECKPOINT_PATH=checkpoints/bert_345m_mnli + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ + --task MNLI \ + --seed 1234 \ + --train_data $TRAIN_DATA \ + --valid_data $VALID_DATA \ + --tokenizer_type BertWordPieceLowerCase \ + --vocab_file $VOCAB_FILE \ + --epochs 5 \ + --pretrained_checkpoint $PRETRAINED_CHECKPOINT \ + --tensor_model_parallel_size 1 \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 8 \ + --activations_checkpoint_method uniform \ + --lr 5.0e-5 \ + --lr_decay_style linear \ + --lr_warmup_fraction 0.065 \ + --seq_length 512 \ + --max_position_embeddings 512 \ + --save_interval 500000 \ + --save $CHECKPOINT_PATH \ + --log_interval 10 \ + --eval_interval 100 \ + --eval_iters 50 \ + --weight_decay 1.0e-1 \ + --fp16 diff --git a/multilinguality_megatron/examples/finetune_race_distributed.sh b/multilinguality_megatron/examples/finetune_race_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..5bcd6adb0f242284a2d1bf22e475b6084d22afd2 --- /dev/null +++ b/multilinguality_megatron/examples/finetune_race_distributed.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +TRAIN_DATA="data/RACE/train/middle" +VALID_DATA="data/RACE/dev/middle \ + data/RACE/dev/high" +VOCAB_FILE=bert-vocab.txt +PRETRAINED_CHECKPOINT=checkpoints/bert_345m +CHECKPOINT_PATH=checkpoints/bert_345m_race + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ + --task RACE \ + --seed 1234 \ + --train_data $TRAIN_DATA \ + --valid_data $VALID_DATA \ + --tokenizer_type BertWordPieceLowerCase \ + --vocab_file $VOCAB_FILE \ + --epochs 3 \ + --pretrained_checkpoint $PRETRAINED_CHECKPOINT \ + --tensor_model_parallel_size 1 \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 4 \ + --activations_checkpoint_method uniform \ + --lr 1.0e-5 \ + --lr_decay_style linear \ + --lr_warmup_fraction 0.06 \ + --seq_length 512 \ + --max_position_embeddings 512 \ + --save_interval 100000 \ + --save $CHECKPOINT_PATH \ + --log_interval 10 \ + --eval_interval 100 \ + --eval_iters 50 \ + --weight_decay 1.0e-1 \ + --clip_grad 1.0 \ + --hidden_dropout 0.1 \ + --attention_dropout 0.1 \ + --fp16 diff --git a/multilinguality_megatron/examples/finetune_retriever_distributed.sh b/multilinguality_megatron/examples/finetune_retriever_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f2ccc971ec713781191e5988f83213da8b61142 --- /dev/null +++ b/multilinguality_megatron/examples/finetune_retriever_distributed.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# Finetune a BERT or pretrained ICT model using Google natural question data +# Datasets can be downloaded from the following link: +# https://github.com/facebookresearch/DPR/blob/master/data/download_data.py + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +CHECKPOINT_PATH= + +# Load either of the below +BERT_LOAD_PATH= +PRETRAINED_CHECKPOINT= + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ + --task RET-FINETUNE-NQ \ + --train_with_neg \ + 
--train_hard_neg 1 \ + --pretrained_checkpoint ${PRETRAINED_CHECKPOINT} \ + --num_layers 12 \ + --hidden_size 768 \ + --num_attention_heads 12 \ + --tensor_model_parallel_size 1 \ + --tokenizer_type BertWordPieceLowerCase \ + --train_data nq-train.json \ + --valid_data nq-dev.json \ + --save ${CHECKPOINT_PATH} \ + --load ${CHECKPOINT_PATH} \ + --vocab_file bert-vocab.txt \ + --bert_load ${BERT_LOAD_PATH} \ + --save_interval 5000 \ + --log_interval 10 \ + --eval_interval 20000 \ + --eval_iters 100 \ + --indexer_log_interval 1000 \ + --faiss_use_gpu \ + --DDP_impl torch \ + --fp16 \ + --retriever_report_topk_accuracies 1 5 10 20 100 \ + --seq_length 512 \ + --retriever_seq_length 256 \ + --max_position_embeddings 512 \ + --retriever_score_scaling \ + --epochs 80 \ + --micro_batch_size 8 \ + --eval_micro_batch_size 16 \ + --indexer_batch_size 128 \ + --lr 2e-5 \ + --lr_warmup_fraction 0.01 \ + --weight_decay 1e-1 diff --git a/multilinguality_megatron/examples/hf_to_megatron.sh b/multilinguality_megatron/examples/hf_to_megatron.sh new file mode 100644 index 0000000000000000000000000000000000000000..2d5ba2fc2d2b4e87db19c292e098f8760a872cf2 --- /dev/null +++ b/multilinguality_megatron/examples/hf_to_megatron.sh @@ -0,0 +1,35 @@ +#! /bin/bash + +# assert correct usage +if [[ $# -ne 2 ]]; then + echo "Usage: $0 <7,13,30,34,40,65,70>" + exit 1 +fi + + +# extract variables +MODEL=$1 +SIZE=$2 + + +# determine cache directory (either raw llama or huggingface cache) +if [[ $MODEL = falcon ]]; then + CACHE=/pure-mlo-scratch/huggingface_cache/ +elif [[ $MODEL = llama ]]; then + CACHE=/pure-mlo-scratch/llama/${SIZE}B/ +elif [[ $MODEL = llama2 ]]; then + CACHE=/pure-mlo-scratch/llama2/llama-2-${SIZE}b/ +elif [[ $MODEL = codellama ]]; then + CACHE=/pure-mlo-scratch/codellama/CodeLlama-${SIZE}b/ +else + echo "Model should be either llama, llama2, codellama or falcon, not $MODEL" + exit 1 +fi + + +# finally call the script +python weights_conversion/hf_to_megatron.py \ + $MODEL \ + --size=$SIZE \ + --out=/pure-mlo-scratch/alhernan/megatron-data/checkpoints/${MODEL}-${SIZE}b/ \ + --cache-dir=$CACHE diff --git a/multilinguality_megatron/examples/merge_mp_bert.sh b/multilinguality_megatron/examples/merge_mp_bert.sh new file mode 100644 index 0000000000000000000000000000000000000000..2f3d5c7d17ae593498130567761f0a89886afbdf --- /dev/null +++ b/multilinguality_megatron/examples/merge_mp_bert.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +TENSOR_MODEL_PARALLEL_SIZE=2 + +VOCAB_FILE=bert-vocab.txt +CHECKPOINT_PATH=checkpoints/bert_345m + +WORLD_SIZE=$TENSOR_MODEL_PARALLEL_SIZE python tools/merge_mp_partitions.py \ + --model_type BERT \ + --tensor_model_parallel_size $TENSOR_MODEL_PARALLEL_SIZE \ + --tokenizer_type BertWordPieceLowerCase \ + --vocab_file $VOCAB_FILE \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --seq_length 512 \ + --max_position_embeddings 512 \ + --load $CHECKPOINT_PATH diff --git a/multilinguality_megatron/examples/msdp/README.md b/multilinguality_megatron/examples/msdp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8ff95099e0d9e005ecf6bf5ec7e85d0b10eb4d23 --- /dev/null +++ b/multilinguality_megatron/examples/msdp/README.md @@ -0,0 +1,5 @@ + +# Multi-Stage Prompting for Knowledgeable Dialogue Generation + +This directory contains all the scripts of multi-stage prompting for knowledgeable dialogue generation that includes data preparation, and knowledge and response generations. 
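A typical end-to-end run chains these scripts roughly in the order sketched below (a sketch only: each script expects its data, checkpoint, and output path variables to be filled in before it is run):
+
+```bash
+bash examples/msdp/data_processing.sh       # preprocess WoW/WoI and build the prompt files
+bash examples/msdp/prompt_knwl_gen.sh       # Stage-1: prompt the LM to generate context-relevant knowledge
+bash examples/msdp/prep_resp_gen.sh         # merge the Stage-1 knowledge into the Stage-2 input file
+bash examples/msdp/prompt_resp_gen.sh       # Stage-2: prompt the LM to generate the responses
+bash examples/msdp/eval_knwl_generation.sh  # evaluate the generated knowledge (F1, BLEU, METEOR, ROUGE-L)
+bash examples/msdp/eval_resp_generation.sh  # evaluate the generated responses (F1, KF1, BLEU, METEOR, ROUGE-L)
+```
+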
More details are available on [`knowledgeable task directory`](../../tasks/msdp). + diff --git a/multilinguality_megatron/examples/msdp/data_processing.sh b/multilinguality_megatron/examples/msdp/data_processing.sh new file mode 100644 index 0000000000000000000000000000000000000000..37a6512a806fd0a141339ea857c73074fced12a9 --- /dev/null +++ b/multilinguality_megatron/examples/msdp/data_processing.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +# Data preparation for our framework: preprocessing the WoW and WoI datasets +# The datasets can be downloaded through the following links: +# WoW: https://parl.ai/projects/wizard_of_wikipedia/ +# WoI: https://parl.ai/projects/sea/ + +DIR=`pwd` +# Before running the preprocessing, please download +# the wizard of wikipedia and wizard datasets +WOW_DATA_FOLDER= +WOI_DATA_FOLDER= + +# We provide examples for processing the raw data from Wizard of Wikipedia +# Processing the train dataset (train.json) +python ${DIR}/tasks/msdp/preprocessing.py \ + --func process_wow_dataset \ + --raw_file ${WOW_DATA_FOLDER}/train.json \ + --processed_file ${WOW_DATA_FOLDER}/train_processed.txt + +# Processing test seen dataset (test_random_split.json) +python ${DIR}/tasks/msdp/preprocessing.py \ + --func process_wow_dataset \ + --raw_file ${WOW_DATA_FOLDER}/test_random_split.json \ + --processed_file ${WOW_DATA_FOLDER}/testseen_processed.txt \ + --knwl_ref_file ${WOW_DATA_FOLDER}/output_testseen_knowledge_reference.txt \ + --resp_ref_file ${WOW_DATA_FOLDER}/output_testseen_response_reference.txt + +# processing test unseen dataset (test_topic_split.json) +python ${DIR}/tasks/msdp/preprocessing.py \ + --func process_wow_dataset \ + --raw_file ${WOW_DATA_FOLDER}/test_topic_split.json \ + --processed_file ${WOW_DATA_FOLDER}/testunseen_processed.txt \ + --knwl_ref_file ${WOW_DATA_FOLDER}/output_testunseen_knowledge_reference.txt \ + --resp_ref_file ${WOW_DATA_FOLDER}/output_testunseen_response_reference.txt + + +# We provide the following script to process the raw data from Wizard of Internet +# Processing the test dataset (test.jsonl) +python ${DIR}/tasks/msdp/preprocessing.py \ + --func process_woi_dataset \ + --raw_file ${WOI_DATA_FOLDER}/test.jsonl \ + --processed_file ${WOI_DATA_FOLDER}/test_processed.txt \ + --knwl_ref_file ${WOI_DATA_FOLDER}/output_test_knowledge_reference.txt \ + --resp_ref_file ${WOI_DATA_FOLDER}/output_test_response_reference.txt + + +# Get the knowledge generation prompts for the each test dataset in WoW and WoI +MODEL_FILE= +# WoW test seen +python ${DIR}/tasks/msdp/preprocessing.py \ + --func get_knwl_gen_prompts \ + --test_file ${WOW_DATA_FOLDER}/testseen_processed.txt \ + --train_file ${WOW_DATA_FOLDER}/train_processed.txt \ + --model_file ${MODEL_FILE} \ + --processed_file ${WOW_DATA_FOLDER}/output_testseen_knowledge_prompts.json \ + --data_type wow_seen + +# WoW test unseen +python ${DIR}/tasks/msdp/preprocessing.py \ + --func get_knwl_gen_prompts \ + --test_file ${WOW_DATA_FOLDER}/testunseen_processed.txt \ + --train_file ${WOW_DATA_FOLDER}/train_processed.txt \ + --model_file ${MODEL_FILE} \ + --processed_file ${WOW_DATA_FOLDER}/output_testunseen_knowledge_prompts.json \ + --data_type wow_unseen + +# WoI +python ${DIR}/tasks/msdp/preprocessing.py \ + --func get_knwl_gen_prompts \ + --test_file ${WOI_DATA_FOLDER}/test_processed.txt \ + --train_file ${WOW_DATA_FOLDER}/train_processed.txt \ + --model_file ${MODEL_FILE} \ + --processed_file ${WOI_DATA_FOLDER}/output_test_knowledge_prompts.json \ + --data_type woi + + +# Get the response generation 
prompts (can be applied for all the test datasets) +python ${DIR}/tasks/msdp/preprocessing.py \ + --func get_resp_gen_prompts \ + --train_file ${WOW_DATA_FOLDER}/train_processed.txt \ + --processed_file ${WOW_DATA_FOLDER}/output_response_prompts.txt + diff --git a/multilinguality_megatron/examples/msdp/eval_knwl_generation.sh b/multilinguality_megatron/examples/msdp/eval_knwl_generation.sh new file mode 100644 index 0000000000000000000000000000000000000000..ff438f169598f11c72a7fdc0d630989a3e9626f8 --- /dev/null +++ b/multilinguality_megatron/examples/msdp/eval_knwl_generation.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +######################### +# Evaluate the F1 scores. +######################### + +WORLD_SIZE=1 +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +MODEL_GEN_PATH= \ + (e.g., /testseen_knowledge_generations.txt) +GROUND_TRUTH_PATH= \ + (e.g., /testseen_knowledge_reference.txt) + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/msdp/main.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --seq_length 2048 \ + --max_position_embeddings 2048 \ + --micro_batch_size 4 \ + --task MSDP-EVAL-F1 \ + --guess_file ${MODEL_GEN_PATH} \ + --answer_file ${GROUND_TRUTH_PATH} + + +############################################ +# Evaluate BLEU, METEOR, and ROUGE-L scores. +############################################ + +# We follow the nlg-eval (https://github.com/Maluuba/nlg-eval) to +# evaluate the BLEU, METEOR, and ROUGE-L scores. + +# To evaluate on these metrics, please setup the environments based on +# the nlg-eval github, and run the corresponding evaluation commands. + +nlg-eval \ + --hypothesis= \ + --references= diff --git a/multilinguality_megatron/examples/msdp/eval_resp_generation.sh b/multilinguality_megatron/examples/msdp/eval_resp_generation.sh new file mode 100644 index 0000000000000000000000000000000000000000..6d885df3e58ba8f96a43cd1e99542d7b39181460 --- /dev/null +++ b/multilinguality_megatron/examples/msdp/eval_resp_generation.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +######################### +# Evaluate the F1 scores. +######################### + +WORLD_SIZE=1 +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +MODEL_GEN_PATH= \ + (e.g., /testseen_response_generations.txt) +GROUND_TRUTH_PATH= \ + (e.g., /testseen_response_reference.txt) + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/msdp/main.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --seq_length 2048 \ + --max_position_embeddings 2048 \ + --micro_batch_size 4 \ + --task MSDP-EVAL-F1 \ + --guess_file ${MODEL_GEN_PATH} \ + --answer_file ${GROUND_TRUTH_PATH} + + +########################## +# Evaluate the KF1 scores. +########################## + +MODEL_GEN_PATH= \ + (e.g., /testseen_response_generations.txt) +GROUND_TRUTH_PATH= \ + (e.g., /testseen_knowledge_reference.txt) + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/msdp/main.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --seq_length 2048 \ + --max_position_embeddings 2048 \ + --micro_batch_size 4 \ + --task MSDP-EVAL-F1 \ + --guess_file ${MODEL_GEN_PATH} \ + --answer_file ${GROUND_TRUTH_PATH} + + +############################################ +# Evaluate BLEU, METEOR, and ROUGE-L scores. 
+############################################ + +# We follow the nlg-eval (https://github.com/Maluuba/nlg-eval) to +# evaluate the BLEU, METEOR, and ROUGE-L scores. + +# To evaluate on these metrics, please setup the environments based on +# the nlg-eval github, and run the corresponding evaluation commands. + +nlg-eval \ + --hypothesis= \ + --references= diff --git a/multilinguality_megatron/examples/msdp/prep_resp_gen.sh b/multilinguality_megatron/examples/msdp/prep_resp_gen.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f202724dddbaa6ada3bcb1c33ec035a3afe44ee --- /dev/null +++ b/multilinguality_megatron/examples/msdp/prep_resp_gen.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Preparing the input file for the response generation (second-stage prompting) + +DIR=`pwd` + +TEST_FILE= \ + (e.g., /testseen_processed.txt) +KNOWLEDGE_FILE= \ + (e.g., /testseen_knowledge_generations.txt) +PROCESSED_FILE= \ + (e.g., /testseen_processed_with_generated_knowledge.txt) + +python ${DIR}/tasks/msdp/preprocessing.py \ + --func prepare_input \ + --test_file ${TEST_FILE} \ + --knwl_gen_file ${KNOWLEDGE_FILE} \ + --processed_file ${PROCESSED_FILE} diff --git a/multilinguality_megatron/examples/msdp/prompt_knwl_gen.sh b/multilinguality_megatron/examples/msdp/prompt_knwl_gen.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f928f15a4b41960912228a3dd0708ffbc653d89 --- /dev/null +++ b/multilinguality_megatron/examples/msdp/prompt_knwl_gen.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Stage-1: Prompt a pretrained language model to generate the context-relevant knowledge +# The input contains prompts and current dialogue context, the output is the relevant knowledge +# The size of the pretrained language model is 357M + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +CHECKPOINT_PATH= (e.g., /357m) +VOCAB_PATH= (e.g., /gpt2-vocab.json) +MERGE_PATH= (e.g., /gpt2-merges.txt) +INPUT_PATH= \ + (e.g., /testseen_processed.txt) +PROMPT_PATH= \ + (e.g., /testseen_knowledge_prompts.json) +OUTPUT_PATH= \ + (e.g., /testseen_knowledge_generations.txt) + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/msdp/main.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --seq_length 2048 \ + --max_position_embeddings 2048 \ + --micro_batch_size 1 \ + --vocab_file ${VOCAB_PATH} \ + --merge_file ${MERGE_PATH} \ + --load ${CHECKPOINT_PATH} \ + --fp16 \ + --DDP_impl torch \ + --tokenizer_type GPT2BPETokenizer \ + --sample_input_file ${INPUT_PATH} \ + --sample_output_file ${OUTPUT_PATH} \ + --prompt_file ${PROMPT_PATH} \ + --prompt_type knowledge \ + --num_prompt_examples 10 \ + --task MSDP-PROMPT + +# NOTE: If you use api for the model generation, please use +# the "--api_prompt" flag (setting this value as True). diff --git a/multilinguality_megatron/examples/msdp/prompt_resp_gen.sh b/multilinguality_megatron/examples/msdp/prompt_resp_gen.sh new file mode 100644 index 0000000000000000000000000000000000000000..246ae83b9f17bcb9a8d006839e47d3b3949f7b9a --- /dev/null +++ b/multilinguality_megatron/examples/msdp/prompt_resp_gen.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Stage-2: Prompt a pretrained language model to generate the corresponding response +# The input contains prompts, current dialogue context, and generated knowledge in Stage-1 +# The output is the corresponding response. 
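+# (prep_resp_gen.sh in this directory is the script that builds that Stage-2 input
+# file, by merging the Stage-1 knowledge generations with the processed dialogue
+# contexts.)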
+# The size of the pretrained language model is 357M + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +CHECKPOINT_PATH= (e.g., /357m) +VOCAB_PATH= (e.g., /gpt2-vocab.json) +MERGE_PATH= (e.g., /gpt2-merges.txt) +INPUT_PATH= (e.g., /testseen_processed.txt) +PROMPT_PATH= \ + (e.g., /response_prompts.txt) +OUTPUT_PATH= \ + (e.g., /output_testseen_response_generations.txt) + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/msdp/main.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --seq_length 2048 \ + --max_position_embeddings 2048 \ + --micro_batch_size 1 \ + --vocab_file ${VOCAB_PATH} \ + --merge_file ${MERGE_PATH} \ + --load ${CHECKPOINT_PATH} \ + --fp16 \ + --DDP_impl torch \ + --tokenizer_type GPT2BPETokenizer \ + --sample_input_file ${INPUT_PATH} \ + --sample_output_file ${OUTPUT_PATH} \ + --prompt_file ${PROMPT_PATH} \ + --prompt_type response \ + --num_prompt_examples 20 \ + --task MSDP-PROMPT + +# NOTE: If you use api for the model generation, please use +# the "--api_prompt" flag (setting this value as True). diff --git a/multilinguality_megatron/examples/parallelize.sh b/multilinguality_megatron/examples/parallelize.sh new file mode 100644 index 0000000000000000000000000000000000000000..8292c513cb978b35e04f7e7c510c9ad59d39f003 --- /dev/null +++ b/multilinguality_megatron/examples/parallelize.sh @@ -0,0 +1,39 @@ +#! /bin/bash + +# assert correct usage +if [[ $# -ne 4 ]]; then + echo "Usage: $0 <7,13,30,40,65,70> " + exit 1 +fi + + +# extract variables from command line +MODEL=$1 +SIZE=$2 +TENSOR_PARALLELISM=$3 +PIPELINE_PARALLELISM=$4 + + +# model-specific parameters +EXTRA_ARGS="" +if [[ $MODEL = falcon ]]; then + TRUE_VOCAB_SIZE=65024 +elif [[ $MODEL = llama ]] || [[ $MODEL = llama2 ]]; then + TRUE_VOCAB_SIZE=32017 # 17 new tokens + if (( $SIZE > 60 )); then + EXTRA_ARGS="--bf16" + fi +elif [[ $MODEL = codellama ]]; then + TRUE_VOCAB_SIZE=32033 # 32016 + 17 new tokens +fi + + +# finally call the script +python tools/checkpoint_util.py \ + --target_tensor_parallel_size $TENSOR_PARALLELISM \ + --target_pipeline_parallel_size $PIPELINE_PARALLELISM \ + --load_dir /pure-mlo-scratch/alhernan/megatron-data/checkpoints/${MODEL}-${SIZE}b/ \ + --save_dir /pure-mlo-scratch/alhernan/megatron-data/checkpoints/${MODEL}-${SIZE}b-tp$TENSOR_PARALLELISM-pp$PIPELINE_PARALLELISM/ \ + --model_type $MODEL \ + --true_vocab_size $TRUE_VOCAB_SIZE \ + $EXTRA_ARGS diff --git a/multilinguality_megatron/examples/pretrain_bert.sh b/multilinguality_megatron/examples/pretrain_bert.sh new file mode 100644 index 0000000000000000000000000000000000000000..043c4526606e4f9f32b1054fc3fc64f355ba4f0c --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_bert.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +RANK=0 +WORLD_SIZE=1 +DATA_PATH=_text_sentence +CHECKPOINT_PATH= + +python pretrain_bert.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 4 \ + --global_batch_size 8 \ + --seq_length 512 \ + --max_position_embeddings 512 \ + --train_iters 2000000 \ + --lr_decay_iters 990000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file bert-vocab.txt \ + --data_impl mmap \ + --split 949,50,1 \ + --lr 0.0001 \ + --min_lr 0.00001 \ + --lr_decay_style linear \ + --lr_warmup_fraction .01 \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 
1000 \ + --eval_iters 10 \ + --fp16 diff --git a/multilinguality_megatron/examples/pretrain_bert_distributed.sh b/multilinguality_megatron/examples/pretrain_bert_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..1566578775d6e3ed21db881344740da31c2d51ff --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_bert_distributed.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH=_text_sentence +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_bert.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 4 \ + --global_batch_size 32 \ + --seq_length 512 \ + --max_position_embeddings 512 \ + --train_iters 1000000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file bert-vocab.txt \ + --data_impl mmap \ + --split 949,50,1 \ + --distributed_backend nccl \ + --lr 0.0001 \ + --lr_decay_style linear \ + --min_lr 1.0e-5 \ + --lr_decay_iters 990000 \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --lr_warmup_fraction .01 \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 diff --git a/multilinguality_megatron/examples/pretrain_bert_distributed_with_mp.sh b/multilinguality_megatron/examples/pretrain_bert_distributed_with_mp.sh new file mode 100644 index 0000000000000000000000000000000000000000..50e0f48a22c2a345b520be1ba201d86ceeba7380 --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_bert_distributed_with_mp.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH=_text_sentence +VOCAB_FILE= +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_bert.py \ + --tensor_model_parallel_size 2 \ + --pipeline_model_parallel_size 2 \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 2 \ + --global_batch_size 16 \ + --seq_length 512 \ + --max_position_embeddings 512 \ + --train_iters 1000000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file $VOCAB_FILE \ + --data_impl mmap \ + --split 949,50,1 \ + --distributed_backend nccl \ + --lr 0.0001 \ + --lr_decay_style linear \ + --min_lr 1.0e-5 \ + --lr_decay_iters 990000 \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --lr_warmup_fraction .01 \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 diff --git a/multilinguality_megatron/examples/pretrain_gpt.sh b/multilinguality_megatron/examples/pretrain_gpt.sh new file mode 100644 index 0000000000000000000000000000000000000000..18f3c1917987a5da519ee23f88b8dfe7281aa9b3 --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_gpt.sh @@ -0,0 +1,41 @@ +#! 
/bin/bash + +# Runs the "345M" parameter model + +RANK=0 +WORLD_SIZE=1 + +DATA_PATH=_text_document +CHECKPOINT_PATH= + + +python pretrain_gpt.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 4 \ + --global_batch_size 8 \ + --seq_length 1024 \ + --max_position_embeddings 1024 \ + --train_iters 500000 \ + --lr_decay_iters 320000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file gpt2-vocab.json \ + --merge_file gpt2-merges.txt \ + --data_impl mmap \ + --split 949,50,1 \ + --distributed_backend nccl \ + --lr 0.00015 \ + --min_lr 1.0e-5 \ + --lr_decay_style cosine \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --lr_warmup_fraction .01 \ + --activations_checkpoint_method uniform \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 diff --git a/multilinguality_megatron/examples/pretrain_gpt3_175B.sh b/multilinguality_megatron/examples/pretrain_gpt3_175B.sh new file mode 100644 index 0000000000000000000000000000000000000000..628e41a72139b48692ba887dcc1048b62bc20de4 --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_gpt3_175B.sh @@ -0,0 +1,64 @@ +#!/bin/bash + + +#SBATCH --nodes=128 --exclusive --ntasks-per-node=8 --job-name=megatron_gpt3_175b + + +DIR=`pwd` +DATETIME=`date +'date_%y-%m-%d_time_%H-%M-%S'` +mkdir -p $DIR/logs + + +DATASET_1="" +DATASET_2="" +DATASET_3="" +DATASET="0.2 ${DATASET_1} 0.3 ${DATASET_2} 0.5 ${DATASET_3}" + + +options=" \ + --tensor_model_parallel_size 8 \ + --pipeline_model_parallel_size 16 \ + --num_layers 96 \ + --hidden_size 12288 \ + --num_attention_heads 96 \ + --seq_length 2048 \ + --max_position_embeddings 2048 \ + --micro_batch_size 1 \ + --global_batch_size 1536 \ + --rampup_batch_size 16 16 5859375 \ + --train_samples 146484375 \ + --lr_decay_samples 126953125 \ + --lr_warmup_samples 183105 \ + --lr 6.0e-5 \ + --min_lr 6.0e-6 \ + --lr_decay_style cosine \ + --log_interval 10 \ + --eval_iters 40 \ + --eval_interval 1000 \ + --data_path ${DATASET} \ + --vocab_file \ + --merge_file \ + --save_interval 1000 \ + --save \ + --load \ + --split 98,2,0 \ + --clip_grad 1.0 \ + --weight_decay 0.1 \ + --adam_beta1 0.9 \ + --adam_beta2 0.95 \ + --init_method_std 0.006 \ + --tensorboard_dir \ + --fp16 \ + --activations_checkpoint_method uniform " + +run_cmd="python -u ${DIR}/pretrain_gpt.py $@ ${options}" + + +srun -l \ + --container-image "nvcr.io/nvidia/pytorch:20.12-py3" \ + --container-mounts "" \ + --output=$DIR/logs/%x_%j_$DATETIME.log sh -c "${run_cmd}" + + +set +x + diff --git a/multilinguality_megatron/examples/pretrain_gpt_distributed.sh b/multilinguality_megatron/examples/pretrain_gpt_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..f98a97f8b96086f207dc66b87bab4731002fda58 --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_gpt_distributed.sh @@ -0,0 +1,48 @@ +#! 
/bin/bash + +# Runs the "345M" parameter model + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH=_text_document +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_gpt.py \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 8 \ + --global_batch_size 64 \ + --seq_length 1024 \ + --max_position_embeddings 1024 \ + --train_iters 500000 \ + --lr_decay_iters 320000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file gpt2-vocab.json \ + --merge_file gpt2-merges.txt \ + --data_impl mmap \ + --split 949,50,1 \ + --distributed_backend nccl \ + --lr 0.00015 \ + --lr_decay_style cosine \ + --min_lr 1.0e-5 \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --lr_warmup_fraction .01 \ + --activations_checkpoint_method uniform \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 diff --git a/multilinguality_megatron/examples/pretrain_gpt_distributed_with_mp.sh b/multilinguality_megatron/examples/pretrain_gpt_distributed_with_mp.sh new file mode 100644 index 0000000000000000000000000000000000000000..d3a9a677a18bd059bd0bbca73752c2653261fa4b --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_gpt_distributed_with_mp.sh @@ -0,0 +1,51 @@ +#! /bin/bash + +# Runs the "345M" parameter model + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH=_text_document +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_gpt.py \ + --tensor_model_parallel_size 2 \ + --pipeline_model_parallel_size 2 \ + --sequence_parallel \ + --num_layers 24 \ + --hidden_size 1024 \ + --num_attention_heads 16 \ + --micro_batch_size 4 \ + --global_batch_size 16 \ + --seq_length 1024 \ + --max_position_embeddings 1024 \ + --train_iters 500000 \ + --lr_decay_iters 320000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file gpt2-vocab.json \ + --merge_file gpt2-merges.txt \ + --data_impl mmap \ + --split 949,50,1 \ + --distributed_backend nccl \ + --lr 0.00015 \ + --lr_decay_style cosine \ + --min_lr 1.0e-5 \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --lr_warmup_fraction .01 \ + --activations_checkpoint_method uniform \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 diff --git a/multilinguality_megatron/examples/pretrain_ict.sh b/multilinguality_megatron/examples/pretrain_ict.sh new file mode 100644 index 0000000000000000000000000000000000000000..d7f56cd88f37db89a32c044793e5f922ff365b11 --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_ict.sh @@ -0,0 +1,44 @@ +#! 
/bin/bash + +# Runs the "217M" parameter biencoder model for ICT retriever + +RANK=0 +WORLD_SIZE=1 + +PRETRAINED_BERT_PATH= +TEXT_DATA_PATH= +TITLE_DATA_PATH= +CHECKPOINT_PATH= + + +python pretrain_ict.py \ + --num_layers 12 \ + --hidden_size 768 \ + --num_attention_heads 12 \ + --tensor_model_parallel_size 1 \ + --micro_batch_size 32 \ + --seq_length 256 \ + --max_position_embeddings 512 \ + --train_iters 100000 \ + --vocab_file bert-vocab.txt \ + --tokenizer_type BertWordPieceLowerCase \ + --DDP_impl torch \ + --bert_load ${PRETRAINED_BERT_PATH} \ + --log_interval 100 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --retriever_report_topk_accuracies 1 5 10 20 100 \ + --retriever_score_scaling \ + --load $CHECKPOINT_PATH \ + --save $CHECKPOINT_PATH \ + --data_path ${TEXT_DATA_PATH} \ + --titles_data_path ${TITLE_DATA_PATH} \ + --lr 0.0001 \ + --lr_decay_style linear \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --lr_warmup_fraction 0.01 \ + --save_interval 4000 \ + --exit_interval 8000 \ + --query_in_block_prob 0.1 \ + --fp16 diff --git a/multilinguality_megatron/examples/pretrain_t5.sh b/multilinguality_megatron/examples/pretrain_t5.sh new file mode 100644 index 0000000000000000000000000000000000000000..4761833d153be4dd08105e1266bbee5ea7fa41b5 --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_t5.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +RANK=0 +WORLD_SIZE=1 +DATA_PATH= +VOCAB_FILE= +CHECKPOINT_PATH= + +python pretrain_t5.py \ + --num_layers 12 \ + --hidden_size 768 \ + --num_attention_heads 12 \ + --kv_channels 64 \ + --ffn_hidden_size 3072 \ + --encoder_seq_length 512 \ + --decoder_seq_length 128 \ + --micro_batch_size 16 \ + --global_batch_size 16 \ + --max_position_embeddings 512 \ + --train_iters 1000000 \ + --lr_decay_iters 1000000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file $VOCAB_FILE \ + --data_impl mmap \ + --split 949,50,1 \ + --lr 0.0001 \ + --min_lr 0.00001 \ + --lr_decay_style linear \ + --lr_warmup_fraction .01 \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 \ + --vocab_extra_ids 100 diff --git a/multilinguality_megatron/examples/pretrain_t5_distributed.sh b/multilinguality_megatron/examples/pretrain_t5_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..d9f3ab3b8962938a3b6cddf817e05f58ff355cde --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_t5_distributed.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH= +VOCAB_FILE= +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_t5.py \ + --num_layers 12 \ + --hidden_size 768 \ + --num_attention_heads 12 \ + --kv_channels 64 \ + --ffn_hidden_size 3072 \ + --encoder_seq_length 512 \ + --decoder_seq_length 128 \ + --micro_batch_size 16 \ + --global_batch_size 128 \ + --max_position_embeddings 512 \ + --train_iters 1000000 \ + --lr_decay_iters 1000000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file $VOCAB_FILE \ + --data_impl mmap \ + --split 949,50,1 \ + --lr 0.0001 \ + --min_lr 0.00001 \ + --lr_decay_style linear \ + --lr_warmup_fraction .01 \ + 
--weight_decay 1e-2 \ + --clip_grad 1.0 \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 \ + --vocab_extra_ids 100 diff --git a/multilinguality_megatron/examples/pretrain_t5_distributed_with_mp.sh b/multilinguality_megatron/examples/pretrain_t5_distributed_with_mp.sh new file mode 100644 index 0000000000000000000000000000000000000000..398fe177655b3a356b2c290b14efd337b47f283d --- /dev/null +++ b/multilinguality_megatron/examples/pretrain_t5_distributed_with_mp.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH= +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_t5.py \ + --tensor-model-parallel-size 2 \ + --num_layers 12 \ + --hidden_size 768 \ + --num_attention_heads 12 \ + --kv_channels 64 \ + --ffn_hidden_size 3072 \ + --encoder_seq_length 512 \ + --decoder_seq_length 128 \ + --micro_batch_size 16 \ + --global_batch_size 128 \ + --max_position_embeddings 512 \ + --train_iters 1000000 \ + --lr_decay_iters 1000000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data_path $DATA_PATH \ + --vocab_file t5-vocab.txt \ + --data_impl mmap \ + --split 949,50,1 \ + --lr 0.0001 \ + --min_lr 0.00001 \ + --lr_decay_style linear \ + --lr_warmup_fraction .01 \ + --weight_decay 1e-2 \ + --clip_grad 1.0 \ + --log_interval 100 \ + --save_interval 10000 \ + --eval_interval 1000 \ + --eval_iters 10 \ + --fp16 \ + --vocab_extra_ids 100 diff --git a/multilinguality_megatron/examples/run_text_generation_server_345M.sh b/multilinguality_megatron/examples/run_text_generation_server_345M.sh new file mode 100644 index 0000000000000000000000000000000000000000..f9b7c4cc6964e4378ee54c911e6ee5ab567b945d --- /dev/null +++ b/multilinguality_megatron/examples/run_text_generation_server_345M.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# This example will start serving the 345M model. 
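+# Once the server is running it can be queried over HTTP. As a rough sketch
+# (the upstream Megatron REST server is flask-restful based and, by default,
+# listens on port 5000 with a PUT /api endpoint -- verify against your checkout):
+#   curl -X PUT http://localhost:5000/api \
+#        -H 'Content-Type: application/json' \
+#        -d '{"prompts": ["Hello, my name is"], "tokens_to_generate": 32}'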
+DISTRIBUTED_ARGS="--nproc_per_node 1 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +CHECKPOINT= +VOCAB_FILE= +MERGE_FILE= + +pip install flask-restful + +python -m torch.distributed.run $DISTRIBUTED_ARGS tools/run_text_generation_server.py \ + --tensor_model_parallel_size 1 \ + --pipeline_model_parallel_size 1 \ + --num_layers 24 \ + --hidden_size 1024 \ + --load ${CHECKPOINT} \ + --num_attention_heads 16 \ + --max_position_embeddings 1024 \ + --tokenizer_type GPT2BPETokenizer \ + --fp16 \ + --micro_batch_size 1 \ + --seq_length 1024 \ + --out_seq_length 1024 \ + --temperature 1.0 \ + --vocab_file $VOCAB_FILE \ + --merge_file $MERGE_FILE \ + --top_p 0.9 \ + --seed 42 diff --git a/multilinguality_megatron/examples/run_text_generation_server_345M_8_tensor_parallel.sh b/multilinguality_megatron/examples/run_text_generation_server_345M_8_tensor_parallel.sh new file mode 100644 index 0000000000000000000000000000000000000000..6ef0a8dc4a712671d6dc205b18875f2c96445c90 --- /dev/null +++ b/multilinguality_megatron/examples/run_text_generation_server_345M_8_tensor_parallel.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# This example will start serving the 345M model that is partitioned 8 way tensor parallel +DISTRIBUTED_ARGS="--nproc_per_node 8 \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +CHECKPOINT= +VOCAB_FILE= +MERGE_FILE= + +pip install flask-restful + +python -m torch.distributed.launch $DISTRIBUTED_ARGS tools/run_text_generation_server.py \ + --tensor_model_parallel_size 8 \ + --pipeline_model_parallel_size 1 \ + --num_layers 24 \ + --hidden_size 1024 \ + --load ${CHECKPOINT} \ + --num_attention_heads 16 \ + --max_position_embeddings 1024 \ + --tokenizer_type GPT2BPETokenizer \ + --fp16 \ + --micro_batch_size 1 \ + --seq_length 1024 \ + --out_seq_length 1024 \ + --temperature 1.0 \ + --vocab_file $VOCAB_FILE \ + --merge_file $MERGE_FILE \ + --top_p 0.9 \ + --seed 42 diff --git a/multilinguality_megatron/examples/sc21/CONFIG.sh b/multilinguality_megatron/examples/sc21/CONFIG.sh new file mode 100644 index 0000000000000000000000000000000000000000..81dfc2ef4f97ece9bff032604f4171d9c39ff311 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/CONFIG.sh @@ -0,0 +1,57 @@ +#!/bin/bash + + +# SLURM options. +export SLURM_PARTITION= +export SLURM_ACCOUNT= + + +# Source code. +export MEGATRON_CODE_DIR= + + +# This variable is used to mount the relevant part of the filesystem +# inside the docker container. Note that the `MEGATRON_CODE_DIR` and the +# launch directory already get mounted; this variable should be used to +# mount the directories that contain the data and tokenizer files. +export DOCKER_MOUNT_DIR= + + +# Data and tokenizer files. +MEGATRON_DATA= +BPE_VOCAB_FILE= +BPE_MERGE_FILE= + + +# Megatron input parameters. +# `MEGATRON_EXTRA_PARAMS` can be used to provide any extra parameters +# that are not listed here. 
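+# For example, the run_*.sh scripts in this directory use it to enable
+# activation checkpointing before sourcing this file:
+#   MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform "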
+export MEGATRON_PARAMS=" ${MEGATRON_EXTRA_PARAMS} \ + --tensor_model_parallel_size ${TP} \ + --pipeline_model_parallel_size ${PP} \ + --micro_batch_size ${MBS} \ + --global_batch_size ${GBS} \ + --num_layers ${NLS} \ + --hidden_size ${HS} \ + --num_attention_heads ${NAH} \ + --DDP_impl ${DDP} \ + --data_path ${MEGATRON_DATA} \ + --vocab_file ${BPE_VOCAB_FILE} \ + --merge_file ${BPE_MERGE_FILE} \ + --log_interval 5 \ + --seq_length 2048 \ + --max_position_embeddings 2048 \ + --train_iters 500 \ + --lr_decay_iters 320 \ + --lr 0.0001 \ + --min_lr 0.00001 \ + --lr_decay_style cosine \ + --lr_warmup_fraction 0.01 \ + --split 969,30,1 \ + --eval_iters 100 \ + --eval_interval 1000 \ + --clip_grad 1.0 \ + --fp16 \ + --loss_scale 8192 " + + diff --git a/multilinguality_megatron/examples/sc21/README.md b/multilinguality_megatron/examples/sc21/README.md new file mode 100644 index 0000000000000000000000000000000000000000..940c37903ef063613e3d247b489ba2d186bbea4d --- /dev/null +++ b/multilinguality_megatron/examples/sc21/README.md @@ -0,0 +1,45 @@ +# Reproducing Figures in SC21 Paper + + +This directory contains some of the scripts that were used to produce the +results in the [Megatron paper](https://arxiv.org/pdf/2104.04473.pdf) that is +to appear at [SuperComputing 2021](https://sc21.supercomputing.org/). These +scripts use [Slurm](https://slurm.schedmd.com/documentation.html) with the +[pyxis plugin](https://github.com/NVIDIA/pyxis), but can be modified for other +schedulers as well. + + +## Setup + +All the cluster-dependent variables are in [`CONFIG.sh`](./CONFIG.sh). Please +update the unspecified values (in angle brackets `<...>`) before launching any +scripts. + + + +## Scripts + +Below is a list of scripts that can be used to reproduce various figures in our +[paper](https://arxiv.org/pdf/2104.04473.pdf): + +* [run_table_1.sh](./run_table_1.sh): Table 1 showing weak-scaling throughput +for GPT models ranging from 1 billion to 1 trillion parameters. +* [run_figure_11.sh](./run_figure_11.sh): Figure 11 showing the weak-scaling +performance of pipeline parallelism. +* [run_figure_12.sh](./run_figure_12.sh): Figure 12 showing the effect of +the interleaved schedule on a 175B GPT model. +* [run_figure_13.sh](./run_figure_13.sh): Figure 13 showing the effect of +different degrees of pipeline and tensor model parallelism on a model with +162.2 billion parameters. +* [run_figure_14.sh](./run_figure_14.sh): Figure 14 showing the effect of +different degrees of data and pipeline model parallelism on a model with +5.9 billion parameters. +* [run_figure_15.sh](./run_figure_15.sh): Figure 15 showing the effect of +different degrees of data and tensor model parallelism on a model with +5.9 billion parameters. +* [run_figure_16.sh](./run_figure_16.sh): Figure 16 showing the effect of +microbatch size. +* [run_figure_17.sh](./run_figure_17.sh): Figure 17 showing the effect of +activation recomputation. +* [run_figure_18.sh](./run_figure_18.sh): Figure 18 showing the effect of +the scatter-gather communication optimization. 
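+
+As an example workflow: fill in the cluster-specific values in
+[`CONFIG.sh`](./CONFIG.sh), choose the case to run at the top of the relevant
+`run_*.sh` script, and execute it (e.g. `bash run_figure_11.sh`); the script
+sources `CONFIG.sh` and submits the corresponding Slurm job through
+[`SBATCH.sh`](./SBATCH.sh).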
diff --git a/multilinguality_megatron/examples/sc21/SBATCH.sh b/multilinguality_megatron/examples/sc21/SBATCH.sh new file mode 100644 index 0000000000000000000000000000000000000000..95431b9b7e780bbdd4b18593546356aad02945b1 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/SBATCH.sh @@ -0,0 +1,13 @@ +#!/bin/bash + + +sbatch -p ${SLURM_PARTITION} \ + -A ${SLURM_ACCOUNT} \ + --job-name=${JOB_NAME} \ + --nodes=${NNODES} \ + --export=MEGATRON_CODE_DIR,MEGATRON_PARAMS,DOCKER_MOUNT_DIR SRUN.sh + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/SRUN.sh b/multilinguality_megatron/examples/sc21/SRUN.sh new file mode 100644 index 0000000000000000000000000000000000000000..52a9aff0c1294acb1e5527faad4f73fe5e027e21 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/SRUN.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +#SBATCH -t 0:30:00 --exclusive --mem=0 --overcommit --ntasks-per-node=8 + + +THIS_DIR=`pwd` +DATETIME=`date +'date_%y-%m-%d_time_%H-%M-%S'` +mkdir -p ${THIS_DIR}/logs + + +CMD="python -u ${MEGATRON_CODE_DIR}/pretrain_gpt.py ${MEGATRON_PARAMS}" + + +srun -l \ + --container-image "nvcr.io#nvidia/pytorch:20.12-py3" \ + --container-mounts "${THIS_DIR}:${THIS_DIR},${MEGATRON_CODE_DIR}:${MEGATRON_CODE_DIR},${DOCKER_MOUNT_DIR}:${DOCKER_MOUNT_DIR}" \ + --output=${THIS_DIR}/logs/%x_%j_$DATETIME.log sh -c "${CMD}" + diff --git a/multilinguality_megatron/examples/sc21/run_figure_11.sh b/multilinguality_megatron/examples/sc21/run_figure_11.sh new file mode 100644 index 0000000000000000000000000000000000000000..2ec7d9eb31e50e01e3d5dab6978a71deffd247aa --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_11.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. +# ================================ + +# Pipeline-parallel size options = [1, 2, 4, 8]. +PP=1 + +# Batch size (global batch size) options = [8, 128]. +GBS=8 + + + + + +# Set pipeline-parallel size options. +NLS=$((3*PP)) +NNODES=${PP} + + +# Other params. +TP=8 +MBS=1 +HS=20480 +NAH=128 +DDP=local +MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " + + +# Name of the job. +export JOB_NAME=results_figure_11_pipeline_parallel_size_${PP}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_figure_12.sh b/multilinguality_megatron/examples/sc21/run_figure_12.sh new file mode 100644 index 0000000000000000000000000000000000000000..a4b0ee12e04c41a00ca8cbfb7f2178c066bf69b7 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_12.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. +# ================================ + +# Interleaved schedule options = [YES, NO]. +INTERLEAVED=YES + +# Batch size (global batch size) options = [12, 24, 36, ..., 60]. +GBS=12 + + + + + +# Set interleaved schedule options. +if [ ${INTERLEAVED} == "YES" ]; then + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform --num_layers_per_virtual_pipeline_stage 2 " +elif [ ${INTERLEAVED} == "NO" ]; then + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +else + echo "Invalid configuration" + exit 1 +fi + + +# Other params. +TP=8 +PP=12 +MBS=1 +NLS=96 +HS=12288 +NAH=96 +DDP=local +NNODES=12 + + +# Name of the job. +export JOB_NAME=results_figure_12_interleaved_${INTERLEAVED}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. 
`pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_figure_13.sh b/multilinguality_megatron/examples/sc21/run_figure_13.sh new file mode 100644 index 0000000000000000000000000000000000000000..7ba560e87b253fb63192866d3089c3d967f086e6 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_13.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. +# ================================ + +# Pipeline-parallel size options = [2, 4, 8, 16, 32]. +PP=2 + +# Batch size (global batch size) options = [32, 128]. +GBS=32 + + + + + +# Set pipeline-parallel and tensor-parallel size options. +TP=$((64/PP)) + + +# Other params. +MBS=1 +NLS=32 +HS=20480 +NAH=128 +DDP=local +MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +NNODES=8 + + +# Name of the job. +export JOB_NAME=results_figure_13_pipeline_parallel_size_${PP}_tensor_parallel_size_${TP}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_figure_14.sh b/multilinguality_megatron/examples/sc21/run_figure_14.sh new file mode 100644 index 0000000000000000000000000000000000000000..4b83879c4bb71546a7fb5bac365491efd96d3049 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_14.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. +# ================================ + +# Pipeline-parallel size options = [2, 4, 8, 16, 32]. +PP=2 + +# Batch size (global batch size) options = [32, 512]. +GBS=32 + + + + + +# Set pipeline-parallel and data-parallel size options. +DP=$((64/PP)) + + +# Other params. +TP=1 +MBS=1 +NLS=32 +HS=3840 +NAH=32 +DDP=local +MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +NNODES=8 + + +# Name of the job. +export JOB_NAME=results_figure_14_pipeline_parallel_size_${PP}_data_parallel_size_${DP}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_figure_15.sh b/multilinguality_megatron/examples/sc21/run_figure_15.sh new file mode 100644 index 0000000000000000000000000000000000000000..547ad1de6fb091ca5f922e2b48559ceadffa7ce8 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_15.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. +# ================================ + +# Tensor-parallel size options = [2, 4, 8, 16, 32]. +TP=2 + +# Batch size (global batch size) options = [32, 128, 512]. +GBS=32 + + + + + +# Set tensor-parallel and data-parallel size options. +DP=$((64/TP)) + + +# Other params. +PP=1 +MBS=1 +NLS=32 +HS=3840 +NAH=32 +DDP=local +MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +NNODES=8 + + +# Name of the job. +export JOB_NAME=results_figure_15_tensor_parallel_size_${TP}_data_parallel_size_${DP}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_figure_16.sh b/multilinguality_megatron/examples/sc21/run_figure_16.sh new file mode 100644 index 0000000000000000000000000000000000000000..8c353a3e7623262baf9dc6c24554e9ab4dce26e7 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_16.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. 
+# ================================ + +# Microbatch size options = [1, 2, 4, 8]. +MBS=1 + +# Batch size (global batch size) options = [128, 512]. +GBS=128 + + + + + +# Other params. +TP=8 +PP=8 +NLS=32 +HS=15360 +NAH=128 +DDP=local +MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +NNODES=8 + + +# Name of the job. +export JOB_NAME=results_figure_16_microbatch_size_${MBS}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_figure_17.sh b/multilinguality_megatron/examples/sc21/run_figure_17.sh new file mode 100644 index 0000000000000000000000000000000000000000..d6899b321d6c11238af3b12da3690c8c3d46be34 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_17.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. +# ================================ + +# Activation recomputation options = [YES, NO]. +ACTIVATION_RECOMPUTATION=YES + +# Batch size (global batch size) options = [1, 2, 4, ..., 256]. +GBS=1 + + + + + +# Set activation recomputation. +if [ ${ACTIVATION_RECOMPUTATION} == "YES" ]; then + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +elif [ ${ACTIVATION_RECOMPUTATION} == "NO" ]; then + MEGATRON_EXTRA_PARAMS="" +else + echo "Invalid configuration" + exit 1 +fi + + +# Other params. +TP=8 +PP=16 +MBS=1 +NLS=80 +HS=12288 +NAH=96 +DDP=local +NNODES=16 + + +# Name of the job. +export JOB_NAME=results_figure_17_activation_recomputation_${ACTIVATION_RECOMPUTATION}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_figure_18.sh b/multilinguality_megatron/examples/sc21/run_figure_18.sh new file mode 100644 index 0000000000000000000000000000000000000000..0d4d7d1cbe3b53b978358d5a299db180754352b7 --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_figure_18.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. +# ================================ + +# Scatter-gather communication optimization options = [YES, NO]. +SCATTER_GATHER=YES + +# Batch size (global batch size) options = [12, 24, 36, ..., 60]. +GBS=12 + + + + + +# Set scatter-gather communication optimization options. +if [ ${SCATTER_GATHER} == "YES" ]; then + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform --num_layers_per_virtual_pipeline_stage 2 " +elif [ ${SCATTER_GATHER} == "NO" ]; then + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform --num_layers_per_virtual_pipeline_stage 2 --no_scatter_gather_tensors_in_pipeline " +else + echo "Invalid configuration" + exit 1 +fi + + +# Other params. +TP=8 +PP=12 +MBS=1 +NLS=96 +HS=12288 +NAH=96 +DDP=local +NNODES=12 + + +# Name of the job. +export JOB_NAME=results_figure_18_scatter_gather_${SCATTER_GATHER}_batch_size_${GBS} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/sc21/run_table_1.sh b/multilinguality_megatron/examples/sc21/run_table_1.sh new file mode 100644 index 0000000000000000000000000000000000000000..0fa3a5b018e7e09dd06db5a8c4a34830f14e16ab --- /dev/null +++ b/multilinguality_megatron/examples/sc21/run_table_1.sh @@ -0,0 +1,145 @@ +#!/bin/bash + +# ================================ +# Choose the case to run. 
+# ================================ +# model size options = [1.7B, 3.6B, 7.5B, 18B, 39B, 76B, 145B, 310B, 530B, 1T] +MODEL_SIZE=1.7B + + + + + + +if [ ${MODEL_SIZE} == "1.7B" ]; then + TP=1 + PP=1 + MBS=16 + GBS=512 + NLS=24 + HS=2304 + NAH=24 + DDP=torch + NNODES=4 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +elif [ ${MODEL_SIZE} == "3.6B" ]; then + TP=2 + PP=1 + MBS=16 + GBS=512 + NLS=30 + HS=3072 + NAH=32 + DDP=torch + NNODES=8 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +elif [ ${MODEL_SIZE} == "7.5B" ]; then + TP=4 + PP=1 + MBS=16 + GBS=512 + NLS=36 + HS=4096 + NAH=32 + DDP=torch + NNODES=16 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +elif [ ${MODEL_SIZE} == "18B" ]; then + TP=8 + PP=1 + MBS=8 + GBS=1024 + NLS=40 + HS=6144 + NAH=48 + DDP=torch + NNODES=32 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +elif [ ${MODEL_SIZE} == "39B" ]; then + TP=8 + PP=2 + MBS=4 + GBS=1536 + NLS=48 + HS=8192 + NAH=64 + DDP=local + NNODES=64 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +elif [ ${MODEL_SIZE} == "76B" ]; then + TP=8 + PP=4 + MBS=2 + GBS=1792 + NLS=60 + HS=10240 + NAH=80 + DDP=local + NNODES=128 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform --num_layers_per_virtual_pipeline_stage 5" +elif [ ${MODEL_SIZE} == "145B" ]; then + TP=8 + PP=8 + MBS=2 + GBS=2304 + NLS=80 + HS=12288 + NAH=96 + DDP=local + NNODES=192 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform --num_layers_per_virtual_pipeline_stage 5 " +elif [ ${MODEL_SIZE} == "310B" ]; then + TP=8 + PP=16 + MBS=1 + GBS=2160 + NLS=96 + HS=16384 + NAH=128 + DDP=local + NNODES=240 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform --num_layers_per_virtual_pipeline_stage 3 " +elif [ ${MODEL_SIZE} == "530B" ]; then + TP=8 + PP=35 + MBS=1 + GBS=2520 + NLS=105 + HS=20480 + NAH=128 + DDP=local + NNODES=315 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform --num_layers_per_virtual_pipeline_stage 1 " +elif [ ${MODEL_SIZE} == "1T" ]; then + TP=8 + PP=64 + MBS=1 + GBS=3072 + NLS=128 + HS=25600 + NAH=160 + DDP=local + NNODES=384 + MEGATRON_EXTRA_PARAMS="--activations-checkpoint-method uniform " +else + echo "Invalid configuration" + exit 1 +fi + + +# Name of the job +export JOB_NAME=results_table_1_model_size_${MODEL_SIZE} + + +# Import the configs. +. `pwd`/CONFIG.sh + + +# Submit the job. +. `pwd`/SBATCH.sh + + +exit 0 + + + diff --git a/multilinguality_megatron/examples/verify.sh b/multilinguality_megatron/examples/verify.sh new file mode 100644 index 0000000000000000000000000000000000000000..3198c1c8dcf45866f5c3e1ca7a88c4420f4d74f6 --- /dev/null +++ b/multilinguality_megatron/examples/verify.sh @@ -0,0 +1,53 @@ +#! 
/bin/bash + +# assert correct usage +if [[ $# -ne 2 ]]; then + echo "Usage: $0 <7,13,30,34,40,65,70>" + exit 1 +fi + + +# extract variables from command line +MODEL=$1 +SIZE=$2 + + +# based on the model, determine args +if [[ $MODEL = falcon ]]; then + DATA_PATH=/pure-mlo-scratch/pagliard/data/wikitext-falcon/wiki-train_text_document + CACHE=/pure-mlo-scratch/alhernan/huggingface_cache/ + TOKENIZER=FalconTokenizer + EXTRA_ARGS="" +elif [[ $MODEL = llama ]] || [[ $MODEL = llama2 ]]; then + DATA_PATH=/pure-mlo-scratch/alhernan/data/wikitext-llama-32000/wiki-train_text_document + TOKENIZER=SentencePieceTokenizer + EXTRA_ARGS="--vocab_file=/pure-mlo-scratch/llama/tokenizer.model --no_new_tokens --use_rms_norm + --glu_activation swiglu --no_tie_embed_logits" + if [[ $MODEL = llama ]]; then + CACHE=/pure-mlo-scratch/llama/converted_HF_${SIZE}B/ + EXTRA_ARGS="$EXTRA_ARGS --layernorm_epsilon 1e-6" + else + CACHE=/pure-mlo-scratch/alhernan/llama2/llama-2-${SIZE}b/ + EXTRA_ARGS="$EXTRA_ARGS --layernorm_epsilon 1e-5" + fi +else + echo "Model should be either llama, llama2 or falcon, not $MODEL" + exit 1 +fi +COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_dropout_fusion + --no_bias_gelu_fusion --use_flash_attn" + + +# finally call the script +DISTRIBUTED_ARGS="--nproc_per_node 1 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000" +torchrun $DISTRIBUTED_ARGS verify_correctness.py \ + --model_name $MODEL \ + --load /pure-mlo-scratch/alhernan/megatron-data/checkpoints/${MODEL}-${SIZE}b/ \ + --data_path $DATA_PATH \ + --huggingface_cache $CACHE \ + --huggingface_device "cuda:1" \ + --tokenizer_type $TOKENIZER \ + --model_size $SIZE \ + --bf16 \ + $COMMON_ARGS \ + $EXTRA_ARGS diff --git a/multilinguality_megatron/finetune.py b/multilinguality_megatron/finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..d479ee5ee4973dfd41c6a19277f8fc7c8fc6c0b1 --- /dev/null +++ b/multilinguality_megatron/finetune.py @@ -0,0 +1,307 @@ +"""Fine-tune gpt, llama or falcon""" + +import datetime as dt +from functools import partial + +import torch + +from megatron import get_args, get_counters, get_timers, get_tokenizer, print_rank_0 +from megatron.core import tensor_parallel +from megatron.core.parallel_state import get_data_parallel_group +from megatron.data.gpt_dataset import ( + build_train_valid_test_datasets as gpt_build_datasets, +) +from megatron.data.instruction_dataset import ( + build_train_valid_test_datasets as instruct_build_datasets, +) +from megatron.data.instruction_dataset import instruction_collator +from megatron.initialize import initialize_megatron +from megatron.metrics import MetricInput, get_metric +from megatron.model import ( + FalconModel, + GPTModel, + LlamaModel, + MistralModel, + ModelType, + GemmaModel, +) +from megatron.training import pretrain +from megatron.utils import ( + average_losses_across_data_parallel_group, + get_ltor_masks_and_position_ids, +) + +## +# Model provider utilities +## + + +def model_provider(pre_process: bool = True, post_process: bool = True): + """Build the model.""" + + print_rank_0("Building model ...") + + args = get_args() + if args.model_name == "gpt": + cls = GPTModel + elif args.model_name == "falcon": + cls = FalconModel + elif args.model_name in {"llama", "llama2", "llama3", "codellama"}: + cls = partial(LlamaModel, version=1 if args.model_name == "llama" else 2) + elif args.model_name == "gemma": + cls = GemmaModel + elif args.model_name == "mistral": + cls = MistralModel + if 
args.sliding_window_size != 4096: + print_rank_0( + "Mistral uses sliding window attention (set sliding_window=4096)" + ) + args.sliding_window_size = 4096 + else: + raise KeyError(f"Unkown model {args.model_name}") + + if isinstance(args.model_type, ModelType): + model_type = args.model_type + elif args.model_type == "encoder_or_decoder": + model_type = ModelType.encoder_or_decoder + elif args.model_type == "encoder_and_decoder": + model_type = ModelType.encoder_and_decoder + else: + raise KeyError(f"Unsupported model_type {args.model_type}") + + model = cls( + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + model_type=model_type, + ) + return model + + +## +# Dataset utilities +## + + +# Heavily inspired by Andreas Köpf: https://github.com/andreaskoepf/epfl-megatron/tree/local_changes/ +def get_attention_mask_and_position_ids(data, attention_mask): + """Build causal attention masks and position id for left to right model. + Builds a (batch, 1, seq, seq)-sized binary causal attention mask from + a (batch, seq)-sized attention mask specifying. + If any value in the input attention_mask is < 0.5, the output + attention mask will mask this position for every token, i.e. out[i, 0, :, j] = True + if in[i, j] < 0.5. + Returns attention_mask, position_ids""" + + # Extract batch size and sequence length. + micro_batch_size, seq_length = data.size() + + # Attention mask (lower triangular). + att_mask_batch = micro_batch_size + attention_mask = ( + attention_mask.unsqueeze(1) + .expand(micro_batch_size, seq_length, seq_length) + .to(data.device) + ) + attention_mask = torch.tril(attention_mask).view( + att_mask_batch, 1, seq_length, seq_length + ) + + # Convert attention mask to binary, True entries will masked + attention_mask = attention_mask < 0.5 + + # Position ids. + position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device) + position_ids = position_ids.unsqueeze(0).expand_as(data) + + return attention_mask, position_ids + + +def get_batch(data_iterator): + """Generate a batch""" + args = get_args() + tokenizer = get_tokenizer() + + # Items and their type. + datatype = torch.int64 + if args.data_type == "gpt": + keys = ["text"] + elif args.data_type == "instruction": + keys = ["text", "attention_mask", "assistant_mask", "pad_mask"] + else: + raise KeyError(f"Unknown dataset type {args.data_type}") + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. + tokens = data_b["text"] + labels = tokens[:, 1:].contiguous() + tokens = tokens[:, :-1].contiguous() + + # Update tokens counter. + counters = get_counters() + n_tokens = torch.tensor(tokens.numel(), device=tokens.device) + if args.data_parallel_size == 1: + n_tokens = n_tokens.item() + else: + group = get_data_parallel_group() + torch.distributed.all_reduce( + n_tokens, op=torch.distributed.ReduceOp.SUM, group=group + ) + n_tokens = n_tokens.item() + counters["tokens"] += n_tokens + + if args.data_type == "gpt": + # Get the masks and position ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, + tokenizer.eod, + args.reset_position_ids, + args.reset_attention_mask, + args.eod_mask_loss, + ) + + return tokens, labels, loss_mask, attention_mask, position_ids + + # Instruction dataset. 
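+ # The loss weighting below is per token: positions inside the assistant
+ # response (assistant_mask == 1) get weight 1.0, padding positions
+ # (pad_mask == 1) get weight 0.0, and the remaining prompt tokens keep
+ # args.scalar_loss_mask.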
+ # Heavily inspired by Andreas Köpf: https://github.com/andreaskoepf/epfl-megatron/tree/local_changes/ + attention_mask = data_b["attention_mask"][:, :-1] + assistant_mask = data_b["assistant_mask"][:, 1:].to(tokens.device) + pad_mask = data_b["pad_mask"][:, 1:].to(tokens.device) + loss_mask = torch.full( + labels.size(), args.scalar_loss_mask, dtype=torch.float, device=tokens.device + ) + loss_mask[assistant_mask == 1] = 1.0 + loss_mask[pad_mask == 1] = 0.0 + attention_mask, position_ids = get_attention_mask_and_position_ids( + tokens, attention_mask + ) + + return tokens, labels, loss_mask, attention_mask, position_ids + + +def data_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + if args.data_type == "gpt": + builder = gpt_build_datasets + elif args.data_type == "instruction": + builder = instruct_build_datasets + + print_rank_0("> building train, validation, and test datasets ...") + train_ds, valid_ds, test_ds = builder( + data_prefix=args.data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + seq_length=args.seq_length, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + train_data_prefix=args.train_data_path, + valid_data_prefix=args.valid_data_path, + test_data_prefix=args.test_data_path, + ) + print_rank_0("> finished creating datasets ...") + + return train_ds, valid_ds, test_ds + + +## +# Loss and forward +## + + +def loss_func(is_training, batch, outputs): + loss_mask = batch[2] + losses, logits = outputs + losses = losses.float() + loss_mask = loss_mask.view(-1).float() + loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() + # Reduce loss for logging. + averaged_loss = average_losses_across_data_parallel_group([loss]) + out_dict = {"lm loss": averaged_loss[0]} + + # Calculate other metrics + if not is_training: + inputs = MetricInput(batch, logits, averaged_loss[0]) + args = get_args() + for metric in map(get_metric, args.metrics): + out_dict.update(metric(inputs)) + + return loss, out_dict + + +def forward_step(data_iterator, model): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. 
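+ # The batch-generator timer brackets get_batch so that data loading and
+ # broadcast time is reported separately in the timing logs.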
+ timers("batch-generator", log_level=2).start() + batch = get_batch(data_iterator) + tokens, labels, loss_mask, attention_mask, position_ids = batch + timers("batch-generator").stop() + + output_tensor = model(tokens, position_ids, attention_mask, labels=labels) + return output_tensor, partial(loss_func, model.training, batch) + + +## +# Main +## + + +def extra_args(parser): + """Text generation arguments.""" + group = parser.add_argument_group(title="validation set") + group.add_argument( + "--model_name", + choices={ + "gpt", + "llama", + "falcon", + "llama2", + "llama3", + "codellama", + "mistral", + "gemma", + }, + default="gpt", + ) + group.add_argument( + "--model_type", + choices={"encoder_or_decoder", "encoder_and_decoder"}, + default="encoder_or_decoder", + ) + group.add_argument("--data_type", choices={"gpt", "instruction"}, default="gpt") + group.add_argument("--log_learning_rate_to_tensorboard", type=bool, default=True) + group.add_argument("--log_loss_scale_to_tensorboard", type=bool, default=True) + return parser + + +if __name__ == "__main__": + args_defaults = {"tokenizer_type": "GPT2BPETokenizer"} + initialize_megatron(extra_args, args_defaults) + args = get_args() + + if args.data_type == "gpt": + collate_fn = None + else: + collate_fn = instruction_collator + + pretrain( + args, + data_provider, + model_provider, + ModelType.encoder_or_decoder, + forward_step, + collate_fn=collate_fn, + ) + print(f"Done {dt.datetime.now(dt.timezone.utc)}") diff --git a/multilinguality_megatron/inspect_weights.py b/multilinguality_megatron/inspect_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..32799a981665ad08b41ac12d49708f1828fd05e2 --- /dev/null +++ b/multilinguality_megatron/inspect_weights.py @@ -0,0 +1,29 @@ +import torch +from transformers import AutoModel, AutoTokenizer, LlamaForCausalLM + +# Load the Llama 2 checkpoint +llama2_checkpoint_path = "/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit" +llama2_tokenizer = AutoTokenizer.from_pretrained(llama2_checkpoint_path) +llama2_model = LlamaForCausalLM.from_pretrained(llama2_checkpoint_path) + +# Load the original Llama 2 model +original_llama2_model_name = "/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9" +original_llama2_tokenizer = AutoTokenizer.from_pretrained(original_llama2_model_name) +original_llama2_model = LlamaForCausalLM.from_pretrained(original_llama2_model_name) + +# Compare the weights of the embedding and output layers +llama2_embedding_weights = llama2_model.get_input_embeddings().weight +original_llama2_embedding_weights = original_llama2_model.get_input_embeddings().weight + +for (name1, param1), (name2, param2) in zip( + llama2_model.named_parameters(), original_llama2_model.named_parameters() +): + try: + if not torch.allclose(param1, param2, atol=1e-7): + print(f"Different weights in {name1} and {name2}") + else: + print(f"Same weights in {name1} and {name2}") + except: + print(f"Couldn't do allclose for layer {name1}") + +a = 1 diff --git a/multilinguality_megatron/loop_deploy.sh b/multilinguality_megatron/loop_deploy.sh new file mode 100644 index 0000000000000000000000000000000000000000..f5bf51b3f77af95299df715d7b1b45763e3b10eb --- /dev/null +++ b/multilinguality_megatron/loop_deploy.sh @@ -0,0 +1,18 @@ +step_size=670 +model_type=llama2 +ckpt_path=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/mc4_parallel_synth_checkpoints/ 
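+# Deploys a sweep of intermediate checkpoints: each pass of the loop below
+# rewrites latest_checkpointed_iteration.txt to point at the next multiple of
+# $step_size and then calls deploy.sh on that checkpoint.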
+name=flavio + +for i in {1..10}; do + iter=`expr $i \* $step_size` + echo $iter + echo ${iter} > $ckpt_path/latest_checkpointed_iteration.txt + bash deploy.sh \ + -p $ckpt_path \ + -v "32000" \ + -m "tiny_llama_${name}_${i}b_tokens" \ + -t $model_type \ + #-f /mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit +done + +#echo "6350" > $ckpt_path/latest_checkpointed_iteration.txt diff --git a/multilinguality_megatron/megatron/__init__.py b/multilinguality_megatron/megatron/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb67678e55db04ca8e056dfa5d871320d6e59ed6 --- /dev/null +++ b/multilinguality_megatron/megatron/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +import torch + +from .global_vars import get_args +from .global_vars import get_current_global_batch_size +from .global_vars import get_num_microbatches +from .global_vars import get_signal_handler +from .global_vars import update_num_microbatches +from .global_vars import get_tokenizer +from .global_vars import get_tensorboard_writer +from .global_vars import get_adlr_autoresume +from .global_vars import get_timers +from .global_vars import get_counters + +from .utils import (print_rank_0, + print_all_nodes, + is_last_rank, + print_rank_last) diff --git a/multilinguality_megatron/megatron/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01b2c9bba2e362c7ffe803bd94d63e331d99bda8 Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/arguments.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/arguments.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e377853d2cc370739b816e19e3b2a1cd371fee18 Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/arguments.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/checkpointing.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/checkpointing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e32ece898074704e99a0b095dff92ede4ddc07ba Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/checkpointing.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/dist_signal_handler.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/dist_signal_handler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79d0f5ecd05b3bb0dff3bf498872b42c3640c78f Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/dist_signal_handler.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/global_vars.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/global_vars.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba503952e06df38601cff386cc8df45e6816ff29 Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/global_vars.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/initialize.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/initialize.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af824512a9f07823f11d5d743f5819c7bf522f4a Binary files /dev/null and 
b/multilinguality_megatron/megatron/__pycache__/initialize.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/metrics.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/metrics.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..341cc4c965ce00b7f529d5dd550d6578ead2318d Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/metrics.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/microbatches.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/microbatches.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ed49148a5b2d17e878f513f809cd1755ace80d6 Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/microbatches.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/optimizer_param_scheduler.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/optimizer_param_scheduler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cb3bb6578b296060347c750205e233dfd513e4c Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/optimizer_param_scheduler.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/p2p_communication.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/p2p_communication.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e2c63f57206b1caabf3f50c48b4957aed67a41a Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/p2p_communication.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/schedules.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/schedules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a615cf47a3d87142945d0b3f65e9808636e00d1 Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/schedules.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/timers.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/timers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce56e848e03ceefe31b09115fed1bda212013581 Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/timers.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/training.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/training.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..141ba6dc38a4685de9e4b8588bc3cb336d6da9dc Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/training.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/__pycache__/utils.cpython-39.pyc b/multilinguality_megatron/megatron/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6260d12668e2742d0ed16610accdfef8d20a18c0 Binary files /dev/null and b/multilinguality_megatron/megatron/__pycache__/utils.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/arguments.py b/multilinguality_megatron/megatron/arguments.py new file mode 100644 index 0000000000000000000000000000000000000000..c18242ab81c05d6074791068ed3c919406d86e1d --- /dev/null +++ b/multilinguality_megatron/megatron/arguments.py @@ -0,0 +1,1098 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""Megatron arguments.""" + +import argparse +import os + +import torch + +import megatron +from megatron.metrics import METRICS +from megatron.model.enums import PositionEmbeddingType + + +def build_base_parser(): + parser = argparse.ArgumentParser(description='Megatron-LM Arguments', + allow_abbrev=False) + # Standard arguments. + parser = _add_network_size_args(parser) + parser = _add_regularization_args(parser) + parser = _add_training_args(parser) + parser = _add_initialization_args(parser) + parser = _add_learning_rate_args(parser) + parser = _add_checkpointing_args(parser) + parser = _add_mixed_precision_args(parser) + parser = _add_distributed_args(parser) + parser = _add_validation_args(parser) + parser = _add_data_args(parser) + parser = _add_autoresume_args(parser) + parser = _add_biencoder_args(parser) + parser = _add_vision_args(parser) + parser = _add_logging_args(parser) + parser = _add_inference_args(parser) + parser = _add_transformer_engine_args(parser) + return parser + + +def parse_args(extra_args_provider=None): + """Parse all arguments.""" + parser = build_base_parser() + # Custom arguments. + if extra_args_provider is not None: + parser = extra_args_provider(parser) + + args = parser.parse_args() + + # Args from environment + args.rank = int(os.getenv('RANK', '0')) + args.world_size = int(os.getenv("WORLD_SIZE", '1')) + return args + + +def validate_args(args, defaults={}): + # Tensor model parallel size. + args.tensor_model_parallel_size = min( + args.tensor_model_parallel_size, args.world_size) + assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\ + ' ({}) is not divisible by tensor model parallel size ({})'.format( + args.world_size, args.tensor_model_parallel_size) + # Pipeline model parallel size. + args.pipeline_model_parallel_size = min( + args.pipeline_model_parallel_size, + (args.world_size // args.tensor_model_parallel_size)) + args.transformer_pipeline_model_parallel_size = ( + args.pipeline_model_parallel_size - 1 + if args.standalone_embedding_stage else + args.pipeline_model_parallel_size + ) + # Checks. + model_parallel_size = args.pipeline_model_parallel_size * \ + args.tensor_model_parallel_size + assert args.world_size % model_parallel_size == 0, 'world size is not'\ + ' divisible by tensor parallel size ({}) times pipeline parallel ' \ + 'size ({})'.format(args.world_size, args.tensor_model_parallel_size, + args.pipeline_model_parallel_size) + args.data_parallel_size = args.world_size // model_parallel_size + if args.rank == 0: + print('using world size: {}, data-parallel-size: {}, ' + 'tensor-model-parallel size: {}, ' + 'pipeline-model-parallel size: {} '.format( + args.world_size, args.data_parallel_size, + args.tensor_model_parallel_size, + args.pipeline_model_parallel_size), flush=True) + if args.pipeline_model_parallel_size > 1: + if args.pipeline_model_parallel_split_rank is not None: + assert args.pipeline_model_parallel_split_rank < \ + args.pipeline_model_parallel_size, 'split rank needs'\ + ' to be less than pipeline model parallel size ({})'.format( + args.pipeline_model_parallel_size) + + if args.recompute_activations: + args.recompute_granularity = 'selective' + del args.recompute_activations + if args.metrics == ["all"]: + args.metrics = list(METRICS) + + + # Set input defaults. + for key in defaults: + # For default to be valid, it should not be provided in the + # arguments that are passed to the program. We check this by + # ensuring the arg is set to None. 
+ if getattr(args, key) is not None: + if args.rank == 0: + print('WARNING: overriding default arguments for {key}:{v} \ + with {key}:{v2}'.format(key=key, v=defaults[key], + v2=getattr(args, key)), + flush=True) + else: + setattr(args, key, defaults[key]) + + # Batch size. + assert args.micro_batch_size is not None + assert args.micro_batch_size > 0 + if args.global_batch_size is None: + args.global_batch_size = args.micro_batch_size * args.data_parallel_size + if args.rank == 0: + print('setting global batch size to {}'.format( + args.global_batch_size), flush=True) + assert args.global_batch_size > 0 + if args.num_layers_per_virtual_pipeline_stage is not None: + assert args.pipeline_model_parallel_size > 2, \ + 'pipeline-model-parallel size should be greater than 2 with ' \ + 'interleaved schedule' + assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \ + 'number of layers is not divisible by number of layers per virtual ' \ + 'pipeline stage' + args.virtual_pipeline_model_parallel_size = \ + (args.num_layers // args.transformer_pipeline_model_parallel_size) // \ + args.num_layers_per_virtual_pipeline_stage + else: + args.virtual_pipeline_model_parallel_size = None + + # Parameters dtype. + args.params_dtype = torch.float + if args.fp16: + assert not args.bf16 + args.params_dtype = torch.half + if args.bf16: + assert not args.fp16 + args.params_dtype = torch.bfloat16 + # bfloat16 requires gradient accumulation and all-reduce to + # be done in fp32. + if not args.accumulate_allreduce_grads_in_fp32: + args.accumulate_allreduce_grads_in_fp32 = True + if args.rank == 0: + print('accumulate and all-reduce gradients in fp32 for ' + 'bfloat16 data type.', flush=True) + + if args.rank == 0: + print('using {} for parameters ...'.format(args.params_dtype), + flush=True) + + # If we do accumulation and all-reduces in fp32, we need to have local DDP + # and we should make sure use-contiguous-buffers-in-local-ddp is not off. + if args.accumulate_allreduce_grads_in_fp32: + assert args.DDP_impl == 'local' + assert args.use_contiguous_buffers_in_local_ddp + + # If we use the distributed optimizer, we need to have local DDP + # and we should make sure use-contiguous-buffers-in-local-ddp is on. + if args.use_distributed_optimizer: + assert args.DDP_impl == 'local' + assert args.use_contiguous_buffers_in_local_ddp + + # For torch DDP, we do not use contiguous buffer + if args.DDP_impl == 'torch': + args.use_contiguous_buffers_in_local_ddp = False + + if args.dataloader_type is None: + args.dataloader_type = 'single' + + # Consumed tokens. + args.consumed_train_samples = 0 + args.consumed_valid_samples = 0 + + # Support for variable sequence lengths across batches/microbatches. + # set it if the dataloader supports generation of variable sequence lengths + # across batches/microbatches. Due to additional communication overhead + # during pipeline parallelism, it should not be set if sequence length + # is constant during training. + if args.variable_seq_lengths is None: + args.variable_seq_lengths = False + + # Iteration-based training. + if args.train_iters: + # If we use iteration-based training, make sure the + # sample-based options are off. 
+ assert args.train_samples is None, \ + 'expected iteration-based training' + assert args.lr_decay_samples is None, \ + 'expected iteration-based learning rate decay' + assert args.lr_warmup_samples == 0, \ + 'expected iteration-based learning rate warmup' + assert args.rampup_batch_size is None, \ + 'expected no batch-size rampup for iteration-based training' + if args.lr_warmup_fraction is not None: + assert args.lr_warmup_iters == 0, \ + 'can only specify one of lr_warmup_fraction and lr_warmup_iters' + + # Sample-based training. + if args.train_samples: + # If we use sample-based training, make sure the + # iteration-based options are off. + assert args.train_iters is None, \ + 'expected sample-based training' + assert args.lr_decay_iters is None, \ + 'expected sample-based learning rate decay' + assert args.lr_warmup_iters == 0, \ + 'expected sample-based learning rate warmup' + if args.lr_warmup_fraction is not None: + assert args.lr_warmup_samples == 0, \ + 'can only specify one of lr_warmup_fraction ' \ + 'and lr_warmup_samples' + + if args.num_layers is not None: + assert args.encoder_num_layers is None, \ + 'cannot have both num_layers and encoder_num_layers specified' + args.encoder_num_layers = args.num_layers + else: + assert args.encoder_num_layers is not None, \ + 'either num_layers or encoder_num_layers should be specified' + args.num_layers = args.encoder_num_layers + + # Check required arguments. + # required_args = ['num_layers', 'hidden_size', 'num_attention_heads', + # 'max_position_embeddings'] + required_args = ['num_layers', 'hidden_size', 'num_attention_heads'] + for req_arg in required_args: + _check_arg_is_not_none(args, req_arg) + + # Checks. + if args.ffn_hidden_size is None: + args.ffn_hidden_size = 4 * args.hidden_size + + if args.kv_channels is None: + assert args.hidden_size % args.num_attention_heads == 0 + args.kv_channels = args.hidden_size // args.num_attention_heads + + if args.num_attention_heads_kv is None: + args.num_attention_heads_kv = args.num_attention_heads + + if args.seq_length is not None: + assert args.encoder_seq_length is None + args.encoder_seq_length = args.seq_length + else: + assert args.encoder_seq_length is not None + args.seq_length = args.encoder_seq_length + + if not isinstance(args.position_embedding_type, PositionEmbeddingType): + args.position_embedding_type = PositionEmbeddingType[args.position_embedding_type] + if args.position_embedding_type in [PositionEmbeddingType.absolute, PositionEmbeddingType.rotary]: + assert args.max_position_embeddings is not None + if args.seq_length is not None: + assert args.max_position_embeddings >= args.seq_length + if args.decoder_seq_length is not None: + assert args.max_position_embeddings >= args.decoder_seq_length + assert args.rope_scaling_factor >= 1, 'rope_scaling_factor must be >= 1' + else: + assert args.max_position_embeddings is None + + if args.lr is not None: + assert args.min_lr <= args.lr + if args.save is not None: + assert args.save_interval is not None + # Mixed precision checks. + if args.fp16_lm_cross_entropy: + assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.' + if args.fp32_residual_connection: + assert args.fp16 or args.bf16, \ + 'residual connection in fp32 only supported when using fp16 or bf16.' 
+ + if args.weight_decay_incr_style == 'constant': + assert args.start_weight_decay is None + assert args.end_weight_decay is None + args.start_weight_decay = args.weight_decay + args.end_weight_decay = args.weight_decay + else: + assert args.start_weight_decay is not None + assert args.end_weight_decay is not None + + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + # Persistent fused layer norm. + if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 11): + args.no_persist_layer_norm = True + if args.rank == 0: + print('Persistent fused layer norm kernel is supported from ' + 'pytorch v1.11 (nvidia pytorch container paired with v1.11). ' + 'Defaulting to no_persist_layer_norm=True') + + # Activation recomputing. + if args.distribute_saved_activations: + assert args.tensor_model_parallel_size > 1, 'can distribute ' \ + 'recomputed activations only across tensor model ' \ + 'parallel groups' + assert args.recompute_granularity == 'full', \ + 'distributed recompute activations is only '\ + 'application to full recompute granularity' + assert args.recompute_method is not None, \ + 'for distributed recompute activations to work you '\ + 'need to use a recompute method ' + assert TORCH_MAJOR >= 1 and TORCH_MINOR >= 10, \ + 'distributed recompute activations are supported for pytorch ' \ + 'v1.10 and above (Nvidia Pytorch container >= 21.07). Current ' \ + 'pytorch version is v%s.%s.' % (TORCH_MAJOR, TORCH_MINOR) + + # Tranformer-Engine/FP8 related checking + if args.fp8_e4m3 or args.fp8_hybrid: + assert args.transformer_impl == 'transformer_engine', \ + 'transformer-engine required for fp8 training and inference' + + assert not (args.fp8_e4m3 and args.fp8_hybrid), \ + 'cannot train with both fp8 e4m3 and hybrid formatting' + + if args.fp16: + assert args.transformer_impl == 'local', \ + 'transformer-engine not yet approved for fp16 training and inference' + + if args.recompute_granularity == 'selective': + assert args.recompute_method is None, \ + 'recompute method is not yet supported for ' \ + 'selective recomputing granularity' + + # Parallel attention. + if not args.parallel_attn: + assert not args.parallel_layernorm, "parallel_layernorm only implemented with parallel_attention" + + # disable sequence parallelism when tp=1 + # to avoid change in numerics when + # sequence_parallelism is enabled. + if args.tensor_model_parallel_size == 1: + args.sequence_parallel = False + + # disable async_tensor_model_parallel_allreduce when + # model parallel memory optimization is enabled + if args.sequence_parallel: + args.async_tensor_model_parallel_allreduce = False + + if os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') and os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1": + if args.sequence_parallel: + raise RuntimeError( + "Using sequence parallelism requires setting the environment variable " + "CUDA_DEVICE_MAX_CONNECTIONS to 1") + if args.async_tensor_model_parallel_allreduce: + raise RuntimeError( + "Using async gradient all reduce requires setting the environment " + "variable CUDA_DEVICE_MAX_CONNECTIONS to 1") + _print_args(args) + return args + + +def _print_args(args): + """Print arguments.""" + if args.rank == 0: + print('------------------------ arguments ------------------------', + flush=True) + str_list = [] + for arg in vars(args): + dots = '.' 
* (48 - len(arg)) + str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg))) + for arg in sorted(str_list, key=lambda x: x.lower()): + print(arg, flush=True) + print('-------------------- end of arguments ---------------------', + flush=True) + + +def _check_arg_is_not_none(args, arg): + assert getattr(args, arg) is not None, '{} argument is None'.format(arg) + + +def _add_transformer_engine_args(parser): + group = parser.add_argument_group(title='Transformer-Engine') + group.add_argument('--fp8_e4m3', action='store_true', + help='E4M3 TransformerLayer', dest='fp8_e4m3') + group.add_argument('--fp8_hybrid', action='store_true', + help='Hybrid FP8 TransformerLayer') + group.add_argument('--no_fp8_wgrad', action='store_false', + help='Execute wgrad in higher precision even for FP8 runs', dest='fp8_wgrad') + group.add_argument('--fp8_margin', type=int, default=0, + help='Scaling margin for fp8', dest='fp8_margin') + group.add_argument('--fp8_interval', type=int, default=1, + help='Scaling update interval for fp8', dest='fp8_interval') + group.add_argument('--transformer_impl', default='local', + choices=['local', 'transformer_engine'], + help='Which Transformer implementation to use.') + group.add_argument('--fp8_amax_history_len', type=int, default=1, + help='Number of steps for which amax history is recorded per tensor') + group.add_argument('--fp8_amax_compute_algo', default='most_recent', + choices=['most_recent', 'max'], + help='Algorithm for computing amax from history') + return parser + + +def _add_inference_args(parser): + group = parser.add_argument_group(title='inference') + group.add_argument('--inference_batch_times_seqlen_threshold', + type=int, default=512, + help='During inference, if batch-size times ' + 'sequence-length is smaller than this threshold ' + 'then we will not use pipelining, otherwise we will.') + group.add_argument('--max_tokens_to_oom', + type=int, default=12000, + help='Maximum number of tokens during inference' + 'tokens here is # in prompt + # to generate' + 'Allows us to throw an error before OOM crashes server') + return parser + + +def _add_network_size_args(parser): + group = parser.add_argument_group(title='network size') + group.add_argument('--num_layers', type=int, default=None, + help='Number of transformer layers.') + group.add_argument('--encoder_num_layers', type=int, default=None, + help='Number of encoder transformer layers.') + group.add_argument('--decoder_num_layers', type=int, default=None, + help='Number of decoder transformer layers.') + group.add_argument('--hidden_size', type=int, default=None, + help='Tansformer hidden size.') + group.add_argument('--ffn_hidden_size', type=int, default=None, + help='Transformer Feed-Forward Network hidden size. ' + 'This is set to 4*hidden_size if not provided') + group.add_argument('--num_attention_heads', type=int, default=None, + help='Number of transformer attention heads.') + group.add_argument('--num_attention_heads_kv', type=int, default=None, + help='Number of transformer attention heads for the keys and values.') + group.add_argument('--kv_channels', type=int, default=None, + help='Projection weights dimension in multi-head ' + 'attention. This is set to ' + ' args.hidden_size // args.num_attention_heads ' + 'if not provided.') + group.add_argument('--max_position_embeddings', type=int, default=None, + help='Maximum number of position embeddings to use. 
' + 'This is the size of position embedding.') + group.add_argument('--make_vocab_size_divisible_by', type=int, default=128, + help='Pad the vocab size to be divisible by this value.' + 'This is added for computational efficieny reasons.') + group.add_argument('--layernorm_epsilon', type=float, default=1e-5, + help='Layer norm epsilon.') + group.add_argument('--apply_residual_connection_post_layernorm', + action='store_true', + help='If set, use original BERT residual connection ' + 'ordering.') + group.add_argument('--use_bias', action='store_true', + help='If set then use bias.') # Added during hackathon + # Extracted from: https://github.com/facebookresearch/llama/blob/main/llama/model.py + group.add_argument('--use_rms_norm', + action='store_true', + help='If set, use RMSNorm instead of LayerNorm.') + group.add_argument('--use_post_ln', + action='store_true', + help='If set, use Post-LN transformer (in the notation of https://sh-tsang.medium.com/review-pre-ln-transformer-on-layer-normalization-in-the-transformer-architecture-b6c91a89e9ab).') + group.add_argument('--onnx_safe', type=bool, required=False, + help='Use workarounds for known problems with ' + 'Torch ONNX exporter') + # Extracted from: https://github.com/bigscience-workshop/Megatron-DeepSpeed + group.add_argument('--glu_activation', type=str, + choices=megatron.model.glu_activations.GLU_ACTIVATIONS.keys(), + help='GLU activations to use.' + ) + group.add_argument('--position_embedding_type', type=lambda x: PositionEmbeddingType[x], + choices=list(PositionEmbeddingType), + default=PositionEmbeddingType.absolute, + help='Define position embedding type ("absolute" | "rotary"). "absolute" by default.') + group.add_argument('--rope_scaling_factor', type=float, default=1.0, + help='Set the linear RoPE scaling factor for sequence interpolation.') + group.add_argument('--rope_theta', type=float, default=10000.0, + help='Set RoPE theta base (llama/llama2: 1e4, codellama: 1e6).') + # Added mainly for Falcon + group.add_argument("--parallel_attn", action="store_true", + help="Whether to use parallel mlp and attn computation with a single layernorm") + group.add_argument("--parallel_layernorm", action="store_true", + help="Whether to use a dedicated layernorm for the mlp in the attention") + # Added mainly for Llama + group.add_argument("--no_tie_embed_logits", action="store_false", dest="tie_embed_logits", + help=("If set, the weights of the word embedding and lm_head " + "are not tied")) + group.add_argument("--sliding_window_size", type=int, default=None, + help="Whether to use sliding window attention for Mistral. Default is None, which means no sliding window attention.") + return parser + + +def _add_logging_args(parser): + group = parser.add_argument_group(title='logging') + group.add_argument('--log_params_norm', action='store_true', + help='If set, calculate and log parameters norm.') + group.add_argument('--log_num_zeros_in_grad', action='store_true', + help='If set, calculate and log the number of zeros in gradient.') + group.add_argument('--timing_log_level', type=int, + default=0, choices=range(0, 3), + help='Granularity level to measure and report timing. ' + ' 0: report only iteration time and make sure timing ' + ' does not introduce extra overhead.' + ' 1: report timing for operations that are executed ' + ' very limited times (basically once) during ' + ' each iteration (such as gradient all-reduce) ' + ' 2: report timing for operations that migh be ' + ' executed numerous times during each iteration. 
' + 'Note that setting the level to 1 or 2 might ' + 'cause increase in iteration time.') + group.add_argument('--barrier_with_L1_time', action='store_false', + help='If not set, use barrier with level 1 time ' + 'measurements. Note that this is up to the user ' + 'to make sure calling barrier with their timers ' + 'will not result in hangs. This can happen if for ' + 'example the user adds a level 1 timer that is not ' + 'called by all ranks.') + group.add_argument('--timing_log_option', type=str, default='minmax', + choices=['max', 'minmax', 'all'], + help='Options for logging timing:' + ' max: report the max timing across all ranks' + ' minmax: report min and max timings across all ranks' + ' all: report timings of all ranks.') + group.add_argument('--tensorboard_log_interval', type=int, default=1, + help='Report to tensorboard interval.') + group.add_argument('--tensorboard_queue_size', type=int, default=1000, + help='Size of the tensorboard queue for pending events ' + 'and summaries before one of the ‘add’ calls forces a ' + 'flush to disk.') + group.add_argument('--log_timers_to_tensorboard', action='store_true', + help='If set, write timers to tensorboard.') + group.add_argument('--log_batch_size_to_tensorboard', action='store_true', + help='If set, write batch-size to tensorboard.') + group.add_argument('--log_validation_ppl_to_tensorboard', + action='store_true', + help='If set, write validation perplexity to ' + 'tensorboard.') + group.add_argument('--log_memory_to_tensorboard', + action='store_true', + help='Enable memory logging to tensorboard.') + group.add_argument('--log_world_size_to_tensorboard', + action='store_true', + help='Enable world size logging to tensorboard.') + group.add_argument('--wandb_logger', + action='store_true', + help='Enable logging to Weights & Biases instead of tensorboard.') + group.add_argument('--wandb_project', type=str, default=None, + help='Project name for Weights & Biases.') + group.add_argument('--wandb_entity', type=str, default="meditron", + help='Entity/team name for Weights & Biases.') + group.add_argument('--wandb_id',type=str,default=None, + help="Unique ID to identify this run, alternatively can set `WANDB_RUN_ID`.") + group.add_argument('--wandb_resume',action="store_true", + help="If set, we resume logging for the id given instead of launching a new run (errors if id given and resume=False).") + group.add_argument("--wandb_api_key",type=str,default=None, + help="API key for Weights & Biases, needs to be set if not set in environment variable `WANDB_API_KEY`.") + group.add_argument("--metrics", default=[], nargs="+", choices=list(METRICS) + ["all"], + help="Metrics to report when logging") + return parser + + +def _add_regularization_args(parser): + group = parser.add_argument_group(title='regularization') + group.add_argument('--attention_dropout', type=float, default=0.1, + help='Post attention dropout probability.') + group.add_argument('--hidden_dropout', type=float, default=0.1, + help='Dropout probability for hidden state transformer.') + # see "LIMA: Less Is More for Alignment", Zhou et al 2023, https://arxiv.org/abs/2305.11206 + group.add_argument('--lima_dropout', action='store_true', + help='Linearly raise the hidden_dropout probability from 0.0 at the first layer to the full hidden_dropout value at the last layer.') + group.add_argument('--weight_decay', type=float, default=0.01, + help='Weight decay coefficient for L2 regularization.') + group.add_argument('--start_weight_decay', type=float, + help='Initial weight decay 
coefficient for L2 regularization.') + group.add_argument('--end_weight_decay', type=float, + help='End of run weight decay coefficient for L2 regularization.') + group.add_argument('--weight_decay_incr_style', type=str, default='constant', + choices=['constant', 'linear', 'cosine'], + help='Weight decay increment function.') + group.add_argument('--clip_grad', type=float, default=1.0, + help='Gradient clipping based on global L2 norm.') + group.add_argument('--adam_beta1', type=float, default=0.9, + help='First coefficient for computing running averages ' + 'of gradient and its square') + group.add_argument('--adam_beta2', type=float, default=0.999, + help='Second coefficient for computing running averages ' + 'of gradient and its square') + group.add_argument('--adam_eps', type=float, default=1e-08, + help='Term added to the denominator to improve' + 'numerical stability') + group.add_argument('--sgd_momentum', type=float, default=0.9, + help='Momentum factor for sgd') + + return parser + + +def _add_training_args(parser): + group = parser.add_argument_group(title='training') + group.add_argument('--micro_batch_size', type=int, default=None, + help='Batch size per model instance (local batch size). ' + 'Global batch size is local batch size times data ' + 'parallel size times number of micro batches.') + group.add_argument('--global_batch_size', type=int, default=None, + help='Training batch size. If set, it should be a ' + 'multiple of micro_batch_size times data-parallel-size. ' + 'If this value is None, then ' + 'use micro_batch_size * data-parallel-size as the ' + 'global batch size. This choice will result in 1 for ' + 'number of micro-batches.') + group.add_argument('--rampup_batch_size', nargs='*', default=None, + help='Batch size ramp up with the following values:' + ' --rampup_batch_size ' + ' ' + ' ' + 'For example:' + ' --rampup_batch_size 16 8 300000 \ ' + ' --global_batch_size 1024' + 'will start with global batch size 16 and over ' + ' (1024 - 16) / 8 = 126 intervals will increase' + 'the batch size linearly to 1024. In each interval' + 'we will use approximately 300000 / 126 = 2380 samples.') + group.add_argument('--recompute_activations', action='store_true', + help='recompute activation to allow for training ' + 'with larger models, sequences, and batch sizes.') + group.add_argument('--recompute_granularity', type=str, default=None, + choices=['full', 'selective'], + help='Checkpoint activations to allow for training ' + 'with larger models, sequences, and batch sizes. 
' + 'It is supported at two granularities 1) full: ' + 'whole transformer layer is recomputed, ' + '2) selective: core attention part of the transformer ' + 'layer is recomputed.') + group.add_argument('--distribute_saved_activations', + action='store_true', + help='If set, distribute recomputed activations ' + 'across model parallel group.') + group.add_argument('--recompute_method', type=str, default=None, + choices=['uniform', 'block'], + help='1) uniform: uniformly divide the total number of ' + 'Transformer layers and recompute the input activation of ' + 'each divided chunk at specified granularity, ' + '2) recompute the input activations of only a set number of ' + 'individual Transformer layers per pipeline stage and do the ' + 'rest without any recomputing at specified granularity' + 'default) do not apply activations recompute to any layers') + group.add_argument('--recompute_num_layers', type=int, default=1, + help='1) uniform: the number of Transformer layers in each ' + 'uniformly divided recompute unit, ' + '2) block: the number of individual Transformer layers ' + 'to recompute within each pipeline stage.') + group.add_argument('--train_iters', type=int, default=None, + help='Total number of iterations to train over all ' + 'training runs. Note that either train_iters or ' + 'train_samples should be provided.') + group.add_argument('--skip_iters', type=int, nargs='*', default=[], + help=('One or more iterations to ignore. Neither the forward ' + 'nor backward pass will be computed for this iterations')) + group.add_argument('--train_samples', type=int, default=None, + help='Total number of samples to train over all ' + 'training runs. Note that either train_iters or ' + 'train_samples should be provided.') + group.add_argument('--log_interval', type=int, default=100, + help='Report loss and timing interval.') + group.add_argument('--exit_interval', type=int, default=None, + help='Exit the program after the iteration is divisible ' + 'by this value.') + group.add_argument('--exit_duration_in_mins', type=int, default=None, + help='Exit the program after this many minutes.') + group.add_argument('--exit_signal_handler', action='store_true', + help='Dynamically save the checkpoint and shutdown the ' + 'training if SIGTERM is received') + group.add_argument('--tensorboard_dir', type=str, default=None, + help='Write TensorBoard logs to this directory.') + group.add_argument('--no_masked_softmax_fusion', + action='store_false', + help='Disable fusion of query_key_value scaling, ' + 'masking, and softmax.', + dest='masked_softmax_fusion') + group.add_argument('--no_bias_gelu_fusion', action='store_false', + help='Disable bias and gelu fusion.', + dest='bias_gelu_fusion') + group.add_argument('--no_bias_dropout_fusion', action='store_false', + help='Disable bias and dropout fusion.', + dest='bias_dropout_fusion') + group.add_argument('--use_flash_attn', action='store_true', + help='use FlashAttention implementation of attention. 
' + 'https://arxiv.org/abs/2205.14135') + group.add_argument('--optimizer', type=str, default='adam', + choices=['adam', 'sgd'], + help='Optimizer function') + group.add_argument('--dataloader_type', type=str, default=None, + choices=['single', 'cyclic'], + help='Single pass vs multiple pass data loader') + group.add_argument('--no_async_tensor_model_parallel_allreduce', + action='store_false', + help='Disable asynchronous execution of ' + 'tensor-model-parallel all-reduce with weight ' + 'gradient compuation of a column-linear layer.', + dest='async_tensor_model_parallel_allreduce') + group.add_argument('--no_persist_layer_norm', action='store_true', + help='Disable using persistent fused layer norm kernel. ' + 'This kernel supports only a set of hidden sizes. Please ' + 'check persist_ln_hidden_sizes if your hidden ' + 'size is supported.') + group.add_argument('--sequence_parallel', action='store_true', + help='Enable sequence parallel optimization.') + group.add_argument('--no_gradient_accumulation_fusion', + action='store_false', + help='Disable fusing gradient accumulation to weight ' + 'gradient computation of linear layers', + dest='gradient_accumulation_fusion') + group.add_argument('--freeze_layers', action='store_true', + help='Freeze layers besides embedding ones.') + return parser + + +def _add_initialization_args(parser): + group = parser.add_argument_group(title='initialization') + group.add_argument('--seed', type=int, default=1234, + help='Random seed used for python, numpy, ' + 'pytorch, and cuda.') + group.add_argument('--data_parallel_random_init', action='store_true', + help='Enable random initialization of params ' + 'across data parallel ranks') + group.add_argument('--init_method_std', type=float, default=0.02, + help='Standard deviation of the zero mean normal ' + 'distribution used for weight initialization.') + group.add_argument('--init_method_xavier_uniform', action='store_true', + help='Enable Xavier uniform parameter initialization') + return parser + + +def _add_learning_rate_args(parser): + group = parser.add_argument_group(title='learning rate') + group.add_argument('--lr', type=float, default=None, + help='Initial learning rate. Depending on decay style ' + 'and initial warmup, the learing rate at each ' + 'iteration would be different.') + group.add_argument('--lr_decay_style', type=str, default='linear', + choices=['constant', 'linear', 'cosine', 'inverse-square-root'], + help='Learning rate decay function.') + group.add_argument('--lr_decay_iters', type=int, default=None, + help='number of iterations to decay learning rate over,' + ' If None defaults to `--train_iters`') + group.add_argument('--lr_decay_samples', type=int, default=None, + help='number of samples to decay learning rate over,' + ' If None defaults to `--train_samples`') + group.add_argument('--lr_warmup_fraction', type=float, default=None, + help='fraction of lr-warmup-(iters/samples) to use ' + 'for warmup (as a float)') + group.add_argument('--lr_warmup_iters', type=int, default=0, + help='number of iterations to linearly warmup ' + 'learning rate over.') + group.add_argument('--lr_warmup_samples', type=int, default=0, + help='number of samples to linearly warmup ' + 'learning rate over.') + group.add_argument('--min_lr', type=float, default=0.0, + help='Minumum value for learning rate. 
The scheduler' + 'clip values below this threshold.') + group.add_argument('--override_opt_param_scheduler', action='store_true', + help='Reset the values of the scheduler (learning rate,' + 'warmup iterations, minimum learning rate, maximum ' + 'number of iterations, and decay style from input ' + 'arguments and ignore values from checkpoints. Note' + 'that all the above values will be reset.') + group.add_argument('--use_checkpoint_opt_param_scheduler', action='store_true', + help='Use checkpoint to set the values of the scheduler ' + '(learning rate, warmup iterations, minimum learning ' + 'rate, maximum number of iterations, and decay style ' + 'from checkpoint and ignore input arguments.') + group.add_argument('--annealing', action='store_true',) + return parser + + +def _add_checkpointing_args(parser): + group = parser.add_argument_group(title='checkpointing') + + group.add_argument('--save', type=str, default=None, + help='Output directory to save checkpoints to.') + group.add_argument('--save_interval', type=int, default=None, + help='Number of iterations between checkpoint saves.') + group.add_argument('--no_save_optim', action='store_true', default=None, + help='Do not save current optimizer.') + group.add_argument('--no_save_rng', action='store_true', default=None, + help='Do not save current rng state.') + group.add_argument('--load', type=str, default=None, + help='Directory containing a model checkpoint.') + group.add_argument('--no_load_optim', action='store_true', default=None, + help='Do not load optimizer when loading checkpoint.') + group.add_argument('--no_load_rng', action='store_true', default=None, + help='Do not load rng state when loading checkpoint.') + group.add_argument('--finetune', action='store_true', + help='Load model for finetuning. Do not load optimizer ' + 'or rng state from checkpoint and set iteration to 0. ' + 'Assumed when loading a release checkpoint.') + group.add_argument('--no_initialization', action='store_false', + help='Do not perform initialization when building model, ' + 'can reduce startup time when definitely loading from a ' + 'checkpoint', + dest='perform_initialization') + group.add_argument('--use_checkpoint_args', action='store_true', + help='Override any command line arguments with arguments ' + 'from the checkpoint') + return parser + + +def _add_mixed_precision_args(parser): + group = parser.add_argument_group(title='mixed precision') + group.add_argument('--fp16', action='store_true', + help='Run model in fp16 mode.') + group.add_argument('--bf16', action='store_true', + help='Run model in bfloat16 mode.') + group.add_argument('--loss_scale', type=float, default=None, + help='Static loss scaling, positive power of 2 ' + 'values can improve fp16 convergence. 
If None, dynamic' + 'loss scaling is used.') + group.add_argument('--initial_loss_scale', type=float, default=2**32, + help='Initial loss scale for dynamic loss scaling.') + group.add_argument('--min_loss_scale', type=float, default=1.0, + help='Minimum loss scale for dynamic loss scale.') + group.add_argument('--loss_scale_window', type=float, default=1000, + help='Window over which to raise/lower dynamic scale.') + group.add_argument('--hysteresis', type=int, default=2, + help='hysteresis for dynamic loss scaling') + group.add_argument('--fp32_residual_connection', action='store_true', + help='Move residual connections to fp32.') + group.add_argument('--no_query_key_layer_scaling', action='store_false', + help='Do not scale Q * K^T by 1 / layer-number.', + dest='apply_query_key_layer_scaling') + group.add_argument('--attention_softmax_in_fp32', action='store_true', + help='Run attention masking and softmax in fp32. ' + 'This flag is ignored unless ' + '--no_query_key_layer_scaling is specified.') + group.add_argument('--accumulate_allreduce_grads_in_fp32', + action='store_true', + help='Gradient accumulation and all-reduce in fp32.') + group.add_argument('--fp16_lm_cross_entropy', + action='store_true', + help='Move the cross entropy unreduced loss calculation' + 'for lm head to fp16.') + return parser + + +def _add_distributed_args(parser): + group = parser.add_argument_group(title='distributed') + group.add_argument('--tensor_model_parallel_size', type=int, default=1, + help='Degree of tensor model parallelism.') + group.add_argument('--pipeline_model_parallel_size', type=int, default=1, + help='Degree of pipeline model parallelism.') + group.add_argument('--pipeline_model_parallel_split_rank', + type=int, default=None, + help='Rank where encoder and decoder should be split.') + group.add_argument('--num_layers_per_virtual_pipeline_stage', type=int, default=None, + help='Number of layers per virtual pipeline stage') + group.add_argument('--distributed_backend', default='nccl', + choices=['nccl', 'gloo'], + help='Which backend to use for distributed training.') + group.add_argument('--DDP_impl', default='local', + choices=['local', 'torch'], + help='which DistributedDataParallel implementation ' + 'to use.') + group.add_argument('--no_contiguous_buffers_in_local_ddp', + action='store_false', help='If set, dont use ' + 'contiguous buffer in local DDP.', + dest='use_contiguous_buffers_in_local_ddp') + group.add_argument('--no_scatter_gather_tensors_in_pipeline', + action='store_false', + help='Use scatter/gather to optimize communication of tensors in pipeline', + dest='scatter_gather_tensors_in_pipeline') + group.add_argument('--use_ring_exchange_p2p', action='store_true', + default=False, help='If set, use custom-built ring exchange ' + 'for p2p communications. Note that this option will require ' + 'a custom built image that support ring-exchange p2p.') + group.add_argument('--local_rank', type=int, default=None, + help='local rank passed from distributed launcher.') + group.add_argument('--use_cpu_initialization', action='store_true', + default=None, help='If set, affine parallel weights ' + 'initialization uses CPU') + group.add_argument('--empty_unused_memory_level', default=0, type=int, + choices=[0, 1, 2], + help='Call torch.cuda.empty_cache() each iteration ' + '(training and eval), to reduce fragmentation.' 
+ '0=off, 1=moderate, 2=aggressive.') + group.add_argument('--standalone_embedding_stage', action='store_true', + default=False, help='If set, *input* embedding layer ' + 'is placed on its own pipeline stage, without any ' + 'transformer layers. (For T5, this flag currently only ' + 'affects the encoder embedding.)') + group.add_argument('--use_distributed_optimizer', action='store_true', + help='Use distributed optimizer.') + return parser + + +def _add_validation_args(parser): + group = parser.add_argument_group(title='validation') + group.add_argument('--eval_iters', type=int, default=100, + help='Number of iterations to run for evaluation' + 'validation/test for.') + group.add_argument('--eval_interval', type=int, default=1000, + help='Interval between running evaluation on ' + 'validation set.') + return parser + + +def _add_data_args(parser): + group = parser.add_argument_group(title='data and dataloader') + group.add_argument('--data_path', nargs='*', default=None, + help='Path to the training dataset. Accepted format:' + '1) a single data path, 2) multiple datasets in the' + 'form: dataset1-weight dataset1-path dataset2-weight ' + 'dataset2-path ... It is used with --split when a ' + 'single dataset used for all three: train, valid ' + 'and test. It is exclusive to the other ' + '--*-data_path args') + group.add_argument('--split', type=str, default='969, 30, 1', + help='Comma-separated list of proportions for training,' + ' validation, and test split. For example the split ' + '`90,5,5` will use 90%% of data for training, 5%% for ' + 'validation and 5%% for test.') + group.add_argument('--train_data_path', nargs='*', default=None, + help='Path to the training dataset. Accepted format:' + '1) a single data path, 2) multiple datasets in the' + 'form: dataset1-weight dataset1-path dataset2-weight ' + 'dataset2-path ...') + group.add_argument('--valid_data_path', nargs='*', default=None, + help='Path to the validation dataset. Accepted format:' + '1) a single data path, 2) multiple datasets in the' + 'form: dataset1-weight dataset1-path dataset2-weight ' + 'dataset2-path ...') + group.add_argument('--test_data_path', nargs='*', default=None, + help='Path to the test dataset. Accepted format:' + '1) a single data path, 2) multiple datasets in the' + 'form: dataset1-weight dataset1-path dataset2-weight ' + 'dataset2-path ...') + group.add_argument('--vocab_file', type=str, default=None, + help='Path to the vocab file.') + group.add_argument('--merge_file', type=str, default=None, + help='Path to the BPE merge file.') + group.add_argument('--vocab_extra_ids', type=int, default=0, + help='Number of additional vocabulary tokens. ' + 'They are used for span masking in the T5 model') + group.add_argument('--vocab_extra_ids_list', type=str, default=None, + help='comma separated list of special vocab ids to add to the tokenizer') + group.add_argument('--seq_length', type=int, default=None, + help='Maximum sequence length to process.') + group.add_argument('--variable_seq_lengths', action='store_true', default=None, + help='Enable variable sequence lengths.') + group.add_argument('--scalar_loss_mask', type=float, default=0.0, + help=('Instruction-tuning argument: Scalar to multiply the ' + 'loss of the "masked out" tokens (usually the user ' + 'tokens, not assistant ones). Set to zero (default) ' + 'to completely remove the loss of said tokens')) + group.add_argument('--encoder_seq_length', type=int, default=None, + help='Maximum encoder sequence length to process.' 
+ 'This should be exclusive of --seq_length') + group.add_argument('--decoder_seq_length', type=int, default=None, + help="Maximum decoder sequence length to process.") + group.add_argument('--retriever_seq_length', type=int, default=256, + help='Maximum sequence length for the biencoder model ' + 'for retriever') + group.add_argument('--sample_rate', type=float, default=1.0, + help='sample rate for training data. Supposed to be 0 ' + ' < sample_rate < 1') + group.add_argument('--mask_prob', type=float, default=0.15, + help='Probability of replacing a token with mask.') + group.add_argument('--short_seq_prob', type=float, default=0.1, + help='Probability of producing a short sequence.') + group.add_argument('--mmap_warmup', action='store_true', + help='Warm up mmap files.') + group.add_argument('--num_workers', type=int, default=2, + help="Dataloader number of workers.") + group.add_argument('--tokenizer_type', type=str, + default=None, + choices=['BertWordPieceLowerCase', + 'BertWordPieceCase', + 'GPT2BPETokenizer', + 'SentencePieceTokenizer', + 'PretrainedFromHF', + 'FalconTokenizer'], + help='What type of tokenizer to use.') + group.add_argument('--tokenizer_model', type=str, default=None, + help='Sentencepiece tokenizer model.') + group.add_argument("--no_new_tokens", action="store_false", dest="new_tokens", + help=("Do not add special tokens (e.g. CLS, MASK, etc) " + "in the sentenciepiece tokenizer")) + group.add_argument('--data_impl', type=str, default='infer', + choices=['lazy', 'cached', 'mmap', 'infer'], + help='Implementation of indexed datasets.') + group.add_argument('--reset_position_ids', action='store_true', + help='Reset posistion ids after end-of-document token.') + group.add_argument('--reset_attention_mask', action='store_true', + help='Reset self attention maske after ' + 'end-of-document token.') + group.add_argument('--eod_mask_loss', action='store_true', + help='Mask loss for the end of document tokens.') + return parser + + +def _add_autoresume_args(parser): + group = parser.add_argument_group(title='autoresume') + group.add_argument('--adlr_autoresume', action='store_true', + help='Enable autoresume on adlr cluster.') + group.add_argument('--adlr_autoresume_interval', type=int, default=1000, + help='Intervals over which check for autoresume' + 'termination signal') + return parser + + +def _add_biencoder_args(parser): + group = parser.add_argument_group(title='biencoder') + # network size + group.add_argument('--ict_head_size', type=int, default=None, + help='Size of block embeddings to be used in ICT and ' + 'REALM (paper default: 128)') + group.add_argument('--biencoder_projection_dim', type=int, default=0, + help='Size of projection head used in biencoder') + group.add_argument('--biencoder_shared_query_context_model', action='store_true', + help='Whether to share the parameters of the query ' + 'and context models or not') + # checkpointing + group.add_argument('--ict_load', type=str, default=None, + help='Directory containing an ICTBertModel checkpoint') + group.add_argument('--bert_load', type=str, default=None, + help='Directory containing an BertModel checkpoint ' + '(needed to start ICT and REALM)') + + # data + group.add_argument('--titles_data_path', type=str, default=None, + help='Path to titles dataset used for ICT') + group.add_argument('--query_in_block_prob', type=float, default=0.1, + help='Probability of keeping query in block for ' + 'ICT dataset') + group.add_argument('--use_one_sent_docs', action='store_true', + help='Whether to use one 
sentence documents in ICT') + group.add_argument('--evidence_data_path', type=str, default=None, + help='Path to Wikipedia Evidence frm DPR paper') + + # training + group.add_argument('--retriever_report_topk_accuracies', nargs='+', type=int, + default=[], help="Which top-k accuracies to report " + "(e.g. '1 5 20')") + group.add_argument('--retriever_score_scaling', action='store_true', + help='Whether to scale retriever scores by inverse ' + 'square root of hidden size') + + # faiss index + group.add_argument('--block_data_path', type=str, default=None, + help='Where to save/load BlockData to/from') + group.add_argument('--embedding_path', type=str, default=None, + help='Where to save/load Open-Retrieval Embedding' + ' data to/from') + + # indexer + group.add_argument('--indexer_batch_size', type=int, default=128, + help='How large of batches to use when doing indexing ' + 'jobs') + group.add_argument('--indexer_log_interval', type=int, default=1000, + help='After how many batches should the indexer ' + 'report progress') + return parser + + +def _add_vision_args(parser): + group = parser.add_argument_group(title="vision") + + # general vision arguements + group.add_argument('--num_classes', type=int, default=1000, + help='num of classes in vision classificaiton task') + group.add_argument('--img_h', type=int, default=224, + help='Image height for vision classification task') + group.add_argument('--img_w', type=int, default=224, + help='Image height for vision classification task') + group.add_argument('--num_channels', type=int, default=3, + help='Number of channels in input image data') + group.add_argument('--patch_dim', type=int, default=16, + help='patch dimension') + group.add_argument('--classes_fraction', type=float, default=1.0, + help='training with fraction of classes.') + group.add_argument('--data_per_class_fraction', type=float, default=1.0, + help='training with fraction of data per class.') + group.add_argument('--no_data_sharding', action='store_false', + help='Disable data sharding.', + dest='data_sharding') + group.add_argument('--head_lr_mult', type=float, default=1.0, + help='learning rate multiplier for head during finetuning') + + # dino arguments + group.add_argument('--iter_per_epoch', type=int, default=1250, + help='iterations per epoch') + group.add_argument('--dino_local_img_size', type=int, default=96, + help='Image size for vision classification task') + group.add_argument('--dino_local_crops_number', type=int, default=10, + help='Number of local crops') + group.add_argument('--dino_head_hidden_size', type=int, default=2048, + help='Hidden dimension size in dino head') + group.add_argument('--dino_bottleneck_size', type=int, default=256, + help='Bottle neck dimension in dino head ') + group.add_argument('--dino_freeze_last_layer', type=float, default=1, + help='Freezing last layer weights') + group.add_argument('--dino_norm_last_layer', action='store_true', + help='Disable Norm in last layer.') + group.add_argument('--dino_warmup_teacher_temp', type=float, default=0.04, + help='warump teacher temperature') + group.add_argument('--dino_teacher_temp', type=float, default=0.07, + help='teacher temperature') + group.add_argument('--dino_warmup_teacher_temp_epochs', type=int, default=30, + help='warmup teacher temperaure epochs') + return parser diff --git a/multilinguality_megatron/megatron/checkpointing.py b/multilinguality_megatron/megatron/checkpointing.py new file mode 100644 index 
0000000000000000000000000000000000000000..75bb73c272b0f523624e09cc7de604031c2ce0ee --- /dev/null +++ b/multilinguality_megatron/megatron/checkpointing.py @@ -0,0 +1,813 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Input/output checkpointing.""" + +import os +import random +import sys + +import numpy as np +import torch + +from megatron import update_num_microbatches +from megatron.core import mpu, tensor_parallel + +from .global_vars import get_args +from .utils import print_rank_0, unwrap_model + +_CHECKPOINT_VERSION = None + + +def set_checkpoint_version(value): + global _CHECKPOINT_VERSION + if _CHECKPOINT_VERSION is not None: + assert _CHECKPOINT_VERSION == value, "checkpoint versions do not match" + _CHECKPOINT_VERSION = value + + +def get_checkpoint_version(): + global _CHECKPOINT_VERSION + return _CHECKPOINT_VERSION + + +def check_checkpoint_args(checkpoint_args): + """Ensure fixed arguments for a model are the same for the input + arguments and the one retrieved from checkpoint.""" + args = get_args() + + def _compare(arg_name, old_arg_name=None): + if old_arg_name is not None: + checkpoint_value = getattr(checkpoint_args, old_arg_name) + else: + checkpoint_value = getattr(checkpoint_args, arg_name) + args_value = getattr(args, arg_name) + error_message = ( + "{} value from checkpoint ({}) is not equal to the " + "input argument value ({}).".format(arg_name, checkpoint_value, args_value) + ) + #assert checkpoint_value == args_value, error_message + + _compare("num_layers") + _compare("hidden_size") + _compare("num_attention_heads") + if args.vocab_file: + _compare("max_position_embeddings") + _compare("make_vocab_size_divisible_by") + _compare("padded_vocab_size") + _compare("tokenizer_type") + if args.data_parallel_random_init: + _compare("data_parallel_random_init") + if get_checkpoint_version() < 3.0: + _compare("tensor_model_parallel_size", old_arg_name="model_parallel_size") + if get_checkpoint_version() >= 3.0: + _compare("tensor_model_parallel_size") + _compare("pipeline_model_parallel_size") + + +def ensure_directory_exists(filename): + """Build filename's path if it does not already exists.""" + dirname = os.path.dirname(filename) + if not os.path.exists(dirname): + os.makedirs(dirname) + + +def get_checkpoint_name( + checkpoints_path, + iteration, + release=False, + pipeline_parallel=None, + tensor_rank=None, + pipeline_rank=None, +): + """Determine the directory name for this rank's checkpoint.""" + if release: + directory = "release" + else: + directory = "iter_{:07d}".format(iteration) + + # Use both the tensor and pipeline MP rank. + if pipeline_parallel is None: + pipeline_parallel = mpu.get_pipeline_model_parallel_world_size() > 1 + if tensor_rank is None: + tensor_rank = mpu.get_tensor_model_parallel_rank() + if pipeline_rank is None: + pipeline_rank = mpu.get_pipeline_model_parallel_rank() + + # Use both the tensor and pipeline MP rank. If using the distributed + # optimizer, then the optimizer's path must additionally include the + # data parallel rank. 
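# Illustrative layout produced by the checkpoint-naming helpers below (hypothetical
# iteration and rank values):
#   <save>/iter_0001000/mp_rank_00/model_optim_rng.pt       tensor rank 0, no pipeline parallelism
#   <save>/iter_0001000/mp_rank_00_002/model_optim_rng.pt   tensor rank 0, pipeline rank 2
# With the distributed optimizer, get_checkpoint_names() splits model and optimizer files
# and appends the data parallel rank to the optimizer directory:
#   <save>/iter_0001000/mp_rank_00/model_rng.pt
#   <save>/iter_0001000/mp_rank_00_003/optim.pt             data parallel rank 3
# Release checkpoints use the directory name "release" instead of iter_<iteration>.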
+ if not pipeline_parallel: + common_path = os.path.join( + checkpoints_path, directory, f"mp_rank_{tensor_rank:02d}" + ) + else: + common_path = os.path.join( + checkpoints_path, + directory, + f"mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}", + ) + + return os.path.join(common_path, "model_optim_rng.pt") + + +def get_checkpoint_names( + checkpoints_path, + iteration, + use_distributed_optimizer, + release=False, + pipeline_parallel=None, + tensor_rank=None, + pipeline_rank=None, +): + """Determine the directory name for this rank's checkpoint.""" + if release: + directory = "release" + else: + directory = "iter_{:07d}".format(iteration) + + # Use both the tensor and pipeline MP rank. + if pipeline_parallel is None: + pipeline_parallel = mpu.get_pipeline_model_parallel_world_size() > 1 + if tensor_rank is None: + tensor_rank = mpu.get_tensor_model_parallel_rank() + if pipeline_rank is None: + pipeline_rank = mpu.get_pipeline_model_parallel_rank() + + # Use both the tensor and pipeline MP rank. If using the distributed + # optimizer, then the optimizer's path must additionally include the + # data parallel rank. + if not pipeline_parallel: + common_path = os.path.join( + checkpoints_path, directory, f"mp_rank_{tensor_rank:02d}" + ) + else: + common_path = os.path.join( + checkpoints_path, + directory, + f"mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}", + ) + + if use_distributed_optimizer: + model_name = os.path.join(common_path, "model_rng.pt") + optim_name = os.path.join( + common_path + "_%03d" % mpu.get_data_parallel_rank(), "optim.pt" + ) + else: + model_name = optim_name = os.path.join(common_path, "model_optim_rng.pt") + return model_name, optim_name + + +def find_checkpoint_rank_0( + checkpoints_path, iteration, use_distributed_optimizer, release=False +): + """Finds the checkpoint for rank 0 without knowing if we are using + pipeline parallelism or not. + + Since the checkpoint naming scheme changes if pipeline parallelism + is present, we need to look for both naming schemes if we don't + know if the checkpoint has pipeline parallelism. + + """ + + # Look for checkpoint with no pipelining + filenames = get_checkpoint_names( + checkpoints_path, + iteration, + use_distributed_optimizer, + release, + pipeline_parallel=False, + tensor_rank=0, + pipeline_rank=0, + ) + if os.path.isfile(filenames[0]): + return filenames + + # Look for checkpoint with pipelining + filenames = get_checkpoint_names( + checkpoints_path, + iteration, + use_distributed_optimizer, + release, + pipeline_parallel=True, + tensor_rank=0, + pipeline_rank=0, + ) + if os.path.isfile(filenames[0]): + return filenames + + return None, None + + +def get_checkpoint_tracker_filename(checkpoints_path): + """Tracker file rescords the latest chckpoint during + training to restart from.""" + return os.path.join(checkpoints_path, "latest_checkpointed_iteration.txt") + + +def read_metadata(tracker_filename): + # Read the tracker file and either set the iteration or + # mark it as a release checkpoint. + iteration = 0 + release = False + with open(tracker_filename, "r") as f: + metastring = f.read().strip() + try: + iteration = int(metastring) + except ValueError: + release = metastring == "release" + if not release: + print_rank_0( + "ERROR: Invalid metadata file {}. Exiting".format(tracker_filename) + ) + sys.exit() + assert iteration > 0 or release, "error parsing metadata file {}".format( + tracker_filename + ) + + # Get the max iteration retrieved across the ranks. 
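# The tracker file (latest_checkpointed_iteration.txt) holds either an integer iteration,
# e.g. "1000", or the literal string "release". Each rank reads it independently, so the
# all-reduce below takes the maximum iteration in case the ranks disagree.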
+ if torch.distributed.is_initialized(): + iters_cuda = torch.cuda.LongTensor([iteration]) + torch.distributed.all_reduce(iters_cuda, op=torch.distributed.ReduceOp.MAX) + max_iter = iters_cuda[0].item() + + # We should now have all the same iteration. + # If not, print a warning and chose the maximum + # iteration across all ranks. + if iteration != max_iter: + print( + "WARNING: on rank {} found iteration {} in the " + "metadata while max iteration across the ranks " + "is {}, replacing it with max iteration.".format( + rank, iteration, max_iter + ), + flush=True, + ) + else: + # When loading a checkpoint outside of training (for example, + # when editing it), we might not have torch distributed + # initialized, in this case, just assume we have the latest + max_iter = iteration + return max_iter, release + + +def get_rng_state(): + """collect rng state across data parallel ranks""" + args = get_args() + rng_state = { + "random_rng_state": random.getstate(), + "np_rng_state": np.random.get_state(), + "torch_rng_state": torch.get_rng_state(), + "cuda_rng_state": torch.cuda.get_rng_state(), + "rng_tracker_states": tensor_parallel.get_cuda_rng_tracker().get_states(), + } + + rng_state_list = None + if ( + torch.distributed.is_initialized() + and mpu.get_data_parallel_world_size() > 1 + and args.data_parallel_random_init + ): + rng_state_list = [None for i in range(mpu.get_data_parallel_world_size())] + torch.distributed.all_gather_object( + rng_state_list, rng_state, group=mpu.get_data_parallel_group() + ) + else: + rng_state_list = [rng_state] + + return rng_state_list + + +def save_checkpoint(iteration, model, optimizer, opt_param_scheduler): + """Save a model checkpoint.""" + args = get_args() + + # Only rank zero of the data parallel writes to the disk. + model = unwrap_model(model) + + release = iteration == "release" + if release: + print_rank_0( + "saving checkpoint marked as release to {}".format(iteration, args.save) + ) + else: + print_rank_0( + "saving checkpoint at iteration {:7d} to {}".format(iteration, args.save) + ) + + # Collect rng state across data parallel ranks. + rng_state = get_rng_state() + + # Checkpoint file names. + model_checkpoint_name, optim_checkpoint_name = get_checkpoint_names( + args.save, iteration, args.use_distributed_optimizer, release=release + ) + + # Collect args, model, RNG. + model_state_dict = {} + if not torch.distributed.is_initialized() or mpu.get_data_parallel_rank() == 0: + # Arguments, iteration, and model. + model_state_dict["args"] = args + model_state_dict["checkpoint_version"] = 3.0 + model_state_dict["iteration"] = iteration + if len(model) == 1: + model_state_dict["model"] = model[0].state_dict_for_save_checkpoint() + else: + for i in range(len(model)): + mpu.set_virtual_pipeline_model_parallel_rank(i) + model_state_dict["model%d" % i] = model[ + i + ].state_dict_for_save_checkpoint() + + # RNG states. + if not args.no_save_rng: + model_state_dict["rng_state"] = rng_state + + # Collect optimizer state. (Optimizer is saved separately from the model, due + # to the conflicting data pattern when using the distributed optimizer.) + optim_state_dict = {} + if not args.no_save_optim and ( + not torch.distributed.is_initialized() + or mpu.get_data_parallel_rank() == 0 + or args.use_distributed_optimizer + ): + # Optimizer stuff. + if optimizer is not None: + optim_state_dict["optimizer"] = optimizer.state_dict() + if opt_param_scheduler is not None: + optim_state_dict["opt_param_scheduler"] = opt_param_scheduler.state_dict() + + # Save. 
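# Illustrative summary of what the branches below write (keys as collected above):
#   model_state_dict: "args", "checkpoint_version", "iteration", "model" (or "model0",
#                     "model1", ... with virtual pipeline stages) and, unless
#                     --no_save_rng is set, "rng_state"
#   optim_state_dict: "optimizer" and "opt_param_scheduler"
# With --use_distributed_optimizer the two dicts go to separate model/optim files;
# otherwise they are merged into a single model_optim_rng.pt.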
+ if args.use_distributed_optimizer: + # Save model separate from optimizer. + if model_state_dict: + ensure_directory_exists(model_checkpoint_name) + torch.save(model_state_dict, model_checkpoint_name) + if optim_state_dict: + ensure_directory_exists(optim_checkpoint_name) + torch.save(optim_state_dict, optim_checkpoint_name) + else: + # Save model and optimizer together. + state_dict = {**model_state_dict, **optim_state_dict} + if state_dict: # only saves if populated (i.e., inherits conditions above) + ensure_directory_exists(model_checkpoint_name) + torch.save(state_dict, model_checkpoint_name) + + # Wait so everyone is done (necessary) + if torch.distributed.is_initialized(): + torch.distributed.barrier() + + if release: + print_rank_0( + " successfully saved checkpoint marked as release to {}".format( + iteration, args.save + ) + ) + else: + print_rank_0( + " successfully saved checkpoint at iteration {:7d} to {}".format( + iteration, args.save + ) + ) + + # And update the latest iteration + if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0: + tracker_filename = get_checkpoint_tracker_filename(args.save) + with open(tracker_filename, "w") as f: + f.write(str(iteration)) + + # Wait so everyone is done (not necessary) + if torch.distributed.is_initialized(): + torch.distributed.barrier() + + +def _transpose_first_dim(t, num_splits, num_splits_first, model): + input_shape = t.size() + # We use a self_attention module but the values extracted aren't + # specific to self attention so should work for cross attention as well + while hasattr(model, "module"): + model = model.module + attention_module = model.language_model.encoder.layers[0].self_attention + hidden_size_per_attention_head = attention_module.hidden_size_per_attention_head + num_attention_heads_per_partition = ( + attention_module.num_attention_heads_per_partition + ) + if num_splits_first: + """[num_splits * np * hn, h] + -->(view) [num_splits, np, hn, h] + -->(tranpose) [np, num_splits, hn, h] + -->(view) [np * num_splits * hn, h]""" + + intermediate_shape = ( + num_splits, + num_attention_heads_per_partition, + hidden_size_per_attention_head, + ) + input_shape[1:] + + t = t.view(*intermediate_shape) + t = t.transpose(0, 1).contiguous() + else: + """[np * hn * num_splits, h] + -->(view) [np, hn, num_splits, h] + -->(tranpose) [np, num_splits, hn, h] + -->(view) [np * num_splits * hn, h]""" + + intermediate_shape = ( + num_attention_heads_per_partition, + hidden_size_per_attention_head, + num_splits, + ) + input_shape[1:] + + t = t.view(*intermediate_shape) + t = t.transpose(1, 2).contiguous() + t = t.view(*input_shape) + + return t + + +def fix_query_key_value_ordering(model, checkpoint_version): + """Fix up query/key/value matrix ordering if checkpoint + version is smaller than 2.0 + """ + if checkpoint_version < 2.0: + if isinstance(model, list): + assert len(model) == 1 + model = model[0] + for name, param in model.named_parameters(): + if name.endswith((".query_key_value.weight", ".query_key_value.bias")): + # multiquery attn does not require transposition + args = get_args() + if args.num_attention_heads_kv != args.num_attention_heads: + continue + if checkpoint_version == 0: + fixed_param = _transpose_first_dim(param.data, 3, True, model) + elif checkpoint_version == 1.0: + fixed_param = _transpose_first_dim(param.data, 3, False, model) + else: + print_rank_0(f"Invalid checkpoint version {checkpoint_version}.") + sys.exit() + param.data.copy_(fixed_param) + if 
name.endswith((".key_value.weight", ".key_value.bias")): + if checkpoint_version == 0: + fixed_param = _transpose_first_dim(param.data, 2, True, model) + elif checkpoint_version == 1.0: + fixed_param = _transpose_first_dim(param.data, 2, False, model) + else: + print_rank_0(f"Invalid checkpoint version {checkpoint_version}.") + sys.exit() + param.data.copy_(fixed_param) + print_rank_0( + " succesfully fixed query-key-values ordering for" + " checkpoint version {}".format(checkpoint_version) + ) + + +def _load_base_checkpoint(load_dir, use_distributed_optimizer, rank0=False): + """Load the base state_dict from the given directory + + If rank0 is true, just loads rank 0 checkpoint, ignoring arguments. + """ + # Read the tracker file and set the iteration. + tracker_filename = get_checkpoint_tracker_filename(load_dir) + + # If no tracker file, return nothing + if not os.path.isfile(tracker_filename): + if not rank0: + print_rank_0( + "WARNING: could not find the metadata file {} ".format(tracker_filename) + ) + print_rank_0( + " will not load any checkpoints and will start from " "random" + ) + return None, None, False + + # Otherwise, read the tracker file and either set the iteration or + # mark it as a release checkpoint. + iteration, release = read_metadata(tracker_filename) + + # Checkpoint. + if rank0: + checkpoint_names = find_checkpoint_rank_0( + load_dir, iteration, use_distributed_optimizer, release + ) + else: + checkpoint_names = get_checkpoint_names( + load_dir, iteration, use_distributed_optimizer, release + ) + if release: + print_rank_0(f" loading release checkpoint from {load_dir}") + else: + print_rank_0( + f" loading checkpoint from {load_dir} at iteration {iteration}" + ) + + model_checkpoint_name, optim_checkpoint_name = checkpoint_names + + # Load the checkpoint. + try: + model_state_dict = torch.load(model_checkpoint_name, map_location="cpu") + if use_distributed_optimizer: + optim_state_dict = torch.load(optim_checkpoint_name, map_location="cpu") + else: + optim_state_dict = model_state_dict + except ModuleNotFoundError: + from megatron.fp16_deprecated import loss_scaler + + # For backward compatibility. + if not rank0: + print_rank_0(" > deserializing using the old code structure ...") + sys.modules["fp16.loss_scaler"] = sys.modules[ + "megatron.fp16_deprecated.loss_scaler" + ] + sys.modules["megatron.fp16.loss_scaler"] = sys.modules[ + "megatron.fp16_deprecated.loss_scaler" + ] + model_state_dict = torch.load(model_checkpoint_name, map_location="cpu") + optim_state_dict = torch.load(optim_checkpoint_name, map_location="cpu") + sys.modules.pop("fp16.loss_scaler", None) + sys.modules.pop("megatron.fp16.loss_scaler", None) + except BaseException as e: + print_rank_0("could not load the checkpoint") + print_rank_0(e) + sys.exit() + return model_state_dict, optim_state_dict, release + + +def load_args_from_checkpoint(args, load_arg="load"): + """Set required arguments from the checkpoint specified in the + arguments. + + Will overwrite arguments that have a non-None default value, but + will leave any arguments that default to None as set. + + Returns the same args NameSpace with the new values added/updated. 
+ + If no checkpoint is specified in args, or if the checkpoint is + there but invalid, the arguments will not be modified + + """ + load_dir = getattr(args, load_arg) + + if load_dir is None: + print_rank_0("No load directory specified, using provided arguments.") + return args + + model_state_dict, optim_state_dict, release = _load_base_checkpoint( + load_dir, use_distributed_optimizer=args.use_distributed_optimizer, rank0=True + ) + + # For args we only care about the model state dict + state_dict = model_state_dict + + if not state_dict: + print_rank_0( + "Checkpoint not found to provide arguments, using provided arguments." + ) + return args + + if "args" not in state_dict: + print_rank_0( + "Checkpoint provided does not have arguments saved, using provided arguments." + ) + return args + + checkpoint_args = state_dict["args"] + checkpoint_version = state_dict.get("checkpoint_version", 0) + args.iteration = state_dict["iteration"] + + def _set_arg(arg_name, old_arg_name=None, force=False): + if not force and getattr(args, arg_name, None) is not None: + return + + if old_arg_name is not None: + checkpoint_value = getattr(checkpoint_args, old_arg_name, None) + else: + checkpoint_value = getattr(checkpoint_args, arg_name, None) + + if checkpoint_value is not None: + print_rank_0(f"Setting {arg_name} to {checkpoint_value} from checkpoint") + setattr(args, arg_name, checkpoint_value) + + _set_arg("num_layers") + _set_arg("hidden_size") + _set_arg("ffn_hidden_size") + _set_arg("seq_length") + _set_arg("num_attention_heads") + _set_arg("kv_channels") + _set_arg("max_position_embeddings") + _set_arg("tokenizer_type") + _set_arg("padded_vocab_size", force=True) + + _set_arg("position_embedding_type", force=True) + _set_arg("num_attention_heads_kv") + _set_arg("bias_dropout_fusion") + _set_arg("bias_gelu_fusion") + _set_arg("hidden_dropout") + _set_arg("parallel_attn", force=True) + _set_arg("parallel_layernorm", force=True) + _set_arg("use_flash_attn") + _set_arg("use_rms_norm", force=True) + _set_arg("glu_activation") + _set_arg("tie_embed_logits", force=True) + _set_arg("make_vocab_size_divisible_by", force=True) + _set_arg("train_iters") + _set_arg("sliding_window_size") + if checkpoint_version < 3.0: + _set_arg("tensor_model_parallel_size", "model_parallel_size") + else: + _set_arg("tensor_model_parallel_size", force=True) + _set_arg("pipeline_model_parallel_size", force=True) + _set_arg("num_layers_per_virtual_pipeline_stage") + return args + + +def load_checkpoint( + model, optimizer, opt_param_scheduler, load_arg="load", strict=True +): + """Load a model checkpoint and return the iteration. + strict (bool): whether to strictly enforce that the keys in + :attr:`state_dict` of the checkpoint match the names of + parameters and buffers in model. + """ + args = get_args() + load_dir = getattr(args, load_arg) + model = unwrap_model(model) + + model_state_dict, optim_state_dict, release = _load_base_checkpoint( + load_dir, use_distributed_optimizer=args.use_distributed_optimizer, rank0=False + ) + + if model_state_dict is None: + return 0 + + # set checkpoint version + set_checkpoint_version(model_state_dict.get("checkpoint_version", 0)) + + # Set iteration.
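# The starting iteration below is taken from the checkpoint unless this is a release
# checkpoint or --finetune/--annealing is set, in which case training restarts at 0.
# Hypothetical driver sketch (the surrounding training loop is not part of this file):
#   args = load_args_from_checkpoint(get_args())
#   iteration = load_checkpoint(model, optimizer, opt_param_scheduler)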
+    if args.finetune or release or args.annealing:
+        iteration = 0
+    else:
+        try:
+            iteration = model_state_dict["iteration"]
+        except KeyError:
+            try:  # Backward compatible with older checkpoints
+                iteration = model_state_dict["total_iters"]
+            except KeyError:
+                print_rank_0(
+                    "A metadata file exists but unable to load "
+                    "iteration from checkpoint {}, exiting".format(load_dir)
+                )
+                sys.exit()
+
+    # Check arguments.
+    assert args.consumed_train_samples == 0
+    assert args.consumed_valid_samples == 0
+    if "args" in model_state_dict and not args.finetune and not args.annealing:
+        checkpoint_args = model_state_dict["args"]
+        check_checkpoint_args(checkpoint_args)
+        args.consumed_train_samples = getattr(
+            checkpoint_args, "consumed_train_samples", 0
+        )
+        update_num_microbatches(consumed_samples=args.consumed_train_samples)
+        args.consumed_valid_samples = getattr(
+            checkpoint_args, "consumed_valid_samples", 0
+        )
+    else:
+        print_rank_0("could not find arguments in the checkpoint ...")
+
+    # Model.
+    if len(model) == 1:
+        model[0].load_state_dict(model_state_dict["model"], strict=strict)
+    else:
+        for i in range(len(model)):
+            mpu.set_virtual_pipeline_model_parallel_rank(i)
+            model[i].load_state_dict(model_state_dict["model%d" % i], strict=strict)
+
+    # Fix up query/key/value matrix ordering if needed
+    checkpoint_version = get_checkpoint_version()
+    print_rank_0(f" checkpoint version {checkpoint_version}")
+    fix_query_key_value_ordering(model, checkpoint_version)
+
+    # Optimizer.
+    if not release and not args.finetune and not args.no_load_optim:
+        try:
+            if optimizer is not None:
+                optimizer.load_state_dict(optim_state_dict["optimizer"])
+            if opt_param_scheduler is not None and not args.annealing:
+                if "lr_scheduler" in optim_state_dict:  # backward compatibility
+                    opt_param_scheduler.load_state_dict(
+                        optim_state_dict["lr_scheduler"]
+                    )
+                else:
+                    opt_param_scheduler.load_state_dict(
+                        optim_state_dict["opt_param_scheduler"]
+                    )
+        except KeyError:
+            print_rank_0(
+                "Unable to load optimizer from checkpoint {}. "
+                "Specify --no_load_optim or --finetune to prevent "
+                "attempting to load the optimizer state, "
+                "exiting ...".format(load_dir)
+            )
+            sys.exit()
+    else:
+        if args.fp16 and optimizer is not None:
+            optimizer.reload_model_params()
+
+    # rng states.
+    if not release and not args.finetune and not args.no_load_rng:
+        try:
+            if "rng_state" in model_state_dict:
+                # access rng_state for data parallel rank
+                if args.data_parallel_random_init:
+                    rng_state = model_state_dict["rng_state"][
+                        mpu.get_data_parallel_rank()
+                    ]
+                else:
+                    rng_state = model_state_dict["rng_state"][0]
+                random.setstate(rng_state["random_rng_state"])
+                np.random.set_state(rng_state["np_rng_state"])
+                torch.set_rng_state(rng_state["torch_rng_state"])
+                torch.cuda.set_rng_state(rng_state["cuda_rng_state"])
+                # Check for empty states array
+                if not rng_state["rng_tracker_states"]:
+                    raise KeyError
+                tensor_parallel.get_cuda_rng_tracker().set_states(
+                    rng_state["rng_tracker_states"]
+                )
+            else:  # backward compatibility
+                random.setstate(model_state_dict["random_rng_state"])
+                np.random.set_state(model_state_dict["np_rng_state"])
+                torch.set_rng_state(model_state_dict["torch_rng_state"])
+                torch.cuda.set_rng_state(model_state_dict["cuda_rng_state"])
+                # Check for empty states array
+                if not model_state_dict["rng_tracker_states"]:
+                    raise KeyError
+                tensor_parallel.get_cuda_rng_tracker().set_states(
+                    model_state_dict["rng_tracker_states"]
+                )
+        except KeyError:
+            print_rank_0(
+                "Unable to load rng state from checkpoint {}. "
+                "Specify --no_load_rng or --finetune to prevent "
+                "attempting to load the rng state, "
+                "exiting ...".format(load_dir)
+            )
+            sys.exit()
+
+    # Some utilities want to load a checkpoint without distributed being initialized
+    if torch.distributed.is_initialized():
+        torch.distributed.barrier()
+
+    print_rank_0(
+        f" successfully loaded checkpoint from {args.load} "
+        f"at iteration {iteration}"
+    )
+
+    return iteration
+
+
+def load_biencoder_checkpoint(
+    model, only_query_model=False, only_context_model=False, custom_load_path=None
+):
+    """
+    Selectively load retrieval models for indexing/retrieving
+    from saved checkpoints.
+    """
+
+    args = get_args()
+
+    model = unwrap_model(model)
+    load_path = custom_load_path if custom_load_path is not None else args.load
+
+    tracker_filename = get_checkpoint_tracker_filename(load_path)
+    with open(tracker_filename, "r") as f:
+        iteration = int(f.read().strip())
+
+    checkpoint_name, _ = get_checkpoint_names(
+        load_path, iteration, args.use_distributed_optimizer, release=False
+    )
+
+    if mpu.get_data_parallel_rank() == 0:
+        print(
+            "global rank {} is loading checkpoint {}".format(
+                torch.distributed.get_rank(), checkpoint_name
+            )
+        )
+
+    state_dict = torch.load(checkpoint_name, map_location="cpu")
+    ret_state_dict = state_dict["model"]
+
+    if only_query_model:
+        ret_state_dict.pop("context_model")
+    if only_context_model:
+        ret_state_dict.pop("query_model")
+
+    assert len(model) == 1
+    model[0].load_state_dict(ret_state_dict)
+    torch.distributed.barrier()
+
+    if mpu.get_data_parallel_rank() == 0:
+        print(" successfully loaded {}".format(checkpoint_name))
+
+    return model
diff --git a/multilinguality_megatron/megatron/core/__init__.py b/multilinguality_megatron/megatron/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb437d5dae0a911c6dc079cfb023f176ac2c1181
--- /dev/null
+++ b/multilinguality_megatron/megatron/core/__init__.py
@@ -0,0 +1,12 @@
+import megatron.core.parallel_state
+import megatron.core.tensor_parallel
+import megatron.core.utils
+
+# Alias parallel_state as mpu, its legacy name
+mpu = parallel_state
+
+__all__ = [
+    "parallel_state",
+    "tensor_parallel",
+    "utils",
+]
diff --git
a/multilinguality_megatron/megatron/core/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/core/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c3a132193d530034dfac744912779f887876dc0 Binary files /dev/null and b/multilinguality_megatron/megatron/core/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/__pycache__/parallel_state.cpython-39.pyc b/multilinguality_megatron/megatron/core/__pycache__/parallel_state.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76d72445b22b050477a78c3da736903f7afe10fb Binary files /dev/null and b/multilinguality_megatron/megatron/core/__pycache__/parallel_state.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/__pycache__/utils.cpython-39.pyc b/multilinguality_megatron/megatron/core/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a4bc93e02dee26092e50826708f53261fdcfed6 Binary files /dev/null and b/multilinguality_megatron/megatron/core/__pycache__/utils.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/parallel_state.py b/multilinguality_megatron/megatron/core/parallel_state.py new file mode 100644 index 0000000000000000000000000000000000000000..7a09f7bcc80161bfab20904ecb1322cfe2d68cdf --- /dev/null +++ b/multilinguality_megatron/megatron/core/parallel_state.py @@ -0,0 +1,524 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Model and data parallel groups.""" + +import torch +from typing import Optional + +from .utils import GlobalMemoryBuffer + +# Intra-layer model parallel group that the current rank belongs to. +_TENSOR_MODEL_PARALLEL_GROUP = None +# Inter-layer model parallel group that the current rank belongs to. +_PIPELINE_MODEL_PARALLEL_GROUP = None +# Model parallel group (both intra- and pipeline) that the current rank belongs to. +_MODEL_PARALLEL_GROUP = None +# Embedding group. +_EMBEDDING_GROUP = None +# Position embedding group. +_POSITION_EMBEDDING_GROUP = None +# Data parallel group that the current rank belongs to. +_DATA_PARALLEL_GROUP = None + +_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None +_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None +_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = None + +# These values enable us to change the mpu sizes on the fly. +_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None +_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None +_MPU_TENSOR_MODEL_PARALLEL_RANK = None +_MPU_PIPELINE_MODEL_PARALLEL_RANK = None + +# A list of ranks that have a copy of the embedding. +_EMBEDDING_GLOBAL_RANKS = None + +# A list of ranks that have a copy of the position embedding. +_POSITION_EMBEDDING_GLOBAL_RANKS = None + +# A list of global ranks for each pipeline group to ease calculation of the source +# rank when broadcasting from the first or last pipeline stage. +_PIPELINE_GLOBAL_RANKS = None + +# A list of global ranks for each data parallel group to ease calculation of the source +# rank when broadcasting weights from src to all other data parallel ranks +_DATA_PARALLEL_GLOBAL_RANKS = None + +# Memory buffers to avoid dynamic memory allocation +_GLOBAL_MEMORY_BUFFER = None + + +def initialize_model_parallel( + tensor_model_parallel_size: int = 1, + pipeline_model_parallel_size: int = 1, + virtual_pipeline_model_parallel_size: Optional[int] = None, + pipeline_model_parallel_split_rank: Optional[int] = None, +) -> None: + """ + Initialize model data parallel groups. 
+ + Arguments: + tensor_model_parallel_size: number of GPUs used for tensor model parallelism. + pipeline_model_parallel_size: number of GPUs used for pipeline model parallelism. + virtual_pipeline_model_parallel_size: number of virtual stages (interleaved + pipeline). + pipeline_model_parallel_split_rank: for models with both encoder and decoder, + rank in pipeline with split point. + + Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we + use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize + the model pipeline. The present function will + create 8 tensor model-parallel groups, 4 pipeline model-parallel groups + and 8 data-parallel groups as: + 8 data_parallel groups: + [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15] + 8 tensor model-parallel groups: + [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15] + 4 pipeline model-parallel groups: + [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15] + Note that for efficiency, the caller should make sure adjacent ranks + are on the same DGX box. For example if we are using 2 DGX-1 boxes + with a total of 16 GPUs, rank 0 to 7 belong to the first box and + ranks 8 to 15 belong to the second box. + """ + # Get world size and rank. Ensure some consistencies. + assert torch.distributed.is_initialized() + world_size: int = torch.distributed.get_world_size() + + if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) != 0: + raise RuntimeError( + f"world_size ({world_size}) is not divisible by tensor_model_parallel_size " + f"({tensor_model_parallel_size}) x pipeline_model_parallel_size ({pipeline_model_parallel_size})" + ) + + data_parallel_size: int = world_size // (tensor_model_parallel_size * + pipeline_model_parallel_size) + + num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size + num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size + num_data_parallel_groups: int = world_size // data_parallel_size + + if virtual_pipeline_model_parallel_size is not None: + if not pipeline_model_parallel_size > 2: + raise RuntimeError("pipeline-model-parallel size should be greater than 2 with " + "interleaved schedule") + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0 + _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = virtual_pipeline_model_parallel_size + + if pipeline_model_parallel_split_rank is not None: + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank + + rank = torch.distributed.get_rank() + + # Build the data-parallel groups. + global _DATA_PARALLEL_GROUP + global _DATA_PARALLEL_GLOBAL_RANKS + assert _DATA_PARALLEL_GROUP is None, 'data parallel group is already initialized' + all_data_parallel_group_ranks = [] + for i in range(pipeline_model_parallel_size): + start_rank = i * num_pipeline_model_parallel_groups + end_rank = (i + 1) * num_pipeline_model_parallel_groups + for j in range(tensor_model_parallel_size): + ranks = range(start_rank + j, end_rank, tensor_model_parallel_size) + all_data_parallel_group_ranks.append(list(ranks)) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _DATA_PARALLEL_GROUP = group + _DATA_PARALLEL_GLOBAL_RANKS = ranks + + # Build the model-parallel groups. 
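+    # Continuing the 16-GPU example from the docstring (tensor size 2,
+    # pipeline size 4, so data_parallel_size = 2): the loop above produced
+    #   all_data_parallel_group_ranks = [[0, 2], [1, 3], [4, 6], [5, 7],
+    #                                    [8, 10], [9, 11], [12, 14], [13, 15]]
+    # and the loop below then forms one model-parallel group per data parallel
+    # index, i.e. [0, 1, 4, 5, 8, 9, 12, 13] and [2, 3, 6, 7, 10, 11, 14, 15].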
+ global _MODEL_PARALLEL_GROUP + assert _MODEL_PARALLEL_GROUP is None, 'model parallel group is already initialized' + for i in range(data_parallel_size): + ranks = [data_parallel_group_ranks[i] + for data_parallel_group_ranks in all_data_parallel_group_ranks] + group = torch.distributed.new_group(ranks) + if rank in ranks: + _MODEL_PARALLEL_GROUP = group + + # Build the tensor model-parallel groups. + global _TENSOR_MODEL_PARALLEL_GROUP + assert _TENSOR_MODEL_PARALLEL_GROUP is None, \ + 'tensor model parallel group is already initialized' + for i in range(num_tensor_model_parallel_groups): + ranks = range(i * tensor_model_parallel_size, + (i + 1) * tensor_model_parallel_size) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _TENSOR_MODEL_PARALLEL_GROUP = group + + # Build the pipeline model-parallel groups and embedding groups + # (first and last rank in each pipeline model-parallel group). + global _PIPELINE_MODEL_PARALLEL_GROUP + global _PIPELINE_GLOBAL_RANKS + assert _PIPELINE_MODEL_PARALLEL_GROUP is None, \ + 'pipeline model parallel group is already initialized' + global _EMBEDDING_GROUP + global _EMBEDDING_GLOBAL_RANKS + assert _EMBEDDING_GROUP is None, 'embedding group is already initialized' + global _POSITION_EMBEDDING_GROUP + global _POSITION_EMBEDDING_GLOBAL_RANKS + assert _POSITION_EMBEDDING_GROUP is None, \ + 'position embedding group is already initialized' + for i in range(num_pipeline_model_parallel_groups): + ranks = range(i, world_size, num_pipeline_model_parallel_groups) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _PIPELINE_MODEL_PARALLEL_GROUP = group + _PIPELINE_GLOBAL_RANKS = ranks + # Setup embedding group (to exchange gradients between + # first and last stages). + if len(ranks) > 1: + embedding_ranks = [ranks[0], ranks[-1]] + position_embedding_ranks = [ranks[0]] + if pipeline_model_parallel_split_rank is not None: + if ranks[pipeline_model_parallel_split_rank] not in embedding_ranks: + embedding_ranks = [ranks[0], + ranks[pipeline_model_parallel_split_rank], + ranks[-1]] + if ranks[pipeline_model_parallel_split_rank] not in position_embedding_ranks: + position_embedding_ranks = [ranks[0], + ranks[pipeline_model_parallel_split_rank]] + else: + embedding_ranks = ranks + position_embedding_ranks = ranks + + group = torch.distributed.new_group(embedding_ranks) + if rank in embedding_ranks: + _EMBEDDING_GROUP = group + if rank in ranks: + _EMBEDDING_GLOBAL_RANKS = embedding_ranks + + group = torch.distributed.new_group(position_embedding_ranks) + if rank in position_embedding_ranks: + _POSITION_EMBEDDING_GROUP = group + if rank in ranks: + _POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks + + # Initialize global memory buffer + # This isn't really "parallel state" but there isn't another good place to + # put this. 
If we end up with a more generic initialization of megatron-core + # we could stick it there + _set_global_memory_buffer() + + +def model_parallel_is_initialized(): + """Check if model and data parallel groups are initialized.""" + if _TENSOR_MODEL_PARALLEL_GROUP is None or \ + _PIPELINE_MODEL_PARALLEL_GROUP is None or \ + _DATA_PARALLEL_GROUP is None: + return False + return True + + +def get_model_parallel_group(): + """Get the model parallel group the caller rank belongs to.""" + assert _MODEL_PARALLEL_GROUP is not None, \ + 'model parallel group is not initialized' + return _MODEL_PARALLEL_GROUP + + +def get_tensor_model_parallel_group(): + """Get the tensor model parallel group the caller rank belongs to.""" + assert _TENSOR_MODEL_PARALLEL_GROUP is not None, \ + 'intra_layer_model parallel group is not initialized' + return _TENSOR_MODEL_PARALLEL_GROUP + + +def get_pipeline_model_parallel_group(): + """Get the pipeline model parallel group the caller rank belongs to.""" + assert _PIPELINE_MODEL_PARALLEL_GROUP is not None, \ + 'pipeline_model parallel group is not initialized' + return _PIPELINE_MODEL_PARALLEL_GROUP + + +def get_data_parallel_group(): + """Get the data parallel group the caller rank belongs to.""" + assert _DATA_PARALLEL_GROUP is not None, \ + 'data parallel group is not initialized' + return _DATA_PARALLEL_GROUP + + +def get_embedding_group(): + """Get the embedding group the caller rank belongs to.""" + assert _EMBEDDING_GROUP is not None, \ + 'embedding group is not initialized' + return _EMBEDDING_GROUP + + +def get_position_embedding_group(): + """Get the position embedding group the caller rank belongs to.""" + assert _POSITION_EMBEDDING_GROUP is not None, \ + 'position embedding group is not initialized' + return _POSITION_EMBEDDING_GROUP + + +def set_tensor_model_parallel_world_size(world_size): + """Set the tensor model parallel size""" + global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = world_size + + +def set_pipeline_model_parallel_world_size(world_size): + """Set the pipeline model parallel size""" + global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size + + +def get_tensor_model_parallel_world_size(): + """Return world size for the tensor model parallel group.""" + global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None: + return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + return torch.distributed.get_world_size(group=get_tensor_model_parallel_group()) + + +def get_pipeline_model_parallel_world_size(): + """Return world size for the pipeline model parallel group.""" + global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + if _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE is not None: + return _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group()) + + +def set_tensor_model_parallel_rank(rank): + """Set tensor model parallel rank.""" + global _MPU_TENSOR_MODEL_PARALLEL_RANK + _MPU_TENSOR_MODEL_PARALLEL_RANK = rank + + +def set_pipeline_model_parallel_rank(rank): + """Set pipeline model parallel rank.""" + global _MPU_PIPELINE_MODEL_PARALLEL_RANK + _MPU_PIPELINE_MODEL_PARALLEL_RANK = rank + + +def set_pipeline_model_parallel_split_rank(rank): + """Set pipeline model parallel split rank.""" + global _MPU_PIPELINE_MODEL_PARALLEL_SPLIT_RANK + _MPU_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = rank + + +def get_tensor_model_parallel_rank(): + """Return my rank for the tensor model parallel 
group.""" + global _MPU_TENSOR_MODEL_PARALLEL_RANK + if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None: + return _MPU_TENSOR_MODEL_PARALLEL_RANK + return torch.distributed.get_rank(group=get_tensor_model_parallel_group()) + + +def get_pipeline_model_parallel_rank(): + """Return my rank for the pipeline model parallel group.""" + global _MPU_PIPELINE_MODEL_PARALLEL_RANK + if _MPU_PIPELINE_MODEL_PARALLEL_RANK is not None: + return _MPU_PIPELINE_MODEL_PARALLEL_RANK + return torch.distributed.get_rank(group=get_pipeline_model_parallel_group()) + + + +def is_pipeline_first_stage(ignore_virtual=False): + """Return True if in the first pipeline model-parallel stage, False otherwise.""" + if not ignore_virtual: + if get_virtual_pipeline_model_parallel_world_size() is not None and \ + get_virtual_pipeline_model_parallel_rank() != 0: + return False + return get_pipeline_model_parallel_rank() == 0 + + +def is_pipeline_last_stage(ignore_virtual=False): + """Return True if in the last pipeline model-parallel stage, False otherwise.""" + if not ignore_virtual: + virtual_pipeline_model_parallel_world_size = \ + get_virtual_pipeline_model_parallel_world_size() + if virtual_pipeline_model_parallel_world_size is not None and \ + get_virtual_pipeline_model_parallel_rank() != ( + virtual_pipeline_model_parallel_world_size - 1): + return False + return get_pipeline_model_parallel_rank() == ( + get_pipeline_model_parallel_world_size() - 1) + + +def is_rank_in_embedding_group(ignore_virtual=False): + """Return true if current rank is in embedding group, False otherwise.""" + rank = torch.distributed.get_rank() + global _EMBEDDING_GLOBAL_RANKS + if ignore_virtual: + return rank in _EMBEDDING_GLOBAL_RANKS + if rank in _EMBEDDING_GLOBAL_RANKS: + if rank == _EMBEDDING_GLOBAL_RANKS[0]: + return is_pipeline_first_stage(ignore_virtual=False) + elif rank == _EMBEDDING_GLOBAL_RANKS[-1]: + return is_pipeline_last_stage(ignore_virtual=False) + else: + return True + return False + + +def is_rank_in_position_embedding_group(): + """Return true if current rank is in position embedding group, False otherwise.""" + rank = torch.distributed.get_rank() + global _POSITION_EMBEDDING_GLOBAL_RANKS + return rank in _POSITION_EMBEDDING_GLOBAL_RANKS + + +def is_pipeline_stage_before_split(rank=None): + """Return True if pipeline stage executes encoder block for a model + with both encoder and decoder.""" + if get_pipeline_model_parallel_world_size() == 1: + return True + if rank is None: + rank = get_pipeline_model_parallel_rank() + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None: + return True + if rank < _PIPELINE_MODEL_PARALLEL_SPLIT_RANK: + return True + return False + + +def is_pipeline_stage_after_split(rank=None): + """Return True if pipeline stage executes decoder block for a model + with both encoder and decoder.""" + if get_pipeline_model_parallel_world_size() == 1: + return True + if rank is None: + rank = get_pipeline_model_parallel_rank() + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None: + return True + if rank >= _PIPELINE_MODEL_PARALLEL_SPLIT_RANK: + return True + return False + + +def is_pipeline_stage_at_split(): + """Return true if pipeline stage executes decoder block and next + stage executes encoder block for a model with both encoder and + decoder.""" + rank = get_pipeline_model_parallel_rank() + return is_pipeline_stage_before_split(rank) and \ + is_pipeline_stage_after_split(rank+1) + + +def 
get_virtual_pipeline_model_parallel_rank(): + """Return the virtual pipeline-parallel rank.""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + return _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + + +def set_virtual_pipeline_model_parallel_rank(rank): + """Set the virtual pipeline-parallel rank.""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = rank + + +def get_virtual_pipeline_model_parallel_world_size(): + """Return the virtual pipeline-parallel world size.""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + return _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + + +def get_tensor_model_parallel_src_rank(): + """Calculate the global rank corresponding to the first local rank + in the tensor model parallel group.""" + global_rank = torch.distributed.get_rank() + local_world_size = get_tensor_model_parallel_world_size() + return (global_rank // local_world_size) * local_world_size + + +def get_data_parallel_src_rank(): + """Calculate the global rank corresponding to the first local rank + in the data parallel group.""" + assert _DATA_PARALLEL_GLOBAL_RANKS is not None, \ + "Data parallel group is not initialized" + return _DATA_PARALLEL_GLOBAL_RANKS[0] + + +def get_pipeline_model_parallel_first_rank(): + """Return the global rank of the first process in the pipeline for the + current tensor parallel group""" + assert _PIPELINE_GLOBAL_RANKS is not None, \ + "Pipeline parallel group is not initialized" + return _PIPELINE_GLOBAL_RANKS[0] + + +def get_pipeline_model_parallel_last_rank(): + """Return the global rank of the last process in the pipeline for the + current tensor parallel group""" + assert _PIPELINE_GLOBAL_RANKS is not None, \ + "Pipeline parallel group is not initialized" + last_rank_local = get_pipeline_model_parallel_world_size() - 1 + return _PIPELINE_GLOBAL_RANKS[last_rank_local] + +def get_pipeline_model_parallel_next_rank(): + """Return the global rank that follows the caller in the pipeline""" + assert _PIPELINE_GLOBAL_RANKS is not None, \ + "Pipeline parallel group is not initialized" + rank_in_pipeline = get_pipeline_model_parallel_rank() + world_size = get_pipeline_model_parallel_world_size() + return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size] + + +def get_pipeline_model_parallel_prev_rank(): + """Return the global rank that preceeds the caller in the pipeline""" + assert _PIPELINE_GLOBAL_RANKS is not None, \ + "Pipeline parallel group is not initialized" + rank_in_pipeline = get_pipeline_model_parallel_rank() + world_size = get_pipeline_model_parallel_world_size() + return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size] + + +def get_data_parallel_world_size(): + """Return world size for the data parallel group.""" + return torch.distributed.get_world_size(group=get_data_parallel_group()) + + +def get_data_parallel_rank(): + """Return my rank for the data parallel group.""" + return torch.distributed.get_rank(group=get_data_parallel_group()) + + +def _set_global_memory_buffer(): + """Initialize global buffer""" + global _GLOBAL_MEMORY_BUFFER + assert _GLOBAL_MEMORY_BUFFER is None, 'global memory buffer is already initialized' + _GLOBAL_MEMORY_BUFFER = GlobalMemoryBuffer() + + +def get_global_memory_buffer(): + """Return the global GlobalMemoryBuffer object""" + assert _GLOBAL_MEMORY_BUFFER is not None, 'global memory buffer is not initialized' + return _GLOBAL_MEMORY_BUFFER + + +def destroy_model_parallel(): + """Set the groups to none.""" + global _MODEL_PARALLEL_GROUP + _MODEL_PARALLEL_GROUP = None + 
global _TENSOR_MODEL_PARALLEL_GROUP + _TENSOR_MODEL_PARALLEL_GROUP = None + global _PIPELINE_MODEL_PARALLEL_GROUP + _PIPELINE_MODEL_PARALLEL_GROUP = None + global _DATA_PARALLEL_GROUP + _DATA_PARALLEL_GROUP = None + global _EMBEDDING_GROUP + _EMBEDDING_GROUP = None + global _POSITION_EMBEDDING_GROUP + _POSITION_EMBEDDING_GROUP = None + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_TENSOR_MODEL_PARALLEL_RANK + _MPU_TENSOR_MODEL_PARALLEL_RANK = None + global _MPU_PIPELINE_MODEL_PARALLEL_RANK + _MPU_PIPELINE_MODEL_PARALLEL_RANK = None + global _GLOBAL_MEMORY_BUFFER + _GLOBAL_MEMORY_BUFFER = None diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__init__.py b/multilinguality_megatron/megatron/core/tensor_parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..de2263b1b3cd258045dff65454c18e61049fb28b --- /dev/null +++ b/multilinguality_megatron/megatron/core/tensor_parallel/__init__.py @@ -0,0 +1,65 @@ +from .cross_entropy import vocab_parallel_cross_entropy, vocab_parallel_max_indices +from .data import broadcast_data + +from .layers import ( + ColumnParallelLinear, + RowParallelLinear, + VocabParallelEmbedding, + set_tensor_model_parallel_attributes, + set_defaults_if_not_set_tensor_model_parallel_attributes, + copy_tensor_model_parallel_attributes, + param_is_not_tensor_parallel_duplicate, + linear_with_grad_accumulation_and_async_allreduce + +) + +from .mappings import ( + copy_to_tensor_model_parallel_region, + gather_from_tensor_model_parallel_region, + gather_from_sequence_parallel_region, + scatter_to_tensor_model_parallel_region, + scatter_to_sequence_parallel_region, +) + +from .random import ( + checkpoint, + get_cuda_rng_tracker, + model_parallel_cuda_manual_seed, +) + +from .utils import ( + split_tensor_along_last_dim, + split_tensor_into_1d_equal_chunks, + gather_split_1d_tensor, +) + +__all__ = [ + # cross_entropy.py + "vocab_parallel_cross_entropy", + # data.py + "broadcast_data", + #layers.py + "ColumnParallelLinear", + "RowParallelLinear", + "VocabParallelEmbedding", + "set_tensor_model_parallel_attributes", + "set_defaults_if_not_set_tensor_model_parallel_attributes", + "copy_tensor_model_parallel_attributes", + "param_is_not_tensor_parallel_duplicate", + "linear_with_grad_accumulation_and_async_allreduce", + # mappings.py + "copy_to_tensor_model_parallel_region", + "gather_from_tensor_model_parallel_region", + "gather_from_sequence_parallel_region", +# "reduce_from_tensor_model_parallel_region", + "scatter_to_tensor_model_parallel_region", + "scatter_to_sequence_parallel_region", + # random.py + "checkpoint", + "get_cuda_rng_tracker", + "model_parallel_cuda_manual_seed", + # utils.py + "split_tensor_along_last_dim", + "split_tensor_into_1d_equal_chunks", + "gather_split_1d_tensor", +] diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37dd2e5f84b652ec969954d3b68fbfc723a85a10 Binary files /dev/null and 
b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/cross_entropy.cpython-39.pyc b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/cross_entropy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b99123cfadbeca64a08fd1797e54c363d811db24 Binary files /dev/null and b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/cross_entropy.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/data.cpython-39.pyc b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/data.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf942deb729e11be56c71acd07d7641e83c6675c Binary files /dev/null and b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/data.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/layers.cpython-39.pyc b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/layers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aff635b4a23e6c588dc711bb3dcfb38b529a2d29 Binary files /dev/null and b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/layers.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/mappings.cpython-39.pyc b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/mappings.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cb69b44498fec67dbb8a0bb619fe105bff9fb73 Binary files /dev/null and b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/mappings.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/random.cpython-39.pyc b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/random.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89fc88db7be042a88a45cec3f381d7f314b75ee4 Binary files /dev/null and b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/random.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/utils.cpython-39.pyc b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47d5e467a1cabb7b1db0036eb6dd612e24e24343 Binary files /dev/null and b/multilinguality_megatron/megatron/core/tensor_parallel/__pycache__/utils.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/cross_entropy.py b/multilinguality_megatron/megatron/core/tensor_parallel/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..25c0ee5c4e945d9bb8bf697de73d814f2334c1bf --- /dev/null +++ b/multilinguality_megatron/megatron/core/tensor_parallel/cross_entropy.py @@ -0,0 +1,175 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_group, + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size +) + +from .utils import VocabUtility + + +class _VocabParallelCrossEntropy(torch.autograd.Function): + + @staticmethod + def forward(ctx, vocab_parallel_logits, target, label_smoothing=0.0): + + # Maximum value along vocab dimension across all GPUs. 
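+        # (Overall scheme of the steps below: the global max is subtracted for
+        # numerical stability; each rank then picks out the target logits that
+        # fall inside its vocabulary shard and zeroes the rest, so a SUM
+        # all-reduce recovers the full predicted logits; a second SUM
+        # all-reduce of the per-shard exp sums gives the full softmax
+        # normalizer, and the loss is log(sum_exp) - predicted_logit.)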
+ logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] + torch.distributed.all_reduce(logits_max, + op=torch.distributed.ReduceOp.MAX, + group=get_tensor_model_parallel_group()) + # Subtract the maximum value. + vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1) + + # Get the partition's vocab indecies + get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size + partition_vocab_size = vocab_parallel_logits.size()[-1] + rank = get_tensor_model_parallel_rank() + world_size = get_tensor_model_parallel_world_size() + vocab_start_index, vocab_end_index = get_vocab_range( + partition_vocab_size, rank, world_size) + + # Create a mask of valid vocab ids (1 means it needs to be masked). + target_mask = (target < vocab_start_index) | (target >= vocab_end_index) + masked_target = target.clone() - vocab_start_index + masked_target[target_mask] = 0 + + # Get predicted-logits = logits[target]. + # For Simplicity, we convert logits to a 2-D tensor with size + # [*, partition-vocab-size] and target to a 1-D tensor of size [*]. + logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size) + masked_target_1d = masked_target.view(-1) + arange_1d = torch.arange(start=0, end=logits_2d.size()[0], + device=logits_2d.device) + predicted_logits_1d = logits_2d[arange_1d, masked_target_1d] + predicted_logits_1d = predicted_logits_1d.clone().contiguous() + predicted_logits = predicted_logits_1d.view_as(target) + predicted_logits[target_mask] = 0.0 + # All reduce is needed to get the chunks from other GPUs. + torch.distributed.all_reduce(predicted_logits, + op=torch.distributed.ReduceOp.SUM, + group=get_tensor_model_parallel_group()) + + # Sum of exponential of logits along vocab dimension across all GPUs. + exp_logits = vocab_parallel_logits + torch.exp(vocab_parallel_logits, out=exp_logits) + sum_exp_logits = exp_logits.sum(dim=-1) + torch.distributed.all_reduce(sum_exp_logits, + op=torch.distributed.ReduceOp.SUM, + group=get_tensor_model_parallel_group()) + + # Loss = log(sum(exp(logits))) - predicted-logit. + loss = torch.log(sum_exp_logits) - predicted_logits + + # Normalize and optionally smooth logits + exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) + + vocab_size = exp_logits.size(-1) + if label_smoothing > 0: + """ + We'd like to assign 1 / (K - 1) probability mass to every index that is not the ground truth. + = (1 - alpha) * y_gt + alpha * mean(y_{i for i != gt}) + = (1 - alpha) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i + = ((K - 1) * (1 - alpha) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i + = (K * (1 - alpha) - 1) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i} y_i + = (1 - (alpha * K) / (K - 1)) * y_gt + ( (alpha * K) / (K - 1) ) * \sum_{i} y_i / K + From: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/losses/smoothed_cross_entropy.py + """ + assert 1.0 > label_smoothing > 0.0 + smoothing = label_smoothing * vocab_size / (vocab_size - 1) + + # Exp logits at this point are normalized probabilities. So we can just take the log to get log-probs. + log_probs = torch.log(exp_logits) + mean_log_probs = log_probs.mean(dim=-1) + loss = (1.0 - smoothing) * loss - smoothing * mean_log_probs + + ctx.label_smoothing, ctx.vocab_size = label_smoothing, vocab_size + ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) + + # Store softmax, target-mask and masked-target for backward pass. 
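+        # (Numeric check of the smoothing above, with hypothetical values:
+        # for vocab_size K = 5 and label_smoothing = 0.1, smoothing =
+        # 0.1 * 5 / 4 = 0.125, so the smoothed loss becomes
+        # 0.875 * nll - 0.125 * mean_log_probs.)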
+ ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) + + return loss + + @staticmethod + def backward(ctx, grad_output): + + # Retreive tensors from the forward path. + softmax, target_mask, masked_target_1d = ctx.saved_tensors + label_smoothing, vocab_size = ctx.label_smoothing, ctx.vocab_size + + # All the inputs have softmax as thier gradient. + grad_input = softmax + # For simplicity, work with the 2D gradient. + partition_vocab_size = softmax.size()[-1] + grad_2d = grad_input.view(-1, partition_vocab_size) + + # Add the gradient from matching classes. + arange_1d = torch.arange(start=0, end=grad_2d.size()[0], + device=grad_2d.device) + + softmax_update = 1.0 - target_mask.view(-1).float() + + if label_smoothing > 0: + smoothing = label_smoothing * vocab_size / (vocab_size - 1) + grad_2d[arange_1d, masked_target_1d] -= (1.0 - smoothing) * softmax_update + average_grad = 1 / vocab_size + grad_2d[arange_1d, :] -= smoothing * average_grad + else: + grad_2d[arange_1d, masked_target_1d] -= softmax_update + + # Finally elementwise multiplication with the output gradients. + grad_input.mul_(grad_output.unsqueeze(dim=-1)) + + return grad_input, None, None + + +def vocab_parallel_cross_entropy(vocab_parallel_logits, target, label_smoothing=0.0): + """ + Performs cross entropy loss when logits are split across tensor parallel ranks + + Arguments: + vocab_parallel_logits: logits split across tensor parallel ranks + dimension is [sequence_length, batch_size, hidden_size] + + target: correct vocab ids of dimseion [sequence_length, micro_batch_size] + + lobal_smoothing: smoothing factor, must be in range [0.0, 1.0) + default is no smoothing (=0.0) + """ + return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target, label_smoothing) + + +def vocab_parallel_max_indices(logits): + """ + Performs argmax(dim=-1) over logits across tensor parallel ranks + Arguments: + logits: logits split across tensor parallel ranks + dimension is [sequence_length, batch_size, hidden_size] + """ + world_size = get_tensor_model_parallel_world_size() + if world_size == 1: + return logits.argmax(dim=-1) + + seq_length, batch_size, partition_vocab_size = logits.shape + max_values, max_indices = logits.max(dim=-1) + + # Get the partition's vocab indices + get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size + rank = get_tensor_model_parallel_rank() + vocab_start_index, _ = get_vocab_range(partition_vocab_size, rank, world_size) + max_indices = max_indices + vocab_start_index + + # gather max values and indices of all ranks + max_values_group = torch.zeros(world_size, seq_length, batch_size, dtype=logits.dtype, device=logits.device) + max_indices_group = torch.zeros(world_size, seq_length, batch_size, dtype=torch.int64, device=logits.device) + torch.distributed.all_gather_into_tensor(max_values_group, max_values, group=get_tensor_model_parallel_group()) + torch.distributed.all_gather_into_tensor(max_indices_group, max_indices, group=get_tensor_model_parallel_group()) + + # find rank with maximum value for each position and gather corresponding indices + max_group_indices = torch.argmax(max_values_group, dim=0, keepdim=True) + max_indices = torch.gather(max_indices_group, dim=0, index=max_group_indices).squeeze(0) + return max_indices diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/data.py b/multilinguality_megatron/megatron/core/tensor_parallel/data.py new file mode 100644 index 0000000000000000000000000000000000000000..b911790dae8275e5b5102184cd49b9b94254eff9 --- 
/dev/null +++ b/multilinguality_megatron/megatron/core/tensor_parallel/data.py @@ -0,0 +1,105 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_group, + get_tensor_model_parallel_rank, + get_tensor_model_parallel_src_rank, +) + + +_MAX_DATA_DIM = 5 + + +def _check_data_types(keys, data, target_dtype): + """Check that all the keys have the same target data type.""" + for key in keys: + assert data[key].dtype == target_dtype, '{} has data type {} which '\ + 'is different than {}'.format(key, data[key].dtype, target_dtype) + + +def _build_key_size_numel_dictionaries(keys, data): + """Build the size on rank 0 and broadcast.""" + max_dim = _MAX_DATA_DIM + sizes = [0 for _ in range(max_dim) for _ in keys] + + # Pack the sizes on rank zero. + if get_tensor_model_parallel_rank() == 0: + offset = 0 + for key in keys: + assert data[key].dim() < max_dim, 'you should increase MAX_DATA_DIM' + size = data[key].size() + for i, s in enumerate(size): + sizes[i + offset] = s + offset += max_dim + + # Move to GPU and broadcast. + sizes_cuda = torch.cuda.LongTensor(sizes) + torch.distributed.broadcast(sizes_cuda, get_tensor_model_parallel_src_rank(), + group=get_tensor_model_parallel_group()) + + # Move back to cpu and unpack. + sizes_cpu = sizes_cuda.cpu() + key_size = {} + key_numel = {} + total_numel = 0 + offset = 0 + for key in keys: + i = 0 + size = [] + numel = 1 + while sizes_cpu[offset + i] > 0: + this_size = sizes_cpu[offset + i] + size.append(this_size) + numel *= this_size + i += 1 + key_size[key] = size + key_numel[key] = numel + total_numel += numel + offset += max_dim + + return key_size, key_numel, total_numel + + +def broadcast_data(keys, data, datatype): + """Broadcast data from rank zero of each model parallel group to the + members of the same model parallel group. + + Arguments: + keys: list of keys in the data disctionary to be broadcasted + data: data dictionary of string keys and cpu tensor values. + datatype: torch data type of all tensors in data associated + with keys. + """ + # Build (key, size) and (key, number of elements) dictionaries along + # with the total number of elements on all ranks. + key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, + data) + + # Pack on rank zero. + if get_tensor_model_parallel_rank() == 0: + # Check that all keys have the same data type. + _check_data_types(keys, data, datatype) + # Flatten the data associated with the keys + flatten_data = torch.cat( + [data[key].contiguous().view(-1) for key in keys], dim=0).cuda() + else: + flatten_data = torch.empty(total_numel, + device=torch.cuda.current_device(), + dtype=datatype) + + # Broadcast + torch.distributed.broadcast(flatten_data, get_tensor_model_parallel_src_rank(), + group=get_tensor_model_parallel_group()) + + # Unpack + output = {} + offset = 0 + for key in keys: + size = key_size[key] + numel = key_numel[key] + output[key] = flatten_data.narrow(0, offset, numel).view(size) + offset += numel + + return output diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/layers.py b/multilinguality_megatron/megatron/core/tensor_parallel/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..f930e6f97f30b621702fbe8e67f4ae693a53cd4b --- /dev/null +++ b/multilinguality_megatron/megatron/core/tensor_parallel/layers.py @@ -0,0 +1,701 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +# Parts of the code here are adapted from PyTorch +# repo: https://github.com/pytorch/pytorch + +import os +from typing import Optional +import warnings + +import torch +import torch.nn.functional as F +import torch.nn.init as init +from torch.nn.parameter import Parameter + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + get_tensor_model_parallel_group, + get_global_memory_buffer, +) +from .mappings import ( + copy_to_tensor_model_parallel_region, + gather_from_tensor_model_parallel_region, + reduce_from_tensor_model_parallel_region, + scatter_to_tensor_model_parallel_region, + reduce_scatter_to_sequence_parallel_region, +) + +from .random import get_cuda_rng_tracker +from .utils import ( + divide, + VocabUtility, +) + +_grad_accum_fusion_available = True +try: + import fused_weight_gradient_mlp_cuda +except ImportError: + _grad_accum_fusion_available = False + +_MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {'tensor_model_parallel': False, + 'partition_dim': -1, + 'partition_stride': 1} + + +def param_is_not_tensor_parallel_duplicate(param): + return (hasattr(param, 'tensor_model_parallel') and + param.tensor_model_parallel) or ( + get_tensor_model_parallel_rank() == 0) + + +def set_tensor_model_parallel_attributes(tensor, is_parallel, dim, stride): + # Make sure the attributes are not set. + for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS: + assert not hasattr(tensor, attribute) + # Set the attributes. + setattr(tensor, 'tensor_model_parallel', is_parallel) + setattr(tensor, 'partition_dim', dim) + setattr(tensor, 'partition_stride', stride) + + +def set_defaults_if_not_set_tensor_model_parallel_attributes(tensor): + def maybe_set(attribute, value): + if not hasattr(tensor, attribute): + setattr(tensor, attribute, value) + for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS: + maybe_set(attribute, _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS[attribute]) + + +def copy_tensor_model_parallel_attributes(destination_tensor, source_tensor): + def maybe_copy(attribute): + if hasattr(source_tensor, attribute): + setattr(destination_tensor, attribute, + getattr(source_tensor, attribute)) + for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS: + maybe_copy(attribute) + + +def _initialize_affine_weight_gpu(weight, init_method, + partition_dim, stride=1): + """Initialize affine weight for model parallel on GPU.""" + set_tensor_model_parallel_attributes(tensor=weight, + is_parallel=True, + dim=partition_dim, + stride=stride) + + with get_cuda_rng_tracker().fork(): + init_method(weight) + + +def _initialize_affine_weight_cpu(weight, output_size, input_size, + per_partition_size, partition_dim, + init_method, stride=1, + return_master_weight=False, + *, params_dtype=torch.float32): + """Initialize affine weight for model parallel. 
+ + Build the master weight on all processes and scatter + the relevant chunk.""" + + set_tensor_model_parallel_attributes(tensor=weight, + is_parallel=True, + dim=partition_dim, + stride=stride) + + # Initialize master weight + master_weight = torch.empty(output_size, input_size, + dtype=torch.float, + requires_grad=False) + init_method(master_weight) + master_weight = master_weight.to(dtype=params_dtype) + + # Split and copy + per_partition_per_stride_size = divide(per_partition_size, stride) + weight_list = torch.split(master_weight, per_partition_per_stride_size, + dim=partition_dim) + rank = get_tensor_model_parallel_rank() + world_size = get_tensor_model_parallel_world_size() + my_weight_list = weight_list[rank::world_size] + + with torch.no_grad(): + torch.cat(my_weight_list, dim=partition_dim, out=weight) + if return_master_weight: + return master_weight + return None + + +class VocabParallelEmbedding(torch.nn.Module): + """Embedding parallelized in the vocabulary dimension. + + This is mainly adapted from torch.nn.Embedding and all the default + values are kept. + Arguments: + num_embeddings: vocabulary size. + embedding_dim: size of hidden state. + + Keyword Arguments: + init_method: method to initialize weights. + params_dtype + use_cpu_initialization + perform_initialization + """ + + def __init__(self, num_embeddings: int, embedding_dim: int, *, + init_method=init.xavier_normal_, + params_dtype: torch.dtype=torch.float32, + use_cpu_initialization: bool=False, + perform_initialization: bool=True): + super(VocabParallelEmbedding, self).__init__() + # Keep the input dimensions. + self.num_embeddings = num_embeddings + self.embedding_dim = embedding_dim + # Set the detauls for compatibility. + self.padding_idx = None + self.max_norm = None + self.norm_type = 2. + self.scale_grad_by_freq = False + self.sparse = False + self._weight = None + self.tensor_model_parallel_size = get_tensor_model_parallel_world_size() + # Divide the weight matrix along the vocaburaly dimension. + self.vocab_start_index, self.vocab_end_index = \ + VocabUtility.vocab_range_from_global_vocab_size( + self.num_embeddings, get_tensor_model_parallel_rank(), + self.tensor_model_parallel_size) + self.num_embeddings_per_partition = self.vocab_end_index - \ + self.vocab_start_index + + # Allocate weights and initialize. + if use_cpu_initialization: + self.weight = Parameter(torch.empty( + self.num_embeddings_per_partition, self.embedding_dim, + dtype=params_dtype)) + if perform_initialization: + _initialize_affine_weight_cpu( + self.weight, self.num_embeddings, self.embedding_dim, + self.num_embeddings_per_partition, 0, init_method, + params_dtype=params_dtype) + else: + self.weight = Parameter(torch.empty( + self.num_embeddings_per_partition, self.embedding_dim, + device=torch.cuda.current_device(), dtype=params_dtype)) + if perform_initialization: + _initialize_affine_weight_gpu(self.weight, init_method, + partition_dim=0, stride=1) + + def forward(self, input_): + if self.tensor_model_parallel_size > 1: + # Build the mask. + input_mask = (input_ < self.vocab_start_index) | \ + (input_ >= self.vocab_end_index) + # Mask the input. + masked_input = input_.clone() - self.vocab_start_index + masked_input[input_mask] = 0 + else: + masked_input = input_ + # Get the embeddings. + output_parallel = F.embedding(masked_input, + self.weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse) + # Mask the output embedding. 
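+        # (For illustration, with hypothetical sizes: a padded vocabulary of
+        # 32,000 split over tensor_model_parallel_size = 4 gives each rank the
+        # rows [0, 8000), [8000, 16000), and so on. Ids outside the local
+        # range were looked up at local row 0 above; their output rows are
+        # zeroed here, so the all-reduce below leaves exactly one rank's
+        # embedding per token.)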
+ if self.tensor_model_parallel_size > 1: + output_parallel[input_mask, :] = 0.0 + # Reduce across all the model parallel GPUs. + output = reduce_from_tensor_model_parallel_region(output_parallel) + return output + + +class LinearWithGradAccumulationAndAsyncCommunication(torch.autograd.Function): + """See linear_with_grad_accumulation_and_async_allreduce""" + + @staticmethod + def forward(ctx, input, weight, bias, gradient_accumulation_fusion, + async_grad_allreduce, sequence_parallel): + ctx.save_for_backward(input, weight) + ctx.use_bias = bias is not None + ctx.gradient_accumulation_fusion = gradient_accumulation_fusion + ctx.async_grad_allreduce = async_grad_allreduce + ctx.sequence_parallel = sequence_parallel + + if sequence_parallel: + world_size = get_tensor_model_parallel_world_size() + dim_size = list(input.size()) + dim_size[0] = dim_size[0] * world_size + + all_gather_buffer = \ + get_global_memory_buffer().get_tensor(dim_size, input.dtype, "mpu") + torch.distributed._all_gather_base( + all_gather_buffer, + input, + group=get_tensor_model_parallel_group()) + total_input = all_gather_buffer + else: + total_input = input + + output = torch.matmul(total_input, weight.t()) + if bias is not None: + output = output + bias + return output + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + use_bias = ctx.use_bias + + if ctx.sequence_parallel: + world_size = get_tensor_model_parallel_world_size() + dim_size = list(input.size()) + dim_size[0] = dim_size[0] * world_size + + all_gather_buffer = \ + get_global_memory_buffer().get_tensor(dim_size, input.dtype, "mpu") + handle = torch.distributed._all_gather_base( + all_gather_buffer, + input, + group=get_tensor_model_parallel_group(), async_op=True) + + # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the + # gather is scheduled before the input gradient computation + total_input = all_gather_buffer + else: + total_input = input + grad_input = grad_output.matmul(weight) + + if ctx.sequence_parallel: + handle.wait() + + # Convert the tensor shapes to 2D for execution compatibility + grad_output = grad_output.view(grad_output.shape[0] * grad_output.shape[1], + grad_output.shape[2]) + total_input = total_input.view(total_input.shape[0] * total_input.shape[1], + total_input.shape[2]) + + if ctx.async_grad_allreduce: + # Asynchronous all-reduce + handle = torch.distributed.all_reduce( + grad_input, group=get_tensor_model_parallel_group(), async_op=True) + # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the + # all-reduce is scheduled before the weight gradient computation + + if ctx.sequence_parallel: + assert not ctx.async_grad_allreduce + dim_size = list(input.size()) + sub_grad_input = torch.empty(dim_size, dtype=input.dtype, + device=torch.cuda.current_device(), + requires_grad=False) + # reduce_scatter + handle = torch.distributed.reduce_scatter_tensor(sub_grad_input, grad_input, + group=get_tensor_model_parallel_group(), + async_op=True) + # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the + # reduce scatter is scheduled before the weight gradient computation + + if ctx.gradient_accumulation_fusion: + if weight.main_grad.dtype == torch.float32: + fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, weight.main_grad) + elif weight.main_grad.dtype == torch.float16: + fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, weight.main_grad) + else: + raise RuntimeError("Unsupported gradient type for gradient 
accumulation fusion") + grad_weight = None + else: + grad_weight = grad_output.t().matmul(total_input) + grad_bias = grad_output.sum(dim=0) if use_bias else None + + if ctx.sequence_parallel: + handle.wait() + return sub_grad_input, grad_weight, grad_bias, None, None, None + + if ctx.async_grad_allreduce: + handle.wait() + + return grad_input, grad_weight, grad_bias, None, None, None + + +def linear_with_grad_accumulation_and_async_allreduce( + input: torch.Tensor, + weight: torch.Tensor, + bias: Optional[torch.Tensor], + gradient_accumulation_fusion: bool, + async_grad_allreduce: bool, + sequence_parallel_enabled: bool, +) -> torch.Tensor: + """Linear layer execution with asynchronous communication and + gradient accumulation fusion in backprop. + + This has the option to accumulate the result of backprop + calculation into an existing gradient buffer, preventing the need + to do an additional addition kernel after the gradient + calculation. + + Additionally, the tensor parallel all reduce of the input + gradients can be done asynchronously with the calculation of + the weight gradients. + + In the case of sequence parallelism, the reduce scatter of the + input gradients is done asynchronously with the calcluation of the + weight gradients. + + Use of this module requires that the environment variable + CUDA_DEVICE_MAX_CONNECTIONS=1. There are a few collective + operations, noted in the code, that should be scheduled before + compute kernels to overlap the communication with the computation, + which is necessary for a speedup but not for correctness so that + ordering isn't imposed by the scheduler. Setting + CUDA_DEVICE_MAX_CONNECTIONS=1 forces the kernels to be scheduled + in the order they are called. + + Arguments: + + input (torch.Tensor required): input like torch.nn.functional.linear + + weight (torch.Tensor required): weight like torch.nn.functional.linear + + bias (torch.Tensor optional): bias like torch.nn.functional.linear + + gradient_accumulation_fusion (bool required): Perform the gradient + accumulation fusion, requires the custom CUDA extension + fused_weight_gradient_mlp_cuda module. To use + gradient_accumulation_fusion you must install APEX with + --cpp_ext and --cuda_ext. For example: "pip install + --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext .\" + " Note that the extension requires CUDA>=11. Otherwise, you + must turn off gradient accumulation fusion." + + async_grad_allreduce (bool required): Do the allreduce of input + gradients asyncronously with the computation of weight + gradients. If sequence_parallel_enabled is True, this must be + False, as no all reduce is performed. + + sequence_parallel_enabled (bool required): Indicates that sequence + parallelism is used and thus in the forward pass the input is + all gathered, and the backward pass the input gradients are + reduce scattered. 
+ """ + args = [ + input, + weight, + bias, + gradient_accumulation_fusion, + async_grad_allreduce, + sequence_parallel_enabled, + ] + + if not linear_with_grad_accumulation_and_async_allreduce.warned: + if os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1": + if sequence_parallel_enabled: + warnings.warn( + "When using sequence parallelism it is recommended to set the " + "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for " + "maximum speedup") + linear_with_grad_accumulation_and_async_allreduce.warned = True + + if async_grad_allreduce: + warnings.warn( + "When using async grad allreduce it is recommended to set the " + "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for " + "maximum speedup") + linear_with_grad_accumulation_and_async_allreduce.warned = True + + with torch.cuda.amp.autocast(enabled=False): + return LinearWithGradAccumulationAndAsyncCommunication.apply(*args) +linear_with_grad_accumulation_and_async_allreduce.warned = False + + +class ColumnParallelLinear(torch.nn.Module): + """Linear layer with column parallelism. + + The linear layer is defined as Y = XA + b. A is parallelized along + its second dimension as A = [A_1, ..., A_p]. + + Arguments: + input_size: first dimension of matrix A. + output_size: second dimension of matrix A. + + Keyword Arguments + bias: If true, add bias + gather_output: If true, call all-gather on output and make Y available + to all GPUs, otherwise, every GPU will have its output + which is Y_i = XA_i + init_method: method to initialize weights. Note that bias is always set + to zero. + stride: For the strided linear layers. + keep_master_weight_for_test: This was added for testing and should be + set to False. It returns the master weights + used for initialization. + skip_bias_add: This was added to enable performance optimations where bias + can be fused with other elementwise operations. we skip + adding bias but instead return it. + async_tensor_model_parallel_allreduce: + params_dtype: + use_cpu_initialization: + gradient_accumulation_fusion: + sequence_parallel_enabled: + """ + + def __init__(self, input_size, output_size, *, + bias=True, gather_output=True, + init_method=init.xavier_normal_, + stride=1, + keep_master_weight_for_test=False, + skip_bias_add=False, + async_tensor_model_parallel_allreduce=True, + params_dtype=torch.float32, + use_cpu_initialization=False, + perform_initialization=True, + gradient_accumulation_fusion=False, + sequence_parallel_enabled: bool = False, + world_size: int=None): + super(ColumnParallelLinear, self).__init__() + # Keep input parameters + self.input_size = input_size + self.output_size = output_size + self.gather_output = gather_output + # Divide the weight matrix along the last dimension. + self.output_size_per_partition = divide(output_size, world_size) + self.skip_bias_add = skip_bias_add + + # Parameters. + # Note: torch.nn.functional.linear performs XA^T + b and as a result + # we allocate the transpose. + # Initialize weight. 
+ if use_cpu_initialization: + self.weight = Parameter(torch.empty(self.output_size_per_partition, + self.input_size, + dtype=params_dtype)) + if perform_initialization: + self.master_weight = _initialize_affine_weight_cpu( + self.weight, self.output_size, self.input_size, + self.output_size_per_partition, 0, init_method, + stride=stride, return_master_weight=keep_master_weight_for_test) + else: + self.weight = Parameter(torch.empty( + self.output_size_per_partition, self.input_size, + device=torch.cuda.current_device(), dtype=params_dtype)) + if perform_initialization: + _initialize_affine_weight_gpu(self.weight, init_method, + partition_dim=0, stride=stride) + + if bias: + if use_cpu_initialization: + self.bias = Parameter(torch.empty( + self.output_size_per_partition, dtype=params_dtype)) + else: + self.bias = Parameter(torch.empty( + self.output_size_per_partition, + device=torch.cuda.current_device(), + dtype=params_dtype)) + set_tensor_model_parallel_attributes(self.bias, True, 0, stride) + # Always initialize bias to zero. + with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter('bias', None) + + self.async_tensor_model_parallel_allreduce = ( + async_tensor_model_parallel_allreduce and + world_size > 1) + if sequence_parallel_enabled: + if world_size <= 1: + warnings.warn( + f"`sequence_parallel_enabled` is set to `True`, but tensor model parallel size is {world_size}. " + f"Disabling sequence parallel." + ) + sequence_parallel_enabled = False + self.sequence_parallel_enabled = sequence_parallel_enabled + + if gradient_accumulation_fusion: + if not _grad_accum_fusion_available: + raise RuntimeError( + "ColumnParallelLinear was called with gradient_accumulation_fusion set " + "to True but the custom CUDA extension fused_weight_gradient_mlp_cuda " + "module is not found. To use gradient_accumulation_fusion you must " + "install APEX with --cpp_ext and --cuda_ext. For example: " + "pip install --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext .\" " + "Note that the extension requires CUDA>=11. Otherwise, you must turn off " + "gradient accumulation fusion." + ) + self.gradient_accumulation_fusion = gradient_accumulation_fusion + + if self.async_tensor_model_parallel_allreduce and self.sequence_parallel_enabled: + raise RuntimeError( + "`async_tensor_model_parallel_allreduce` and `sequence_parallel_enabled` " + "cannot be enabled at the same time." + ) + + def forward(self, input_): + """ + Args: + input_: 3D tensor whose order of dimension is [sequence, batch, hidden] + + Returns: + - output + - bias + """ + bias = self.bias if not self.skip_bias_add else None + + if self.async_tensor_model_parallel_allreduce or \ + self.sequence_parallel_enabled: + input_parallel = input_ + else: + input_parallel = copy_to_tensor_model_parallel_region(input_) + # Matrix multiply. + output_parallel = linear_with_grad_accumulation_and_async_allreduce( + input=input_parallel, + weight=self.weight, + bias=bias, + gradient_accumulation_fusion=self.gradient_accumulation_fusion, + async_grad_allreduce=self.async_tensor_model_parallel_allreduce, + sequence_parallel_enabled=self.sequence_parallel_enabled, + ) + if self.gather_output: + # All-gather across the partitions. 
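+ # The all-gather below reassembles the full output dimension on every
+ # rank, which is incompatible with keeping activations sharded along
+ # the sequence dimension, hence the assert that follows.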
+ assert not self.sequence_parallel_enabled + output = gather_from_tensor_model_parallel_region(output_parallel) + else: + output = output_parallel + output_bias = self.bias if self.skip_bias_add else None + return output, output_bias + + +class RowParallelLinear(torch.nn.Module): + """Linear layer with row parallelism. + + The linear layer is defined as Y = XA + b. A is parallelized along + its first dimension and X along its second dimension as: + - - + | A_1 | + | . | + A = | . | X = [X_1, ..., X_p] + | . | + | A_p | + - - + Arguments: + input_size: first dimension of matrix A. + output_size: second dimension of matrix A. + + Keyword Arguments: + bias: If true, add bias. Note that bias is not parallelized. + input_is_parallel: If true, we assume that the input is already + split across the GPUs and we do not split + again. + init_method: method to initialize weights. Note that bias is always set + to zero. + stride: For the strided linear layers. + keep_master_weight_for_test: This was added for testing and should be + set to False. It returns the master weights + used for initialization. + skip_bias_add: This was added to enable performance optimization where bias + can be fused with other elementwise operations. We skip + adding bias but instead return it. + params_dtype: + use_cpu_initialization: + perform_initialization: + gradient_accumulation_fusion: + sequence_parallel_enabled: + """ + + def __init__(self, input_size, output_size, *, + bias=True, input_is_parallel=False, + init_method=init.xavier_normal_, stride=1, + keep_master_weight_for_test=False, + skip_bias_add=False, + params_dtype=torch.float32, + use_cpu_initialization=False, + perform_initialization=True, + gradient_accumulation_fusion=False, + sequence_parallel_enabled: bool = False, + world_size: int=None + ): + super(RowParallelLinear, self).__init__() + # Keep input parameters + self.input_size = input_size + self.output_size = output_size + self.input_is_parallel = input_is_parallel + # Divide the weight matrix along the last dimension. + self.input_size_per_partition = divide(input_size, world_size) + self.skip_bias_add = skip_bias_add + self.gradient_accumulation_fusion = gradient_accumulation_fusion + self.sequence_parallel_enabled = sequence_parallel_enabled + if self.sequence_parallel_enabled and not self.input_is_parallel: + raise RuntimeError("To enable `sequence_parallel_enabled`, `input_is_parallel` must be `True`") + + # Parameters. + # Note: torch.nn.functional.linear performs XA^T + b and as a result + # we allocate the transpose. + # Initialize weight. 
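+ # Illustrative note: each rank holds a [output_size, input_size_per_partition]
+ # shard of the weight, matching the X = [X_1, ..., X_p] split of the input.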
+ if use_cpu_initialization: + self.weight = Parameter(torch.empty(self.output_size, + self.input_size_per_partition, + dtype=params_dtype)) + if perform_initialization: + self.master_weight = _initialize_affine_weight_cpu( + self.weight, self.output_size, self.input_size, + self.input_size_per_partition, 1, init_method, + stride=stride, return_master_weight=keep_master_weight_for_test, + params_dtype=params_dtype) + else: + self.weight = Parameter(torch.empty( + self.output_size, self.input_size_per_partition, + device=torch.cuda.current_device(), dtype=params_dtype)) + if perform_initialization: + _initialize_affine_weight_gpu(self.weight, init_method, + partition_dim=1, stride=stride) + if bias: + if use_cpu_initialization: + self.bias = Parameter(torch.empty(self.output_size, + dtype=params_dtype)) + else: + self.bias = Parameter(torch.empty( + self.output_size, device=torch.cuda.current_device(), + dtype=params_dtype)) + setattr(self.bias, 'sequence_parallel', sequence_parallel_enabled) + + # Always initialize bias to zero. + with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter('bias', None) + + def forward(self, input_): + """ + Args: + input_: 3D tensor whose order of dimension is [sequence, batch, hidden] + + Returns: + - output + - bias + """ + # Set up backprop all-reduce. + if self.input_is_parallel: + input_parallel = input_ + else: + assert not self.sequence_parallel_enabled + input_parallel = scatter_to_tensor_model_parallel_region(input_) + # Matrix multiply. + output_parallel = linear_with_grad_accumulation_and_async_allreduce( + input=input_parallel, + weight=self.weight, + bias=None, + gradient_accumulation_fusion=self.gradient_accumulation_fusion, + async_grad_allreduce=False, + sequence_parallel_enabled=False, + ) + + # All-reduce across all the partitions. + if self.sequence_parallel_enabled: + output_ = reduce_scatter_to_sequence_parallel_region(output_parallel) + else: + output_ = reduce_from_tensor_model_parallel_region(output_parallel) + if not self.skip_bias_add: + output = output_ + self.bias if self.bias is not None else output_ + output_bias = None + else: + output = output_ + output_bias = self.bias + return output, output_bias diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/mappings.py b/multilinguality_megatron/megatron/core/tensor_parallel/mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..6d03ac7f3f26dfe84df863b4af858a979b198d4e --- /dev/null +++ b/multilinguality_megatron/megatron/core/tensor_parallel/mappings.py @@ -0,0 +1,279 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + get_tensor_model_parallel_group, +) +from .utils import split_tensor_along_last_dim + + +def _reduce(input_): + """All-reduce the input tensor across model parallel group.""" + + # Bypass the function if we are using only 1 GPU. + if get_tensor_model_parallel_world_size()==1: + return input_ + + # All-reduce. + torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group()) + + return input_ + + +def _split_along_last_dim(input_): + """Split the tensor along its last dimension and keep the + corresponding slice.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + # Split along last dimension. 
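+ # Illustrative example: a [seq, batch, 4096] tensor with world_size=4
+ # yields this rank's [seq, batch, 1024] slice.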
+ input_list = split_tensor_along_last_dim(input_, world_size) + + # Note: torch.split does not create contiguous tensors by default. + rank = get_tensor_model_parallel_rank() + output = input_list[rank].contiguous() + + return output + + +def _split_along_first_dim(input_): + """Split the tensor along its first dimension and keep the + corresponding slice.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + # Split along first dimension. + dim_size = input_.size()[0] + assert dim_size % world_size == 0, \ + "First dimension of the tensor should be divisible by tensor parallel size" + local_dim_size = dim_size // world_size + rank = get_tensor_model_parallel_rank() + dim_offset = rank * local_dim_size + + output = input_[dim_offset:dim_offset+local_dim_size].contiguous() + + return output + + +def _gather_along_last_dim(input_): + """Gather tensors and concatinate along the last dimension.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + # Size and dimension. + last_dim = input_.dim() - 1 + rank = get_tensor_model_parallel_rank() + + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + torch.distributed.all_gather(tensor_list, input_, group=get_tensor_model_parallel_group()) + + # Note: torch.cat already creates a contiguous tensor. + output = torch.cat(tensor_list, dim=last_dim).contiguous() + + return output + + +def _gather_along_first_dim(input_): + """Gather tensors and concatinate along the first dimension.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + dim_size = list(input_.size()) + dim_size[0] = dim_size[0] * world_size + + output = torch.empty(dim_size, dtype=input_.dtype, + device=torch.cuda.current_device()) + torch.distributed._all_gather_base(output, input_.contiguous(), + group=get_tensor_model_parallel_group()) + + return output + +def _reduce_scatter_along_first_dim(input_): + """Reduce-scatter the input tensor across model parallel group.""" + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. 
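+ # (With a single rank, a reduce-scatter would just be the identity.)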
+ if world_size == 1: + return input_ + + dim_size = list(input_.size()) + assert dim_size[0] % world_size == 0, \ + "First dimension of the tensor should be divisible by tensor parallel size" + + dim_size[0] = dim_size[0] // world_size + + output = torch.empty(dim_size, dtype=input_.dtype, + device=torch.cuda.current_device()) + torch.distributed.reduce_scatter_tensor(output, input_.contiguous(), + group=get_tensor_model_parallel_group()) + return output + + +class _CopyToModelParallelRegion(torch.autograd.Function): + """Pass the input to the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return input_ + + @staticmethod + def forward(ctx, input_): + return input_ + + @staticmethod + def backward(ctx, grad_output): + return _reduce(grad_output) + + +class _ReduceFromModelParallelRegion(torch.autograd.Function): + """All-reduce the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return _reduce(input_) + + @staticmethod + def forward(ctx, input_): + return _reduce(input_) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +class _ScatterToModelParallelRegion(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + + @staticmethod + def symbolic(graph, input_): + return _split_along_last_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _split_along_last_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather_along_last_dim(grad_output) + + +class _GatherFromModelParallelRegion(torch.autograd.Function): + """Gather the input from model parallel region and concatinate.""" + + @staticmethod + def symbolic(graph, input_): + return _gather_along_last_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _gather_along_last_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _split_along_last_dim(grad_output) + + +class _ScatterToSequenceParallelRegion(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + + @staticmethod + def symbolic(graph, input_): + return _split_along_first_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _split_along_first_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather_along_first_dim(grad_output) + + +class _GatherFromSequenceParallelRegion(torch.autograd.Function): + """Gather the input from sequence parallel region and concatinate.""" + + @staticmethod + def symbolic(graph, input_, tensor_parallel_output_grad=True): + return _gather_along_first_dim(input_) + + @staticmethod + def forward(ctx, input_, tensor_parallel_output_grad=True): + ctx.tensor_parallel_output_grad = tensor_parallel_output_grad + return _gather_along_first_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + tensor_parallel_output_grad = ctx.tensor_parallel_output_grad + + # If the computation graph after the gather operation is + # in the tensor parallel mode, output gradients need to reduce + # scattered and whereas if the computation is duplicated, + # output gradients need to be scattered. 
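+ # In other words: reduce-scatter sums the partial gradients coming from
+ # every tensor-parallel rank and returns this rank's sequence shard,
+ # while the plain split assumes the gradient is already identical
+ # (duplicated) on all ranks.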
+ if tensor_parallel_output_grad: + return _reduce_scatter_along_first_dim(grad_output), None + else: + return _split_along_first_dim(grad_output), None + + +class _ReduceScatterToSequenceParallelRegion(torch.autograd.Function): + """Reduce scatter the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return _reduce_scatter_along_first_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _reduce_scatter_along_first_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather_along_first_dim(grad_output) + + +# ----------------- +# Helper functions. +# ----------------- + +def copy_to_tensor_model_parallel_region(input_): + return _CopyToModelParallelRegion.apply(input_) + + +def reduce_from_tensor_model_parallel_region(input_): + return _ReduceFromModelParallelRegion.apply(input_) + + +def scatter_to_tensor_model_parallel_region(input_): + return _ScatterToModelParallelRegion.apply(input_) + + +def gather_from_tensor_model_parallel_region(input_): + return _GatherFromModelParallelRegion.apply(input_) + + +def scatter_to_sequence_parallel_region(input_): + return _ScatterToSequenceParallelRegion.apply(input_) + + +def gather_from_sequence_parallel_region(input_, tensor_parallel_output_grad=True): + return _GatherFromSequenceParallelRegion.apply(input_, tensor_parallel_output_grad) + + +def reduce_scatter_to_sequence_parallel_region(input_): + return _ReduceScatterToSequenceParallelRegion.apply(input_) + diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/random.py b/multilinguality_megatron/megatron/core/tensor_parallel/random.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7c3378ff760a1bd6bc1cb5ddaa445084150af4 --- /dev/null +++ b/multilinguality_megatron/megatron/core/tensor_parallel/random.py @@ -0,0 +1,252 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +# Parts of the code here are adapted from PyTorch +# repo: https://github.com/pytorch/pytorch + +import contextlib + +import torch +from torch import _C +from torch.cuda import _lazy_call, device as device_ctx_manager +from torch.utils.checkpoint import detach_variable + +from megatron.core.parallel_state import ( + get_data_parallel_rank, + get_tensor_model_parallel_group, + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, +) + +from .utils import ( + split_tensor_into_1d_equal_chunks, + gather_split_1d_tensor, +) + +from megatron.core.utils import safely_set_viewless_tensor_data + +# Default name for the model parallel rng tracker. +_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng' + + +def _set_cuda_rng_state(new_state, device=-1): + """Sets the random number generator state of the current GPU. + + Argumentss: + new_state (torch.ByteTensor): The desired state + This function is adapted from PyTorch repo (torch.cuda.set_rng_state) + with a single change: the input state is not cloned. Cloning caused + major performance issues for +4 GPU cases. 
+ """ + if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState): + # older PyTorch + def cb(): + with device_ctx_manager(device): + _C._cuda_setRNGState(new_state) + else: + # newer PyTorch + if device == -1: + device = torch.device('cuda') + elif isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device('cuda', device) + + def cb(): + idx = device.index + if idx is None: + idx = torch.cuda.current_device() + default_generator = torch.cuda.default_generators[idx] + default_generator.set_state(new_state) + + _lazy_call(cb) + + +class CudaRNGStatesTracker: + """Tracker for the cuda RNG states. + + Using the `add` method, a cuda rng state is initialized based on + the input `seed` and is assigned to `name`. Later, by forking the + rng state, we can perform operations and return to our starting + cuda state. + """ + + def __init__(self): + # Map from a string name to the cuda rng state. + self.states_ = {} + # Seeds are just for book keeping and ensure no seed is set twice. + self.seeds_ = set() + + def reset(self): + """Set to the initial state (no tracker).""" + self.states_ = {} + self.seeds_ = set() + + def get_states(self): + """Get rng states. Copy the dictionary so we have direct + pointers to the states, not just a pointer to the dictionary.""" + states = {} + for name in self.states_: + states[name] = self.states_[name] + return states + + def set_states(self, states): + """Set the rng states. For efficiency purposes, we do not check + the size of seed for compatibility.""" + self.states_ = states + + def add(self, name, seed): + """Track the rng state.""" + # Check seed is not already used. + if seed in self.seeds_: + raise Exception('seed {} already exists'.format(seed)) + self.seeds_.add(seed) + # Check that state is not already defined. + if name in self.states_: + raise Exception('cuda rng state {} already exists'.format(name)) + # Get the current rng state. + orig_rng_state = torch.cuda.get_rng_state() + # Set the new state and store it. + torch.cuda.manual_seed(seed) + self.states_[name] = torch.cuda.get_rng_state() + # Reset rng state to what it was. + _set_cuda_rng_state(orig_rng_state) + + @contextlib.contextmanager + def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME): + """Fork the cuda rng state, perform operations, and exit with + the original state.""" + # Check if we have added the state + if name not in self.states_: + raise Exception('cuda rng state {} is not added'.format(name)) + # Store current rng state. + orig_cuda_rng_state = torch.cuda.get_rng_state() + # Set rng state to the desired one + _set_cuda_rng_state(self.states_[name]) + # Do the stuff we wanted to do. + try: + yield + finally: + # Update the current rng state for later use. + self.states_[name] = torch.cuda.get_rng_state() + # And set the state to the original state we started with. + _set_cuda_rng_state(orig_cuda_rng_state) + + +# RNG tracker object. +_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker() + + +def get_cuda_rng_tracker(): + """Get cuda rng tracker.""" + return _CUDA_RNG_STATE_TRACKER + + +def model_parallel_cuda_manual_seed(seed): + """Initialize model parallel cuda seed. + + This function should be called after the model parallel is + initialized. Also, no torch.cuda.manual_seed should be called + after this function. Basically, this is replacement for that + function. 
+ Two set of RNG states are tracked: + default state: This is for data parallelism and is the same among a + set of model parallel GPUs but different across + different model paralle groups. This is used for + example for dropout in the non-tensor-model-parallel regions. + tensor-model-parallel state: This state is different among a set of model + parallel GPUs, but the same across data parallel + groups. This is used for example for dropout in + model parallel regions. + """ + # 2718 is just for fun and any POSITIVE value will work. + offset = seed + 2718 + tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank() + # Data parallel gets the original seed. + data_parallel_seed = seed + + _CUDA_RNG_STATE_TRACKER.reset() + # Set the default state. + torch.cuda.manual_seed(data_parallel_seed) + # and model parallel state. + _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, + tensor_model_parallel_seed) + + +class CheckpointFunction(torch.autograd.Function): + """This function is adapted from torch.utils.checkpoint with + two main changes: + 1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state` + 2) the states in the model parallel tracker are also properly + tracked/set/reset. + """ + @staticmethod + def forward(ctx, run_function, distribute_saved_activations, *args): + ctx.run_function = run_function + ctx.distribute_saved_activations \ + = distribute_saved_activations + + # Copy the rng states. + ctx.fwd_cpu_rng_state = torch.get_rng_state() + ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state() + ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + with torch.no_grad(): + outputs = run_function(*args) + + # Divide hidden states across model parallel group and only keep + # the chunk corresponding to the current rank. + if distribute_saved_activations: + ctx.input_0_shape = args[0].data.shape + safely_set_viewless_tensor_data( + args[0], + split_tensor_into_1d_equal_chunks(args[0].data, new_buffer=True)) + + # Store everything. + ctx.save_for_backward(*args) + + return outputs + + @staticmethod + def backward(ctx, *args): + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError("Checkpointing is not compatible with .grad(), " + "please use .backward() if possible") + inputs = ctx.saved_tensors + if ctx.distribute_saved_activations: + safely_set_viewless_tensor_data( + inputs[0], + gather_split_1d_tensor(inputs[0].data).view(ctx.input_0_shape)) + + # Store the current states. + bwd_cpu_rng_state = torch.get_rng_state() + bwd_cuda_rng_state = torch.cuda.get_rng_state() + bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + # Set the states to what it used to be before the forward pass. + torch.set_rng_state(ctx.fwd_cpu_rng_state) + _set_cuda_rng_state(ctx.fwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker) + + # Compute the forward pass. + detached_inputs = detach_variable(inputs) + with torch.enable_grad(): + outputs = ctx.run_function(*detached_inputs) + + # Set the states back to what it was at the start of this function. 
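+ # (This keeps random ops executed later in the backward pass on the same
+ # RNG streams they would have seen without the recomputation above.)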
+ torch.set_rng_state(bwd_cpu_rng_state) + _set_cuda_rng_state(bwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker) + + if isinstance(outputs, torch.Tensor): + outputs = (outputs,) + torch.autograd.backward(outputs, args) + grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp + for inp in detached_inputs) + return (None, None) + grads + + +def checkpoint(function, distribute_saved_activations, *args): + """Checkpoint a model or part of the model. + This has been directly copied from torch.utils.checkpoint.""" + return CheckpointFunction.apply(function, + distribute_saved_activations, *args) diff --git a/multilinguality_megatron/megatron/core/tensor_parallel/utils.py b/multilinguality_megatron/megatron/core/tensor_parallel/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a4c7cb77cc3d2279dbd973df03cecaac4a3edba0 --- /dev/null +++ b/multilinguality_megatron/megatron/core/tensor_parallel/utils.py @@ -0,0 +1,108 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch +from typing import List, Sequence + +from megatron.core.utils import divide +from megatron.core import parallel_state + +def split_tensor_along_last_dim( + tensor: torch.Tensor, + num_partitions: int, + contiguous_split_chunks: bool = False, +) -> List[torch.Tensor]: + """ Split a tensor along its last dimension. + + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + + Returns: + A list of Tensors + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + last_dim_size = divide(tensor.size()[last_dim], num_partitions) + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. + if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + +def split_tensor_into_1d_equal_chunks(tensor, new_buffer=False): + """ Break a tensor into equal 1D chunks across tensor parallel ranks. + + Returns a Tensor or View with this rank's portion of the data. + + Arguments: + tensor: The tensor to split + + Keyword Arguments: + new_buffer (bool): If True, returns a new Tensor. + If False, returns a view into the existing Tensor. + Default is False + + """ + partition_size = torch.numel(tensor) // \ + parallel_state.get_tensor_model_parallel_world_size() + start_index = partition_size * parallel_state.get_tensor_model_parallel_rank() + end_index = start_index + partition_size + if new_buffer: + data = torch.empty(partition_size, dtype=tensor.dtype, + device=torch.cuda.current_device(), + requires_grad=False) + data.copy_(tensor.view(-1)[start_index:end_index]) + else: + data = tensor.view(-1)[start_index:end_index] + return data + + +def gather_split_1d_tensor(tensor): + """ Opposite of split_tensor_into_1d_equal_chunks. Gather values from tensor + model parallel ranks. + + Returns a new Tensor with the gathered data. + + Arguments: + tensor: A Tensor or view of this rank's portion of the data. + """ + numel_gathered = torch.numel(tensor) * \ + parallel_state.get_tensor_model_parallel_world_size() + gathered = torch.empty(numel_gathered, dtype=tensor.dtype, + device=torch.cuda.current_device(), + requires_grad=False) + # TODO: This API is experimental in pytorch (as of Feb 2022) and + # this might break in future pytorch releases. 
We chose this API + # as opposed to torch.distributed.all_gather for efficiency reasons. + # This API calls directly NCCL all-gather versus the former does + # internal copies and can potentially cause slow down. + torch.distributed._all_gather_base(gathered, tensor, + group=parallel_state.get_tensor_model_parallel_group()) + return gathered + + +class VocabUtility: + """ Split the vocabulary into `world_size` chunks and return the first + and last index of the vocabulary belonging to the `rank` + partition: Note that indices in [fist, last) + + """ + + @staticmethod + def vocab_range_from_per_partition_vocab_size( + per_partition_vocab_size: int, rank, world_size: int + ) -> Sequence[int]: + index_f = rank * per_partition_vocab_size + index_l = index_f + per_partition_vocab_size + return index_f, index_l + + @staticmethod + def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, world_size: int) -> Sequence[int]: + per_partition_vocab_size = divide(global_vocab_size, world_size) + return VocabUtility.vocab_range_from_per_partition_vocab_size( + per_partition_vocab_size, rank, world_size + ) diff --git a/multilinguality_megatron/megatron/core/utils.py b/multilinguality_megatron/megatron/core/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..62b0a465fba440464ef697b5448a87b91ef7d1c5 --- /dev/null +++ b/multilinguality_megatron/megatron/core/utils.py @@ -0,0 +1,124 @@ +"""Utility functions used throughout Megatron core""" +from functools import reduce +import operator + +import torch + +from megatron.core import parallel_state + + +def ensure_divisibility(numerator, denominator): + """Ensure that numerator is divisible by the denominator.""" + assert numerator % denominator == 0, "{} is not divisible by {}".format( + numerator, denominator + ) + + +def divide(numerator, denominator): + """Ensure that numerator is divisible by the denominator and return + the division value.""" + ensure_divisibility(numerator, denominator) + return numerator // denominator + + +class GlobalMemoryBuffer: + """Global buffer to avoid dynamic memory allocations. + Caller should ensure that buffers of the same name + are not used concurrently.""" + + def __init__(self): + self.buffer = {} + + def get_tensor(self, tensor_shape, dtype, name): + required_len = reduce(operator.mul, tensor_shape, 1) + if self.buffer.get((name, dtype), None) is None or \ + self.buffer[(name, dtype)].numel() < required_len: + self.buffer[(name, dtype)] = \ + torch.empty(required_len, + dtype=dtype, + device=torch.cuda.current_device(), + requires_grad=False) + + return self.buffer[(name, dtype)][0:required_len].view(*tensor_shape) + +def _kernel_make_viewless_tensor(inp, requires_grad): + '''Make a viewless tensor. + + View tensors have the undesirable side-affect of retaining a reference + to the originally-viewed tensor, even after manually setting the '.data' + field. This method creates a new tensor that links to the old tensor's + data, without linking the viewed tensor, referenced via the '._base' + field. + ''' + out = torch.empty( + (1,), + dtype = inp.dtype, + device = inp.device, + requires_grad = requires_grad, + ) + out.data = inp.data + return out + + +class MakeViewlessTensor(torch.autograd.Function): + ''' + Autograd function to make a viewless tensor. + + This function should be used in cases where the computation graph needs + to be propagated, but we only want a viewless tensor (e.g., + ParallelTransformer's hidden_states). 
Call this function by passing + 'keep_graph = True' to 'make_viewless_tensor()'. + ''' + + @staticmethod + def forward(ctx, inp, requires_grad): + return _kernel_make_viewless_tensor(inp, requires_grad) + + @staticmethod + def backward(ctx, grad_output): + return grad_output, None + + +def make_viewless_tensor(inp, requires_grad, keep_graph): + ''' + Entry-point for creating viewless tensors. + + This method should be used, rather than calling 'MakeViewlessTensor' + or '_kernel_make_viewless_tensor' directly. This method acts as a + switch for determining if an autograd function or a regular method + should be used to create the tensor. + ''' + + # return tensor as-is, if not a 'view' + if inp._base is None: + return inp + + # create viewless tensor + if keep_graph: + return MakeViewlessTensor.apply(inp, requires_grad) + else: + return _kernel_make_viewless_tensor(inp, requires_grad) + +def assert_viewless_tensor(tensor, extra_msg = None): + '''Assert that a tensor is not a view (i.e., its '._base' field is + not set).''' + if isinstance(tensor, list): + [ assert_viewless_tensor(t) for t in tensor ] + return tensor + if not isinstance(tensor, torch.Tensor): + return tensor + assert tensor._base is None, ( + "Ensure tensor._base is None before setting tensor.data or storing " + "tensor to memory buffer. Otherwise, a memory leak will occur (and " + "likely accumulate over iterations). %s" + ) % extra_msg + return tensor + +def safely_set_viewless_tensor_data(tensor, new_data_tensor): + '''Safely set tensor's '.data' field. + + Check first that the tensor is viewless (i.e., '._base' not set). If not, + raise an exception. + ''' + assert_viewless_tensor(tensor, extra_msg = "FYI, tensor._base has shape %s, and new_data_tensor has shape %s." % ("--" if tensor._base is None else tensor._base.shape, new_data_tensor.shape)) + tensor.data = new_data_tensor diff --git a/multilinguality_megatron/megatron/data/Makefile b/multilinguality_megatron/megatron/data/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..2817ab541679b5c1826f989a37ebfd731dadc75a --- /dev/null +++ b/multilinguality_megatron/megatron/data/Makefile @@ -0,0 +1,9 @@ +CXXFLAGS += -O3 -Wall -shared -std=c++11 -fPIC -fdiagnostics-color +CPPFLAGS += $(shell /mnt/data/shared/tower_llm_training/miniconda3//envs/towerllm-env/bin/python3 -m pybind11 --includes) +LIBNAME = helpers +LIBEXT = $(shell /mnt/data/shared/tower_llm_training/miniconda3//envs/towerllm-env/bin/python3-config --extension-suffix) + +default: $(LIBNAME)$(LIBEXT) + +%$(LIBEXT): %.cpp + $(CXX) $(CXXFLAGS) $(CPPFLAGS) $< -o $@ diff --git a/multilinguality_megatron/megatron/data/__init__.py b/multilinguality_megatron/megatron/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5f898c6bdf89c6cf0243af102d04f6efed86b8 --- /dev/null +++ b/multilinguality_megatron/megatron/data/__init__.py @@ -0,0 +1 @@ +from . 
import indexed_dataset diff --git a/multilinguality_megatron/megatron/data/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/data/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a00e6d5effce039c3033d8845022be7d4009656 Binary files /dev/null and b/multilinguality_megatron/megatron/data/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/data/__pycache__/blendable_dataset.cpython-39.pyc b/multilinguality_megatron/megatron/data/__pycache__/blendable_dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..230a5491bcf0db538e8acc68b5f7832b55118452 Binary files /dev/null and b/multilinguality_megatron/megatron/data/__pycache__/blendable_dataset.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/data/__pycache__/data_samplers.cpython-39.pyc b/multilinguality_megatron/megatron/data/__pycache__/data_samplers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a06abfb32359bfa468faa33eb174d3abdecd637 Binary files /dev/null and b/multilinguality_megatron/megatron/data/__pycache__/data_samplers.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/data/__pycache__/dataset_utils.cpython-39.pyc b/multilinguality_megatron/megatron/data/__pycache__/dataset_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5f98378b6732c8302b0401dc1f9bde85a026e9b Binary files /dev/null and b/multilinguality_megatron/megatron/data/__pycache__/dataset_utils.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/data/__pycache__/gpt_dataset.cpython-39.pyc b/multilinguality_megatron/megatron/data/__pycache__/gpt_dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..717d3b9c42dbc20f34d50d347c5b0676b7b55b24 Binary files /dev/null and b/multilinguality_megatron/megatron/data/__pycache__/gpt_dataset.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/data/__pycache__/indexed_dataset.cpython-39.pyc b/multilinguality_megatron/megatron/data/__pycache__/indexed_dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c80f226d0281b534e69b5787aee77178076eb7da Binary files /dev/null and b/multilinguality_megatron/megatron/data/__pycache__/indexed_dataset.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/data/__pycache__/instruction_dataset.cpython-39.pyc b/multilinguality_megatron/megatron/data/__pycache__/instruction_dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d38e5052e2ba9342db9d61671146e065c6d7938 Binary files /dev/null and b/multilinguality_megatron/megatron/data/__pycache__/instruction_dataset.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/data/autoaugment.py b/multilinguality_megatron/megatron/data/autoaugment.py new file mode 100644 index 0000000000000000000000000000000000000000..585a4fa6a51214be7ecdf8cc58a81caea700582d --- /dev/null +++ b/multilinguality_megatron/megatron/data/autoaugment.py @@ -0,0 +1,320 @@ +"""AutoAugment data augmentation policy for ImageNet. + +-- Begin license text. 
+ +MIT License + +Copyright (c) 2018 Philip Popien + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-- End license text. + +Code adapted from https://github.com/DeepVoltaire/AutoAugment. + +This module implements the fixed AutoAugment data augmentation policy for ImageNet provided in +Appendix A, Table 9 of reference [1]. It does not include any of the search code for augmentation +policies. + +Reference: +[1] https://arxiv.org/abs/1805.09501 +""" + +import random + +import numpy as np +from PIL import Image +from PIL import ImageEnhance +from PIL import ImageOps + +_MAX_LEVEL = 10 # Maximum integer strength of an augmentation, if applicable. + + +class ImageNetPolicy: + """Definition of an ImageNetPolicy. + + Implements a fixed AutoAugment data augmentation policy targeted at + ImageNet training by randomly applying at runtime one of the 25 pre-defined + data augmentation sub-policies provided in Reference [1]. + + Usage example as a Pytorch Transform: + >>> transform=transforms.Compose([transforms.Resize(256), + >>> ImageNetPolicy(), + >>> transforms.ToTensor()]) + """ + + def __init__(self, fillcolor=(128, 128, 128)): + """Initialize an ImageNetPolicy. + + Args: + fillcolor (tuple): RGB color components of the color to be used for + filling when needed (default: (128, 128, 128), which + corresponds to gray). + """ + # Instantiate a list of sub-policies. + # Each entry of the list is a SubPolicy which consists of + # two augmentation operations, + # each of those parametrized as operation, probability, magnitude. + # Those two operations are applied sequentially on the image upon call. 
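+ # For example, the first sub-policy below applies "posterize" with
+ # probability 0.4 and magnitude index 8, followed by "rotate" with
+ # probability 0.6 and magnitude index 9.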
+ self.policies = [ + SubPolicy("posterize", 0.4, 8, "rotate", 0.6, 9, fillcolor), + SubPolicy("solarize", 0.6, 5, "autocontrast", 0.6, 5, fillcolor), + SubPolicy("equalize", 0.8, 8, "equalize", 0.6, 3, fillcolor), + SubPolicy("posterize", 0.6, 7, "posterize", 0.6, 6, fillcolor), + SubPolicy("equalize", 0.4, 7, "solarize", 0.2, 4, fillcolor), + SubPolicy("equalize", 0.4, 4, "rotate", 0.8, 8, fillcolor), + SubPolicy("solarize", 0.6, 3, "equalize", 0.6, 7, fillcolor), + SubPolicy("posterize", 0.8, 5, "equalize", 1.0, 2, fillcolor), + SubPolicy("rotate", 0.2, 3, "solarize", 0.6, 8, fillcolor), + SubPolicy("equalize", 0.6, 8, "posterize", 0.4, 6, fillcolor), + SubPolicy("rotate", 0.8, 8, "color", 0.4, 0, fillcolor), + SubPolicy("rotate", 0.4, 9, "equalize", 0.6, 2, fillcolor), + SubPolicy("equalize", 0.0, 7, "equalize", 0.8, 8, fillcolor), + SubPolicy("invert", 0.6, 4, "equalize", 1.0, 8, fillcolor), + SubPolicy("color", 0.6, 4, "contrast", 1.0, 8, fillcolor), + SubPolicy("rotate", 0.8, 8, "color", 1.0, 2, fillcolor), + SubPolicy("color", 0.8, 8, "solarize", 0.8, 7, fillcolor), + SubPolicy("sharpness", 0.4, 7, "invert", 0.6, 8, fillcolor), + SubPolicy("shearX", 0.6, 5, "equalize", 1.0, 9, fillcolor), + SubPolicy("color", 0.4, 0, "equalize", 0.6, 3, fillcolor), + SubPolicy("equalize", 0.4, 7, "solarize", 0.2, 4, fillcolor), + SubPolicy("solarize", 0.6, 5, "autocontrast", 0.6, 5, fillcolor), + SubPolicy("invert", 0.6, 4, "equalize", 1.0, 8, fillcolor), + SubPolicy("color", 0.6, 4, "contrast", 1.0, 8, fillcolor), + SubPolicy("equalize", 0.8, 8, "equalize", 0.6, 3, fillcolor), + ] + + def __call__(self, img): + """Define call method for ImageNetPolicy class.""" + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + """Define repr method for ImageNetPolicy class.""" + return "ImageNetPolicy" + + +class SubPolicy: + """Definition of a SubPolicy. + + A SubPolicy consists of two augmentation operations, + each of those parametrized as operation, probability, magnitude. + The two operations are applied sequentially on the image upon call. + """ + + def __init__( + self, + operation1, + probability1, + magnitude_idx1, + operation2, + probability2, + magnitude_idx2, + fillcolor, + ): + """Initialize a SubPolicy. + + Args: + operation1 (str): Key specifying the first augmentation operation. + There are fourteen key values altogether (see supported_ops below + listing supported operations). probability1 (float): Probability + within [0., 1.] of applying the first augmentation operation. + magnitude_idx1 (int): Integer specifiying the strength of the first + operation as an index further used to derive the magnitude from a + range of possible values. + operation2 (str): Key specifying the second augmentation operation. + probability2 (float): Probability within [0., 1.] of applying the + second augmentation operation. + magnitude_idx2 (int): Integer specifiying the strength of the + second operation as an index further used to derive the magnitude + from a range of possible values. + fillcolor (tuple): RGB color components of the color to be used for + filling. + Returns: + """ + # List of supported operations for operation1 and operation2. 
+ supported_ops = [ + "shearX", + "shearY", + "translateX", + "translateY", + "rotate", + "color", + "posterize", + "solarize", + "contrast", + "sharpness", + "brightness", + "autocontrast", + "equalize", + "invert", + ] + assert (operation1 in supported_ops) and ( + operation2 in supported_ops + ), "SubPolicy:one of oper1 or oper2 refers to an unsupported operation." + + assert ( + 0.0 <= probability1 <= 1.0 and 0.0 <= probability2 <= 1.0 + ), "SubPolicy: prob1 and prob2 should be within [0., 1.]." + + assert ( + isinstance(magnitude_idx1, int) and 0 <= magnitude_idx1 <= 10 + ), "SubPolicy: idx1 should be specified as an integer within [0, 10]." + + assert ( + isinstance(magnitude_idx2, int) and 0 <= magnitude_idx2 <= 10 + ), "SubPolicy: idx2 should be specified as an integer within [0, 10]." + + # Define a dictionary where each key refers to a specific type of + # augmentation and the corresponding value is a range of ten possible + # magnitude values for that augmentation. + num_levels = _MAX_LEVEL + 1 + ranges = { + "shearX": np.linspace(0, 0.3, num_levels), + "shearY": np.linspace(0, 0.3, num_levels), + "translateX": np.linspace(0, 150 / 331, num_levels), + "translateY": np.linspace(0, 150 / 331, num_levels), + "rotate": np.linspace(0, 30, num_levels), + "color": np.linspace(0.0, 0.9, num_levels), + "posterize": np.round(np.linspace(8, 4, num_levels), 0).astype( + np.int + ), + "solarize": np.linspace(256, 0, num_levels), # range [0, 256] + "contrast": np.linspace(0.0, 0.9, num_levels), + "sharpness": np.linspace(0.0, 0.9, num_levels), + "brightness": np.linspace(0.0, 0.9, num_levels), + "autocontrast": [0] + * num_levels, # This augmentation doesn't use magnitude parameter. + "equalize": [0] + * num_levels, # This augmentation doesn't use magnitude parameter. + "invert": [0] + * num_levels, # This augmentation doesn't use magnitude parameter. + } + + def rotate_with_fill(img, magnitude): + """Define rotation transformation with fill. + + The input image is first rotated, then it is blended together with + a gray mask of the same size. Note that fillcolor as defined + elsewhere in this module doesn't apply here. + + Args: + magnitude (float): rotation angle in degrees. + Returns: + rotated_filled (PIL Image): rotated image with gray filling for + disoccluded areas unveiled by the rotation. + """ + rotated = img.convert("RGBA").rotate(magnitude) + rotated_filled = Image.composite( + rotated, Image.new("RGBA", rotated.size, (128,) * 4), rotated + ) + return rotated_filled.convert(img.mode) + + # Define a dictionary of augmentation functions where each key refers + # to a specific type of augmentation and the corresponding value defines + # the augmentation itself using a lambda function. 
+ # pylint: disable=unnecessary-lambda + func_dict = { + "shearX": lambda img, magnitude: img.transform( + img.size, + Image.AFFINE, + (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0), + Image.BICUBIC, + fillcolor=fillcolor, + ), + "shearY": lambda img, magnitude: img.transform( + img.size, + Image.AFFINE, + (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0), + Image.BICUBIC, + fillcolor=fillcolor, + ), + "translateX": lambda img, magnitude: img.transform( + img.size, + Image.AFFINE, + ( + 1, + 0, + magnitude * img.size[0] * random.choice([-1, 1]), + 0, + 1, + 0, + ), + fillcolor=fillcolor, + ), + "translateY": lambda img, magnitude: img.transform( + img.size, + Image.AFFINE, + ( + 1, + 0, + 0, + 0, + 1, + magnitude * img.size[1] * random.choice([-1, 1]), + ), + fillcolor=fillcolor, + ), + "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude), + "color": lambda img, magnitude: ImageEnhance.Color(img).enhance( + 1 + magnitude * random.choice([-1, 1]) + ), + "posterize": lambda img, magnitude: ImageOps.posterize( + img, magnitude + ), + "solarize": lambda img, magnitude: ImageOps.solarize( + img, magnitude + ), + "contrast": lambda img, magnitude: ImageEnhance.Contrast( + img + ).enhance(1 + magnitude * random.choice([-1, 1])), + "sharpness": lambda img, magnitude: ImageEnhance.Sharpness( + img + ).enhance(1 + magnitude * random.choice([-1, 1])), + "brightness": lambda img, magnitude: ImageEnhance.Brightness( + img + ).enhance(1 + magnitude * random.choice([-1, 1])), + "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img), + "equalize": lambda img, magnitude: ImageOps.equalize(img), + "invert": lambda img, magnitude: ImageOps.invert(img), + } + + # Store probability, function and magnitude of the first augmentation + # for the sub-policy. + self.probability1 = probability1 + self.operation1 = func_dict[operation1] + self.magnitude1 = ranges[operation1][magnitude_idx1] + + # Store probability, function and magnitude of the second augmentation + # for the sub-policy. + self.probability2 = probability2 + self.operation2 = func_dict[operation2] + self.magnitude2 = ranges[operation2][magnitude_idx2] + + def __call__(self, img): + """Define call method for SubPolicy class.""" + # Randomly apply operation 1. + if random.random() < self.probability1: + img = self.operation1(img, self.magnitude1) + + # Randomly apply operation 2. + if random.random() < self.probability2: + img = self.operation2(img, self.magnitude2) + + return img diff --git a/multilinguality_megatron/megatron/data/bert_dataset.py b/multilinguality_megatron/megatron/data/bert_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d837270915c456d2fb41757e302289757cc6e4ab --- /dev/null +++ b/multilinguality_megatron/megatron/data/bert_dataset.py @@ -0,0 +1,182 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""BERT Style dataset.""" + +import numpy as np +import torch + +from megatron import ( + get_args, + get_tokenizer, + mpu, + print_rank_0 +) +from megatron.data.dataset_utils import ( + get_samples_mapping, + get_a_and_b_segments, + truncate_segments, + create_tokens_and_tokentypes, + create_masked_lm_predictions +) + + +class BertDataset(torch.utils.data.Dataset): + + def __init__(self, name, indexed_dataset, data_prefix, + num_epochs, max_num_samples, masked_lm_prob, + max_seq_length, short_seq_prob, seed, binary_head): + + # Params to store. 
+ self.name = name + self.seed = seed + self.masked_lm_prob = masked_lm_prob + self.max_seq_length = max_seq_length + self.binary_head = binary_head + + # Dataset. + self.indexed_dataset = indexed_dataset + + # Build the samples mapping. + self.samples_mapping = get_samples_mapping(self.indexed_dataset, + data_prefix, + num_epochs, + max_num_samples, + self.max_seq_length - 3, # account for added tokens + short_seq_prob, + self.seed, + self.name, + self.binary_head) + + # Vocab stuff. + tokenizer = get_tokenizer() + self.vocab_id_list = list(tokenizer.inv_vocab.keys()) + self.vocab_id_to_token_dict = tokenizer.inv_vocab + self.cls_id = tokenizer.cls + self.sep_id = tokenizer.sep + self.mask_id = tokenizer.mask + self.pad_id = tokenizer.pad + + def __len__(self): + return self.samples_mapping.shape[0] + + def __getitem__(self, idx): + start_idx, end_idx, seq_length = self.samples_mapping[idx] + sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)] + # Note that this rng state should be numpy and not python since + # python randint is inclusive whereas the numpy one is exclusive. + # We % 2**32 since numpy requres the seed to be between 0 and 2**32 - 1 + np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32)) + return build_training_sample(sample, seq_length, + self.max_seq_length, # needed for padding + self.vocab_id_list, + self.vocab_id_to_token_dict, + self.cls_id, self.sep_id, + self.mask_id, self.pad_id, + self.masked_lm_prob, np_rng, + self.binary_head) + + + + +def build_training_sample(sample, + target_seq_length, max_seq_length, + vocab_id_list, vocab_id_to_token_dict, + cls_id, sep_id, mask_id, pad_id, + masked_lm_prob, np_rng, binary_head): + """Biuld training sample. + + Arguments: + sample: A list of sentences in which each sentence is a list token ids. + target_seq_length: Desired sequence length. + max_seq_length: Maximum length of the sequence. All values are padded to + this length. + vocab_id_list: List of vocabulary ids. Used to pick a random id. + vocab_id_to_token_dict: A dictionary from vocab ids to text tokens. + cls_id: Start of example id. + sep_id: Separator id. + mask_id: Mask token id. + pad_id: Padding token id. + masked_lm_prob: Probability to mask tokens. + np_rng: Random number genenrator. Note that this rng state should be + numpy and not python since python randint is inclusive for + the opper bound whereas the numpy one is exclusive. + """ + + if binary_head: + # We assume that we have at least two sentences in the sample + assert len(sample) > 1 + assert target_seq_length <= max_seq_length + + # Divide sample into two segments (A and B). + if binary_head: + tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, + np_rng) + else: + tokens_a = [] + for j in range(len(sample)): + tokens_a.extend(sample[j]) + tokens_b = [] + is_next_random = False + + # Truncate to `target_sequence_length`. + max_num_tokens = target_seq_length + truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a), + len(tokens_b), max_num_tokens, np_rng) + + # Build tokens and toketypes. + tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b, + cls_id, sep_id) + + # Masking. + max_predictions_per_seq = masked_lm_prob * max_num_tokens + (tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions( + tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, + cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng) + + # Padding. 
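+ # (pad_and_convert_to_numpy fills labels with -1 and loss_mask with 0
+ # everywhere except the masked positions; see its definition below.)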
+ tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \ + = pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, + masked_labels, pad_id, max_seq_length) + + train_sample = { + 'text': tokens_np, + 'types': tokentypes_np, + 'labels': labels_np, + 'is_random': int(is_next_random), + 'loss_mask': loss_mask_np, + 'padding_mask': padding_mask_np, + 'truncated': int(truncated)} + return train_sample + + +def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, + masked_labels, pad_id, max_seq_length): + """Pad sequences and convert them to numpy.""" + + # Some checks. + num_tokens = len(tokens) + padding_length = max_seq_length - num_tokens + assert padding_length >= 0 + assert len(tokentypes) == num_tokens + assert len(masked_positions) == len(masked_labels) + + # Tokens and token types. + filler = [pad_id] * padding_length + tokens_np = np.array(tokens + filler, dtype=np.int64) + tokentypes_np = np.array(tokentypes + filler, dtype=np.int64) + + # Padding mask. + padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, + dtype=np.int64) + + # Lables and loss mask. + labels = [-1] * max_seq_length + loss_mask = [0] * max_seq_length + for i in range(len(masked_positions)): + assert masked_positions[i] < num_tokens + labels[masked_positions[i]] = masked_labels[i] + loss_mask[masked_positions[i]] = 1 + labels_np = np.array(labels, dtype=np.int64) + loss_mask_np = np.array(loss_mask, dtype=np.int64) + + return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np diff --git a/multilinguality_megatron/megatron/data/biencoder_dataset_utils.py b/multilinguality_megatron/megatron/data/biencoder_dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c08f067923df54d17d0458f86214e3ebe41edc75 --- /dev/null +++ b/multilinguality_megatron/megatron/data/biencoder_dataset_utils.py @@ -0,0 +1,209 @@ +import os +import time + +import numpy as np +import torch + +from megatron import get_args, get_tokenizer, print_rank_0 +from megatron.core import mpu, tensor_parallel +from megatron.data.dataset_utils import create_masked_lm_predictions, \ + pad_and_convert_to_numpy +from megatron.data.data_samplers import MegatronPretrainingSampler + +def make_attention_mask(source_block, target_block): + """ + Returns a 2-dimensional (2-D) attention mask + :param source_block: 1-D array + :param target_block: 1-D array + """ + mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) + mask = mask.astype(np.int64) + # (source_length, target_length) + return mask + +def get_one_epoch_dataloader(dataset, micro_batch_size=None): + """Specifically one epoch to be used in an indexing job.""" + args = get_args() + + if micro_batch_size is None: + micro_batch_size = args.micro_batch_size + num_workers = args.num_workers + + # Use megatron's sampler with consumed samples set to 0 as + # this is only for evaluation and don't intend to resume half way. + # Also, set the drop last to false as don't intend to remove + # the last batch + batch_sampler = MegatronPretrainingSampler( + total_samples=len(dataset), + consumed_samples=0, + micro_batch_size=args.micro_batch_size, + data_parallel_rank=mpu.get_data_parallel_rank(), + data_parallel_size=mpu.get_data_parallel_world_size(), + drop_last=False) + + return torch.utils.data.DataLoader(dataset, + batch_sampler=batch_sampler, + num_workers=num_workers, + pin_memory=True) + + +def get_ict_batch(data_iterator): + # Items and their type. 
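+ # All fields are broadcast as int64 within the tensor-model-parallel
+ # group and unpacked below; the *_mask fields are turned into booleans.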
+ keys = ['query_tokens', 'query_mask', + 'context_tokens', 'context_mask', 'block_data'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is None: + data = None + else: + data = next(data_iterator) + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. + query_tokens = data_b['query_tokens'].long() + query_mask = data_b['query_mask'] < 0.5 + context_tokens = data_b['context_tokens'].long() + context_mask = data_b['context_mask'] < 0.5 + block_indices = data_b['block_data'].long() + + return query_tokens, query_mask,\ + context_tokens, context_mask, block_indices + + +def join_str_list(str_list): + """Join a list of strings, handling spaces appropriately""" + result = "" + for s in str_list: + if s.startswith("##"): + result += s[2:] + else: + result += " " + s + return result + + +class BlockSampleData(object): + """A struct for fully describing a fixed-size block of data as used in REALM + + :param start_idx: for first sentence of the block + :param end_idx: for last sentence of the block (may be partially truncated in sample construction) + :param doc_idx: the index of the document from which the block comes in the original indexed dataset + :param block_idx: a unique integer identifier given to every block. + """ + def __init__(self, start_idx, end_idx, doc_idx, block_idx): + self.start_idx = start_idx + self.end_idx = end_idx + self.doc_idx = doc_idx + self.block_idx = block_idx + + def as_array(self): + return np.array([self.start_idx, self.end_idx, self.doc_idx, self.block_idx]).astype(np.int64) + + def as_tuple(self): + return self.start_idx, self.end_idx, self.doc_idx, self.block_idx + + +class BlockSamplesMapping(object): + def __init__(self, mapping_array): + # make sure that the array is compatible with BlockSampleData + assert mapping_array.shape[1] == 4 + self.mapping_array = mapping_array + + def __len__(self): + return self.mapping_array.shape[0] + + def __getitem__(self, idx): + """Get the data associated with an indexed sample.""" + sample_data = BlockSampleData(*self.mapping_array[idx]) + return sample_data + + +def get_block_samples_mapping(block_dataset, title_dataset, data_prefix, num_epochs, + max_num_samples, max_seq_length, seed, name, use_one_sent_docs=False): + """Get samples mapping for a dataset over fixed size blocks. This function also requires + a dataset of the titles for the source documents since their lengths must be taken into account. + + :return: samples_mapping (BlockSamplesMapping) + """ + + if not num_epochs: + if not max_num_samples: + raise ValueError("Need to specify either max_num_samples " + "or num_epochs") + num_epochs = np.iinfo(np.int32).max - 1 + if not max_num_samples: + max_num_samples = np.iinfo(np.int64).max - 1 + + # Filename of the index mapping + indexmap_filename = data_prefix + indexmap_filename += '_{}_indexmap'.format(name) + if num_epochs != (np.iinfo(np.int32).max - 1): + indexmap_filename += '_{}ep'.format(num_epochs) + if max_num_samples != (np.iinfo(np.int64).max - 1): + indexmap_filename += '_{}mns'.format(max_num_samples) + indexmap_filename += '_{}msl'.format(max_seq_length) + indexmap_filename += '_{}s'.format(seed) + if use_one_sent_docs: + indexmap_filename += '_1sentok' + indexmap_filename += '.npy' + + # Build the indexed mapping if not exist. 
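+ # Only data-parallel rank 0 builds and saves the .npy mapping; the
+ # all-reduce on `counts` further down doubles as a barrier so the other
+ # ranks only load the file once it exists.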
+ if mpu.get_data_parallel_rank() == 0 and \ + not os.path.isfile(indexmap_filename): + print(' > WARNING: could not find index map file {}, building ' + 'the indices on rank 0 ...'.format(indexmap_filename)) + + # Make sure the types match the helpers input types. + assert block_dataset.doc_idx.dtype == np.int64 + assert block_dataset.sizes.dtype == np.int32 + + # Build samples mapping + verbose = torch.distributed.get_rank() == 0 + start_time = time.time() + print_rank_0(' > building samples index mapping for {} ...'.format( + name)) + + from megatron.data import helpers + mapping_array = helpers.build_blocks_mapping( + block_dataset.doc_idx, + block_dataset.sizes, + title_dataset.sizes, + num_epochs, + max_num_samples, + max_seq_length - 3, # account for added tokens + seed, + verbose, + use_one_sent_docs) + + + print_rank_0(' > done building samples index mapping') + np.save(indexmap_filename, mapping_array, allow_pickle=True) + print_rank_0(' > saved the index mapping in {}'.format( + indexmap_filename)) + # Make sure all the ranks have built the mapping + print_rank_0(' > elapsed time to build and save samples mapping ' + '(seconds): {:4f}'.format( + time.time() - start_time)) + + # This should be a barrier but nccl barrier assumes + # device_index=rank which is not the case for model + # parallel case + counts = torch.cuda.LongTensor([1]) + torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group()) + assert counts[0].item() == torch.distributed.get_world_size( + group=mpu.get_data_parallel_group()) + + # Load indexed dataset. + print_rank_0(' > loading indexed mapping from {}'.format( + indexmap_filename)) + start_time = time.time() + + mapping_array = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r') + samples_mapping = BlockSamplesMapping(mapping_array) + + print_rank_0(' loaded indexed file in {:3.3f} seconds'.format( + time.time() - start_time)) + print_rank_0(' total number of samples: {}'.format( + mapping_array.shape[0])) + + return samples_mapping diff --git a/multilinguality_megatron/megatron/data/blendable_dataset.py b/multilinguality_megatron/megatron/data/blendable_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..6b642bccacbf5b5d1b078ceda541bac654b7f0e4 --- /dev/null +++ b/multilinguality_megatron/megatron/data/blendable_dataset.py @@ -0,0 +1,53 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Blendable dataset.""" + +import time + +import numpy as np +import torch + +from megatron import print_rank_0 + +class BlendableDataset(torch.utils.data.Dataset): + + + def __init__(self, datasets, weights): + + self.datasets = datasets + num_datasets = len(datasets) + assert num_datasets == len(weights) + + self.size = 0 + for dataset in self.datasets: + self.size += len(dataset) + + # Normalize weights. + weights = np.array(weights, dtype=np.float64) + sum_weights = np.sum(weights) + assert sum_weights > 0.0 + weights /= sum_weights + + # Build indecies. 
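+        # dataset_index[i] records which dataset global sample i is drawn from,
+        # and dataset_sample_index[i] is the position of that sample within it
+        # (see __getitem__ below).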
+ start_time = time.time() + assert num_datasets < 255 + self.dataset_index = np.zeros(self.size, dtype=np.uint8) + self.dataset_sample_index = np.zeros(self.size, dtype=np.int64) + + from megatron.data import helpers + helpers.build_blending_indices(self.dataset_index, + self.dataset_sample_index, + weights, num_datasets, self.size, + torch.distributed.get_rank() == 0) + print_rank_0('> elapsed time for building blendable dataset indices: ' + '{:.2f} (sec)'.format(time.time() - start_time)) + + + def __len__(self): + return self.size + + + def __getitem__(self, idx): + dataset_idx = self.dataset_index[idx] + sample_idx = self.dataset_sample_index[idx] + return self.datasets[dataset_idx][sample_idx] diff --git a/multilinguality_megatron/megatron/data/data_samplers.py b/multilinguality_megatron/megatron/data/data_samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..75930a03a9e77acda5618aadfb4ebf2e166a318f --- /dev/null +++ b/multilinguality_megatron/megatron/data/data_samplers.py @@ -0,0 +1,187 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Dataloaders.""" + + +import random +import torch +import numpy as np +from torch.utils.data import Dataset +from megatron import get_args +from megatron.core import mpu + + +def build_pretraining_data_loader(dataset, consumed_samples, collate_fn=None): + """Buld dataloader given an input dataset.""" + + if dataset is None: + return None + args = get_args() + + # Megatron sampler + if args.dataloader_type == 'single': + batch_sampler = MegatronPretrainingSampler( + total_samples=len(dataset), + consumed_samples=consumed_samples, + micro_batch_size=args.micro_batch_size, + data_parallel_rank=mpu.get_data_parallel_rank(), + data_parallel_size=mpu.get_data_parallel_world_size()) + elif args.dataloader_type == 'cyclic': + batch_sampler = MegatronPretrainingRandomSampler( + dataset, + total_samples=len(dataset), + consumed_samples=consumed_samples, + micro_batch_size=args.micro_batch_size, + data_parallel_rank=mpu.get_data_parallel_rank(), + data_parallel_size=mpu.get_data_parallel_world_size(), + data_sharding=args.data_sharding) + else: + raise Exception('{} dataloader type is not supported.'.format( + args.dataloader_type)) + + # Torch dataloader. + return torch.utils.data.DataLoader(dataset, + batch_sampler=batch_sampler, + num_workers=args.num_workers, + pin_memory=True, + collate_fn=collate_fn) + +class MegatronPretrainingSampler: + + def __init__(self, total_samples, consumed_samples, micro_batch_size, + data_parallel_rank, data_parallel_size, drop_last=True): + # Keep a copy of input params for later use. + self.total_samples = total_samples + self.consumed_samples = consumed_samples + self.micro_batch_size = micro_batch_size + self.data_parallel_rank = data_parallel_rank + self.micro_batch_times_data_parallel_size = \ + self.micro_batch_size * data_parallel_size + self.drop_last = drop_last + + # Sanity checks. 
+ assert self.total_samples > 0, \ + 'no sample to consume: {}'.format(self.total_samples) + assert self.consumed_samples < self.total_samples, \ + 'no samples left to consume: {}, {}'.format(self.consumed_samples, + self.total_samples) + assert self.micro_batch_size > 0 + assert data_parallel_size > 0 + assert self.data_parallel_rank < data_parallel_size, \ + 'data_parallel_rank should be smaller than data size: {}, ' \ + '{}'.format(self.data_parallel_rank, data_parallel_size) + + def __len__(self): + return self.total_samples + + def get_start_end_idx(self): + start_idx = self.data_parallel_rank * self.micro_batch_size + end_idx = start_idx + self.micro_batch_size + return start_idx, end_idx + + def __iter__(self): + batch = [] + # Last batch will be dropped if drop_last is not set False + for idx in range(self.consumed_samples, self.total_samples): + batch.append(idx) + if len(batch) == self.micro_batch_times_data_parallel_size: + start_idx, end_idx = self.get_start_end_idx() + yield batch[start_idx:end_idx] + batch = [] + + # Check the last partial batch and see drop_last is set + if len(batch) > 0 and not self.drop_last: + start_idx, end_idx = self.get_start_end_idx() + yield batch[start_idx:end_idx] + + +class RandomSeedDataset(Dataset): + + def __init__(self, dataset): + args = get_args() + self.base_seed = args.seed + self.curr_seed = args.seed + self.dataset = dataset + + def __len__(self): + return len(self.dataset) + + def set_epoch(self, epoch): + self.curr_seed = self.base_seed + epoch + + def __getitem__(self, idx): + seed = idx + self.curr_seed + torch.manual_seed(seed) + random.seed(seed) + np.random.seed(seed) + return self.dataset[idx] + + +class MegatronPretrainingRandomSampler: + + def __init__(self, dataset, total_samples, consumed_samples, micro_batch_size, + data_parallel_rank, data_parallel_size, data_sharding): + # Keep a copy of input params for later use. + self.dataset = dataset + self.total_samples = total_samples + self.consumed_samples = consumed_samples + self.micro_batch_size = micro_batch_size + self.data_parallel_rank = data_parallel_rank + self.data_parallel_size = data_parallel_size + self.data_sharding = data_sharding + self.micro_batch_times_data_parallel_size = \ + self.micro_batch_size * data_parallel_size + self.last_batch_size = \ + self.total_samples % self.micro_batch_times_data_parallel_size + + # Sanity checks. 
+ assert self.total_samples > 0, \ + 'no sample to consume: {}'.format(self.total_samples) + assert self.micro_batch_size > 0 + assert data_parallel_size > 0 + assert self.data_parallel_rank < data_parallel_size, \ + 'data_parallel_rank should be smaller than data size: {}, ' \ + '{}'.format(self.data_parallel_rank, data_parallel_size) + + def __len__(self): + return self.total_samples + + def __iter__(self): + active_total_samples = self.total_samples - self.last_batch_size + self.epoch = self.consumed_samples // active_total_samples + current_epoch_samples = self.consumed_samples % active_total_samples + assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0 + + if isinstance(self.dataset, RandomSeedDataset): + self.dataset.set_epoch(self.epoch) + + # data sharding and random sampling + if self.data_sharding: + bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) \ + * self.micro_batch_size + bucket_offset = current_epoch_samples // self.data_parallel_size + start_idx = self.data_parallel_rank * bucket_size + + g = torch.Generator() + g.manual_seed(self.epoch) + random_idx = torch.randperm(bucket_size, generator=g).tolist() + idx_range = [start_idx + x for x in random_idx[bucket_offset:]] + else: + full_bucket_size = (self.total_samples // self.micro_batch_size) \ + * self.micro_batch_size + full_bucket_offset = current_epoch_samples + g = torch.Generator() + g.manual_seed(self.epoch) + idx_range_total = \ + torch.randperm(full_bucket_size, generator=g).tolist() + idx_range_active = idx_range_total[full_bucket_offset:] + idx_range = idx_range_active[self.data_parallel_rank::self.data_parallel_size] + + batch = [] + # Last batch if not complete will be dropped. + for idx in idx_range: + batch.append(idx) + if len(batch) == self.micro_batch_size: + self.consumed_samples += self.micro_batch_times_data_parallel_size + yield batch + batch = [] diff --git a/multilinguality_megatron/megatron/data/dataset_utils.py b/multilinguality_megatron/megatron/data/dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..69b91284382c2d78fbc195fd11bb30106ccd5c1c --- /dev/null +++ b/multilinguality_megatron/megatron/data/dataset_utils.py @@ -0,0 +1,729 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors, and NVIDIA. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Most of the code here has been copied from: +# https://github.com/google-research/albert/blob/master/create_pretraining_data.py +# with some modifications. 
+ +import math +import os +import time +import collections + +import numpy as np +import torch + +from megatron import ( + get_args, + print_rank_0 +) +from megatron.core import mpu +from megatron.data.blendable_dataset import BlendableDataset +from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset + +DSET_TYPE_BERT = 'standard_bert' +DSET_TYPE_ICT = 'ict' +DSET_TYPE_T5 = 't5' + +DSET_TYPES = [DSET_TYPE_BERT, DSET_TYPE_ICT, DSET_TYPE_T5] + + +def get_datasets_weights_and_num_samples(data_prefix, + train_valid_test_num_samples): + + # The data prefix should be in the format of: + # weight-1, data-prefix-1, weight-2, data-prefix-2, .. + assert len(data_prefix) % 2 == 0 + num_datasets = len(data_prefix) // 2 + weights = [0]*num_datasets + prefixes = [0]*num_datasets + for i in range(num_datasets): + weights[i] = float(data_prefix[2*i]) + prefixes[i] = (data_prefix[2*i+1]).strip() + # Normalize weights + weight_sum = 0.0 + for weight in weights: + weight_sum += weight + assert weight_sum > 0.0 + weights = [weight / weight_sum for weight in weights] + + # Add 0.5% (the 1.005 factor) so in case the bleding dataset does + # not uniformly distribute the number of samples, we still have + # samples left to feed to the network. + if isinstance(train_valid_test_num_samples, list): + datasets_train_valid_test_num_samples = [] + for weight in weights: + datasets_train_valid_test_num_samples.append( + [int(math.ceil(val * weight * 1.005)) + for val in train_valid_test_num_samples]) + else: + # Used when separate dataset files are provided for train, + # valid and test + datasets_train_valid_test_num_samples = [ + int(math.ceil(train_valid_test_num_samples * weight * 1.005)) + for weight in weights] + + return prefixes, weights, datasets_train_valid_test_num_samples + + +def compile_helper(): + """Compile helper function ar runtime. Make sure this + is invoked on a single process.""" + import os + import subprocess + path = os.path.abspath(os.path.dirname(__file__)) + ret = subprocess.run(['make', '-C', path]) + if ret.returncode != 0: + print("Making C++ dataset helpers module failed, exiting.") + import sys + sys.exit(1) + + +def get_a_and_b_segments(sample, np_rng): + """Divide sample into a and b segments.""" + + # Number of sentences in the sample. + n_sentences = len(sample) + # Make sure we always have two sentences. + assert n_sentences > 1, 'make sure each sample has at least two sentences.' + + # First part: + # `a_end` is how many sentences go into the `A`. + a_end = 1 + if n_sentences >= 3: + # Note that randin in numpy is exclusive. 
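+        # e.g. np_rng.randint(1, 4) returns 1, 2 or 3 but never 4, so `a_end`
+        # always leaves at least one sentence for segment B.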
+ a_end = np_rng.randint(1, n_sentences) + tokens_a = [] + for j in range(a_end): + tokens_a.extend(sample[j]) + + # Second part: + tokens_b = [] + for j in range(a_end, n_sentences): + tokens_b.extend(sample[j]) + + # Random next: + is_next_random = False + if np_rng.random() < 0.5: + is_next_random = True + tokens_a, tokens_b = tokens_b, tokens_a + + return tokens_a, tokens_b, is_next_random + + +def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng): + """Truncates a pair of sequences to a maximum sequence length.""" + #print(len_a, len_b, max_num_tokens) + assert len_a > 0 + if len_a + len_b <= max_num_tokens: + return False + while len_a + len_b > max_num_tokens: + if len_a > len_b: + len_a -= 1 + tokens = tokens_a + else: + len_b -= 1 + tokens = tokens_b + if np_rng.random() < 0.5: + del tokens[0] + else: + tokens.pop() + return True + + +def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id): + """Merge segments A and B, add [CLS] and [SEP] and build tokentypes.""" + + tokens = [] + tokentypes = [] + # [CLS]. + tokens.append(cls_id) + tokentypes.append(0) + # Segment A. + for token in tokens_a: + tokens.append(token) + tokentypes.append(0) + # [SEP]. + tokens.append(sep_id) + tokentypes.append(0) + # Segment B. + for token in tokens_b: + tokens.append(token) + tokentypes.append(1) + if tokens_b: + # [SEP]. + tokens.append(sep_id) + tokentypes.append(1) + + return tokens, tokentypes + + +MaskedLmInstance = collections.namedtuple("MaskedLmInstance", + ["index", "label"]) + + +def is_start_piece(piece): + """Check if the current word piece is the starting piece (BERT).""" + # When a word has been split into + # WordPieces, the first token does not have any marker and any subsequence + # tokens are prefixed with ##. So whenever we see the ## token, we + # append it to the previous set of word indexes. + return not piece.startswith("##") + + +def create_masked_lm_predictions(tokens, + vocab_id_list, vocab_id_to_token_dict, + masked_lm_prob, + cls_id, sep_id, mask_id, + max_predictions_per_seq, + np_rng, + max_ngrams=3, + do_whole_word_mask=True, + favor_longer_ngram=False, + do_permutation=False, + geometric_dist=False, + masking_style="bert"): + """Creates the predictions for the masked LM objective. + Note: Tokens here are vocab ids and not text tokens.""" + + cand_indexes = [] + # Note(mingdachen): We create a list for recording if the piece is + # the starting piece of current token, where 1 means true, so that + # on-the-fly whole word masking is possible. + token_boundary = [0] * len(tokens) + + for (i, token) in enumerate(tokens): + if token == cls_id or token == sep_id: + token_boundary[i] = 1 + continue + # Whole Word Masking means that if we mask all of the wordpieces + # corresponding to an original word. + # + # Note that Whole Word Masking does *not* change the training code + # at all -- we still predict each WordPiece independently, softmaxed + # over the entire vocabulary. 
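+        # e.g. "unaffable" tokenized as ["un", "##aff", "##able"]: the "##"
+        # pieces are appended to the candidate started by "un", so the whole
+        # word is masked (or kept) together.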
+ if (do_whole_word_mask and len(cand_indexes) >= 1 and + not is_start_piece(vocab_id_to_token_dict[token])): + cand_indexes[-1].append(i) + else: + cand_indexes.append([i]) + if is_start_piece(vocab_id_to_token_dict[token]): + token_boundary[i] = 1 + + output_tokens = list(tokens) + + masked_lm_positions = [] + masked_lm_labels = [] + + if masked_lm_prob == 0: + return (output_tokens, masked_lm_positions, + masked_lm_labels, token_boundary) + + num_to_predict = min(max_predictions_per_seq, + max(1, int(round(len(tokens) * masked_lm_prob)))) + + ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64) + if not geometric_dist: + # Note(mingdachen): + # By default, we set the probilities to favor shorter ngram sequences. + pvals = 1. / np.arange(1, max_ngrams + 1) + pvals /= pvals.sum(keepdims=True) + if favor_longer_ngram: + pvals = pvals[::-1] + + ngram_indexes = [] + for idx in range(len(cand_indexes)): + ngram_index = [] + for n in ngrams: + ngram_index.append(cand_indexes[idx:idx + n]) + ngram_indexes.append(ngram_index) + + np_rng.shuffle(ngram_indexes) + + (masked_lms, masked_spans) = ([], []) + covered_indexes = set() + for cand_index_set in ngram_indexes: + if len(masked_lms) >= num_to_predict: + break + if not cand_index_set: + continue + # Note(mingdachen): + # Skip current piece if they are covered in lm masking or previous ngrams. + for index_set in cand_index_set[0]: + for index in index_set: + if index in covered_indexes: + continue + + if not geometric_dist: + n = np_rng.choice(ngrams[:len(cand_index_set)], + p=pvals[:len(cand_index_set)] / + pvals[:len(cand_index_set)].sum(keepdims=True)) + else: + # Sampling "n" from the geometric distribution and clipping it to + # the max_ngrams. Using p=0.2 default from the SpanBERT paper + # https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1) + n = min(np_rng.geometric(0.2), max_ngrams) + + index_set = sum(cand_index_set[n - 1], []) + n -= 1 + # Note(mingdachen): + # Repeatedly looking for a candidate that does not exceed the + # maximum number of predictions by trying shorter ngrams. + while len(masked_lms) + len(index_set) > num_to_predict: + if n == 0: + break + index_set = sum(cand_index_set[n - 1], []) + n -= 1 + # If adding a whole-word mask would exceed the maximum number of + # predictions, then just skip this candidate. 
+ if len(masked_lms) + len(index_set) > num_to_predict: + continue + is_any_index_covered = False + for index in index_set: + if index in covered_indexes: + is_any_index_covered = True + break + if is_any_index_covered: + continue + for index in index_set: + covered_indexes.add(index) + masked_token = None + if masking_style == "bert": + # 80% of the time, replace with [MASK] + if np_rng.random() < 0.8: + masked_token = mask_id + else: + # 10% of the time, keep original + if np_rng.random() < 0.5: + masked_token = tokens[index] + # 10% of the time, replace with random word + else: + masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))] + elif masking_style == "t5": + masked_token = mask_id + else: + raise ValueError("invalid value of masking style") + + output_tokens[index] = masked_token + masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) + + masked_spans.append(MaskedLmInstance( + index=index_set, + label=[tokens[index] for index in index_set])) + + assert len(masked_lms) <= num_to_predict + np_rng.shuffle(ngram_indexes) + + select_indexes = set() + if do_permutation: + for cand_index_set in ngram_indexes: + if len(select_indexes) >= num_to_predict: + break + if not cand_index_set: + continue + # Note(mingdachen): + # Skip current piece if they are covered in lm masking or previous ngrams. + for index_set in cand_index_set[0]: + for index in index_set: + if index in covered_indexes or index in select_indexes: + continue + + n = np.random.choice(ngrams[:len(cand_index_set)], + p=pvals[:len(cand_index_set)] / + pvals[:len(cand_index_set)].sum(keepdims=True)) + index_set = sum(cand_index_set[n - 1], []) + n -= 1 + + while len(select_indexes) + len(index_set) > num_to_predict: + if n == 0: + break + index_set = sum(cand_index_set[n - 1], []) + n -= 1 + # If adding a whole-word mask would exceed the maximum number of + # predictions, then just skip this candidate. + if len(select_indexes) + len(index_set) > num_to_predict: + continue + is_any_index_covered = False + for index in index_set: + if index in covered_indexes or index in select_indexes: + is_any_index_covered = True + break + if is_any_index_covered: + continue + for index in index_set: + select_indexes.add(index) + assert len(select_indexes) <= num_to_predict + + select_indexes = sorted(select_indexes) + permute_indexes = list(select_indexes) + np_rng.shuffle(permute_indexes) + orig_token = list(output_tokens) + + for src_i, tgt_i in zip(select_indexes, permute_indexes): + output_tokens[src_i] = orig_token[tgt_i] + masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i])) + + masked_lms = sorted(masked_lms, key=lambda x: x.index) + # Sort the spans by the index of the first span + masked_spans = sorted(masked_spans, key=lambda x: x.index[0]) + + for p in masked_lms: + masked_lm_positions.append(p.index) + masked_lm_labels.append(p.label) + return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans) + + +def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, + masked_labels, pad_id, max_seq_length): + """Pad sequences and convert them to numpy.""" + + # Some checks. + num_tokens = len(tokens) + padding_length = max_seq_length - num_tokens + assert padding_length >= 0 + assert len(tokentypes) == num_tokens + assert len(masked_positions) == len(masked_labels) + + # Tokens and token types. 
+ filler = [pad_id] * padding_length + tokens_np = np.array(tokens + filler, dtype=np.int64) + tokentypes_np = np.array(tokentypes + filler, dtype=np.int64) + + # Padding mask. + padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, + dtype=np.int64) + # Lables and loss mask. + labels = [-1] * max_seq_length + loss_mask = [0] * max_seq_length + for i in range(len(masked_positions)): + assert masked_positions[i] < num_tokens + labels[masked_positions[i]] = masked_labels[i] + loss_mask[masked_positions[i]] = 1 + labels_np = np.array(labels, dtype=np.int64) + loss_mask_np = np.array(loss_mask, dtype=np.int64) + + return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np + + +def build_train_valid_test_datasets(data_prefix, + data_impl, + splits_string, + train_valid_test_num_samples, + max_seq_length, + masked_lm_prob, + short_seq_prob, + seed, + skip_warmup, + binary_head=False, + max_seq_length_dec=None, + dataset_type='standard_bert'): + + if len(data_prefix) == 1: + return _build_train_valid_test_datasets(data_prefix[0], + data_impl, splits_string, + train_valid_test_num_samples, + max_seq_length, masked_lm_prob, + short_seq_prob, seed, + skip_warmup, + binary_head, + max_seq_length_dec, + dataset_type=dataset_type) + # Blending dataset. + # Parse the values. + output = get_datasets_weights_and_num_samples(data_prefix, + train_valid_test_num_samples) + prefixes, weights, datasets_train_valid_test_num_samples = output + + # Build individual datasets. + train_datasets = [] + valid_datasets = [] + test_datasets = [] + for i in range(len(prefixes)): + train_ds, valid_ds, test_ds = _build_train_valid_test_datasets( + prefixes[i], data_impl, splits_string, + datasets_train_valid_test_num_samples[i], + max_seq_length, masked_lm_prob, short_seq_prob, + seed, skip_warmup, binary_head, dataset_type=dataset_type) + if train_ds: + train_datasets.append(train_ds) + if valid_ds: + valid_datasets.append(valid_ds) + if test_ds: + test_datasets.append(test_ds) + + # Blend. + blending_train_dataset = None + if train_datasets: + blending_train_dataset = BlendableDataset(train_datasets, weights) + blending_valid_dataset = None + if valid_datasets: + blending_valid_dataset = BlendableDataset(valid_datasets, weights) + blending_test_dataset = None + if test_datasets: + blending_test_dataset = BlendableDataset(test_datasets, weights) + + return (blending_train_dataset, blending_valid_dataset, + blending_test_dataset) + + +def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string, + train_valid_test_num_samples, + max_seq_length, + masked_lm_prob, short_seq_prob, seed, + skip_warmup, binary_head, + max_seq_length_dec, + dataset_type='standard_bert'): + + if dataset_type not in DSET_TYPES: + raise ValueError("Invalid dataset_type: ", dataset_type) + + # Indexed dataset. + indexed_dataset = get_indexed_dataset_(data_prefix, + data_impl, + skip_warmup) + + if dataset_type == DSET_TYPE_ICT: + args = get_args() + title_dataset = get_indexed_dataset_(args.titles_data_path, + data_impl, + skip_warmup) + + # Get start and end indices of train/valid/train into doc-idx + # Note that doc-idx is desinged to be num-docs + 1 so we can + # easily iterate over it. + total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1 + splits = get_train_valid_test_split_(splits_string, total_num_of_documents) + + # Print stats about the splits. 
+ print_rank_0(' > dataset split:') + + def print_split_stats(name, index): + print_rank_0(' {}:'.format(name)) + print_rank_0(' document indices in [{}, {}) total of {} ' + 'documents'.format(splits[index], splits[index + 1], + splits[index + 1] - splits[index])) + start_index = indexed_dataset.doc_idx[splits[index]] + end_index = indexed_dataset.doc_idx[splits[index + 1]] + print_rank_0(' sentence indices in [{}, {}) total of {} ' + 'sentences'.format(start_index, end_index, + end_index - start_index)) + print_split_stats('train', 0) + print_split_stats('validation', 1) + print_split_stats('test', 2) + + def build_dataset(index, name): + from megatron.data.bert_dataset import BertDataset + from megatron.data.ict_dataset import ICTDataset + from megatron.data.t5_dataset import T5Dataset + dataset = None + if splits[index + 1] > splits[index]: + # Get the pointer to the original doc-idx so we can set it later. + doc_idx_ptr = indexed_dataset.get_doc_idx() + # Slice the doc-idx + start_index = splits[index] + # Add +1 so we can index into the dataset to get the upper bound. + end_index = splits[index + 1] + 1 + # New doc_idx view. + indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index]) + # Build the dataset accordingly. + kwargs = dict( + name=name, + data_prefix=data_prefix, + num_epochs=None, + max_num_samples=train_valid_test_num_samples[index], + max_seq_length=max_seq_length, + seed=seed, + ) + + if dataset_type == DSET_TYPE_ICT: + args = get_args() + dataset = ICTDataset( + block_dataset=indexed_dataset, + title_dataset=title_dataset, + query_in_block_prob=args.query_in_block_prob, + use_one_sent_docs=args.use_one_sent_docs, + binary_head=binary_head, + **kwargs + ) + elif dataset_type == DSET_TYPE_T5: + dataset = T5Dataset( + indexed_dataset=indexed_dataset, + masked_lm_prob=masked_lm_prob, + max_seq_length_dec=max_seq_length_dec, + short_seq_prob=short_seq_prob, + **kwargs + ) + elif dataset_type == DSET_TYPE_BERT: + dataset = BertDataset( + indexed_dataset=indexed_dataset, + masked_lm_prob=masked_lm_prob, + short_seq_prob=short_seq_prob, + binary_head=binary_head, + **kwargs + ) + else: + raise NotImplementedError("Dataset type not fully implemented.") + + # Set the original pointer so dataset remains the main dataset. + indexed_dataset.set_doc_idx(doc_idx_ptr) + # Checks. 
+ assert indexed_dataset.doc_idx[0] == 0 + assert indexed_dataset.doc_idx.shape[0] == \ + (total_num_of_documents + 1) + return dataset + + train_dataset = build_dataset(0, 'train') + valid_dataset = build_dataset(1, 'valid') + test_dataset = build_dataset(2, 'test') + + return (train_dataset, valid_dataset, test_dataset) + + +def get_indexed_dataset_(data_prefix, data_impl, skip_warmup): + + print_rank_0(' > building dataset index ...') + + start_time = time.time() + indexed_dataset = make_indexed_dataset(data_prefix, + data_impl, + skip_warmup) + assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1] + print_rank_0(' > finished creating indexed dataset in {:4f} ' + 'seconds'.format(time.time() - start_time)) + + print_rank_0(' > indexed dataset stats:') + print_rank_0(' number of documents: {}'.format( + indexed_dataset.doc_idx.shape[0] - 1)) + print_rank_0(' number of sentences: {}'.format( + indexed_dataset.sizes.shape[0])) + + return indexed_dataset + + +def get_train_valid_test_split_(splits_string, size): + """ Get dataset splits from comma or '/' separated string list.""" + + splits = [] + if splits_string.find(',') != -1: + splits = [float(s) for s in splits_string.split(',')] + elif splits_string.find('/') != -1: + splits = [float(s) for s in splits_string.split('/')] + else: + splits = [float(splits_string)] + while len(splits) < 3: + splits.append(0.) + splits = splits[:3] + splits_sum = sum(splits) + assert splits_sum > 0.0 + splits = [split / splits_sum for split in splits] + splits_index = [0] + for index, split in enumerate(splits): + splits_index.append(splits_index[index] + + int(round(split * float(size)))) + diff = splits_index[-1] - size + for index in range(1, len(splits_index)): + splits_index[index] -= diff + assert len(splits_index) == 4 + assert splits_index[-1] == size + return splits_index + +def get_samples_mapping(indexed_dataset, + data_prefix, + num_epochs, + max_num_samples, + max_seq_length, + short_seq_prob, + seed, + name, + binary_head): + """Get a list that maps a sample index to a starting sentence index, end sentence index, and length""" + + if not num_epochs: + if not max_num_samples: + raise ValueError("Need to specify either max_num_samples " + "or num_epochs") + num_epochs = np.iinfo(np.int32).max - 1 + if not max_num_samples: + max_num_samples = np.iinfo(np.int64).max - 1 + + # Filename of the index mapping + indexmap_filename = data_prefix + indexmap_filename += '_{}_indexmap'.format(name) + if num_epochs != (np.iinfo(np.int32).max - 1): + indexmap_filename += '_{}ep'.format(num_epochs) + if max_num_samples != (np.iinfo(np.int64).max - 1): + indexmap_filename += '_{}mns'.format(max_num_samples) + indexmap_filename += '_{}msl'.format(max_seq_length) + indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob) + indexmap_filename += '_{}s'.format(seed) + indexmap_filename += '.npy' + + # Build the indexed mapping if not exist. + if torch.distributed.get_rank() == 0 and \ + not os.path.isfile(indexmap_filename): + print(' > WARNING: could not find index map file {}, building ' + 'the indices on rank 0 ...'.format(indexmap_filename)) + + # Make sure the types match the helpers input types. + assert indexed_dataset.doc_idx.dtype == np.int64 + assert indexed_dataset.sizes.dtype == np.int32 + + # Build samples mapping + verbose = torch.distributed.get_rank() == 0 + start_time = time.time() + print_rank_0(' > building samples index mapping for {} ...'.format( + name)) + # First compile and then import. 
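+        # `helpers` is the C++ extension built from helpers.cpp; if it has not
+        # been compiled yet, compile_helper() defined earlier in this file is
+        # assumed to be the way to build it via `make`.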
+ from megatron.data import helpers + samples_mapping = helpers.build_mapping( + indexed_dataset.doc_idx, + indexed_dataset.sizes, + num_epochs, + max_num_samples, + max_seq_length, + short_seq_prob, + seed, + verbose, + 2 if binary_head else 1) + print_rank_0(' > done building samples index maping') + np.save(indexmap_filename, samples_mapping, allow_pickle=True) + print_rank_0(' > saved the index mapping in {}'.format( + indexmap_filename)) + # Make sure all the ranks have built the mapping + print_rank_0(' > elasped time to build and save samples mapping ' + '(seconds): {:4f}'.format( + time.time() - start_time)) + # This should be a barrier but nccl barrier assumes + # device_index=rank which is not the case for model + # parallel case + counts = torch.cuda.LongTensor([1]) + torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group()) + torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group()) + assert counts[0].item() == ( + torch.distributed.get_world_size() // + torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group())) + + # Load indexed dataset. + print_rank_0(' > loading indexed mapping from {}'.format( + indexmap_filename)) + start_time = time.time() + samples_mapping = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r') + print_rank_0(' loaded indexed file in {:3.3f} seconds'.format( + time.time() - start_time)) + print_rank_0(' total number of samples: {}'.format( + samples_mapping.shape[0])) + + return samples_mapping diff --git a/multilinguality_megatron/megatron/data/gpt_dataset.py b/multilinguality_megatron/megatron/data/gpt_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..6e67084f811629325d1408b980430cb78ea00235 --- /dev/null +++ b/multilinguality_megatron/megatron/data/gpt_dataset.py @@ -0,0 +1,515 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""GPT style dataset.""" + +import os +import time +from typing import Optional, List + +import numpy as np +import torch + +from megatron import print_rank_0 +from megatron.core import mpu +from megatron.data.blendable_dataset import BlendableDataset +from megatron.data.dataset_utils import get_datasets_weights_and_num_samples +from megatron.data.dataset_utils import get_train_valid_test_split_ +import megatron.data.indexed_dataset + + +def build_train_valid_test_datasets(data_prefix: Optional[str], + data_impl: str, + splits_string: str, + train_valid_test_num_samples: List[int], + seq_length: int, + seed: int, + skip_warmup: bool, + train_data_prefix=None, + valid_data_prefix=None, + test_data_prefix=None): + """Build train, valid, and test datasets.""" + if data_prefix: + print_rank_0("Single data path provided for train, valid & test") + # Single dataset. + if len(data_prefix) == 1: + return _build_train_valid_test_datasets(data_prefix[0], + data_impl, + splits_string, + train_valid_test_num_samples, + seq_length, + seed, + skip_warmup) + # Blending dataset. + # Parse the values. + output = get_datasets_weights_and_num_samples(data_prefix, + train_valid_test_num_samples) + prefixes, weights, datasets_train_valid_test_num_samples = output + + # Build individual datasets. 
+ train_datasets = [] + valid_datasets = [] + test_datasets = [] + for i in range(len(prefixes)): + train_ds, valid_ds, test_ds = _build_train_valid_test_datasets( + prefixes[i], data_impl, splits_string, + datasets_train_valid_test_num_samples[i], + seq_length, seed, skip_warmup) + if train_ds: + train_datasets.append(train_ds) + if valid_ds: + valid_datasets.append(valid_ds) + if test_ds: + test_datasets.append(test_ds) + + # Blend. + blending_train_dataset = None + + if train_datasets: + blending_train_dataset = BlendableDataset(train_datasets, weights) + blending_valid_dataset = None + if valid_datasets: + blending_valid_dataset = BlendableDataset(valid_datasets, weights) + blending_test_dataset = None + if test_datasets: + #weights=weights[:-1] + blending_test_dataset = BlendableDataset(test_datasets, weights) + + return (blending_train_dataset, blending_valid_dataset, + blending_test_dataset) + else: + print_rank_0("Separate data paths provided for train, valid & test. Split string will be ignored.") + train_dataset, valid_dataset, test_dataset = None, None, None + # Single dataset. + if train_data_prefix is not None: + train_dataset = _build_dataset("train", train_data_prefix, data_impl, + train_valid_test_num_samples[0], seq_length, seed, + skip_warmup) + + if valid_data_prefix is not None: + valid_dataset = _build_dataset("valid", valid_data_prefix, data_impl, + train_valid_test_num_samples[1], seq_length, seed, + False) + + if test_data_prefix is not None: + test_dataset = _build_dataset("test", test_data_prefix, data_impl, + train_valid_test_num_samples[2], seq_length, seed, + False) + return train_dataset, valid_dataset, test_dataset + + +def _build_dataset(dataset_name, + data_prefix, + data_impl, + num_samples, + seq_length, + seed, + skip_warmup): + dataset = None + if len(data_prefix) == 1: + dataset = _build_dataset_kernel(dataset_name, + data_prefix[0], data_impl, + num_samples, seq_length, + seed, skip_warmup) + else: + # Blending dataset. + # Parse the values. + output = get_datasets_weights_and_num_samples(data_prefix, num_samples) + prefixes, weights, dataset_num_samples = output + + # Build individual datasets. + datasets = [] + for i in range(len(prefixes)): + ds = _build_dataset_kernel(dataset_name, prefixes[i], + data_impl, dataset_num_samples[i], + seq_length, seed, skip_warmup) + if ds: + datasets.append(ds) + + if datasets: + dataset = BlendableDataset(datasets, weights) + return dataset + + +def _build_dataset_kernel(dataset_name, data_prefix, data_impl, + num_samples, seq_length, seed, skip_warmup): + """ + Build dataset. This method is called when individual + train, valid, test datasets are provided + """ + + # Indexed dataset. + indexed_dataset = get_indexed_dataset_(data_prefix, + data_impl, + skip_warmup) + + total_num_of_documents = indexed_dataset.sizes.shape[0] + + print_rank_0(' {}:'.format(dataset_name)) + print_rank_0(' document indices in [0, {}) total of {} ' + 'documents'.format(total_num_of_documents, total_num_of_documents)) + + documents = np.arange(start=0, stop=total_num_of_documents, + step=1, dtype=np.int32) + + dataset = GPTDataset(dataset_name, data_prefix, + documents, indexed_dataset, + num_samples, seq_length, seed) + + return dataset + + +def _build_train_valid_test_datasets(data_prefix, + data_impl, + splits_string: str, + train_valid_test_num_samples, + seq_length, + seed, + skip_warmup): + """Build train, valid, and test datasets.""" + + # Indexed dataset. 
+ indexed_dataset = get_indexed_dataset_(data_prefix, + data_impl, + skip_warmup) + + total_num_of_documents = indexed_dataset.sizes.shape[0] + splits = get_train_valid_test_split_(splits_string, total_num_of_documents) + + # Print stats about the splits. + print_rank_0(' > dataset split:') + + def print_split_stats(name, index): + print_rank_0(' {}:'.format(name)) + print_rank_0(' document indices in [{}, {}) total of {} ' + 'documents'.format(splits[index], splits[index + 1], + splits[index + 1] - splits[index])) + print_split_stats('train', 0) + print_split_stats('validation', 1) + print_split_stats('test', 2) + + def _f(index, name): + dataset = None + if splits[index + 1] > splits[index]: + documents = np.arange(start=splits[index], stop=splits[index + 1], + step=1, dtype=np.int32) + dataset = GPTDataset(name, data_prefix, + documents, indexed_dataset, + train_valid_test_num_samples[index], + seq_length, seed) + return dataset + + train_dataset = _f(0, 'train') + valid_dataset = _f(1, 'valid') + test_dataset = _f(2, 'test') + + return train_dataset, valid_dataset, test_dataset + + +def get_indexed_dataset_(data_prefix, data_impl, skip_warmup): + print_rank_0(' > building dataset index ...') + + start_time = time.time() + indexed_dataset = megatron.data.indexed_dataset.make_dataset(data_prefix, + data_impl, + skip_warmup) + assert indexed_dataset is not None + print_rank_0(' > finished creating indexed dataset in {:4f} seconds'.format(time.time() - start_time)) + print_rank_0(' number of documents: {}'.format(indexed_dataset.sizes.shape[0])) + n_tokens = _num_tokens(np.arange(start=0, stop=indexed_dataset.sizes.shape[0], step=1, dtype=np.int32), indexed_dataset.sizes) + print_rank_0(' number of tokens: {}'.format(n_tokens)) + return indexed_dataset + + +class GPTDataset(torch.utils.data.Dataset): + + def __init__(self, name, data_prefix, documents, indexed_dataset, + num_samples, seq_length, seed): + + self.name = name + self.indexed_dataset = indexed_dataset + + # Checks + assert np.min(documents) >= 0 + assert np.max(documents) < indexed_dataset.sizes.shape[0] + + # Build index mappings. + self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings( + self.name, data_prefix, documents, self.indexed_dataset.sizes, + num_samples, seq_length, seed) + + def __len__(self): + # -1 is due to data structure used to retieve the index: + # sample i --> [sample_idx[i], sample_idx[i+1]) + return self.sample_idx.shape[0] - 1 + + def __getitem__(self, idx): + # Get the shuffled index. + idx = self.shuffle_idx[idx] + # Start and end documents and offsets. + doc_index_f = self.sample_idx[idx][0] + doc_index_l = self.sample_idx[idx + 1][0] + offset_f = self.sample_idx[idx][1] + offset_l = self.sample_idx[idx + 1][1] + # If we are within the same document, just extract the chunk. + if doc_index_f == doc_index_l: + sample = self.indexed_dataset.get(self.doc_idx[doc_index_f], + offset=offset_f, + length=offset_l - offset_f + 1) + else: + # Otherwise, get the rest of the initial document. + sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], + offset=offset_f)] + # Loop over all in between documents and add the entire document. + for i in range(doc_index_f + 1, doc_index_l): + sample_list.append(self.indexed_dataset.get(self.doc_idx[i])) + # And finally add the relevant portion of last document. 
+ sample_list.append(self.indexed_dataset.get( + self.doc_idx[doc_index_l], + length=offset_l + 1)) + sample = np.concatenate(sample_list) + + return {'text': np.array(sample, dtype=np.int64)} + + +def _build_index_mappings(name, data_prefix, documents, sizes, + num_samples, seq_length, seed): + """Build doc-idx, sample-idx, and shuffle-idx. + doc-idx: is an array (ordered) of documents to be used in training. + sample-idx: is the start document index and document offset for each + training sample. + shuffle-idx: maps the sample index into a random index into sample-idx. + """ + # Number of tokens in each epoch and number of required epochs. + tokens_per_epoch = _num_tokens(documents, sizes) + num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples) + # rng state + np_rng = np.random.RandomState(seed=seed) + + # Filename of the index mappings. + _filename = data_prefix + _filename += '_{}_indexmap'.format(name) + _filename += '_{}ns'.format(num_samples) + _filename += '_{}sl'.format(seq_length) + _filename += '_{}s'.format(seed) + doc_idx_filename = _filename + '_doc_idx.npy' + sample_idx_filename = _filename + '_sample_idx.npy' + shuffle_idx_filename = _filename + '_shuffle_idx.npy' + + # Build the indexed mapping if not exist. + if torch.distributed.get_rank() == 0: + if (not os.path.isfile(doc_idx_filename)) or \ + (not os.path.isfile(sample_idx_filename)) or \ + (not os.path.isfile(shuffle_idx_filename)): + + print_rank_0(' > WARNING: could not find index map files, building ' + 'the indices on rank 0 ...') + + # For the last epoch, decide whether include the entire epoch + # in the global shuffle or not. + + # If we need only one epoch, then separating last epoch does + # not mean anything. + if num_epochs == 1: + separate_last_epoch = False + print(' > only one epoch required, setting ' + 'separate_last_epoch to False', flush=True) + + else: + # Get the number of samples for the last epoch + num_samples_from_epochs_minus_one = ( + (num_epochs - 1) * tokens_per_epoch - 1) // seq_length + last_epoch_num_samples = num_samples - \ + num_samples_from_epochs_minus_one + assert last_epoch_num_samples >= 0, \ + 'last epoch number of samples should be non-negative.' + num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length + assert last_epoch_num_samples < (num_samples_per_epoch + 1), \ + 'last epoch number of samples exceeded max value.' + # If we have less than 80% of the samples for the last epoch, + # seperate out the epoch and treat it differently. + # Note: the 80% number is just based on common sense and can + # be adjusted if needed. + separate_last_epoch = (last_epoch_num_samples < + int(0.80 * num_samples_per_epoch)) + if separate_last_epoch: + string = ' > last epoch number of samples ({}) is smaller '\ + 'than 80% of number of samples per epoch ({}), '\ + 'setting separate_last_epoch to True' + else: + string = ' > last epoch number of samples ({}) is larger '\ + 'than 80% of number of samples per epoch ({}), '\ + 'setting separate_last_epoch to False' + print(string.format(last_epoch_num_samples, + num_samples_per_epoch), flush=True) + + # doc-idx. + start_time = time.time() + doc_idx = _build_doc_idx(documents, num_epochs, np_rng, + separate_last_epoch) + np.save(doc_idx_filename, doc_idx, allow_pickle=True) + print_rank_0(' > elasped time to build and save doc-idx mapping ' + '(seconds): {:4f}'.format(time.time() - start_time)) + # sample-idx. + start_time = time.time() + # Use C++ implementation for speed. + # First compile and then import. 
+ from megatron.data import helpers + assert doc_idx.dtype == np.int32 + assert sizes.dtype == np.int32 + sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length, + num_epochs, tokens_per_epoch) + # sample_idx = _build_sample_idx(sizes, doc_idx, seq_length, + # num_epochs, tokens_per_epoch) + np.save(sample_idx_filename, sample_idx, allow_pickle=True) + print_rank_0(' > elasped time to build and save sample-idx mapping ' + '(seconds): {:4f}'.format(time.time() - start_time)) + # shuffle-idx. + start_time = time.time() + # -1 is due to data structure used to retieve the index: + # sample i --> [sample_idx[i], sample_idx[i+1]) + if separate_last_epoch: + num_samples_ = num_samples_from_epochs_minus_one + else: + num_samples_ = sample_idx.shape[0] - 1 + shuffle_idx = _build_shuffle_idx(num_samples_, + sample_idx.shape[0] - 1, np_rng) + np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True) + print_rank_0(' > elasped time to build and save shuffle-idx mapping' + ' (seconds): {:4f}'.format(time.time() - start_time)) + + # This should be a barrier but nccl barrier assumes + # device_index=rank which is not the case for model + # parallel case + counts = torch.cuda.LongTensor([1]) + torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group()) + torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group()) + assert counts[0].item() == ( + torch.distributed.get_world_size() // + torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group())) + + # Load mappings. + start_time = time.time() + print_rank_0(' > loading doc-idx mapping from {}'.format( + doc_idx_filename)) + doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode='r') + print_rank_0(' > loading sample-idx mapping from {}'.format( + sample_idx_filename)) + sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode='r') + print_rank_0(' > loading shuffle-idx mapping from {}'.format( + shuffle_idx_filename)) + shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode='r') + print_rank_0(' loaded indexed file in {:3.3f} seconds'.format( + time.time() - start_time)) + print_rank_0(' total number of tokens: {}'.format(_num_tokens(documents, sizes))) + print_rank_0(' total number of samples: {}'.format( + sample_idx.shape[0])) + print_rank_0(' total number of epochs: {}'.format(num_epochs)) + + return doc_idx, sample_idx, shuffle_idx + + +def _num_tokens(documents, sizes): + """Total number of tokens in the dataset.""" + return np.sum(sizes[documents]) + + +def _num_epochs(tokens_per_epoch, seq_length, num_samples): + """Based on number of samples and sequence lenght, calculate how many + epochs will be needed.""" + num_epochs = 0 + total_tokens = 0 + while True: + num_epochs += 1 + total_tokens += tokens_per_epoch + # -1 is because we need to retrieve seq_length + 1 token each time + # but the last token will overlap with the first token of the next + # sample except for the last sample. + if ((total_tokens - 1) // seq_length) >= num_samples: + return num_epochs + + +def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch): + """Build an array with length = number-of-epochs * number-of-dcuments. 
+ Each index is mapped to a corresponding document.""" + if not separate_last_epoch or num_epochs == 1: + doc_idx = np.mgrid[0:num_epochs, 0:len(documents)][1] + doc_idx[:] = documents + doc_idx = doc_idx.reshape(-1) + doc_idx = doc_idx.astype(np.int32) + np_rng.shuffle(doc_idx) + return doc_idx + + doc_idx_first = _build_doc_idx(documents, num_epochs-1, np_rng, False) + doc_idx_last = _build_doc_idx(documents, 1, np_rng, False) + return np.concatenate((doc_idx_first, doc_idx_last)) + + +def _build_sample_idx(sizes, doc_idx, seq_length, + num_epochs, tokens_per_epoch): + """Sample index mapping is a 2D array with sizes + [number-of-samples + 1, 2] where [..., 0] contains + the index into `doc_idx` and [..., 1] is the + starting offset in that document.""" + + # Total number of samples. For -1 see comments in `_num_epochs`. + num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length + sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32) + + # Index into sample_idx. + sample_index = 0 + # Index into doc_idx. + doc_idx_index = 0 + # Begining offset for each document. + doc_offset = 0 + # Start with first document and no offset. + sample_idx[sample_index][0] = doc_idx_index + sample_idx[sample_index][1] = doc_offset + sample_index += 1 + while sample_index <= num_samples: + # Start with a fresh sequence. + remaining_seq_length = seq_length + 1 + while remaining_seq_length != 0: + # Get the document length. + doc_id = doc_idx[doc_idx_index] + doc_length = sizes[doc_id] - doc_offset + # And add it to the current sequence. + remaining_seq_length -= doc_length + # If we have more than a full sequence, adjust offset and set + # remaining length to zero so we return from the while loop. + # Note that -1 here is for the same reason we have -1 in + # `_num_epochs` calculations. + if remaining_seq_length <= 0: + doc_offset += (remaining_seq_length + doc_length - 1) + remaining_seq_length = 0 + else: + # Otherwise, start from the begining of the next document. + doc_idx_index += 1 + doc_offset = 0 + # Record the sequence. + sample_idx[sample_index][0] = doc_idx_index + sample_idx[sample_index][1] = doc_offset + sample_index += 1 + + return sample_idx + + +def _build_shuffle_idx(num_samples, total_size, np_rng): + """Build the range [0, size) and shuffle.""" + print(' > building shuffle index with split [0, {}) and [{}, {}) ' + '...'.format(num_samples, num_samples, total_size), flush=True) + + dtype_ = np.uint32 + if total_size >= (np.iinfo(np.uint32).max - 1): + dtype_ = np.int64 + + shuffle_idx_first = np.arange(start=0, stop=num_samples, + step=1, dtype=dtype_) + np_rng.shuffle(shuffle_idx_first) + if num_samples == total_size: + return shuffle_idx_first + + shuffle_idx_last = np.arange(start=num_samples, stop=total_size, + step=1, dtype=dtype_) + np_rng.shuffle(shuffle_idx_last) + + return np.concatenate((shuffle_idx_first, shuffle_idx_last)) diff --git a/multilinguality_megatron/megatron/data/helpers.cpp b/multilinguality_megatron/megatron/data/helpers.cpp new file mode 100644 index 0000000000000000000000000000000000000000..09f5f9762654c01913e1321b6c6bf95a98d6143e --- /dev/null +++ b/multilinguality_megatron/megatron/data/helpers.cpp @@ -0,0 +1,701 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +/* Helper methods for fast index mapping builds */ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; +using namespace std; + +const int32_t LONG_SENTENCE_LEN = 512; + + +void build_blending_indices(py::array_t& dataset_index, + py::array_t& dataset_sample_index, + const py::array_t& weights, + const int32_t num_datasets, + const int64_t size, const bool verbose) { + /* Given multiple datasets and a weighting array, build samples + such that it follows those wieghts.*/ + + if (verbose) { + std::cout << "> building indices for blendable datasets ..." << std::endl; + } + + // Get the pointer access without the checks. + auto dataset_index_ptr = dataset_index.mutable_unchecked<1>(); + auto dataset_sample_index_ptr = dataset_sample_index.mutable_unchecked<1>(); + auto weights_ptr = weights.unchecked<1>(); + + // Initialize buffer for number of samples used for each dataset. + int64_t current_samples[num_datasets]; + for(int64_t i = 0; i < num_datasets; ++i) { + current_samples[i] = 0; + } + + // For each sample: + for(int64_t sample_idx = 0; sample_idx < size; ++sample_idx) { + + // Determine where the max error in sampling is happening. + auto sample_idx_double = std::max(static_cast(sample_idx), 1.0); + int64_t max_error_index = 0; + double max_error = weights_ptr[0] * sample_idx_double - + static_cast(current_samples[0]); + for (int64_t dataset_idx = 1; dataset_idx < num_datasets; ++dataset_idx) { + double error = weights_ptr[dataset_idx] * sample_idx_double - + static_cast(current_samples[dataset_idx]); + if (error > max_error) { + max_error = error; + max_error_index = dataset_idx; + } + } + + // Populate the indices. + dataset_index_ptr[sample_idx] = static_cast(max_error_index); + dataset_sample_index_ptr[sample_idx] = current_samples[max_error_index]; + + // Update the total samples. + current_samples[max_error_index] += 1; + + } + + // print info + if (verbose) { + std::cout << " > sample ratios:" << std::endl; + for (int64_t dataset_idx = 0; dataset_idx < num_datasets; ++dataset_idx) { + auto ratio = static_cast(current_samples[dataset_idx]) / + static_cast(size); + std::cout << " dataset " << dataset_idx << ", input: " << + weights_ptr[dataset_idx] << ", achieved: " << ratio << std::endl; + } + } + +} + + +py::array build_sample_idx(const py::array_t& sizes_, + const py::array_t& doc_idx_, + const int32_t seq_length, + const int32_t num_epochs, + const int64_t tokens_per_epoch) { + /* Sample index (sample_idx) is used for gpt2 like dataset for which + the documents are flattened and the samples are built based on this + 1-D flatten array. It is a 2D array with sizes [number-of-samples + 1, 2] + where [..., 0] contains the index into `doc_idx` and [..., 1] is the + starting offset in that document.*/ + + // Consistency checks. + assert(seq_length > 1); + assert(num_epochs > 0); + assert(tokens_per_epoch > 1); + + // Remove bound checks. + auto sizes = sizes_.unchecked<1>(); + auto doc_idx = doc_idx_.unchecked<1>(); + + // Mapping and it's length (1D). 
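+  // Stored flat as (doc index, doc offset) pairs: entry i of the mapping lives
+  // at sample_idx[2*i] and sample_idx[2*i + 1].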
+ int64_t num_samples = (num_epochs * tokens_per_epoch - 1) / seq_length; + int32_t* sample_idx = new int32_t[2*(num_samples+1)]; + + cout << " using:" << endl << std::flush; + cout << " number of documents: " << + doc_idx_.shape(0) / num_epochs << endl << std::flush; + cout << " number of epochs: " << num_epochs << + endl << std::flush; + cout << " sequence length: " << seq_length << + endl << std::flush; + cout << " total number of samples: " << num_samples << + endl << std::flush; + + // Index into sample_idx. + int64_t sample_index = 0; + // Index into doc_idx. + int64_t doc_idx_index = 0; + // Begining offset for each document. + int32_t doc_offset = 0; + // Start with first document and no offset. + sample_idx[2 * sample_index] = doc_idx_index; + sample_idx[2 * sample_index + 1] = doc_offset; + ++sample_index; + + while (sample_index <= num_samples) { + // Start with a fresh sequence. + int32_t remaining_seq_length = seq_length + 1; + while (remaining_seq_length != 0) { + // Get the document length. + auto doc_id = doc_idx[doc_idx_index]; + auto doc_length = sizes[doc_id] - doc_offset; + // And add it to the current sequence. + remaining_seq_length -= doc_length; + // If we have more than a full sequence, adjust offset and set + // remaining length to zero so we return from the while loop. + // Note that -1 here is for the same reason we have -1 in + // `_num_epochs` calculations. + if (remaining_seq_length <= 0) { + doc_offset += (remaining_seq_length + doc_length - 1); + remaining_seq_length = 0; + } else { + // Otherwise, start from the begining of the next document. + ++doc_idx_index; + doc_offset = 0; + } + } + // Record the sequence. + sample_idx[2 * sample_index] = doc_idx_index; + sample_idx[2 * sample_index + 1] = doc_offset; + ++sample_index; + } + + // Method to deallocate memory. + py::capsule free_when_done(sample_idx, [](void *mem_) { + int32_t *mem = reinterpret_cast(mem_); + delete[] mem; + }); + + // Return the numpy array. + const auto byte_size = sizeof(int32_t); + return py::array(std::vector{num_samples+1, 2}, // shape + {2*byte_size, byte_size}, // C-style contiguous strides + sample_idx, // the data pointer + free_when_done); // numpy array references + +} + + +inline int32_t get_target_sample_len(const int32_t short_seq_ratio, + const int32_t max_length, + std::mt19937& rand32_gen) { + /* Training sample length. */ + if (short_seq_ratio == 0) { + return max_length; + } + const auto random_number = rand32_gen(); + if ((random_number % short_seq_ratio) == 0) { + return 2 + random_number % (max_length - 1); + } + return max_length; +} + + +template +py::array build_mapping_impl(const py::array_t& docs_, + const py::array_t& sizes_, + const int32_t num_epochs, + const uint64_t max_num_samples, + const int32_t max_seq_length, + const double short_seq_prob, + const int32_t seed, + const bool verbose, + const int32_t min_num_sent) { + /* Build a mapping of (start-index, end-index, sequence-length) where + start and end index are the indices of the sentences in the sample + and sequence-length is the target sequence length. + */ + + // Consistency checks. + assert(num_epochs > 0); + assert(max_seq_length > 1); + assert(short_seq_prob >= 0.0); + assert(short_seq_prob <= 1.0); + assert(seed > 0); + + // Remove bound checks. + auto docs = docs_.unchecked<1>(); + auto sizes = sizes_.unchecked<1>(); + + // For efficiency, convert probability to ratio. Note: rand() generates int. 
+ int32_t short_seq_ratio = 0; + if (short_seq_prob > 0) { + short_seq_ratio = static_cast(round(1.0 / short_seq_prob)); + } + + if (verbose) { + const auto sent_start_index = docs[0]; + const auto sent_end_index = docs[docs_.shape(0) - 1]; + const auto num_sentences = sent_end_index - sent_start_index; + cout << " using:" << endl << std::flush; + cout << " number of documents: " << docs_.shape(0) - 1 << + endl << std::flush; + cout << " sentences range: [" << sent_start_index << + ", " << sent_end_index << ")" << endl << std::flush; + cout << " total number of sentences: " << num_sentences << + endl << std::flush; + cout << " number of epochs: " << num_epochs << + endl << std::flush; + cout << " maximum number of samples: " << max_num_samples << + endl << std::flush; + cout << " maximum sequence length: " << max_seq_length << + endl << std::flush; + cout << " short sequence probability: " << short_seq_prob << + endl << std::flush; + cout << " short sequence ration (1/prob): " << short_seq_ratio << + endl << std::flush; + cout << " seed: " << seed << endl << + std::flush; + } + + // Mapping and it's length (1D). + int64_t num_samples = -1; + DocIdx* maps = NULL; + + // Perform two iterations, in the first iteration get the size + // and allocate memory and in the second iteration populate the map. + bool second = false; + for (int32_t iteration=0; iteration<2; ++iteration) { + + // Set the seed so both iterations produce the same results. + std::mt19937 rand32_gen(seed); + + // Set the flag on second iteration. + second = (iteration == 1); + + // Counters: + uint64_t empty_docs = 0; + uint64_t one_sent_docs = 0; + uint64_t long_sent_docs = 0; + + // Current map index. + uint64_t map_index = 0; + + // For each epoch: + for (int32_t epoch=0; epoch= max_num_samples) { + if (verbose && (!second)) { + cout << " reached " << max_num_samples << " samples after " + << epoch << " epochs ..." << endl << std::flush; + } + break; + } + // For each document: + for (int32_t doc=0; doc<(docs.shape(0) - 1); ++doc) { + + // Document sentences are in [sent_index_first, sent_index_last) + const auto sent_index_first = docs[doc]; + const auto sent_index_last = docs[doc + 1]; + + // At the begining of the document previous index is the + // start index. + auto prev_start_index = sent_index_first; + + // Remaining documents. + auto num_remain_sent = sent_index_last - sent_index_first; + + // Some bookkeeping + if ((epoch == 0) && (!second)) { + if (num_remain_sent == 0) { + ++empty_docs; + } + if (num_remain_sent == 1) { + ++one_sent_docs; + } + } + + // Detect documents with long sentences. + bool contains_long_sentence = false; + if (num_remain_sent > 1) { + for (auto sent_index=sent_index_first; + sent_index < sent_index_last; ++sent_index) { + if (sizes[sent_index] > LONG_SENTENCE_LEN){ + if ((epoch == 0) && (!second)) { + ++long_sent_docs; + } + contains_long_sentence = true; + break; + } + } + } + + // If we have more than two sentences. + if ((num_remain_sent >= min_num_sent) && (!contains_long_sentence)) { + + // Set values. + auto seq_len = int32_t{0}; + auto num_sent = int32_t{0}; + auto target_seq_len = get_target_sample_len(short_seq_ratio, + max_seq_length, + rand32_gen); + + // Loop through sentences. + for (auto sent_index=sent_index_first; + sent_index < sent_index_last; ++sent_index) { + + // Add the size and number of sentences. + seq_len += sizes[sent_index]; + ++num_sent; + --num_remain_sent; + + // If we have reached the target length. 
+ // and if not only one sentence is left in the document. + // and if we have at least two sentneces. + // and if we have reached end of the document. + if (((seq_len >= target_seq_len) && + (num_remain_sent > 1) && + (num_sent >= min_num_sent) ) || (num_remain_sent == 0)) { + + // Check for overflow. + if ((3 * map_index + 2) > + std::numeric_limits::max()) { + cout << "number of samples exceeded maximum " + << "allowed by type int64: " + << std::numeric_limits::max() + << endl; + throw std::overflow_error("Number of samples"); + } + + // Populate the map. + if (second) { + const auto map_index_0 = 3 * map_index; + maps[map_index_0] = static_cast(prev_start_index); + maps[map_index_0 + 1] = static_cast(sent_index + 1); + maps[map_index_0 + 2] = static_cast(target_seq_len); + } + + // Update indices / counters. + ++map_index; + prev_start_index = sent_index + 1; + target_seq_len = get_target_sample_len(short_seq_ratio, + max_seq_length, + rand32_gen); + seq_len = 0; + num_sent = 0; + } + + } // for (auto sent_index=sent_index_first; ... + } // if (num_remain_sent > 1) { + } // for (int doc=0; doc < num_docs; ++doc) { + } // for (int epoch=0; epoch < num_epochs; ++epoch) { + + if (!second) { + if (verbose) { + cout << " number of empty documents: " << empty_docs << + endl << std::flush; + cout << " number of documents with one sentence: " << + one_sent_docs << endl << std::flush; + cout << " number of documents with long sentences: " << + long_sent_docs << endl << std::flush; + cout << " will create mapping for " << map_index << + " samples" << endl << std::flush; + } + assert(maps == NULL); + assert(num_samples < 0); + maps = new DocIdx[3*map_index]; + num_samples = static_cast(map_index); + } + + } // for (int iteration=0; iteration < 2; ++iteration) { + + // Shuffle. + // We need a 64 bit random number generator as we might have more + // than 2 billion samples. + std::mt19937_64 rand64_gen(seed + 1); + for (auto i=(num_samples - 1); i > 0; --i) { + const auto j = static_cast(rand64_gen() % (i + 1)); + const auto i0 = 3 * i; + const auto j0 = 3 * j; + // Swap values. + swap(maps[i0], maps[j0]); + swap(maps[i0 + 1], maps[j0 + 1]); + swap(maps[i0 + 2], maps[j0 + 2]); + } + + // Method to deallocate memory. + py::capsule free_when_done(maps, [](void *mem_) { + DocIdx *mem = reinterpret_cast(mem_); + delete[] mem; + }); + + // Return the numpy array. + const auto byte_size = sizeof(DocIdx); + return py::array(std::vector{num_samples, 3}, // shape + {3*byte_size, byte_size}, // C-style contiguous strides + maps, // the data pointer + free_when_done); // numpy array references + +} + + +py::array build_mapping(const py::array_t& docs_, + const py::array_t& sizes_, + const int num_epochs, + const uint64_t max_num_samples, + const int max_seq_length, + const double short_seq_prob, + const int seed, + const bool verbose, + const int32_t min_num_sent) { + + if (sizes_.size() > std::numeric_limits::max()) { + if (verbose) { + cout << " using uint64 for data mapping..." << endl << std::flush; + } + return build_mapping_impl(docs_, sizes_, num_epochs, + max_num_samples, max_seq_length, + short_seq_prob, seed, verbose, + min_num_sent); + } else { + if (verbose) { + cout << " using uint32 for data mapping..." 
<< endl << std::flush; + } + return build_mapping_impl(docs_, sizes_, num_epochs, + max_num_samples, max_seq_length, + short_seq_prob, seed, verbose, + min_num_sent); + } +} + +template +py::array build_blocks_mapping_impl(const py::array_t& docs_, + const py::array_t& sizes_, + const py::array_t& titles_sizes_, + const int32_t num_epochs, + const uint64_t max_num_samples, + const int32_t max_seq_length, + const int32_t seed, + const bool verbose, + const bool use_one_sent_blocks) { + /* Build a mapping of (start-index, end-index, sequence-length) where + start and end index are the indices of the sentences in the sample + and sequence-length is the target sequence length. + */ + + // Consistency checks. + assert(num_epochs > 0); + assert(max_seq_length > 1); + assert(seed > 0); + + // Remove bound checks. + auto docs = docs_.unchecked<1>(); + auto sizes = sizes_.unchecked<1>(); + auto titles_sizes = titles_sizes_.unchecked<1>(); + + if (verbose) { + const auto sent_start_index = docs[0]; + const auto sent_end_index = docs[docs_.shape(0) - 1]; + const auto num_sentences = sent_end_index - sent_start_index; + cout << " using:" << endl << std::flush; + cout << " number of documents: " << docs_.shape(0) - 1 << + endl << std::flush; + cout << " sentences range: [" << sent_start_index << + ", " << sent_end_index << ")" << endl << std::flush; + cout << " total number of sentences: " << num_sentences << + endl << std::flush; + cout << " number of epochs: " << num_epochs << + endl << std::flush; + cout << " maximum number of samples: " << max_num_samples << + endl << std::flush; + cout << " maximum sequence length: " << max_seq_length << + endl << std::flush; + cout << " seed: " << seed << endl << + std::flush; + } + + // Mapping and its length (1D). + int64_t num_samples = -1; + DocIdx* maps = NULL; + + // Acceptable number of sentences per block. + int min_num_sent = 2; + if (use_one_sent_blocks) { + min_num_sent = 1; + } + + // Perform two iterations, in the first iteration get the size + // and allocate memory and in the second iteration populate the map. + bool second = false; + for (int32_t iteration=0; iteration<2; ++iteration) { + + // Set the flag on second iteration. + second = (iteration == 1); + + // Current map index. + uint64_t map_index = 0; + + uint64_t empty_docs = 0; + uint64_t one_sent_docs = 0; + uint64_t long_sent_docs = 0; + // For each epoch: + for (int32_t epoch=0; epoch= max_num_samples) { + if (verbose && (!second)) { + cout << " reached " << max_num_samples << " samples after " + << epoch << " epochs ..." << endl << std::flush; + } + break; + } + // For each document: + for (int32_t doc=0; doc<(docs.shape(0) - 1); ++doc) { + + // Document sentences are in [sent_index_first, sent_index_last) + const auto sent_index_first = docs[doc]; + const auto sent_index_last = docs[doc + 1]; + const auto target_seq_len = max_seq_length - titles_sizes[doc]; + + // At the begining of the document previous index is the + // start index. + auto prev_start_index = sent_index_first; + + // Remaining documents. + auto num_remain_sent = sent_index_last - sent_index_first; + + // Some bookkeeping + if ((epoch == 0) && (!second)) { + if (num_remain_sent == 0) { + ++empty_docs; + } + if (num_remain_sent == 1) { + ++one_sent_docs; + } + } + // Detect documents with long sentences. 
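+        // A document that contains any sentence longer than
+        // LONG_SENTENCE_LEN (512 tokens) is skipped entirely below.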
+ bool contains_long_sentence = false; + if (num_remain_sent >= min_num_sent) { + for (auto sent_index=sent_index_first; + sent_index < sent_index_last; ++sent_index) { + if (sizes[sent_index] > LONG_SENTENCE_LEN){ + if ((epoch == 0) && (!second)) { + ++long_sent_docs; + } + contains_long_sentence = true; + break; + } + } + } + // If we have enough sentences and no long sentences. + if ((num_remain_sent >= min_num_sent) && (!contains_long_sentence)) { + + // Set values. + auto seq_len = int32_t{0}; + auto num_sent = int32_t{0}; + + // Loop through sentences. + for (auto sent_index=sent_index_first; + sent_index < sent_index_last; ++sent_index) { + + // Add the size and number of sentences. + seq_len += sizes[sent_index]; + ++num_sent; + --num_remain_sent; + + // If we have reached the target length. + // and there are an acceptable number of sentences left + // and if we have at least the minimum number of sentences. + // or if we have reached end of the document. + if (((seq_len >= target_seq_len) && + (num_remain_sent >= min_num_sent) && + (num_sent >= min_num_sent) ) || (num_remain_sent == 0)) { + + // Populate the map. + if (second) { + const auto map_index_0 = 4 * map_index; + // Each sample has 4 items: the starting sentence index, ending sentence index, + // the index of the document from which the block comes (used for fetching titles) + // and the unique id of the block (used for creating block indexes) + + maps[map_index_0] = static_cast(prev_start_index); + maps[map_index_0 + 1] = static_cast(sent_index + 1); + maps[map_index_0 + 2] = static_cast(doc); + maps[map_index_0 + 3] = static_cast(block_id); + } + + // Update indices / counters. + ++map_index; + ++block_id; + prev_start_index = sent_index + 1; + seq_len = 0; + num_sent = 0; + } + } // for (auto sent_index=sent_index_first; ... + } // if (num_remain_sent > 1) { + } // for (int doc=0; doc < num_docs; ++doc) { + } // for (int epoch=0; epoch < num_epochs; ++epoch) { + + if (!second) { + if (verbose) { + cout << " number of empty documents: " << empty_docs << + endl << std::flush; + cout << " number of documents with one sentence: " << + one_sent_docs << endl << std::flush; + cout << " number of documents with long sentences: " << + long_sent_docs << endl << std::flush; + cout << " will create mapping for " << map_index << + " samples" << endl << std::flush; + } + assert(maps == NULL); + assert(num_samples < 0); + maps = new DocIdx[4*map_index]; + num_samples = static_cast(map_index); + } + + } // for (int iteration=0; iteration < 2; ++iteration) { + + // Shuffle. + // We need a 64 bit random number generator as we might have more + // than 2 billion samples. + std::mt19937_64 rand64_gen(seed + 1); + for (auto i=(num_samples - 1); i > 0; --i) { + const auto j = static_cast(rand64_gen() % (i + 1)); + const auto i0 = 4 * i; + const auto j0 = 4 * j; + // Swap values. + swap(maps[i0], maps[j0]); + swap(maps[i0 + 1], maps[j0 + 1]); + swap(maps[i0 + 2], maps[j0 + 2]); + swap(maps[i0 + 3], maps[j0 + 3]); + } + + // Method to deallocate memory. + py::capsule free_when_done(maps, [](void *mem_) { + DocIdx *mem = reinterpret_cast(mem_); + delete[] mem; + }); + + // Return the numpy array. 
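+    // The returned array has shape [num_samples, 4]; each row holds the start
+    // sentence index, the (exclusive) end sentence index, the source document
+    // index and the unique block id, as populated above.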
+ const auto byte_size = sizeof(DocIdx); + return py::array(std::vector{num_samples, 4}, // shape + {4*byte_size, byte_size}, // C-style contiguous strides + maps, // the data pointer + free_when_done); // numpy array references + +} + +py::array build_blocks_mapping(const py::array_t& docs_, + const py::array_t& sizes_, + const py::array_t& titles_sizes_, + const int num_epochs, + const uint64_t max_num_samples, + const int max_seq_length, + const int seed, + const bool verbose, + const bool use_one_sent_blocks) { + + if (sizes_.size() > std::numeric_limits::max()) { + if (verbose) { + cout << " using uint64 for data mapping..." << endl << std::flush; + } + return build_blocks_mapping_impl(docs_, sizes_, titles_sizes_, + num_epochs, max_num_samples, max_seq_length, seed, verbose, use_one_sent_blocks); + } else { + if (verbose) { + cout << " using uint32 for data mapping..." << endl << std::flush; + } + return build_blocks_mapping_impl(docs_, sizes_, titles_sizes_, + num_epochs, max_num_samples, max_seq_length, seed, verbose, use_one_sent_blocks); + } +} + +PYBIND11_MODULE(helpers, m) { + m.def("build_mapping", &build_mapping); + m.def("build_blocks_mapping", &build_blocks_mapping); + m.def("build_sample_idx", &build_sample_idx); + m.def("build_blending_indices", &build_blending_indices); +} diff --git a/multilinguality_megatron/megatron/data/helpers.cpython-39-x86_64-linux-gnu.so b/multilinguality_megatron/megatron/data/helpers.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..dc5c0bd3186e7f9b6d959bea6c55254daf74bc79 Binary files /dev/null and b/multilinguality_megatron/megatron/data/helpers.cpython-39-x86_64-linux-gnu.so differ diff --git a/multilinguality_megatron/megatron/data/ict_dataset.py b/multilinguality_megatron/megatron/data/ict_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..258c32337d372158d81261c52e6e3d6f35049ce8 --- /dev/null +++ b/multilinguality_megatron/megatron/data/ict_dataset.py @@ -0,0 +1,158 @@ +import itertools +import random + +import numpy as np +from torch.utils.data import Dataset + +from megatron import get_tokenizer +from megatron import get_args +from megatron.data.dataset_utils import get_indexed_dataset_ +from megatron.data.realm_dataset_utils import get_block_samples_mapping + + +def make_attention_mask(source_block, target_block): + """ + Returns a 2-dimensional (2-D) attention mask + :param source_block: 1-D array + :param target_block: 1-D array + """ + mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) + mask = mask.astype(np.int64) + # (source_length, target_length) + return mask + + +def get_ict_dataset(use_titles=True, query_in_block_prob=1): + """Get a dataset which uses block samples mappings to get ICT/block indexing data (via get_block()) + rather than for training, since it is only built with a single epoch sample mapping. 
+ """ + args = get_args() + block_dataset = get_indexed_dataset_(args.data_path, 'mmap', True) + titles_dataset = get_indexed_dataset_(args.titles_data_path, 'mmap', True) + + kwargs = dict( + name='full', + block_dataset=block_dataset, + title_dataset=titles_dataset, + data_prefix=args.data_path, + num_epochs=1, + max_num_samples=None, + max_seq_length=args.seq_length, + seed=1, + query_in_block_prob=query_in_block_prob, + use_titles=use_titles, + use_one_sent_docs=args.use_one_sent_docs + ) + dataset = ICTDataset(**kwargs) + return dataset + + +class ICTDataset(Dataset): + """Dataset containing sentences and their blocks for an inverse cloze task.""" + def __init__(self, name, block_dataset, title_dataset, data_prefix, + num_epochs, max_num_samples, max_seq_length, query_in_block_prob, + seed, use_titles=True, use_one_sent_docs=False, binary_head=False): + self.name = name + self.seed = seed + self.max_seq_length = max_seq_length + self.query_in_block_prob = query_in_block_prob + self.block_dataset = block_dataset + self.title_dataset = title_dataset + self.rng = random.Random(self.seed) + self.use_titles = use_titles + self.use_one_sent_docs = use_one_sent_docs + + self.samples_mapping = get_block_samples_mapping( + block_dataset, title_dataset, data_prefix, num_epochs, + max_num_samples, max_seq_length, seed, name, use_one_sent_docs) + self.tokenizer = get_tokenizer() + self.vocab_id_list = list(self.tokenizer.inv_vocab.keys()) + self.vocab_id_to_token_list = self.tokenizer.inv_vocab + self.cls_id = self.tokenizer.cls + self.sep_id = self.tokenizer.sep + self.mask_id = self.tokenizer.mask + self.pad_id = self.tokenizer.pad + + def __len__(self): + return len(self.samples_mapping) + + def __getitem__(self, idx): + """Get an ICT example of a pseudo-query and the block of text from which it was extracted""" + sample_data = self.samples_mapping[idx] + start_idx, end_idx, doc_idx, block_idx = sample_data.as_tuple() + + if self.use_titles: + title = self.title_dataset[int(doc_idx)] + title_pad_offset = 3 + len(title) + else: + title = None + title_pad_offset = 2 + block = [self.block_dataset[i] for i in range(start_idx, end_idx)] + assert len(block) > 1 or self.use_one_sent_docs or self.query_in_block_prob == 1 + + # randint() is inclusive for Python rng + rand_sent_idx = self.rng.randint(0, len(block) - 1) + + # keep the query in the context query_in_block_prob fraction of the time. + if self.rng.random() < self.query_in_block_prob: + query = block[rand_sent_idx].copy() + else: + query = block.pop(rand_sent_idx) + + # still need to truncate because blocks are concluded when + # the sentence lengths have exceeded max_seq_length. 
+ query = query[:self.max_seq_length - 2] + block = list(itertools.chain(*block))[:self.max_seq_length - title_pad_offset] + + query_tokens, query_pad_mask = self.concat_and_pad_tokens(query) + context_tokens, context_pad_mask = self.concat_and_pad_tokens(block, title) + + query_mask = make_attention_mask(query_tokens, query_tokens) + context_mask = make_attention_mask(context_tokens, context_tokens) + + block_data = sample_data.as_array() + + sample = { + 'query_tokens': query_tokens, + 'query_mask': query_mask, + 'query_pad_mask': query_pad_mask, + 'context_tokens': context_tokens, + 'context_mask': context_mask, + 'context_pad_mask': context_pad_mask, + 'block_data': block_data, + } + + return sample + + def get_block(self, start_idx, end_idx, doc_idx): + """Get the IDs for an evidence block plus the title of the corresponding document""" + block = [self.block_dataset[i] for i in range(start_idx, end_idx)] + title = self.title_dataset[int(doc_idx)] + + block = list(itertools.chain(*block))[:self.max_seq_length - (3 + len(title))] + block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title) + + return block_tokens, block_pad_mask + + def get_null_block(self): + """Get empty block and title - used in REALM pretraining""" + block, title = [], [] + block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title) + + return block_tokens, block_pad_mask + + def concat_and_pad_tokens(self, tokens, title=None): + """Concat with special tokens and pad sequence to self.max_seq_length""" + tokens = list(tokens) + if title is None: + tokens = [self.cls_id] + tokens + [self.sep_id] + else: + title = list(title) + tokens = [self.cls_id] + title + [self.sep_id] + tokens + [self.sep_id] + assert len(tokens) <= self.max_seq_length + + num_pad = self.max_seq_length - len(tokens) + pad_mask = [1] * len(tokens) + [0] * num_pad + tokens += [self.pad_id] * num_pad + + return np.array(tokens), np.array(pad_mask) diff --git a/multilinguality_megatron/megatron/data/image_folder.py b/multilinguality_megatron/megatron/data/image_folder.py new file mode 100644 index 0000000000000000000000000000000000000000..de15b29bf0665562a00bfcab8b106ff2d4ca26f2 --- /dev/null +++ b/multilinguality_megatron/megatron/data/image_folder.py @@ -0,0 +1,302 @@ +# BSD 3-Clause License +# +# Copyright (c) Soumith Chintala 2016, +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# code taken from +# https://github.com/pytorch/vision/blob/main/torchvision/datasets/folder.py +# added support for classes_fraction and data_per_class_fraction + +from torchvision.datasets import VisionDataset +from PIL import Image + +import os +import os.path +from typing import Any, Callable, cast, Dict, List, Optional, Tuple +import numpy as np + +def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool: + """Checks if a file is an allowed extension. + Args: + filename (string): path to a file + extensions (tuple of strings): extensions to consider (lowercase) + Returns: + bool: True if the filename ends with one of given extensions + """ + return filename.lower().endswith(extensions) + + +def is_image_file(filename: str) -> bool: + """Checks if a file is an allowed image extension. + Args: + filename (string): path to a file + Returns: + bool: True if the filename ends with a known image extension + """ + return has_file_allowed_extension(filename, IMG_EXTENSIONS) + + +def make_dataset( + directory: str, + class_to_idx: Dict[str, int], + data_per_class_fraction: float, + extensions: Optional[Tuple[str, ...]] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, +) -> List[Tuple[str, int]]: + """Generates a list of samples of a form (path_to_sample, class). + Args: + directory (str): root dataset directory + class_to_idx (Dict[str, int]): dictionary mapping class name to class index + extensions (optional): A list of allowed extensions. + Either extensions or is_valid_file should be passed. Defaults to None. + is_valid_file (optional): A function that takes path of a file + and checks if the file is a valid file + (used to check of corrupt files) both extensions and + is_valid_file should not be passed. Defaults to None. + Raises: + ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None. 
+ Returns: + List[Tuple[str, int]]: samples of a form (path_to_sample, class) + """ + instances = [] + directory = os.path.expanduser(directory) + both_none = extensions is None and is_valid_file is None + both_something = extensions is not None and is_valid_file is not None + if both_none or both_something: + raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") + if extensions is not None: + def is_valid_file(x: str) -> bool: + return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions)) + is_valid_file = cast(Callable[[str], bool], is_valid_file) + for target_class in sorted(class_to_idx.keys()): + class_index = class_to_idx[target_class] + target_dir = os.path.join(directory, target_class) + if not os.path.isdir(target_dir): + continue + local_instances = [] + for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)): + for fname in sorted(fnames): + path = os.path.join(root, fname) + if is_valid_file(path): + item = path, class_index + local_instances.append(item) + + instances.extend(local_instances[0:int(len(local_instances) * data_per_class_fraction)]) + + return instances + + +class DatasetFolder(VisionDataset): + """A generic data loader where the samples are arranged in this way: :: + root/class_x/xxx.ext + root/class_x/xxy.ext + root/class_x/[...]/xxz.ext + root/class_y/123.ext + root/class_y/nsdf3.ext + root/class_y/[...]/asd932_.ext + Args: + root (string): Root directory path. + loader (callable): A function to load a sample given its path. + extensions (tuple[string]): A list of allowed extensions. + both extensions and is_valid_file should not be passed. + transform (callable, optional): A function/transform that takes in + a sample and returns a transformed version. + E.g, ``transforms.RandomCrop`` for images. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + is_valid_file (callable, optional): A function that takes path of a file + and check if the file is a valid file (used to check of corrupt files) + both extensions and is_valid_file should not be passed. + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). 
+ samples (list): List of (sample path, class_index) tuples + targets (list): The class_index value for each image in the dataset + """ + + def __init__( + self, + root: str, + loader: Callable[[str], Any], + extensions: Optional[Tuple[str, ...]] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + classes_fraction=1.0, + data_per_class_fraction=1.0, + is_valid_file: Optional[Callable[[str], bool]] = None, + ) -> None: + super(DatasetFolder, self).__init__(root, transform=transform, + target_transform=target_transform) + self.classes_fraction = classes_fraction + self.data_per_class_fraction = data_per_class_fraction + classes, class_to_idx = self._find_classes(self.root) + samples = self.make_dataset(self.root, + class_to_idx, + self.data_per_class_fraction, + extensions, + is_valid_file) + if len(samples) == 0: + msg = "Found 0 files in subfolders of: {}\n".format(self.root) + if extensions is not None: + msg += "Supported extensions are: {}".format(",".join(extensions)) + raise RuntimeError(msg) + + self.loader = loader + self.extensions = extensions + self.total = len(samples) + self.classes = classes + self.class_to_idx = class_to_idx + self.samples = samples + self.targets = [s[1] for s in samples] + + @staticmethod + def make_dataset( + directory: str, + class_to_idx: Dict[str, int], + data_per_class_fraction: float, + extensions: Optional[Tuple[str, ...]] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, + ) -> List[Tuple[str, int]]: + return make_dataset(directory, + class_to_idx, + data_per_class_fraction, + extensions=extensions, + is_valid_file=is_valid_file) + + def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]: + """ + Finds the class folders in a dataset. + Args: + dir (string): Root directory path. + Returns: + tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary. + Ensures: + No class is a subdirectory of another. + """ + all_classes = [d.name for d in os.scandir(dir) if d.is_dir()] + classes = all_classes[0:int(len(all_classes) * self.classes_fraction)] + classes.sort() + class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)} + return classes, class_to_idx + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + Returns: + tuple: (sample, target) where target is class_index of the target class. 
+ """ + curr_index = index + for x in range(self.total): + try: + path, target = self.samples[curr_index] + sample = self.loader(path) + break + except Exception as e: + curr_index = np.random.randint(0, self.total) + + if self.transform is not None: + sample = self.transform(sample) + if self.target_transform is not None: + target = self.target_transform(target) + + return sample, target + + def __len__(self) -> int: + return len(self.samples) + + +IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp') + + +def pil_loader(path: str) -> Image.Image: + # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) + with open(path, 'rb') as f: + img = Image.open(f) + return img.convert('RGB') + + +# TODO: specify the return type +def accimage_loader(path: str) -> Any: + import accimage + try: + return accimage.Image(path) + except IOError: + # Potentially a decoding problem, fall back to PIL.Image + return pil_loader(path) + + +def default_loader(path: str) -> Any: + from torchvision import get_image_backend + if get_image_backend() == 'accimage': + return accimage_loader(path) + else: + return pil_loader(path) + + +class ImageFolder(DatasetFolder): + """A generic data loader where the images are arranged in this way: :: + root/dog/xxx.png + root/dog/xxy.png + root/dog/[...]/xxz.png + root/cat/123.png + root/cat/nsdf3.png + root/cat/[...]/asd932_.png + Args: + root (string): Root directory path. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + is_valid_file (callable, optional): A function that takes path of an Image file + and check if the file is a valid file (used to check of corrupt files) + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). + imgs (list): List of (image path, class_index) tuples + """ + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + classes_fraction=1.0, + data_per_class_fraction=1.0, + loader: Callable[[str], Any] = default_loader, + is_valid_file: Optional[Callable[[str], bool]] = None, + ): + super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None, + transform=transform, + target_transform=target_transform, + classes_fraction=classes_fraction, + data_per_class_fraction=data_per_class_fraction, + is_valid_file=is_valid_file) + self.imgs = self.samples + diff --git a/multilinguality_megatron/megatron/data/indexed_dataset.py b/multilinguality_megatron/megatron/data/indexed_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d4e2581bf6c72e9fb4eef8949dd947a78895ec56 --- /dev/null +++ b/multilinguality_megatron/megatron/data/indexed_dataset.py @@ -0,0 +1,585 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ + +# copied from fairseq/fairseq/data/indexed_dataset.py +# Removed IndexedRawTextDataset since it relied on Fairseq dictionary +# other slight modifications to remove fairseq dependencies +# Added document index to index file and made it accessible. +# An empty sentence no longer separates documents. + +from functools import lru_cache +import os +import shutil +import struct +from itertools import accumulate + +import numpy as np +import torch +from megatron import print_rank_0 + + +def __best_fitting_dtype(vocab_size=None): + if vocab_size is not None and vocab_size < 65500: + return np.uint16 + else: + return np.int32 + + +def get_available_dataset_impl(): + return ['lazy', 'cached', 'mmap'] + + +def infer_dataset_impl(path): + if IndexedDataset.exists(path): + with open(index_file_path(path), 'rb') as f: + magic = f.read(8) + if magic == IndexedDataset._HDR_MAGIC: + return 'cached' + elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]: + return 'mmap' + else: + return None + else: + print(f"Dataset does not exist: {path}") + print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") + return None + + +def make_builder(out_file, impl, vocab_size=None): + if impl == 'mmap': + return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size)) + else: + return IndexedDatasetBuilder(out_file) + + +def make_dataset(path, impl: str, skip_warmup=False): + if not IndexedDataset.exists(path): + print(f"Dataset does not exist: {path}") + print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") + return None + + if impl == 'infer': + impl = infer_dataset_impl(path) + if impl == 'lazy' and IndexedDataset.exists(path): + return IndexedDataset(path) + elif impl == 'cached' and IndexedDataset.exists(path): + return IndexedCachedDataset(path) + elif impl == 'mmap' and MMapIndexedDataset.exists(path): + return MMapIndexedDataset(path, skip_warmup) + print(f"Unknown dataset implementation: {impl}") + return None + + +def dataset_exists(path, impl): + if impl == 'mmap': + return MMapIndexedDataset.exists(path) + else: + return IndexedDataset.exists(path) + + +def read_longs(f, n): + a = np.empty(n, dtype=np.int64) + f.readinto(a) + return a + + +def write_longs(f, a): + f.write(np.array(a, dtype=np.int64)) + + +dtypes = { + 1: np.uint8, + 2: np.int8, + 3: np.int16, + 4: np.int32, + 5: np.int64, + 6: float, + 7: np.double, + 8: np.uint16 +} + + +def code(dtype): + for k in dtypes.keys(): + if dtypes[k] == dtype: + return k + raise ValueError(dtype) + + +def index_file_path(prefix_path): + return prefix_path + '.idx' + + +def data_file_path(prefix_path): + return prefix_path + '.bin' + + +def create_doc_idx(sizes): + doc_idx = [0] + for i, s in enumerate(sizes): + if s == 0: + doc_idx.append(i + 1) + return doc_idx + + +class IndexedDataset(torch.utils.data.Dataset): + """Loader for IndexedDataset""" + _HDR_MAGIC = b'TNTIDX\x00\x00' + + def __init__(self, path): + super().__init__() + self.path = path + self.data_file = None + self.read_index(path) + + def read_index(self, path): + with open(index_file_path(path), 'rb') as f: + magic = f.read(8) + assert magic == self._HDR_MAGIC, ( + 'Index file doesn\'t match expected format. ' + 'Make sure that --dataset_impl is configured properly.' 
+ ) + version = f.read(8) + assert struct.unpack('= self._len: + raise IndexError('index out of range') + + def __del__(self): + if self.data_file: + self.data_file.close() + + # @lru_cache(maxsize=8) + def __getitem__(self, idx): + if not self.data_file: + self.read_data(self.path) + if isinstance(idx, int): + i = idx + self.check_index(i) + tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] + a = np.empty(tensor_size, dtype=self.dtype) + self.data_file.seek(self.data_offsets[i] * self.element_size) + self.data_file.readinto(a) + return a + elif isinstance(idx, slice): + start, stop, step = idx.indices(len(self)) + if step != 1: + raise ValueError("Slices into indexed_dataset must be contiguous") + sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]] + size = sum(sizes) + a = np.empty(size, dtype=self.dtype) + self.data_file.seek(self.data_offsets[start] * self.element_size) + self.data_file.readinto(a) + offsets = list(accumulate(sizes)) + sents = np.split(a, offsets[:-1]) + return sents + + def __len__(self): + return self._len + + def num_tokens(self, index): + return self.sizes[index] + + def size(self, index): + return self.sizes[index] + + @staticmethod + def exists(path): + return ( + os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)) + ) + + @property + def supports_prefetch(self): + return False # avoid prefetching to save memory + + +class IndexedCachedDataset(IndexedDataset): + + def __init__(self, path): + super().__init__(path) + self.cache = None + self.cache_index = {} + + @property + def supports_prefetch(self): + return True + + def prefetch(self, indices): + if all(i in self.cache_index for i in indices): + return + if not self.data_file: + self.read_data(self.path) + indices = sorted(set(indices)) + total_size = 0 + for i in indices: + total_size += self.data_offsets[i + 1] - self.data_offsets[i] + self.cache = np.empty(total_size, dtype=self.dtype) + ptx = 0 + self.cache_index.clear() + for i in indices: + self.cache_index[i] = ptx + size = self.data_offsets[i + 1] - self.data_offsets[i] + a = self.cache[ptx: ptx + size] + self.data_file.seek(self.data_offsets[i] * self.element_size) + self.data_file.readinto(a) + ptx += size + if self.data_file: + # close and delete data file after prefetch so we can pickle + self.data_file.close() + self.data_file = None + + # @lru_cache(maxsize=8) + def __getitem__(self, idx): + if isinstance(idx, int): + i = idx + self.check_index(i) + tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] + a = np.empty(tensor_size, dtype=self.dtype) + ptx = self.cache_index[i] + np.copyto(a, self.cache[ptx: ptx + a.size]) + return a + elif isinstance(idx, slice): + # Hack just to make this work, can optimizer later if necessary + sents = [] + for i in range(*idx.indices(len(self))): + sents.append(self[i]) + return sents + + +class IndexedDatasetBuilder(object): + element_sizes = { + np.uint8: 1, + np.int8: 1, + np.int16: 2, + np.int32: 4, + np.int64: 8, + float: 4, + np.double: 8 + } + + def __init__(self, out_file, dtype=np.int32): + self.out_file = open(out_file, 'wb') + self.dtype = dtype + self.data_offsets = [0] + self.dim_offsets = [0] + self.sizes = [] + self.element_size = self.element_sizes[self.dtype] + self.doc_idx = [0] + + def add_item(self, tensor): + bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype)) + self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size) + for s in tensor.size(): + self.sizes.append(s) + 
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size())) + + def end_document(self): + self.doc_idx.append(len(self.sizes)) + + def merge_file_(self, another_file): + index = IndexedDataset(another_file) + assert index.dtype == self.dtype + + doc_offset = len(self.sizes) + + begin = self.data_offsets[-1] + for data_offset in index.data_offsets[1:]: + self.data_offsets.append(begin + data_offset) + self.sizes.extend(index.sizes) + + begin = self.dim_offsets[-1] + for dim_offset in index.dim_offsets[1:]: + self.dim_offsets.append(begin + dim_offset) + + self.doc_idx.extend((doc_offset + index.doc_idx)[1:]) + + with open(data_file_path(another_file), 'rb') as f: + while True: + data = f.read(1024) + if data: + self.out_file.write(data) + else: + break + + def finalize(self, index_file): + self.out_file.close() + index = open(index_file, 'wb') + index.write(b'TNTIDX\x00\x00') + index.write(struct.pack('= 0 + assert np.max(sample_indices) < len(self.indexed_text) + assert len(self.indexed_text) == len(self.indexed_role) + + self.name = name + self.sample_indices = sample_indices + self.seq_length = seq_length + + def __len__(self) -> int: + return self.sample_indices.shape[0] + + def __getitem__(self, idx) -> dict: + # Get the shuffled index. + idx = self.sample_indices[idx] + text = self.indexed_text.get(idx) + role = self.indexed_role.get(idx) + assert text is not None and role is not None and text.shape == role.shape + return {"text": text.astype(np.int64), "role": role.astype(np.int64)} + + +def _build_dataset_kernel( + dataset_name: str, + data_prefix, + data_impl: str, + num_samples: int, + seq_length: int, + seed: int, + skip_warmup: bool, +) -> InstructionDataset: + """ + Build dataset. This method is called when individual + train, valid, test datasets are provided + """ + + # Indexed dataset. + indexed_datasets = get_indexed_datasets_(data_prefix, data_impl, skip_warmup) + + total_num_of_documents = len(indexed_datasets["text"]) + + print_rank_0(" {}:".format(dataset_name)) + print_rank_0( + " document indices in [0, {}) total of {} " + "documents".format(total_num_of_documents, total_num_of_documents) + ) + + documents = np.arange(start=0, stop=total_num_of_documents, step=1, dtype=np.int32) + np_rng = np.random.RandomState(seed=seed) + dataset = _sample_dataset( + np_rng, documents, indexed_datasets, dataset_name, num_samples, seq_length + ) + + return dataset + + +def _build_dataset( + dataset_name: str, + data_prefix, + data_impl: str, + num_samples: int, + seq_length: int, + seed: int, + skip_warmup: bool, +): + dataset = None + if len(data_prefix) == 1: + dataset = _build_dataset_kernel( + dataset_name, + data_prefix[0], + data_impl, + num_samples, + seq_length, + seed, + skip_warmup, + ) + else: + # Blending dataset. + # Parse the values. + output = get_datasets_weights_and_num_samples(data_prefix, num_samples) + prefixes, weights, dataset_num_samples = output + + # Build individual datasets. 
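+        # `data_prefix` is expected to interleave weights and dataset prefixes
+        # (for instance ["0.3", "en_corpus", "0.7", "de_corpus"], illustrative
+        # values only); get_datasets_weights_and_num_samples normalizes the
+        # weights and derives a per-dataset sample count.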
+ datasets = [] + for i in range(len(prefixes)): + ds = _build_dataset_kernel( + dataset_name, + prefixes[i], + data_impl, + dataset_num_samples[i], + seq_length, + seed, + skip_warmup, + ) + if ds: + datasets.append(ds) + + if datasets: + dataset = BlendableDataset(datasets, weights) + return dataset + + + +def get_indexed_datasets_(data_prefix: str, data_impl: str, + skip_warmup: bool) -> dict[str, Dataset]: + print_rank_0(" > building dataset index ...") + start_time = time.time() + indexed_text = make_dataset(f"{data_prefix}-text", data_impl, skip_warmup) + indexed_role = make_dataset(f"{data_prefix}-role", data_impl, skip_warmup) + assert indexed_text is not None + print_rank_0(" > finished creating indexed dataset in " + f"{time.time() - start_time:4f} seconds") + num_docs = len(indexed_text) + print_rank_0(" number of documents: {}".format(num_docs)) + indices = np.arange(start=0, stop=num_docs, step=1, dtype=np.int32) + n_tokens = np.sum(indexed_text.sizes[indices]) + print_rank_0(" number of tokens: {}".format(n_tokens)) + return {"text": indexed_text, "role": indexed_role} + + +def _sample_dataset(np_rng: np.random.RandomState, document_indices: np.ndarray, + indexed_datasets: dict[str, Dataset], name: str, + num_samples: int, seq_length: int) -> Optional[InstructionDataset]: + """Compute randomized index of samples for all epochs (num_samples)""" + assert num_samples > 0 + + remaining = num_samples + index_list = [] + while remaining > 0: + count = min(remaining, len(document_indices)) + index_list.append(np_rng.permutation(document_indices)[:count]) + remaining -= count + sample_indices = np.concatenate(index_list) + + dataset = InstructionDataset(name, sample_indices, indexed_datasets, + seq_length) + return dataset + + +def _build_train_valid_test_datasets(data_prefix, data_impl: str, splits_string: str, + train_valid_test_num_samples: list[int], + seq_length: int, seed: int, skip_warmup: bool): + """Build train, valid, and test datasets.""" + + # Indexed dataset. + indexed_datasets = get_indexed_datasets_(data_prefix, data_impl, skip_warmup) + total_num_of_documents = len(indexed_datasets["text"]) + splits = get_train_valid_test_split_(splits_string, total_num_of_documents) + + # Print stats about the splits. + print_rank_0(" > dataset split:") + for index, name in enumerate(["train", "validation", "test"]): + print_rank_0(f" {name}") + print_rank_0(f" document indices in [{splits[index]}, {splits[index + 1]})" + f" total of {splits[index + 1] - splits[index]}") + + # generate random permutation of documents + np_rng = np.random.RandomState(seed=seed) + document_indices = np_rng.permutation(total_num_of_documents) + + datasets = {} + for index, name in enumerate(["train", "validation", "test"]): + begin, end = splits[index], splits[index + 1] + if end <= begin: + datasets[name] = None + else: + split_subset = document_indices[begin:end] + num_samples = train_valid_test_num_samples[index] + datasets[name] = _sample_dataset(np_rng, split_subset, indexed_datasets, + name, num_samples, seq_length) + + return datasets["train"], datasets["validation"], datasets["test"] + + +# TODO: somewhat similar to gpt_dataset._build_train_valid_test_datasets, could we merge them? 
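+# Two modes are supported below: a single (possibly blended) `data_prefix`
+# that is split into train/valid/test according to `splits_string`, or
+# separate train/valid/test prefixes, in which case the split string is
+# ignored.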
+def build_train_valid_test_datasets(data_prefix: Optional[str], + data_impl: str, + splits_string: str, + train_valid_test_num_samples: list[int], + seq_length: int, + seed: int, + skip_warmup: bool, + train_data_prefix=None, + valid_data_prefix=None, + test_data_prefix=None, + ): + + """Build train, valid, and test datasets.""" + if data_prefix: + print_rank_0("Single data path provided for train, valid & test") + # Single dataset. + if len(data_prefix) == 1: + return _build_train_valid_test_datasets( + data_prefix[0], + data_impl, + splits_string, + train_valid_test_num_samples, + seq_length, + seed, + skip_warmup, + ) + # Blending dataset. + # Parse the values. + ( + prefixes, + weights, + datasets_train_valid_test_num_samples, + ) = get_datasets_weights_and_num_samples( + data_prefix, train_valid_test_num_samples + ) + + # Build individual datasets. + train_datasets = [] + valid_datasets = [] + test_datasets = [] + for i in range(len(prefixes)): + train_ds, valid_ds, test_ds = _build_train_valid_test_datasets( + prefixes[i], + data_impl, + splits_string, + datasets_train_valid_test_num_samples[i], + seq_length, + seed, + skip_warmup, + ) + if train_ds: + train_datasets.append(train_ds) + if valid_ds: + valid_datasets.append(valid_ds) + if test_ds: + test_datasets.append(test_ds) + + # Blend. + blending_train_dataset = None + if train_datasets: + blending_train_dataset = BlendableDataset(train_datasets, weights) + blending_valid_dataset = None + if valid_datasets: + blending_valid_dataset = BlendableDataset(valid_datasets, weights) + blending_test_dataset = None + if test_datasets: + blending_test_dataset = BlendableDataset(test_datasets, weights) + + return (blending_train_dataset, blending_valid_dataset, blending_test_dataset) + else: + print_rank_0( + "Separate data paths provided for train, valid & test. Split string will be ignored." + ) + train_dataset, valid_dataset, test_dataset = None, None, None + # Single dataset. 
+ if train_data_prefix is not None: + train_dataset = _build_dataset( + "train", + train_data_prefix, + data_impl, + train_valid_test_num_samples[0], + seq_length, + seed, + skip_warmup, + ) + + if valid_data_prefix is not None: + valid_dataset = _build_dataset( + "valid", + valid_data_prefix, + data_impl, + train_valid_test_num_samples[1], + seq_length, + seed, + False, + ) + + if test_data_prefix is not None: + test_dataset = _build_dataset( + "test", + test_data_prefix, + data_impl, + train_valid_test_num_samples[2], + seq_length, + seed, + False, + ) + return train_dataset, valid_dataset, test_dataset + + +def round_to_multiple_of(x: int, y: int) -> int: + return ((x + y - 1) // y) * y + + +def instruction_collator(data): + args = get_args() + tokenizer = get_tokenizer() + pad_id = tokenizer.pad + seq_len = args.seq_length + + if args.variable_seq_lengths: + max_sample_length = max(len(x["text"]) for x in data) + seq_len = min(args.seq_length, round_to_multiple_of(max_sample_length, 16)) + seq_len += 1 # +1 to get seq_len tokens after shifting (token[t+1] is label for token[t]) + + # pad data to seq_len, create attention mask + batch_size = len(data) + attention_mask = torch.ones((batch_size, seq_len), dtype=torch.long) + role = torch.full_like(attention_mask, -1) + input = torch.full_like(attention_mask, pad_id) + + + for i, x in enumerate(data): + t = x["text"] + r = x["role"] + l = len(t) + + if l < seq_len: + attention_mask[i, l:] = 0 + input[i, :l] = torch.from_numpy(t) + role[i, :l] = torch.from_numpy(r) + else: + input[i] = torch.from_numpy(t[:seq_len]) + role[i] = torch.from_numpy(r[:seq_len]) + + assistant_mask = (role == Role.assistant.value).long() + pad_mask = (input == pad_id).long() + return {"text": input, "attention_mask": attention_mask, + "assistant_mask": assistant_mask, "pad_mask": pad_mask} diff --git a/multilinguality_megatron/megatron/data/orqa_wiki_dataset.py b/multilinguality_megatron/megatron/data/orqa_wiki_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4019cd764c204b34de0df28bfcc7969c3c19d937 --- /dev/null +++ b/multilinguality_megatron/megatron/data/orqa_wiki_dataset.py @@ -0,0 +1,193 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Wikipedia dataset from DPR code for ORQA.""" + +from abc import ABC +import csv +import numpy as np +import random +import torch +from torch.utils.data import Dataset + +from megatron import print_rank_0, get_args, get_tokenizer +from megatron.core import tensor_parallel +from megatron.data.biencoder_dataset_utils import make_attention_mask + +def get_open_retrieval_wiki_dataset(): + args = get_args() + tokenizer = get_tokenizer() + + dataset = OpenRetrievalEvidenceDataset('2018 Wikipedia from DPR codebase', + 'evidence', + args.evidence_data_path, + tokenizer, + args.retriever_seq_length) + return dataset + + +def get_open_retrieval_batch(data_iterator): + # Items and their type. + keys = ['row_id', 'context', 'context_mask', 'context_types', + 'context_pad_mask'] + datatype = torch.int64 + + # Broadcast data. + data = None if data_iterator is None else next(data_iterator) + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. 
+ row_id = data_b['row_id'].long() + context = data_b['context'].long() + + # TODO: make the context mask a binary one + context_mask = (data_b['context_mask'] < 0.5) + + context_types = data_b['context_types'].long() + context_pad_mask = data_b['context_pad_mask'].long() + + return row_id, context, context_mask, context_types, context_pad_mask + + +def build_tokens_types_paddings_from_text(row, tokenizer, max_seq_length): + """Build token types and paddings, trim if needed, and pad if needed.""" + + title_ids = tokenizer.tokenize(row['title']) + context_ids = tokenizer.tokenize(row['text']) + + # Appending the title of the context at front + extended_context_ids = title_ids + [tokenizer.sep_id] + context_ids + + context_ids, context_types, context_pad_mask = \ + build_tokens_types_paddings_from_ids(extended_context_ids, + max_seq_length, tokenizer.cls, tokenizer.sep, tokenizer.pad) + + return context_ids, context_types, context_pad_mask + + +# noinspection DuplicatedCode +def build_tokens_types_paddings_from_ids(text_ids, max_seq_length, + cls_id, sep_id, pad_id): + """Build token types and paddings, trim if needed, and pad if needed.""" + enc_ids = [] + tokentypes_enc = [] + + # [CLS]. + enc_ids.append(cls_id) + tokentypes_enc.append(0) + + # A. + len_src = len(text_ids) + enc_ids.extend(text_ids) + tokentypes_enc.extend([0] * len_src) + + # Cap the size. + if len(enc_ids) > max_seq_length - 1: + enc_ids = enc_ids[0: max_seq_length - 1] + tokentypes_enc = tokentypes_enc[0: max_seq_length - 1] + + # [SEP]. + enc_ids.append(sep_id) + tokentypes_enc.append(0) + + num_tokens_enc = len(enc_ids) + # Padding. + padding_length = max_seq_length - len(enc_ids) + if padding_length > 0: + enc_ids.extend([pad_id] * padding_length) + tokentypes_enc.extend([pad_id] * padding_length) + + pad_mask = ([1] * num_tokens_enc) + ([0] * padding_length) + pad_mask = np.array(pad_mask, dtype=np.int64) + + return enc_ids, tokentypes_enc, pad_mask + + +def build_sample(row_id, context_ids, context_types, context_pad_mask): + """Convert to numpy and return a sample consumed by the batch producer.""" + + context_ids = np.array(context_ids, dtype=np.int64) + context_types = np.array(context_types, dtype=np.int64) + context_mask = make_attention_mask(context_ids, context_ids) + + sample = ({ + 'row_id': row_id, + 'context': context_ids, + 'context_mask': context_mask, + 'context_types': context_types, + 'context_pad_mask': context_pad_mask + }) + return sample + + +class OpenRetrievalEvidenceDataset(ABC, Dataset): + """Open Retrieval Evidence dataset class.""" + + def __init__(self, task_name, dataset_name, datapath, tokenizer, + max_seq_length): + # Store inputs. + self.task_name = task_name + self.dataset_name = dataset_name + self.tokenizer = tokenizer + self.max_seq_length = max_seq_length + print_rank_0(' > building {} dataset for {}:'.format(self.task_name, + self.dataset_name)) + # Process the files. 
+ print_rank_0(datapath) + self.samples, self.id2text = self.process_samples_from_single_path( + datapath) + + args = get_args() + if args.sample_rate < 1: # subsample + k = int(len(self.samples) * args.sample_rate) + self.samples = random.sample(self.samples, k) + + print_rank_0(' >> total number of samples: {}'.format( + len(self.samples))) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + row = self.samples[idx] + + context_ids, context_types, context_pad_mask = \ + build_tokens_types_paddings_from_text(row, self.tokenizer, + self.max_seq_length) + + sample = build_sample(row['doc_id'], + context_ids, + context_types, + context_pad_mask) + return sample + + @staticmethod + def process_samples_from_single_path(filename): + print_rank_0(' > Processing {} ...'.format(filename)) + total = 0 + + rows = [] + id2text = {} + + with open(filename) as tsvfile: + reader = csv.reader(tsvfile, delimiter='\t') + next(reader, None) # skip the headers + for row in reader: + # file format: doc_id, doc_text, title + doc_id = int(row[0]) + text = row[1] + title = row[2] + + rows.append({'doc_id': doc_id, + 'text': text, + 'title': title}) + + assert doc_id not in id2text + id2text[doc_id] = (text, title) + + total += 1 + if total % 100000 == 0: + print_rank_0(' > processed {} rows so far ...'.format( + total)) + + print_rank_0(' >> processed {} samples.'.format(len(rows))) + return rows, id2text diff --git a/multilinguality_megatron/megatron/data/realm_dataset_utils.py b/multilinguality_megatron/megatron/data/realm_dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..21445573e35ab0f9448d8c1312eb006e568f0fd5 --- /dev/null +++ b/multilinguality_megatron/megatron/data/realm_dataset_utils.py @@ -0,0 +1,199 @@ +import os +import time + +import numpy as np +import torch + +from megatron import print_rank_0 +from megatron.core import mpu, tensor_parallel +from megatron.data.dataset_utils import create_masked_lm_predictions, pad_and_convert_to_numpy +from megatron import get_args, get_tokenizer, print_rank_0 + + +def get_one_epoch_dataloader(dataset, micro_batch_size=None): + """Specifically one epoch to be used in an indexing job.""" + args = get_args() + + world_size = mpu.get_data_parallel_world_size() + rank = mpu.get_data_parallel_rank() + if micro_batch_size is None: + micro_batch_size = args.micro_batch_size + global_batch_size = micro_batch_size * world_size + num_workers = args.num_workers + + sampler = torch.utils.data.SequentialSampler(dataset) + # importantly, drop_last must be False to get all the data. + assert False, 'DistributedBatchSampler deprecated, change the implementation' + from megatron.data.samplers import DistributedBatchSampler + batch_sampler = DistributedBatchSampler(sampler, + batch_size=global_batch_size, + drop_last=False, + rank=rank, + world_size=world_size) + + return torch.utils.data.DataLoader(dataset, + batch_sampler=batch_sampler, + num_workers=num_workers, + pin_memory=True) + + +def get_ict_batch(data_iterator): + # Items and their type. + keys = ['query_tokens', 'query_pad_mask', + 'block_tokens', 'block_pad_mask', 'block_data'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is None: + data = None + else: + data = next(data_iterator) + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. 
+ query_tokens = data_b['query_tokens'].long() + query_pad_mask = data_b['query_pad_mask'].long() + block_tokens = data_b['block_tokens'].long() + block_pad_mask = data_b['block_pad_mask'].long() + block_indices = data_b['block_data'].long() + + return query_tokens, query_pad_mask,\ + block_tokens, block_pad_mask, block_indices + + +def join_str_list(str_list): + """Join a list of strings, handling spaces appropriately""" + result = "" + for s in str_list: + if s.startswith("##"): + result += s[2:] + else: + result += " " + s + return result + + +class BlockSampleData(object): + """A struct for fully describing a fixed-size block of data as used in REALM + + :param start_idx: for first sentence of the block + :param end_idx: for last sentence of the block (may be partially truncated in sample construction) + :param doc_idx: the index of the document from which the block comes in the original indexed dataset + :param block_idx: a unique integer identifier given to every block. + """ + def __init__(self, start_idx, end_idx, doc_idx, block_idx): + self.start_idx = start_idx + self.end_idx = end_idx + self.doc_idx = doc_idx + self.block_idx = block_idx + + def as_array(self): + return np.array([self.start_idx, self.end_idx, self.doc_idx, self.block_idx]).astype(np.int64) + + def as_tuple(self): + return self.start_idx, self.end_idx, self.doc_idx, self.block_idx + + +class BlockSamplesMapping(object): + def __init__(self, mapping_array): + # make sure that the array is compatible with BlockSampleData + assert mapping_array.shape[1] == 4 + self.mapping_array = mapping_array + + def __len__(self): + return self.mapping_array.shape[0] + + def __getitem__(self, idx): + """Get the data associated with an indexed sample.""" + sample_data = BlockSampleData(*self.mapping_array[idx]) + return sample_data + + +def get_block_samples_mapping(block_dataset, title_dataset, data_prefix, num_epochs, + max_num_samples, max_seq_length, seed, name, use_one_sent_docs=False): + """Get samples mapping for a dataset over fixed size blocks. This function also requires + a dataset of the titles for the source documents since their lengths must be taken into account. + + :return: samples_mapping (BlockSamplesMapping) + """ + + if not num_epochs: + if not max_num_samples: + raise ValueError("Need to specify either max_num_samples " + "or num_epochs") + num_epochs = np.iinfo(np.int32).max - 1 + if not max_num_samples: + max_num_samples = np.iinfo(np.int64).max - 1 + + # Filename of the index mapping + indexmap_filename = data_prefix + indexmap_filename += '_{}_indexmap'.format(name) + if num_epochs != (np.iinfo(np.int32).max - 1): + indexmap_filename += '_{}ep'.format(num_epochs) + if max_num_samples != (np.iinfo(np.int64).max - 1): + indexmap_filename += '_{}mns'.format(max_num_samples) + indexmap_filename += '_{}msl'.format(max_seq_length) + indexmap_filename += '_{}s'.format(seed) + if use_one_sent_docs: + indexmap_filename += '_1sentok' + indexmap_filename += '.npy' + + # Build the indexed mapping if not exist. + if mpu.get_data_parallel_rank() == 0 and \ + not os.path.isfile(indexmap_filename): + print(' > WARNING: could not find index map file {}, building ' + 'the indices on rank 0 ...'.format(indexmap_filename)) + + # Make sure the types match the helpers input types. 
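+        # (helpers.build_blocks_mapping below is the compiled C++ helper, which
+        # expects an int64 doc_idx array and int32 sizes arrays.)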
+ assert block_dataset.doc_idx.dtype == np.int64 + assert block_dataset.sizes.dtype == np.int32 + + # Build samples mapping + verbose = torch.distributed.get_rank() == 0 + start_time = time.time() + print_rank_0(' > building samples index mapping for {} ...'.format( + name)) + + from megatron.data import helpers + mapping_array = helpers.build_blocks_mapping( + block_dataset.doc_idx, + block_dataset.sizes, + title_dataset.sizes, + num_epochs, + max_num_samples, + max_seq_length - 3, # account for added tokens + seed, + verbose, + use_one_sent_docs) + + + print_rank_0(' > done building samples index mapping') + np.save(indexmap_filename, mapping_array, allow_pickle=True) + print_rank_0(' > saved the index mapping in {}'.format( + indexmap_filename)) + # Make sure all the ranks have built the mapping + print_rank_0(' > elapsed time to build and save samples mapping ' + '(seconds): {:4f}'.format( + time.time() - start_time)) + + # This should be a barrier but nccl barrier assumes + # device_index=rank which is not the case for model + # parallel case + counts = torch.cuda.LongTensor([1]) + torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group()) + assert counts[0].item() == torch.distributed.get_world_size( + group=mpu.get_data_parallel_group()) + + # Load indexed dataset. + print_rank_0(' > loading indexed mapping from {}'.format( + indexmap_filename)) + start_time = time.time() + + mapping_array = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r') + samples_mapping = BlockSamplesMapping(mapping_array) + + print_rank_0(' loaded indexed file in {:3.3f} seconds'.format( + time.time() - start_time)) + print_rank_0(' total number of samples: {}'.format( + mapping_array.shape[0])) + + return samples_mapping diff --git a/multilinguality_megatron/megatron/data/realm_index.py b/multilinguality_megatron/megatron/data/realm_index.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa4a309edcd5a761b3a87b973b799c30ac73458 --- /dev/null +++ b/multilinguality_megatron/megatron/data/realm_index.py @@ -0,0 +1,224 @@ +import itertools +import os +import pickle +import shutil + +import numpy as np +import torch + +from megatron import get_args +from megatron.core import mpu + + +def detach(tensor): + return tensor.detach().cpu().numpy() + + +class OpenRetreivalDataStore(object): + """ + Serializable data structure for holding data for blocks -- + embeddings and necessary metadata for Retriever + """ + def __init__(self, embedding_path=None, load_from_path=True, rank=None): + self.embed_data = dict() + if embedding_path is None: + args = get_args() + embedding_path = args.embedding_path + rank = args.rank + self.embedding_path = embedding_path + self.rank = rank + + if load_from_path: + self.load_from_file() + + block_data_name = os.path.splitext(self.embedding_path)[0] + self.temp_dir_name = block_data_name + '_tmp' + + def state(self): + return { + 'embed_data': self.embed_data, + } + + def clear(self): + """ + Clear the embedding data structures to save memory. + The metadata ends up getting used, and is also much smaller in + dimensionality so it isn't really worth clearing. 
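+        (FaissMIPSIndex.add_embed_data calls this once the embeddings have been
+        copied into the FAISS index.)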
+ """ + self.embed_data = dict() + + def load_from_file(self): + """Populate members from instance saved to file""" + + if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0: + print("\n> Unpickling BlockData", flush=True) + state_dict = pickle.load(open(self.embedding_path, 'rb')) + if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0: + print(">> Finished unpickling BlockData\n", flush=True) + + self.embed_data = state_dict['embed_data'] + + def add_block_data(self, row_id, block_embeds, allow_overwrite=False): + """ + Add data for set of blocks + :param row_id: 1D array of unique int ids for the blocks + :param block_embeds: 2D array of embeddings of the blocks + In the case of retriever this will be [start_idx, end_idx, doc_idx] + """ + for idx, embed in zip(row_id, block_embeds): + if not allow_overwrite and idx in self.embed_data: + raise ValueError("Unexpectedly tried to overwrite block data") + + self.embed_data[idx] = np.float16(embed) + + def save_shard(self): + """ + Save the block data that was created this in this process + """ + if not os.path.isdir(self.temp_dir_name): + os.makedirs(self.temp_dir_name, exist_ok=True) + + # save the data for each shard + with open('{}/{}.pkl'.format(self.temp_dir_name, self.rank), 'wb') \ + as writer: + pickle.dump(self.state(), writer) + + def merge_shards_and_save(self): + #Combine all the shards made using save_shard + shard_names = os.listdir(self.temp_dir_name) + seen_own_shard = False + + for fname in os.listdir(self.temp_dir_name): + shard_rank = int(os.path.splitext(fname)[0]) + if shard_rank == self.rank: + seen_own_shard = True + continue + + with open('{}/{}'.format(self.temp_dir_name, fname), 'rb') as f: + data = pickle.load(f) + old_size = len(self.embed_data) + shard_size = len(data['embed_data']) + + # add the shard's data and check to make sure there + # is no overlap + self.embed_data.update(data['embed_data']) + assert len(self.embed_data) == old_size + shard_size + + assert seen_own_shard + + # save the consolidated shards and remove temporary directory + with open(self.embedding_path, 'wb') as final_file: + pickle.dump(self.state(), final_file) + shutil.rmtree(self.temp_dir_name, ignore_errors=True) + + print("Finished merging {} shards for a total of {} embeds".format( + len(shard_names), len(self.embed_data)), flush=True) + + +class FaissMIPSIndex(object): + """ + Wrapper object for a BlockData which similarity search via FAISS under the hood + """ + def __init__(self, embed_size, embed_data=None, use_gpu=False): + self.embed_size = embed_size + self.embed_data = embed_data + self.use_gpu = use_gpu + + self.mips_index = None + self._set_mips_index() + + def _set_mips_index(self): + """ + Create a Faiss Flat index with inner product as the metric + to search against + """ + try: + import faiss + except ImportError: + raise Exception("Error: Please install faiss to use FaissMIPSIndex") + + if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0: + print("\n> Building index", flush=True) + + cpu_index = faiss.IndexFlatIP(self.embed_size) + + if self.use_gpu: + # create resources and config for GpuIndex + config = faiss.GpuMultipleClonerOptions() + config.shard = True + config.useFloat16 = True + gpu_index = faiss.index_cpu_to_all_gpus(cpu_index, co=config) + self.mips_index = faiss.IndexIDMap(gpu_index) + if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0: + print(">> Initialized index on GPU", flush=True) + else: + # 
CPU index supports IDs so wrap with IDMap + self.mips_index = faiss.IndexIDMap(cpu_index) + if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0: + print(">> Initialized index on CPU", flush=True) + + # if we were constructed with a BlockData, then automatically load it + # when the FAISS structure is built + if self.embed_data is not None: + self.add_embed_data(self.embed_data) + + def reset_index(self): + """Delete existing index and create a new""" + del self.mips_index + + # reset the block data so that _set_block_index will reload it as well + if self.embed_data is not None: + embed_data_path = self.embed_data.embedding_path + del self.embed_data + self.embed_data = OpenRetreivalDataStore(embed_data_path) + + self._set_mips_index() + + def update_index(self): + """Delete existing index and create a new""" + del self.mips_index + + # reset the block data so that _set_mips_index will reload it as well + if self.embed_data is not None: + self.embed_data.load_from_file() + self._set_mips_index() + + def add_embed_data(self, all_embed_data): + """Add the embedding of each block to the underlying FAISS index""" + + # this assumes the embed_data is a dict : {int: np.array} + block_indices, block_embeds = zip(*all_embed_data.embed_data.items()) + + # the embeddings have to be entered in as float32 even though the math + # internally is done with float16. + embeds_arr = np.float32(np.array(block_embeds)) + indices_arr = np.array(block_indices) + + # we no longer need the embedding data since it's in the index now + all_embed_data.clear() + + self.mips_index.add_with_ids(embeds_arr, indices_arr) + + if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0: + print(">>> Finished adding block data to index", flush=True) + + def search_mips_index(self, query_embeds, top_k, reconstruct=True): + """ + Get the top-k blocks by the index distance metric. + + :param reconstruct: if True: return a [num_queries x k x embed_dim] + array of blocks + if False: return [num_queries x k] array of + distances, and another for indices + """ + query_embeds = np.float32(detach(query_embeds)) + + if reconstruct: + # get the vectors themselves + top_k_block_embeds = self.mips_index.search_and_reconstruct(\ + query_embeds, top_k) + return top_k_block_embeds + else: + # get distances and indices of closest vectors + distances, block_indices = self.mips_index.search(query_embeds, top_k) + return distances, block_indices diff --git a/multilinguality_megatron/megatron/data/t5_dataset.py b/multilinguality_megatron/megatron/data/t5_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d2a623e3e33bce2312d8c14c47ae88dd5aae7c65 --- /dev/null +++ b/multilinguality_megatron/megatron/data/t5_dataset.py @@ -0,0 +1,257 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""T5 Style dataset.""" + +import collections + +import numpy as np +import torch + +from megatron import get_tokenizer +from megatron.data.dataset_utils import ( + create_masked_lm_predictions, + get_samples_mapping +) + +class T5Dataset(torch.utils.data.Dataset): + + def __init__(self, name, indexed_dataset, data_prefix, + num_epochs, max_num_samples, masked_lm_prob, + max_seq_length, max_seq_length_dec, + short_seq_prob, seed): + + # Params to store. + self.name = name + self.seed = seed + self.masked_lm_prob = masked_lm_prob + self.max_seq_length = max_seq_length + self.max_seq_length_dec = max_seq_length_dec + + # Dataset. 
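+        # Each entry of indexed_dataset is one tokenized sentence; the samples
+        # mapping built below packs consecutive sentences into samples targeted
+        # at max_seq_length - 2 tokens, and its rows are unpacked in __getitem__
+        # as (start_index, end_index, seq_length).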
+ self.indexed_dataset = indexed_dataset + + # Build the samples mapping. + self.samples_mapping = get_samples_mapping(self.indexed_dataset, + data_prefix, + num_epochs, + max_num_samples, + self.max_seq_length - 2, # account for added tokens + short_seq_prob, + self.seed, + self.name, + False) + + # Vocab stuff. + tokenizer = get_tokenizer() + self.vocab_id_list = list(tokenizer.inv_vocab.keys()) + self.vocab_id_to_token_dict = tokenizer.inv_vocab + self.cls_id = tokenizer.cls + self.sep_id = tokenizer.sep + self.mask_id = tokenizer.mask + self.pad_id = tokenizer.pad + self.bos_id = tokenizer.bos_token_id + self.eos_id = tokenizer.eos_token_id + self.sentinel_tokens = tokenizer.additional_special_tokens_ids + assert len(self.sentinel_tokens) > 0, "Provide the argument --vocab_extra_ids 100 to the script" + + def __len__(self): + return self.samples_mapping.shape[0] + + def __getitem__(self, idx): + + start_index, end_index, seq_length = self.samples_mapping[idx] + sample = [] + for index in range(start_index, end_index): + sample.append(self.indexed_dataset[index]) + # Note that this rng state should be numpy and not python since + # python randint is inclusive whereas the numpy one is exclusive. + np_rng = np.random.RandomState(seed=(self.seed + idx)) + return build_training_sample(sample, seq_length, + self.max_seq_length, # needed for padding + self.max_seq_length_dec, + self.vocab_id_list, + self.vocab_id_to_token_dict, + self.cls_id, self.sep_id, + self.mask_id, self.pad_id, + self.masked_lm_prob, np_rng, + self.bos_id, self.eos_id, + self.sentinel_tokens) + + +def build_training_sample(sample, target_seq_length, + max_seq_length, max_seq_length_dec, + vocab_id_list, vocab_id_to_token_dict, + cls_id, sep_id, mask_id, pad_id, + masked_lm_prob, np_rng, bos_id=None, + eos_id=None, sentinel_tokens=None): + """Build training sample. + + Arguments: + sample: A list of sentences in which each sentence is a list token ids. + target_seq_length: Desired sequence length. + max_seq_length: Maximum length of the sequence. All values are padded to + this length. + vocab_id_list: List of vocabulary ids. Used to pick a random id. + vocab_id_to_token_dict: A dictionary from vocab ids to text tokens. + cls_id: Start of example id. + sep_id: Separator id. + mask_id: Mask token id. + pad_id: Padding token id. + masked_lm_prob: Probability to mask tokens. + np_rng: Random number genenrator. Note that this rng state should be + numpy and not python since python randint is inclusive for + the opper bound whereas the numpy one is exclusive. + bos_id: start of decoder example id + eos_id: end of generation id + sentinel_tokens: unique value to be substituted for every replaced span + """ + + assert target_seq_length <= max_seq_length + + # flatten sentences into one list + tokens = [token for sentence in sample for token in sentence] + + # Truncate to `target_sequence_length`. + max_num_tokens = target_seq_length + truncated = len(tokens) > max_num_tokens + tokens = tokens[:max_num_tokens] + + # Masking. + max_predictions_per_seq = masked_lm_prob * max_num_tokens + (tokens, masked_positions, masked_labels, _, masked_spans) = create_masked_lm_predictions( + tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, + cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng, + max_ngrams=10, geometric_dist=True, masking_style="t5") + + # Padding. 
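+    # pad_and_convert_to_numpy also assembles the decoder streams: on the encoder
+    # side each masked span is collapsed to a single sentinel token, while the
+    # decoder input/labels spell out "<sentinel> span tokens" for every span and
+    # end with the EOS id.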
+ tokens_enc, tokens_dec_in, labels, enc_mask, \ + dec_mask, enc_dec_mask, loss_mask \ + = pad_and_convert_to_numpy(tokens, masked_positions, + masked_labels, pad_id, max_seq_length, + max_seq_length_dec, masked_spans, + bos_id, eos_id, sentinel_tokens) + + train_sample = { + 'text_enc': tokens_enc, + 'text_dec': tokens_dec_in, + 'labels': labels, + 'loss_mask': loss_mask, + 'truncated': int(truncated), + 'enc_mask': enc_mask, + 'dec_mask': dec_mask, + 'enc_dec_mask': enc_dec_mask, + } + return train_sample + + +def pad_and_convert_to_numpy(tokens, masked_positions, + masked_labels, pad_id, + max_seq_length, max_seq_length_dec, + masked_spans=None, bos_id=None, + eos_id=None, sentinel_tokens=None): + """Pad sequences and convert them to numpy.""" + + sentinel_tokens = collections.deque(sentinel_tokens) + t5_input = [] + (t5_decoder_in, t5_decoder_out) = ([bos_id], []) + (start_index, end_index) = (0, None) + for span in masked_spans: + flag = sentinel_tokens.popleft() + + # Append the same tokens in decoder input and output + t5_decoder_in.append(flag) + t5_decoder_in.extend(span.label) + t5_decoder_out.append(flag) + t5_decoder_out.extend(span.label) + + end_index = span.index[0] + t5_input.extend(tokens[start_index: end_index]) + t5_input.append(flag) + + # the next start index is the token after the last span token + start_index = span.index[-1] + 1 + + # Add token to the t5_decoder_out + t5_decoder_out.append(eos_id) + + # Add the remaining tokens to the t5 input + t5_input.extend(tokens[start_index:]) + + # assert (len(t5_input) - len(masked_spans)) + \ + # (len(t5_decoder_in) - (len(masked_spans) + 1)) == len(tokens) + + # Some checks. + + # Encoder-side padding mask. + num_tokens = len(t5_input) + padding_length = max_seq_length - num_tokens + assert padding_length >= 0 + assert len(masked_positions) == len(masked_labels) + + # Tokens.. + filler = [pad_id] * padding_length + tokens_enc = np.array(t5_input + filler, dtype=np.int64) + + # Decoder-side padding mask. + num_tokens_dec = len(t5_decoder_in) + padding_length_dec = max_seq_length_dec - num_tokens_dec + assert padding_length_dec >= 0 + filler_dec = [pad_id] * padding_length_dec + tokens_dec_in = np.array(t5_decoder_in + filler_dec, dtype=np.int64) + + # Create attention masks + enc_mask = make_attention_mask(tokens_enc, tokens_enc) + enc_dec_mask = make_attention_mask(tokens_dec_in, tokens_enc) + dec_mask = make_attention_mask(tokens_dec_in, tokens_dec_in) + dec_mask = dec_mask * make_history_mask(tokens_dec_in) + + # Labels mask. 
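+    # (Illustrative layout with made-up ids: for tokens [t1 t2 t3 t4 t5] and a
+    #  single masked span [t3 t4] with sentinel <s0>, the encoder sees
+    #  [t1 t2 <s0> t5], the decoder input is [<bos> <s0> t3 t4], and the labels
+    #  built below are [<s0> t3 t4 <eos>] padded with -1.)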
+ labels = t5_decoder_out + ([-1] * padding_length_dec) + labels = np.array(labels, dtype=np.int64) + + # Loss mask + loss_mask = ([1] * num_tokens_dec) + ([0] * padding_length_dec) + loss_mask = np.array(loss_mask, dtype=np.int64) + + return tokens_enc, tokens_dec_in, labels, enc_mask, \ + dec_mask, enc_dec_mask, loss_mask + + +def make_attention_mask(source_block, target_block): + """ + Returns a 2-dimensional (2-D) attention mask + :param source_block: 1-D array + :param target_block: 1-D array + """ + mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) + mask = mask.astype(np.int64) + # (source_length, target_length) + return mask + + +def make_attention_mask_3d(source_block, target_block): + """ + Returns a 3-dimensional (3-D) attention mask + :param source_block: 1-D array + :param target_block: 1-D array + """ + mask = (target_block[:, None, :] >= 1) * (source_block[:, :, None] >= 1) + # (batch, source_length, target_length) + # mask = mask.astype(np.int64) + return mask + + +def make_history_mask(block): + length = block.shape[0] + arange = np.arange(length) + history_mask = (arange[None, ] <= arange[:, None]) + history_mask = history_mask.astype(np.int64) + return history_mask + + +def make_history_mask_3d(block): + batch, length = block.shape + arange = torch.arange(length, device=block.device) + history_mask = (arange[None, ] <= arange[:, None])[None, ] + history_mask = history_mask.expand(batch, length, length) + return history_mask diff --git a/multilinguality_megatron/megatron/data/test/test_indexed_dataset.py b/multilinguality_megatron/megatron/data/test/test_indexed_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7b479b852058c572700330b2fd6408bf0489df2a --- /dev/null +++ b/multilinguality_megatron/megatron/data/test/test_indexed_dataset.py @@ -0,0 +1,124 @@ +# This file isn't really a formal automated test, it's just a place to +# put some code used during development and manual testing of +# indexed_dataset. 
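+# Example invocation (paths, tokenizer type and vocab/merge files are illustrative):
+#   python test_indexed_dataset.py --data /path/to/corpus_text_document \
+#       --dataset_impl mmap --count 5 --tokenizer_type GPT2BPETokenizer \
+#       --vocab_file gpt2-vocab.json --merge_file gpt2-merges.txt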
+ +from megatron.data import indexed_dataset +from megatron.tokenizer import build_tokenizer +import argparse +import os +import sys + +import torch + +script_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(script_dir, "../../../")) + + +def test_indexed_dataset(args): + ds = indexed_dataset.make_dataset(args.data, args.dataset_impl) + tokenizer = build_tokenizer(args) + print(len(ds.doc_idx)) + print(len(ds)) + print(ds.doc_idx[-1]) + if ds.supports_prefetch: + # just prefetch the whole thing in test (so assume it is small) + ds.prefetch(range(len(ds))) + if args.count > len(ds.doc_idx) - 1: + args.count = len(ds.doc_idx) - 1 + + for i in range(args.count): + start = ds.doc_idx[i] + end = ds.doc_idx[i + 1] + ids = ds[start:end] + print(f"Document {i}:") + print("--------------") + for s in ids: + assert len(s) > 0 + l = s.data.tolist() + text = tokenizer.detokenize(l) + print(text) + print("---") + + +def test_indexed_dataset_get(args): + ds = indexed_dataset.make_dataset(args.data, args.dataset_impl) + tokenizer = build_tokenizer(args) + size = ds.sizes[0] + print(f"size: {size}") + full = ds.get(0) + print(full) + # print(tokenizer.detokenize(full.data.tolist())) + print("---") + end = ds.get(0, offset=size - 10) + print(end) + # print(tokenizer.detokenize(end.data.tolist())) + + start = ds.get(0, length=10) + print(start) + # print(tokenizer.detokenize(start.data.tolist())) + + part = ds.get(0, offset=2, length=8) + print(part) + # print(tokenizer.detokenize(part.data.tolist())) + +# def test_albert_dataset(args): +# # tokenizer = FullBertTokenizer(args.vocab, do_lower_case=True) +# # idataset = indexed_dataset.make_dataset(args.data, args.dataset_impl) +# # ds = AlbertDataset(idataset, tokenizer) +# ds = AlbertDataset.from_paths(args.vocab, args.data, args.dataset_impl, +# args.epochs, args.max_num_samples, +# args.masked_lm_prob, args.seq_length, +# args.short_seq_prob, args.seed) +# truncated = 0 +# total = 0 +# for i, s in enumerate(ds): +# ids = s['text'] +# tokens = ds.tokenizer.convert_ids_to_tokens(ids) +# print(tokens) +# if i >= args.count-1: +# exit() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, help='prefix to data files') + parser.add_argument('--dataset_impl', type=str, default='infer', + choices=['lazy', 'cached', 'mmap', 'infer']) + parser.add_argument('--count', type=int, default=10, + help='Number of samples/documents to print') + + group = parser.add_argument_group(title='tokenizer') + group.add_argument('--tokenizer_type', type=str, required=True, + choices=['BertWordPieceLowerCase', + 'GPT2BPETokenizer'], + help='What type of tokenizer to use.') + group.add_argument('--vocab_file', type=str, default=None, + help='Path to the vocab file') + group.add_argument('--merge_file', type=str, default=None, + help='Path to the BPE merge file (if necessary).') + parser.add_argument('--epochs', type=int, default=5, + help='Number of epochs to plan for') + parser.add_argument('--max_num_samples', type=int, default=None, + help='Maximum number of samples to plan for') + parser.add_argument('--masked_lm_prob', type=float, default=0.15, + help='probability of masking tokens') + parser.add_argument('--seq_length', type=int, default=512, + help='maximum sequence length') + parser.add_argument('--short_seq_prob', type=float, default=0.1, + help='probability of creating a short sequence') + parser.add_argument('--seed', type=int, default=1234, + help='random seed') + args = parser.parse_args() + 
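+    # build_tokenizer() expects a few attributes that megatron's full argument
+    # parser would normally supply, so they are filled in manually below.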
args.rank = 0 + args.make_vocab_size_divisible_by = 128 + args.tensor_model_parallel_size = 1 + + if args.dataset_impl == "infer": + args.dataset_impl = indexed_dataset.infer_dataset_impl(args.data) + +# test_albert_dataset(args) + test_indexed_dataset_get(args) + + +if __name__ == "__main__": + main() diff --git a/multilinguality_megatron/megatron/data/test/test_preprocess_data.sh b/multilinguality_megatron/megatron/data/test/test_preprocess_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..b2601e0b984673f29d941020d185e3e089edae68 --- /dev/null +++ b/multilinguality_megatron/megatron/data/test/test_preprocess_data.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +IMPL=cached +python ../preprocess_data.py \ + --input test_samples.json \ + --vocab vocab.txt \ + --dataset_impl ${IMPL} \ + --output_prefix test_samples_${IMPL} \ + --workers 1 \ + --log_interval 2 diff --git a/multilinguality_megatron/megatron/dist_signal_handler.py b/multilinguality_megatron/megatron/dist_signal_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..a60204f004a3f149da0bc059cd875b6ec390c0c4 --- /dev/null +++ b/multilinguality_megatron/megatron/dist_signal_handler.py @@ -0,0 +1,81 @@ +import signal + +import torch + + +def get_world_size(): + if torch.distributed.is_available() and torch.distributed.is_initialized(): + world_size = torch.distributed.get_world_size() + else: + world_size = 1 + return world_size + + +def get_device(local_rank=None): + backend = torch.distributed.get_backend() + if backend == 'nccl': + if local_rank is None: + device = torch.device('cuda') + else: + device = torch.device(f'cuda:{local_rank}') + elif backend == 'gloo': + device = torch.device('cpu') + else: + raise RuntimeError + return device + + +def all_gather_item(item, dtype, group=None, async_op=False, local_rank=None): + if not torch.distributed.is_available() or \ + not torch.distributed.is_initialized(): + return [item] + + device = get_device(local_rank) + + if group is not None: + group_size = group.size() + else: + group_size = get_world_size() + + tensor = torch.tensor([item], device=device, dtype=dtype) + output_tensors = [ + torch.zeros(1, dtype=tensor.dtype, device=tensor.device) + for _ in range(group_size) + ] + torch.distributed.all_gather(output_tensors, tensor, group, async_op) + output = [elem.item() for elem in output_tensors] + return output + + +class DistributedSignalHandler: + def __init__(self, sig=signal.SIGTERM): + self.sig = sig + + def signals_received(self): + all_received = all_gather_item( + self._signal_received, dtype=torch.int32 + ) + return all_received + + def __enter__(self): + self._signal_received = False + self.released = False + self.original_handler = signal.getsignal(self.sig) + + def handler(signum, frame): + self._signal_received = True + + signal.signal(self.sig, handler) + + return self + + def __exit__(self, type, value, tb): + self.release() + + def release(self): + if self.released: + return False + + signal.signal(self.sig, self.original_handler) + self.released = True + return True diff --git a/multilinguality_megatron/megatron/fp16_deprecated/loss_scaler.py b/multilinguality_megatron/megatron/fp16_deprecated/loss_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..cb64aa928923e138f504c6d118ff7a67882dd34c --- /dev/null +++ b/multilinguality_megatron/megatron/fp16_deprecated/loss_scaler.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""For backward compatibility, we need the class definitions to deserialize.""" + +class LossScaler: + def __init__(self, scale=1): + self.cur_scale = scale + +class DynamicLossScaler: + def __init__(self, + init_scale=2**32, + scale_factor=2., + scale_window=1000, + min_scale=1, + delayed_shift=1, + consecutive_hysteresis=False): + self.cur_scale = init_scale + self.cur_iter = 0 + self.last_overflow_iter = -1 + self.scale_factor = scale_factor + self.scale_window = scale_window + self.min_scale = min_scale + self.delayed_shift = delayed_shift + self.cur_hysteresis = delayed_shift + self.consecutive_hysteresis = consecutive_hysteresis + diff --git a/multilinguality_megatron/megatron/fused_kernels/__init__.py b/multilinguality_megatron/megatron/fused_kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7646ddb1a6197eb0eea0844625ef17346519fbe1 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/__init__.py @@ -0,0 +1,118 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import os +import pathlib +import subprocess + +from torch.utils import cpp_extension + +# Setting this param to a list has a problem of generating different +# compilation commands (with diferent order of architectures) and +# leading to recompilation of fused kernels. Set it to empty string +# to avoid recompilation and assign arch flags explicity in +# extra_cuda_cflags below +os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + +def load(args): + + # Check if cuda 11 is installed for compute capability 8.0 + cc_flag = [] + _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version( + cpp_extension.CUDA_HOME) + if int(bare_metal_major) >= 11: + cc_flag.append('-gencode') + cc_flag.append('arch=compute_80,code=sm_80') + if int(bare_metal_minor) >= 7: + cc_flag.append('-gencode') + cc_flag.append('arch=compute_90,code=sm_90') + + # Build path + srcpath = pathlib.Path(__file__).parent.absolute() + buildpath = srcpath / 'build' + _create_build_dir(buildpath) + + # Helper function to build the kernels. + def _cpp_extention_load_helper(name, sources, extra_cuda_flags): + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=['-O3',], + extra_cuda_cflags=['-O3', + '-gencode', 'arch=compute_70,code=sm_70', + '--use_fast_math'] + extra_cuda_flags + cc_flag, + verbose=(args.rank == 0) + ) + + # ============== + # Fused softmax. + # ============== + + if args.masked_softmax_fusion: + extra_cuda_flags = ['-U__CUDA_NO_HALF_OPERATORS__', + '-U__CUDA_NO_HALF_CONVERSIONS__', + '--expt-relaxed-constexpr', + '--expt-extended-lambda'] + + # Upper triangular softmax. + sources=[srcpath / 'scaled_upper_triang_masked_softmax.cpp', + srcpath / 'scaled_upper_triang_masked_softmax_cuda.cu'] + scaled_upper_triang_masked_softmax_cuda = _cpp_extention_load_helper( + "scaled_upper_triang_masked_softmax_cuda", + sources, extra_cuda_flags) + + # Masked softmax. + sources=[srcpath / 'scaled_masked_softmax.cpp', + srcpath / 'scaled_masked_softmax_cuda.cu'] + scaled_masked_softmax_cuda = _cpp_extention_load_helper( + "scaled_masked_softmax_cuda", sources, extra_cuda_flags) + + # Softmax + sources=[srcpath / 'scaled_softmax.cpp', + srcpath / 'scaled_softmax_cuda.cu'] + scaled_softmax_cuda = _cpp_extention_load_helper( + "scaled_softmax_cuda", sources, extra_cuda_flags) + + # ================================= + # Mixed precision fused layer norm. 
+ # ================================= + + extra_hopper_flags = ['-U__CUDA_NO_HALF_OPERATORS__', + '-U__CUDA_NO_HALF_CONVERSIONS__'] + + extra_cuda_flags = ['-maxrregcount=50'] + sources=[srcpath / 'layer_norm_cuda.cpp', + srcpath / 'layer_norm_cuda_kernel.cu'] + fused_mix_prec_layer_norm_cuda = _cpp_extention_load_helper( + "fused_mix_prec_layer_norm_cuda", sources, extra_cuda_flags + extra_hopper_flags) + + # ================================= + # Fused gradient accumulation to weight gradient computation of linear layer + # ================================= + + if args.gradient_accumulation_fusion: + sources=[srcpath / 'fused_weight_gradient_dense.cpp', + srcpath / 'fused_weight_gradient_dense.cu'] + fused_dense_cuda = _cpp_extention_load_helper( + "fused_dense_cuda", sources, extra_hopper_flags) + + +def _get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], + universal_newlines=True) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def _create_build_dir(buildpath): + try: + os.mkdir(buildpath) + except OSError: + if not os.path.isdir(buildpath): + print(f"Creation of the build directory {buildpath} failed") diff --git a/multilinguality_megatron/megatron/fused_kernels/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/fused_kernels/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f656675fdf7d577b99d57afbbf5e3975a5afa5f8 Binary files /dev/null and b/multilinguality_megatron/megatron/fused_kernels/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/fused_kernels/build/.ninja_deps b/multilinguality_megatron/megatron/fused_kernels/build/.ninja_deps new file mode 100644 index 0000000000000000000000000000000000000000..5287280174530bc9caa05dd6e2415b4f57a1c5d3 Binary files /dev/null and b/multilinguality_megatron/megatron/fused_kernels/build/.ninja_deps differ diff --git a/multilinguality_megatron/megatron/fused_kernels/build/.ninja_log b/multilinguality_megatron/megatron/fused_kernels/build/.ninja_log new file mode 100644 index 0000000000000000000000000000000000000000..7b27c3574f8edef3de405154a06141c3a5fa9551 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/build/.ninja_log @@ -0,0 +1,7 @@ +# ninja log v5 +1 21202 1716701816589590430 layer_norm_cuda.o 275f065f68f79667 +1 46908 1716701842237534274 layer_norm_cuda_kernel.cuda.o 4b39a9a47602b255 +46944 47328 1716701842709533241 fused_mix_prec_layer_norm_cuda.so d794b264aeaff528 +3 19109 1716701861937491247 fused_weight_gradient_dense.o cd3229733474c767 +3 124280 1716701967017263189 fused_weight_gradient_dense.cuda.o ae13df4365add4a6 +124284 124646 1716701967469262213 fused_dense_cuda.so 1c9407684bc1c980 diff --git a/multilinguality_megatron/megatron/fused_kernels/build/build.ninja b/multilinguality_megatron/megatron/fused_kernels/build/build.ninja new file mode 100644 index 0000000000000000000000000000000000000000..007d236b2ed21c49cc4a7c3605b8654b0db6b6bc --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/build/build.ninja @@ -0,0 +1,34 @@ +ninja_required_version = 1.3 +cxx = c++ +nvcc = /home/kshitij/miniconda3/envs/towerllm-env/bin/nvcc + +cflags = -DTORCH_EXTENSION_NAME=fused_dense_cuda -DTORCH_API_INCLUDE_EXTENSION_H 
-DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include/TH -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include/THC -isystem /home/kshitij/miniconda3/envs/towerllm-env/include -isystem /home/kshitij/miniconda3/envs/towerllm-env/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++17 -O3 +post_cflags = +cuda_cflags = -DTORCH_EXTENSION_NAME=fused_dense_cuda -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include/TH -isystem /home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/include/THC -isystem /home/kshitij/miniconda3/envs/towerllm-env/include -isystem /home/kshitij/miniconda3/envs/towerllm-env/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86 --compiler-options '-fPIC' -O3 -gencode arch=compute_70,code=sm_70 --use_fast_math -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -gencode arch=compute_80,code=sm_80 -gencode arch=compute_90,code=sm_90 -std=c++17 +cuda_post_cflags = +cuda_dlink_post_cflags = +ldflags = -shared -L/home/kshitij/miniconda3/envs/towerllm-env/lib/python3.9/site-packages/torch/lib -lc10 -lc10_cuda -ltorch_cpu -ltorch_cuda -ltorch -ltorch_python -L/home/kshitij/miniconda3/envs/towerllm-env/lib -lcudart + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags + depfile = $out.d + deps = gcc + +rule cuda_compile + depfile = $out.d + deps = gcc + command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags + + + +rule link + command = $cxx $in $ldflags -o $out + +build fused_weight_gradient_dense.o: compile /media/scratch/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cpp +build fused_weight_gradient_dense.cuda.o: cuda_compile /media/scratch/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cu + + + +build fused_dense_cuda.so: link fused_weight_gradient_dense.o fused_weight_gradient_dense.cuda.o + +default fused_dense_cuda.so diff --git a/multilinguality_megatron/megatron/fused_kernels/build/fused_dense_cuda.so b/multilinguality_megatron/megatron/fused_kernels/build/fused_dense_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..74e7e01849e1072c73bb0534cb56aaec2570d295 Binary files /dev/null and b/multilinguality_megatron/megatron/fused_kernels/build/fused_dense_cuda.so differ diff --git a/multilinguality_megatron/megatron/fused_kernels/build/fused_mix_prec_layer_norm_cuda.so 
b/multilinguality_megatron/megatron/fused_kernels/build/fused_mix_prec_layer_norm_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..ed5bb0109971b186b7b2fc27c99d208d28e37891 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/build/fused_mix_prec_layer_norm_cuda.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aae6be94385c511185de3a3df8e3157e0ca898431f69df1c066f6315a8a6d2e +size 1198816 diff --git a/multilinguality_megatron/megatron/fused_kernels/build/fused_weight_gradient_dense.cuda.o b/multilinguality_megatron/megatron/fused_kernels/build/fused_weight_gradient_dense.cuda.o new file mode 100644 index 0000000000000000000000000000000000000000..cd3cd93320ef6b703170f794c118f05d3e1cafae Binary files /dev/null and b/multilinguality_megatron/megatron/fused_kernels/build/fused_weight_gradient_dense.cuda.o differ diff --git a/multilinguality_megatron/megatron/fused_kernels/build/fused_weight_gradient_dense.o b/multilinguality_megatron/megatron/fused_kernels/build/fused_weight_gradient_dense.o new file mode 100644 index 0000000000000000000000000000000000000000..c1f1a8fd080150f6c038a4c5d614a19e8d5d1712 Binary files /dev/null and b/multilinguality_megatron/megatron/fused_kernels/build/fused_weight_gradient_dense.o differ diff --git a/multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda.o b/multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda.o new file mode 100644 index 0000000000000000000000000000000000000000..2479af0556adcf756c17f3a9941cc9192d4cb166 Binary files /dev/null and b/multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda.o differ diff --git a/multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda_kernel.cuda.o b/multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda_kernel.cuda.o new file mode 100644 index 0000000000000000000000000000000000000000..a43cbb585b71aab50c78a9e459fb36310f4aa38f --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/build/layer_norm_cuda_kernel.cuda.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15b55ff5b0fd97619ccd2904018dd703b406e86608431a797214bf501a593bec +size 1047176 diff --git a/multilinguality_megatron/megatron/fused_kernels/compat.h b/multilinguality_megatron/megatron/fused_kernels/compat.h new file mode 100644 index 0000000000000000000000000000000000000000..5495d7807762d8b4e3dbc11b28dba15f85bd8108 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/compat.h @@ -0,0 +1,17 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. 
*/ + + + +#ifndef TORCH_CHECK +#define TORCH_CHECK AT_CHECK +#endif + +#ifdef VERSION_GE_1_3 +#define DATA_PTR data_ptr +#else +#define DATA_PTR data +#endif diff --git a/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cpp b/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cpp new file mode 100644 index 0000000000000000000000000000000000000000..194ee59353d2a8c9da24e50c592f4e086806d078 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cpp @@ -0,0 +1,47 @@ +#include +#include + +#include +#include + +#include "type_shim.h" + + +template +int wgrad_gemm_accum_fp32_cuda(T *input, T *d_output, float *d_weight, int in_dim, int hidden_dim, int out_dim); + +void wgrad_gemm_accum_fp32(const at::Tensor input, const at::Tensor d_output, at::Tensor d_weight) { + at::Tensor input_2d, d_output_2d; + // input tensor: collapse to the first dim + auto in_sizes = input.sizes(); + if (input.dim() > 2) { + input_2d = input.view({-1, in_sizes[in_sizes.size() - 1]}); + } else { + input_2d = input; + } + // d_output tensor: collapse to the first dim + auto d_out_sizes = d_output.sizes(); + if (d_output.dim() > 2) { + d_output_2d = d_output.view({-1, d_out_sizes[d_out_sizes.size() - 1]}); + } else { + d_output_2d = d_output; + } + + int hidden_dim = input_2d.size(0); + int in_dim = input_2d.size(1); + int out_dim = d_weight.size(0); + + DISPATCH_HALF_BFLOAT_AND_FLOAT(input_2d.scalar_type(), "wgrad_gemm_accum_fp32", + int result = wgrad_gemm_accum_fp32_cuda( + input_2d.data_ptr(), + d_output_2d.data_ptr(), + d_weight.data_ptr(), + in_dim, + hidden_dim, + out_dim); + ); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("wgrad_gemm_accum_fp32", &wgrad_gemm_accum_fp32, "wgrad gemm accum in fp32"); +} diff --git a/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cu b/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cu new file mode 100644 index 0000000000000000000000000000000000000000..7dc10e65d37e531b54d2c875011f9c47eeb8ff7f --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/fused_weight_gradient_dense.cu @@ -0,0 +1,157 @@ +#include +#include +#include +#include +#include +#include +#include + +/* Includes, cuda */ +#include +#include + + +// BF16 Tensor core wrapper around cublas GEMMEx +cublasStatus_t gemmex_wrapper( + cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + at::BFloat16* A, + int lda, + at::BFloat16* B, + int ldb, + const float* beta, + float* C, + int ldc) { + return cublasGemmEx( + handle, + transa, + transb, + m, + n, + k, + alpha, + A, + CUDA_R_16BF, + lda, + B, + CUDA_R_16BF, + ldb, + beta, + C, + CUDA_R_32F, + ldc, + CUDA_R_32F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +} + +// FP16 Tensor core wrapper around cublas GEMMEx +cublasStatus_t gemmex_wrapper( + cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + at::Half* A, + int lda, + at::Half* B, + int ldb, + const float* beta, + float* C, + int ldc) { + return cublasGemmEx( + handle, + transa, + transb, + m, + n, + k, + alpha, + A, + CUDA_R_16F, + lda, + B, + CUDA_R_16F, + ldb, + beta, + C, + CUDA_R_32F, + ldc, + CUDA_R_32F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +} + +// FP32 Tensor core wrapper around cublas GEMMEx +cublasStatus_t gemmex_wrapper( + cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + 
int m, + int n, + int k, + const float* alpha, + float* A, + int lda, + float* B, + int ldb, + const float* beta, + float* C, + int ldc) { + return cublasGemmEx( + handle, + transa, + transb, + m, + n, + k, + alpha, + A, + CUDA_R_32F, + lda, + B, + CUDA_R_32F, + ldb, + beta, + C, + CUDA_R_32F, + ldc, + CUDA_R_32F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +} + +template +int wgrad_gemm_accum_fp32_cuda(T *input, T *d_output, float *d_weight, int in_dim, int hidden_dim, int out_dim) { + cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); + cudaStream_t stream; + cublasGetStream(handle, &stream); + const float alpha = 1.0; + const float beta = 1.0; + int status = 1; + + status = gemmex_wrapper( + handle, + CUBLAS_OP_N, + CUBLAS_OP_T, + in_dim, + out_dim, + hidden_dim, + &alpha, + input, + in_dim, + d_output, + out_dim, + &beta, + d_weight, + in_dim); + return status; +} + +template int wgrad_gemm_accum_fp32_cuda(at::Half *input, at::Half *d_output, float *d_weight, int in_dim, int hidden_dim, int out_dim); +template int wgrad_gemm_accum_fp32_cuda(at::BFloat16 *input, at::BFloat16 *d_output, float *d_weight, int in_dim, int hidden_dim, int out_dim); +template int wgrad_gemm_accum_fp32_cuda(float *input, float *d_output, float *d_weight, int in_dim, int hidden_dim, int out_dim); diff --git a/multilinguality_megatron/megatron/fused_kernels/layer_norm_cuda.cpp b/multilinguality_megatron/megatron/fused_kernels/layer_norm_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f0925fcdd06738a8c3db864d91bde9c7d3012919 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/layer_norm_cuda.cpp @@ -0,0 +1,187 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. 
*/ + +#include +#include +#include +#include "compat.h" + +namespace { + +void compute_n1_n2( + at::Tensor input, + at::IntArrayRef normalized_shape, + int& n1, + int& n2) { + int idiff = input.ndimension() - normalized_shape.size(); + n2 = 1; + for (int i = 0; i < (int)normalized_shape.size(); ++i) { + assert( input.sizes()[i+idiff] == normalized_shape[i] ); + n2 *= normalized_shape[i]; + } + n1 = 1; + for (int i = 0; i < idiff; ++i) { + n1 *= input.sizes()[i]; + } +} + +void check_args( + at::IntArrayRef normalized_shape, + at::Tensor gamma, + at::Tensor beta + ) +{ + TORCH_CHECK(!gamma.defined() || gamma.sizes().equals(normalized_shape)); + TORCH_CHECK(!beta.defined() || beta.sizes().equals(normalized_shape)); +} + +void check_args( + at::Tensor input, + at::IntArrayRef normalized_shape, + int& n1, + int& n2 + ) +{ + int64_t normalized_ndim = normalized_shape.size(); + + if (normalized_ndim < 1) { + std::stringstream ss; + ss << "Expected normalized_shape to be at least 1-dimensional, i.e., " + << "containing at least one element, but got normalized_shape=" + << normalized_shape; + throw std::runtime_error(ss.str()); + } + + auto input_shape = input.sizes(); + auto input_ndim = input.dim(); + + if (input_ndim < normalized_ndim || + !input_shape.slice(input_ndim - normalized_ndim).equals(normalized_shape)) { + std::stringstream ss; + ss << "Given normalized_shape=" << normalized_shape + << ", expected input with shape [*"; + for (auto size : normalized_shape) { + ss << ", " << size; + } + ss << "], but got input of size" << input_shape; + throw std::runtime_error(ss.str()); + } + + compute_n1_n2(input,normalized_shape,n1,n2); +} + + +void check_args( + at::Tensor input, + at::IntArrayRef normalized_shape, + at::Tensor gamma, + at::Tensor beta, + int& n1, + int& n2 + ) +{ + check_args(input,normalized_shape,n1,n2); + check_args(normalized_shape,gamma,beta); +} +} + +void cuda_layer_norm( + at::Tensor* output, + at::Tensor* mean, + at::Tensor* invvar, + at::Tensor* input, + int n1, + int n2, + at::IntArrayRef normalized_shape, + at::Tensor* gamma, + at::Tensor* beta, + double epsilon); + +#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) + +std::vector layer_norm_affine( + at::Tensor input, + at::IntArrayRef normalized_shape, + at::Tensor gamma, + at::Tensor beta, + double epsilon) { + + CHECK_INPUT(input); + CHECK_INPUT(gamma); + CHECK_INPUT(beta); + int n1, n2; + check_args(input, normalized_shape, gamma, beta, n1, n2); + + at::Tensor output = at::empty_like( + input, gamma.options().dtype(gamma.scalar_type())); + at::Tensor mean = at::empty( + {n1}, input.options().dtype(at::ScalarType::Float)); + at::Tensor invvar = at::empty_like(mean); + + cuda_layer_norm(&output, &mean, &invvar, &input, n1, n2, + normalized_shape, &gamma, &beta, epsilon); + + return {output, mean, invvar}; + +} + + +void cuda_layer_norm_gradient( + at::Tensor* dout, + at::Tensor* mean, + at::Tensor* invvar, + at::Tensor* input, + int n1, + int n2, + at::IntArrayRef normalized_shape, + at::Tensor* gamma, + at::Tensor* beta, + double epsilon, + at::Tensor* grad_input, + at::Tensor* grad_gamma, + at::Tensor* grad_beta + ); + +std::vector layer_norm_gradient_affine( + at::Tensor dout, + at::Tensor mean, + at::Tensor invvar, + at::Tensor input, + at::IntArrayRef normalized_shape, + at::Tensor gamma, + at::Tensor beta, + double epsilon) { + + 
CHECK_INPUT(dout); + CHECK_INPUT(mean); + CHECK_INPUT(invvar); + CHECK_INPUT(input); + CHECK_INPUT(gamma); + CHECK_INPUT(beta); + int n1, n2; + check_args(input, normalized_shape, gamma, beta, n1, n2); + + at::Tensor grad_input = at::empty_like(input); + at::Tensor grad_gamma = at::empty_like(gamma); + at::Tensor grad_beta = at::empty_like(beta); + + cuda_layer_norm_gradient(&dout, &mean, &invvar, &input, n1, n2, + normalized_shape, &gamma, &beta, epsilon, + &grad_input, &grad_gamma, &grad_beta); + + return {grad_input, grad_gamma, grad_beta}; + +} + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward_affine", &layer_norm_affine, + "LayerNorm forward (CUDA)"); + m.def("backward_affine", &layer_norm_gradient_affine, + "LayerNorm backward (CUDA)"); +} diff --git a/multilinguality_megatron/megatron/fused_kernels/layer_norm_cuda_kernel.cu b/multilinguality_megatron/megatron/fused_kernels/layer_norm_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..30b376501a8b8e6e45f098f8606e3004e5d4c69b --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/layer_norm_cuda_kernel.cu @@ -0,0 +1,818 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. */ + +#include "ATen/ATen.h" +#include "ATen/AccumulateType.h" +#include "ATen/cuda/CUDAContext.h" +#include "ATen/cuda/DeviceUtils.cuh" + +#include +#include + +#include "type_shim.h" + +template __device__ +void cuWelfordOnlineSum( + const U curr, + U& mu, + U& sigma2, + U& count) +{ + count = count + U(1); + U delta = curr - mu; + U lmean = mu + delta / count; + mu = lmean; + U delta2 = curr - lmean; + sigma2 = sigma2 + delta * delta2; +} + +template __device__ +void cuChanOnlineSum( + const U muB, + const U sigma2B, + const U countB, + U& mu, + U& sigma2, + U& count) +{ + U delta = muB - mu; + U nA = count; + U nB = countB; + count = count + countB; + U nX = count; + if (nX > U(0)) { + nA = nA / nX; + nB = nB / nX; + mu = nA*mu + nB*muB; + sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX; + } else { + mu = U(0); + sigma2 = U(0); + } +} + +template __device__ +void cuWelfordMuSigma2( + const T* __restrict__ vals, + const int n1, + const int n2, + const int i1, + U& mu, + U& sigma2, + U* buf) +{ + // Assumptions: + // 1) blockDim.x == warpSize + // 2) Tensor is contiguous + // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available. 
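+  //
+  // cuWelfordOnlineSum above is Welford's online update
+  //   (count += 1; delta = x - mu; mu += delta / count; M2 += delta * (x - mu)),
+  // and cuChanOnlineSum merges two partial (mu, M2, count) triples with the
+  // Chan et al. parallel-variance formula, so lanes and warps can combine their
+  // running moments; sigma2 holds M2 until it is divided by n2 at the end.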
+ // + // compute variance and mean over n2 + U count = U(0); + mu= U(0); + sigma2 = U(0); + if (i1 < n1) { + // one warp normalizes one n1 index, + // synchronization is implicit + // initialize with standard Welford algorithm + const int numx = blockDim.x * blockDim.y; + const int thrx = threadIdx.x + threadIdx.y * blockDim.x; + const T* lvals = vals + i1*n2; + int l = 4*thrx; + for (; l+3 < n2; l+=4*numx) { + for (int k = 0; k < 4; ++k) { + U curr = static_cast(lvals[l+k]); + cuWelfordOnlineSum(curr,mu,sigma2,count); + } + } + for (; l < n2; ++l) { + U curr = static_cast(lvals[l]); + cuWelfordOnlineSum(curr,mu,sigma2,count); + } + // intra-warp reductions + for (int l = 0; l <= 4; ++l) { + int srcLaneB = (threadIdx.x+(1<(muB,sigma2B,countB,mu,sigma2,count); + } + // threadIdx.x == 0 has correct values for each warp + // inter-warp reductions + if (blockDim.y > 1) { + U* ubuf = (U*)buf; + U* ibuf = (U*)(ubuf + blockDim.y); + for (int offset = blockDim.y/2; offset > 0; offset /= 2) { + // upper half of warps write to shared + if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) { + const int wrt_y = threadIdx.y - offset; + ubuf[2*wrt_y] = mu; + ubuf[2*wrt_y+1] = sigma2; + ibuf[wrt_y] = count; + } + __syncthreads(); + // lower half merges + if (threadIdx.x == 0 && threadIdx.y < offset) { + U muB = ubuf[2*threadIdx.y]; + U sigma2B = ubuf[2*threadIdx.y+1]; + U countB = ibuf[threadIdx.y]; + cuChanOnlineSum(muB,sigma2B,countB,mu,sigma2,count); + } + __syncthreads(); + } + // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values + if (threadIdx.x == 0 && threadIdx.y == 0) { + ubuf[0] = mu; + ubuf[1] = sigma2; + } + __syncthreads(); + mu = ubuf[0]; + sigma2 = ubuf[1]/U(n2); + // don't care about final value of count, we know count == n2 + } else { + mu = WARP_SHFL(mu, 0); + sigma2 = WARP_SHFL(sigma2/U(n2), 0); + } + } +} + +template<> __device__ +void cuWelfordMuSigma2( + const at::Half* __restrict__ vals, + const int n1, + const int n2, + const int i1, + float& mu, + float& sigma2, + float* buf) +{ + // Assumptions: + // 1) blockDim.x == warpSize + // 2) Tensor is contiguous + // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available. + // + // compute variance and mean over n2 + float count = 0.0f; + mu= float(0); + sigma2 = float(0); + if (i1 < n1) { + // one warp normalizes one n1 index, + // synchronization is implicit + // initialize with standard Welford algorithm + const int numx = blockDim.x * blockDim.y; + const int thrx = threadIdx.x + threadIdx.y * blockDim.x; + const at::Half* lvals = vals + i1*n2; + int l = 8*thrx; + if ((((size_t)lvals)&3) != 0) { + // 16 bit alignment + // first thread consumes first point + if (thrx == 0) { + float curr = static_cast(lvals[0]); + cuWelfordOnlineSum(curr,mu,sigma2,count); + } + ++l; + } + // at this point, lvals[l] are 32 bit aligned for all threads. 
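+    // The main loop below consumes 8 half values per thread per iteration as
+    // four __half2 loads, widening each pair to float2 before the Welford update.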
+ for (; l+7 < n2; l+=8*numx) { + for (int k = 0; k < 8; k+=2) { + float2 curr = __half22float2(*((__half2*)(lvals+l+k))); + cuWelfordOnlineSum(curr.x,mu,sigma2,count); + cuWelfordOnlineSum(curr.y,mu,sigma2,count); + } + } + for (; l < n2; ++l) { + float curr = static_cast(lvals[l]); + cuWelfordOnlineSum(curr,mu,sigma2,count); + } + // intra-warp reductions + for (int l = 0; l <= 4; ++l) { + int srcLaneB = (threadIdx.x+(1< 1) { + float* ubuf = (float*)buf; + float* ibuf = (float*)(ubuf + blockDim.y); + for (int offset = blockDim.y/2; offset > 0; offset /= 2) { + // upper half of warps write to shared + if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) { + const int wrt_y = threadIdx.y - offset; + ubuf[2*wrt_y] = mu; + ubuf[2*wrt_y+1] = sigma2; + ibuf[wrt_y] = count; + } + __syncthreads(); + // lower half merges + if (threadIdx.x == 0 && threadIdx.y < offset) { + float muB = ubuf[2*threadIdx.y]; + float sigma2B = ubuf[2*threadIdx.y+1]; + float countB = ibuf[threadIdx.y]; + cuChanOnlineSum(muB,sigma2B,countB,mu,sigma2,count); + } + __syncthreads(); + } + // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values + if (threadIdx.x == 0 && threadIdx.y == 0) { + ubuf[0] = mu; + ubuf[1] = sigma2; + } + __syncthreads(); + mu = ubuf[0]; + sigma2 = ubuf[1]/float(n2); + // don't care about final value of count, we know count == n2 + } else { + mu = WARP_SHFL(mu, 0); + sigma2 = WARP_SHFL(sigma2/float(n2), 0); + } + } +} + +template U rsqrt(U v) { + return U(1) / sqrt(v); +} +template<> float rsqrt(float v) { + return rsqrtf(v); +} +template<> double rsqrt(double v) { + return rsqrt(v); +} + +namespace { +// This is the un-specialized struct. Note that we prevent instantiation of this +// struct by putting an undefined symbol in the function body so it won't compile. 
+// template +// struct SharedMemory +// { +// // Ensure that we won't compile any un-specialized types +// __device__ T *getPointer() +// { +// extern __device__ void error(void); +// error(); +// return NULL; +// } +// }; +// https://github.com/NVIDIA/apex/issues/246 +template +struct SharedMemory; + +template <> +struct SharedMemory +{ + __device__ float *getPointer() + { + extern __shared__ float s_float[]; + return s_float; + } +}; + +} + +template __global__ +void cuApplyLayerNorm( + V* __restrict__ output_vals, + U* __restrict__ mean, + U* __restrict__ invvar, + const T* __restrict__ vals, + const int n1, + const int n2, + const U epsilon, + const V* __restrict__ gamma, + const V* __restrict__ beta + ) +{ + // Assumptions: + // 1) blockDim.x == warpSize + // 2) Tensors are contiguous + // + for (auto i1=blockIdx.y; i1 < n1; i1 += gridDim.y) { + SharedMemory shared; + U* buf = shared.getPointer(); + U mu,sigma2; + cuWelfordMuSigma2(vals,n1,n2,i1,mu,sigma2,buf); + const T* lvals = vals + i1*n2; + V* ovals = output_vals + i1*n2; + U c_invvar = rsqrt(sigma2 + epsilon); + const int numx = blockDim.x * blockDim.y; + const int thrx = threadIdx.x + threadIdx.y * blockDim.x; + if (gamma != NULL && beta != NULL) { + for (int i = thrx; i < n2; i+=numx) { + U curr = static_cast(lvals[i]); + ovals[i] = gamma[i] * static_cast(c_invvar * (curr - mu)) + beta[i]; + } + } else { + for (int i = thrx; i < n2; i+=numx) { + U curr = static_cast(lvals[i]); + ovals[i] = static_cast(c_invvar * (curr - mu)); + } + } + if (threadIdx.x == 0 && threadIdx.y == 0) { + mean[i1] = mu; + invvar[i1] = c_invvar; + } + __syncthreads(); + } +} + +template __device__ +void cuLoadWriteStridedInputs( + const int i1_block, + const int thr_load_row_off, + const int thr_load_col_off, + const int i2_off, + const int row_stride, + U* warp_buf1, + U* warp_buf2, + const T* input, + const V* dout, + const int i1_end, + const int n2, + const U* __restrict__ mean, + const U* __restrict__ invvar + ) +{ + int i1 = i1_block+thr_load_row_off; + if (i1 < i1_end) { + U curr_mean = mean[i1]; + U curr_invvar = invvar[i1]; + for (int k = 0; k < blockDim.y; ++k) { + int i2 = i2_off + k; + int load_idx = i1*n2+i2; + int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; + if (i2(input[load_idx]); + U curr_dout = static_cast(dout[load_idx]); + warp_buf1[write_idx] = curr_dout; + warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar; + } else { + warp_buf1[write_idx] = U(0); + warp_buf2[write_idx] = U(0); + } + } + } else { + for (int k = 0; k < blockDim.y; ++k) { + int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; + warp_buf1[write_idx] = U(0); + warp_buf2[write_idx] = U(0); + } + } +} + +template __device__ +void cuLoadAddStridedInputs( + const int i1_block, + const int thr_load_row_off, + const int thr_load_col_off, + const int i2_off, + const int row_stride, + U* warp_buf1, + U* warp_buf2, + const T* input, + const V* dout, + const int i1_end, + const int n2, + const U* __restrict__ mean, + const U* __restrict__ invvar + ) +{ + int i1 = i1_block+thr_load_row_off; + if (i1 < i1_end) { + U curr_mean = mean[i1]; + U curr_invvar = invvar[i1]; + for (int k = 0; k < blockDim.y; ++k) { + int i2 = i2_off + k; + int load_idx = i1*n2+i2; + int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; + if (i2(input[load_idx]); + U curr_dout = static_cast(dout[load_idx]); + warp_buf1[write_idx] += curr_dout; + warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; + } + } + } +} + 
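The per-row statistics used above are built with Welford's streaming mean/variance update (cuWelfordOnlineSum, called inside cuWelfordMuSigma2) and a Chan-style rule for merging partial results across lanes and warps (cuChanOnlineSum). The following host-side C++ sketch shows the same kind of streaming update and merge on a CPU; it is a reference for the quantities the kernel reports (mu and sigma2/n2), not a copy of the device code, and WelfordState, welford_update and welford_merge are names invented for this sketch.

```cpp
// Reference sketch only: the struct and function names below are illustrative
// and do not appear in the fused kernel.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

struct WelfordState {
    double mean  = 0.0;  // running mean
    double m2    = 0.0;  // running sum of squared deviations (sigma2 before /n)
    double count = 0.0;  // number of values folded in so far
};

// Streaming single-value update (same shape as the kernel's online sum).
void welford_update(WelfordState& s, double x) {
    s.count += 1.0;
    double delta = x - s.mean;
    s.mean += delta / s.count;
    s.m2   += delta * (x - s.mean);
}

// Chan-style merge of two partial states, as used across lanes and warps.
WelfordState welford_merge(const WelfordState& a, const WelfordState& b) {
    WelfordState out;
    out.count = a.count + b.count;
    if (out.count <= 0.0) return out;
    double delta = b.mean - a.mean;
    double wa = a.count / out.count;
    double wb = b.count / out.count;
    out.mean = wa * a.mean + wb * b.mean;
    out.m2   = a.m2 + b.m2 + delta * delta * wa * wb * out.count;
    return out;
}

int main() {
    std::vector<double> row = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};

    // Two "threads" accumulate strided halves of the row, then merge,
    // mimicking how lanes and warps cooperate on one n1 index.
    WelfordState a, b;
    for (std::size_t i = 0; i < row.size(); i += 2) welford_update(a, row[i]);
    for (std::size_t i = 1; i < row.size(); i += 2) welford_update(b, row[i]);
    WelfordState s = welford_merge(a, b);

    double mu     = s.mean;
    double sigma2 = s.m2 / s.count;                 // biased variance, as in the kernel
    double invvar = 1.0 / std::sqrt(sigma2 + 1e-5); // epsilon as in LayerNorm
    std::printf("mu=%f sigma2=%f invvar=%f\n", mu, sigma2, invvar);
    return 0;
}
```

The merge rule is what lets each lane accumulate an arbitrary strided subset of the row independently and still recover the exact row mean and (biased) variance when the partials are combined.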
+template __global__ +void cuComputePartGradGammaBeta( + const V* __restrict__ dout, + const T* __restrict__ input, + const int n1, + const int n2, + const U* __restrict__ mean, + const U* __restrict__ invvar, + U epsilon, + U* part_grad_gamma, + U* part_grad_beta) +{ + const int numsegs_n1 = (n1+blockDim.y*blockDim.y-1) / (blockDim.y*blockDim.y); + const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y; + const int i1_beg = blockIdx.y * segs_per_block * blockDim.y*blockDim.y; + const int i1_beg_plus_one = (blockIdx.y+1) * segs_per_block * blockDim.y*blockDim.y; + const int i1_end = i1_beg_plus_one < n1 ? i1_beg_plus_one : n1; + const int row_stride = blockDim.x+1; + const int thr_load_col_off = (threadIdx.x*blockDim.y)&(blockDim.x-1); + const int thr_load_row_off = (threadIdx.x*blockDim.y)/blockDim.x + threadIdx.y*blockDim.y; + const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off; + SharedMemory shared; + U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + (blockDim.y - 1)*(blockDim.x/blockDim.y) elements + U* warp_buf1 = (U*)buf; + U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride; + // compute partial sums from strided inputs + // do this to increase number of loads in flight + cuLoadWriteStridedInputs(i1_beg,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar); + for (int i1_block = i1_beg+blockDim.y*blockDim.y; i1_block < i1_end; i1_block+=blockDim.y*blockDim.y) { + cuLoadAddStridedInputs(i1_block,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar); + } + __syncthreads(); + // inter-warp reductions + // sum within each warp + U acc1 = U(0); + U acc2 = U(0); + for (int k = 0; k < blockDim.y; ++k) { + int row1 = threadIdx.y + k*blockDim.y; + int idx1 = row1*row_stride + threadIdx.x; + acc1 += warp_buf1[idx1]; + acc2 += warp_buf2[idx1]; + } + warp_buf1[threadIdx.y*row_stride+threadIdx.x] = acc1; + warp_buf2[threadIdx.y*row_stride+threadIdx.x] = acc2; + __syncthreads(); + // sum all warps + for (int offset = blockDim.y/2; offset > 1; offset /= 2) { + if (threadIdx.y < offset) { + int row1 = threadIdx.y; + int row2 = threadIdx.y + offset; + int idx1 = row1*row_stride + threadIdx.x; + int idx2 = row2*row_stride + threadIdx.x; + warp_buf1[idx1] += warp_buf1[idx2]; + warp_buf2[idx1] += warp_buf2[idx2]; + } + __syncthreads(); + } + int i2 = blockIdx.x * blockDim.x + threadIdx.x; + if (threadIdx.y == 0 && i2 < n2) { + int row1 = threadIdx.y; + int row2 = threadIdx.y + 1; + int idx1 = row1*row_stride + threadIdx.x; + int idx2 = row2*row_stride + threadIdx.x; + part_grad_beta[blockIdx.y*n2+i2] = warp_buf1[idx1] + warp_buf1[idx2]; + part_grad_gamma[blockIdx.y*n2+i2] = warp_buf2[idx1] + warp_buf2[idx2]; + } +} + +template __global__ +void cuComputeGradGammaBeta( + const U* part_grad_gamma, + const U* part_grad_beta, + const int part_size, + const int n1, + const int n2, + V* grad_gamma, + V* grad_beta) +{ + // sum partial gradients for gamma and beta + SharedMemory shared; + U* buf = shared.getPointer(); + int i2 = blockIdx.x * blockDim.x + threadIdx.x; + if (i2 < n2) { + // each warp does sequential reductions until reduced part_size is num_warps + int num_warp_reductions = part_size / blockDim.y; + U sum_gamma = U(0); + U sum_beta = U(0); + const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; + const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 
+ i2; + for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { + sum_gamma += part_grad_gamma_ptr[warp_offset*n2]; + sum_beta += part_grad_beta_ptr[warp_offset*n2]; + } + // inter-warp reductions + const int nbsize3 = blockDim.x * blockDim.y / 2; + for (int offset = blockDim.y/2; offset >= 1; offset /= 2) { + // top half write to shared memory + if (threadIdx.y >= offset && threadIdx.y < 2*offset) { + const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; + buf[write_idx] = sum_gamma; + buf[write_idx+nbsize3] = sum_beta; + } + __syncthreads(); + // bottom half sums + if (threadIdx.y < offset) { + const int read_idx = threadIdx.y * blockDim.x + threadIdx.x; + sum_gamma += buf[read_idx]; + sum_beta += buf[read_idx+nbsize3]; + } + __syncthreads(); + } + // write out fully summed gradients + if (threadIdx.y == 0) { + grad_gamma[i2] = sum_gamma; + grad_beta[i2] = sum_beta; + } + } +} + +template __global__ +void cuComputeGradInput( + const V* __restrict__ dout, + const T* __restrict__ input, + const int n1, + const int n2, + const U* __restrict__ mean, + const U* __restrict__ invvar, + U epsilon, + const V* gamma, + T* grad_input) +{ + for (auto i1=blockIdx.y; i1 < n1; i1 += gridDim.y) { + U sum_loss1 = U(0); + U sum_loss2 = U(0); + const U c_mean = mean[i1]; + const U c_invvar = invvar[i1]; + const T* k_input = input + i1*n2; + const V* k_dout = dout + i1*n2; + const int numx = blockDim.x * blockDim.y; + const int thrx = threadIdx.x + threadIdx.y * blockDim.x; + if (gamma != NULL) { + int l = 4*thrx; + for (; l+3 < n2; l+=4*numx) { + for (int k = 0; k < 4; ++k) { + const U c_h = static_cast(k_input[l+k]); + const U c_loss = static_cast(k_dout[l+k]); + sum_loss1 += c_loss * gamma[l+k]; + sum_loss2 += c_loss * gamma[l+k] * (c_h - c_mean) * c_invvar; + } + } + for (; l < n2; ++l) { + const U c_h = static_cast(k_input[l]); + const U c_loss = static_cast(k_dout[l]); + sum_loss1 += c_loss * gamma[l]; + sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar; + } + } else { + int l = 4*thrx; + for (; l+3 < n2; l+=4*numx) { + for (int k = 0; k < 4; ++k) { + const U c_h = static_cast(k_input[l+k]); + const U c_loss = static_cast(k_dout[l+k]); + sum_loss1 += c_loss; + sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; + } + } + for (; l < n2; ++l) { + const U c_h = static_cast(k_input[l]); + const U c_loss = static_cast(k_dout[l]); + sum_loss1 += c_loss; + sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; + } + } + // intra-warp reductions + for (int mask = blockDim.x/2; mask > 0; mask /= 2) { + sum_loss1 += WARP_SHFL_XOR(sum_loss1, mask); + sum_loss2 += WARP_SHFL_XOR(sum_loss2, mask); + } + // inter-warp reductions + if (blockDim.y > 1) { + SharedMemory shared; + U* buf = shared.getPointer(); + for (int offset = blockDim.y/2; offset > 0; offset /= 2) { + // upper half of warps write to shared + if (threadIdx.y >= offset && threadIdx.y < 2*offset) { + const int wrt_i = (threadIdx.y - offset) * blockDim.x + threadIdx.x; + buf[2*wrt_i] = sum_loss1; + buf[2*wrt_i+1] = sum_loss2; + } + __syncthreads(); + // lower half merges + if (threadIdx.y < offset) { + const int read_i = threadIdx.y * blockDim.x + threadIdx.x; + sum_loss1 += buf[2*read_i]; + sum_loss2 += buf[2*read_i+1]; + } + __syncthreads(); + } + if (threadIdx.y == 0) { + buf[2*threadIdx.x] = sum_loss1; + buf[2*threadIdx.x+1] = sum_loss2; + } + __syncthreads(); + if (threadIdx.y !=0) { + sum_loss1 = buf[2*threadIdx.x]; + sum_loss2 = buf[2*threadIdx.x+1]; + } + } + // all threads now have the two sums over l + 
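+    // The loop below applies the standard LayerNorm input gradient. With
+    // xhat = (x - mean) * invvar, sum_loss1 = sum_j dy_j * gamma_j and
+    // sum_loss2 = sum_j dy_j * gamma_j * xhat_j (gamma dropped when it is NULL):
+    //   dx_l = invvar * (dy_l * gamma_l - (sum_loss1 + xhat_l * sum_loss2) / n2)
+    // i.e. fH*c_loss*gamma[l] - sum_loss1 - xhat*sum_loss2, scaled by term1.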
U fH = (U)n2; + U term1 = (U(1) / fH) * c_invvar; + T* k_grad_input = grad_input + i1*n2; + if (gamma != NULL) { + for (int l = thrx; l < n2; l+=numx) { + const U c_h = static_cast(k_input[l]); + const U c_loss = static_cast(k_dout[l]); + U f_grad_input = fH * c_loss * gamma[l]; + f_grad_input -= sum_loss1; + f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; + f_grad_input *= term1; + k_grad_input[l] = static_cast(f_grad_input); + } + } else { + for (int l = thrx; l < n2; l+=numx) { + const U c_h = static_cast(k_input[l]); + const U c_loss = static_cast(k_dout[l]); + U f_grad_input = fH * c_loss; + f_grad_input -= sum_loss1; + f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; + f_grad_input *= term1; + k_grad_input[l] = static_cast(f_grad_input); + } + } + // prevent race where buf is written again before reads are done + __syncthreads(); + } +} + + + + +template +void HostApplyLayerNorm( + V* output, + U* mean, + U* invvar, + const T* input, + int n1, + int n2, + double epsilon, + const V* gamma, + const V* beta + ) +{ + auto stream = at::cuda::getCurrentCUDAStream().stream(); + const dim3 threads(32,4,1); + const uint64_t maxGridY = + at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; + const dim3 blocks(1, std::min((uint64_t)n1, maxGridY), 1); + int nshared = + threads.y > 1 ? + threads.y*sizeof(U)+(threads.y/2)*sizeof(U) : + 0; + cuApplyLayerNorm<<>>( + output, + mean, + invvar, + input, + n1,n2, + U(epsilon), + gamma,beta); +} + + +void cuda_layer_norm( + at::Tensor* output, + at::Tensor* mean, + at::Tensor* invvar, + at::Tensor* input, + int n1, + int n2, + #ifdef VERSION_GE_1_1 + at::IntArrayRef normalized_shape, + #else + at::IntList normalized_shape, + #endif + at::Tensor* gamma, + at::Tensor* beta, + double epsilon) +{ + using namespace at; + DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES( + input->scalar_type(), output->scalar_type(), "cuda_layer_norm_kernel", + HostApplyLayerNorm( + output->DATA_PTR(), + mean->DATA_PTR(), + invvar->DATA_PTR(), + input->DATA_PTR(), + n1,n2, + epsilon, + gamma != NULL ? gamma->DATA_PTR() : NULL, + beta != NULL ? beta->DATA_PTR() : NULL); + ) +} + + +template +void HostLayerNormGradient( + const V* dout, + const U* mean, + const U* invvar, + at::Tensor* input, + int n1, + int n2, + const V* gamma, + const V* beta, + double epsilon, + T* grad_input, + V* grad_gamma, + V* grad_beta + ) +{ + auto stream = at::cuda::getCurrentCUDAStream().stream(); + + if (gamma != NULL && beta != NULL) { + // compute grad_gamma(j) and grad_beta(j) + const int part_size = 16; + const dim3 threads2(32,4,1); + const dim3 blocks2((n2+threads2.x-1)/threads2.x,part_size,1); + const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * + (threads2.x + 1); + const int nshared2_b = threads2.x * threads2.y * sizeof(U); + const int nshared2 = nshared2_a > nshared2_b ? 
nshared2_a : nshared2_b; + at::Tensor part_grad_gamma = at::empty( + {part_size,n2}, input->options().dtype(at::ScalarType::Float)); + at::Tensor part_grad_beta = at::empty_like(part_grad_gamma); + cuComputePartGradGammaBeta<<>>( + dout, + input->DATA_PTR(), + n1,n2, + mean, + invvar, + U(epsilon), + part_grad_gamma.DATA_PTR(), + part_grad_beta.DATA_PTR()); + + const dim3 threads3(32,8,1); + const dim3 blocks3((n2+threads2.x-1)/threads2.x,1,1); + const int nshared3 = threads3.x * threads3.y * sizeof(U); + cuComputeGradGammaBeta<<>>( + part_grad_gamma.DATA_PTR(), + part_grad_beta.DATA_PTR(), + part_size, + n1,n2, + grad_gamma, + grad_beta); + } + + // compute grad_input + const uint64_t maxGridY = + at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; + const dim3 blocks1(1, std::min((uint64_t)n1, maxGridY), 1); + const dim3 threads1(32,4,1); + int nshared = + threads1.y > 1 ? + threads1.y*threads1.x*sizeof(U) : + 0; + cuComputeGradInput<<>>( + dout, + input->DATA_PTR(), + n1,n2, + mean, + invvar, + U(epsilon), + gamma, + grad_input); +} + + +void cuda_layer_norm_gradient( + at::Tensor* dout, + at::Tensor* mean, + at::Tensor* invvar, + at::Tensor* input, + int n1, + int n2, + #ifdef VERSION_GE_1_1 + at::IntArrayRef normalized_shape, + #else + at::IntList normalized_shape, + #endif + at::Tensor* gamma, + at::Tensor* beta, + double epsilon, + at::Tensor* grad_input, + at::Tensor* grad_gamma, + at::Tensor* grad_beta) +{ + using namespace at; + DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES( + input->scalar_type(), gamma->scalar_type(), + "cuda_layer_norm_gradient_kernel", + HostLayerNormGradient( + dout->DATA_PTR(), + mean->DATA_PTR(), + invvar->DATA_PTR(), + input, + n1,n2, + // TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta + // if gamma Tensor is NULL on input. + gamma != NULL ? gamma->DATA_PTR() : NULL, + gamma != NULL ? beta->DATA_PTR() : NULL, + epsilon, + grad_input->DATA_PTR(), + gamma != NULL ? grad_gamma->DATA_PTR() : NULL, + gamma != NULL ? grad_beta->DATA_PTR() : NULL); + ) +} diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax.cpp b/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4c8a8c2ee39bcc0f9d04b23b2bf19032d8327e44 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax.cpp @@ -0,0 +1,83 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_masked_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + torch::Tensor const& mask, + float scale_factor); + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor); + +int get_batch_per_block_cuda( + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads); + +torch::Tensor fwd( + torch::Tensor const& input, + torch::Tensor const& mask, + float scale_factor) { + AT_ASSERTM(input.dim() == 4, "expected 4D tensor"); + AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) || + (input.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM(mask.dim() == 4, "expected 4D tensor"); + + return fwd_cuda(input, mask, scale_factor); +} + +torch::Tensor bwd( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor) { + + AT_ASSERTM(output_grads.dim() == 4, "expected 3D tensor"); + AT_ASSERTM(softmax_results.dim() == 4, "expected 3D tensor"); + + AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) || + (output_grads.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) || + (softmax_results.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return bwd_cuda(output_grads, softmax_results, scale_factor); +} + +int get_batch_per_block( + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads) { + return get_batch_per_block_cuda(query_seq_len, key_seq_len, batches, attn_heads); +} + +} // end namespace scaled_masked_softmax +} // end namespace fused_softmax +} // end namespace multihead_attn + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", + &multihead_attn::fused_softmax::scaled_masked_softmax::fwd, + "Self Multihead Attention scaled, time masked softmax -- Forward."); + + m.def("backward", + &multihead_attn::fused_softmax::scaled_masked_softmax::bwd, + "Self Multihead Attention scaled, time masked softmax -- Backward."); + + m.def("get_batch_per_block", + &multihead_attn::fused_softmax::scaled_masked_softmax::get_batch_per_block, + "Return Batch per block size." + ); +} diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax.h b/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..21ebbd52284203a64f6c7acab82e36fdb6cd7f6f --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax.h @@ -0,0 +1,710 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace { + +template +__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src); + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) {*((half2*) dst) = *((half2*) src); } + +int log2_ceil(int value) { + int log2_value = 0; + while ((1 << log2_value) < value) ++log2_value; + return log2_value; +} + +template +struct Add { + __device__ __forceinline__ T operator()(T a, T b) const { + return a + b; + } +}; + +template +struct Max { + __device__ __forceinline__ T operator()(T a, T b) const { + return a < b ? b : a; + } +}; + +template +__device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template class ReduceOp> +__device__ __forceinline__ void warp_reduce(acc_t* sum) { + ReduceOp r; + #pragma unroll + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE); + sum[i] = r(sum[i], b); + } + } +} + + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + */ +template +__global__ void scaled_softmax_warp_forward( + output_t *dst, + const input_t *src, + const acc_t scale, + int micro_batch_size, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z))+ threadIdx.y) * WARP_BATCH; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. 
compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + int itr_idx = i*element_count+it*WARP_SIZE; + copy_vector(temp_data, src + itr_idx); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = (acc_t)temp_data[element] * scale; + } + } else { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = elements[i][it + element] / sum[i]; + } + copy_vector(dst + i * element_count + it * WARP_SIZE, out); + } else { + break; + } + } + } +} + + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + * 2) Explicit masking + */ +template +__global__ void scaled_masked_softmax_warp_forward( + output_t *dst, + const input_t *src, + const uint8_t *mask, + const acc_t scale, + int micro_batch_size, + int element_count, + int pad_batches) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 
1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z))+ threadIdx.y) * WARP_BATCH; + int pad_first_batch = 0; + if (pad_batches != 1) { // bert style + pad_first_batch = (blockDim.y * (blockIdx.x + gridDim.x * blockIdx.z) + threadIdx.y) * WARP_BATCH; + } else { // gpt2 style + pad_first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + } + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + mask += pad_first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; + uint8_t temp_mask[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + int itr_idx = i*element_count+it*WARP_SIZE; + copy_vector(temp_data, src + itr_idx); + copy_vector(temp_mask, mask + itr_idx); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (temp_mask[element] != 1) { + elements[i][it + element] = (acc_t)temp_data[element] * scale; + } else { + elements[i][it + element] = -10000.0; + } + } + } else { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + // compute scale value to account for full mask + acc_t scale_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + scale_value[i] = (max_value[i] == -10000.0) ? 
0.0 : 1.0; + } + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = elements[i][it + element] * scale_value[i] / sum[i]; + } + copy_vector(dst + i * element_count + it * WARP_SIZE, out); + } else { + break; + } + } + } +} + +template +__global__ void scaled_masked_softmax_warp_backward( + output_t *gradInput, + input_t *grad, + const input_t *output, + acc_t scale, + int micro_batch_size, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_backward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + // the first element to process by the current thread + int thread_offset = first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + grad += thread_offset; + output += thread_offset; + gradInput += thread_offset; + + // load data from global memory + acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + acc_t output_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + input_t temp_grad[ELEMENTS_PER_LDG_STG]; + input_t temp_output[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 
0 : element_count; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + copy_vector(temp_grad, grad + i * element_count + it * WARP_SIZE); + copy_vector(temp_output, output + i * element_count + it * WARP_SIZE); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + output_reg[i][it + element] = (acc_t)temp_output[element]; + } + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + grad_reg[i][it + element] = (acc_t)temp_grad[element] * output_reg[i][it + element]; + } + } + } + } + + acc_t sum[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + sum[i] = grad_reg[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + sum[i] += grad_reg[i][it]; + } + } + warp_reduce(sum); + + // store result + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + // compute gradients + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = (output_t)(scale * (grad_reg[i][it + element] - output_reg[i][it + element] * sum[i])); + } + copy_vector(gradInput + i * element_count + it * WARP_SIZE, out); + } + } + } +} +} // end of anonymous namespace + +int get_batch_per_block(int query_seq_len, int key_seq_len, int batches, int attn_heads){ + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + constexpr int threads_per_block = 128; + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + + return batches_per_block; +} + +template +void dispatch_scaled_softmax_forward( + output_t *dst, + const input_t *src, + const input_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads) +{ + TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 4096 ); + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(query_seq_len%batches_per_block == 0); + dim3 blocks(query_seq_len/batches_per_block, attn_heads, batches); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 1: // 2 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 2: // 4 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 3: // 8 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 4: // 16 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 5: // 32 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 6: // 64 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 7: // 128 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 8: // 256 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 9: // 512 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 10: // 1024 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 11: // 2048 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 12: // 4096 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + default: + break; + } + } +} + +template +void dispatch_scaled_masked_softmax_forward( + output_t *dst, + const input_t *src, + const uint8_t *mask, + const input_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads, + int pad_batches) +{ + TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 4096 ); + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(query_seq_len%batches_per_block == 0); + dim3 blocks(query_seq_len/batches_per_block, attn_heads, batches); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 1: // 2 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 2: // 4 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 3: // 8 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 4: // 16 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 5: // 32 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 6: // 64 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 7: // 128 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 8: // 256 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 9: // 512 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 10: // 1024 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 11: // 2048 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 12: // 4096 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + default: + break; + } + } +} + +template +void dispatch_scaled_masked_softmax_backward( + output_t *grad_input, + input_t *grad, + const input_t *output, + const acc_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads) +{ + TORCH_INTERNAL_ASSERT( key_seq_len >= 0 && key_seq_len <= 4096 ); + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks = batch_count/batches_per_block; + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 1: // 2 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 2: // 4 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 3: // 8 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 4: // 16 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 5: // 32 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 6: // 64 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 7: // 128 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 8: // 256 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 9: // 512 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 10: // 1024 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 11: // 2048 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 12: // 4096 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + + default: + break; + } + } +} diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax_cuda.cu b/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..a8be57c0525f2693245b2f46c2a07e00c6e0dd67 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_masked_softmax_cuda.cu @@ -0,0 +1,107 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include "scaled_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_masked_softmax { + +int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads){ + return get_batch_per_block(query_seq_len, key_seq_len, batches, attn_heads); +} + + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + torch::Tensor const& mask, + float scale_factor) +{ + // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = input.size(0); + const int pad_batches = mask.size(0); + const int attn_heads = input.size(1); + const int query_seq_len = input.size(2); + const int key_seq_len = input.size(3); + TORCH_INTERNAL_ASSERT(key_seq_len <= 4096); + TORCH_INTERNAL_ASSERT(query_seq_len > 1); + TORCH_INTERNAL_ASSERT(pad_batches == 1 || pad_batches == batches); + TORCH_INTERNAL_ASSERT(mask.size(1) == 1); + TORCH_INTERNAL_ASSERT(mask.size(2) == query_seq_len); + TORCH_INTERNAL_ASSERT(mask.size(3) == key_seq_len); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = + torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* mask_ptr = static_cast(mask.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch_scaled_masked_softmax_forward", + dispatch_scaled_masked_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + reinterpret_cast(mask_ptr), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads, + pad_batches); + ); + return softmax_results; +} + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) { + + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + //output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = output_grads.size(0); + const int attn_heads = output_grads.size(1); + const int query_seq_len = output_grads.size(2); + const int key_seq_len = output_grads.size(3); + + auto act_options = output_grads.options().requires_grad(false); + torch::Tensor input_grads = + torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + void* input_grads_ptr = static_cast(input_grads.data_ptr()); + + //Softmax Grad + DISPATCH_HALF_AND_BFLOAT( + output_grads_.scalar_type(), + "dispatch_scaled_masked_softmax_backward", + dispatch_scaled_masked_softmax_backward( + reinterpret_cast(input_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads); + ); + + return input_grads; +} +} +} +} diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_softmax.cpp b/multilinguality_megatron/megatron/fused_kernels/scaled_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e10cd77e7fb35247fac2b547d8bccc18c3dca58e --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_softmax.cpp @@ -0,0 +1,61 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor); + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor); + +torch::Tensor fwd( + torch::Tensor const& input, + float scale_factor) { + AT_ASSERTM(input.dim() == 4, "expected 4D tensor"); + AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) || + (input.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return fwd_cuda(input, scale_factor); +} + +torch::Tensor bwd( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor) { + + AT_ASSERTM(output_grads.dim() == 4, "expected 3D tensor"); + AT_ASSERTM(softmax_results.dim() == 4, "expected 3D tensor"); + + AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) || + (output_grads.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) || + (softmax_results.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return bwd_cuda(output_grads, softmax_results, scale_factor); +} + +} // end namespace scaled_softmax +} // end namespace fused_softmax +} // end namespace multihead_attn + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", + &multihead_attn::fused_softmax::scaled_softmax::fwd, + "Self Multihead Attention scaled, softmax -- Forward."); + m.def("backward", + &multihead_attn::fused_softmax::scaled_softmax::bwd, + "Self Multihead Attention scaled, softmax -- Backward."); +} + diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_softmax_cuda.cu b/multilinguality_megatron/megatron/fused_kernels/scaled_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..ecc6eb06e83e5fee38707c1ccd4bc362b0d1df49 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_softmax_cuda.cu @@ -0,0 +1,90 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include "scaled_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor) +{ + // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = input.size(0); + const int attn_heads = input.size(1); + const int query_seq_len = input.size(2); + const int key_seq_len = input.size(3); + TORCH_INTERNAL_ASSERT(key_seq_len <= 4096); + TORCH_INTERNAL_ASSERT(query_seq_len > 1); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = + torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch_scaled_softmax_forward", + dispatch_scaled_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads); + ); + return softmax_results; +} + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) { + + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + //output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = output_grads.size(0); + const int attn_heads = output_grads.size(1); + const int query_seq_len = output_grads.size(2); + const int key_seq_len = output_grads.size(3); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + + //Softmax Grad + DISPATCH_HALF_AND_BFLOAT( + output_grads_.scalar_type(), + "dispatch_scaled_masked_softmax_backward", + dispatch_scaled_masked_softmax_backward( + reinterpret_cast(output_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads); + ); + + //backward pass is completely in-place + return output_grads; +} +} +} +} + diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp b/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ddfc8646a3dd109b31d633f109079e2c1af98e9d --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp @@ -0,0 +1,58 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_upper_triang_masked_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor); + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor); + +torch::Tensor fwd(torch::Tensor const& input, float scale_factor) { + AT_ASSERTM(input.dim() == 3, "expected 3D tensor"); + AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) || + (input.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return fwd_cuda(input, scale_factor); +} + +torch::Tensor bwd( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor) { + + AT_ASSERTM(output_grads.dim() == 3, "expected 3D tensor"); + AT_ASSERTM(softmax_results.dim() == 3, "expected 3D tensor"); + + AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) || + (output_grads.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) || + (softmax_results.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return bwd_cuda(output_grads, softmax_results, scale_factor); +} + +} // end namespace scaled_upper_triang_masked_softmax +} // end namespace fused_softmax +} // end namespace multihead_attn + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", + &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd, + "Self Multihead Attention scaled, time masked softmax -- Forward."); + m.def("backward", + &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::bwd, + "Self Multihead Attention scaled, time masked softmax -- Backward."); +} diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h b/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..98aaf884c9ed99b8c75f6179ef73156699d644c4 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h @@ -0,0 +1,499 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace { + +template +__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src); + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) {*((half2*) dst) = *((half2*) src); } + +template +__device__ __inline__ void copy_zero_vector(Datatype *dst); + +template <> +__device__ __inline__ void copy_zero_vector(c10::BFloat16 *dst) { *dst = 0.0; } + +template <> +__device__ __inline__ void copy_zero_vector(c10::BFloat16 *dst) { *((float2*) dst) = make_float2(0.0f, 0.0f); } + +template <> +__device__ __inline__ void copy_zero_vector(c10::Half *dst) { *dst = 0.0; } + +template <> +__device__ __inline__ void copy_zero_vector(c10::Half *dst) { *((float2*) dst) = make_float2(0.0f, 0.0f); } + + +int log2_ceil(int value) { + int log2_value = 0; + while ((1 << log2_value) < value) ++log2_value; + return log2_value; +} + +template +struct Add { + __device__ __forceinline__ T operator()(T a, T b) const { + return a + b; + } +}; + +template +struct Max { + __device__ __forceinline__ T operator()(T a, T b) const { + return a < b ? b : a; + } +}; + +template +__device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template class ReduceOp> +__device__ __forceinline__ void warp_reduce(acc_t* sum) { + ReduceOp r; + #pragma unroll + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE); + sum[i] = r(sum[i], b); + } + } +} + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + * 2) Implicit time (diagonal masking) + */ +template +__global__ void scaled_upper_triang_masked_softmax_warp_forward( + output_t *dst, + const input_t *src, + const acc_t scale, + int micro_batch_size, + int stride, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x; + int local_seq = blockIdx.x + 1; + int warp_iteration_limit = (local_seq + ELEMENTS_PER_LDG_STG * WARP_SIZE - 1)/ WARP_SIZE; + + // micro_batch_size might not be a multiple of WARP_BATCH. 
Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : local_seq; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + copy_vector(temp_data, src + i*element_count*stride + it*WARP_SIZE); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if ((element_index + element) < batch_element_count) { + elements[i][it+element] = (acc_t)temp_data[element] * scale; + } else { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } else { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + if (it < warp_iteration_limit) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < local_seq) { + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < local_seq) { + out[element] = elements[i][it + element] / sum[i]; + } else { + out[element] = 0; + } + } + copy_vector(dst + i * element_count * stride + it * WARP_SIZE, out); + } else if (element_index < element_count) { + copy_zero_vector(dst + i * element_count * stride + it * WARP_SIZE); + } else { + break; + } + } + } +} + +template +__global__ void scaled_upper_triang_masked_softmax_warp_backward( + output_t *gradInput, + input_t *grad, + const input_t *output, + acc_t scale, + int micro_batch_size, + int stride, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_backward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 
2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x; + int local_seq = blockIdx.x + 1; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + // the first element to process by the current thread + int thread_offset = first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + grad += thread_offset; + output += thread_offset; + gradInput += thread_offset; + + // load data from global memory + acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + acc_t output_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + input_t temp_grad[ELEMENTS_PER_LDG_STG]; + input_t temp_output[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : local_seq; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + copy_vector(temp_grad, grad + i * element_count * stride + it * WARP_SIZE); + copy_vector(temp_output, output + i * element_count * stride + it * WARP_SIZE); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < batch_element_count) { + output_reg[i][it + element] = (acc_t)temp_output[element]; + } + } + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < batch_element_count) { + grad_reg[i][it + element] = (acc_t)temp_grad[element] * output_reg[i][it + element]; + } + } + } + } + } + + acc_t sum[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + sum[i] = grad_reg[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + sum[i] += grad_reg[i][it]; + } + } + warp_reduce(sum); + + // store result + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + // compute gradients + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = (output_t)(scale * (grad_reg[i][it + element] - output_reg[i][it + element] * sum[i])); + } + copy_vector(gradInput + i * element_count * stride + it * WARP_SIZE, out); + } + } + } +} + +} // end of anonymous namespace + +template +void dispatch_scaled_upper_triang_masked_softmax_forward( + output_t *dst, + const input_t *src, + const input_t scale, + int softmax_elements, + int softmax_elements_stride, + int attn_batches) +{ + TORCH_INTERNAL_ASSERT(softmax_elements >= 0 && softmax_elements <= 2048 ); + if (softmax_elements == 0) { + return; + } else { + int log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + int seq_len = softmax_elements; + int batch_count = attn_batches * seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. 
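For reference, what this dispatcher ultimately computes is a scaled, causally ("upper-triangular") masked softmax over the last dimension of a [attn_batches, seq_len, seq_len] tensor. A minimal PyTorch sketch of those semantics, written purely for illustration (it is not the fused warp-level implementation):

import torch

def scaled_upper_triang_masked_softmax_reference(x: torch.Tensor, scale: float) -> torch.Tensor:
    # x: [attn_batches, seq_len, seq_len]; positions above the diagonal (future
    # positions) are masked out before the softmax, matching the implicit
    # "diagonal masking" described in the kernel header comment.
    seq_len = x.size(-1)
    causal = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=x.device))
    scores = (x.float() * scale).masked_fill(~causal, float("-inf"))
    return torch.softmax(scores, dim=-1).to(x.dtype)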
+ int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0); + + int blocks_per_seq = attn_batches / batches_per_block; + dim3 blocks(seq_len, blocks_per_seq, 1); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 1: // 2 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 2: // 4 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 3: // 8 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 4: // 16 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 5: // 32 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 6: // 64 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 7: // 128 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 8: // 256 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 9: // 512 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 10: // 1024 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 11: // 2048 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + default: + break; + } + } +} + +template +void dispatch_scaled_upper_triang_masked_softmax_backward( + output_t *grad_input, + input_t *grad, + const input_t *output, + const acc_t scale, + int softmax_elements, + int softmax_elements_stride, + int attn_batches) +{ + TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 2048 ); + if (softmax_elements == 0) { + return; + } else { + int log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + int seq_len = softmax_elements; + int batch_count = attn_batches * seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward. 
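The backward kernel above folds the scale factor into the standard softmax gradient: per row, dL/dx = scale * y * (dy - sum(dy * y)), where y is the saved softmax output. A small PyTorch sketch of that identity (illustration only; the fused kernel performs the row reduction with warp shuffles):

import torch

def scaled_upper_triang_masked_softmax_backward_reference(grad_output: torch.Tensor,
                                                          softmax_results: torch.Tensor,
                                                          scale: float) -> torch.Tensor:
    # Masked positions carry probability 0 in softmax_results, so they also
    # receive zero gradient here, as in the fused kernel.
    dot = (grad_output * softmax_results).sum(dim=-1, keepdim=True)
    return scale * softmax_results * (grad_output - dot)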
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0); + + int blocks_per_seq = attn_batches / batches_per_block; + dim3 blocks(seq_len, blocks_per_seq, 1); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 1: // 2 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 2: // 4 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 3: // 8 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 4: // 16 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 5: // 32 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 6: // 64 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 7: // 128 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 8: // 256 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 9: // 512 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 10: // 1024 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 11: // 2048 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + default: + break; + } + } +} diff --git a/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu b/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..c21e5fb4ee181ee60bbdc468e3b6bea832ac1d24 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu @@ -0,0 +1,84 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include "scaled_upper_triang_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_upper_triang_masked_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor) +{ + // input is a 3d tensor with dimensions [attn_batches, seq_len, seq_len] + const int attn_batches = input.size(0); + const int seq_len = input.size(1); + TORCH_INTERNAL_ASSERT(seq_len <= 2048); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = + torch::empty({attn_batches, seq_len, seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch_scaled_upper_triang_masked_softmax_forward", + dispatch_scaled_upper_triang_masked_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + scale_factor, + seq_len, + seq_len, + attn_batches); + ); + return softmax_results; +} + + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) { + + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + //output grads is a 3d tensor with dimensions [attn_batches, seq_len, seq_len] + const int attn_batches = output_grads.size(0); + const int seq_len = output_grads.size(1); + TORCH_INTERNAL_ASSERT(output_grads.size(1) == output_grads.size(2)); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + + //Softmax Grad + DISPATCH_HALF_AND_BFLOAT( + output_grads_.scalar_type(), + "dispatch_scaled_upper_triang_masked_softmax_backward", + dispatch_scaled_upper_triang_masked_softmax_backward( + reinterpret_cast(output_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + seq_len, + seq_len, + attn_batches); + ); + + //backward pass is completely in-place + return output_grads; +} +} +} +} diff --git a/multilinguality_megatron/megatron/fused_kernels/tests/__init__.py b/multilinguality_megatron/megatron/fused_kernels/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/multilinguality_megatron/megatron/fused_kernels/tests/test_fused_kernels.py b/multilinguality_megatron/megatron/fused_kernels/tests/test_fused_kernels.py new file mode 100644 index 0000000000000000000000000000000000000000..938330086834e92c29c99f4a2cbd80fefd72cb2c --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/tests/test_fused_kernels.py @@ -0,0 +1,390 @@ +import math + +import torch +from torch.nn import LayerNorm + +from megatron.model.enums import AttnMaskType +from megatron.model.fused_layer_norm import MixedFusedLayerNorm +from megatron.model.fused_softmax import FusedScaleMaskSoftmax +from megatron.model.utils import attention_mask_func +from megatron.fused_kernels import load + + +def test_load_fused_kernels(): + try: + import fused_mix_prec_layer_norm_cuda + import scaled_masked_softmax_cuda + import scaled_upper_triang_masked_softmax_cuda + import torch + + print("[Success] load_fused_kernels") + except ImportError as e: + print("[Fail] load_fused_kernels") + raise e + + +def test_fused_softmax(): + bert = 
BertModel.from_pretrained("bert-base-cased").cuda().half() + tokenizer = BertTokenizer.from_pretrained("bert-base-cased") + test_text = ( + "Hello. How are you? I am fine thank you and you? yes Good. " + "hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + embedding_output = bert.embeddings( + input_ids=tokens["input_ids"].cuda(), + position_ids=None, + token_type_ids=tokens["token_type_ids"].cuda(), + inputs_embeds=None, + past_key_values_length=0, + ) + + # (bsz, 1, 1, seq_len) + mask = bert.get_extended_attention_mask( + attention_mask=tokens["attention_mask"].cuda(), + input_shape=tokens["input_ids"].shape, + device=bert.device, + ) + # (bsz, 1, seq_len, seq_len) + mask = mask.repeat(1, 1, mask.size()[-1], 1) + + attention = bert.encoder.layer[0].attention.self + key_layer = attention.transpose_for_scores(attention.key(embedding_output)) + query_layer = attention.transpose_for_scores(attention.query(embedding_output)) + + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores /= math.sqrt(key_layer.size()[-1]) + + fused_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.padding, + scaled_masked_softmax_fusion=True, + ) + .cuda() + .half() + ) + + fused_softmax_output = fused_softmax( + attention_scores, + (mask != 0), + ) + + torch_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.padding, + scaled_masked_softmax_fusion=False, + ) + .cuda() + .half() + ) + + torch_softmax_output = torch_softmax( + attention_scores, + (mask != 0), + ) + + test_result = (fused_softmax_output - torch_softmax_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_softmax" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}" + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_softmax" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + + +def test_fused_upper_triangle_mask_softmax(): + gpt = GPT2Model.from_pretrained("gpt2").cuda().half() + tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + test_text = ( + "Hello. How are you? I am fine thank you and you? yes Good. 
" + "hi hi hi hi hi hi hi" # 24 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + attention_mask = tokens["attention_mask"].cuda() + attention_mask = attention_mask.view(attention_mask.size(0), -1) + attention_mask = attention_mask[:, None, None, :] + attention_mask = (1.0 - attention_mask) * -10000.0 + attention_mask = attention_mask.repeat(1, 1, attention_mask.size()[-1], 1) + attn = gpt.h[0] + + hidden_states = gpt.wte(tokens["input_ids"].cuda()) + q, k, v = attn.attn.c_attn(hidden_states).split(768, dim=-1) + q = attn.attn._split_heads(q, attn.attn.num_heads, attn.attn.head_dim) + k = attn.attn._split_heads(k, attn.attn.num_heads, attn.attn.head_dim) + attn_weights = torch.matmul(q, k.transpose(-1, -2)) + + sq, sk = q.size(-2), k.size(-2) + causal_mask = attn.attn.bias[:, :, sk - sq : sk, :sk].bool() + total_mask = ~(causal_mask & (attention_mask == 0)) + """ + tensor([[[[False, True, True, ..., True, True, True], + [False, False, True, ..., True, True, True], + [False, False, False, ..., True, True, True], + ..., + [False, False, False, ..., False, True, True], + [False, False, False, ..., False, False, True], + [False, False, False, ..., False, False, False]]] + """ + + fused_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.causal, + scaled_masked_softmax_fusion=True, + ) + .cuda() + .half() + ) + + fused_softmax_output = fused_softmax( + attn_weights, + total_mask, + ) + + torch_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.causal, + scaled_masked_softmax_fusion=False, + ) + .cuda() + .half() + ) + + torch_softmax_output = torch_softmax( + attn_weights, + total_mask, + ) + + test_result = (fused_softmax_output - torch_softmax_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_upper_triangle_mask_softmax" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}" + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_upper_triangle_mask_softmax" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + + +def test_layer_norm(): + bert = BertModel.from_pretrained("bert-base-cased").cuda().half() + tokenizer = BertTokenizer.from_pretrained("bert-base-cased") + test_text = ( + "Hello. How are you? I am fine thank you and you? yes Good. 
" + "hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + # [bsz, seq_len, d_model] + embedding_output = ( + bert.embeddings( + input_ids=tokens["input_ids"].cuda(), + position_ids=None, + token_type_ids=tokens["token_type_ids"].cuda(), + inputs_embeds=None, + past_key_values_length=0, + ) + .cuda() + .half() + ) + + fused_layernorm_layer = ( + MixedFusedLayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half() + ) + + torch_layernorm_layer = ( + LayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half() + ) + + fused_output = fused_layernorm_layer(embedding_output) + torch_output = torch_layernorm_layer(embedding_output) + test_result = (fused_output - torch_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_layer_norm" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_output[-1][-1][:5].tolist()}" + f"\n > torch_values={torch_output[-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_layer_norm" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_output[-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_output[-1][-1][:5].tolist()}" + ) + + +def attention_mask_func(attention_scores, attention_mask): + attention_scores.masked_fill_(attention_mask, -10000.0) + return attention_scores + + +def forward_torch_softmax(input, mask, scale): + input = input * scale + mask_output = attention_mask_func(input, mask) if mask is not None else input + probs = torch.nn.Softmax(dim=-1)(mask_output) + return probs + + +def test_masked_softmax_forward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + masks = torch.randint(0, 2, (batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item()) + error = (softmax_results_torch - softmax_results).abs().max() + assert error < 1e-3 + +def test_masked_softmax_backward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + backward = torch.rand_like(inputs, dtype=torch.float16, device='cuda:0') + masks = torch.randint(0, 2, (batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + back_grad = scaled_masked_softmax_cuda.backward(backward, softmax_results, scale_t[0].item()) + + inputs.requires_grad = True + softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item()) + softmax_results_torch.backward(backward) + error = (back_grad - inputs.grad).abs().max() + assert error < 1e-3 + + +def test_allmasked_softmax_forward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + masks = 
torch.ones((batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + softmax_results_torch = torch.zeros_like(inputs) + error = (softmax_results_torch - softmax_results).abs().max() + assert error == 0.0 + + +def test_allmasked_softmax_backward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + backward = torch.rand_like(inputs, dtype=torch.float16, device='cuda:0') + masks = torch.ones((batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + back_grad = scaled_masked_softmax_cuda.backward(backward, softmax_results, scale_t[0].item()) + inputs.requires_grad = True + softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item()) + softmax_results_torch.backward(backward) + error = (back_grad - inputs.grad).abs().max() + assert error < 1e-3 + + +if __name__ == "__main__": + try: + from transformers import BertTokenizer, GPT2Tokenizer + from transformers.models.bert.modeling_bert import BertModel + from transformers.models.gpt2.modeling_gpt2 import GPT2Model + import transformers + + transformers.logging.set_verbosity( + transformers.logging.FATAL, + ) + + except: + print("\n[Fail] Please install `transformers` package to test fused kernels\n") + exit(-1) + + load() + test_masked_softmax_forward() + test_masked_softmax_backward() + test_allmasked_softmax_forward() + test_allmasked_softmax_backward() + test_load_fused_kernels() + test_fused_softmax() + test_fused_upper_triangle_mask_softmax() + test_layer_norm() diff --git a/multilinguality_megatron/megatron/fused_kernels/type_shim.h b/multilinguality_megatron/megatron/fused_kernels/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..d60a6f8c6fb50e241f9ddcc852adec71e963e1b2 --- /dev/null +++ b/multilinguality_megatron/megatron/fused_kernels/type_shim.h @@ -0,0 +1,103 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + + +#include +#include "compat.h" + + +#define DISPATCH_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch(TYPE) \ + { \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + + +#define DISPATCH_HALF_BFLOAT_AND_FLOAT(TYPE, NAME, ...) \ + switch(TYPE) \ + { \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: \ + { \ + using scalar_t = float; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + + + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\ + switch(TYPEIN) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_in = float; \ + switch(TYPEOUT) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_out = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_in = at::Half; \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_in = at::BFloat16; \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \ + } + diff --git a/multilinguality_megatron/megatron/global_vars.py b/multilinguality_megatron/megatron/global_vars.py new file mode 100644 index 0000000000000000000000000000000000000000..f4c812c2b39da1d758e09ca81986c435fb7f9576 --- /dev/null +++ b/multilinguality_megatron/megatron/global_vars.py @@ -0,0 +1,207 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Megatron global variables.""" + +import os +import sys +from collections import defaultdict + +from megatron import dist_signal_handler +from megatron.tokenizer import build_tokenizer +from .microbatches import build_num_microbatches_calculator +from .timers import Timers + +_GLOBAL_ARGS = None +_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None +_GLOBAL_TOKENIZER = None +_GLOBAL_TENSORBOARD_WRITER = None +_GLOBAL_ADLR_AUTORESUME = None +_GLOBAL_TIMERS = None +_GLOBAL_SIGNAL_HANDLER = None +_GLOBAL_COUNTERS = None + + +def get_args(): + """Return arguments.""" + _ensure_var_is_initialized(_GLOBAL_ARGS, 'args') + return _GLOBAL_ARGS + + +def get_num_microbatches(): + return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get() + + +def get_current_global_batch_size(): + return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size() + + +def update_num_microbatches(consumed_samples, consistency_check=True): + _GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples, + consistency_check) + + +def get_tokenizer(): + """Return tokenizer.""" + _ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer') + return _GLOBAL_TOKENIZER + + +def get_tensorboard_writer(): + """Return our wrapped tensorboard/wandb writer. It can be None so no need + to check if it is initialized.""" + return _GLOBAL_TENSORBOARD_WRITER + + +def get_adlr_autoresume(): + """ADLR autoresume object. 
It can be None so no need + to check if it is initialized.""" + return _GLOBAL_ADLR_AUTORESUME + + +def get_timers(): + """Return timers.""" + _ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers') + return _GLOBAL_TIMERS + + +def get_counters(): + """Return counters.""" + _ensure_var_is_initialized(_GLOBAL_COUNTERS, 'counters') + return _GLOBAL_COUNTERS + + +def get_signal_handler(): + _ensure_var_is_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler') + return _GLOBAL_SIGNAL_HANDLER + + +def _set_signal_handler(): + global _GLOBAL_SIGNAL_HANDLER + _ensure_var_is_not_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler') + _GLOBAL_SIGNAL_HANDLER = dist_signal_handler.DistributedSignalHandler().__enter__() + + +def _set_args(args): + global _GLOBAL_ARGS + _GLOBAL_ARGS = args + + +def set_global_variables(args): + """Set args, tokenizer, tensorboard_writer, adlr_autoresume, and timers.""" + assert args is not None + _ensure_var_is_not_initialized(_GLOBAL_ARGS, 'args') + _set_args(args) + + _build_num_microbatches_calculator(args) + if args.vocab_file or args.tokenizer_type in ["FalconTokenizer", "LlamaTokenizer", "GPT2BPETokenizer"]: + _ = _build_tokenizer(args) + _set_tensorboard_writer(args) + _set_adlr_autoresume(args) + _set_timers(args) + _set_counters(args) + + if args.exit_signal_handler: + _set_signal_handler() + + +def _build_num_microbatches_calculator(args): + global _GLOBAL_NUM_MICROBATCHES_CALCULATOR + _ensure_var_is_not_initialized(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, + 'num microbatches calculator') + _GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(args) + + +def _build_tokenizer(args): + """Initialize tokenizer.""" + global _GLOBAL_TOKENIZER + _ensure_var_is_not_initialized(_GLOBAL_TOKENIZER, 'tokenizer') + _GLOBAL_TOKENIZER = build_tokenizer(args) + return _GLOBAL_TOKENIZER + + +def rebuild_tokenizer(args): + global _GLOBAL_TOKENIZER + _GLOBAL_TOKENIZER = None + return _build_tokenizer(args) + + +def _set_tensorboard_writer(args): + """Set our wrapped tensorboard/wandb writer.""" + global _GLOBAL_TENSORBOARD_WRITER + _ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER, + 'tensorboard writer') + + if getattr(args,"wandb_logger",False): + """ + if this arg is set to True, we check the other wandb relevant arguments and + return a shim which exposes the wandb logging via a tensorboard-y API + """ + if args.rank == (args.world_size - 1): + try: + from megatron.wandb_logger import WandBConfig,WandbTBShim + cfg=WandBConfig.from_args(args) + shim=WandbTBShim(cfg) + print('> setting wandb ...') + _GLOBAL_TENSORBOARD_WRITER=shim + except ModuleNotFoundError: + print('WARNING: WanDB writing requested but is not ' + 'available, ' + 'no WandB logs will be written.', flush=True) + else: + if hasattr(args, 'tensorboard_dir') and \ + args.tensorboard_dir and args.rank == (args.world_size - 1): + try: + from torch.utils.tensorboard import SummaryWriter + print('> setting tensorboard ...') + _GLOBAL_TENSORBOARD_WRITER = SummaryWriter( + log_dir=args.tensorboard_dir, + max_queue=args.tensorboard_queue_size) + except ModuleNotFoundError: + print('WARNING: TensorBoard writing requested but is not ' + 'available (are you using PyTorch 1.1.0 or later?), ' + 'no TensorBoard logs will be written.', flush=True) + + +def _set_adlr_autoresume(args): + """Initialize ADLR autoresume.""" + global _GLOBAL_ADLR_AUTORESUME + _ensure_var_is_not_initialized(_GLOBAL_ADLR_AUTORESUME, 'adlr autoresume') + + if args.adlr_autoresume: + if args.rank == 0: + print('enabling autoresume 
...', flush=True) + sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.')) + try: + from userlib.auto_resume import AutoResume + except BaseException: + print('ADLR autoresume is not available, exiting ...') + sys.exit() + + _GLOBAL_ADLR_AUTORESUME = AutoResume + + +def _set_timers(args): + """Initialize timers.""" + global _GLOBAL_TIMERS + _ensure_var_is_not_initialized(_GLOBAL_TIMERS, 'timers') + _GLOBAL_TIMERS = Timers(args.timing_log_level, args.timing_log_option) + + +def _set_counters(args): + global _GLOBAL_COUNTERS + _ensure_var_is_not_initialized(_GLOBAL_COUNTERS, 'counters') + _GLOBAL_COUNTERS = defaultdict(int) + + +def _ensure_var_is_initialized(var, name): + """Make sure the input variable is not None.""" + assert var is not None, '{} is not initialized.'.format(name) + + +def _ensure_var_is_not_initialized(var, name): + """Make sure the input variable is not None.""" + assert var is None, '{} is already initialized.'.format(name) + + + + diff --git a/multilinguality_megatron/megatron/indexer.py b/multilinguality_megatron/megatron/indexer.py new file mode 100644 index 0000000000000000000000000000000000000000..16df0929c27541182609b86763cebd22a742565e --- /dev/null +++ b/multilinguality_megatron/megatron/indexer.py @@ -0,0 +1,123 @@ +import sys +import time +import torch +import torch.distributed as dist + +from megatron.core import mpu +import megatron.checkpointing +from megatron.data.orqa_wiki_dataset import get_open_retrieval_wiki_dataset +from megatron.data.orqa_wiki_dataset import get_open_retrieval_batch +from megatron.data.biencoder_dataset_utils import get_one_epoch_dataloader +from megatron.data.realm_index import detach, OpenRetreivalDataStore +import megatron.model.biencoder_model + +import megatron.training + + +class IndexBuilder(object): + """ + Object for taking one pass over a dataset and creating a BlockData of its + embeddings + """ + def __init__(self, args): + self.model = None + self.dataloader = None + self.evidence_embedder_obj = None + self.biencoder_shared_query_context_model = args.biencoder_shared_query_context_model + + # need to know whether we're using a REALM checkpoint (args.load) + # or ICT checkpoint + assert not (args.load and args.ict_load) + + self.log_interval = args.indexer_log_interval + self.batch_size = args.indexer_batch_size + + self.load_attributes(args) + self.is_main_builder = mpu.get_data_parallel_rank() == 0 + self.num_total_builders = mpu.get_data_parallel_world_size() + self.iteration = self.total_processed = 0 + + def load_attributes(self, args): + """ + Load the necessary attributes: model, dataloader and empty BlockData + """ + only_context_model = True + if self.biencoder_shared_query_context_model: + only_context_model = False + model_provider_func = megatron.model.biencoder_model.get_model_provider(only_context_model=only_context_model, + biencoder_shared_query_context_model=self.biencoder_shared_query_context_model) + model = megatron.training.get_model(model_provider_func, args=args) + + self.model = megatron.checkpointing.load_biencoder_checkpoint(model, only_context_model=only_context_model) + assert len(self.model) == 1 + self.model[0].eval() + + self.dataset = get_open_retrieval_wiki_dataset() + self.dataloader = iter(get_one_epoch_dataloader(self.dataset, self.batch_size)) + + self.evidence_embedder_obj = OpenRetreivalDataStore( \ + load_from_path=False) + + def track_and_report_progress(self, batch_size): + """ + Utility function for tracking progress + """ + self.iteration += 1 + self.total_processed += 
batch_size * self.num_total_builders + if self.is_main_builder and self.iteration % self.log_interval == 0: + print('Batch {:10d} | Total {:10d}'.format(self.iteration, + self.total_processed), flush=True) + + def build_and_save_index(self): + """ + Goes through one epoch of the dataloader and adds all data to this + instance's BlockData. + + The copy of BlockData is saved as a shard, which when run in a + distributed setting will be consolidated by the rank 0 process + and saved as a final pickled BlockData. + """ + assert len(self.model) == 1 + unwrapped_model = self.model[0] + + while not hasattr(unwrapped_model, 'embed_text'): + unwrapped_model = unwrapped_model.module + + while True: + try: + # batch also has query_tokens and query_pad_data + row_id, context_tokens, context_mask, context_types, \ + context_pad_mask = get_open_retrieval_batch( \ + self.dataloader) + except (StopIteration, IndexError): + break + + # TODO: can we add with torch.no_grad() to reduce memory usage + # detach, separate fields and add to BlockData + assert context_mask.dtype == torch.bool + context_logits = unwrapped_model.embed_text( + unwrapped_model.context_model, context_tokens, context_mask, + context_types) + + context_logits = detach(context_logits) + row_id = detach(row_id) + + self.evidence_embedder_obj.add_block_data(row_id, context_logits) + self.track_and_report_progress(batch_size=len(row_id)) + + # This process signals to finalize its shard and then synchronize with + # the other processes + self.evidence_embedder_obj.save_shard() + torch.distributed.barrier() + del self.model + + # rank 0 process builds the final copy + if self.is_main_builder: + self.evidence_embedder_obj.merge_shards_and_save() + # make sure that every single piece of data was embedded + assert len(self.evidence_embedder_obj.embed_data) == \ + len(self.dataset) + self.evidence_embedder_obj.clear() + + # complete building the final copy + torch.distributed.barrier() diff --git a/multilinguality_megatron/megatron/initialize.py b/multilinguality_megatron/megatron/initialize.py new file mode 100644 index 0000000000000000000000000000000000000000..f9e572edc75d443b43b9853e0000342728481eca --- /dev/null +++ b/multilinguality_megatron/megatron/initialize.py @@ -0,0 +1,275 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Megatron initialization.""" + +import random +import time + +import numpy as np +import torch +from datetime import timedelta + +import megatron +import megatron.fused_kernels +from megatron import get_adlr_autoresume +from megatron import get_tensorboard_writer +from megatron.core import mpu, tensor_parallel + +import megatron.arguments + +from megatron.checkpointing import load_args_from_checkpoint +from megatron.global_vars import set_global_variables +from megatron.model.transformer import bias_dropout_add_fused_train +from megatron.model.fused_bias_gelu import bias_gelu + + +def initialize_megatron(extra_args_provider=None, + args_defaults={}): + """Set global variables, initialize distributed, and + set autoresume and random seeds. + `allow_no_cuda` should not be set unless using megatron for cpu only + data processing. In general this arg should not be set unless you know + what you are doing. + """ + + # Make sure cuda is available. + assert torch.cuda.is_available(), 'Megatron requires CUDA.' 
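As a usage sketch only (assuming a torchrun-style launch where RANK, WORLD_SIZE and LOCAL_RANK are set and the usual Megatron command-line flags are present on sys.argv), a training entry point would call this roughly as:

from megatron.initialize import initialize_megatron
from megatron.global_vars import get_args, get_timers

initialize_megatron(extra_args_provider=None, args_defaults={})
args = get_args()      # parsed, validated global arguments
timers = get_timers()  # global Timers instance created in set_global_variables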
+ + # Parse arguments + args = megatron.arguments.parse_args(extra_args_provider) + + if args.use_checkpoint_args or args_defaults.get('use_checkpoint_args', False): + assert args.load is not None, '--use-checkpoints-args requires --load argument' + load_args_from_checkpoint(args) + + megatron.arguments.validate_args(args, args_defaults) + + # set global args, build tokenizer, and set adlr_autoresume, + # tensorboard-writer, and timers. + set_global_variables(args) + + # torch.distributed initialization + def _finish_mpu_init(): + _initialize_distributed(args) + + # Random seeds for reproducibility. + if args.rank == 0: + print('> setting random seeds to {} ...'.format(args.seed)) + _set_random_seed(args.seed, args.data_parallel_random_init) + + # Megatron's MPU is the master. Complete initialization right away. + _finish_mpu_init() + _init_autoresume() + # _compile_dependencies(args) + + # No continuation function + return None + + +def _compile_dependencies(args): + # ========================= + # Compile dataset C++ code. + # ========================= + # TODO: move this to ninja + if torch.distributed.get_rank() == 0: + start_time = time.time() + print('> compiling dataset index builder ...') + from megatron.data.dataset_utils import compile_helper + compile_helper() + print('>>> done with dataset index builder. Compilation time: {:.3f} ' + 'seconds'.format(time.time() - start_time), flush=True) + + # ================== + # Load fused kernels + # ================== + + # Custom kernel constraints check. + seq_len = args.seq_length + attn_batch_size = \ + (args.num_attention_heads / args.tensor_model_parallel_size) * \ + args.micro_batch_size + # Constraints on sequence length and attn_batch_size to enable warp based + # optimization and upper triangular optimization (for causal mask) + custom_kernel_constraint = seq_len > 16 and seq_len <= 4096 and \ + seq_len % 4 == 0 and attn_batch_size % 4 == 0 + + if not ((args.fp16 or args.bf16) and + custom_kernel_constraint and + args.masked_softmax_fusion): + if args.rank == 0: + print('WARNING: constraints for invoking optimized' + ' fused softmax kernel are not met. We default' + ' back to unfused kernel invocations.', flush=True) + + # Always build on rank zero first. + if torch.distributed.get_rank() == 0: + start_time = time.time() + print('> compiling and loading fused kernels ...', flush=True) + megatron.fused_kernels.load(args) + torch.distributed.barrier() + else: + torch.distributed.barrier() + megatron.fused_kernels.load(args) + # Simple barrier to make sure all ranks have passed the + # compilation phase successfully before moving on to the + # rest of the program. We think this might ensure that + # the lock is released. + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>>> done with compiling and loading fused kernels. ' + 'Compilation time: {:.3f} seconds'.format( + time.time() - start_time), flush=True) + + +def _initialize_distributed(args): + """Initialize torch.distributed and core model parallel.""" + device_count = torch.cuda.device_count() + if torch.distributed.is_initialized(): + if args.rank == 0: + print('torch distributed is already initialized, ' + 'skipping initialization ...', flush=True) + args.rank = torch.distributed.get_rank() + args.world_size = torch.distributed.get_world_size() + else: + if args.rank == 0: + print('> initializing torch distributed ...', flush=True) + # Manually set the device ids. 
+ if device_count > 0: + device = args.rank % device_count + if args.local_rank is not None: + assert args.local_rank == device, \ + 'expected local-rank to be the same as rank % device-count.' + else: + args.local_rank = device + torch.cuda.set_device(device) + # Call the init process + torch.distributed.init_process_group( + backend=args.distributed_backend, + world_size=args.world_size, + rank=args.rank, + timeout=timedelta(minutes=10) + ) + + # Set the tensor model-parallel, pipeline model-parallel, and + # data-parallel communicators. + if device_count > 0: + if mpu.model_parallel_is_initialized(): + print('model parallel is already initialized') + else: + mpu.initialize_model_parallel(args.tensor_model_parallel_size, + args.pipeline_model_parallel_size, + args.virtual_pipeline_model_parallel_size, + args.pipeline_model_parallel_split_rank) + if args.rank == 0: + print(f'> initialized tensor model parallel with size ' + f'{mpu.get_tensor_model_parallel_world_size()}') + print(f'> initialized pipeline model parallel with size ' + f'{mpu.get_pipeline_model_parallel_world_size()}') + + +def _init_autoresume(): + """Set autoresume start time.""" + autoresume = get_adlr_autoresume() + if autoresume: + torch.distributed.barrier() + autoresume.init() + torch.distributed.barrier() + + +def _set_random_seed(seed_, data_parallel_random_init=False): + """Set random seed for reproducability.""" + if seed_ is not None and seed_ > 0: + # Ensure that different pipeline MP stages get different seeds. + seed = seed_ + (100 * mpu.get_pipeline_model_parallel_rank()) + # Ensure different data parallel ranks get different seeds + if data_parallel_random_init: + seed = seed + (10 * mpu.get_data_parallel_rank()) + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.device_count() > 0: + tensor_parallel.model_parallel_cuda_manual_seed(seed) + else: + raise ValueError('Seed ({}) should be a positive integer.'.format(seed_)) + + +def write_args_to_tensorboard(args): + """Write arguments to tensorboard.""" + # NOTE: if we use wandb, then the args are logged on creation, so nothing happens in this + # function. 
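Note that _set_random_seed above keeps runs reproducible while still decorrelating parallel ranks: each pipeline stage offsets the base seed by 100 and, when data_parallel_random_init is set, each data-parallel rank adds a further 10. A trivial sketch of that derivation (illustrative only):

def derived_seed(base_seed: int, pp_rank: int, dp_rank: int,
                 data_parallel_random_init: bool = False) -> int:
    # mirrors _set_random_seed: deterministic, but distinct across pipeline stages
    seed = base_seed + 100 * pp_rank
    if data_parallel_random_init:
        seed += 10 * dp_rank
    return seed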
+ if not getattr(args,"wandb_logger",False): + writer = get_tensorboard_writer() + if writer: + for arg in vars(args): + writer.add_text(arg, str(getattr(args, arg)), + global_step=args.iteration) + + +def set_jit_fusion_options(args): + """Set PyTorch JIT layer fusion options.""" + # flags required to enable jit fusion kernels + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10): + # nvfuser + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(True) + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(True) + torch._C._debug_set_autodiff_subgraph_inlining(False) + else: + # legacy pytorch fuser + torch._C._jit_set_profiling_mode(False) + torch._C._jit_set_profiling_executor(False) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + + _warmup_jit_function(args) + + +def _warmup_jit_function(args): + """ Compile JIT functions before the main training steps """ + if args.bf16: + dtype = torch.bfloat16 + elif args.fp16: + dtype = torch.float16 + else: + dtype = torch.float32 + + # Warmup fused bias+gelu + bias = torch.rand(args.ffn_hidden_size // args.tensor_model_parallel_size, + dtype=dtype, device='cuda') + input = torch.rand((args.seq_length, args.micro_batch_size, + args.ffn_hidden_size // args.tensor_model_parallel_size), + dtype=dtype, device='cuda') + # Warmup JIT fusions with the input grad_enable state of both forward + # prop and recomputation + for bias_grad, input_grad in zip([True, True], [False, True]): + bias.requires_grad, input.requires_grad = bias_grad, input_grad + for _ in range(5): + output = bias_gelu(bias, input) + del bias, input, output + + # Warmup fused bias+dropout+add + if args.sequence_parallel: + seq_length = args.seq_length // mpu.get_tensor_model_parallel_world_size() + else: + seq_length = args.seq_length + input = torch.rand((seq_length, args.micro_batch_size, args.hidden_size), + dtype=dtype, device='cuda') + residual = torch.rand((seq_length, args.micro_batch_size, args.hidden_size), + dtype=dtype, device='cuda') + bias = torch.rand((args.hidden_size), dtype=dtype, device='cuda').expand_as(residual) + dropout_rate = 0.1 + # Warmup JIT fusions with the input grad_enable state of both forward + # prop and recomputation + for input_grad, bias_grad, residual_grad in zip([False, True], [True, True], [True, True]): + input.requires_grad = input_grad + bias.requires_grad = bias_grad + residual.requires_grad = residual_grad + for _ in range(5): + output = bias_dropout_add_fused_train(input, bias, residual, dropout_rate) + del bias, input, residual, output + torch.cuda.empty_cache() diff --git a/multilinguality_megatron/megatron/memory.py b/multilinguality_megatron/megatron/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..a5fef75baa749d557da227bbccf706501ffdd10f --- /dev/null +++ b/multilinguality_megatron/megatron/memory.py @@ -0,0 +1,132 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +import torch + + +# A dictionary of all the memory buffers allocated. 
+_MEM_BUFFS = dict() + + +def allocate_mem_buff(name, numel, dtype, track_usage): + """Allocate a memory buffer.""" + assert name not in _MEM_BUFFS, \ + 'memory buffer {} already allocated.'.format(name) + _MEM_BUFFS[name] = MemoryBuffer(name, numel, dtype, track_usage) + return _MEM_BUFFS[name] + + +def get_mem_buff(name): + """Get the memory buffer.""" + return _MEM_BUFFS[name] + + +class MemoryBuffer: + """Contiguous memory buffer. + Allocate a contiguous memory of type `dtype` and size `numel`. It is + used to reduce memory fragmentation. + + Usage: After the allocation, the `_start` index is set tot the first + index of the memory. A memory chunk starting from `_start` index + can be `allocated` for an input tensor, with the elements of the + tensor being coppied. The buffer can be reused by resetting the + `_start` index. + + """ + def __init__(self, name, numel, dtype, track_usage): + if torch.distributed.get_rank() == 0: + element_size = torch.tensor([], dtype=dtype).element_size() + print('> building the {} memory buffer with {} num elements ' + 'and {} dtype ({:.1f} MB)...'.format( + name, numel, dtype, numel*element_size/1024/1024), + flush=True) + self.name = name + self.numel = numel + self.dtype = dtype + self.data = torch.empty(self.numel, + dtype=self.dtype, + device=torch.cuda.current_device(), + requires_grad=False) + + # Index tracking the start of the free memory. + self._start = 0 + + # Values used for tracking usage. + self.track_usage = track_usage + if self.track_usage: + self.in_use_value = 0.0 + self.total_value = 0.0 + + + def reset(self): + """Reset the buffer start index to the beginning of the buffer.""" + self._start = 0 + + + def is_in_use(self): + """Whether the current buffer hold on to any memory.""" + return self._start > 0 + + + def numel_in_use(self): + """Return number of elements in use.""" + return self._start + + + def add(self, tensor): + """Allocate a chunk of memory from the buffer to tensor and copy + the values.""" + assert tensor.dtype == self.dtype, \ + 'Input tensor type {} different from buffer type {}'.format( + tensor.dtype, self.dtype) + # Number of elements of the input tensor. + tensor_numel = torch.numel(tensor) + new_start = self._start + tensor_numel + assert new_start <= self.numel, \ + 'Not enough memory left in the buffer ({} > {})'.format( + tensor_numel, self.numel - self._start) + # New tensor is a view into the memory. + new_tensor = self.data[self._start:new_start] + self._start = new_start + new_tensor = new_tensor.view(tensor.shape) + new_tensor.copy_(tensor) + # Return a pointer to the new tensor. + return new_tensor + + + def get_data(self): + """Return the data currently in use.""" + if self.track_usage: + self.in_use_value += float(self._start) + self.total_value += float(self.numel) + return self.data[:self._start] + + + def print_average_usage(self): + """Print memory usage average over time. We would like this value + to be as high as possible.""" + assert self.track_usage, 'You need to enable track usage.' 
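The allocate/add/reset cycle described in the MemoryBuffer docstring above can be exercised as follows; this is a sketch that assumes an initialized torch.distributed process group and a visible CUDA device, since the constructor logs from rank 0 and allocates on the current device:

import torch
from megatron.memory import allocate_mem_buff

buff = allocate_mem_buff('activation buffer', numel=1 << 20, dtype=torch.float16, track_usage=False)
a = buff.add(torch.ones(256, 512, dtype=torch.float16, device='cuda'))   # view into the buffer
b = buff.add(torch.zeros(128, 512, dtype=torch.float16, device='cuda'))  # placed right after `a`
buff.reset()  # start index back to 0; the same memory is reused on the next step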
+ if torch.distributed.get_rank() == 0: + print(' > usage of {} memory buffer: {:.2f} %'.format( + self.name, self.in_use_value * 100.0 / self.total_value), + flush=True) + + + +class RingMemBuffer: + """A ring of memory buffers.""" + + def __init__(self, name, num_buffers, numel, dtype, track_usage): + self.num_buffers = num_buffers + self.buffers = [ + allocate_mem_buff(name+' {}'.format(i), numel, dtype, track_usage) + for i in range(num_buffers)] + self._index = -1 + + + def get_next_buffer(self): + self._index += 1 + self._index = self._index % self.num_buffers + buff = self.buffers[self._index] + assert not buff.is_in_use(), 'buffer is already in use.' + return buff diff --git a/multilinguality_megatron/megatron/metrics.py b/multilinguality_megatron/megatron/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..3332f618bb719754042d322f4033abd11a77d9b6 --- /dev/null +++ b/multilinguality_megatron/megatron/metrics.py @@ -0,0 +1,110 @@ +import math +from typing import Callable + +import torch + +from megatron import get_tokenizer +from megatron.utils import average_losses_across_data_parallel_group +from megatron.core.tensor_parallel import vocab_parallel_max_indices + + +class MetricInput: + def __init__(self, batch: tuple, output: torch.Tensor, loss: torch.Tensor): + # regular parameters + (self.tokens, self.labels, self.loss_mask, self.attention_mask, + self.position_ids) = batch + self.output = output + self.loss = loss + # lazy parameters + self._max_indices = None + self._instruct_mask = None + + @property + def max_indices(self) -> torch.Tensor: + if self._max_indices is None: + self._max_indices = vocab_parallel_max_indices(self.output) + return self._max_indices + + @property + def instruct_mask(self) -> torch.Tensor: + if self._instruct_mask is None: + # like loss_mask but ignoring the <|im_end|> and <|im_start|>role\n too + tokenizer = get_tokenizer() + im_start_id, = tokenizer.tokenize("<|im_start|>") + im_end_id, = tokenizer.tokenize("<|im_end|>") + should_keep = torch.ones_like(self.loss_mask) + # mask all indices where <|im_start|> is found plus the next two tokens + # (corresponds to the role and the newline) + i, j = torch.nonzero(self.labels == im_start_id, as_tuple=True) + if torch.any(j + 2 >= should_keep.size(1)): + print("Error calculating instruct mask") + self._instruct_mask = None + return self._instruct_mask + should_keep[i, j] = 0.0 + should_keep[i, j + 1] = 0.0 + should_keep[i, j + 2] = 0.0 + # mask <|im_end|> plus the next token (newline) and the next one + # that is a weird space or something + i, j = torch.nonzero(self.labels == im_end_id, as_tuple=True) + if torch.any(j + 2 >= should_keep.size(1)): + print("Error calculating instruct mask") + self._instruct_mask = None + return self._instruct_mask + should_keep[i, j] = 0.0 + should_keep[i, j] = 0.0 + should_keep[i, j + 1] = 0.0 + should_keep[i, j + 2] = 0.0 + # update mask + self._instruct_mask = self.loss_mask*should_keep + return self._instruct_mask + + +def perplexity(inputs: MetricInput): + ppl = math.exp(min(20, inputs.loss.item())) + return {"ppl": ppl} + + +def accuracy(inputs: MetricInput): + matching = torch.masked_fill(inputs.labels == inputs.max_indices, + inputs.loss_mask == 0, False) + accuracy = torch.count_nonzero(matching)/torch.count_nonzero(inputs.loss_mask) + averaged_accuracy = average_losses_across_data_parallel_group([accuracy]) + return {"lm accuracy": averaged_accuracy[0]} + + +# like accuracy but ignoring the <|im_end|> and <|im_start|> in the +# accuracy 
calculation +def instruct_accuracy(inputs: MetricInput): + if inputs.instruct_mask is None: + accuracy = torch.tensor(torch.nan, device=inputs.labels.device) + else: + matching = torch.masked_fill(inputs.labels == inputs.max_indices, + inputs.instruct_mask == 0, False) + accuracy = torch.count_nonzero(matching)/torch.count_nonzero(inputs.instruct_mask) + averaged_accuracy = average_losses_across_data_parallel_group([accuracy]) + return {"instruct accuracy": averaged_accuracy[0]} + + +def count_loss_mask(inputs: MetricInput): + count = torch.count_nonzero(inputs.loss_mask)/inputs.loss_mask.size(0) + return {"count loss mask": count} + + +def count_instruct_mask(inputs: MetricInput): + if inputs.instruct_mask is None: + return {} + count = torch.count_nonzero(inputs.instruct_mask)/inputs.instruct_mask.size(0) + return {"count instruct mask": count} + + +METRICS = { + "perplexity": perplexity, + "accuracy": accuracy, + "instruct_accuracy": instruct_accuracy, + "count_loss_mask": count_loss_mask, + "count_instruct_mask": count_instruct_mask, +} + + +def get_metric(name: str): + return METRICS[name] diff --git a/multilinguality_megatron/megatron/microbatches.py b/multilinguality_megatron/megatron/microbatches.py new file mode 100644 index 0000000000000000000000000000000000000000..137685af93dd57aea2b0723d3232396d92f9c54c --- /dev/null +++ b/multilinguality_megatron/megatron/microbatches.py @@ -0,0 +1,144 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Megatron number of micro-batches calculators.""" + +from abc import ABC +from abc import abstractmethod + + +def build_num_microbatches_calculator(args): + + # Constant num micro-batches. + if args.rampup_batch_size is None: + num_microbatches_calculator = ConstantNumMicroBatches( + args.global_batch_size, args.micro_batch_size, + args.data_parallel_size) + if args.rank == 0: + print('setting number of micro-batches to constant {}'.format( + num_microbatches_calculator.get()), flush=True) + + else: + assert len(args.rampup_batch_size) == 3, 'expected the following ' \ + 'format: --rampup_batch_size ' \ + ' ' + start_batch_size = int(args.rampup_batch_size[0]) + batch_size_increment = int(args.rampup_batch_size[1]) + ramup_samples = int(args.rampup_batch_size[2]) + if args.rank == 0: + print('will use batch size rampup starting from global batch ' + 'size {} to global batch size {} with batch size increments ' + '{} over {} samples.'.format(start_batch_size, + args.global_batch_size, + batch_size_increment, + ramup_samples), flush=True) + num_microbatches_calculator = RampupBatchsizeNumMicroBatches( + start_batch_size, batch_size_increment, ramup_samples, + args.global_batch_size, args.micro_batch_size, + args.data_parallel_size) + + return num_microbatches_calculator + + +class NumMicroBatchesCalculator(ABC): + + def __init__(self): + self.num_micro_batches = None + self.current_global_batch_size = None + + def get(self): + return self.num_micro_batches + + def get_current_global_batch_size(self): + return self.current_global_batch_size + + @abstractmethod + def update(self, consumed_samples, consistency_check): + pass + + +class ConstantNumMicroBatches(NumMicroBatchesCalculator): + + def __init__(self, global_batch_size, micro_batch_size, data_parallel_size): + micro_batch_times_data_parallel = micro_batch_size * \ + data_parallel_size + assert global_batch_size % micro_batch_times_data_parallel == 0, \ + 'global batch size ({}) is not divisible by micro batch size ({})' \ + ' times data parallel size 
({})'.format(global_batch_size, + micro_batch_size, + data_parallel_size) + self.num_micro_batches = global_batch_size // \ + micro_batch_times_data_parallel + assert self.num_micro_batches >= 1 + self.current_global_batch_size = global_batch_size + + def update(self, consumed_samples, consistency_check): + pass + + +class RampupBatchsizeNumMicroBatches(NumMicroBatchesCalculator): + + def __init__(self, start_batch_size, batch_size_increment, ramup_samples, + global_batch_size, micro_batch_size, data_parallel_size): + """Batch size ramp up. + Over + steps = (global_batch_size - start_batch_size) / batch_size_increment + increment batch size from start-batch-size to global_batch_size using + rampup-samples / steps + samples. + Arguments: + start_batch_size: global batch size to start with + batch_size_increment: global batch size increments + ramup_samples: number of samples to use ramp up global + batch size from `start_batch_size` to `global_batch_size` + global_batch_size: global batch size post rampup + micro_batch_size: micro batch size + data_parallel_size: data parallel size. + """ + + self.micro_batch_size = micro_batch_size + self.data_parallel_size = data_parallel_size + self.micro_batch_times_data_parallel_size = self.micro_batch_size * \ + self.data_parallel_size + assert self.micro_batch_times_data_parallel_size > 0 + + assert start_batch_size > 0 + self.start_batch_size = start_batch_size + + assert global_batch_size > 0 + self.global_batch_size = global_batch_size + diff_batch_size = self.global_batch_size - self.start_batch_size + assert diff_batch_size >= 0 + assert batch_size_increment > 0 + self.batch_size_increment = batch_size_increment + assert diff_batch_size % batch_size_increment == 0, 'expected ' \ + 'global batch size interval ({}) to be divisible by global batch ' \ + 'size increment ({})'.format(diff_batch_size, batch_size_increment) + + num_increments = diff_batch_size // self.batch_size_increment + self.ramup_samples = ramup_samples + assert self.ramup_samples >= 0 + self.rampup_samples_per_increment = self.ramup_samples / num_increments + + # Initialize number of microbatches. + self.update(0, False) + + + def update(self, consumed_samples, consistency_check): + + if consumed_samples > self.ramup_samples: + self.current_global_batch_size = self.global_batch_size + else: + steps = int(consumed_samples / self.rampup_samples_per_increment) + self.current_global_batch_size = self.start_batch_size + \ + steps * self.batch_size_increment + assert self.current_global_batch_size <= self.global_batch_size + + if consistency_check: + assert self.current_global_batch_size % \ + self.micro_batch_times_data_parallel_size == 0, 'current global ' \ + 'batch size ({}) is not divisible by micro_batch_size ({}) times' \ + 'data parallel size ({})'.format(self.current_global_batch_size, + self.micro_batch_size, + self.data_parallel_size) + self.num_micro_batches = self.current_global_batch_size // \ + self.micro_batch_times_data_parallel_size diff --git a/multilinguality_megatron/megatron/model/__init__.py b/multilinguality_megatron/megatron/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72899e1025733fc255525ddf158000707779352c --- /dev/null +++ b/multilinguality_megatron/megatron/model/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
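For intuition about the ramp-up logic in RampupBatchsizeNumMicroBatches above, here is a minimal standalone sketch of the same arithmetic; the concrete numbers (start 32, increment 32, global batch 256, 7000 ramp-up samples, micro-batch 4, data-parallel size 8) are illustrative only, not values taken from this repository.

# Standalone sketch of the batch-size ramp-up schedule (hypothetical values).
def rampup_schedule(consumed_samples, start=32, increment=32, global_bs=256,
                    rampup_samples=7000, micro_bs=4, dp_size=8):
    num_increments = (global_bs - start) // increment          # 7 increments here
    samples_per_increment = rampup_samples / num_increments    # 1000 samples per step
    if consumed_samples > rampup_samples:
        current = global_bs
    else:
        steps = int(consumed_samples / samples_per_increment)
        current = start + steps * increment
    # current global batch size and the resulting number of micro-batches
    return current, current // (micro_bs * dp_size)

if __name__ == "__main__":
    for samples in (0, 1500, 3500, 7000, 10000):
        print(samples, rampup_schedule(samples))   # (32, 1) ... (256, 8)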
+from .distributed import DistributedDataParallel +from .enums import ModelType +from .fused_layer_norm import MixedFusedLayerNorm as LayerNorm +from .fused_layer_norm import RMSNorm as RMSNorm +from .gpt_model import GPTModel +from .module import Float16Module + +a = 1 + +from .bert_model import BertModel +from .falcon_model import FalconModel +from .llama_model import LlamaModel +from .mistral_model import MistralModel +from .t5_model import T5Model +from .gemma_model import GemmaModel \ No newline at end of file diff --git a/multilinguality_megatron/megatron/model/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01bb30ccbc4f908dcc543b7bd4da2dde79e91335 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/bert_model.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/bert_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85a3f6cb6ccd6613ad11d18db28d47c1adab9236 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/bert_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/distributed.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/distributed.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe8126a8ea22fc84f69e257d4cb2d84d6075319e Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/distributed.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/enums.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/enums.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe3ba85a27767d16832e7cbe45e0251c39b6b9f1 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/enums.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/falcon_model.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/falcon_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..168f0e0f12080db54233e558d14da384b13c9d74 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/falcon_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/fused_bias_gelu.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/fused_bias_gelu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6c1f8ff4f2256a223c78ad17a1974c6d8441f24 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/fused_bias_gelu.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/fused_layer_norm.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/fused_layer_norm.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49eb2f853740deae85507c82afd1003d786b87d8 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/fused_layer_norm.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/fused_softmax.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/fused_softmax.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..74d528ecbe017a7626f23c02f3149589293313c0 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/fused_softmax.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/gemma_model.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/gemma_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49c284d232fa9005c7d06dbe9c205f36a30eca0a Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/gemma_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/glu_activations.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/glu_activations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92da24a4e4bd99ba6cc54fed263e25ea3094bbf7 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/glu_activations.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/gpt_model.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/gpt_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91f11314ab54632d8d5ddae565f84018abf8f5f0 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/gpt_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/language_model.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/language_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eefde4494b04115252ce02458a55478dcdd8cf6 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/language_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/llama_model.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/llama_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..893b319e530901e3e18012db1191d04e827ba899 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/llama_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/mistral_model.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/mistral_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfdca8906292e224e0474e06f87d0b116dcc9bfe Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/mistral_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/module.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/module.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eec1c06a40c8ebd79b2a18eb6f3673133eac1cb1 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/module.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/positional_embeddings.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/positional_embeddings.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..074ba65b8bb4e82c47a4219fe11a76a11f01f4f8 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/positional_embeddings.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/t5_model.cpython-39.pyc 
b/multilinguality_megatron/megatron/model/__pycache__/t5_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69dea29661a0c480e12e2e327378a51468a8dc70 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/t5_model.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/transformer.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/transformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..802254f7645c277c2b7cbcd7af3c0e8364ba78cc Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/transformer.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/__pycache__/utils.cpython-39.pyc b/multilinguality_megatron/megatron/model/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f31dec2c6845e90801348f984ae470ec6cc3b054 Binary files /dev/null and b/multilinguality_megatron/megatron/model/__pycache__/utils.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/model/bert_model.py b/multilinguality_megatron/megatron/model/bert_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ab859015613024a320d82d00e8a21be78f5cec2e --- /dev/null +++ b/multilinguality_megatron/megatron/model/bert_model.py @@ -0,0 +1,242 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""BERT model.""" + +import torch + +from megatron import get_args +from megatron.core import tensor_parallel +from megatron.model.enums import AttnMaskType +from megatron.model.language_model import parallel_lm_logits +import megatron.model.language_model +from megatron.model import LayerNorm +import megatron.model.utils + +from megatron.model.utils import erf_gelu +from megatron.model.utils import init_method_normal +from megatron.model.utils import scaled_init_method_normal +from .module import MegatronModule + + +def bert_extended_attention_mask(attention_mask): + # We create a 3D attention mask from a 2D tensor mask. + # [b, 1, s] + attention_mask_b1s = attention_mask.unsqueeze(1) + # [b, s, 1] + attention_mask_bs1 = attention_mask.unsqueeze(2) + # [b, s, s] + attention_mask_bss = attention_mask_b1s * attention_mask_bs1 + # [b, 1, s, s] + extended_attention_mask = attention_mask_bss.unsqueeze(1) + + # Convert attention mask to binary: + extended_attention_mask = (extended_attention_mask < 0.5) + + return extended_attention_mask + +def bert_position_ids(token_ids): + # Create position ids + seq_length = token_ids.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, + device=token_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(token_ids) + + return position_ids + + +class BertLMHead(MegatronModule): + """Masked LM head for Bert + + Arguments: + mpu_vocab_size: model parallel size of vocabulary. + hidden_size: hidden size + init_method: init method for weight initialization + layernorm_epsilon: tolerance for layer norm divisions + parallel_output: whether output logits being distributed or not. 
+ """ + + def __init__(self, mpu_vocab_size, hidden_size, init_method, + layernorm_epsilon, parallel_output): + + super(BertLMHead, self).__init__() + + args = get_args() + + self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size)) + tensor_parallel.set_tensor_model_parallel_attributes(self.bias, True, 0, 1) + self.parallel_output = parallel_output + + self.dense = megatron.model.utils.get_linear_layer(hidden_size, + hidden_size, + init_method, + args.perform_initialization) + setattr(self.dense.weight, 'sequence_parallel', args.sequence_parallel) + setattr(self.dense.bias, 'sequence_parallel', args.sequence_parallel) + + self.layernorm = LayerNorm(hidden_size, + eps=layernorm_epsilon, + sequence_parallel=args.sequence_parallel) + self.gelu = torch.nn.functional.gelu + if args.onnx_safe: + self.gelu = erf_gelu + + def forward(self, hidden_states, word_embeddings_weight): + hidden_states = self.dense(hidden_states) + hidden_states = self.gelu(hidden_states) + hidden_states = self.layernorm(hidden_states) + output = parallel_lm_logits(hidden_states, + word_embeddings_weight, + self.parallel_output, + bias=self.bias) + return output + + +def post_language_model_processing(lm_output, pooled_output, + lm_head, binary_head, + lm_labels, + logit_weights, + fp16_lm_cross_entropy): + # Output. + lm_logits = lm_head( + lm_output, logit_weights) + + binary_logits = None + if binary_head is not None: + binary_logits = binary_head(pooled_output) + + if lm_labels is None: + # [s b h] => [b s h] + return lm_logits.transpose(0,1).contiguous(), binary_logits + else: + # [b s] => [s b] + lm_labels = lm_labels.transpose(0, 1).contiguous() + # lm_logits : [s, b, h] and lm_labels: [s, b] + if fp16_lm_cross_entropy: + assert lm_logits.dtype == torch.half + lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels) + else: + lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(), + lm_labels) + # [s, b] => [b s] + lm_loss = lm_loss.transpose(0,1).contiguous() + return lm_loss, binary_logits + + +class BertModel(MegatronModule): + """Bert Language model.""" + + def __init__(self, + num_tokentypes: int=2, + add_binary_head=True, + parallel_output=True, + pre_process=True, + post_process=True, + model_type=None): + super(BertModel, self).__init__() + args = get_args() + + self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy + self.add_binary_head = add_binary_head + self.parallel_output = parallel_output + self.pre_process = pre_process + self.post_process = post_process + + init_method = init_method_normal(args.init_method_std) + scaled_init_method = scaled_init_method_normal(args.init_method_std, + args.num_layers) + + self.language_model, self._language_model_key = megatron.model.language_model.get_language_model( + num_tokentypes=num_tokentypes, + add_pooler=self.add_binary_head, + encoder_attn_mask_type=AttnMaskType.padding, + init_method=init_method, + scaled_init_method=scaled_init_method, + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type) + + self.initialize_word_embeddings(init_method_normal, args) + if self.post_process: + self.lm_head = BertLMHead( + self.word_embeddings_weight().size(0), + args.hidden_size, init_method, args.layernorm_epsilon, parallel_output) + self._lm_head_key = 'lm_head' + self.binary_head = None + if self.add_binary_head: + self.binary_head = megatron.model.utils.get_linear_layer(args.hidden_size, + 2, + init_method, + args.perform_initialization) + self._binary_head_key = 'binary_head' + 
+ def set_input_tensor(self, input_tensor): + """See megatron.model.transformer.set_input_tensor()""" + self.language_model.set_input_tensor(input_tensor) + + def forward(self, bert_model_input, attention_mask, + tokentype_ids=None, lm_labels=None): + + extended_attention_mask = bert_extended_attention_mask(attention_mask) + input_ids = bert_model_input + position_ids = bert_position_ids(input_ids) + + lm_output = self.language_model( + input_ids, + position_ids, + extended_attention_mask, + tokentype_ids=tokentype_ids + ) + + if self.post_process and self.add_binary_head: + lm_output, pooled_output = lm_output + else: + pooled_output = None + + if self.post_process: + return post_language_model_processing(lm_output, + pooled_output, + self.lm_head, + self.binary_head, + lm_labels, + self.word_embeddings_weight(), + self.fp16_lm_cross_entropy) + else: + return lm_output + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """For easy load when model is combined with other heads, + add an extra key.""" + + state_dict_ = {} + state_dict_[self._language_model_key] \ + = self.language_model.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + if self.post_process: + state_dict_[self._lm_head_key] \ + = self.lm_head.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + if self.post_process and self.add_binary_head: + state_dict_[self._binary_head_key] \ + = self.binary_head.state_dict(prefix=prefix, keep_vars=keep_vars) + # Save word_embeddings. + if self.post_process and not self.pre_process: + state_dict_[self._word_embeddings_for_head_key] \ + = self.word_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars) + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + + self.language_model.load_state_dict( + state_dict[self._language_model_key], strict=strict) + if self.post_process: + self.lm_head.load_state_dict( + state_dict[self._lm_head_key], strict=strict) + if self.post_process and self.add_binary_head: + self.binary_head.load_state_dict( + state_dict[self._binary_head_key], strict=strict) + # Load word_embeddings. 
+ if self.post_process and not self.pre_process: + self.word_embeddings.load_state_dict( + state_dict[self._word_embeddings_for_head_key], strict=strict) diff --git a/multilinguality_megatron/megatron/model/biencoder_model.py b/multilinguality_megatron/megatron/model/biencoder_model.py new file mode 100644 index 0000000000000000000000000000000000000000..92fd7399cac1917fce7839599042411bcc9ad5ec --- /dev/null +++ b/multilinguality_megatron/megatron/model/biencoder_model.py @@ -0,0 +1,345 @@ +import os +import torch +import sys + +import megatron.model.language_model +from megatron import get_args, print_rank_0, get_tokenizer +from megatron.core import mpu +from megatron.checkpointing import fix_query_key_value_ordering +from megatron.checkpointing import get_checkpoint_tracker_filename +from megatron.checkpointing import get_checkpoint_name +from megatron.model.bert_model import bert_position_ids +from megatron.model.enums import AttnMaskType +import megatron.model.utils +from megatron.model.utils import init_method_normal +from megatron.model.utils import scaled_init_method_normal +from .module import MegatronModule + + +def get_model_provider(only_query_model=False, + only_context_model=False, + biencoder_shared_query_context_model=False, + model_type=None): + + def model_provider(pre_process=True, + post_process=True): + """Build the model.""" + print_rank_0('building Bienoder model ...') + model = biencoder_model_provider(only_query_model=only_query_model, + only_context_model=only_context_model, + biencoder_shared_query_context_model=biencoder_shared_query_context_model, + pre_process=pre_process, + post_process=post_process, + model_type=model_type) + + return model + + return model_provider + + +def biencoder_model_provider(only_query_model=False, + only_context_model=False, + biencoder_shared_query_context_model=False, + pre_process=True, + post_process=True, + model_type=None): + """Build the model.""" + + assert mpu.get_tensor_model_parallel_world_size() == 1 and \ + mpu.get_pipeline_model_parallel_world_size() == 1, \ + "Model parallel size > 1 not supported for ICT" + + print_rank_0('building BiEncoderModel...') + + # simpler to just keep using 2 tokentypes since + # the LM we initialize with has 2 tokentypes + model = BiEncoderModel( + num_tokentypes=2, + parallel_output=False, + only_query_model=only_query_model, + only_context_model=only_context_model, + biencoder_shared_query_context_model=\ + biencoder_shared_query_context_model, + pre_process=pre_process, + post_process=post_process, + model_type=model_type + ) + + return model + + +class BiEncoderModel(MegatronModule): + """Bert-based module for Biencoder model.""" + + def __init__(self, + num_tokentypes=1, + parallel_output=True, + only_query_model=False, + only_context_model=False, + biencoder_shared_query_context_model=False, + pre_process=True, + post_process=True, + model_type=None): + super(BiEncoderModel, self).__init__() + args = get_args() + + bert_kwargs = dict( + num_tokentypes=num_tokentypes, + parallel_output=parallel_output, + pre_process=pre_process, + post_process=post_process, + model_type=model_type + ) + + self.biencoder_shared_query_context_model = \ + biencoder_shared_query_context_model + assert not (only_context_model and only_query_model) + self.use_context_model = not only_query_model + self.use_query_model = not only_context_model + self.biencoder_projection_dim = args.biencoder_projection_dim + + if self.biencoder_shared_query_context_model: + self.model = PretrainedBertModel(**bert_kwargs) + 
self._model_key = 'shared_model' + self.query_model, self.context_model = self.model, self.model + else: + if self.use_query_model: + # this model embeds (pseudo-)queries - Embed_input in the paper + self.query_model = PretrainedBertModel(**bert_kwargs) + self._query_key = 'query_model' + + if self.use_context_model: + # this model embeds evidence blocks - Embed_doc in the paper + self.context_model = PretrainedBertModel(**bert_kwargs) + self._context_key = 'context_model' + + def set_input_tensor(self, input_tensor): + """See megatron.model.transformer.set_input_tensor()""" + # this is just a placeholder and will be needed when model + # parallelism will be used + # self.language_model.set_input_tensor(input_tensor) + return + + def forward(self, query_tokens, query_attention_mask, query_types, + context_tokens, context_attention_mask, context_types): + """Run a forward pass for each of the models and + return the respective embeddings.""" + + if self.use_query_model: + query_logits = self.embed_text(self.query_model, + query_tokens, + query_attention_mask, + query_types) + else: + raise ValueError("Cannot embed query without the query model.") + if self.use_context_model: + context_logits = self.embed_text(self.context_model, + context_tokens, + context_attention_mask, + context_types) + else: + raise ValueError("Cannot embed block without the block model.") + return query_logits, context_logits + + @staticmethod + def embed_text(model, tokens, attention_mask, token_types): + """Embed a batch of tokens using the model""" + logits = model(tokens, + attention_mask, + token_types) + return logits + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """Save dict with state dicts of each of the models.""" + state_dict_ = {} + if self.biencoder_shared_query_context_model: + state_dict_[self._model_key] = \ + self.model.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars) + else: + if self.use_query_model: + state_dict_[self._query_key] = \ + self.query_model.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars) + + if self.use_context_model: + state_dict_[self._context_key] = \ + self.context_model.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars) + + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Load the state dicts of each of the models""" + if self.biencoder_shared_query_context_model: + print_rank_0("Loading shared query-context model") + self.model.load_state_dict(state_dict[self._model_key], \ + strict=strict) + else: + if self.use_query_model: + print_rank_0("Loading query model") + self.query_model.load_state_dict( \ + state_dict[self._query_key], strict=strict) + + if self.use_context_model: + print_rank_0("Loading context model") + self.context_model.load_state_dict( \ + state_dict[self._context_key], strict=strict) + + def init_state_dict_from_bert(self): + """Initialize the state from a pretrained BERT model + on iteration zero of ICT pretraining""" + args = get_args() + + if args.bert_load is None: + print_rank_0("bert_load argument is None") + return + + tracker_filename = get_checkpoint_tracker_filename(args.bert_load) + if not os.path.isfile(tracker_filename): + raise FileNotFoundError("Could not find BERT checkpoint") + with open(tracker_filename, 'r') as f: + iteration = int(f.read().strip()) + assert iteration > 0 + + checkpoint_name = get_checkpoint_name(args.bert_load, iteration, False) + if mpu.get_data_parallel_rank() == 0: + print('global rank {} is loading BERT 
checkpoint {}'.format( + torch.distributed.get_rank(), checkpoint_name)) + + # Load the checkpoint. + try: + state_dict = torch.load(checkpoint_name, map_location='cpu') + except ModuleNotFoundError: + from megatron.fp16_deprecated import loss_scaler + # For backward compatibility. + print_rank_0(' > deserializing using the old code structure ...') + sys.modules['fp16.loss_scaler'] = sys.modules[ + 'megatron.fp16_deprecated.loss_scaler'] + sys.modules['megatron.fp16.loss_scaler'] = sys.modules[ + 'megatron.fp16_deprecated.loss_scaler'] + state_dict = torch.load(checkpoint_name, map_location='cpu') + sys.modules.pop('fp16.loss_scaler', None) + sys.modules.pop('megatron.fp16.loss_scaler', None) + except BaseException: + print_rank_0('could not load the BERT checkpoint') + sys.exit() + + checkpoint_version = state_dict.get('checkpoint_version', 0) + + # load the LM state dict into each model + model_dict = state_dict['model']['language_model'] + + if self.biencoder_shared_query_context_model: + self.model.language_model.load_state_dict(model_dict) + fix_query_key_value_ordering(self.model, checkpoint_version) + else: + if self.use_query_model: + self.query_model.language_model.load_state_dict(model_dict) + # give each model the same ict_head to begin with as well + if self.biencoder_projection_dim > 0: + query_proj_state_dict = \ + self.state_dict_for_save_checkpoint()\ + [self._query_key]['projection_enc'] + fix_query_key_value_ordering(self.query_model, checkpoint_version) + + if self.use_context_model: + self.context_model.language_model.load_state_dict(model_dict) + if self.query_model is not None and \ + self.biencoder_projection_dim > 0: + self.context_model.projection_enc.load_state_dict\ + (query_proj_state_dict) + fix_query_key_value_ordering(self.context_model, checkpoint_version) + + +class PretrainedBertModel(MegatronModule): + """BERT-based encoder for queries or contexts used for + learned information retrieval.""" + + def __init__(self, + num_tokentypes=2, + parallel_output=True, + pre_process=True, + post_process=True, + model_type=None): + super(PretrainedBertModel, self).__init__() + + args = get_args() + tokenizer = get_tokenizer() + + self.pad_id = tokenizer.pad + self.biencoder_projection_dim = args.biencoder_projection_dim + self.parallel_output = parallel_output + self.pre_process = pre_process + self.post_process = post_process + init_method = init_method_normal(args.init_method_std) + scaled_init_method = scaled_init_method_normal( + args.init_method_std, args.num_layers) + + self.language_model, self._language_model_key = megatron.model.language_model.get_language_model( + num_tokentypes=num_tokentypes, + add_pooler=False, + encoder_attn_mask_type=AttnMaskType.padding, + init_method=init_method, + scaled_init_method=scaled_init_method, + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type) + + if args.biencoder_projection_dim > 0: + self.projection_enc = megatron.model.utils.get_linear_layer(args.hidden_size, + args.biencoder_projection_dim, + init_method, + args.perform_initialization) + self._projection_enc_key = 'projection_enc' + + def forward(self, input_ids, attention_mask, tokentype_ids=None): + extended_attention_mask = attention_mask.unsqueeze(1) + position_ids = bert_position_ids(input_ids) + + lm_output = self.language_model(input_ids, + position_ids, + extended_attention_mask, + tokentype_ids=tokentype_ids) + # This mask will be used in average-pooling and max-pooling + pool_mask = (input_ids == 
self.pad_id).unsqueeze(2) + + # Taking the representation of the [CLS] token of BERT + pooled_output = lm_output[0, :, :] + + # Converting to float16 dtype + pooled_output = pooled_output.to(lm_output.dtype) + + # Output. + if self.biencoder_projection_dim: + pooled_output = self.projection_enc(pooled_output) + + return pooled_output + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """For easy load when model is combined with other heads, + add an extra key.""" + + state_dict_ = {} + state_dict_[self._language_model_key] \ + = self.language_model.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars) + + if self.biencoder_projection_dim > 0: + state_dict_[self._projection_enc_key] = \ + self.projection_enc.state_dict(prefix=prefix, + keep_vars=keep_vars) + + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + print_rank_0("loading pretrained weights") + self.language_model.load_state_dict( + state_dict[self._language_model_key], strict=strict) + + if self.biencoder_projection_dim > 0: + print_rank_0("loading projection head weights") + self.projection_enc.load_state_dict( + state_dict[self._projection_enc_key], strict=strict) diff --git a/multilinguality_megatron/megatron/model/classification.py b/multilinguality_megatron/megatron/model/classification.py new file mode 100644 index 0000000000000000000000000000000000000000..032f396f30c2303b5181e5be06a16ddbb4ab175c --- /dev/null +++ b/multilinguality_megatron/megatron/model/classification.py @@ -0,0 +1,107 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Classification model.""" + +import torch + +import megatron.model.language_model +from megatron import get_args, print_rank_last +from megatron.model.enums import AttnMaskType +from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids +import megatron.model.utils +from megatron.model.utils import init_method_normal +from megatron.model.utils import scaled_init_method_normal +from .module import MegatronModule + + +class Classification(MegatronModule): + def __init__(self, + num_classes, + num_tokentypes=2, + pre_process=True, + post_process=True, + model_type=None): + super(Classification, self).__init__(share_word_embeddings=False) + args = get_args() + + self.num_classes = num_classes + self.pre_process = pre_process + self.post_process = post_process + init_method = init_method_normal(args.init_method_std) + + self.language_model, self._language_model_key = megatron.model.language_model.get_language_model( + num_tokentypes=num_tokentypes, + add_pooler=True, + encoder_attn_mask_type=AttnMaskType.padding, + init_method=init_method, + scaled_init_method=scaled_init_method_normal(args.init_method_std, + args.num_layers), + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type + ) + + # Multi-choice head. 
+ if self.post_process: + self.classification_dropout = torch.nn.Dropout(args.hidden_dropout) + self.classification_head = megatron.model.utils.get_linear_layer(args.hidden_size, + self.num_classes, + init_method, + args.perform_initialization) + self._classification_head_key = 'classification_head' + + def set_input_tensor(self, input_tensor): + """See megatron.model.transformer.set_input_tensor()""" + self.language_model.set_input_tensor(input_tensor) + + def forward(self, model_input, attention_mask, tokentype_ids=None): + + extended_attention_mask = bert_extended_attention_mask(attention_mask) + input_ids = model_input + position_ids = bert_position_ids(input_ids) + + lm_output = self.language_model( + input_ids, + position_ids, + extended_attention_mask, + tokentype_ids=tokentype_ids + ) + + if self.post_process: + _, pooled_output = lm_output + classification_output = self.classification_dropout(pooled_output) + classification_logits = self.classification_head(classification_output) + + # Reshape back to separate choices. + classification_logits = classification_logits.view(-1, self.num_classes) + + return classification_logits + return lm_output + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """For easy load when model is combined with other heads, + add an extra key.""" + + state_dict_ = {} + state_dict_[self._language_model_key] \ + = self.language_model.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + if self.post_process: + state_dict_[self._classification_head_key] \ + = self.classification_head.state_dict(prefix=prefix, keep_vars=keep_vars) + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + + self.language_model.load_state_dict( + state_dict[self._language_model_key], strict=strict) + if self.post_process: + if self._classification_head_key in state_dict: + self.classification_head.load_state_dict( + state_dict[self._classification_head_key], strict=strict) + else: + print_rank_last('***WARNING*** could not find {} in the checkpoint, ' + 'initializing to random'.format( + self._classification_head_key)) diff --git a/multilinguality_megatron/megatron/model/distributed.py b/multilinguality_megatron/megatron/model/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..f91f8a63e3001a228d971f8a85fe021aaaf4a675 --- /dev/null +++ b/multilinguality_megatron/megatron/model/distributed.py @@ -0,0 +1,232 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from abc import ABC +from abc import abstractmethod +import math + +import torch +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + +from megatron import get_args +from megatron.core import mpu +from .module import MegatronModule + + +class MemoryBuffer: + + def __init__(self, numel, numel_padded, dtype): + self.numel = numel + self.numel_padded = numel_padded + self.dtype = dtype + self.data = torch.zeros(self.numel_padded, + dtype=self.dtype, + device=torch.cuda.current_device(), + requires_grad=False) + + def zero(self): + """Reset the buffer to zero.""" + self.data.zero_() + + + def get(self, shape, start_index): + """Return a tensor with the input `shape` as a view into the + 1-D data starting at `start_index`.""" + end_index = start_index + shape.numel() + assert end_index <= self.numel, \ + 'requested tensor is out of the buffer range.' 
+ buffer_tensor = self.data[start_index:end_index] + buffer_tensor = buffer_tensor.view(shape) + return buffer_tensor + + + +class DistributedDataParallelBase(MegatronModule, ABC): + """Abstract class for DDP.""" + + def __init__(self, module): + super(DistributedDataParallelBase, self).__init__() + # Keep a pointer to the model. + self.module = module + + + @abstractmethod + def allreduce_gradients(self): + pass + + + def forward(self, *inputs, **kwargs): + return self.module(*inputs, **kwargs) + + + def state_dict(self, prefix='', keep_vars=False): + return self.module.state_dict(prefix=prefix, keep_vars=keep_vars) + + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + return self.module.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + + + def load_state_dict(self, state_dict, strict=True): + self.module.load_state_dict(state_dict, strict=strict) + + + +class DistributedDataParallel(DistributedDataParallelBase): + """DDP with contiguous buffers options to storre and accumulate gradients. + This class: + - has the potential to reduce memory fragmentation. + - provides the option to do the gradient accumulation + in a type other than the params type (for example fp32) + + Arguments: + module: input model. + accumulate_allreduce_grads_in_fp32: if true do the gradient accumulation + and the gradient all-reduce all in in float32. If this option is + true, we require `use_contiguous_buffers` to be true too. + use_contiguous_buffers: if true, use a contiguous buffer to store the + gradients. + """ + + def __init__(self, module, + accumulate_allreduce_grads_in_fp32, + use_contiguous_buffers): + + super(DistributedDataParallel, self).__init__(module) + + self.accumulate_allreduce_grads_in_fp32 \ + = accumulate_allreduce_grads_in_fp32 + self.use_contiguous_buffers = use_contiguous_buffers + # If we are using fp32-accumulate-allreduce explicitly + # this means we need main grads in a continous buffer. + if self.accumulate_allreduce_grads_in_fp32: + assert self.use_contiguous_buffers + + # =================================== + # Rest of this part applies only to + # the case we use continuous buffers. + # =================================== + self._grad_buffers = None + self._grad_buffer_param_index_map = None + if self.use_contiguous_buffers: + self._grad_buffers = {} + self._grad_buffer_param_index_map = {} + data_parallel_world_size = mpu.get_data_parallel_world_size() + + # Simple function to define buffer type. + def _get_buffer_type(param): + return torch.float if \ + self.accumulate_allreduce_grads_in_fp32 else param.dtype + + # First calculate total number of elements per type. + type_num_elements = {} + for param in self.module.parameters(): + if param.requires_grad: + dtype = _get_buffer_type(param) + type_num_elements[dtype] = type_num_elements.get(dtype, 0) \ + + param.data.nelement() + + # Allocate the buffer. + for dtype, num_elements in type_num_elements.items(): + + # If using distributed optimizer, pad memory buffer to be + # multiple of data_parallel_world_size. (This padding is done + # due to a constraint with the reduce_scatter op, which requires + # all tensors have equal size. See: optimizer.py.) + num_elements_padded = data_parallel_world_size * \ + int(math.ceil(num_elements / data_parallel_world_size)) + + # Allocate grad buffer. + self._grad_buffers[dtype] = MemoryBuffer(num_elements, + num_elements_padded, + dtype) + + # Assume the back prop order is reverse the params order, + # store the start index for the gradients. 
+ for param in self.module.parameters(): + if param.requires_grad: + dtype = _get_buffer_type(param) + type_num_elements[dtype] -= param.data.nelement() + param.main_grad = self._grad_buffers[dtype].get( + param.data.shape, type_num_elements[dtype]) + if dtype not in self._grad_buffer_param_index_map: + self._grad_buffer_param_index_map[dtype] = {} + self._grad_buffer_param_index_map[dtype][param] = ( + type_num_elements[dtype], + type_num_elements[dtype] + param.data.nelement(), + ) + + # Backward hook. + # Accumalation function for the gradients. We need + # to store them so they don't go out of scope. + self.grad_accs = [] + # Loop over all the parameters in the model. + for param in self.module.parameters(): + if param.requires_grad: + # Expand so we get access to grad_fn. + param_tmp = param.expand_as(param) + # Get the gradient accumulator functtion. + grad_acc = param_tmp.grad_fn.next_functions[0][0] + grad_acc.register_hook(self._make_param_hook(param)) + self.grad_accs.append(grad_acc) + + + def _make_param_hook(self, param): + """Create the all-reduce hook for backprop.""" + # Hook used for back-prop. + def param_hook(*unused): + # Add the gradient to the buffer. + if param.grad is not None: + # The gradient function of linear layers is fused with GEMMs + param.main_grad.add_(param.grad.data) + # Now we can deallocate grad memory. + param.grad = None + return param_hook + + + def zero_grad_buffer(self): + """Set the grad buffer data to zero. Needs to be called at the + begining of each iteration.""" + assert self._grad_buffers is not None, 'buffers are not initialized.' + for _, buffer_ in self._grad_buffers.items(): + buffer_.zero() + + + def broadcast_params(self): + for param in self.module.parameters(): + torch.distributed.broadcast(param.data, + src=mpu.get_data_parallel_src_rank(), + group=mpu.get_data_parallel_group()) + + + def allreduce_gradients(self): + """Reduce gradients across data parallel ranks.""" + # If we have buffers, simply reduce the data in the buffer. + if self._grad_buffers is not None: + for _, buffer_ in self._grad_buffers.items(): + buffer_.data /= mpu.get_data_parallel_world_size() + torch.distributed.all_reduce( + buffer_.data, group=mpu.get_data_parallel_group()) + else: + # Otherwise, bucketize and all-reduce + buckets = {} + # Pack the buckets. + for param in self.module.parameters(): + if param.requires_grad and param.grad is not None: + tp = param.data.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(param) + param.main_grad = param.grad + + # For each bucket, all-reduce and copy all-reduced grads. + for tp in buckets: + bucket = buckets[tp] + grads = [param.grad.data for param in bucket] + coalesced = _flatten_dense_tensors(grads) + coalesced /= mpu.get_data_parallel_world_size() + torch.distributed.all_reduce( + coalesced, group=mpu.get_data_parallel_group()) + for buf, synced in zip(grads, _unflatten_dense_tensors( + coalesced, grads)): + buf.copy_(synced) diff --git a/multilinguality_megatron/megatron/model/enums.py b/multilinguality_megatron/megatron/model/enums.py new file mode 100644 index 0000000000000000000000000000000000000000..45e352178d8ec63c84902bc929ff07fa50231a94 --- /dev/null +++ b/multilinguality_megatron/megatron/model/enums.py @@ -0,0 +1,28 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
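A minimal sketch of the contiguous-grad-buffer idea that MemoryBuffer and DistributedDataParallel implement above: each parameter's main_grad is a view into one flat buffer, so all gradients can be reduced with a single collective. The two-parameter "model" and the divide-by-two stand-in for the data-parallel all-reduce are purely illustrative.

# Illustrative: gradients as views into one flat buffer (single all-reduce possible).
import torch

params = [torch.nn.Parameter(torch.randn(4, 3)), torch.nn.Parameter(torch.randn(5))]
total = sum(p.numel() for p in params)
flat = torch.zeros(total)                    # stands in for a MemoryBuffer

# carve out one view per parameter, mirroring MemoryBuffer.get()
offset = 0
for p in params:
    p.main_grad = flat[offset:offset + p.numel()].view_as(p)
    offset += p.numel()

# accumulate a fake gradient into the views ...
for p in params:
    p.main_grad.add_(torch.ones_like(p))

# ... then "all-reduce" everything with one operation on the flat buffer
flat /= 2                                    # stand-in for averaging over the DP group
assert torch.allclose(params[0].main_grad, torch.full((4, 3), 0.5))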
+ +import enum + + +class ModelType(enum.Enum): + encoder_or_decoder = 1 + encoder_and_decoder = 2 + + +class LayerType(enum.Enum): + encoder = 1 + decoder = 2 + + +class AttnType(enum.Enum): + self_attn = 1 + cross_attn = 2 + + +class AttnMaskType(enum.Enum): + padding = 1 + causal = 2 + + +class PositionEmbeddingType(enum.Enum): + rotary = 1 + absolute = 2 diff --git a/multilinguality_megatron/megatron/model/falcon_model.py b/multilinguality_megatron/megatron/model/falcon_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf5a2ac244d12717b54c15e88962465d0bd9ada --- /dev/null +++ b/multilinguality_megatron/megatron/model/falcon_model.py @@ -0,0 +1,41 @@ +"""Falcon Model.""" + +import warnings + +from megatron import get_args +from .enums import PositionEmbeddingType +from . import GPTModel + + +class FalconModel(GPTModel): + def __init__(self, + num_tokentypes: int = 0, + parallel_output: bool = True, + pre_process: bool = True, + post_process: bool = True, + model_type=None): + args = get_args() + assert args.position_embedding_type == PositionEmbeddingType.rotary, \ + f"Falcon uses rotary embedding, not {args.position_embedding_type}" + assert isinstance(args.num_attention_heads_kv, int), \ + "Falcon needs a not None num_attention_heads_kv parameter" + assert not args.use_post_ln, \ + "FalconModel requires pre-normalization, not use_post_ln" + assert args.glu_activation is None, \ + "FalconModel requires gelu activation (set glu_activation=None)" + assert not args.use_bias, "Falcon does not use bias" + assert args.parallel_attn, "Falcon uses parallel_attn" + if not args.parallel_layernorm: + warnings.warn("Falcon uses parallel_layernorm, or are you running falcon-7b?") + + if not args.use_flash_attn: + warnings.warn("Falcon should use flash attn") + if args.bias_gelu_fusion: + warnings.warn("Falcon should not use bias_gelu_fusion") + if args.bias_dropout_fusion: + warnings.warn("Falcon should not use bias_dropout_fusion") + if args.hidden_dropout > 0.0 and not args.lima_dropout: + warnings.warn("Falcon should not use dropout") + super().__init__(num_tokentypes=num_tokentypes, parallel_output=parallel_output, + pre_process=pre_process, post_process=post_process, + model_type=model_type) diff --git a/multilinguality_megatron/megatron/model/fused_bias_gelu.py b/multilinguality_megatron/megatron/model/fused_bias_gelu.py new file mode 100644 index 0000000000000000000000000000000000000000..29222db024eb5c5e54c7f38f58be8edd45c49b39 --- /dev/null +++ b/multilinguality_megatron/megatron/model/fused_bias_gelu.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch + + +###### BIAS GELU FUSION/ NO AUTOGRAD ################ +# 1/sqrt(2*pi)-> 0.3989423 +# 1/sqrt(2) -> 0.70710678 +# sqrt(2/pi) -> 0.79788456 +# this function is tanh approximation of gelu +# actual gelu is: +# x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) + +@torch.jit.script +def bias_gelu(bias, y): + x = bias + y + return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) + +# gradient of tanh approximation of gelu +# gradient of actual gelu is: +# 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) +@torch.jit.script +def bias_gelu_back(g, bias, y): + x = bias + y + tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) + # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243 + ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out) + return ff*g + +class GeLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, bias): + ctx.save_for_backward(input, bias) + return bias_gelu(bias, input) + + @staticmethod + def backward(ctx, grad_output): + input, bias = ctx.saved_tensors + tmp = bias_gelu_back(grad_output, bias, input) + return tmp, tmp + +bias_gelu_impl = GeLUFunction.apply diff --git a/multilinguality_megatron/megatron/model/fused_layer_norm.py b/multilinguality_megatron/megatron/model/fused_layer_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..29bb082ed03caa786744ccacce11f68361ac0b31 --- /dev/null +++ b/multilinguality_megatron/megatron/model/fused_layer_norm.py @@ -0,0 +1,188 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""This code is copied fron NVIDIA apex: + https://github.com/NVIDIA/apex + with some changes. """ + +import importlib +import numbers + +import torch +import torch.nn as nn +from torch.nn.parameter import Parameter +from torch.nn import init +from megatron.core.utils import make_viewless_tensor + +try: + from apex.contrib.layer_norm.layer_norm import FastLayerNormFN + + HAVE_PERSIST_LAYER_NORM = True +except: + HAVE_PERSIST_LAYER_NORM = False + +global fused_mix_prec_layer_norm_cuda +fused_mix_prec_layer_norm_cuda = None + + +class FusedLayerNormAffineFunction(torch.autograd.Function): + + @staticmethod + def forward(ctx, input, weight, bias, normalized_shape, eps): + + ctx.normalized_shape = normalized_shape + ctx.eps = eps + input_ = input.contiguous() + weight_ = weight.contiguous() + bias_ = bias.contiguous() + output, mean, invvar = fused_mix_prec_layer_norm_cuda.forward_affine( + input_, ctx.normalized_shape, weight_, bias_, ctx.eps + ) + if False: + print(input_.shape) + print(ctx.normalized_shape) + print(weight_.shape) + print(bias_.shape) + print(ctx.eps) + + ctx.save_for_backward(input_, weight_, bias_, mean, invvar) + + return output + + @staticmethod + def backward(ctx, grad_output): + + input_, weight_, bias_, mean, invvar = ctx.saved_tensors + grad_input = grad_weight = grad_bias = None + grad_input, grad_weight, grad_bias = ( + fused_mix_prec_layer_norm_cuda.backward_affine( + grad_output.contiguous(), + mean, + invvar, + input_, + ctx.normalized_shape, + weight_, + bias_, + ctx.eps, + ) + ) + + return grad_input, grad_weight, grad_bias, None, None + + +class MixedFusedLayerNorm(torch.nn.Module): + + def __init__( + self, + normalized_shape, + eps=1e-5, + no_persist_layer_norm=True, + sequence_parallel=False, + ): + super(MixedFusedLayerNorm, self).__init__() + + global fused_mix_prec_layer_norm_cuda + # fused_mix_prec_layer_norm_cuda = importlib.import_module( + # "fused_mix_prec_layer_norm_cuda") + fused_mix_prec_layer_norm_cuda = importlib.import_module( + "fused_layer_norm_cuda" + ) + + # List of hiddens sizes supported in the persistent layer norm kernel + # If the hidden size is not supported, fall back to the non-persistent + # kernel. 
+ persist_ln_hidden_sizes = [ + 1024, + 1536, + 2048, + 2304, + 3072, + 3840, + 4096, + 5120, + 6144, + 8192, + 10240, + 12288, + 12800, + 15360, + 16384, + 18432, + 20480, + 24576, + 25600, + 30720, + 32768, + 40960, + 49152, + 65536, + ] + if ( + normalized_shape not in persist_ln_hidden_sizes + or not HAVE_PERSIST_LAYER_NORM + ): + no_persist_layer_norm = True + + if isinstance(normalized_shape, numbers.Integral): + normalized_shape = (normalized_shape,) + self.normalized_shape = torch.Size(normalized_shape) + self.eps = eps + self.weight = Parameter(torch.Tensor(*normalized_shape)) + self.bias = Parameter(torch.Tensor(*normalized_shape)) + self.reset_parameters() + self.no_persist_layer_norm = no_persist_layer_norm + self.sequence_parallel = sequence_parallel + + # set sequence parallelism flag on weight and bias parameters + setattr(self.weight, "sequence_parallel", self.sequence_parallel) + setattr(self.bias, "sequence_parallel", self.sequence_parallel) + + def reset_parameters(self): + + init.ones_(self.weight) + init.zeros_(self.bias) + + def forward(self, input): + if self.no_persist_layer_norm: + return FusedLayerNormAffineFunction.apply( + input, self.weight, self.bias, self.normalized_shape, self.eps + ) + else: + output = FastLayerNormFN.apply(input, self.weight, self.bias, self.eps) + + # Apex's fast layer norm function outputs a 'view' tensor (i.e., has + # a populated '_base' field). This will result in schedule.py's + # deallocate_output_tensor() throwing an error, so a viewless tensor is + # created to prevent this. + output = make_viewless_tensor( + inp=output, requires_grad=input.requires_grad, keep_graph=True + ) + + return output + + +class RMSNorm(torch.nn.Module): + def __init__( + self, + dim: int, + eps: float = 1e-6, + sequence_parallel: bool = False, + gemma: bool = False, + ): + super().__init__() + self.eps = eps + self.weight = nn.Parameter(torch.ones(dim)) + self.gemma = gemma + + setattr(self.weight, "sequence_parallel", sequence_parallel) + + def _norm(self, x): + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + if self.gemma: + output = self._norm(x.float()) + output = output * (1.0 + self.weight.float()) + return output.type_as(x) + else: + output = self._norm(x.float()).type_as(x) + return output * self.weight diff --git a/multilinguality_megatron/megatron/model/fused_softmax.py b/multilinguality_megatron/megatron/model/fused_softmax.py new file mode 100644 index 0000000000000000000000000000000000000000..ed29262acdce88eef5c06496db78b2c8d5c5f4b5 --- /dev/null +++ b/multilinguality_megatron/megatron/model/fused_softmax.py @@ -0,0 +1,213 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +import torch +import torch.nn as nn +from megatron.model.enums import AttnMaskType + + +class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function): + """ + Fused operation which performs following three operations in sequence + 1. Scale the tensor. + 2. Apply upper triangular mask (typically used in gpt models). + 3. Perform softmax. 
+ """ + + @staticmethod + def forward(ctx, inputs, scale): + import scaled_upper_triang_masked_softmax_cuda + + scale_t = torch.tensor([scale]) + softmax_results = scaled_upper_triang_masked_softmax_cuda.forward( + inputs, scale_t[0] + ) + + ctx.save_for_backward(softmax_results, scale_t) + return softmax_results + + @staticmethod + def backward(ctx, output_grads): + import scaled_upper_triang_masked_softmax_cuda + + softmax_results, scale_t = ctx.saved_tensors + input_grads = scaled_upper_triang_masked_softmax_cuda.backward( + output_grads, softmax_results, scale_t[0] + ) + + return input_grads, None + + +class ScaledMaskedSoftmax(torch.autograd.Function): + """ + Fused operation which performs following three operations in sequence + 1. Scale the tensor. + 2. Apply the mask. + 3. Perform softmax. + """ + + @staticmethod + def forward(ctx, inputs, mask, scale): + import scaled_masked_softmax_cuda + + scale_t = torch.tensor([scale]) + + softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0]) + ctx.save_for_backward(softmax_results, scale_t) + return softmax_results + + @staticmethod + def backward(ctx, output_grads): + import scaled_masked_softmax_cuda + + softmax_results, scale_t = ctx.saved_tensors + + input_grads = scaled_masked_softmax_cuda.backward( + output_grads, softmax_results, scale_t[0] + ) + return input_grads, None, None + + +class ScaledSoftmax(torch.autograd.Function): + """ + Fused operation which performs following two operations in sequence + 1. Scale the tensor. + 2. Perform softmax. + """ + + @staticmethod + def forward(ctx, inputs, scale): + import scaled_softmax_cuda + + scale_t = torch.tensor([scale]) + + softmax_results = scaled_softmax_cuda.forward( + inputs, scale_t[0] + ) + ctx.save_for_backward(softmax_results, scale_t) + return softmax_results + + @staticmethod + def backward(ctx, output_grads): + import scaled_softmax_cuda + + softmax_results, scale_t = ctx.saved_tensors + + input_grads = scaled_softmax_cuda.backward( + output_grads, softmax_results, scale_t[0] + ) + return input_grads, None, None + + +class FusedScaleMaskSoftmax(nn.Module): + """ + fused operation: scaling + mask + softmax + + Arguments: + input_in_fp16: flag to indicate if input in fp16 data format. + input_in_bf16: flag to indicate if input in bf16 data format. + attn_mask_type: attention mask type (pad or causal) + scaled_masked_softmax_fusion: flag to indicate user want to use softmax fusion + mask_func: mask function to be applied. + softmax_in_fp32: if true, softmax in performed at fp32 precision. + scale: scaling factor used in input tensor scaling. + """ + + def __init__( + self, + input_in_fp16, + input_in_bf16, + attn_mask_type, + scaled_masked_softmax_fusion, + mask_func, + softmax_in_fp32, + scale, + ): + super(FusedScaleMaskSoftmax, self).__init__() + self.input_in_fp16 = input_in_fp16 + self.input_in_bf16 = input_in_bf16 + assert not ( + self.input_in_fp16 and self.input_in_bf16 + ), "both fp16 and bf16 flags cannot be active at the same time." 
+ self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16 + self.attn_mask_type = attn_mask_type + self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion + self.mask_func = mask_func + self.softmax_in_fp32 = softmax_in_fp32 + self.scale = scale + + assert ( + self.scale is None or softmax_in_fp32 + ), "softmax should be in fp32 when scaled" + + def forward(self, input, mask): + # [b, np, sq, sk] + assert input.dim() == 4 + + if self.is_kernel_available(mask, *input.size()): + return self.forward_fused_softmax(input, mask) + else: + return self.forward_torch_softmax(input, mask) + + def is_kernel_available(self, mask, b, np, sq, sk): + attn_batches = b * np + + if ( + self.scaled_masked_softmax_fusion # user want to fuse + and self.input_in_float16 # input must be fp16 + and 16 < sk <= 4096 # sk must be 16 ~ 2048 + and sq % 4 == 0 # sq must be divisor of 4 + and sk % 4 == 0 # sk must be divisor of 4 + and attn_batches % 4 == 0 # np * b must be divisor of 4 + ): + if 0 <= sk <= 4096: + batch_per_block = self.get_batch_per_block(sq, sk, b, np) + + if self.attn_mask_type == AttnMaskType.causal: + if attn_batches % batch_per_block == 0: + return True + else: + if sq % batch_per_block == 0: + return True + return False + + def forward_fused_softmax(self, input, mask): + b, np, sq, sk = input.size() + scale = self.scale if self.scale is not None else 1.0 + + if self.attn_mask_type == AttnMaskType.causal: + assert sq == sk, "causal mask is only for self attention" + + # input is 3D tensor (attn_batches, sq, sk) + input = input.view(-1, sq, sk) + probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale) + return probs.view(b, np, sq, sk) + else: + # input is 4D tensor (b, np, sq, sk) + if mask is not None: + return ScaledMaskedSoftmax.apply(input, mask, scale) + else: + return ScaledSoftmax.apply(input, scale) + + def forward_torch_softmax(self, input, mask): + if self.input_in_float16 and self.softmax_in_fp32: + input = input.float() + + if self.scale is not None: + input = input * self.scale + mask_output = self.mask_func(input, mask) if mask is not None else input + probs = torch.nn.Softmax(dim=-1)(mask_output) + + if self.input_in_float16 and self.softmax_in_fp32: + if self.input_in_fp16: + probs = probs.half() + else: + probs = probs.bfloat16() + + return probs + + @staticmethod + def get_batch_per_block(sq, sk, b, np): + import scaled_masked_softmax_cuda + + return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np) diff --git a/multilinguality_megatron/megatron/model/gemma_model.py b/multilinguality_megatron/megatron/model/gemma_model.py new file mode 100644 index 0000000000000000000000000000000000000000..220b7420eec975fdc4123217fcb2a6d9ab3e2974 --- /dev/null +++ b/multilinguality_megatron/megatron/model/gemma_model.py @@ -0,0 +1,43 @@ +"""Llama Model.""" + +import warnings + +from megatron import get_args +from .enums import PositionEmbeddingType +from . 
import GPTModel
+
+
+class GemmaModel(GPTModel):
+    def __init__(self,
+                 num_tokentypes: int = 0,
+                 parallel_output: bool = True,
+                 pre_process: bool = True,
+                 post_process: bool = True,
+                 model_type=None,
+                 version: int = 2):
+
+        args = get_args()
+
+        # mandatory arguments
+        assert version in {1, 2}, f"Unknown gemma version {version}"
+        assert args.position_embedding_type == PositionEmbeddingType.rotary, \
+            f"Gemma uses rotary embedding, not {args.position_embedding_type}"
+        assert not args.use_post_ln, "Gemma does not use post_ln"
+        assert args.glu_activation == "geglu", "Gemma works with geglu activation"
+        assert not args.use_bias, "Gemma does not use bias"
+        assert not args.parallel_attn, "Gemma does not use parallel_attn"
+        assert args.use_rms_norm, "Gemma uses rms_norm"
+        assert args.tie_embed_logits, "Gemma ties embedding and lm_head weights"
+
+        # recommended arguments
+        if args.bias_gelu_fusion:
+            warnings.warn("Gemma is not intended to use bias_gelu_fusion")
+        if args.bias_dropout_fusion:
+            warnings.warn("Gemma is not intended to use bias_dropout_fusion")
+        if args.hidden_dropout > 0.0 and not args.lima_dropout:
+            warnings.warn("Gemma is not intended to use dropout")
+        if args.attention_dropout > 0.0:
+            warnings.warn("Gemma is not intended to use dropout")
+        super().__init__(num_tokentypes=num_tokentypes, parallel_output=parallel_output,
+                         pre_process=pre_process, post_process=post_process,
+                         model_type=model_type)
diff --git a/multilinguality_megatron/megatron/model/glu_activations.py b/multilinguality_megatron/megatron/model/glu_activations.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca2ae8cbf4a678716f0f2e142d98078abdb49d13
--- /dev/null
+++ b/multilinguality_megatron/megatron/model/glu_activations.py
@@ -0,0 +1,49 @@
+# Adapted from: https://github.com/bigscience-workshop/Megatron-DeepSpeed
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+class _GLUBaseModule(nn.Module):
+    def __init__(self, activation_fn):
+        super().__init__()
+        self.activation_fn = activation_fn
+
+    def forward(self, x):
+        x1, x2 = torch.chunk(x, 2, dim=-1)
+        return x1 * self.activation_fn(x2)
+
+
+class LiGLU(_GLUBaseModule):
+    def __init__(self):
+        super().__init__(nn.Identity())
+
+
+class GEGLU(_GLUBaseModule):
+    def __init__(self):
+        super().__init__(F.gelu)
+
+
+class ReGLU(_GLUBaseModule):
+    def __init__(self):
+        super().__init__(F.relu)
+
+
+class SwiGLU(_GLUBaseModule):
+    def __init__(self):
+        super().__init__(F.silu)
+
+
+liglu = LiGLU()
+geglu = GEGLU()
+reglu = ReGLU()
+swiglu = SwiGLU()
+
+
+GLU_ACTIVATIONS = {
+    "geglu": geglu,
+    "liglu": liglu,
+    "reglu": reglu,
+    "swiglu": swiglu,
+}
diff --git a/multilinguality_megatron/megatron/model/gpt_model.py b/multilinguality_megatron/megatron/model/gpt_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3d94eccecc1105ee5031a154b470c3ebeecc165
--- /dev/null
+++ b/multilinguality_megatron/megatron/model/gpt_model.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
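+# Layout note: activations are kept sequence-major ([s, b, h]) inside the
+# model; logits and the per-token loss are transposed back to batch-major
+# ([b, s, ...]) in post_language_model_processing() before being returned.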
+ +"""GPT-2 model.""" + +import torch + +from megatron import get_args +from megatron.core import tensor_parallel +from .module import MegatronModule + +from .enums import AttnMaskType +from .language_model import parallel_lm_logits +import megatron.model.language_model +from .utils import init_method_normal +from .utils import scaled_init_method_normal + + +def post_language_model_processing(lm_output, labels, logit_weights, + parallel_output, + fp16_lm_cross_entropy): + + # Output. Format [s b h] + output = parallel_lm_logits( + lm_output, + logit_weights, + parallel_output) + + if labels is None: + # [s b h] => [b s h] + return None, output.transpose(0,1).contiguous() + else: + # [b s] => [s b] + labels = labels.transpose(0,1).contiguous() + if fp16_lm_cross_entropy: + assert output.dtype == torch.half + loss = tensor_parallel.vocab_parallel_cross_entropy(output, labels) + else: + loss = tensor_parallel.vocab_parallel_cross_entropy(output.float(), labels) + + # [s b] => [b, s] + loss = loss.transpose(0,1).contiguous() + return loss, output.transpose(0, 1).contiguous() + + +class GPTModel(MegatronModule): + """GPT-2 Language model.""" + + def __init__(self, + num_tokentypes=0, + parallel_output=True, + pre_process=True, + post_process=True, + model_type=None): + + args = get_args() + super(GPTModel, self).__init__(share_word_embeddings=args.tie_embed_logits) + self.tie_embed_logits = args.tie_embed_logits + + self.parallel_output = parallel_output + self.pre_process = pre_process + self.post_process = post_process + self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy + + # self.language_model, self._language_model_key = megatron.model.language_model( + self.language_model, self._language_model_key = megatron.model.language_model.get_language_model( + num_tokentypes=num_tokentypes, + add_pooler=False, + encoder_attn_mask_type=AttnMaskType.causal, + init_method=init_method_normal(args.init_method_std), + scaled_init_method=scaled_init_method_normal(args.init_method_std, + args.num_layers), + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type) + + if self.tie_embed_logits: + self.initialize_word_embeddings(init_method_normal, args) + + def set_input_tensor(self, input_tensor): + """See megatron.model.transformer.set_input_tensor()""" + self.language_model.set_input_tensor(input_tensor) + + def forward(self, input_ids, position_ids, attention_mask, labels=None, + tokentype_ids=None, inference_params=None): + + lm_output = self.language_model( + input_ids, + position_ids, + attention_mask, + inference_params=inference_params) + + if self.post_process: + return post_language_model_processing( + lm_output, labels, + self.word_embeddings_weight(), + self.parallel_output, + self.fp16_lm_cross_entropy) + else: + return lm_output + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + state_dict_ = {} + state_dict_[self._language_model_key] \ + = self.language_model.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars) + # Save word_embeddings. + if self.post_process and not self.pre_process and self.tie_embed_logits: + state_dict_[self._word_embeddings_for_head_key] \ + = self.word_embeddings.state_dict(prefix=prefix, + keep_vars=keep_vars) + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + + # Load word_embeddings. 
+ if self.post_process and not self.pre_process and self.tie_embed_logits: + self.word_embeddings.load_state_dict( + state_dict[self._word_embeddings_for_head_key], strict=strict) + if self._language_model_key in state_dict: + state_dict = state_dict[self._language_model_key] + self.language_model.load_state_dict(state_dict, strict=strict) diff --git a/multilinguality_megatron/megatron/model/language_model.py b/multilinguality_megatron/megatron/model/language_model.py new file mode 100644 index 0000000000000000000000000000000000000000..bb3e07746630ef640a1340a423fae12263903a2d --- /dev/null +++ b/multilinguality_megatron/megatron/model/language_model.py @@ -0,0 +1,735 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Transformer based language model.""" +from typing import Callable + +import torch +from torch import nn + +import megatron +import megatron.model.transformer +import megatron.model.utils +from megatron.core import mpu, tensor_parallel +from megatron.core.parallel_state import get_tensor_model_parallel_rank +from megatron.core.tensor_parallel.layers import ( + _initialize_affine_weight_cpu, + _initialize_affine_weight_gpu, +) +from megatron.core.tensor_parallel.utils import VocabUtility +from megatron.model.enums import AttnMaskType, LayerType, PositionEmbeddingType +from megatron.model.utils import init_method_normal, scaled_init_method_normal + +from .module import MegatronModule + + +def parallel_lm_logits(input_, word_embeddings_weight, parallel_output, bias=None): + """LM logits using word embedding weights.""" + + args = megatron.get_args() + # Parallel logits. + if args.async_tensor_model_parallel_allreduce or args.sequence_parallel: + input_parallel = input_ + model_parallel = mpu.get_tensor_model_parallel_world_size() > 1 + async_grad_allreduce = ( + args.async_tensor_model_parallel_allreduce + and model_parallel + and not args.sequence_parallel + ) + else: + input_parallel = tensor_parallel.copy_to_tensor_model_parallel_region(input_) + async_grad_allreduce = False + + # Matrix multiply. + logits_parallel = tensor_parallel.linear_with_grad_accumulation_and_async_allreduce( + input=input_parallel, + weight=word_embeddings_weight, + bias=bias, + gradient_accumulation_fusion=args.gradient_accumulation_fusion, + async_grad_allreduce=async_grad_allreduce, + sequence_parallel_enabled=args.sequence_parallel, + ) + # Gather if needed. + if parallel_output: + return logits_parallel + return tensor_parallel.gather_from_tensor_model_parallel_region(logits_parallel) + + +def get_language_model( + num_tokentypes, + add_pooler: bool, + encoder_attn_mask_type, + init_method=None, + scaled_init_method=None, + add_encoder=True, + add_decoder=False, + decoder_attn_mask_type=AttnMaskType.causal, + pre_process=True, + post_process=True, + args=None, + model_type=None, +): + assert args is not None + # model_type = args.model_type + """Build language model and return along with the key to save.""" + if init_method is None: + init_method = init_method_normal(args.init_method_std) + + if scaled_init_method is None: + scaled_init_method = scaled_init_method_normal( + args.init_method_std, args.num_layers + ) + # Language model. 
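+    # The TransformerLanguageModel built below bundles the input embedding,
+    # the encoder/decoder ParallelTransformer stacks, the optional pooler and,
+    # when embeddings and logits are untied, a separate lm_head parameter.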
+ print(f"add_encoder: {add_encoder}") + print(f"add_decoder: {add_decoder}") + language_model = TransformerLanguageModel( + init_method, + scaled_init_method, + encoder_attn_mask_type, + num_tokentypes=num_tokentypes, + add_encoder=add_encoder, + add_decoder=add_decoder, + decoder_attn_mask_type=decoder_attn_mask_type, + add_pooler=add_pooler, + pre_process=pre_process, + post_process=post_process, + args=args, + model_type=model_type, + ) + # key used for checkpoints. + language_model_key = "language_model" + return language_model, language_model_key + + +class Pooler(MegatronModule): + """ + Pool hidden states of a specific token (for example start of the + sequence) and add a linear transformation followed by a tanh. + + Arguments: + hidden_size: hidden size + init_method: weight initialization method for the linear layer. + bias is set to zero. + """ + + def __init__(self, hidden_size, init_method, args): + super(Pooler, self).__init__() + self.dense = megatron.model.utils.get_linear_layer( + hidden_size, hidden_size, init_method, args.perform_initialization + ) + self.sequence_parallel = args.sequence_parallel + + def forward(self, hidden_states, sequence_index=0): + # hidden_states: [s, b, h] + # sequence_index: index of the token to pool. + + # gather data along sequence dimensions + # same pooler is run on all tensor parallel nodes + if self.sequence_parallel: + hidden_states = tensor_parallel.gather_from_sequence_parallel_region( + hidden_states, tensor_parallel_output_grad=False + ) + + pooled = hidden_states[sequence_index, :, :] + pooled = self.dense(pooled) + pooled = torch.tanh(pooled) + return pooled + + +class Embedding(MegatronModule): + """Language model embeddings. + + Arguments: + hidden_size: hidden size + vocab_size: vocabulary size + max_sequence_length: maximum size of sequence. This + is used for positional embedding + embedding_dropout_prob: dropout probability for embeddings + init_method: weight initialization method + num_tokentypes: size of the token-type embeddings. 0 value + will ignore this embedding + """ + + def __init__( + self, + hidden_size, + vocab_size, + max_position_embeddings, + embedding_dropout_prob, + init_method, + num_tokentypes=0, + ): + super(Embedding, self).__init__() + + self.hidden_size = hidden_size + self.init_method = init_method + self.num_tokentypes = num_tokentypes + + args = megatron.get_args() + + # Word embeddings (parallel). + self.word_embeddings = tensor_parallel.VocabParallelEmbedding( + vocab_size, + self.hidden_size, + init_method=self.init_method, + params_dtype=args.params_dtype, + use_cpu_initialization=args.use_cpu_initialization, + perform_initialization=args.perform_initialization, + ) + self._word_embeddings_key = "word_embeddings" + + # Position embedding (serial). + self.position_embedding_type = args.position_embedding_type + if self.position_embedding_type == PositionEmbeddingType.absolute: + assert max_position_embeddings is not None + self.position_embeddings = torch.nn.Embedding( + max_position_embeddings, self.hidden_size + ) + self._position_embeddings_key = "position_embeddings" + # Initialize the position embeddings. + # if args.perform_initialization: # NOTE: always initialize them if absolute? + self.init_method(self.position_embeddings.weight) + else: + self.position_embeddings = None + + # Token type embedding. + # Add this as an optional field that can be added through + # method call so we can load a pretrain model without + # token types and add them as needed. 
+ self._tokentype_embeddings_key = "tokentype_embeddings" + if self.num_tokentypes > 0: + self.tokentype_embeddings = torch.nn.Embedding( + self.num_tokentypes, self.hidden_size + ) + # Initialize the token-type embeddings. + if args.perform_initialization: + self.init_method(self.tokentype_embeddings.weight) + else: + self.tokentype_embeddings = None + + self.fp32_residual_connection = args.fp32_residual_connection + self.sequence_parallel = args.sequence_parallel + # Embeddings dropout + self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob) + + def zero_parameters(self): + """Zero out all parameters in embedding.""" + self.word_embeddings.weight.data.fill_(0) + self.word_embeddings.weight.shared = True + self.position_embeddings.weight.data.fill_(0) + self.position_embeddings.weight.shared = True + if self.num_tokentypes > 0: + self.tokentype_embeddings.weight.data.fill_(0) + self.tokentype_embeddings.weight.shared = True + + def add_tokentype_embeddings(self, num_tokentypes): + """Add token-type embedding. This function is provided so we can add + token-type embeddings in case the pretrained model does not have it. + This allows us to load the model normally and then add this embedding. + """ + if self.tokentype_embeddings is not None: + raise Exception("tokentype embeddings is already initialized") + if torch.distributed.get_rank() == 0: + print( + "adding embedding for {} tokentypes".format(num_tokentypes), flush=True + ) + self.num_tokentypes = num_tokentypes + self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size) + # Initialize the token-type embeddings. + self.init_method(self.tokentype_embeddings.weight) + + def forward(self, input_ids, position_ids, tokentype_ids=None): + # Embeddings. + words_embeddings = self.word_embeddings(input_ids) + embeddings = words_embeddings + + if self.position_embedding_type == PositionEmbeddingType.absolute: + assert self.position_embeddings is not None + embeddings = embeddings + self.position_embeddings(position_ids) + else: + assert self.position_embeddings is None + + if tokentype_ids is not None: + assert self.tokentype_embeddings is not None + embeddings = embeddings + self.tokentype_embeddings(tokentype_ids) + else: + assert self.tokentype_embeddings is None + + # Data format change to avoid explicit tranposes : [b s h] --> [s b h]. + embeddings = embeddings.transpose(0, 1).contiguous() + + # If the input flag for fp32 residual connection is set, convert for float. + if self.fp32_residual_connection: + embeddings = embeddings.float() + + # Dropout. 
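+        # Under sequence parallelism the dropout mask is drawn from the
+        # per-rank RNG tracker so that each rank drops a different slice of
+        # the scattered sequence; otherwise plain dropout is applied.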
+ if self.sequence_parallel: + embeddings = tensor_parallel.scatter_to_sequence_parallel_region(embeddings) + with tensor_parallel.get_cuda_rng_tracker().fork(): + embeddings = self.embedding_dropout(embeddings) + else: + embeddings = self.embedding_dropout(embeddings) + + return embeddings + + def state_dict_for_save_checkpoint(self, prefix="", keep_vars=False): + """For easy load.""" + + state_dict_ = {} + state_dict_[self._word_embeddings_key] = self.word_embeddings.state_dict( + prefix=prefix, keep_vars=keep_vars + ) + if self.position_embedding_type == PositionEmbeddingType.absolute: + state_dict_[self._position_embeddings_key] = ( + self.position_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars) + ) + if self.num_tokentypes > 0: + state_dict_[self._tokentype_embeddings_key] = ( + self.tokentype_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars) + ) + + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + + # Word embedding. + if self._word_embeddings_key in state_dict: + state_dict_ = state_dict[self._word_embeddings_key] + else: + # for backward compatibility. + state_dict_ = {} + for key in state_dict.keys(): + if "word_embeddings" in key: + state_dict_[key.split("word_embeddings.")[1]] = state_dict[key] + self.word_embeddings.load_state_dict(state_dict_, strict=strict) + + # Position embedding. + if self.position_embedding_type == PositionEmbeddingType.absolute: + if self._position_embeddings_key in state_dict: + state_dict_ = state_dict[self._position_embeddings_key] + else: + # for backward compatibility. + state_dict_ = {} + for key in state_dict.keys(): + if "position_embeddings" in key: + state_dict_[key.split("position_embeddings.")[1]] = state_dict[ + key + ] + self.position_embeddings.load_state_dict(state_dict_, strict=strict) + + # Tokentype embedding. + if self.num_tokentypes > 0: + state_dict_ = {} + if self._tokentype_embeddings_key in state_dict: + state_dict_ = state_dict[self._tokentype_embeddings_key] + else: + # for backward compatibility. + for key in state_dict.keys(): + if "tokentype_embeddings" in key: + state_dict_[key.split("tokentype_embeddings.")[1]] = state_dict[ + key + ] + if len(state_dict_.keys()) > 0: + self.tokentype_embeddings.load_state_dict(state_dict_, strict=strict) + else: + print( + "***WARNING*** expected tokentype embeddings in the " + "checkpoint but could not find it", + flush=True, + ) + + +class TransformerLanguageModel(MegatronModule): + """Transformer language model. + + Arguments: + transformer_hparams: transformer hyperparameters + vocab_size: vocabulary size + max_sequence_length: maximum size of sequence. This + is used for positional embedding + embedding_dropout_prob: dropout probability for embeddings + num_tokentypes: size of the token-type embeddings. 
0 value + will ignore this embedding + """ + + def __init__( + self, + init_method: Callable, + output_layer_init_method, + encoder_attn_mask_type, + num_tokentypes=0, + add_encoder=True, + add_decoder=False, + decoder_attn_mask_type=AttnMaskType.causal, + add_pooler=False, + pre_process=True, + post_process=True, + args=None, + model_type=None, + ): + super(TransformerLanguageModel, self).__init__() + assert args is not None + + self.pre_process = pre_process + self.post_process = post_process + self.hidden_size = args.hidden_size + self.num_tokentypes = num_tokentypes + self.init_method = init_method + self.add_encoder = add_encoder + self.encoder_attn_mask_type = encoder_attn_mask_type + self.add_decoder = add_decoder + self.decoder_attn_mask_type = decoder_attn_mask_type + self.add_pooler = add_pooler + self.encoder_hidden_state = None + + s = args.max_position_embeddings + ell = args.num_layers + v = args.padded_vocab_size + h = args.hidden_size + mlp_mult_term = 64 if args.glu_activation else 16 + + qkv_estimate = 6 * s * (h**2) + attention_mat_estimate = 2 * (s**2) * h + attention_vals_estimate = 2 * (s**2) * h + linear_proj_estimate = 2 * s * (h**2) + mlp_estimate = mlp_mult_term * s * h**2 + embedding_estimate = 6 * s * h * v + + per_layer_estimate = ( + qkv_estimate + + attention_mat_estimate + + attention_vals_estimate + + linear_proj_estimate + + mlp_estimate + ) + self.flop_estimate = ell * per_layer_estimate + embedding_estimate + + # Embeddings. + if self.pre_process: + self.embedding = Embedding( + self.hidden_size, + args.padded_vocab_size, + args.max_position_embeddings, + args.hidden_dropout if not args.lima_dropout else 0.0, + self.init_method, + self.num_tokentypes, + ) + self._embedding_key = "embedding" + + # Transformer. + # Encoder (usually set to True, False if part of an encoder-decoder + # architecture and in encoder-only stage). + if self.add_encoder: + self.encoder = megatron.model.transformer.ParallelTransformer( + self.init_method, + output_layer_init_method, + self_attn_mask_type=self.encoder_attn_mask_type, + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type, + ) + self._encoder_key = "encoder" + else: + self.encoder = None + + # Decoder (usually set to False, True if part of an encoder-decoder + # architecture and in decoder-only stage). + if self.add_decoder: + self.decoder = megatron.model.transformer.ParallelTransformer( + self.init_method, + output_layer_init_method, + layer_type=LayerType.decoder, + self_attn_mask_type=self.decoder_attn_mask_type, + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type, + ) + self._decoder_key = "decoder" + else: + self.decoder = None + + if self.post_process: + if self.add_pooler: + self.pooler = Pooler(self.hidden_size, self.init_method, args) + self._pooler_key = "pooler" + + # Classifiaction head. 
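+        # When embeddings and logits are not tied, the last stage owns a
+        # separate lm_head parameter holding this rank's
+        # [vocab_partition, hidden_size] slice, created and initialized below.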
+ self.tie_embed_logits = args.tie_embed_logits + if self.post_process and not self.tie_embed_logits: + # instantiate head + vocab_start_index, vocab_end_index = ( + VocabUtility.vocab_range_from_global_vocab_size( + args.padded_vocab_size, + get_tensor_model_parallel_rank(), + args.tensor_model_parallel_size, + ) + ) + num_embeds = vocab_end_index - vocab_start_index + data = torch.empty( + num_embeds, + self.hidden_size, + dtype=args.params_dtype, + device=( + None if args.use_cpu_initialization else torch.cuda.current_device() + ), + ) + self.lm_head = nn.Parameter(data) + self._lm_key = "lm_head" + init_method = ( + nn.init.xavier_uniform_ + if args.init_method_xavier_uniform + else nn.init.xavier_normal_ + ) + # init weights + if args.perform_initialization: + if args.use_cpu_initialization: + _initialize_affine_weight_cpu( + self.lm_head, + args.padded_vocab_size, + num_embeds, + 0, + init_method, + params_dtype=args.params_dtype, + ) + else: + _initialize_affine_weight_gpu( + self.lm_head, init_method, partition_dim=0, stride=1 + ) + + def set_input_tensor(self, input_tensor): + """See megatron.model.transformer.set_input_tensor()""" + + # This is usually handled in schedules.py but some inference code still + # gives us non-lists or None + if not isinstance(input_tensor, list): + input_tensor = [input_tensor] + + if self.add_encoder and self.add_decoder: + assert ( + len(input_tensor) == 1 + ), "input_tensor should only be length 1 for stage with both encoder and decoder" + self.encoder.set_input_tensor(input_tensor[0]) + elif self.add_encoder: + assert ( + len(input_tensor) == 1 + ), "input_tensor should only be length 1 for stage with only encoder" + self.encoder.set_input_tensor(input_tensor[0]) + elif self.add_decoder: + if len(input_tensor) == 2: + self.decoder.set_input_tensor(input_tensor[0]) + self.encoder_hidden_state = input_tensor[1] + elif len(input_tensor) == 1: + self.decoder.set_input_tensor(None) + self.encoder_hidden_state = input_tensor[0] + else: + raise Exception("input_tensor must have either length 1 or 2") + else: + raise Exception("Stage must have at least either encoder or decoder") + + def forward( + self, + enc_input_ids, + enc_position_ids, + enc_attn_mask, + dec_input_ids=None, + dec_position_ids=None, + dec_attn_mask=None, + enc_dec_attn_mask=None, + tokentype_ids=None, + inference_params=None, + pooling_sequence_index=0, + enc_hidden_states=None, + output_enc_hidden=False, + ): + args = megatron.get_args() + # Encoder embedding. + if self.pre_process: + encoder_input = self.embedding( + enc_input_ids, enc_position_ids, tokentype_ids=tokentype_ids + ) + else: + encoder_input = None + + if args.model_name == "gemma": + normalizer = torch.tensor(args.hidden_size**0.5, dtype=encoder_input.dtype) + encoder_input = encoder_input * normalizer + + # Run encoder. 
+ if args.freeze_layers: + with torch.no_grad(): + if enc_hidden_states is None: + if self.encoder is not None: + encoder_output = self.encoder( + encoder_input, + enc_attn_mask, + inference_params=inference_params, + position_ids=enc_position_ids, + ) + else: + encoder_output = self.encoder_hidden_state + else: + encoder_output = enc_hidden_states.to(encoder_input.dtype) + else: + if enc_hidden_states is None: + if self.encoder is not None: + encoder_output = self.encoder( + encoder_input, + enc_attn_mask, + inference_params=inference_params, + position_ids=enc_position_ids, + ) + else: + encoder_output = self.encoder_hidden_state + else: + encoder_output = enc_hidden_states.to(encoder_input.dtype) + + if self.post_process: + if self.add_pooler: + pooled_output = self.pooler(encoder_output, pooling_sequence_index) + + # output_enc_hidden refers to when we just need the encoder's + # output. For example, it is helpful to compute + # similarity between two sequences by average pooling + if not self.add_decoder or output_enc_hidden: + if self.add_pooler and self.post_process: + return encoder_output, pooled_output + else: + return encoder_output + + # Decoder embedding. + if self.pre_process: + decoder_input = self.embedding(dec_input_ids, dec_position_ids) + else: + decoder_input = None + + # Run decoder. + if args.freeze_layers: + with torch.no_grad(): + decoder_output = self.decoder( + decoder_input, + dec_attn_mask, + encoder_output=encoder_output, + enc_dec_attn_mask=enc_dec_attn_mask, + inference_params=inference_params, + ) + else: + decoder_output = self.decoder( + decoder_input, + dec_attn_mask, + encoder_output=encoder_output, + enc_dec_attn_mask=enc_dec_attn_mask, + inference_params=inference_params, + ) + + if self.add_pooler and self.post_process: + return decoder_output, encoder_output, pooled_output + else: + return decoder_output, encoder_output + + def state_dict_for_save_checkpoint(self, prefix="", keep_vars=False): + """For easy load.""" + + state_dict_ = {} + if self.pre_process: + state_dict_[self._embedding_key] = ( + self.embedding.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars + ) + ) + if self.add_encoder: + state_dict_[self._encoder_key] = ( + self.encoder.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars + ) + ) + if self.post_process: + if self.add_pooler: + state_dict_[self._pooler_key] = ( + self.pooler.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars + ) + ) + if not self.tie_embed_logits: + state_dict_[self._lm_key] = self.lm_head.data + if self.add_decoder: + state_dict_[self._decoder_key] = ( + self.decoder.state_dict_for_save_checkpoint( + prefix=prefix, keep_vars=keep_vars + ) + ) + + return state_dict_ + + def no_requires_grad(self, module): + if len(module._modules.keys()) != 0: + for submodule_name, submodule in module._modules.items(): + print(f"Enter {submodule_name}") + self.no_requires_grad(submodule) + else: + print(f"Setting {module._get_name()}.requires_grad = False") + module.requires_grad = False + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + args = megatron.get_args() + + # Embedding. + if self.pre_process: + if self._embedding_key in state_dict: + state_dict_ = state_dict[self._embedding_key] + else: + # for backward compatibility. + state_dict_ = {} + for key in state_dict.keys(): + if "_embeddings" in key: + state_dict_[key] = state_dict[key] + self.embedding.load_state_dict(state_dict_, strict=strict) + + # Classifiaction head. 
+ if self.post_process and not self.tie_embed_logits: + self.lm_head.data.copy_(state_dict[self._lm_key]) + + # Encoder. + if self.add_encoder: + if self._encoder_key in state_dict: + state_dict_ = state_dict[self._encoder_key] + # For backward compatibility. + elif "transformer" in state_dict: + state_dict_ = state_dict["transformer"] + else: + # For backward compatibility. + state_dict_ = {} + for key in state_dict.keys(): + if "transformer." in key: + state_dict_[key.split("transformer.")[1]] = state_dict[key] + + # For backward compatibility. + state_dict_self_attention = {} + for key in state_dict_.keys(): + if ".attention." in key: + state_dict_self_attention[ + key.replace(".attention.", ".self_attention.") + ] = state_dict_[key] + else: + state_dict_self_attention[key] = state_dict_[key] + state_dict_ = state_dict_self_attention + self.encoder.load_state_dict(state_dict_, strict=strict) + if args.freeze_layers: + self.no_requires_grad(self.encoder) + + if self.post_process: + if self.add_pooler: + assert ( + "pooler" in state_dict + ), "could not find data for pooler in the checkpoint" + self.pooler.load_state_dict(state_dict[self._pooler_key], strict=strict) + # Decoder. + if self.add_decoder: + assert ( + "decoder" in state_dict + ), "could not find data for pooler in the checkpoint" + self.decoder.load_state_dict(state_dict[self._decoder_key], strict=strict) + if args.freeze_layers: + self.no_requires_grad(self.decoder) diff --git a/multilinguality_megatron/megatron/model/llama_model.py b/multilinguality_megatron/megatron/model/llama_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ae8b9ae0bb14443505393ed5861e6c92a840c36b --- /dev/null +++ b/multilinguality_megatron/megatron/model/llama_model.py @@ -0,0 +1,43 @@ +"""Llama Model.""" + +import warnings + +from megatron import get_args +from .enums import PositionEmbeddingType +from . 
import GPTModel + + +class LlamaModel(GPTModel): + def __init__(self, + num_tokentypes: int = 0, + parallel_output: bool = True, + pre_process: bool = True, + post_process: bool = True, + model_type=None, + version: int = 2): + + args = get_args() + + # mandatory arguments + assert version in {1, 2}, f"Unknown llama version {version}" + assert args.position_embedding_type == PositionEmbeddingType.rotary, \ + f"Llama uses rotary embedding, not {args.position_embedding_type}" + assert not args.use_post_ln, "Llama does not use post_ln" + assert args.glu_activation == "swiglu", "Llama works with swiglu activation" + assert not args.use_bias, "Llama does not use bias" + assert not args.parallel_attn, "Llama does not use parallel_attn" + assert args.use_rms_norm, "Llama uses rms_norm" + assert not args.tie_embed_logits , "Llama unties embedding and lm_head weights" + + # recomended arguments + if args.bias_gelu_fusion: + warnings.warn("Llama is not intended to use bias_gelu_fusion") + if args.bias_dropout_fusion: + warnings.warn("Llama is not intended to use bias_dropout_fusion") + if args.hidden_dropout > 0.0 and not args.lima_dropout: + warnings.warn( "Llama is not intended to use dropout") + if args.attention_dropout > 0.0: + warnings.warn( "Llama is not intended to use dropout") + super().__init__(num_tokentypes=num_tokentypes, parallel_output=parallel_output, + pre_process=pre_process, post_process=post_process, + model_type=model_type) diff --git a/multilinguality_megatron/megatron/model/mistral_model.py b/multilinguality_megatron/megatron/model/mistral_model.py new file mode 100644 index 0000000000000000000000000000000000000000..5200692cb8c3ab99573ce82fc7dd4b0e86e14a45 --- /dev/null +++ b/multilinguality_megatron/megatron/model/mistral_model.py @@ -0,0 +1,56 @@ +"""Mistral Model.""" + +import warnings + +from megatron import get_args + +from . 
import GPTModel +from .enums import PositionEmbeddingType + + +class MistralModel(GPTModel): + def __init__( + self, + num_tokentypes: int = 0, + parallel_output: bool = True, + pre_process: bool = True, + post_process: bool = True, + model_type=None, + ): + args = get_args() + + # mandatory arguments + assert ( + args.position_embedding_type == PositionEmbeddingType.rotary + ), f"Mistral uses rotary embedding, not {args.position_embedding_type}" + assert not args.use_post_ln, "Mistral does not use post_ln" + assert args.glu_activation == "swiglu", "Mistral works with swiglu activation" + assert not args.use_bias, "Mistral does not use bias" + assert not args.parallel_attn, "Mistral does not use parallel_attn" + assert args.use_rms_norm, "Mistral uses rms_norm" + assert not args.tie_embed_logits, "Mistral unties embedding and lm_head weights" + assert ( + args.sliding_window_size == 4096 + ), "Mistral uses sliding window attention (sliding_window=4096)" + + # recomended arguments + if not args.use_flash_attn: + warnings.warn( + "Mistral should use flash attn (for sliding window local attention)" + ) + + if args.bias_gelu_fusion: + warnings.warn("Mistral is not intended to use bias_gelu_fusion") + if args.bias_dropout_fusion: + warnings.warn("Mistral is not intended to use bias_dropout_fusion") + if args.hidden_dropout > 0.0 and not args.lima_dropout: + warnings.warn("Mistral is not intended to use dropout") + if args.attention_dropout > 0.0: + warnings.warn("Mistral is not intended to use dropout") + super().__init__( + num_tokentypes=num_tokentypes, + parallel_output=parallel_output, + pre_process=pre_process, + post_process=post_process, + model_type=model_type, + ) diff --git a/multilinguality_megatron/megatron/model/module.py b/multilinguality_megatron/megatron/model/module.py new file mode 100644 index 0000000000000000000000000000000000000000..129b78186c5d434de2129a6b8901b8ef025804a5 --- /dev/null +++ b/multilinguality_megatron/megatron/model/module.py @@ -0,0 +1,202 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
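+# This module defines MegatronModule, which handles sharing of word-embedding
+# weights between the first and last pipeline stages, and Float16Module, a
+# wrapper that casts inputs to fp16/bf16 on the first stage and casts outputs
+# back to fp32 on the last stage.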
+ +"""Megatron Module""" + +import torch +from torch.autograd import Variable +from torch.nn.parameter import Parameter + +from megatron import get_args +from megatron.core import mpu, tensor_parallel + + +_FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor) +_HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor) +_BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor) + + + +def param_is_not_shared(param): + return not hasattr(param, 'shared') or not param.shared + + + +class MegatronModule(torch.nn.Module): + """Megatron specific extensions of torch Module with support + for pipelining.""" + + def __init__(self, share_word_embeddings=True): + super(MegatronModule, self).__init__() + self.share_word_embeddings = share_word_embeddings + + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """Use this function to override the state dict for + saving checkpoints.""" + return self.state_dict(prefix=prefix, keep_vars=keep_vars) + + + def word_embeddings_weight(self): + if self.pre_process: + if self.language_model.tie_embed_logits: + return self.language_model.embedding.word_embeddings.weight + return self.language_model.lm_head + else: + if not self.language_model.tie_embed_logits: + return self.language_model.lm_head + if not self.share_word_embeddings: + raise Exception('word_embeddings_weight() called for last ' + 'stage, but share_word_embeddings is false') + return self.word_embeddings.weight + + def initialize_word_embeddings(self, init_method_normal, args): + if not self.share_word_embeddings: + raise Exception('initialize_word_embeddings() was called but ' + 'share_word_embeddings is false') + + # This function just initializes the word embeddings in the final stage + # when we are using pipeline parallelism. Nothing to do if we aren't + # using pipeline parallelism. + if args.pipeline_model_parallel_size == 1: + return + + # Parameters are shared between the word embeddings layers, and the + # heads at the end of the model. In a pipelined setup with more than + # one stage, the initial embedding layer and the head are on different + # workers, so we do the following: + # 1. Create a second copy of word_embeddings on the last stage, with + # initial parameters of 0.0. + # 2. Do an all-reduce between the first and last stage to ensure that + # the two copies of word_embeddings start off with the same + # parameter values. + # 3. In the training loop, before an all-reduce between the grads of + # the two word_embeddings layers to ensure that every applied weight + # update is the same on both stages. + if mpu.is_pipeline_last_stage() and \ + not self.pre_process: + assert not mpu.is_pipeline_first_stage() + self._word_embeddings_for_head_key = 'word_embeddings_for_head' + # set word_embeddings weights to 0 here, then copy first + # stage's weights using all_reduce below. + self.word_embeddings = tensor_parallel.VocabParallelEmbedding( + args.padded_vocab_size, args.hidden_size, + init_method=init_method_normal(args.init_method_std), + params_dtype=args.params_dtype, + use_cpu_initialization=args.use_cpu_initialization, + perform_initialization=args.perform_initialization) + self.word_embeddings.weight.data.fill_(0) + self.word_embeddings.weight.shared = True + + # Zero out initial weights for decoder embedding. + # NOTE: We don't currently support T5 with the interleaved schedule. 
+ if not mpu.is_pipeline_first_stage(ignore_virtual=True) and \ + self.pre_process: + self.language_model.embedding.zero_parameters() + + if not torch.distributed.is_initialized(): + if not getattr(MegatronModule, "embedding_warning_printed", False): + print("WARNING! Distributed processes aren't initialized, so " + "word embeddings in the last layer are not initialized. " + "If you are just manipulating a model this is fine, but " + "this needs to be handled manually. If you are training " + "something is definitely wrong.") + MegatronModule.embedding_warning_printed = True + return + + # Ensure that first and last stages have the same initial parameter + # values. + if mpu.is_rank_in_embedding_group(): + torch.distributed.all_reduce(self.word_embeddings_weight().data, + group=mpu.get_embedding_group()) + + # Ensure that encoder(first stage) and decoder(split stage) position + # embeddings have the same initial parameter values + # NOTE: We don't currently support T5 with the interleaved schedule. + if mpu.is_rank_in_position_embedding_group() and \ + args.pipeline_model_parallel_split_rank is not None: + # TODO: Support tokentype embedding. + self.language_model.embedding.cuda() + position_embeddings = self.language_model.embedding.position_embeddings + torch.distributed.all_reduce(position_embeddings.weight.data, + group=mpu.get_position_embedding_group()) + + +def conversion_helper(val, conversion): + """Apply conversion to val. Recursively apply conversion if `val` + #is a nested tuple/list structure.""" + if not isinstance(val, (tuple, list)): + return conversion(val) + rtn = [conversion_helper(v, conversion) for v in val] + if isinstance(val, tuple): + rtn = tuple(rtn) + return rtn + + +def fp32_to_float16(val, float16_convertor): + """Convert fp32 `val` to fp16/bf16""" + def half_conversion(val): + val_typecheck = val + if isinstance(val_typecheck, (Parameter, Variable)): + val_typecheck = val.data + if isinstance(val_typecheck, _FLOAT_TYPES): + val = float16_convertor(val) + return val + return conversion_helper(val, half_conversion) + + +def float16_to_fp32(val): + """Convert fp16/bf16 `val` to fp32""" + def float_conversion(val): + val_typecheck = val + if isinstance(val_typecheck, (Parameter, Variable)): + val_typecheck = val.data + if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)): + val = val.float() + return val + return conversion_helper(val, float_conversion) + + + +class Float16Module(MegatronModule): + + def __init__(self, module, args): + super(Float16Module, self).__init__() + + if args.fp16: + self.add_module('module', module.half()) + def float16_convertor(val): + return val.half() + elif args.bf16: + self.add_module('module', module.bfloat16()) + def float16_convertor(val): + return val.bfloat16() + else: + raise Exception('should not be here') + + self.float16_convertor = float16_convertor + + + def set_input_tensor(self, input_tensor): + return self.module.set_input_tensor(input_tensor) + + + def forward(self, *inputs, **kwargs): + if mpu.is_pipeline_first_stage(): + inputs = fp32_to_float16(inputs, self.float16_convertor) + outputs = self.module(*inputs, **kwargs) + if mpu.is_pipeline_last_stage(): + outputs = float16_to_fp32(outputs) + return outputs + + + def state_dict(self, prefix='', keep_vars=False): + return self.module.state_dict(prefix=prefix, keep_vars=keep_vars) + + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + return self.module.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + + + def 
load_state_dict(self, state_dict, strict=True): + self.module.load_state_dict(state_dict, strict=strict) diff --git a/multilinguality_megatron/megatron/model/multiple_choice.py b/multilinguality_megatron/megatron/model/multiple_choice.py new file mode 100644 index 0000000000000000000000000000000000000000..45bcdd674fea02669a2bc7dc6ecdf008db0199fa --- /dev/null +++ b/multilinguality_megatron/megatron/model/multiple_choice.py @@ -0,0 +1,120 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Multiple choice model.""" + +import torch + +import megatron.model.language_model +from megatron import get_args, print_rank_last +from megatron.model.enums import AttnMaskType +from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids + +import megatron.model.utils +from megatron.model.utils import init_method_normal +from megatron.model.utils import scaled_init_method_normal +from .module import MegatronModule + + +class MultipleChoice(MegatronModule): + def __init__(self, + num_tokentypes=2, + pre_process=True, + post_process=True, + model_type=None): + super(MultipleChoice, self).__init__(share_word_embeddings=False) + + args = get_args() + assert model_type is not None + + init_method = init_method_normal(args.init_method_std) + self.pre_process = pre_process + self.post_process = post_process + self.language_model, self._language_model_key = megatron.model.language_model.get_language_model( + num_tokentypes=num_tokentypes, + add_pooler=True, + encoder_attn_mask_type=AttnMaskType.padding, + init_method=init_method, + scaled_init_method=scaled_init_method_normal(args.init_method_std, + args.num_layers), + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type) + + # Multi-choice head. + if self.post_process: + self.multichoice_dropout = torch.nn.Dropout(args.hidden_dropout) + self.multichoice_head = megatron.model.utils.get_linear_layer(args.hidden_size, + 1, + init_method, + args.perform_initialization) + self._multichoice_head_key = 'multichoice_head' + + def set_input_tensor(self, input_tensor): + """See megatron.model.transformer.set_input_tensor()""" + self.language_model.set_input_tensor(input_tensor) + + def forward(self, model_input, attention_mask, tokentype_ids=None): + + # [batch, choices, sequence] --> [batch * choices, sequence] --> + # transformer --> [batch, choices] --> softmax + + # Ensure the shape is [batch-size, choices, sequence] + assert len(attention_mask.shape) == 3 + num_choices = attention_mask.shape[1] + + # Reshape and treat choice dimension the same as batch. + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) + extended_attention_mask = bert_extended_attention_mask(attention_mask) + + input_ids = model_input + # Do the same as attention_mask for input_ids, tokentype_ids + assert len(input_ids.shape) == 3 + assert len(tokentype_ids.shape) == 3 + input_ids = input_ids.view(-1, input_ids.size(-1)) + tokentype_ids = tokentype_ids.view(-1, tokentype_ids.size(-1)) + position_ids = bert_position_ids(input_ids) + + lm_output = self.language_model( + input_ids, + position_ids, + extended_attention_mask, + tokentype_ids=tokentype_ids + ) + if self.post_process: + _, pooled_output = lm_output + multichoice_output = self.multichoice_dropout(pooled_output) + multichoice_logits = self.multichoice_head(multichoice_output) + + # Reshape back to separate choices. 
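+            # [batch * choices, 1] -> [batch, choices]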
+ multichoice_logits = multichoice_logits.view(-1, num_choices) + + return multichoice_logits + return lm_output + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """For easy load when model is combined with other heads, + add an extra key.""" + + state_dict_ = {} + state_dict_[self._language_model_key] \ + = self.language_model.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + if self.post_process: + state_dict_[self._multichoice_head_key] \ + = self.multichoice_head.state_dict(prefix=prefix, keep_vars=keep_vars) + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + + self.language_model.load_state_dict( + state_dict[self._language_model_key], strict=strict) + if self.post_process: + if self._multichoice_head_key in state_dict: + self.multichoice_head.load_state_dict( + state_dict[self._multichoice_head_key], strict=strict) + else: + print_rank_last('***WARNING*** could not find {} in the checkpoint, ' + 'initializing to random'.format( + self._multichoice_head_key)) diff --git a/multilinguality_megatron/megatron/model/positional_embeddings.py b/multilinguality_megatron/megatron/model/positional_embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..1cc9d3fbd82e62d5c7b3e97ca4e9fed2b51e76d9 --- /dev/null +++ b/multilinguality_megatron/megatron/model/positional_embeddings.py @@ -0,0 +1,51 @@ +# Extracted from: https://github.com/facebookresearch/llama + +from typing import Optional +import torch + + +def precompute_freqs_cis( + dim: int, end: int, theta: float = 10000.0, scaling_factor: float = 1.0 +) -> torch.Tensor: + freqs = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim)) + t = torch.arange(end, device=freqs.device).float() / scaling_factor # type: ignore + freqs = torch.outer(t, freqs).float() # type: ignore + return torch.polar(torch.ones_like(freqs), freqs) # complex64 + + +def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor) -> torch.Tensor: + ndim = x.ndim + assert 0 <= 1 < ndim + assert freqs_cis.shape == (x.shape[0], x.shape[-1]) + shape = [d if i == 0 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def apply_rotary_emb( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, + position_ids: Optional[torch.Tensor] = None, +) -> tuple[torch.Tensor, torch.Tensor]: + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + + freqs_cis = freqs_cis.to(xq.device) + if position_ids is None: + # we assume position_ids to be torch.arange(seq_len) + freqs_cis = reshape_for_broadcast(freqs_cis, xq_) + # freqs_cis: [seq_len, 1, 1, head_dim//2] (complex64) + else: + # use specified position_ids, possibly not monotonically increasing + # tensor shapes & tpyes: + # xq_: [seq_len, batch_size, heads, head_dim//2] (complex64) + # position_ids: [batch_size, seq_len] (long) + position_ids = position_ids.to(xq.device) # normally already on correct device + assert position_ids.shape == (xq_.shape[1], xq_.shape[0]) + assert (freqs_cis.shape[1] == xq_.shape[-1]) + freqs_cis = freqs_cis[position_ids].transpose(0, 1).unsqueeze(-2) + # freqs_cis: [seq_len, batch_size, 1, head_dim//2] (complex64) + + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq), xk_out.type_as(xk) diff --git 
a/multilinguality_megatron/megatron/model/t5_model.py b/multilinguality_megatron/megatron/model/t5_model.py new file mode 100644 index 0000000000000000000000000000000000000000..d2a85d3679ac660d5665456bec4b973c946e6c69 --- /dev/null +++ b/multilinguality_megatron/megatron/model/t5_model.py @@ -0,0 +1,198 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""T5 model.""" + +import torch + +import megatron.model.language_model +from megatron import get_args +from megatron.core import tensor_parallel +from megatron.model.enums import AttnMaskType +from megatron.model.language_model import parallel_lm_logits + +from megatron.model.utils import ( + init_method_normal, + scaled_init_method_normal +) +from .module import MegatronModule + + +def t5_extended_attention_mask(attention_mask_list): + + def attn_mask_postprocess(attn_mask): + # [b, 1, s, s] + extended_attention_mask = attn_mask.unsqueeze(1) + return extended_attention_mask + + return [attn_mask_postprocess(attn_mask) for attn_mask in attention_mask_list] + + +def t5_position_ids(token_ids): + # Create position ids + seq_length = token_ids.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, + device=token_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(token_ids) + + return position_ids + + +class T5LMHead(MegatronModule): + """Masked LM head for T5 + + Arguments: + mpu_vocab_size: model parallel size of vocabulary. + hidden_size: hidden size + init_method: init method for weight initialization + layernorm_epsilon: tolerance for layer norm divisions + parallel_output: wether output logits being distributed or not. + """ + + def __init__(self, mpu_vocab_size, parallel_output): + super(T5LMHead, self).__init__() + + args = get_args() + + self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size)) + self.bias.model_parallel = True + self.bias.partition_dim = 0 + self.bias.stride = 1 + self.parallel_output = parallel_output + + def forward(self, hidden_states, word_embeddings_weight): + output = parallel_lm_logits(hidden_states, + word_embeddings_weight, + self.parallel_output, + bias=self.bias) + return output + + +class T5Model(MegatronModule): + """T5 Language model.""" + + def __init__(self, + num_tokentypes=0, + parallel_output=True, + pre_process=True, + post_process=True, + add_encoder=True, + add_decoder=True, + model_type=None): + super(T5Model, self).__init__() + args = get_args() + + self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy + self.parallel_output = parallel_output + init_method = init_method_normal(args.init_method_std) + scaled_init_method = scaled_init_method_normal(args.init_method_std, + args.num_layers) + self.pre_process = pre_process + self.post_process = post_process + self.add_encoder = add_encoder + self.add_decoder = add_decoder + + self.language_model, self._language_model_key = megatron.model.language_model.get_language_model( + num_tokentypes=num_tokentypes, + add_pooler=False, + add_encoder=add_encoder, + add_decoder=add_decoder, + encoder_attn_mask_type=AttnMaskType.padding, + init_method=init_method, + scaled_init_method=scaled_init_method, + pre_process=self.pre_process, + post_process=self.post_process, + args=args, + model_type=model_type) + + self.initialize_word_embeddings(init_method_normal, args) + + if self.post_process and self.add_decoder: + self.lm_head = T5LMHead( + self.word_embeddings_weight().size(0), + parallel_output) + self._lm_head_key = 'lm_head' + + def set_input_tensor(self, input_tensor): + """See 
megatron.model.transformer.set_input_tensor()""" + self.language_model.set_input_tensor(input_tensor) + + def forward(self, encoder_input_ids, decoder_input_ids, encoder_attn_mask, + decoder_attn_mask, encoder_decoder_attn_mask, + tokentype_ids=None, lm_labels=None, enc_hidden_states=None): + + # Converting the attention masks to proper parameter settings + encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask = t5_extended_attention_mask( + [encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask]) + + encoder_position_ids = t5_position_ids(encoder_input_ids) + decoder_position_ids = t5_position_ids(decoder_input_ids) + + lm_output = self.language_model(encoder_input_ids, + encoder_position_ids, + encoder_attn_mask, + decoder_input_ids, + decoder_position_ids, + decoder_attn_mask, + encoder_decoder_attn_mask, + tokentype_ids=tokentype_ids, + enc_hidden_states=enc_hidden_states) + + if self.post_process and self.add_decoder: + decoder_output, encoder_output = lm_output + # Output. [s, b, h] + lm_logits = self.lm_head(decoder_output, + self.word_embeddings_weight()) + + if lm_labels is None: + # [s b h] => [b s h] + return lm_logits.transpose(0,1).contiguous() + else: + # [b s] => [s b] + lm_labels = lm_labels.transpose(0, 1).contiguous() + if self.fp16_lm_cross_entropy: + assert lm_logits.dtype == torch.half + lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels) + else: + lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(), + lm_labels) + # [s b] => [b s] + lm_loss = lm_loss.transpose(0,1).contiguous() + return lm_loss + elif self.add_decoder and not self.add_encoder: + decoder_output, encoder_output = lm_output + return decoder_output + else: + encoder_output = lm_output + return encoder_output + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """For easy load when model is combined with other heads, + add an extra key.""" + + state_dict_ = {} + state_dict_[self._language_model_key] \ + = self.language_model.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + if self.post_process and self.add_decoder: + state_dict_[self._lm_head_key] \ + = self.lm_head.state_dict_for_save_checkpoint(prefix=prefix, + keep_vars=keep_vars) + # Save word_embeddings. + if self.post_process and not self.pre_process and self.add_decoder: + state_dict_[self._word_embeddings_for_head_key] \ + = self.word_embeddings.state_dict(prefix=prefix, + keep_vars=keep_vars) + return state_dict_ + + def load_state_dict(self, state_dict, strict=True): + """Customized load.""" + + self.language_model.load_state_dict( + state_dict[self._language_model_key], strict=strict) + if self.post_process and self.add_decoder: + self.lm_head.load_state_dict(state_dict[self._lm_head_key], + strict=strict) + # Load word embeddings. + if self.post_process and not self.pre_process and self.add_decoder: + self.word_embeddings.load_state_dict( + state_dict[self._word_embeddings_for_head_key], strict=strict) diff --git a/multilinguality_megatron/megatron/model/transformer.py b/multilinguality_megatron/megatron/model/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..793157f54c37cb3082ebc9f8cd6ef5c56de07370 --- /dev/null +++ b/multilinguality_megatron/megatron/model/transformer.py @@ -0,0 +1,1534 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
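+# Note: flash_attn is imported unconditionally below, so this module requires
+# the flash-attn package to be installed even when args.use_flash_attn is off.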
+ +"""Transformer.""" +import math +from contextlib import nullcontext +from typing import Callable + +import flash_attn +import torch +from einops import rearrange +from flash_attn.bert_padding import pad_input, unpad_input_for_concatenated_sequences +from torch.nn import functional as F + +import megatron.core +from megatron import core, get_num_microbatches +from megatron.model import LayerNorm, RMSNorm +from megatron.model.enums import ( + AttnMaskType, + AttnType, + LayerType, + ModelType, + PositionEmbeddingType, +) +from megatron.model.fused_bias_gelu import bias_gelu_impl +from megatron.model.fused_softmax import FusedScaleMaskSoftmax +from megatron.model.positional_embeddings import apply_rotary_emb, precompute_freqs_cis +from megatron.model.utils import attention_mask_func, erf_gelu + +# Extracted from: https://github.com/bigscience-workshop/Megatron-DeepSpeed +from .glu_activations import GLU_ACTIVATIONS +from .module import MegatronModule + +""" We use the following notation throughout this file: + h: hidden size + n: number of attention heads + p: number of model parallel partitions + np: n/p + hp: h/p + hn: h/n + b: batch size + s: sequence length + l: number of layers + Transformer takes input of size [s, b, h] and returns a + tensor of the same size. We use the following arguments: + hyperparameters: transformer hyperparameters +""" + + +class DropPath(MegatronModule): + """Drop paths (Stochastic Depth) per sample + (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=0.0): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_state): + if self.drop_prob == 0.0 or not self.training: + return hidden_state + keep_prob = 1 - self.drop_prob + # work with diff dim tensors, not just 2D ConvNets + # hidden_state: [s, b, h] + shape = (1,) + (hidden_state.shape[1],) + (1,) * (hidden_state.ndim - 2) + random_tensor = keep_prob + torch.rand( + shape, dtype=hidden_state.dtype, device=hidden_state.device + ) + random_tensor.floor_() # binarize + output = hidden_state.div(keep_prob) * random_tensor + return output + + +def _args_to_kwargs(args): + common_kwargs = { + "params_dtype": args.params_dtype, + "use_cpu_initialization": args.use_cpu_initialization, + "perform_initialization": args.perform_initialization, + "gradient_accumulation_fusion": args.gradient_accumulation_fusion, + "sequence_parallel_enabled": args.sequence_parallel, + } + return common_kwargs + + +class ParallelMLP(MegatronModule): + """MLP. + + MLP will take the input with h hidden state, project it to 4*h + hidden dimension, perform nonlinear transformation, and project the + state back into h hidden dimension. + """ + + def __init__(self, init_method, output_layer_init_method, args, world_size): + super(ParallelMLP, self).__init__() + # Project to 4h. + self.dense_h_to_4h = megatron.core.tensor_parallel.ColumnParallelLinear( + args.hidden_size, + # GLU is a special activation that divides the dimension by a factor 2. 
+ 2 * args.ffn_hidden_size if args.glu_activation else args.ffn_hidden_size, + bias=args.use_bias, + gather_output=False, + init_method=init_method, + skip_bias_add=True, + async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce, + **_args_to_kwargs(args), + world_size=world_size, + ) + self.use_bias = args.use_bias + + self.bias_gelu_fusion = args.bias_gelu_fusion + + if args.glu_activation: + self.activation_func = GLU_ACTIVATIONS[args.glu_activation] + elif args.onnx_safe: + self.activation_func = erf_gelu + else: + self.activation_func = F.gelu + + # Project back to h. + self.dense_4h_to_h = megatron.core.tensor_parallel.RowParallelLinear( + args.ffn_hidden_size, + args.hidden_size, + bias=args.use_bias, + input_is_parallel=True, + init_method=output_layer_init_method, + skip_bias_add=True, + **_args_to_kwargs(args), + world_size=world_size, + ) + + def forward(self, hidden_states): + # [s, b, 4hp] + intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states) + + if self.bias_gelu_fusion: + intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel) + elif self.use_bias: + intermediate_parallel = self.activation_func( + intermediate_parallel + bias_parallel + ) + else: + intermediate_parallel = self.activation_func(intermediate_parallel) + + # [s, b, h] + output, output_bias = self.dense_4h_to_h(intermediate_parallel) + return output, output_bias + + +class CoreAttention(MegatronModule): + def __init__( + self, + layer_number, + attn_mask_type=AttnMaskType.padding, + args=None, + world_size=None, + ): + super(CoreAttention, self).__init__() + self.fp16 = args.fp16 + self.bf16 = args.bf16 + + self.apply_query_key_layer_scaling = args.apply_query_key_layer_scaling + self.attention_softmax_in_fp32 = args.attention_softmax_in_fp32 + if self.apply_query_key_layer_scaling: + self.attention_softmax_in_fp32 = True + self.layer_number = max(1, layer_number) + self.attn_mask_type = attn_mask_type + self.sequence_parallel = args.sequence_parallel + + projection_size = args.kv_channels * args.num_attention_heads + + # Per attention head and per partition values. + self.hidden_size_per_partition = core.utils.divide(projection_size, world_size) + self.hidden_size_per_attention_head = core.utils.divide( + projection_size, args.num_attention_heads + ) + self.num_attention_heads_per_partition = core.utils.divide( + args.num_attention_heads, world_size + ) + + coeff = None + self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) + if self.apply_query_key_layer_scaling: + coeff = self.layer_number + self.norm_factor *= coeff + + self.scale_mask_softmax = FusedScaleMaskSoftmax( + self.fp16, + self.bf16, + self.attn_mask_type, + args.masked_softmax_fusion, + attention_mask_func, + self.attention_softmax_in_fp32, + coeff, + ) + + # Dropout. Note that for a single iteration, this layer will generate + # different outputs on different number of parallel partitions but + # on average it should not be partition dependent. + self.attention_dropout = torch.nn.Dropout(args.attention_dropout) + + def forward(self, query_layer, key_layer, value_layer, attention_mask): + # =================================== + # Raw attention scores. 
[b, np, s, s] + # =================================== + + # [b, np, sq, sk] + output_size = ( + query_layer.size(1), + query_layer.size(2), + query_layer.size(0), + key_layer.size(0), + ) + + # [sq, b, np, hn] -> [sq, b * np, hn] + query_layer = query_layer.view( + output_size[2], output_size[0] * output_size[1], -1 + ) + # [sk, b, np, hn] -> [sk, b * np, hn] + key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) + + # preallocting input tensor: [b * np, sq, sk] + matmul_input_buffer = megatron.core.mpu.get_global_memory_buffer().get_tensor( + (output_size[0] * output_size[1], output_size[2], output_size[3]), + query_layer.dtype, + "mpu", + ) + + # Raw attention scores. [b * np, sq, sk] + matmul_result = torch.baddbmm( + matmul_input_buffer, + query_layer.transpose(0, 1), # [b * np, sq, hn] + key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=(1.0 / self.norm_factor), + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + # =========================== + # Attention probs and dropout + # =========================== + + # attention scores and attention mask [b, np, sq, sk] + attention_probs = self.scale_mask_softmax(attention_scores, attention_mask) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + + if not self.sequence_parallel: + with megatron.core.tensor_parallel.get_cuda_rng_tracker().fork(): + attention_probs = self.attention_dropout(attention_probs) + else: + attention_probs = self.attention_dropout(attention_probs) + + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value_layer -> context layer. + # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = ( + value_layer.size(1), + value_layer.size(2), + query_layer.size(0), + value_layer.size(3), + ) + + # change view [sk, b * np, hn] + value_layer = value_layer.view( + value_layer.size(0), output_size[0] * output_size[1], -1 + ) + + # change view [b * np, sq, sk] + attention_probs = attention_probs.view( + output_size[0] * output_size[1], output_size[2], -1 + ) + + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + + # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + + # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + ( + self.hidden_size_per_partition, + ) + context_layer = context_layer.view(*new_context_layer_shape) + return context_layer + + +class ParallelAttention(MegatronModule): + """Parallel self-attention layer abstract class. + + Self-attention layer takes input with size [s, b, h] + and returns output of the same size. 
+ """ + + def __init__( + self, + init_method, + output_layer_init_method, + layer_number, + attention_type=AttnType.self_attn, + attn_mask_type=AttnMaskType.padding, + world_size: int = None, + args=None, + ): + super(ParallelAttention, self).__init__() + assert world_size is not None + + self.layer_number = max(1, layer_number) + self.attention_type = attention_type + self.attn_mask_type = attn_mask_type + self.params_dtype = args.params_dtype + self.sequence_parallel = args.sequence_parallel + self.use_flash_attn = args.use_flash_attn + self.sliding_window_size = args.sliding_window_size + self.num_attention_heads_kv = args.num_attention_heads_kv + self.num_attention_heads = args.num_attention_heads + self.seq_length = args.seq_length + if self.use_flash_attn: + assert attention_type == AttnType.self_attn, ( + "FlashAttention code path only supports " "self-attention for now" + ) + assert self.attn_mask_type == AttnMaskType.causal, ( + "FlashAttention code path only " "supports causal mask for now" + ) + # If sliding window is enabled, we need to make sure that the sliding window is supported. + if self.sliding_window_size is not None: + import inspect + + # https://github.com/huggingface/transformers/blob/7e1eff7600085814eac65876d4d8a0e38c2f6ccc/src/transformers/models/mistral/modeling_mistral.py#L50C5-L50C32 + assert "window_size" in list( + inspect.signature(flash_attn.flash_attn_func).parameters + ), "The current flash attention version does not support sliding window attention, please update to the latest version." + assert ( + self.use_flash_attn + ), "Sliding window attention is only supported with flash attention for now." + projection_size = args.kv_channels * args.num_attention_heads + qkv_projection_size = ( + args.kv_channels * args.num_attention_heads + + 2 * args.kv_channels * args.num_attention_heads_kv + ) + + # Per attention head and per partition values. + self.hidden_size_per_attention_head = core.utils.divide( + projection_size, args.num_attention_heads + ) + + self.num_attention_heads_per_partition = core.utils.divide( + args.num_attention_heads, world_size + ) + # Strided linear layer. + if attention_type == AttnType.self_attn: + self.query_key_value = megatron.core.tensor_parallel.ColumnParallelLinear( + args.hidden_size, + qkv_projection_size, + bias=args.use_bias, + gather_output=False, + init_method=init_method, + async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce, + **_args_to_kwargs(args), + world_size=world_size, + ) + else: + assert attention_type == AttnType.cross_attn + self.query = megatron.core.tensor_parallel.ColumnParallelLinear( + args.hidden_size, + projection_size, + bias=args.use_bias, + gather_output=False, + init_method=init_method, + async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce, + **_args_to_kwargs(args), + world_size=world_size, + ) + + self.key_value = megatron.core.tensor_parallel.ColumnParallelLinear( + args.hidden_size, + 2 * projection_size, + bias=args.use_bias, + gather_output=False, + init_method=init_method, + async_tensor_model_parallel_allreduce=args.async_tensor_model_parallel_allreduce, + **_args_to_kwargs(args), + world_size=world_size, + ) + self.core_attention = CoreAttention( + self.layer_number, self.attn_mask_type, args, world_size + ) + self.checkpoint_core_attention = args.recompute_granularity == "selective" + + if self.use_flash_attn: + self.core_attention_flash = flash_attn.flash_attn_func + + # Output. 
+ self.dense = megatron.core.tensor_parallel.RowParallelLinear( + projection_size, + args.hidden_size, + bias=args.use_bias, + input_is_parallel=True, + init_method=output_layer_init_method, + skip_bias_add=True, + **_args_to_kwargs(args), + world_size=world_size, + ) + + self.position_embedding_type = args.position_embedding_type + if self.position_embedding_type == PositionEmbeddingType.rotary: + self.freqs_cis = precompute_freqs_cis( + dim=args.kv_channels, + end=self.seq_length, + theta=args.rope_theta, + scaling_factor=args.rope_scaling_factor, + ) + + def _checkpointed_attention_forward( + self, query_layer, key_layer, value_layer, attention_mask + ): + """Forward method with activation checkpointing.""" + + def custom_forward(*inputs): + query_layer = inputs[0] + key_layer = inputs[1] + value_layer = inputs[2] + attention_mask = inputs[3] + output_ = self.core_attention( + query_layer, key_layer, value_layer, attention_mask + ) + return output_ + + hidden_states = megatron.core.tensor_parallel.checkpoint( + custom_forward, False, query_layer, key_layer, value_layer, attention_mask + ) + + return hidden_states + + def _allocate_memory(self, inference_max_sequence_len, batch_size): + return torch.empty( + inference_max_sequence_len, + batch_size, + self.num_attention_heads_per_partition, + self.hidden_size_per_attention_head, + dtype=self.params_dtype, + device=torch.cuda.current_device(), + ) + + def forward( + self, + hidden_states, + attention_mask, + encoder_output=None, + inference_params=None, + position_ids=None, + ): + # hidden_states: [sq, b, h] + + # ================================================= + # Pre-allocate memory for key-values for inference. + # ================================================= + if inference_params: + if self.layer_number not in inference_params.key_value_memory_dict: + inf_max_seq_len = inference_params.max_sequence_len + inf_max_batch_size = inference_params.max_batch_size + inference_key_memory = self._allocate_memory( + inf_max_seq_len, inf_max_batch_size + ) + inference_value_memory = self._allocate_memory( + inf_max_seq_len, inf_max_batch_size + ) + inference_params.key_value_memory_dict[self.layer_number] = ( + inference_key_memory, + inference_value_memory, + ) + else: + ( + inference_key_memory, + inference_value_memory, + ) = inference_params.key_value_memory_dict[self.layer_number] + + # ===================== + # Query, Key, and Value + # ===================== + + if self.attention_type == AttnType.self_attn: + # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] + mixed_x_layer, _ = self.query_key_value(hidden_states) + + # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn] + sq, b = mixed_x_layer.shape[:2] + # , we simply expand smaller keys and values tensors to have the usual shapes and then + # feed those tensor to the standard attention/flash attention + qkv = mixed_x_layer.view( + sq, + b, + -1, + self.num_attention_heads // self.num_attention_heads_kv + 2, + self.hidden_size_per_attention_head, + ) + query_layer = qkv[:, :, :, :-2] + key_layer = qkv[:, :, :, [-2]] + value_layer = qkv[:, :, :, [-1]] + key_layer = torch.broadcast_to(key_layer, query_layer.shape) + value_layer = torch.broadcast_to(value_layer, query_layer.shape) + query_layer, key_layer, value_layer = [ + rearrange( + x, + "seq_len batch group num_heads head_dim -> seq_len batch (group num_heads) head_dim", + head_dim=self.hidden_size_per_attention_head, + ) + for x in [query_layer, key_layer, value_layer] + ] + else: + # Attention heads [sk, b, h] --> [sk, b, (np 
* 2 * hn)] + mixed_kv_layer, _ = self.key_value(encoder_output) + + # [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn] + new_tensor_shape = mixed_kv_layer.size()[:-1] + ( + self.num_attention_heads_per_partition, + 2 * self.hidden_size_per_attention_head, + ) + mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape) + + # [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn] + ( + key_layer, + value_layer, + ) = megatron.core.tensor_parallel.split_tensor_along_last_dim( + mixed_kv_layer, 2 + ) + + # Attention head [sq, b, h] --> [sq, b, hp] + query_layer, _ = self.query(hidden_states) + # [sq, b, hp] --> [sq, b, np, hn] + new_tensor_shape = query_layer.size()[:-1] + ( + self.num_attention_heads_per_partition, + self.hidden_size_per_attention_head, + ) + query_layer = query_layer.view(*new_tensor_shape) + + # ================================== + # Adjust key and value for inference + # ================================== + + if inference_params: + batch_start = inference_params.batch_size_offset + batch_end = batch_start + key_layer.size(1) + assert batch_end <= inference_key_memory.size(1) + sequence_start = inference_params.sequence_len_offset + sequence_end = sequence_start + key_layer.size(0) + assert sequence_end <= inference_key_memory.size(0) + # Copy key and values. + inference_key_memory[ + sequence_start:sequence_end, batch_start:batch_end, ... + ] = key_layer + inference_value_memory[ + sequence_start:sequence_end, batch_start:batch_end, ... + ] = value_layer + key_layer = inference_key_memory[:sequence_end, batch_start:batch_end, ...] + value_layer = inference_value_memory[ + :sequence_end, batch_start:batch_end, ... + ] + + # ================================== + # Rotary embeddings + # ================================== + if self.position_embedding_type == PositionEmbeddingType.rotary: + query_layer, key_layer = apply_rotary_emb( + query_layer, key_layer, self.freqs_cis, position_ids=position_ids + ) + + # ================================== + # core attention computation + # ================================== + + if not self.use_flash_attn: + if self.checkpoint_core_attention: + context_layer = self._checkpointed_attention_forward( + query_layer, key_layer, value_layer, attention_mask + ) + else: + context_layer = self.core_attention( + query_layer, key_layer, value_layer, attention_mask + ) + else: + flash_attn_extra_kwargs = {} + # check if we need to use sliding window attention + # https://github.com/huggingface/transformers/blob/7ee995fd9c692761c4601ddbffa2ac2ec9f27b0b/src/transformers/models/mistral/modeling_mistral.py#L353 + if self.sliding_window_size is not None: + kv_seq_len = key_layer.shape[0] + if kv_seq_len > self.sliding_window_size: + # https://github.com/huggingface/transformers/blob/7ee995fd9c692761c4601ddbffa2ac2ec9f27b0b/src/transformers/models/mistral/modeling_mistral.py#L510C21-L510C89 + flash_attn_extra_kwargs["window_size"] = ( + self.sliding_window_size, + self.sliding_window_size, + ) + # It will be truncated to the actual sequence length inside flash attention + # https://github.com/Dao-AILab/flash-attention/blob/83aef842beec1037eb8c1d9c3ef3ed8aae80b091/csrc/flash_attn/src/softmax.h#L159-L161 + q, k, v = [ + rearrange(x, "s b n h -> b s n h").contiguous() + for x in (query_layer, key_layer, value_layer) + ] + if not self.sequence_parallel: + with megatron.core.tensor_parallel.get_cuda_rng_tracker().fork(): + context_layer = self.core_attention_flash( + q, k, v, causal=True, **flash_attn_extra_kwargs + ) + else: + context_layer = self.core_attention_flash( + q, k, v, 
causal=True, **flash_attn_extra_kwargs + ) + context_layer = rearrange( + context_layer, "b s n h -> s b (n h)" + ).contiguous() + + # ================= + # Output. [sq, b, h] + # ================= + # print(self.dense) + output, bias = self.dense(context_layer) + return output, bias + + +def dropout_add(x, residual, prob, training): + out = torch.nn.functional.dropout(x, p=prob, training=training) + out = residual + out + return out + + +def bias_dropout_add(x, bias, residual, prob, training): + # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor + out = torch.nn.functional.dropout(x + bias, p=prob, training=training) + out = residual + out + return out + + +def get_bias_dropout_add(training): + def _bias_dropout_add(x, bias, residual, prob): + return bias_dropout_add(x, bias, residual, prob, training) + + return _bias_dropout_add + + +def dropout_add(x, residual, prob, training): + out = torch.nn.functional.dropout(x, p=prob, training=training) + out = residual + out + return out + + +def get_dropout_add(training): + def _dropout_add(x, residual, prob): + return dropout_add(x, residual, prob, training) + + return _dropout_add + + +@torch.jit.script +def bias_dropout_add_fused_train( + x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float +) -> torch.Tensor: + return bias_dropout_add(x, bias, residual, prob, True) + + +@torch.jit.script +def bias_dropout_add_fused_inference( + x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float +) -> torch.Tensor: + return bias_dropout_add(x, bias, residual, prob, False) + + +class ParallelTransformerLayer(MegatronModule): + """A single transformer layer. + Transformer layer takes input with size [s, b, h] and returns an + output of the same size. + """ + + def __init__( + self, + init_method: Callable, + output_layer_init_method: Callable, + layer_number: int, + layer_type=LayerType.encoder, + self_attn_mask_type=AttnMaskType.padding, + drop_path_rate: float = 0.0, + world_size: int = None, + hidden_dropout: float = 0.0, + args=None, + ): + super(ParallelTransformerLayer, self).__init__() + + self.model_name = args.model_name + self.hidden_size = args.hidden_size + self.layer_number = layer_number + self.layer_type = layer_type + self.apply_residual_connection_post_layernorm = ( + args.apply_residual_connection_post_layernorm + ) + self.bf16 = args.bf16 + self.fp32_residual_connection = args.fp32_residual_connection + self.parallel_layernorm = args.parallel_layernorm + if args.model_name == 'gemma': + gemma = True + else: + gemma = False + + # Layernorm on the input data. 
+ if args.use_rms_norm: + if args.freeze_layers: + with torch.no_grad(): + self.input_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + self.output_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + else: + self.input_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + self.output_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + if self.parallel_layernorm: + if args.freeze_layers: + with torch.no_grad(): + self.mlp_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + else: + self.mlp_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + else: + self.input_layernorm = LayerNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + no_persist_layer_norm=args.no_persist_layer_norm, + sequence_parallel=args.sequence_parallel, + ) + self.output_layernorm = LayerNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + no_persist_layer_norm=args.no_persist_layer_norm, + sequence_parallel=args.sequence_parallel, + ) + if self.parallel_layernorm: + self.mlp_layernorm = LayerNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + no_persist_layer_norm=args.no_persist_layer_norm, + sequence_parallel=args.sequence_parallel, + ) + self.use_post_ln = args.use_post_ln + if args.use_post_ln: + self.input_layernorm = torch.nn.Identity() + else: + self.output_layernorm = torch.nn.Identity() + + # Self attention. + self.self_attention = ParallelAttention( + init_method, + output_layer_init_method, + layer_number, + attention_type=AttnType.self_attn, + attn_mask_type=self_attn_mask_type, + world_size=world_size, + args=args, + ) + self.hidden_dropout = hidden_dropout + self.bias_dropout_fusion = args.bias_dropout_fusion + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else None + self.parallel_attn = args.parallel_attn + self.use_bias = args.use_bias + + # Layernorm on the attention output + if not args.parallel_attn: + if not args.use_rms_norm: + self.post_attention_layernorm = LayerNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + no_persist_layer_norm=args.no_persist_layer_norm, + sequence_parallel=args.sequence_parallel, + ) + else: + if args.freeze_layers: + with torch.no_grad(): + self.post_attention_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + else: + self.post_attention_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + + if self.layer_type == LayerType.decoder: + self.inter_attention = ParallelAttention( + init_method, + output_layer_init_method, + layer_number, + attention_type=AttnType.cross_attn, + world_size=world_size, + args=args, + ) + # Layernorm on the attention output. 
+ if not args.use_rms_norm: + self.post_inter_attention_layernorm = LayerNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + no_persist_layer_norm=args.no_persist_layer_norm, + sequence_parallel=args.sequence_parallel, + ) + else: + if args.freeze_layers: + with torch.no_grad(): + self.post_inter_attention_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + else: + self.post_inter_attention_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=gemma + ) + + self.mlp = ParallelMLP(init_method, output_layer_init_method, args, world_size) + + # Set bias+dropout+add fusion grad_enable execution handler. + TORCH_MAJOR = int(torch.__version__.split(".")[0]) + TORCH_MINOR = int(torch.__version__.split(".")[1]) + use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10) + self.bias_dropout_add_exec_handler = ( + nullcontext if use_nvfuser else torch.enable_grad + ) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + encoder_output=None, + enc_dec_attn_mask=None, + inference_params=None, + position_ids=None, + ): + ## + # PRELIMINARIES - utilities to compute residual + dropout + ## + + # function to compute residual + dropout(x + bias) + def add_dropout(x, bias, residual, prob, make_viewless=False): + # Jit compiled function creates 'view' tensor. This tensor + # potentially gets saved in the MPU checkpoint function context, + # which rejects view tensors. While making a viewless tensor here + # won't result in memory savings (like the data loader, or + # p2p_communication), it serves to document the origin of this + # 'view' tensor. + if self.use_bias: + bias = bias.expand_as(residual) + if self.drop_path is None: + with self.bias_dropout_add_exec_handler(): + output = bias_dropout_add_func(x, bias, residual, prob) + if make_viewless: + return core.utils.make_viewless_tensor( + inp=output, + requires_grad=output.requires_grad, + keep_graph=True, + ) + return output + out = torch.nn.functional.dropout( + x + bias, p=prob, training=self.training + ) + return residual + self.drop_path(out) + elif self.drop_path is None: + with self.bias_dropout_add_exec_handler(): + return dropout_add_func(x, residual, prob) + out = torch.nn.functional.dropout(x, p=prob, training=self.training) + return residual + self.drop_path(out) + + # determine the dropout_add_func to use in the add_dropout function + if self.drop_path is None: + # jit scripting for a nn.module (with dropout) is not + # triggerring the fusion kernel. For now, we use two + # different nn.functional routines to account for varying + # dropout semantics during training and inference phases. + if not self.use_bias: + dropout_add_func = get_dropout_add(self.training) + elif self.bias_dropout_fusion: + if self.training: + bias_dropout_add_func = bias_dropout_add_fused_train + else: + bias_dropout_add_func = bias_dropout_add_fused_inference + else: + bias_dropout_add_func = get_bias_dropout_add(self.training) + + ## + # Transformer computation begins now. + ## + + # hidden_states: [s, b, h] + # Layer norm at the beginning of the transformer layer. + layernorm_output = self.input_layernorm(hidden_states) + # Get attention. + attention_output, attention_bias = self.self_attention( + layernorm_output, + attention_mask, + inference_params=inference_params, + position_ids=position_ids, + ) + + # Determines the value of the next residual connection. 
+ # if not parallel_attn: used after the post_attention_layernorm, + # else: used just before returning the output. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = hidden_states + + # dedicated mlp layernorm module + if self.parallel_layernorm: + layernorm_output = self.mlp_layernorm(hidden_states) + + if self.parallel_attn: + # used only if layer is decoder and not residual_post_layernorm + # which seems a bit strange, but it's kept just in case for now + layernorm_input = attention_output + else: + layernorm_input = add_dropout( + attention_output, attention_bias, residual, self.hidden_dropout + ) + layernorm_output = self.post_attention_layernorm(layernorm_input) + + if self.layer_type == LayerType.decoder: + attention_output, attention_bias = self.inter_attention( + layernorm_output, enc_dec_attn_mask, encoder_output=encoder_output + ) + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = layernorm_input + + layernorm_input = add_dropout( + attention_output, attention_bias, residual, self.hidden_dropout + ) + # Layer norm post the decoder attention + layernorm_output = self.post_inter_attention_layernorm(layernorm_input) + # Compute MLP. + # At this point, layernorm_output is: + # if layer is decoder: the post_inter_attention_layernorm output, + # elif parallel_layernorm: the mlp_layernorm output, + # elif parallel_attention: the input_layernorm tensor. + # else: the post_attention_layernorm output, + mlp_output, mlp_bias = self.mlp(layernorm_output) + + # Second residual connection. + if self.parallel_attn: + mlp_output = mlp_output + attention_output + elif self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = layernorm_input + output = add_dropout( + mlp_output, mlp_bias, residual, self.hidden_dropout, make_viewless=True + ) + + # Apply final layernorm, return. + output = self.output_layernorm(output) + return output + + +class NoopTransformerLayer(MegatronModule): + """A single 'no-op' transformer layer. + + The sole purpose of this layer is for when a standalone embedding layer + is used (i.e., args.standalone_embedding_stage == True). In this case, + zero transformer layers are assigned when pipeline rank == 0. Additionally, + when virtual pipeline rank >= 1, zero total model parameters are created + (virtual rank 0 contains the input embedding). This results in the model's + input and output tensors being the same, which causes an error when + performing certain memory optimiations on the output tensor (e.g., + deallocating it). Thus, this layer disconnects the input from the output + via a clone. Since ranks containing a no-op layer are generally under- + utilized (both compute and memory), there's no worry of any performance + degredation. 
+ """ + + def __init__(self, layer_number): + super().__init__() + self.layer_number = layer_number + + def forward( + self, + hidden_states, + attention_mask, + encoder_output=None, + enc_dec_attn_mask=None, + inference_params=None, + ): + return hidden_states.clone() + + +def _get_num_layers(args, is_encoder_and_decoder_model, is_decoder=False): + """Compute the number of transformer layers resident on the current rank.""" + if megatron.core.mpu.get_pipeline_model_parallel_world_size() > 1: + if is_encoder_and_decoder_model: + assert args.pipeline_model_parallel_split_rank is not None + + # When a standalone embedding stage is used, a rank is taken from + # the encoder's ranks, to be used for the encoder's embedding + # layer. This way, the rank referenced by the 'split rank' remains + # the same whether or not a standalone embedding stage is used. + num_ranks_in_encoder = ( + args.pipeline_model_parallel_split_rank - 1 + if args.standalone_embedding_stage + else args.pipeline_model_parallel_split_rank + ) + num_ranks_in_decoder = ( + args.transformer_pipeline_model_parallel_size - num_ranks_in_encoder + ) + assert args.encoder_num_layers % num_ranks_in_encoder == 0, ( + "encoder_num_layers (%d) must be divisible by number of ranks given to encoder (%d)" + % (args.encoder_num_layers, num_ranks_in_encoder) + ) + assert args.decoder_num_layers % num_ranks_in_decoder == 0, ( + "decoder_num_layers (%d) must be divisible by number of ranks given to decoder (%d)" + % (args.decoder_num_layers, num_ranks_in_decoder) + ) + if megatron.core.mpu.is_pipeline_stage_before_split(): + num_layers = ( + 0 + if args.standalone_embedding_stage + and megatron.core.mpu.get_pipeline_model_parallel_rank() == 0 + else args.encoder_num_layers // num_ranks_in_encoder + ) + else: + num_layers = args.decoder_num_layers // num_ranks_in_decoder + else: + assert args.num_layers == args.encoder_num_layers + assert ( + args.num_layers % args.transformer_pipeline_model_parallel_size == 0 + ), "num_layers must be divisible by transformer_pipeline_model_parallel_size" + + # When a standalone embedding stage is used, all transformer layers + # are divided among pipeline rank >= 1, while on pipeline rank 0, + # ranks either contain the input embedding layer (virtual pp rank 0), + # or no layers at all (virtual pp rank >= 1). + num_layers = ( + 0 + if args.standalone_embedding_stage + and megatron.core.mpu.get_pipeline_model_parallel_rank() == 0 + else args.num_layers // args.transformer_pipeline_model_parallel_size + ) + else: + if not is_decoder: + num_layers = args.encoder_num_layers + else: + num_layers = args.decoder_num_layers + return num_layers + + +class ParallelTransformer(MegatronModule): + def __init__( + self, + init_method: Callable, + output_layer_init_method, + layer_type=LayerType.encoder, + self_attn_mask_type=AttnMaskType.padding, + pre_process=True, + post_process=True, + drop_path_rate=0.0, + args=None, + model_type=None, + ): + super(ParallelTransformer, self).__init__() + world_size = megatron.core.mpu.get_tensor_model_parallel_world_size() + assert args is not None + assert model_type is not None + + self.layer_type = layer_type + self.model_type = model_type + self.bf16 = args.bf16 + self.fp32_residual_connection = args.fp32_residual_connection + self.pre_process = pre_process + self.post_process = post_process + self.input_tensor = None + self.drop_path_rate = drop_path_rate + self.transformer_impl = args.transformer_impl + + # Store activation checkpointing flag. 
+ self.recompute_granularity = args.recompute_granularity + self.recompute_method = args.recompute_method + self.recompute_num_layers = args.recompute_num_layers + self.distribute_saved_activations = ( + args.distribute_saved_activations and not args.sequence_parallel + ) + + self.sequence_parallel = args.sequence_parallel + + # Transformer Engine Init. + if self.transformer_impl == "transformer_engine": + global transformer_engine + import transformer_engine + self.use_fp8 = args.fp8_e4m3 or args.fp8_hybrid + self.fp8_recipe = None + self.fp8_group = megatron.core.mpu.get_data_parallel_group() + if self.use_fp8: + if args.fp8_e4m3: + fp8_format = transformer_engine.common.recipe.Format.E4M3 + elif args.fp8_hybrid: + fp8_format = transformer_engine.common.recipe.Format.HYBRID + self.fp8_recipe = transformer_engine.common.recipe.DelayedScaling( + margin=args.fp8_margin, + interval=args.fp8_interval, + fp8_format=fp8_format, + amax_history_len=args.fp8_amax_history_len, + amax_compute_algo=args.fp8_amax_compute_algo, + override_linear_precision=(False, False, not args.fp8_wgrad), + ) + + self.num_microbatches_in_previous_step = -1 + self.microbatch_count = 0 + self.checkpoint_core_attention = args.recompute_granularity == "selective" + + # Number of layers. + self.num_layers = _get_num_layers( + args, + model_type == ModelType.encoder_and_decoder, + layer_type == LayerType.decoder, + ) + + self.drop_path_rates = [ + rate.item() + for rate in torch.linspace(0, self.drop_path_rate, args.num_layers) + ] + + if args.lima_dropout: + # Use a layer dependent dropout probability, starting at p_d=0.0 at the bottom layer + # and linearly raising the rate to the value specified by `args.hidden_dropout` at the last layer. + # see "LIMA: Less Is More for Alignment", Zhou et al 2023, https://arxiv.org/abs/2305.11206 + self.hidden_dropouts = [ + rate.item() + for rate in torch.linspace(0, args.hidden_dropout, args.num_layers) + ] + else: + # Use standard residual dropout with the same dropout probability for all layers. + self.hidden_dropouts = [args.hidden_dropout] * args.num_layers + + if args.model_name == 'gemma': + self.gemma = True + else: + self.gemma = False + + # Transformer layers. 
+ def build_layer(layer_number: int): + if args.transformer_impl == "local": + return ParallelTransformerLayer( + init_method, + output_layer_init_method, + layer_number, + layer_type=layer_type, + self_attn_mask_type=self_attn_mask_type, + drop_path_rate=self.drop_path_rates[layer_number - 1], + world_size=world_size, + hidden_dropout=self.hidden_dropouts[layer_number - 1], + args=args, + ) + else: + return transformer_engine.pytorch.TransformerLayer( + args.hidden_size, + args.ffn_hidden_size, + args.num_attention_heads, + layernorm_epsilon=args.layernorm_epsilon, + hidden_dropout=self.hidden_dropouts[layer_number - 1], + attention_dropout=args.attention_dropout, + init_method=init_method, + output_layer_init_method=output_layer_init_method, + layer_number=layer_number, + kv_channels=args.kv_channels, + self_attn_mask_type=self_attn_mask_type.name, + tp_group=megatron.core.mpu.get_tensor_model_parallel_group(), + get_rng_state_tracker=megatron.core.tensor_parallel.get_cuda_rng_tracker, + fuse_wgrad_accumulation=args.gradient_accumulation_fusion, + apply_query_key_layer_scaling=args.apply_query_key_layer_scaling, + attention_softmax_in_fp32=args.attention_softmax_in_fp32, + seq_length=args.seq_length, + micro_batch_size=args.micro_batch_size, + sequence_parallel=args.sequence_parallel, + params_dtype=args.params_dtype, + apply_residual_connection_post_layernorm=args.apply_residual_connection_post_layernorm, + output_layernorm=False, + layer_type="encoder", + drop_path_rate=self.drop_path_rates[layer_number - 1], + set_parallel_mode=True, + fuse_qkv_params=True, + ) + + if args.virtual_pipeline_model_parallel_size is not None: + assert args.num_layers % args.virtual_pipeline_model_parallel_size == 0, ( + "num_layers_per_stage must be divisible by " + "virtual_pipeline_model_parallel_size" + ) + assert args.model_type != ModelType.encoder_and_decoder + # Number of layers in each model chunk is the number of layers in the stage, + # divided by the number of model chunks in a stage. + self.num_layers = ( + self.num_layers // args.virtual_pipeline_model_parallel_size + ) + # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of + # layers to stages like (each list is a model chunk): + # Stage 0: [0] [2] [4] [6] + # Stage 1: [1] [3] [5] [7] + # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of + # layers to stages like (each list is a model chunk): + # Stage 0: [0, 1] [4, 5] + # Stage 1: [2, 3] [6, 7] + offset = megatron.core.mpu.get_virtual_pipeline_model_parallel_rank() * ( + args.num_layers // args.virtual_pipeline_model_parallel_size + ) + (megatron.core.mpu.get_pipeline_model_parallel_rank() * self.num_layers) + else: + # Each stage gets a contiguous set of layers. + if ( + model_type == ModelType.encoder_and_decoder + and megatron.core.mpu.get_pipeline_model_parallel_world_size() > 1 + ): + pipeline_rank = megatron.core.mpu.get_pipeline_model_parallel_rank() + if layer_type == LayerType.encoder: + offset = pipeline_rank * self.num_layers + else: + num_ranks_in_enc = args.pipeline_model_parallel_split_rank + offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers + else: + offset = ( + megatron.core.mpu.get_pipeline_model_parallel_rank() + * self.num_layers + ) + + if self.num_layers == 0: + # When a standalone embedding stage is used (e.g., + # args.standalone_embedding_stage == True), virtual pipeline ranks + # on pipeline rank 0 will have zero transformer layers assigned to + # them. 
This results in the model's input and output tensors to be + # the same, which will cause failure for certain output tensor + # optimizations (e.g., pipeline output deallocation). To remedy + # this, we assign a 'no-op' layer on these ranks, which will + # disconnect the input tensor from the output tensor. + self.num_layers = 1 + self.layers = torch.nn.ModuleList([NoopTransformerLayer(1)]) + else: + self.layers = torch.nn.ModuleList( + [build_layer(i + 1 + offset) for i in range(self.num_layers)] + ) + + self.use_post_ln = args.use_post_ln + if self.post_process: + # Final layer norm before output. + if not args.use_rms_norm: + self.final_layernorm = LayerNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + no_persist_layer_norm=args.no_persist_layer_norm, + sequence_parallel=args.sequence_parallel, + ) + else: + if args.freeze_layers: + with torch.no_grad(): + self.final_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=self.gemma + ) + else: + self.final_layernorm = RMSNorm( + args.hidden_size, + eps=args.layernorm_epsilon, + sequence_parallel=args.sequence_parallel, + gemma=self.gemma + ) + + def _get_layer(self, layer_number): + return self.layers[layer_number] + + def _checkpointed_forward( + self, + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + is_first_microbatch, + ): + """Forward method with activation checkpointing.""" + + def custom(start, end, is_transformer_engine=False): + def custom_forward(*args, **kwargs): + for index in range(start, end): + layer = self._get_layer(index) + x_ = layer(*args, **kwargs) + return x_ + + def custom_forward_transformer_engine(*args, **kwargs): + return custom_forward( + *args, is_first_microbatch=is_first_microbatch, **kwargs + ) + + if not is_transformer_engine: + return custom_forward + else: + return custom_forward_transformer_engine + + if self.recompute_method == "uniform": + # Uniformly divide the total number of Transformer layers and checkpoint + # the input activation of each divided chunk. + # A method to further reduce memory usage reducing checkpoints. + l = 0 + while l < self.num_layers: + if self.transformer_impl == "transformer_engine": + hidden_states = transformer_engine.pytorch.distributed.checkpoint( + custom( + l, l + self.recompute_num_layers, is_transformer_engine=True + ), + self.distribute_saved_activations, + megatron.core.tensor_parallel.get_cuda_rng_tracker, + megatron.core.mpu.get_tensor_model_parallel_group(), + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + ) + else: + hidden_states = megatron.core.tensor_parallel.checkpoint( + custom(l, l + self.recompute_num_layers), + self.distribute_saved_activations, + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + ) + + l += self.recompute_num_layers + + elif self.recompute_method == "block": + # Checkpoint the input activation of only a set number of individual + # Transformer layers and skip the rest. + # A method fully use the device memory removing redundant re-computation. 
+ for l in range(self.num_layers): + if l < self.recompute_num_layers: + if self.transformer_impl == "transformer_engine": + hidden_states = ( + transformer_engine.pytorch.distributed.checkpoint( + custom(l, l + 1, is_transformer_engine=True), + self.distribute_saved_activations, + megatron.core.tensor_parallel.get_cuda_rng_tracker, + megatron.core.mpu.get_tensor_model_parallel_group(), + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + ) + ) + else: + hidden_states = megatron.core.tensor_parallel.checkpoint( + custom(l, l + 1), + self.distribute_saved_activations, + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + ) + else: + if self.transformer_impl == "transformer_engine": + hidden_states = custom(l, l + 1, is_transformer_engine=True)( + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + ) + else: + hidden_states = custom(l, l + 1)( + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + ) + else: + raise ValueError("Invalid activation recompute method.") + + return hidden_states + + def set_input_tensor(self, input_tensor): + """Set input tensor to be used instead of forward()'s input. + + When doing pipeline parallelism the input from the previous + stage comes from communication, not from the input, so the + model's forward_step_func won't have it. This function is thus + used by internal code to bypass the input provided by the + forward_step_func""" + self.input_tensor = input_tensor + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + encoder_output=None, + enc_dec_attn_mask=None, + inference_params=None, + position_ids=None, + ): + # hidden_states: [s, b, h] + + # Checks. + if inference_params: + assert ( + self.recompute_granularity is None + ), "inference does not work with activation checkpointing" + + if not self.pre_process: + # See set_input_tensor() + hidden_states = self.input_tensor + + # Viewless tensor. + # - We only need to create a viewless tensor in the case of micro batch + # size (mbs) == 1, since in this case, 'hidden_states.transpose()' + # above creates a view tensor, and '.contiguous()' is a pass-through. + # For mbs >= 2, '.contiguous()' creates a new tensor, eliminating + # the need to make it viewless. + # + # However, we don't explicitly check mbs == 1 here because + # make_viewless_tensor() has negligible overhead when its input + # is already viewless. + # + # - For the 'else' case above, calling make_viewless_tensor() here is + # likely redundant, since p2p_communication.py (likely originator) + # already creates viewless tensors. That said, make_viewless_tensor() + # is called here to be future-proof and corner-case-proof. 
+ hidden_states = core.utils.make_viewless_tensor( + hidden_states, + requires_grad=True, + keep_graph=True, + ) + + if self.sequence_parallel: + rng_context = megatron.core.tensor_parallel.get_cuda_rng_tracker().fork() + else: + rng_context = nullcontext() + + with rng_context: + # The fp8_autocast context manager is a no-op when enabled=True + # The if...else serves to short circuit name resolution for fp8_autocast + with transformer_engine.pytorch.fp8_autocast( + enabled=self.use_fp8, + fp8_recipe=self.fp8_recipe, + fp8_group=self.fp8_group, + ) if self.use_fp8 else nullcontext(): + # Determine if the current iteration is first microbatch + if self.num_microbatches_in_previous_step != get_num_microbatches(): + self.microbatch_count = ( + 0 # Reset count on new batch size rampup interval + ) + self.num_microbatches_in_previous_step = get_num_microbatches() + is_first_microbatch = ( + self.microbatch_count % get_num_microbatches() == 0 + ) + + # Forward pass. + if self.recompute_granularity == "full": + hidden_states = self._checkpointed_forward( + hidden_states, + attention_mask, + encoder_output, + enc_dec_attn_mask, + is_first_microbatch, + ) + else: + forward_kwargs = { + "encoder_output": encoder_output, + "enc_dec_attn_mask": enc_dec_attn_mask, + "inference_params": inference_params, + "position_ids": position_ids, + } + + if self.transformer_impl == "transformer_engine": + forward_kwargs["is_first_microbatch"] = is_first_microbatch + forward_kwargs[ + "checkpoint_core_attention" + ] = self.checkpoint_core_attention + + for index in range(self.num_layers): + layer = self._get_layer(index) + + hidden_states = layer( + hidden_states, attention_mask, **forward_kwargs + ) + + # Skip counter update for eval and activation checkpointing + if torch.is_grad_enabled() and self.training: + self.microbatch_count += 1 + + # Final layer norm. + # not done for the "post_ln" convention https://sh-tsang.medium.com/review-pre-ln-transformer-on-layer-normalization-in-the-transformer-architecture-b6c91a89e9ab + if self.post_process and (not self.use_post_ln): + hidden_states = self.final_layernorm(hidden_states) + return hidden_states diff --git a/multilinguality_megatron/megatron/model/utils.py b/multilinguality_megatron/megatron/model/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0d951836da3fe500c1a465d1bced8051dfbdd40b --- /dev/null +++ b/multilinguality_megatron/megatron/model/utils.py @@ -0,0 +1,48 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
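# ParallelTransformer.forward above selects its fp8/RNG contexts with the
# "ctx if cond else nullcontext()" pattern, so transformer_engine is only
# name-resolved when FP8 is actually enabled. A minimal sketch of the same
# pattern, using torch.autocast and a hypothetical use_amp flag in place of
# self.use_fp8:
from contextlib import nullcontext
import torch

use_amp = False  # hypothetical stand-in for self.use_fp8

with torch.autocast("cpu", dtype=torch.bfloat16) if use_amp else nullcontext():
    y = torch.ones(2, 2) @ torch.ones(2, 2)  # runs under either context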
+ +"""Utilities for models.""" + +import math +from typing import Callable + +import torch + + +def init_method_normal(sigma): + """Init method based on N(0, sigma).""" + def init_(tensor): + return torch.nn.init.normal_(tensor, mean=0.0, std=sigma) + return init_ + + +def scaled_init_method_normal(sigma, num_layers): + """Init method based on N(0, sigma/sqrt(2*num_layers).""" + std = sigma / math.sqrt(2.0 * num_layers) + + def init_(tensor): + return torch.nn.init.normal_(tensor, mean=0.0, std=std) + return init_ + + +def attention_mask_func(attention_scores, attention_mask): + attention_scores.masked_fill_(attention_mask, -10000.0) + return attention_scores + + +def get_linear_layer(rows: int, + columns: int, + init_method: Callable, + perform_initialization: bool): + """Simple linear layer with weight initialization.""" + layer = torch.nn.Linear(rows, columns) + if perform_initialization: + init_method(layer.weight) + with torch.no_grad(): + layer.bias.zero_() + return layer + + +@torch.jit.script +def erf_gelu(x): + # This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter + return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype)+torch.ones_like(x).to(dtype=x.dtype)) diff --git a/multilinguality_megatron/megatron/mpu/tests/__init__.py b/multilinguality_megatron/megatron/mpu/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/multilinguality_megatron/megatron/mpu/tests/commons.py b/multilinguality_megatron/megatron/mpu/tests/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..611daf0f66692426ee5ad59824f3c421d7b94a90 --- /dev/null +++ b/multilinguality_megatron/megatron/mpu/tests/commons.py @@ -0,0 +1,70 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import argparse +import os +import random +import numpy +import torch + +import mpu + + +class IdentityLayer(torch.nn.Module): + def __init__(self, size, scale=1.0): + super(IdentityLayer, self).__init__() + self.weight = torch.nn.Parameter(scale * torch.randn(size)) + + def forward(self): + return self.weight + + +def set_random_seed(seed): + """Set random seed for reproducability.""" + random.seed(seed) + numpy.random.seed(seed) + torch.manual_seed(seed) + mpu.model_parallel_cuda_manual_seed(seed) + + +def initialize_distributed(backend='nccl'): + """Initialize torch.distributed.""" + # Get local rank in case it is provided. + parser = argparse.ArgumentParser() + parser.add_argument('--local_rank', type=int, default=None, + help='local rank passed from distributed launcher') + args = parser.parse_args() + local_rank = args.local_rank + + # Get rank and world size. + rank = int(os.getenv('RANK', '0')) + world_size = int(os.getenv("WORLD_SIZE", '1')) + + print('> initializing torch.distributed with local rank: {}, ' + 'rank: {}, world size: {}'.format(local_rank, rank, world_size)) + + # Set the device id. + device = rank % torch.cuda.device_count() + if local_rank is not None: + device = local_rank + torch.cuda.set_device(device) + + # Call the init process. 
+ init_method = 'tcp://' + master_ip = os.getenv('MASTER_ADDR', 'localhost') + master_port = os.getenv('MASTER_PORT', '6000') + init_method += master_ip + ':' + master_port + torch.distributed.init_process_group( + backend=backend, + world_size=world_size, + rank=rank, + init_method=init_method) + + +def print_separator(message): + torch.distributed.barrier() + filler_len = (78 - len(message)) // 2 + filler = '-' * filler_len + string = '\n' + filler + ' {} '.format(message) + filler + if torch.distributed.get_rank() == 0: + print(string, flush=True) + torch.distributed.barrier() diff --git a/multilinguality_megatron/megatron/mpu/tests/test_cross_entropy.py b/multilinguality_megatron/megatron/mpu/tests/test_cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..3fcb92673f80874f2d364f6efc7e7a68ed687b85 --- /dev/null +++ b/multilinguality_megatron/megatron/mpu/tests/test_cross_entropy.py @@ -0,0 +1,90 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from commons import set_random_seed +from commons import IdentityLayer +from commons import print_separator +from commons import initialize_distributed +from mpu.cross_entropy import vocab_parallel_cross_entropy +import mpu +import torch.nn.functional as F +import torch +import random +import sys +sys.path.append("../..") + + +def torch_cross_entropy(batch_size, seq_length, vocab_size, + logits_scale, seed): + set_random_seed(seed) + identity = IdentityLayer((batch_size, seq_length, vocab_size), + scale=logits_scale).cuda() + logits = identity() + target = torch.cuda.LongTensor( + size=(batch_size, seq_length)).random_(0, vocab_size) + loss = F.cross_entropy(logits.view(-1, logits.size()[-1]), + target.view(-1), + reduction='none').view_as(target).mean() + loss.backward() + return loss, identity.weight.grad + + +def mpu_cross_entropy(batch_size, seq_length, vocab_size, + logits_scale, seed): + set_random_seed(seed) + identity = IdentityLayer((batch_size, seq_length, vocab_size), + scale=logits_scale).cuda() + logits = identity() + logits_parallel = mpu.scatter_to_tensor_model_parallel_region(logits) + target = torch.cuda.LongTensor( + size=(batch_size, seq_length)).random_(0, vocab_size) + loss = vocab_parallel_cross_entropy(logits_parallel, target).mean() + loss.backward() + return loss, identity.weight.grad + + +def test_cross_entropy(tensor_model_parallel_size): + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + batch_size = 13 + seq_length = 17 + vocab_size_per_partition = 11 + logits_scale = 1000.0 + vocab_size = vocab_size_per_partition * tensor_model_parallel_size + seed = 1234 + + loss_torch, grad_torch = torch_cross_entropy(batch_size, seq_length, + vocab_size, logits_scale, + seed) + loss_mpu, grad_mpu = mpu_cross_entropy(batch_size, seq_length, + vocab_size, logits_scale, + seed) + + error = loss_torch.sub_(loss_mpu).abs().max() + print(' max error in loss on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + error = grad_torch.sub_(grad_mpu).abs().max() + print(' max error in grad on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset groups + mpu.destroy_tensor_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +if __name__ == '__main__': + + initialize_distributed() + world_size = torch.distributed.get_world_size() + + 
tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test cross entropy') + test_cross_entropy(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/multilinguality_megatron/megatron/mpu/tests/test_data.py b/multilinguality_megatron/megatron/mpu/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..c30bf4bb8d4dbb0c2d576d20b18b4ae640d00d2c --- /dev/null +++ b/multilinguality_megatron/megatron/mpu/tests/test_data.py @@ -0,0 +1,75 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from commons import print_separator +from commons import initialize_distributed +from mpu import data as data_utils +import mpu +import torch +import functools +import operator +import sys +sys.path.append("../..") + + +def test_broadcast_data(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing broadcast_data with model parallel size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + torch.manual_seed(1234 + mpu.get_data_parallel_rank()) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + key_size_t = {'key1': [7, 11], + 'key2': [8, 2, 1], + 'key3': [13], + 'key4': [5, 1, 2], + 'key5': [5, 12]} + keys = list(key_size_t.keys()) + + data = {} + data_t = {} + for key in key_size_t: + data[key] = torch.LongTensor(size=key_size_t[key]).random_(0, 1000) + data_t[key] = data[key].clone() + data['keyX'] = torch.FloatTensor(size=(5, )).random_(0, 1000) + data_t['keyX'] = data['keyX'].clone() + if mpu.get_tensor_model_parallel_rank() != 0: + data = None + + data_utils._check_data_types(keys, data_t, torch.int64) + key_size, key_numel, \ + total_numel = data_utils._build_key_size_numel_dictionaries(keys, data) + for key in keys: + assert key_size[key] == key_size_t[key] + total_numel_t = 0 + for key in keys: + target_size = functools.reduce(operator.mul, key_size_t[key], 1) + assert key_numel[key] == target_size + total_numel_t += target_size + assert total_numel == total_numel_t + + data_b = data_utils.broadcast_data(keys, data, torch.int64) + for key in keys: + tensor = data_t[key].cuda() + assert data_b[key].sub(tensor).abs().max() == 0 + + # Reset groups + mpu.destroy_tensor_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +if __name__ == '__main__': + + initialize_distributed() + world_size = torch.distributed.get_world_size() + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test test broadcast data') + test_broadcast_data(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/multilinguality_megatron/megatron/mpu/tests/test_initialize.py b/multilinguality_megatron/megatron/mpu/tests/test_initialize.py new file mode 100644 index 0000000000000000000000000000000000000000..e5d2be37e269d8176a987b8a6ef5d7f47de98394 --- /dev/null +++ b/multilinguality_megatron/megatron/mpu/tests/test_initialize.py @@ -0,0 +1,82 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
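# The broadcast_data test above checks straightforward bookkeeping: each key's
# element count is the product of its shape, and total_numel is the sum over
# keys. A standalone sketch of that invariant (pure Python, no mpu needed):
import functools
import operator

key_size_t = {'key1': [7, 11], 'key2': [8, 2, 1], 'key3': [13],
              'key4': [5, 1, 2], 'key5': [5, 12]}

key_numel = {key: functools.reduce(operator.mul, shape, 1)
             for key, shape in key_size_t.items()}
total_numel = sum(key_numel.values())

assert key_numel['key2'] == 16
assert total_numel == 77 + 16 + 13 + 10 + 60  # 176 elements in total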
+ +from commons import print_separator +from commons import initialize_distributed +import mpu +import torch +import sys +sys.path.append("../..") + + +def test_initialize_model_parallel(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing initialize_model_parallel with size {} ...'.format( + tensor_model_parallel_size)) + tensor_model_parallel_size_ = min(tensor_model_parallel_size, + torch.distributed.get_world_size()) + assert not mpu.model_parallel_is_initialized() + mpu.initialize_model_parallel(tensor_model_parallel_size_) + assert mpu.model_parallel_is_initialized() + + # Checks. + def check(group, world_size, rank): + assert world_size == torch.distributed.get_world_size(group=group) + assert rank == torch.distributed.get_rank(group=group) + + # Model parallel. + world_size = tensor_model_parallel_size_ + rank = torch.distributed.get_rank() % tensor_model_parallel_size_ + assert world_size == mpu.get_tensor_model_parallel_world_size() + assert rank == mpu.get_tensor_model_parallel_rank() + check(mpu.get_tensor_model_parallel_group(), world_size, rank) + + # Data parallel. + world_size = torch.distributed.get_world_size() // tensor_model_parallel_size_ + rank = torch.distributed.get_rank() // tensor_model_parallel_size + assert world_size == mpu.get_data_parallel_world_size() + assert rank == mpu.get_data_parallel_rank() + check(mpu.get_data_parallel_group(), world_size, rank) + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +def test_get_tensor_model_parallel_src_rank(tensor_model_parallel_size_): + + if torch.distributed.get_rank() == 0: + print('> testing get_tensor_model_parallel_src_rank with size {} ...'.format( + tensor_model_parallel_size_)) + tensor_model_parallel_size = min(tensor_model_parallel_size_, + torch.distributed.get_world_size()) + assert not mpu.model_parallel_is_initialized() + mpu.initialize_model_parallel(tensor_model_parallel_size) + assert mpu.model_parallel_is_initialized() + + # Checks + src_rank = torch.distributed.get_rank() - mpu.get_tensor_model_parallel_rank() + assert mpu.get_tensor_model_parallel_src_rank() == src_rank + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +if __name__ == '__main__': + + initialize_distributed() + world_size = torch.distributed.get_world_size() + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test initialize model parallel') + test_initialize_model_parallel(tensor_model_parallel_size) + print_separator('test model parallel source rank') + test_get_tensor_model_parallel_src_rank(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/multilinguality_megatron/megatron/mpu/tests/test_layers.py b/multilinguality_megatron/megatron/mpu/tests/test_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..d1cbcbd4157e97f9c836680d564b00b46947b610 --- /dev/null +++ b/multilinguality_megatron/megatron/mpu/tests/test_layers.py @@ -0,0 +1,506 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
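# The initialization test above relies on how consecutive global ranks map onto
# tensor- and data-parallel groups: with groups built from adjacent ranks, the
# tensor-parallel rank is rank % tp, the data-parallel rank is rank // tp, and
# the source rank of a tensor-parallel group is the global rank minus its
# tensor-parallel rank. A small sketch of that arithmetic, assuming
# world_size=8 and tensor_model_parallel_size=2:
world_size, tp = 8, 2
dp = world_size // tp                # data-parallel world size, here 4

for rank in range(world_size):
    tp_rank = rank % tp              # position within the tensor-parallel group
    dp_rank = rank // tp             # which data-parallel replica this rank joins
    tp_src_rank = rank - tp_rank     # lowest global rank in this TP group
    assert 0 <= tp_rank < tp and 0 <= dp_rank < dp
    assert tp_src_rank % tp == 0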
+ +from mpu import layers +from commons import set_random_seed +from commons import print_separator +from commons import initialize_distributed +import mpu +from torch.nn.parameter import Parameter +import torch.nn.init as init +import torch +import random +import sys +sys.path.append("../..") + + +def test_parallel_embedding(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing parallel embedding with model parallel size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + batch_size = 17 + seq_length = 23 + vocab_size = 48 + hidden_size = 16 + seed = 1236 + + set_random_seed(123) + input_data = torch.LongTensor( + size=(batch_size, seq_length)).random_(0, vocab_size).cuda() + loss_weight = torch.randn([batch_size, seq_length, hidden_size]).cuda() + + set_random_seed(seed) + embedding_original = torch.nn.Embedding(vocab_size, hidden_size).cuda() + + output = embedding_original(input_data) + loss_original = torch.mul(output, loss_weight).sum() + loss_original.backward() + + set_random_seed(seed) + embedding_parallel = layers.ParallelEmbedding( + vocab_size, hidden_size, init_method=init.normal_).cuda() + output = embedding_parallel(input_data) + loss_parallel = torch.mul(output, loss_weight).sum() + loss_parallel.backward() + + set_random_seed(seed) + embedding_vocab_parallel = layers.VocabParallelEmbedding( + vocab_size, hidden_size, init_method=init.normal_).cuda() + output = embedding_vocab_parallel(input_data) + loss_vocab_parallel = torch.mul(output, loss_weight).sum() + loss_vocab_parallel.backward() + + torch.distributed.barrier() + error = loss_parallel.sub(loss_original).abs() + print(' error in loss (parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + torch.distributed.barrier() + error = loss_vocab_parallel.sub(loss_original).abs() + print(' error in loss (vocab parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + weight_grad_orig = torch.split(embedding_original.weight.grad, + hidden_size // tensor_model_parallel_size, + 1)[mpu.get_tensor_model_parallel_rank()] + error = embedding_parallel.weight.grad.sub(weight_grad_orig).abs().max() + print(' error in grad (parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + weight_grad_orig = torch.split(embedding_original.weight.grad, + vocab_size // tensor_model_parallel_size, + 0)[mpu.get_tensor_model_parallel_rank()] + error = embedding_vocab_parallel.weight.grad.sub( + weight_grad_orig).abs().max() + print(' error in grad (vocab parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +def test_initialize_affine_weight(tensor_model_parallel_size): + + mpu.initialize_model_parallel(tensor_model_parallel_size) + if torch.distributed.get_rank() == 0: + print('> testing initialize_affine_weight with model parallel ' + 'size: {}'.format(tensor_model_parallel_size)) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + input_size_coeff = 13 + input_size = 
input_size_coeff * tensor_model_parallel_size + output_size_coeff = 17 + output_size = output_size_coeff * tensor_model_parallel_size + + # --------------- + # Column parallel + # --------------- + weight = torch.empty(output_size_coeff, input_size) + set_random_seed(seed) + layers._initialize_affine_weight(weight, output_size, input_size, + + output_size_coeff, 0, + torch.nn.init.normal_) + # Target. + set_random_seed(seed) + master_weight = torch.empty(output_size, input_size) + torch.nn.init.normal_(master_weight) + rank = mpu.get_tensor_model_parallel_rank() + my_weight = torch.split(master_weight, output_size_coeff, + dim=0)[rank].contiguous().clone() + + # Compare. + error = weight.sub(my_weight).abs().max() + torch.distributed.barrier() + print(' column parallel max error (should be zero) on global rank ' + '{}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # ------------ + # Row parallel + # ------------ + weight = torch.empty(output_size, input_size_coeff) + set_random_seed(seed) + mpu.layers._initialize_affine_weight(weight, output_size, input_size, + input_size_coeff, 1, + torch.nn.init.normal_) + # Target. + set_random_seed(seed) + master_weight = torch.empty(output_size, input_size) + torch.nn.init.normal_(master_weight) + rank = mpu.get_tensor_model_parallel_rank() + my_weight = torch.split(master_weight, input_size_coeff, + dim=1)[rank].contiguous().clone() + + # Compare. + error = weight.sub(my_weight).abs().max() + torch.distributed.barrier() + print(' row parallel max error (should be zero) on global rank ' + '{}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +class IdentityLayer2D(torch.nn.Module): + def __init__(self, m, n): + super(IdentityLayer2D, self).__init__() + self.weight = Parameter(torch.Tensor(m, n)) + torch.nn.init.xavier_normal_(self.weight) + + def forward(self): + return self.weight + + +def test_column_parallel_linear(tensor_model_parallel_size): + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + input_size_coeff = 13 + input_size = input_size_coeff * tensor_model_parallel_size + output_size_coeff = 17 + output_size = output_size_coeff * tensor_model_parallel_size + batch_size = 7 + + # Network + identity_layer = IdentityLayer2D(batch_size, input_size).cuda() + linear_layer = mpu.ColumnParallelLinear( + input_size, output_size, keep_master_weight_for_test=True, world_size=tensor_model_parallel_size).cuda() + loss_weight = torch.randn([batch_size, output_size]).cuda() + # Forward + input_ = identity_layer() + output = linear_layer(input_) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + # Values. 
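+    # With Y = X A^T + b and L = sum(Y * loss_weight), the reference gradients
+    # are dL/dA = dLdY^T X, dL/db = the column sums of dLdY, and dL/dX = dLdY A.
+    # ColumnParallelLinear splits A and b along dim 0, so each rank is compared
+    # against its output_size_coeff-sized slice below.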
+ dLdY = loss_weight + X = identity_layer.weight + A = linear_layer.master_weight.cuda() + dLdA = torch.matmul(dLdY.t(), X) + dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1) + dLdX = torch.matmul(dLdY, A) + + rank = mpu.get_tensor_model_parallel_rank() + my_dLdA = torch.split(dLdA, output_size_coeff, + dim=0)[rank].contiguous().clone() + error = my_dLdA.sub(linear_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdA on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + my_dLdb = torch.split(dLdb, output_size_coeff, + dim=0)[rank].contiguous().clone() + error = my_dLdb.sub(linear_layer.bias.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdb on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + error = dLdX.sub(identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdX on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +def test_row_parallel_linear(tensor_model_parallel_size): + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + input_size_coeff = 13 + input_size = input_size_coeff * tensor_model_parallel_size + output_size_coeff = 17 + output_size = output_size_coeff * tensor_model_parallel_size + batch_size = 7 + + # Network + identity_layer = IdentityLayer2D(batch_size, input_size).cuda() + linear_layer = mpu.RowParallelLinear( + input_size, output_size, keep_master_weight_for_test=True, world_size=tensor_model_parallel_size).cuda() + loss_weight = torch.randn([batch_size, output_size]).cuda() + # Forward + input_ = identity_layer() + output = linear_layer(input_) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + # Values. 
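+    # Same reference gradients as the column-parallel case; RowParallelLinear
+    # splits A along dim 1 (the input dimension), so each rank checks an
+    # input_size_coeff-wide slice of dLdA, while the bias gradient is replicated.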
+ dLdY = loss_weight + X = identity_layer.weight + A = linear_layer.master_weight.cuda() + dLdA = torch.matmul(dLdY.t(), X) + dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1) + dLdX = torch.matmul(dLdY, A) + + rank = mpu.get_tensor_model_parallel_rank() + my_dLdA = torch.split(dLdA, input_size_coeff, + dim=1)[rank].contiguous().clone() + error = my_dLdA.sub(linear_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdA on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + error = dLdb.sub(linear_layer.bias.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdb on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + error = dLdX.sub(identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdX on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +class IdentityLayer3D(torch.nn.Module): + def __init__(self, m, n, k): + super(IdentityLayer3D, self).__init__() + self.weight = Parameter(torch.Tensor(m, n, k)) + torch.nn.init.xavier_normal_(self.weight) + + def forward(self): + return self.weight + + +def parallel_self_attention(tensor_model_parallel_size, num_att_heads_per_partition, + hidden_size_per_att_head, dropout_prob, batch_size, + sequence_length): + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + + num_att_heads = num_att_heads_per_partition * \ + torch.distributed.get_world_size() + hidden_size = hidden_size_per_att_head * num_att_heads + + # Network + identity_layer = IdentityLayer3D(batch_size, sequence_length, + hidden_size).cuda() + attention_layer = mpu.BertParallelSelfAttention(hidden_size, num_att_heads, + dropout_prob).cuda() + loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).cuda() + attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).cuda() + # Forward + input_ = identity_layer() + output = attention_layer(input_, attention_mask) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + rank = mpu.get_tensor_model_parallel_rank() + mpu.destroy_model_parallel() + return rank, hidden_size, tensor_model_parallel_size, loss, \ + attention_layer, identity_layer + + +def test_parallel_self_attention(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing ParallelSelfAttention with model parallel ' + 'size: {}'.format(tensor_model_parallel_size)) + + num_att_heads_per_partition = 3 + hidden_size_per_att_head = 7 + dropout_prob = 0.0 # has to be zero + batch_size = 5 + sequence_length = 13 + + rank_1, hideen_size_1, tensor_model_parallel_size_1, loss_1, \ + attention_layer_1, identity_layer_1 = parallel_self_attention( + 1, num_att_heads_per_partition, + hidden_size_per_att_head, dropout_prob, batch_size, sequence_length) + + rank, hidden_size, tensor_model_parallel_size, loss, \ + attention_layer, identity_layer = parallel_self_attention( + tensor_model_parallel_size, num_att_heads_per_partition, + hidden_size_per_att_head, dropout_prob, batch_size, sequence_length) + assert hideen_size_1 == hidden_size + + error = loss_1.sub(loss).abs().max() + torch.distributed.barrier() + 
print(' loss error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-6 + + my_lin_grad_list = torch.split( + attention_layer_1.query_key_value.weight.grad, + hidden_size // tensor_model_parallel_size, 0)[rank::tensor_model_parallel_size] + my_lin_grad = torch.cat(my_lin_grad_list, dim=0) + error = my_lin_grad.sub( + attention_layer.query_key_value.weight.grad).abs().max() + torch.distributed.barrier() + print(' weight gradient error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-6 + + error = identity_layer_1.weight.grad.sub( + identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' input gradient error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-6 + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +def parallel_transformer(tensor_model_parallel_size, + num_att_heads_per_partition, + hidden_size_per_att_head, + batch_size, + sequence_length): + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + + num_att_heads = num_att_heads_per_partition * \ + torch.distributed.get_world_size() + hidden_size = hidden_size_per_att_head * num_att_heads + intermediate_size = 4 * hidden_size + + # Network + identity_layer = IdentityLayer3D(batch_size, sequence_length, + hidden_size).cuda() + transformer_layer = mpu.BertParallelTransformerLayer( + hidden_size, intermediate_size, num_att_heads, 0.0, 0.0, + torch.nn.functional.relu, 1.0e-5).cuda() + + loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).cuda() + attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).cuda() + # Forward + input_ = identity_layer() + output = transformer_layer(input_, attention_mask) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + rank = mpu.get_tensor_model_parallel_rank() + mpu.destroy_model_parallel() + return rank, hidden_size, tensor_model_parallel_size, loss, \ + transformer_layer, identity_layer + + +def test_parallel_transformer_layer(tensor_model_parallel_size): + num_att_heads_per_partition = 3 + hidden_size_per_att_head = 7 + batch_size = 5 + sequence_length = 13 + + rank_1, hidden_size_1, tensor_model_parallel_size_1, loss_1, \ + transformer_layer_1, identity_layer_1 = parallel_transformer( + 1, num_att_heads_per_partition, + hidden_size_per_att_head, batch_size, sequence_length) + + rank, hidden_size, tensor_model_parallel_size, loss, \ + transformer_layer, identity_layer = parallel_transformer( + tensor_model_parallel_size, num_att_heads_per_partition, + hidden_size_per_att_head, batch_size, sequence_length) + + error = loss_1.sub(loss).abs().max() + torch.distributed.barrier() + print(' loss error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-5, 'error: {}'.format(error) + + error = identity_layer_1.weight.grad.sub( + identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' input gradient error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-5, 'error: {}'.format(error) + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +if __name__ == '__main__': + + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + 
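+    # Deterministic cuDNN kernels keep the single-GPU reference run and the
+    # model-parallel run numerically comparable under the tolerances used above.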
initialize_distributed() + world_size = torch.distributed.get_world_size() + + print_separator('test initialize affine weight') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_initialize_affine_weight(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test parallel embedding') + test_parallel_embedding(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test column-parallel linear') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_column_parallel_linear(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test row-parallel linear') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_row_parallel_linear(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test parallel self-attention') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_parallel_self_attention(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test parallel transformer') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_parallel_transformer_layer(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/multilinguality_megatron/megatron/mpu/tests/test_random.py b/multilinguality_megatron/megatron/mpu/tests/test_random.py new file mode 100644 index 0000000000000000000000000000000000000000..8ee6942cf01fd7d9c93012c37f7b5e4b351f3c15 --- /dev/null +++ b/multilinguality_megatron/megatron/mpu/tests/test_random.py @@ -0,0 +1,191 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from commons import print_separator +from commons import initialize_distributed +import mpu +import torch +import sys +sys.path.append("../..") + + +def test_set_cuda_rng_state(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing set_rng_state with size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + size = 123 + seed = 1234 + torch.cuda.manual_seed(1234) + tensor = torch.cuda.FloatTensor(size) + + # Get the state + rng_state = torch.cuda.get_rng_state() + rng_state_copy = rng_state.clone() + + # Do some stuff. + for _ in range(5): + torch.randn(size, out=tensor) + result_1 = tensor.clone() + + assert rng_state.sub(rng_state_copy).max() == 0 + assert torch.cuda.get_rng_state().sub(rng_state_copy).max() > 0 + + # State should be different. + new_rng_state = torch.cuda.get_rng_state() + max_diff = new_rng_state.sub(rng_state).max() + print(' max diff in rng state (should be non-zero) on global rank {}: {}'. + format(torch.distributed.get_rank(), max_diff)) + assert max_diff > 0 + + # Reset the rng state and do the same stuff. + mpu.random._set_cuda_rng_state(rng_state) + for _ in range(5): + torch.randn(size, out=tensor) + mpu.random._set_cuda_rng_state(rng_state) + for _ in range(5): + torch.randn(size, out=tensor) + result_2 = tensor.clone() + + # Results should be the same + error = result_2.sub(result_1).abs().max() + print(' max error in generated tensors (should be zero) on ' + 'global rank {}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Input state should have remained intact. 
+ error = rng_state.sub(rng_state_copy).max() + print(' max error in rng state (should be zero) on global rank {}: {}'. + format(torch.distributed.get_rank(), error)) + assert error == 0 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +def test_cuda_rng_tracker(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing cuda rng tracker with size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed_1 = 1234 + seed_2 = 4321 + size = [12, 21] + tensor = torch.cuda.FloatTensor(size) + + # Set to seed_1 and generate two tensors. + torch.cuda.manual_seed(seed_1) + torch.randn(size, out=tensor) + target_11 = tensor.clone() + torch.randn(size, out=tensor) + target_12 = tensor.clone() + + # Set to seed_2 and generate two tensors. + torch.cuda.manual_seed(seed_2) + torch.randn(size, out=tensor) + target_21 = tensor.clone() + torch.randn(size, out=tensor) + target_22 = tensor.clone() + + # Now if we interleave seed_1 and seed_2, + # we should still get the same tensors + torch.cuda.manual_seed(seed_1) + mpu.get_cuda_rng_tracker().add('test', seed_2) + + torch.randn(size, out=tensor) + result_11 = tensor.clone() + + with mpu.get_cuda_rng_tracker().fork('test'): + torch.randn(size, out=tensor) + result_21 = tensor.clone() + + torch.randn(size, out=tensor) + result_12 = tensor.clone() + + with mpu.get_cuda_rng_tracker().fork('test'): + torch.randn(size, out=tensor) + result_22 = tensor.clone() + + diff = result_11.sub(result_21).abs().max() + diff = min(diff, result_12.sub(result_22).abs().max()) + print(' max diff in generated tensors (should be non-zero) on ' + 'global rank {}: {}'.format(torch.distributed.get_rank(), diff)) + assert diff > 1.0e-6 + error = max(result_11.sub(target_11).abs().max(), + result_12.sub(target_12).abs().max()) + error = max(error, result_21.sub(target_21).abs().max()) + error = max(error, result_22.sub(target_22).abs().max()) + print(' max error in generated tensors (should be zero) on ' + 'global rank {}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset the tracker + mpu.get_cuda_rng_tracker().reset() + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +def test_model_parallel_cuda_manual_seed(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing model parallel cuda manual seed with size {} ...'. 
+ format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + mpu.model_parallel_cuda_manual_seed(12345) + assert torch.cuda.initial_seed() == 12345 + with mpu.get_cuda_rng_tracker().fork(): + assert torch.cuda.initial_seed() == (12345 + 2718 + + mpu.get_tensor_model_parallel_rank()) + + # Reset the tracker + mpu.get_cuda_rng_tracker().reset() + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +if __name__ == '__main__': + + initialize_distributed() + world_size = torch.distributed.get_world_size() + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test set rng state') + test_set_cuda_rng_state(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test cuda rng tracker') + test_cuda_rng_tracker(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test model parallel cuda manual seed') + test_model_parallel_cuda_manual_seed(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/multilinguality_megatron/megatron/optimizer/__init__.py b/multilinguality_megatron/megatron/optimizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d5c9bf5a370bf17b1b944ceefc51d0be4f240542 --- /dev/null +++ b/multilinguality_megatron/megatron/optimizer/__init__.py @@ -0,0 +1,153 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from apex.optimizers import FusedAdam as Adam +from apex.optimizers import FusedSGD as SGD + +from megatron import get_args + +from .distrib_optimizer import DistributedOptimizer +from .grad_scaler import ConstantGradScaler, DynamicGradScaler +from .optimizer import Float16OptimizerWithFloat16Params, FP32Optimizer + + +def get_param_groups(modules, + no_weight_decay_cond, + scale_lr_cond, + lr_mult): + """creates param groups based on weight decay condition (regularized vs non regularized) + and learning rate scale condition (args.lr vs lr_mult * args.lr) + scale_lr_cond is used during finetuning where head of the network requires a scaled + version of the base learning rate. 
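+    Returns up to four parameter groups, one per (wd_mult, lr_mult) combination:
+    regularized vs. non-regularized parameters, each at the base or scaled lr.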
+ """ + args = get_args() + wd_no_scale_lr = [] + wd_scale_lr = [] + no_wd_no_scale_lr = [] + no_wd_scale_lr = [] + for module in modules: + for name, param in module.named_parameters(): + #print(f"param name: {name}; requires grad: {param.requires_grad}") + if args.freeze_layers: + if "embedding" not in name and "lm_head" not in name: + param.requires_grad = False + print(f"Freezing {name}") + else: + param.requires_grad = True + print(f"Not freezing {name}") + if not param.requires_grad: + continue + + if no_weight_decay_cond is not None: + no_wd = no_weight_decay_cond(name, param) + else: + # do not regularize biases nor Norm parameters + no_wd = name.endswith(".bias") or len(param.shape) == 1 + + if scale_lr_cond is not None: + scale_lr = scale_lr_cond(name, param) + else: + scale_lr = False + + if not no_wd and not scale_lr: + wd_no_scale_lr.append(param) + elif not no_wd and scale_lr: + wd_scale_lr.append(param) + elif no_wd and not scale_lr: + no_wd_no_scale_lr.append(param) + else: + no_wd_scale_lr.append(param) + + param_groups = [] + if len(wd_no_scale_lr): + param_groups.append({'params': wd_no_scale_lr, 'wd_mult': 1.0, 'lr_mult': 1.0}) + if len(wd_scale_lr): + param_groups.append({'params': wd_scale_lr, 'wd_mult': 1.0, 'lr_mult': lr_mult}) + if len(no_wd_no_scale_lr): + param_groups.append({'params': no_wd_no_scale_lr, 'wd_mult': 0.0, 'lr_mult': 1.0}) + if len(no_wd_scale_lr): + param_groups.append({'params': no_wd_scale_lr, 'wd_mult': 0.0, 'lr_mult': lr_mult}) + + return param_groups + +def get_megatron_optimizer(model, + no_weight_decay_cond=None, + scale_lr_cond=None, + lr_mult=1.0): + args = get_args() + + # Base optimizer. + param_groups = get_param_groups(model, + no_weight_decay_cond, + scale_lr_cond, + lr_mult) + + if args.optimizer == 'adam': + optimizer = Adam(param_groups, + lr=args.lr, + weight_decay=args.weight_decay, + betas=(args.adam_beta1, args.adam_beta2), + eps=args.adam_eps) + elif args.optimizer == 'sgd': + optimizer = SGD(param_groups, + lr=args.lr, + weight_decay=args.weight_decay, + momentum=args.sgd_momentum) + else: + raise Exception('{} optimizer is not supported.'.format( + args.optimizer)) + + # Determine whether the params have main-grad field. + params_have_main_grad = False + if args.DDP_impl == 'local': + params_have_main_grad = True + + # Mixed precision optimizer. + # - Note: both the Float16Optimizer and the DistributedOptimizer inherit + # from the MixedPrecisionOptimizer, which manages any optimizer where + # the model params and main params are distinct. + if args.fp16 or args.bf16 or args.use_distributed_optimizer: + + # Grad scaler: + # if loss_scale is provided, instantiate the constant scaler. + # if we are using fp16 and loss_scale is not present, use a + # dynamic scaler. + # otherwise we are running in bf16 with no loss_scale so + # leave it as None. + grad_scaler = None + + # Constant loss scale. + if args.loss_scale: + grad_scaler = ConstantGradScaler(args.loss_scale) + + # Dynamic loss scale. + else: + if args.fp16: + grad_scaler = DynamicGradScaler( + initial_scale=args.initial_loss_scale, + min_scale=args.min_loss_scale, + growth_factor=2.0, + backoff_factor=0.5, + growth_interval=args.loss_scale_window, + hysteresis=args.hysteresis) + + # Megatron optimizer. 
+ opt_ty = DistributedOptimizer \ + if args.use_distributed_optimizer else \ + Float16OptimizerWithFloat16Params + return opt_ty(optimizer, + args.clip_grad, + args.log_num_zeros_in_grad, + params_have_main_grad, + args.use_contiguous_buffers_in_local_ddp, + args.fp16, + args.bf16, + args.params_dtype, + grad_scaler, + model) + + # FP32. + return FP32Optimizer(optimizer, args.clip_grad, + args.log_num_zeros_in_grad, + params_have_main_grad, + args.use_contiguous_buffers_in_local_ddp, + model) diff --git a/multilinguality_megatron/megatron/optimizer/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/optimizer/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb1e70bacd736d45c202854d0f3c1cb7eb027c45 Binary files /dev/null and b/multilinguality_megatron/megatron/optimizer/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/optimizer/__pycache__/clip_grads.cpython-39.pyc b/multilinguality_megatron/megatron/optimizer/__pycache__/clip_grads.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bffb0d6e7d0f0474c9e1f4faa94c9115a1168f8 Binary files /dev/null and b/multilinguality_megatron/megatron/optimizer/__pycache__/clip_grads.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/optimizer/__pycache__/distrib_optimizer.cpython-39.pyc b/multilinguality_megatron/megatron/optimizer/__pycache__/distrib_optimizer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a20052379945693fa578b862fbc9c6dce3672ce5 Binary files /dev/null and b/multilinguality_megatron/megatron/optimizer/__pycache__/distrib_optimizer.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/optimizer/__pycache__/grad_scaler.cpython-39.pyc b/multilinguality_megatron/megatron/optimizer/__pycache__/grad_scaler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8cf9848f207b79188da7909004a77f1c0947853 Binary files /dev/null and b/multilinguality_megatron/megatron/optimizer/__pycache__/grad_scaler.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/optimizer/__pycache__/optimizer.cpython-39.pyc b/multilinguality_megatron/megatron/optimizer/__pycache__/optimizer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36f4bc52899808dd0af5c210b317be4abcf2f860 Binary files /dev/null and b/multilinguality_megatron/megatron/optimizer/__pycache__/optimizer.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/optimizer/clip_grads.py b/multilinguality_megatron/megatron/optimizer/clip_grads.py new file mode 100644 index 0000000000000000000000000000000000000000..32bbffd647a56cfd86e69b42d9c09ba12585aec1 --- /dev/null +++ b/multilinguality_megatron/megatron/optimizer/clip_grads.py @@ -0,0 +1,136 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Gradient clipping.""" + +import torch +from torch import inf +# from torch._six import inf + +from apex.multi_tensor_apply import multi_tensor_applier +import amp_C + +from megatron.model.module import param_is_not_shared +from megatron.core.tensor_parallel import param_is_not_tensor_parallel_duplicate + + +def clip_grad_norm_fp32(parameters, grads_for_norm, + max_norm, norm_type=2, + model_parallel_group=None): + """Clips gradient norm of an iterable of parameters whose gradients + are in fp32. 
+ + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + grads_for_norm (Iterable[Tensor]): an iterable of Tensors or a single + Tensor that will be used for calculating the grad norm. + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + model_parallel_group (group): given the nature of the distributed + optimizer, this is passed as an argument. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + if isinstance(grads_for_norm, torch.Tensor): + grads_for_norm = [grads_for_norm] + + # Grads. + grads = [] + for param in parameters: + if param.grad is not None: + assert param.grad.type() == 'torch.cuda.FloatTensor' + grads.append(param.grad.detach()) + + # Norm parameters. + max_norm = float(max_norm) + norm_type = float(norm_type) + total_norm = 0.0 + + # Calculate norm. + if norm_type == inf: + total_norm = max(grad.abs().max() for grad in grads_for_norm) + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + # Take max across all model-parallel GPUs. + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.MAX, + group=model_parallel_group) + total_norm = total_norm_cuda[0].item() + + else: + if norm_type == 2.0: + dummy_overflow_buf = torch.cuda.IntTensor([0]) + # Use apex's multi-tensor applier for efficiency reasons. + # Multi-tensor applier takes a function and a list of list + # and performs the operation on that list all in one kernel. + if grads_for_norm: + grad_norm, _ = multi_tensor_applier( + amp_C.multi_tensor_l2norm, + dummy_overflow_buf, + [grads_for_norm], + False # no per-parameter norm + ) + else: + grad_norm = torch.cuda.FloatTensor([0]) + # Since we will be summing across data parallel groups, + # we need the pow(norm-type). + total_norm = grad_norm ** norm_type + + else: + for grad in grads_for_norm: + grad_norm = torch.norm(grad, norm_type) + total_norm += grad_norm ** norm_type + + # Sum across all model-parallel GPUs. + torch.distributed.all_reduce(total_norm, + op=torch.distributed.ReduceOp.SUM, + group=model_parallel_group) + total_norm = total_norm.item() ** (1.0 / norm_type) + + # Scale. + clip_coeff = max_norm / (total_norm + 1.0e-6) + if clip_coeff < 1.0: + dummy_overflow_buf = torch.cuda.IntTensor([0]) + multi_tensor_applier(amp_C.multi_tensor_scale, + dummy_overflow_buf, + [grads, grads], + clip_coeff) + + return total_norm + + +def count_zeros_fp32(parameters, model_parallel_group): + + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + + # Filter parameters based on: + # - grad should not be none + # - parameter should not be shared + # - should not be a replica due to tensor model parallelism + total_num_zeros = torch.cuda.FloatTensor([0.0]) + for param in parameters: + grad_not_none = param.grad is not None + is_not_shared = param_is_not_shared(param) + is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param) + if grad_not_none and is_not_shared and is_not_tp_duplicate: + grad = param.grad.detach() + num_zeros = grad.numel() - torch.count_nonzero(grad) + total_num_zeros = num_zeros + total_num_zeros + + # Sum across all model-parallel GPUs. 
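+    # Each tensor-parallel rank only counted zeros in its own, non-duplicated
+    # parameter shards, so the per-rank counts are summed over the
+    # model-parallel group to obtain the global total.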
+ torch.distributed.all_reduce(total_num_zeros, + op=torch.distributed.ReduceOp.SUM, + group=model_parallel_group) + + total_num_zeros = total_num_zeros.item() + + return total_num_zeros diff --git a/multilinguality_megatron/megatron/optimizer/distrib_optimizer.py b/multilinguality_megatron/megatron/optimizer/distrib_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..79500b587bd6bf0d94de04321540e985897ff9fe --- /dev/null +++ b/multilinguality_megatron/megatron/optimizer/distrib_optimizer.py @@ -0,0 +1,700 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Megatron distributed optimizer.""" + + +import math +import torch + +from megatron import print_rank_0 +from megatron.core import mpu, tensor_parallel + +from .optimizer import MixedPrecisionOptimizer, _zero_grad_group_helper + + +class Range: + """ + A range represents a start and end points for indexing a shard + from a full tensor. + """ + def __init__(self, start, end): + self.start = start + self.end = end + self.size = end - start + + def normalize(self, start = 0): + return Range(start, start + self.size) + + def __str__(self): + return "%d,%d [%d]" % (self.start, self.end, self.size) + + +class DistributedOptimizer(MixedPrecisionOptimizer): + """Distributed optimizer, for all data types (fp16, bf16, and fp32). + + Arguments: + optimizer: base optimizer such as Adam or SGD + clip_grad: clip gradeints with this global L2 norm. Note + that clipping is ignored if clip_grad == 0 + log_num_zeros_in_grad: return number of zeros in the gradients. + params_have_main_grad: flag indicating if parameters have + a `main_grad` field. If this is set, we are assuming + that the model parameters are store in the `main_grad` + field instead of the typical `grad` field. This happens + for the DDP cases where there is a continuous buffer + holding the gradients. For example for bfloat16, we want + to do gradient accumulation and all-reduces in float32 + and as a result we store those gradients in the main_grad. + Note that main grad is not necessarily in float32. + use_contiguous_buffers_in_local_ddp: if true, the local DDP model + is using a contiguous buffer to hold the model grads. + fp16: if true, the model is running in fp16. + bf16: if true, the model is running in bfloat16. + grad_scaler: used for scaling gradients. Note that this can be + None. This case happens when `bf16 = True` and we don't + use any loss scale. Note that for `bf16 = True`, we can have + a constnat gradient scaler. Also for `bf16 = False`, we + always require a grad scaler. + models: list of models (i.e., the virtual pipelining models). This + is used by the distributed optimizer for mapping parameters. + """ + + @classmethod + def build_model_gbuf_param_range_map(cls, model, dtype, gbuf_world_range): + """ + Build mapping from param reference to grad buffer shard ranges. + + This method builds a mapping from parameter references to grad + buffer shard ranges, specific to each data-parallel (DP) rank's + set of 'owned' parameters. Each grad buffer (padded to be an even + multiple of DP-world-size) is conceptually divided into DP-world-size + contiguous regions, where each DP rank 'owns' a contiguous regions. + Ownership in this sense means DP rank is responsible for reducing + the relevant subset of grads, and updating the relevant subset of + params. 
+ + This conceptual partitioning of the grad buffer does NOT respect + parameter boundaries, and as such it is assumed that each created + range references a shard (or subset) of the full parameter. It is + easiest to think of each DP rank as operating (i.e., reducing, + gathering) purely on views into the grad buffer, for all model-to- + main & main-to-model operations. + + This method creates three ranges: + - The param's range within the entire grad buffer (i.e., world index). + - The param's range within the DP rank's local view of the grad buffer. + - The param's range within itself (i.e., its shard). + """ + + # Param range map. + param_world_index_map = model._grad_buffer_param_index_map[dtype] + param_range_map = {} + for param, param_world_indexes in param_world_index_map.items(): + + # Param range. + param_world_start, param_world_end = param_world_indexes + param_local_start = max( + 0, + param_world_start - gbuf_world_range.start) + param_local_end = min( + gbuf_world_range.size, + param_world_end - gbuf_world_range.start) + + # Add param, if within local gbuf range. + if param_local_end > param_local_start: + param_local_range = Range(param_local_start, param_local_end) + param_world_range = param_local_range.normalize( + param_local_start + gbuf_world_range.start) + sub_param_start = max(0, gbuf_world_range.start-param_world_start) + sub_param_range = param_local_range.normalize(sub_param_start) + param_range_map[param] = { + "gbuf_world": param_world_range, + "gbuf_local": param_local_range, + "param" : sub_param_range, + } + + return param_range_map + + @classmethod + def build_model_gbuf_range(cls, model, dtype): + """ + Build mapping between params and their grad buffers. + + This method does the initial setup for the method above. This setup + includes determining the shard ranges into the DDP's grad buffer for + each data-parallel (DP) rank. Each DP rank keeps range info for + all other DP ranks, for the purpose of creating args for + reduce-scatter and all-gather. + """ + + data_parallel_rank = mpu.get_data_parallel_rank() + data_parallel_world_size = mpu.get_data_parallel_world_size() + + # Grad buffer range. + grad_buffer = model._grad_buffers[dtype] + gbuf_size = grad_buffer.numel + max_gbuf_range_size = int(math.ceil(gbuf_size / data_parallel_world_size)) + + # All world ranges. (i.e., across all data parallel ranks) + gbuf_world_all_ranges = [] + for r in range(data_parallel_world_size): + gbuf_world_start = r * max_gbuf_range_size + gbuf_world_end = min(gbuf_size, gbuf_world_start+max_gbuf_range_size) + gbuf_world_range = Range(gbuf_world_start, gbuf_world_end) + gbuf_world_all_ranges.append(gbuf_world_range) + + # Local DP's ranges. + gbuf_world_range = gbuf_world_all_ranges[data_parallel_rank] + gbuf_local_range = gbuf_world_range.normalize() + + # Get each param's ranges. + param_range_map = cls.build_model_gbuf_param_range_map(model, + dtype, + gbuf_world_range) + + # Group into dict. + data = { + "local" : gbuf_local_range, + "world" : gbuf_world_range, + "world_all" : gbuf_world_all_ranges, + "param_map" : param_range_map, + "max_range_size" : max_gbuf_range_size, + } + + return data + + @classmethod + def build_model_gbuf_range_map(cls, model): + """ + Create param-to-grad-buffer mappings, for grad buffer data types + within a specific virtual model. 
+ """ + return { + dtype : cls.build_model_gbuf_range(model, dtype) + for dtype in model._grad_buffers + } + + @classmethod + def build_model_param_gbuf_map(cls, model_gbuf_ranges): + """ + Create a reverse of the model_gbuf_ranges, for referencing in + opposite direction. + """ + param_gbuf_map = {} + for model_index, model_gbuf_range_map in enumerate(model_gbuf_ranges): + for dtype, gbuf_range_map in model_gbuf_range_map.items(): + for param, param_range_map in gbuf_range_map["param_map"].items(): + param_gbuf_map[param] = (model_index, dtype) + return param_gbuf_map + + @classmethod + def build_optimizer_group_ranges(cls, param_groups, model_gbuf_ranges): + """ + Create optimizer groups. + + Given the set of parameter shard ranges that are owned by the current + data-parallel (DP) rank, gather the set of parameters that will be + used (in the method below) to create the current DP's optimizer + groups. + """ + + num_groups = len(param_groups) + + # Param group map. + param_group_map = {} + for group_index, group in enumerate(param_groups): + for param in group["params"]: + assert param.requires_grad + param_group_map[param] = group_index + + # Optimizer group ranges. + group_ranges = [ {"params": []} for _ in param_groups ] + for model_gbuf_range_map in model_gbuf_ranges: + for dtype, gbuf_range_map in model_gbuf_range_map.items(): + for param in gbuf_range_map["param_map"]: + group_index = param_group_map[param] + group_range = group_ranges[group_index] + group_range["params"].append(param) + + # Squeeze zero-size group ranges. + for group_index, group_range in enumerate(group_ranges): + group_range["orig_group"] = param_groups[group_index] + group_ranges = [ g for g in group_ranges if len(g["params"]) > 0 ] + + return group_ranges + + @classmethod + def build_model_and_main_param_groups(cls, + model_gbuf_ranges, + param_gbuf_map, + opt_group_ranges): + """ + Create main parameter groups needed for the optimizer step. + + These groups encompass both: 1) groups used by this class, for + reducing/gather, and 2) groups used by the inner optimizer for the + parameter update. Given that the conceptual grad buffer partitioning + (created in earlier method) doesn't respect parameter boundaries, + the optimizer operates on shards of the model parameters, rather than + the full parameters. + """ + + # Parameter groups: + # model_float16_groups: original float16 parameters + # model_fp32_groups: original fp32 parameters + # shard_float16_groups: shards of original float16 parameters + # shard_fp32_groups: shards of original fp32 parameters + # shard_fp32_from_float16_groups: fp32 copy of float16 parameters + model_float16_groups = [] + model_fp32_groups = [] + shard_float16_groups = [] + shard_fp32_groups = [] + shard_fp32_from_float16_groups = [] + + # Allocate (or slice) each group's param shard. + for group_index, group_range in enumerate(opt_group_ranges): + + # Params of this group. 
+ model_float16_params_this_group = [] + model_fp32_params_this_group = [] + shard_float16_params_this_group = [] + shard_fp32_params_this_group = [] + shard_fp32_from_float16_params_this_group = [] + model_float16_groups.append(model_float16_params_this_group) + model_fp32_groups.append(model_fp32_params_this_group) + shard_float16_groups.append(shard_float16_params_this_group) + shard_fp32_groups.append(shard_fp32_params_this_group) + shard_fp32_from_float16_groups.append( + shard_fp32_from_float16_params_this_group) + + for model_param in group_range["params"]: + + assert model_param.requires_grad + + model_index, dtype = param_gbuf_map[model_param] + gbuf_range = model_gbuf_ranges[model_index][dtype] + param_range = gbuf_range["param_map"][model_param]["param"] + + # fp16, bf16 params. + if model_param.type() in ['torch.cuda.HalfTensor', + 'torch.cuda.BFloat16Tensor']: + + # Clone model -> main. + shard_model_param = model_param.detach().view(-1) \ + [param_range.start:param_range.end] + shard_main_param = shard_model_param.clone().float() + tensor_parallel.copy_tensor_model_parallel_attributes( + shard_model_param, model_param) + tensor_parallel.copy_tensor_model_parallel_attributes( + shard_main_param, model_param) + if hasattr(model_param, 'shared'): + shard_model_param.shared = model_param.shared + shard_main_param.shared = model_param.shared + + # Add to group. + model_float16_params_this_group.append(model_param) + shard_float16_params_this_group.append(shard_model_param) + shard_fp32_from_float16_params_this_group.append(shard_main_param) + + # fp32 params. + elif model_param.type() == 'torch.cuda.FloatTensor': + shard_model_param = model_param.view(-1) \ + [param_range.start:param_range.end] + model_fp32_params_this_group.append(model_param) + shard_fp32_params_this_group.append(shard_model_param) + tensor_parallel.copy_tensor_model_parallel_attributes( + shard_model_param, model_param) + if hasattr(model_param, 'shared'): + shard_model_param.shared = model_param.shared + + else: + raise TypeError('Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(param.type())) + + # Update optimizer's params. + group_range["orig_group"]["params"] = [ + *shard_fp32_params_this_group, + *shard_fp32_from_float16_params_this_group, + ] + + return ( + model_float16_groups, + model_fp32_groups, + shard_float16_groups, + shard_fp32_groups, + shard_fp32_from_float16_groups, + ) + + def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, + params_have_main_grad, use_contiguous_buffers_in_local_ddp, + fp16, bf16, params_dtype, grad_scaler, models): + """ + See top of class definition for argument descriptions. + + The steps in this method create the core mapping between DDP grad + buffers, parameters, and parameter shard ranges, that is needed for + converting between model param indexes and main parameter shard + indexes. This method also updates the optimizer parameter groups + with the newly created shards. + """ + + super().__init__( + optimizer, clip_grad, log_num_zeros_in_grad, + params_have_main_grad, use_contiguous_buffers_in_local_ddp, + fp16, bf16, params_dtype, grad_scaler, models) + + # Verify that contiguous buffers are being used. + # - Note: this should already be checked in arguments.py. + assert use_contiguous_buffers_in_local_ddp + + # Model grad buffer ranges. 
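+        # One grad-buffer range map per model chunk (virtual pipelining), keyed
+        # by grad dtype; all shard <-> full-param index math is derived from it.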
+ self.model_gbuf_ranges = [] + for model_index, model in enumerate(self.models): + self.model_gbuf_ranges.append(self.build_model_gbuf_range_map(model)) + self.model_param_gbuf_map = \ + self.build_model_param_gbuf_map(self.model_gbuf_ranges) + + # Optimizer ranges. + self.opt_group_ranges = self.build_optimizer_group_ranges( + self.optimizer.param_groups, + self.model_gbuf_ranges) + + # Allocate main param shards. + ( + self.model_float16_groups, + self.model_fp32_groups, + self.shard_float16_groups, + self.shard_fp32_groups, + self.shard_fp32_from_float16_groups, + ) = self.build_model_and_main_param_groups(self.model_gbuf_ranges, + self.model_param_gbuf_map, + self.opt_group_ranges) + + # Initialize param buffers. + # - These are views on the DDP model's grad buffers, that share + # storage & have their own dtype. This is safe because the param + # dtype size is always <= grad dtype size. + self.param_buffers = [] + for model_index, model in enumerate(self.models): + current_param_buffers = {} + for dtype, grad_buffer in model._grad_buffers.items(): + param_buffer = torch.tensor(grad_buffer.data.storage()._untyped(), + dtype = params_dtype, + device = grad_buffer.data.device) + param_buffer = param_buffer[:grad_buffer.numel_padded] + current_param_buffers[dtype] = param_buffer + self.param_buffers.append(current_param_buffers) + + # Update optimizer groups. + # - Also, leverage state_dict() and load_state_dict() to + # recast preexisting per-param state tensors. + self.optimizer.param_groups = \ + [ g["orig_group"] for g in self.opt_group_ranges ] + self.optimizer.load_state_dict(self.optimizer.state_dict()) + + def get_model_param_range_map(self, param): + """ + Given a model param, get the index sub-range of the param that this + data-parallel rank owns. + """ + model_index, dtype = self.model_param_gbuf_map[param] + gbuf_range_map = self.model_gbuf_ranges[model_index][dtype] + param_range_map = gbuf_range_map["param_map"][param] + return param_range_map + + def get_model_parallel_group(self): + """ + With the distributed optimizer, the model parallel group is the + entire world. + """ + return None + + def state_dict(self): + """ + The state dict must contain the fp32-from-float16 shards. + """ + state_dict = {} + state_dict['optimizer'] = self.optimizer.state_dict() + if self.grad_scaler: + state_dict['grad_scaler'] = self.grad_scaler.state_dict() + state_dict['shard_fp32_from_float16_groups'] = \ + self.shard_fp32_from_float16_groups + return state_dict + + def load_state_dict(self, state_dict): + """ + Load the state dict. + """ + + # Optimizer. + optimizer_key = 'optimizer' + if optimizer_key not in state_dict: + optimizer_key = 'optimizer_state_dict' + print_rank_0('***WARNING*** loading optimizer from ' + 'an old checkpoint ...') + self.optimizer.load_state_dict(state_dict[optimizer_key]) + + # Grad scaler. + if 'grad_scaler' not in state_dict: + if self.fp16: + print_rank_0('***WARNING*** found an old checkpoint, will not ' + 'load grad scaler ...') + else: + if self.grad_scaler: + self.grad_scaler.load_state_dict(state_dict['grad_scaler']) + else: + print_rank_0('***WARNING*** fould the grad scaler in the ' + 'checkpoint but it is None in the class. ' + 'Skipping loading grad scaler ...') + + # Copy data for the main params. 
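+        # The checkpoint stores this rank's fp32-from-float16 shards; copy them
+        # element-wise into the live shard tensors.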
+ for current_group, saved_group in zip( + self.shard_fp32_from_float16_groups, + state_dict["shard_fp32_from_float16_groups"]): + for current_param, saved_param in zip(current_group, saved_group): + current_param.data.copy_(saved_param.data) + + def zero_grad(self, set_to_none=True): + """ + Zero grads. + + We only need to zero the model related parameters, i.e., + model_float16_groups & model_fp32_groups. We additionally zero + the remaining groups as a memory optimization to reduce + fragmentation; in the case of set_to_none==True, the space + used by this field can be safely deallocated at this point. + """ + for groups in ( + self.model_float16_groups, + self.model_fp32_groups, + self.shard_float16_groups, # grad empty/unused here? + self.shard_fp32_groups, # throws grad-access warning + self.shard_fp32_from_float16_groups): + for group in groups: + _zero_grad_group_helper(group, set_to_none) + + + @staticmethod + def get_model_buffer_dp_views(model_buffers): + """ + Get shard views of each of the DDP's param/grad buffers. + + In this nested list, the top level is grouped by the virtual model + index and the buffer's data type. The sub-level is a list of + shards of that buffer, where each shard in the list represents + a contiguous view of the buffer, that is owned by a data-parallel + rank. The shard boundary does not respect parameter boundaries, and + so the elements of some parameters are split across data parallel + ranks. + + Additionally, return references to the entire buffers, for use + in reduce_scatter_tensor and _all_gather_base. + """ + + data_parallel_world_size = mpu.get_data_parallel_world_size() + + # Buffer views. + view_items = [] + for model_index, buffers in enumerate(model_buffers): + for dtype, buf in buffers.items(): + + assert buf.numel() % data_parallel_world_size == 0 + shard_size = int(buf.numel() / data_parallel_world_size) + buf_views = [buf[(r*shard_size):((r+1)*shard_size)] + for r in range(data_parallel_world_size)] + view_items.append((model_index, dtype, buf, buf_views)) + + return view_items + + + def get_model_grad_buffer_dp_views(self): + return self.get_model_buffer_dp_views([ + {dtype : mem_buffer.data} + for model in self.models + for dtype, mem_buffer in model._grad_buffers.items()]) + + def get_model_param_buffer_dp_views(self): + return self.get_model_buffer_dp_views(self.param_buffers) + + def reduce_model_grads(self, args, timers): + """ + Reduce-scatter model grads. + + The DDP's grad buffer is used for the reduce-scatter, and thus no + tensors are dynamically allocated. + + Note: this is a different order of reduction, versus the non- + distributed optimizer, which reduces: 1) layernorm grads, 2) all + grads, 3) embedding grads. + """ + + # All-reduce layer-norm grads (for sequence parallelism). + timers('layernorm-grads-all-reduce', log_level=1).start( + barrier=args.barrier_with_L1_time) + self.allreduce_layernorm_grads(args) + timers('layernorm-grads-all-reduce').stop() + + # All-reduce embedding grads. + timers('embedding-grads-all-reduce', log_level=1).start( + barrier=args.barrier_with_L1_time) + self.allreduce_embedding_grads(args) + timers('embedding-grads-all-reduce').stop() + + # Reduce-scatter setup. + timers('grads-reduce-scatter', log_level=1).start( + barrier=args.barrier_with_L1_time) + data_parallel_rank = mpu.get_data_parallel_rank() + data_parallel_world_size = mpu.get_data_parallel_world_size() + data_parallel_group = mpu.get_data_parallel_group() + + # Scale grad buffers by '1 / data_parallel_world_size'. 
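+        # Dividing before the reduce-scatter makes the result a mean over
+        # data-parallel ranks rather than a sum.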
+ for model in self.models: + for dtype, gbuf in model._grad_buffers.items(): + gbuf.data /= data_parallel_world_size + + # Reduce-scatter all grads. + gbuf_view_items = self.get_model_grad_buffer_dp_views() + for index, (model_index, dtype, gbuf, gbuf_views) \ + in enumerate(gbuf_view_items): + + torch.distributed.reduce_scatter_tensor( + gbuf_views[data_parallel_rank], + gbuf, + group = data_parallel_group, + ) + + timers('grads-reduce-scatter').stop() + + def gather_model_params(self, args, timers): + """ + All-gather updated model params. + + The DDP's param buffer is used for the all-gather, and thus no + tensors are dynamically allocated. After the all-gather, the params + can be copied from the param buffer to the param. + """ + + timers('params-all-gather', log_level=1).start( + barrier=args.barrier_with_L1_time) + + data_parallel_rank = mpu.get_data_parallel_rank() + data_parallel_group = mpu.get_data_parallel_group() + + # All-gather updated main params. + # - All param buffer views are guaranteed to have the same num elements + # across all data parallel ranks, due to grad buffer padding that is + # done in distributed.py, and extended to the param buffers. Thus, + # all sub-views will have consistent start/end indexes across data + # parallel ranks. + pbuf_view_items = self.get_model_param_buffer_dp_views() + for index, (model_index, dtype, pbuf, pbuf_views) \ + in enumerate(pbuf_view_items): + + torch.distributed._all_gather_base( + pbuf, + pbuf_views[data_parallel_rank], + group = data_parallel_group, + ) + + # Copy from param buffer to each param. + for model_id, model in enumerate(self.models): + for dtype, param_map in model._grad_buffer_param_index_map.items(): + for param, buf_range in param_map.items(): + param_buf = self.param_buffers[model_id][dtype] + param_buf_shard = param_buf[buf_range[0]:buf_range[1]] + param.view(-1).detach().copy_(param_buf_shard) + + timers('params-all-gather').stop() + + def _collect_main_grad_data_for_unscaling(self): + """ + Note: this should be equivalent to the float-16 optimizer's method, + but writtent differently, so the two should be combined. + """ + return [ + param.grad.data + for group in self.optimizer.param_groups + for param in group["params"] + ] + + def _get_model_and_main_params_data_float16(self): + """ + Get aligned list of model and main params. + """ + model_data = [] + main_data = [] + for model_group, main_group in zip(self.shard_float16_groups, + self.shard_fp32_from_float16_groups): + for model_param, main_param in zip(model_group, main_group): + model_data.append(model_param.data) + main_data.append(main_param.data) + return model_data, main_data + + def _copy_model_grads_to_main_grads(self): + """ + Copy model grads to main grads. + + Since this step follows a reduce-scatter through the DDP's grad + buffer, this method is responsible for copying the updated grads + from the grad buffer to the main shard's grad field. + """ + + # Utility method for copying group grads. 
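+        # For each model param, take the view of its main_grad owned by this
+        # data-parallel rank and expose it (cast to fp32) as the shard's .grad.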
+ def copy_group_grads(model_groups, shard_main_groups): + for model_group, shard_main_group in zip(model_groups, + shard_main_groups): + for model_param, shard_main_param in zip(model_group, + shard_main_group): + + param_range_map = self.get_model_param_range_map(model_param) + param_range = param_range_map["param"] + assert param_range.size == shard_main_param.nelement() + + model_grad = model_param.main_grad + shard_model_grad = model_grad.view(-1) \ + [param_range.start:param_range.end] + shard_main_param.grad = shard_model_grad.float() + + # Copy model groups to shard groups. + copy_group_grads(self.model_float16_groups, + self.shard_fp32_from_float16_groups) + copy_group_grads(self.model_fp32_groups, + self.shard_fp32_groups) + + def _copy_main_params_to_model_params(self): + """ + Copy main params to model params. + + Since this step is followed by an all-gather through the DDP's grad + buffer, this method is responsible for copying the updated params + from the main shards into the correct position in the grad buffer. + """ + + # Utility method for copying group params. + def copy_group_params(shard_main_groups, model_groups): + for shard_main_group, model_group in zip(shard_main_groups, + model_groups): + for shard_main_param, model_param in zip(shard_main_group, + model_group): + + param_range_map = self.get_model_param_range_map(model_param) + world_range = param_range_map["gbuf_world"] + + assert world_range.size == shard_main_param.nelement() + + model_id, dtype = self.model_param_gbuf_map[model_param] + model_param_buffer = self.param_buffers[model_id][dtype] + + shard_model_param = model_param_buffer.view(-1) \ + [world_range.start:world_range.end] + + shard_model_param.data.copy_(shard_main_param) + + # Copy shard groups to model groups. + copy_group_params(self.shard_fp32_from_float16_groups, + self.model_float16_groups) + copy_group_params(self.shard_fp32_groups, + self.model_fp32_groups) diff --git a/multilinguality_megatron/megatron/optimizer/grad_scaler.py b/multilinguality_megatron/megatron/optimizer/grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..66f7c907a41816de17e0d6a7bd5cb626722b33a6 --- /dev/null +++ b/multilinguality_megatron/megatron/optimizer/grad_scaler.py @@ -0,0 +1,120 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Megatron grad scaler.""" + +from abc import ABC +from abc import abstractmethod + +import torch + + +class MegatronGradScaler(ABC): + + def __init__(self, initial_scale): + """Initialize scale value with the input initial scale.""" + assert initial_scale > 0.0 + self._scale = torch.cuda.FloatTensor([initial_scale]) + + @property + def scale(self): + return self._scale + + @property + def inv_scale(self): + return self._scale.double().reciprocal().float() + + @abstractmethod + def update(self, found_inf): + pass + + @abstractmethod + def state_dict(self): + pass + + @abstractmethod + def load_state_dict(self, state_dict): + pass + + + +class ConstantGradScaler(MegatronGradScaler): + + def update(self, found_inf): + pass + + def state_dict(self): + return dict() + + def load_state_dict(self, state_dict): + pass + + + +class DynamicGradScaler(MegatronGradScaler): + + def __init__(self, initial_scale, min_scale, + growth_factor, backoff_factor, + growth_interval, hysteresis): + """"Grad scaler with dynamic scale that gets adjusted + during training.""" + super(DynamicGradScaler, self).__init__(initial_scale) + + # Lower bound on the scale. 
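# Illustrative sketch (not from the Megatron source): the update dynamics that
# DynamicGradScaler implements below, reduced to plain Python. The scale grows by
# growth_factor after growth_interval consecutive clean steps and backs off by
# backoff_factor once `hysteresis` overflowing steps have been seen; the overflow
# pattern driven through it here is made up.
scale, min_scale = 2.0 ** 16, 1.0
growth_factor, backoff_factor = 2.0, 0.5
growth_interval, hysteresis = 3, 2
growth_tracker, hysteresis_tracker = 0, hysteresis

for step, found_inf in enumerate([False, False, False, True, True, False]):
    if found_inf:
        growth_tracker = 0
        hysteresis_tracker -= 1
        if hysteresis_tracker <= 0:
            scale = max(scale * backoff_factor, min_scale)
    else:
        growth_tracker += 1
        if growth_tracker == growth_interval:
            growth_tracker, hysteresis_tracker = 0, hysteresis
            scale *= growth_factor
    print(step, found_inf, scale)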
+ assert min_scale > 0.0 + assert min_scale <= initial_scale + self.min_scale = torch.cuda.FloatTensor([min_scale]) + # Growth and backoff factors for the scale. + assert growth_factor > 1.0 + self.growth_factor = torch.cuda.FloatTensor([growth_factor]) + assert backoff_factor < 1.0 + assert backoff_factor > 0.0 + self.backoff_factor = torch.cuda.FloatTensor([backoff_factor]) + # Interval over which if we don't see any inf/nan, + # we will scale the grad scale by the growth factor. + assert growth_interval > 0 + self.growth_interval = growth_interval + # Number of inf/nans we should see before scaling down + # the grad scale by the backoff factor. + assert hysteresis > 0 + self.hysteresis = hysteresis + + # Trackers. + self._growth_tracker = 0 + self._hysteresis_tracker = self.hysteresis + + + def update(self, found_inf): + + # If we have an inf/nan, growth tracker is set to 0 + # and hysterisis tracker is reduced by 1. + if found_inf: + self._growth_tracker = 0 + self._hysteresis_tracker -= 1 + # Now if we are out of hysteresis count, scale down the loss. + if self._hysteresis_tracker <= 0: + self._scale = torch.max(self._scale * self.backoff_factor, + self.min_scale) + else: + # If there is no nan/inf, increment the growth tracker. + self._growth_tracker += 1 + # If we have had enough consequitive intervals with no nan/inf: + if self._growth_tracker == self.growth_interval: + # Reset the tracker and hysteresis trackers, + self._growth_tracker = 0 + self._hysteresis_tracker = self.hysteresis + # and scale up the loss scale. + self._scale = self._scale * self.growth_factor + + + def state_dict(self): + state_dict = {} + state_dict['scale'] = self._scale + state_dict['growth_tracker'] = self._growth_tracker + state_dict['hysteresis_tracker'] = self._hysteresis_tracker + return state_dict + + + def load_state_dict(self, state_dict): + self._scale = state_dict['scale'].cuda(torch.cuda.current_device()) + self._growth_tracker = state_dict['growth_tracker'] + self._hysteresis_tracker = state_dict['hysteresis_tracker'] diff --git a/multilinguality_megatron/megatron/optimizer/optimizer.py b/multilinguality_megatron/megatron/optimizer/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..1bb04fdbfb88eba16e60a3c4388d6502b0f6837b --- /dev/null +++ b/multilinguality_megatron/megatron/optimizer/optimizer.py @@ -0,0 +1,783 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Megatron optimizer.""" + +import math +from abc import ABC +from abc import abstractmethod +from apex.multi_tensor_apply import multi_tensor_applier +import amp_C +import torch +from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + +from megatron import get_timers +from megatron import print_rank_0 +from megatron.core import mpu, tensor_parallel +from megatron.model import DistributedDataParallel as LocalDDP +from megatron.model import Float16Module +from megatron.model.module import param_is_not_shared +from megatron.utils import unwrap_model + +from .clip_grads import clip_grad_norm_fp32, count_zeros_fp32 + + +def _zero_grad_group_helper(group, set_to_none): + """Zero out the gradient for a group of parameters. 
+ Note: copied from torch.optim.optimizer.""" + for param in group: + if param.grad is not None: + if set_to_none: + param.grad = None + else: + if param.grad.grad_fn is not None: + param.grad.detach_() + else: + param.grad.requires_grad_(False) + param.grad.zero_() + + +def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None): + """Use multi-tensor-applier to copy values from one list to another. + We don't have a blfoat16 implementation so for now if the overflow_buf + is not provided, we default back to simple loop copy to be compatible + with bfloat16.""" + if overflow_buf: + overflow_buf.fill_(0) + # Scaling with factor `1.0` is equivalent to copy. + multi_tensor_applier(amp_C.multi_tensor_scale, + overflow_buf, + [this, that], + 1.0) + else: + for this_, that_ in zip(this, that): + that_.copy_(this_) + + + +class MegatronOptimizer(ABC): + + + def __init__(self, optimizer, clip_grad, + log_num_zeros_in_grad, + params_have_main_grad, + use_contiguous_buffers_in_local_ddp, + models): + + """Input optimizer is the base optimizer for example Adam.""" + self.optimizer = optimizer + assert self.optimizer, 'no optimizer is provided.' + # Set gradient clipping and logging params. + self.clip_grad = clip_grad + self.log_num_zeros_in_grad = log_num_zeros_in_grad + self.params_have_main_grad = params_have_main_grad + self.use_contiguous_buffers_in_local_ddp = use_contiguous_buffers_in_local_ddp + + # 'models' are retained for access to the contiguous grad buffers. + # (see distributed optimizer) + self.models = models + + if self.use_contiguous_buffers_in_local_ddp: + assert self.params_have_main_grad, \ + "use of contiguous buffer requires that params have main grad" + + + def get_parameters(self): + params = [] + for param_group in self.optimizer.param_groups: + for param in param_group['params']: + params.append(param) + return params + + + def get_main_grads_for_grad_norm(self): + + # Filter parameters based on: + # - grad should not be none + # - parameter should not be shared + # - should not be a replica due to tensor model parallelism + params = self.get_parameters() + grads_for_norm = [] + for param in params: + grad = param.grad + grad_not_none = grad is not None + is_not_shared = param_is_not_shared(param) + is_not_tp_duplicate = tensor_parallel.param_is_not_tensor_parallel_duplicate(param) + if grad_not_none and is_not_shared and is_not_tp_duplicate: + grads_for_norm.append(grad) + + return grads_for_norm + + + def get_model_parallel_group(self): + """Default returned here, but the distributed optimizer overrides this.""" + return mpu.get_model_parallel_group() + + + def clip_grad_norm(self, clip_grad): + params = self.get_parameters() + grads_for_norm = self.get_main_grads_for_grad_norm() + return clip_grad_norm_fp32( + params, grads_for_norm, clip_grad, + model_parallel_group=self.get_model_parallel_group()) + + + def count_zeros(self): + params = self.get_parameters() + return count_zeros_fp32(params, + model_parallel_group=self.get_model_parallel_group()) + + + @abstractmethod + def zero_grad(self, set_to_none=True): + pass + + + @abstractmethod + def get_loss_scale(self): + """The output should be a cuda tensor of size 1.""" + pass + + + def scale_loss(self, loss): + """Simple scaling.""" + return self.get_loss_scale() * loss + + + @abstractmethod + def reload_model_params(self): + """Refreshes any internal state from the current model parameters. + Call whenever the parameters are changed outside of the optimizer. 
+ For example, when we load a model from a checkpoint without loading + the optimizer, the model parameters are updated but for fp16 optimizer + with main parameters, the main parameters need to also be updated.""" + pass + + + @abstractmethod + def state_dict(self): + pass + + + @abstractmethod + def load_state_dict(self, state_dict): + pass + + + # Promote state so it can be retrieved or set via + # "optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + + # Promote param_groups so it can be retrieved or set via + # "optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) + + + @abstractmethod + def step(self, args, timers): + pass + + + def gather_model_params(self, args, timers): + """ + For the case of a non-distributed-optimizer, there is nothing to + do here. + """ + pass + + + def allreduce_word_embedding_grads(self, args): + """ + All-reduce word embedding grads. + + Reduce grads across first and last stages to ensure that word_embeddings + parameters stay in sync. This should only run for models that support + pipelined model parallelism (BERT and GPT-2). + """ + + if mpu.is_rank_in_embedding_group(ignore_virtual=True) and \ + mpu.get_pipeline_model_parallel_world_size() > 1: + if mpu.is_pipeline_first_stage(ignore_virtual=True): + unwrapped_model = self.models[0] + elif mpu.is_pipeline_last_stage(ignore_virtual=True): + unwrapped_model = self.models[-1] + else: # We do not support the interleaved schedule for T5 yet. + unwrapped_model = self.models[0] + unwrapped_model = unwrap_model( + unwrapped_model, (torchDDP, LocalDDP, Float16Module)) + + if unwrapped_model.share_word_embeddings: + word_embeddings_weight = unwrapped_model.word_embeddings_weight() + if args.DDP_impl == 'local': + grad = word_embeddings_weight.main_grad + else: + grad = word_embeddings_weight.grad + torch.distributed.all_reduce(grad, group=mpu.get_embedding_group()) + + + def allreduce_position_embedding_grads(self, args): + """ + All-reduce position_embeddings grad across first (encoder) and + split (decoder) stages to ensure that position embeddings parameters + stay in sync. This should only run for T5 models with pipeline + parallelism. 
+ """ + if mpu.is_rank_in_position_embedding_group() and \ + mpu.get_pipeline_model_parallel_world_size() > 1 and \ + args.pipeline_model_parallel_split_rank is not None: + unwrapped_model = self.models[0] + unwrapped_model = unwrap_model( + unwrapped_model, (torchDDP, LocalDDP, Float16Module)) + assert args.DDP_impl == 'local', \ + 'T5 model is only supported with local DDP mode' + grad = unwrapped_model.language_model.embedding.position_embeddings.weight.main_grad + torch.distributed.all_reduce(grad, group=mpu.get_position_embedding_group()) + + + def allreduce_embedding_grads(self, args): + """All-reduce both word and position embeddings.""" + self.allreduce_word_embedding_grads(args) + self.allreduce_position_embedding_grads(args) + + + def allreduce_layernorm_grads(self, args): + """All-reduce layernorm grads (for sequence parallelism).""" + + # All-reduce layernorm parameters across model parallel nodes + # when sequence parallelism is used + if mpu.get_tensor_model_parallel_world_size() > 1 and \ + args.sequence_parallel: + grads = [] + for model_module in self.models: + unwrapped_model = unwrap_model( + model_module, (torchDDP, LocalDDP, Float16Module)) + for param in unwrapped_model.parameters(): + if getattr(param, 'sequence_parallel', False): + grad = param.main_grad if args.DDP_impl == 'local' else param.grad + grads.append(grad.data) + coalesced = _flatten_dense_tensors(grads) + torch.distributed.all_reduce( + coalesced, group=mpu.get_tensor_model_parallel_group()) + for buf, synced in zip(grads, _unflatten_dense_tensors( + coalesced, grads)): + buf.copy_(synced) + + + def reduce_model_grads(self, args, timers): + """All-reduce all grads, and all-reduce embeddings.""" + + # All-reduce layer-norm grads (for sequence parallelism). + timers('layernorm-grads-all-reduce', log_level=1).start( + barrier=args.barrier_with_L1_time) + self.allreduce_layernorm_grads(args) + timers('layernorm-grads-all-reduce').stop() + + # All-reduce if needed. + if args.DDP_impl == 'local': + timers('grads-all-reduce', log_level=1).start( + barrier=args.barrier_with_L1_time) + for model in self.models: + model.allreduce_gradients() + timers('grads-all-reduce').stop() + + # All-reduce embedding grads. + timers('embedding-grads-all-reduce', log_level=1).start( + barrier=args.barrier_with_L1_time) + self.allreduce_embedding_grads(args) + timers('embedding-grads-all-reduce').stop() + + +class MixedPrecisionOptimizer(MegatronOptimizer): + """Base class for both the float-16 and the distributed optimizer. + + Arguments: + optimizer: base optimizer such as Adam or SGD + clip_grad: clip gradeints with this global L2 norm. Note + that clipping is ignored if clip_grad == 0 + log_num_zeros_in_grad: return number of zeros in the gradients. + params_have_main_grad: flag indicating if parameters have + a `main_grad` field. If this is set, we are assuming + that the model parameters are store in the `main_grad` + field instead of the typical `grad` field. This happens + for the DDP cases where there is a continuous buffer + holding the gradients. For example for bfloat16, we want + to do gradient accumulation and all-reduces in float32 + and as a result we store those gradients in the main_grad. + Note that main grad is not necessarily in float32. + use_contiguous_buffers_in_local_ddp: if true, the local DDP model + is using a contiguous buffer to hold the model grads. + fp16: if true, the model is running in fp16. + bf16: if true, the model is running in bfloat16. + params_dtype: used by distributed optimizer. 
+ grad_scaler: used for scaling gradients. Note that this can be + None. This case happens when `bf16 = True` and we don't + use any loss scale. Note that for `bf16 = True`, we can have + a constnat gradient scaler. Also for `bf16 = False`, we + always require a grad scaler. + models: list of models (i.e., the virtual pipelining models). This + is used by the distributed optimizer for mapping parameters. + """ + + def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, + params_have_main_grad, use_contiguous_buffers_in_local_ddp, + fp16, bf16, params_dtype, grad_scaler, + models): + + super().__init__( + optimizer, clip_grad, log_num_zeros_in_grad, + params_have_main_grad, use_contiguous_buffers_in_local_ddp, + models) + + self.fp16 = fp16 + self.bf16 = bf16 + self.params_dtype = params_dtype + self.grad_scaler = grad_scaler + + # None grad scaler is only supported for bf16. + if self.grad_scaler is None: + assert not self.fp16, 'fp16 expects a grad scaler.' + + # Tensor used to determine if a nan/if has happend. + # Any non-zero value indicates inf/nan. + # Note that we keep this for the cases that grad scaler is none. + # We still record nan/inf if we have a bfloat16 with a grad scaler. + if self.grad_scaler: + self.found_inf = torch.cuda.FloatTensor([0.0]) + + # Dummy tensor needed for apex multi-apply tensor. + # For bfloat, we don't have multi-tensor apply and for now + # we set it to none so the multi-tensor apply gets ignored. + if bf16: + self._dummy_overflow_buf = None + else: + self._dummy_overflow_buf = torch.cuda.IntTensor([0]) + + # In case grad scaler is not passed, define the unity scale. + if self.grad_scaler is None: + self._scale_one = torch.cuda.FloatTensor([1.0]) + + + def get_loss_scale(self): + if self.grad_scaler is None: + return self._scale_one + return self.grad_scaler.scale + + + def reload_model_params(self): + self._copy_model_params_to_main_params() + + + def _unscale_main_grads_and_check_for_nan(self): + + # Collect main grads. + main_grads = self._collect_main_grad_data_for_unscaling() + + # Reset found inf. + self.found_inf.fill_(0.0) + + # Unscale and set found inf/nan + torch._amp_foreach_non_finite_check_and_unscale_( + main_grads, self.found_inf, self.grad_scaler.inv_scale) + + # Update across all model parallel instances. + torch.distributed.all_reduce(self.found_inf, + op=torch.distributed.ReduceOp.MAX, + group=self.get_model_parallel_group()) + + # Check for nan. + found_inf_flag = (self.found_inf.item() > 0) + + return found_inf_flag + + + @torch.no_grad() + def step(self, args, timers): + + # Copy gradients from model params to main params. + timers('optimizer-copy-to-main-grad', log_level=1).start( + barrier=args.barrier_with_L1_time) + self._copy_model_grads_to_main_grads() + timers('optimizer-copy-to-main-grad').stop() + + # Do unscale, check for inf, and update grad scaler only for + # the case that grad scaler is provided. + if self.grad_scaler: + + # Unscale and check for inf/nan. + timers('optimizer-unscale-and-check-inf', log_level=1).start( + barrier=args.barrier_with_L1_time) + found_inf_flag = self._unscale_main_grads_and_check_for_nan() + timers('optimizer-unscale-and-check-inf').stop() + + # We are done with scaling gradients + # so we can update the loss scale. + self.grad_scaler.update(found_inf_flag) + + # If we found inf/nan, skip the update. + if found_inf_flag: + return False, None, None + + # Clip the main gradients. 
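# Illustrative sketch (not from the Megatron source): what the fused
# torch._amp_foreach_non_finite_check_and_unscale_ call in
# _unscale_main_grads_and_check_for_nan (above, before clipping) amounts to,
# written with ordinary ops on a single process and without the model-parallel
# all-reduce of the found_inf flag.
import torch

loss_scale = torch.tensor(2.0 ** 12)
inv_scale = loss_scale.reciprocal()
main_grads = [torch.randn(5) * loss_scale, torch.tensor([1.0, float('inf')])]

found_inf = torch.zeros(1)
for g in main_grads:
    g.mul_(inv_scale)                        # unscale in place
    if not torch.isfinite(g).all():
        found_inf.fill_(1.0)

skip_update = found_inf.item() > 0           # the optimizer step is skipped when True
print(skip_update)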
+ timers('optimizer-clip-main-grad', log_level=1).start( + barrier=args.barrier_with_L1_time) + grad_norm = None + if self.clip_grad > 0.0: + grad_norm = self.clip_grad_norm(self.clip_grad) + timers('optimizer-clip-main-grad').stop() + + if grad_norm is not None and not math.isfinite(grad_norm): + print_rank_0(f'***WARNING*** Bad grad_norm detected (grad_norm={grad_norm})') + return False, grad_norm, None + + # Count the zeros in the grads. + timers('optimizer-count-zeros', log_level=1).start( + barrier=args.barrier_with_L1_time) + num_zeros_in_grad = self.count_zeros() if \ + self.log_num_zeros_in_grad else None + timers('optimizer-count-zeros').stop() + + # Step the optimizer. + timers('optimizer-inner-step', log_level=1).start( + barrier=args.barrier_with_L1_time) + self.optimizer.step() + timers('optimizer-inner-step').stop() + + # Update params from main params. + timers('optimizer-copy-main-to-model-params', log_level=1).start( + barrier=args.barrier_with_L1_time) + self._copy_main_params_to_model_params() + timers('optimizer-copy-main-to-model-params').stop() + + # Successful update. + return True, grad_norm, num_zeros_in_grad + + +class Float16OptimizerWithFloat16Params(MixedPrecisionOptimizer): + """Float16 optimizer for fp16 and bf16 data types. + + Arguments: + optimizer: base optimizer such as Adam or SGD + clip_grad: clip gradeints with this global L2 norm. Note + that clipping is ignored if clip_grad == 0 + log_num_zeros_in_grad: return number of zeros in the gradients. + params_have_main_grad: flag indicating if parameters have + a `main_grad` field. If this is set, we are assuming + that the model parameters are store in the `main_grad` + field instead of the typical `grad` field. This happens + for the DDP cases where there is a continuous buffer + holding the gradients. For example for bfloat16, we want + to do gradient accumulation and all-reduces in float32 + and as a result we store those gradients in the main_grad. + Note that main grad is not necessarily in float32. + use_contiguous_buffers_in_local_ddp: if true, the local DDP model + is using a contiguous buffer to hold the model grads. + fp16: if true, the model is running in fp16. + bf16: if true, the model is running in bfloat16. + grad_scaler: used for scaling gradients. Note that this can be + None. This case happens when `bf16 = True` and we don't + use any loss scale. Note that for `bf16 = True`, we can have + a constnat gradient scaler. Also for `bf16 = False`, we + always require a grad scaler. + models: list of models (i.e., the virtual pipelining models). This + is used by the distributed optimizer for mapping parameters. 
+ """ + + def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad, + params_have_main_grad, use_contiguous_buffers_in_local_ddp, + fp16, bf16, params_dtype, grad_scaler, models): + + super().__init__( + optimizer, clip_grad, log_num_zeros_in_grad, + params_have_main_grad, use_contiguous_buffers_in_local_ddp, + fp16, bf16, params_dtype, grad_scaler, models) + + # ====================== + # main parameter stuff + # ====================== + + # Three groups of parameters: + # float16_groups: original float16 parameters + # fp32_from_float16_groups: fp32 copy of float16 parameters + # fp32_from_fp32_groups: original fp32 parameters + self.float16_groups = [] + self.fp32_from_float16_groups = [] + self.fp32_from_fp32_groups = [] + + # For all the groups in the original optimizer: + for param_group in self.optimizer.param_groups: + float16_params_this_group = [] + fp32_params_this_group = [] + fp32_from_float16_params_this_group = [] + # For all the parameters in this group: + for i, param in enumerate(param_group['params']): + if param.requires_grad: + + # float16 params: + if param.type() in ['torch.cuda.HalfTensor', + 'torch.cuda.BFloat16Tensor']: + float16_params_this_group.append(param) + # Create a copy + main_param = param.detach().clone().float() + # Copy tensor model parallel attributes. + tensor_parallel.copy_tensor_model_parallel_attributes(main_param, + param) + if hasattr(param, 'shared'): + main_param.shared = param.shared + # Replace the optimizer params with the new fp32 copy. + param_group['params'][i] = main_param + + fp32_from_float16_params_this_group.append(main_param) + # Reset existing state dict key to the new main param. + if param in self.optimizer.state: + self.optimizer.state[main_param] \ + = self.optimizer.state.pop(param) + # fp32 params. + elif param.type() == 'torch.cuda.FloatTensor': + fp32_params_this_group.append(param) + param_group['params'][i] = param + + else: + raise TypeError('Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(param.type())) + + self.float16_groups.append(float16_params_this_group) + self.fp32_from_float16_groups.append( + fp32_from_float16_params_this_group) + self.fp32_from_fp32_groups.append(fp32_params_this_group) + + + def zero_grad(self, set_to_none=True): + """We only need to zero the model related parameters, i.e., + float16_groups & fp32_from_fp32_groups. We additionally zero + fp32_from_float16_groups as a memory optimization to reduce + fragmentation; in the case of set_to_none==True, the space + used by this field can be safely deallocated at this point.""" + for group in self.float16_groups: + _zero_grad_group_helper(group, set_to_none) + for group in self.fp32_from_float16_groups: + _zero_grad_group_helper(group, set_to_none) + for group in self.fp32_from_fp32_groups: + _zero_grad_group_helper(group, set_to_none) + + + def _collect_main_grad_data_for_unscaling(self): + + main_grads = [] + + # fp32 params from float16 ones. + for main_group in self.fp32_from_float16_groups: + for main_param in main_group: + if main_param.grad is not None: + main_grads.append(main_param.grad.data) + + # Append fp32 parameters. 
+ for main_group in self.fp32_from_fp32_groups: + for main_param in main_group: + if main_param.grad is not None: + main_grads.append(main_param.grad.data) + + return main_grads + + + def _get_model_and_main_params_data_float16(self): + model_data = [] + main_data = [] + for model_group, main_group in zip(self.float16_groups, + self.fp32_from_float16_groups): + for model_param, main_param in zip(model_group, main_group): + model_data.append(model_param.data) + main_data.append(main_param.data) + return model_data, main_data + + + def _copy_model_grads_to_main_grads(self): + # This only needs to be done for the float16 group. + for model_group, main_group in zip(self.float16_groups, + self.fp32_from_float16_groups): + for model_param, main_param in zip(model_group, main_group): + if self.params_have_main_grad and hasattr(model_param, 'main_grad'): + main_param.grad = model_param.main_grad.float() + else: + if model_param.grad is not None: + main_param.grad = model_param.grad.float() + + # Safe to deallocate model's grad/main_grad after copying. + # (If using contiguous buffers, main_grad's memory should + # persist and therefore should not be deallocated.) + model_param.grad = None + if self.params_have_main_grad and \ + not self.use_contiguous_buffers_in_local_ddp: + model_param.main_grad = None + + # For fp32 grads, we need to reset the grads to main grad. + if self.params_have_main_grad: + for model_group in self.fp32_from_fp32_groups: + for model_param in model_group: + model_param.grad = model_param.main_grad + + # Safe to de-reference model's main_grad after copying. + # (If using contiguous buffers, main_grad's memory should + # persist and therefore should not be deallocated.) + if not self.use_contiguous_buffers_in_local_ddp: + model_param.main_grad = None + + + def _copy_main_params_to_model_params(self): + # Only needed for the float16 params. + model_data, main_data = self._get_model_and_main_params_data_float16() + _multi_tensor_copy_this_to_that(this=main_data, that=model_data, + overflow_buf=self._dummy_overflow_buf) + + + def _copy_model_params_to_main_params(self): + # Only needed for the float16 params. + model_data, main_data = self._get_model_and_main_params_data_float16() + _multi_tensor_copy_this_to_that(this=model_data, that=main_data, + overflow_buf=self._dummy_overflow_buf) + + + def state_dict(self): + state_dict = {} + state_dict['optimizer'] = self.optimizer.state_dict() + if self.grad_scaler: + state_dict['grad_scaler'] = self.grad_scaler.state_dict() + state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups + return state_dict + + + def load_state_dict(self, state_dict): + # Optimizer. + optimizer_key = 'optimizer' + if optimizer_key not in state_dict: + optimizer_key = 'optimizer_state_dict' + print_rank_0('***WARNING*** loading optimizer from ' + 'an old checkpoint ...') + self.optimizer.load_state_dict(state_dict[optimizer_key]) + + # Grad scaler. + if 'grad_scaler' not in state_dict: + if self.fp16: + print_rank_0('***WARNING*** found an old checkpoint, will not ' + 'load grad scaler ...') + else: + if self.grad_scaler: + self.grad_scaler.load_state_dict(state_dict['grad_scaler']) + else: + print_rank_0('***WARNING*** fould the grad scaler in the ' + 'checkpoint but it is None in the class. ' + 'Skipping loading grad scaler ...') + + # Copy data for the main params. 
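# Illustrative sketch (not from the Megatron source): the shape of the checkpoint
# this optimizer's state_dict() produces and the in-place copy performed when the
# fp32 main params are restored. The single group with one parameter is made up.
import torch

saved = {
    'optimizer': {},                                     # inner optimizer state (elided)
    'grad_scaler': {'scale': torch.tensor([65536.0])},   # present when a scaler is used
    'fp32_from_fp16_params': [[torch.full((3,), 0.5)]],  # main fp32 copies, per group
}

current_fp32_from_float16_groups = [[torch.zeros(3)]]
for current_group, saved_group in zip(current_fp32_from_float16_groups,
                                      saved['fp32_from_fp16_params']):
    for current_param, saved_param in zip(current_group, saved_group):
        current_param.data.copy_(saved_param.data)       # restore main params in place

print(current_fp32_from_float16_groups[0][0])            # tensor([0.5000, 0.5000, 0.5000])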
+ fp32_from_float16_params_key = 'fp32_from_fp16_params' + if fp32_from_float16_params_key not in state_dict: + fp32_from_float16_params_key = 'fp32_from_fp16' + for current_group, saved_group in zip( + self.fp32_from_float16_groups, + state_dict[fp32_from_float16_params_key]): + for current_param, saved_param in zip(current_group, saved_group): + current_param.data.copy_(saved_param.data) + + +class FP32Optimizer(MegatronOptimizer): + + def __init__(self, optimizer, clip_grad, + log_num_zeros_in_grad, + params_have_main_grad, + use_contiguous_buffers_in_local_ddp, + models): + + super(FP32Optimizer, self).__init__( + optimizer, clip_grad, log_num_zeros_in_grad, + params_have_main_grad, use_contiguous_buffers_in_local_ddp, + models) + + self._scale = torch.cuda.FloatTensor([1.0]) + + + def zero_grad(self, set_to_none=True): + """Copied from torch.optim.optimizer""" + for group in self.optimizer.param_groups: + _zero_grad_group_helper(group['params'], set_to_none) + + + def get_loss_scale(self): + """FP32 optimizer does not do any scaling.""" + return self._scale + + + @torch.no_grad() + def step(self, args, timers): + """Clip gradients (if needed) and step the base optimizer. + Always return successful since there is no overflow.""" + + # Copy main_grads to grads. + timers('optimizer-copy-to-main-grad', log_level=1).start( + barrier=args.barrier_with_L1_time) + if self.params_have_main_grad: + for param_group in self.optimizer.param_groups: + for param in param_group['params']: + param.grad = param.main_grad + + # Safe to de-reference model's main_grad after copying. + # (If using contiguous buffers, main_grad's memory should + # persist and therefore should not be deallocated.) + if not self.use_contiguous_buffers_in_local_ddp: + param.main_grad = None + timers('optimizer-copy-to-main-grad').stop() + + # Clip gradients. + timers('optimizer-clip-main-grad', log_level=1).start( + barrier=args.barrier_with_L1_time) + grad_norm = None + if self.clip_grad > 0.0: + grad_norm = self.clip_grad_norm(self.clip_grad) + timers('optimizer-clip-main-grad').stop() + + if grad_norm is not None and not math.isfinite(grad_norm): + print_rank_0(f'***WARNING*** Bad grad_norm detected (grad_norm={grad_norm})') + return False, grad_norm, None + + # count the zeros in the grads + timers('optimizer-count-zeros', log_level=1).start( + barrier=args.barrier_with_L1_time) + num_zeros_in_grad = self.count_zeros() if \ + self.log_num_zeros_in_grad else None + timers('optimizer-count-zeros').stop() + + # Update parameters. + timers('optimizer-inner-step', log_level=1).start( + barrier=args.barrier_with_L1_time) + self.optimizer.step() + timers('optimizer-inner-step').stop() + + # No overflow for FP32 optimizer. + return True, grad_norm, num_zeros_in_grad + + + def reload_model_params(self): + pass + + + def state_dict(self): + return self.optimizer.state_dict() + + + def load_state_dict(self, state_dict): + self.optimizer.load_state_dict(state_dict) diff --git a/multilinguality_megatron/megatron/optimizer_param_scheduler.py b/multilinguality_megatron/megatron/optimizer_param_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..1f0a648f07591cac35240847da5ebd099b2f74bb --- /dev/null +++ b/multilinguality_megatron/megatron/optimizer_param_scheduler.py @@ -0,0 +1,228 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""Learning rate decay and weight decay incr functions.""" + +import math + +from megatron import print_rank_0 + + +class OptimizerParamScheduler(object): + """Anneals learning rate and weight decay""" + + def __init__(self, optimizer, max_lr, min_lr, + lr_warmup_steps, lr_decay_steps, lr_decay_style, + start_wd, end_wd, wd_incr_steps, wd_incr_style, + use_checkpoint_opt_param_scheduler=True, + override_opt_param_scheduler=False): + + # Class values. + self.optimizer = optimizer + + self.max_lr = float(max_lr) + self.min_lr = min_lr + assert self.min_lr >= 0.0 + assert self.max_lr >= self.min_lr + + self.lr_warmup_steps = lr_warmup_steps + self.num_steps = 0 + self.lr_decay_steps = lr_decay_steps + assert self.lr_decay_steps > 0 + assert self.lr_warmup_steps < self.lr_decay_steps + + self.lr_decay_style = lr_decay_style + + self.start_wd = start_wd + self.end_wd = end_wd + assert self.start_wd >= 0.0 + assert self.end_wd >= self.start_wd + self.wd_incr_steps = wd_incr_steps + self.wd_incr_style = wd_incr_style + + self.override_opt_param_scheduler = override_opt_param_scheduler + self.use_checkpoint_opt_param_scheduler = use_checkpoint_opt_param_scheduler + if self.override_opt_param_scheduler: + assert not self.use_checkpoint_opt_param_scheduler, 'both override and '\ + 'use-checkpoint are set.' + + # Set the learning rate + self.step(0) + print_rank_0('> learning rate decay style: {}'.format(self.lr_decay_style)) + + + def get_wd(self): + """ Weight decay incr functions""" + if self.num_steps > self.wd_incr_steps: + return self.end_wd + + if self.wd_incr_style == 'constant': + assert self.start_wd == self.end_wd + return self.end_wd + + incr_ratio = float(self.num_steps) / float(self.wd_incr_steps) + assert incr_ratio >= 0.0 + assert incr_ratio <= 1.0 + delta_wd = self.end_wd - self.start_wd + + if self.wd_incr_style == 'linear': + coeff = incr_ratio + elif self.wd_incr_style == 'cosine': + coeff = 0.5 * (math.cos(math.pi * (1 - incr_ratio)) + 1.0) + else: + raise Exception('{} weight decay increment style is not supported.'.format( + self.wd_incr_style)) + + return self.start_wd + coeff * delta_wd + + + def get_lr(self): + """Learning rate decay functions from: + https://openreview.net/pdf?id=BJYwwY9ll pg. 4""" + + # Use linear warmup for the initial part. + if self.lr_warmup_steps > 0 and self.num_steps <= self.lr_warmup_steps: + return self.max_lr * float(self.num_steps) / \ + float(self.lr_warmup_steps) + + # If the learning rate is constant, just return the initial value. + if self.lr_decay_style == 'constant': + return self.max_lr + + # For any steps larger than `self.lr_decay_steps`, use `self.min_lr`. + if self.num_steps > self.lr_decay_steps: + return self.min_lr + + # If we are done with the warmup period, use the decay style. 
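# Illustrative sketch (not from the Megatron source): the warmup + cosine branch
# of get_lr() as a standalone function, evaluated at a few sample steps. The
# hyperparameter values are made up; the other decay styles follow the same
# pattern with a different coefficient.
import math

def lr_at(step, max_lr=3e-4, min_lr=3e-5, warmup_steps=100, decay_steps=1000):
    if step <= warmup_steps:
        return max_lr * step / warmup_steps              # linear warmup
    if step > decay_steps:
        return min_lr                                     # floor once decay is finished
    decay_ratio = (step - warmup_steps) / (decay_steps - warmup_steps)
    coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)
    return min_lr + coeff * (max_lr - min_lr)

for step in (0, 50, 100, 550, 1000, 2000):
    print(step, round(lr_at(step), 8))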
+ if self.lr_decay_style == 'inverse-square-root': + warmup_steps = max(self.lr_warmup_steps, 1) + num_steps = max(self.num_steps, 1) + lr = self.max_lr * warmup_steps ** 0.5 / (num_steps ** 0.5) + return max(self.min_lr, lr) + + num_steps_ = self.num_steps - self.lr_warmup_steps + decay_steps_ = self.lr_decay_steps - self.lr_warmup_steps + decay_ratio = float(num_steps_) / float(decay_steps_) + assert decay_ratio >= 0.0 + assert decay_ratio <= 1.0 + delta_lr = self.max_lr - self.min_lr + + if self.lr_decay_style == 'linear': + coeff = (1.0 - decay_ratio) + elif self.lr_decay_style == 'cosine': + coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0) + else: + raise Exception('{} decay style is not supported.'.format( + self.lr_decay_style)) + + return self.min_lr + coeff * delta_lr + + + def step(self, increment): + """Set lr for all parameters groups.""" + self.num_steps += increment + new_lr = self.get_lr() + new_wd = self.get_wd() + for group in self.optimizer.param_groups: + group['lr'] = new_lr * group.get('lr_mult', 1.0) + group['weight_decay'] = new_wd * group.get('wd_mult', 1.0) + + + def state_dict(self): + state_dict = { + 'max_lr': self.max_lr, + 'lr_warmup_steps': self.lr_warmup_steps, + 'num_steps': self.num_steps, + 'lr_decay_style': self.lr_decay_style, + 'lr_decay_steps': self.lr_decay_steps, + 'min_lr': self.min_lr, + 'start_wd': self.start_wd, + 'end_wd': self.end_wd, + 'wd_incr_style': self.wd_incr_style, + 'wd_incr_steps': self.wd_incr_steps + } + return state_dict + + + def _check_and_set(self, cls_value, sd_value, name): + """Auxiliary function for checking the values in the checkpoint and + setting them.""" + if self.override_opt_param_scheduler: + print_rank_0(' > overriding {} value to {}'.format(name, cls_value)) + return cls_value + + if not self.use_checkpoint_opt_param_scheduler: + assert cls_value == sd_value, \ + f'OptimizerParamScheduler: class input value {cls_value} and checkpoint' \ + f'value {sd_value} for {name} do not match' + print_rank_0(' > using checkpoint value {} for {}'.format(sd_value, + name)) + return sd_value + + + def load_state_dict(self, sd): + + if 'start_lr' in sd: + max_lr_ = sd['start_lr'] + else: + max_lr_ = sd['max_lr'] + self.max_lr = self._check_and_set(self.max_lr, max_lr_, + 'learning rate') + + self.min_lr = self._check_and_set(self.min_lr, sd['min_lr'], + 'minimum learning rate') + + if 'warmup_iter' in sd: + lr_warmup_steps_ = sd['warmup_iter'] + elif 'warmup_steps' in sd: + lr_warmup_steps_ = sd['warmup_steps'] + else: + lr_warmup_steps_ = sd['lr_warmup_steps'] + self.lr_warmup_steps = self._check_and_set(self.lr_warmup_steps, + lr_warmup_steps_, + 'warmup iterations') + + if 'end_iter' in sd: + lr_decay_steps_ = sd['end_iter'] + elif 'decay_steps' in sd: + lr_decay_steps_ = sd['decay_steps'] + else: + lr_decay_steps_ = sd['lr_decay_steps'] + self.lr_decay_steps = self._check_and_set(self.lr_decay_steps, lr_decay_steps_, + 'total number of iterations') + + if 'decay_style' in sd: + lr_decay_style_ = sd['decay_style'] + else: + lr_decay_style_ = sd['lr_decay_style'] + self.lr_decay_style = self._check_and_set(self.lr_decay_style, + lr_decay_style_, + 'learning rate decay style') + + if 'num_iters' in sd: + num_steps = sd['num_iters'] + else: + num_steps = sd['num_steps'] + self.step(increment=num_steps) + + + if 'start_wd' in sd: + self.start_wd = self._check_and_set(self.start_wd, + sd['start_wd'], + "start weight decay") + self.end_wd = self._check_and_set(self.end_wd, + sd['end_wd'], + "end weight decay") + 
self.wd_incr_steps = self._check_and_set(self.wd_incr_steps, + sd['wd_incr_steps'], + "total number of weight decay iterations") + self.wd_incr_style = self._check_and_set(self.wd_incr_style, + sd['wd_incr_style'], + "weight decay incr style") + + + + + + + + diff --git a/multilinguality_megatron/megatron/p2p_communication.py b/multilinguality_megatron/megatron/p2p_communication.py new file mode 100644 index 0000000000000000000000000000000000000000..5f58df6fd420552a42dc6529dec51b142a0045d0 --- /dev/null +++ b/multilinguality_megatron/megatron/p2p_communication.py @@ -0,0 +1,405 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from functools import reduce +import operator +import torch + +from megatron import get_args, core +from megatron.core import mpu + + +def _communicate_shapes(tensor_send_next, tensor_send_prev, + recv_prev, recv_next): + """Communicate tensor shapes between stages. Used to communicate + tensor shapes before the actual tensor communication happens. + This is required when the sequence lengths across micro batches + are not uniform. + + Takes the following arguments: + tensor_send_next: tensor to send to next rank (no tensor sent if + set to None). + tensor_send_prev: tensor to send to prev rank (no tensor sent if + set to None). + recv_prev: boolean for whether tensor should be received from + previous rank. + recv_next: boolean for whether tensor should be received from + next rank. + Returns: + (recv_prev_shape, recv_next_shape) + """ + + args = get_args() + recv_prev_shape_tensor = None + recv_next_shape_tensor = None + send_prev_shape_tensor = None + send_next_shape_tensor = None + if recv_prev: + recv_prev_shape_tensor = torch.empty((3), + device=torch.cuda.current_device(), + dtype=torch.int64) + if recv_next: + recv_next_shape_tensor = torch.empty((3), + device=torch.cuda.current_device(), + dtype=torch.int64) + if tensor_send_prev is not None: + send_prev_shape_tensor = torch.tensor(tensor_send_prev.size(), + device=torch.cuda.current_device(), + dtype=torch.int64) + if tensor_send_next is not None: + send_next_shape_tensor = torch.tensor(tensor_send_next.size(), + device=torch.cuda.current_device(), + dtype=torch.int64) + + if args.use_ring_exchange_p2p: + torch.distributed.ring_exchange(tensor_send_prev=send_prev_shape_tensor, + tensor_recv_prev=recv_prev_shape_tensor, + tensor_send_next=send_next_shape_tensor, + tensor_recv_next=recv_next_shape_tensor, + group=mpu.get_pipeline_model_parallel_group()) + else: + ops = [] + if send_prev_shape_tensor is not None: + send_prev_op = torch.distributed.P2POp( + torch.distributed.isend, send_prev_shape_tensor, + mpu.get_pipeline_model_parallel_prev_rank()) + ops.append(send_prev_op) + if recv_prev_shape_tensor is not None: + recv_prev_op = torch.distributed.P2POp( + torch.distributed.irecv, recv_prev_shape_tensor, + mpu.get_pipeline_model_parallel_prev_rank()) + ops.append(recv_prev_op) + if send_next_shape_tensor is not None: + send_next_op = torch.distributed.P2POp( + torch.distributed.isend, send_next_shape_tensor, + mpu.get_pipeline_model_parallel_next_rank()) + ops.append(send_next_op) + if recv_next_shape_tensor is not None: + recv_next_op = torch.distributed.P2POp( + torch.distributed.irecv, recv_next_shape_tensor, + mpu.get_pipeline_model_parallel_next_rank()) + ops.append(recv_next_op) + if len(ops) > 0: + reqs = torch.distributed.batch_isend_irecv(ops) + for req in reqs: + req.wait() + + # To protect against race condition when using batch_isend_irecv(). 
+ # should take this out once the bug with batch_isend_irecv is resolved. + torch.cuda.synchronize() + + recv_prev_shape = [0, 0, 0] + if recv_prev_shape_tensor is not None: + recv_prev_shape = recv_prev_shape_tensor.tolist() + + recv_next_shape = [0, 0, 0] + if recv_next_shape_tensor is not None: + recv_next_shape = recv_next_shape_tensor.tolist() + + return recv_prev_shape, recv_next_shape + + +def _communicate(tensor_send_next, tensor_send_prev, recv_prev, recv_next, + tensor_shape, + dtype_=None): + """Communicate tensors between stages. Used as helper method in other + communication methods that are used in megatron/schedules.py. + + Takes the following arguments: + tensor_send_next: tensor to send to next rank (no tensor sent if + set to None). + tensor_send_prev: tensor to send to prev rank (no tensor sent if + set to None). + recv_prev: boolean for whether tensor should be received from + previous rank. + recv_next: boolean for whether tensor should be received from + next rank. + tensor_shape: shape of tensor to receive (this method assumes that all + tensors sent and received in a single function call are + the same shape). + dtype_: optional, this is used when the tensor that needs to be + communicated is different from args.params_dtype. + Returns: + (tensor_recv_prev, tensor_recv_next) + """ + args = get_args() + + # Create placeholder tensors for receive in forward and backward directions + # if needed. + tensor_recv_prev = None + tensor_recv_next = None + + # Some legacy inference code doesn't set the tensor shape, do so now + # for the normal values for gpt/bert. This could be removed if inference + # code is changed to provide tensor_shape. + if not args.variable_seq_lengths: + if tensor_shape is None: + recv_prev_shape = (args.seq_length, args.micro_batch_size, args.hidden_size) + recv_next_shape = (args.seq_length, args.micro_batch_size, args.hidden_size) + else: + recv_prev_shape = tensor_shape + recv_next_shape = tensor_shape + else: + recv_prev_shape, recv_next_shape = \ + _communicate_shapes(tensor_send_next, + tensor_send_prev, + recv_prev, + recv_next) + + override_scatter_gather_tensors_in_pipeline = False + if args.scatter_gather_tensors_in_pipeline and \ + not args.sequence_parallel: + recv_prev_chunk_shape = reduce(operator.mul, recv_prev_shape, 1) + recv_next_chunk_shape = reduce(operator.mul, recv_next_shape, 1) + if recv_prev_chunk_shape % mpu.get_tensor_model_parallel_world_size() == 0 and \ + recv_next_chunk_shape % mpu.get_tensor_model_parallel_world_size() == 0: + recv_prev_chunk_shape = recv_prev_chunk_shape // \ + mpu.get_tensor_model_parallel_world_size() + recv_next_chunk_shape = recv_next_chunk_shape // \ + mpu.get_tensor_model_parallel_world_size() + else: + recv_prev_chunk_shape = recv_prev_shape + recv_next_chunk_shape = recv_next_shape + override_scatter_gather_tensors_in_pipeline = True + else: + recv_prev_chunk_shape = recv_prev_shape + recv_next_chunk_shape = recv_next_shape + + dtype = args.params_dtype + if args.fp32_residual_connection: + dtype = torch.float + + requires_grad = True + if dtype_ is not None: + dtype = dtype_ + requires_grad = False + + if recv_prev: + tensor_recv_prev = torch.empty(recv_prev_chunk_shape, + requires_grad=requires_grad, + device=torch.cuda.current_device(), + dtype=dtype) + if recv_next: + tensor_recv_next = torch.empty(recv_next_chunk_shape, + requires_grad=requires_grad, + device=torch.cuda.current_device(), + dtype=dtype) + + # Split tensor into smaller chunks if using scatter-gather optimization. 
+ if not override_scatter_gather_tensors_in_pipeline and \ + args.scatter_gather_tensors_in_pipeline and \ + not args.sequence_parallel: + if tensor_send_next is not None: + tensor_send_next = core.tensor_parallel.split_tensor_into_1d_equal_chunks(tensor_send_next) + + if tensor_send_prev is not None: + tensor_send_prev = core.tensor_parallel.split_tensor_into_1d_equal_chunks(tensor_send_prev) + + # Send tensors in both the forward and backward directions as appropriate. + if args.use_ring_exchange_p2p: + torch.distributed.ring_exchange(tensor_send_prev=tensor_send_prev, + tensor_recv_prev=tensor_recv_prev, + tensor_send_next=tensor_send_next, + tensor_recv_next=tensor_recv_next, + group=mpu.get_pipeline_model_parallel_group()) + else: + ops = [] + if tensor_send_prev is not None: + send_prev_op = torch.distributed.P2POp( + torch.distributed.isend, tensor_send_prev, + mpu.get_pipeline_model_parallel_prev_rank()) + ops.append(send_prev_op) + if tensor_recv_prev is not None: + recv_prev_op = torch.distributed.P2POp( + torch.distributed.irecv, tensor_recv_prev, + mpu.get_pipeline_model_parallel_prev_rank()) + ops.append(recv_prev_op) + if tensor_send_next is not None: + send_next_op = torch.distributed.P2POp( + torch.distributed.isend, tensor_send_next, + mpu.get_pipeline_model_parallel_next_rank()) + ops.append(send_next_op) + if tensor_recv_next is not None: + recv_next_op = torch.distributed.P2POp( + torch.distributed.irecv, tensor_recv_next, + mpu.get_pipeline_model_parallel_next_rank()) + ops.append(recv_next_op) + if len(ops) > 0: + reqs = torch.distributed.batch_isend_irecv(ops) + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv(). + torch.cuda.synchronize() + + # If using scatter-gather optimization, gather smaller chunks. 
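# Illustrative sketch (not from the Megatron source): the scatter-gather
# optimization simulated locally. Each tensor-parallel rank sends only its own
# 1/tp_world_size chunk of the flattened activation; the receiving side gathers
# the chunks and views them back into the original (seq, batch, hidden) shape,
# as the gather code below does with gather_split_1d_tensor.
import torch

tp_world_size = 4
full = torch.randn(8, 2, 16)                        # (seq_len, micro_batch, hidden)
flat = full.view(-1)
chunk = flat.numel() // tp_world_size               # numel must divide evenly

sent_chunks = [flat[r * chunk:(r + 1) * chunk].clone() for r in range(tp_world_size)]

# Receiver side: gather the chunks and restore the original shape.
gathered = torch.cat(sent_chunks).view(full.shape)
assert torch.equal(gathered, full)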
+ if not override_scatter_gather_tensors_in_pipeline and \ + args.scatter_gather_tensors_in_pipeline and \ + not args.sequence_parallel: + if recv_prev: + tensor_recv_prev = core.tensor_parallel.gather_split_1d_tensor( + tensor_recv_prev).view(recv_prev_shape).requires_grad_() + tensor_recv_prev = core.utils.make_viewless_tensor(tensor_recv_prev, + requires_grad=True, + keep_graph=False) + + if recv_next: + tensor_recv_next = core.tensor_parallel.gather_split_1d_tensor( + tensor_recv_next).view(recv_next_shape).requires_grad_() + tensor_recv_next = core.utils.make_viewless_tensor(tensor_recv_next, + requires_grad=True, + keep_graph=False) + + return tensor_recv_prev, tensor_recv_next + + +def recv_forward(tensor_shape=None, dtype_=None, timers=None): + """Receive tensor from previous rank in pipeline (forward receive).""" + + if mpu.is_pipeline_first_stage(): + input_tensor = None + else: + if timers is not None: + timers('forward-recv', log_level=2).start() + input_tensor, _ = _communicate( + tensor_send_next=None, + tensor_send_prev=None, + recv_prev=True, + recv_next=False, + tensor_shape=tensor_shape, + dtype_=dtype_) + if timers is not None: + timers('forward-recv').stop() + return input_tensor + + +def recv_backward(tensor_shape=None, timers=None): + """Receive tensor from next rank in pipeline (backward receive).""" + if mpu.is_pipeline_last_stage(): + output_tensor_grad = None + else: + if timers is not None: + timers('backward-recv', log_level=2).start() + _, output_tensor_grad = _communicate( + tensor_send_next=None, + tensor_send_prev=None, + recv_prev=False, + recv_next=True, + tensor_shape=tensor_shape) + if timers is not None: + timers('backward-recv').stop() + return output_tensor_grad + + +def send_forward(output_tensor, tensor_shape=None, dtype_=None, timers=None): + """Send tensor to next rank in pipeline (forward send).""" + + if not mpu.is_pipeline_last_stage(): + if timers is not None: + timers('forward-send', log_level=2).start() + _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=False, + recv_next=False, + tensor_shape=tensor_shape, + dtype_=dtype_) + if timers is not None: + timers('forward-send').stop() + + +def send_backward(input_tensor_grad, tensor_shape=None, timers=None): + """Send tensor to previous rank in pipeline (backward send).""" + if not mpu.is_pipeline_first_stage(): + if timers is not None: + timers('backward-send', log_level=2).start() + _communicate( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=False, + recv_next=False, + tensor_shape=tensor_shape) + if timers is not None: + timers('backward-send').stop() + + +def send_forward_recv_backward(output_tensor, tensor_shape=None, timers=None): + """Batched send and recv with next rank in pipeline.""" + if mpu.is_pipeline_last_stage(): + output_tensor_grad = None + else: + if timers is not None: + timers('forward-send-backward-recv', log_level=2).start() + _, output_tensor_grad = _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=False, + recv_next=True, + tensor_shape=tensor_shape) + if timers is not None: + timers('forward-send-backward-recv').stop() + return output_tensor_grad + + +def send_backward_recv_forward(input_tensor_grad, tensor_shape=None, timers=None): + """Batched send and recv with previous rank in pipeline.""" + if mpu.is_pipeline_first_stage(): + input_tensor = None + else: + if timers is not None: + timers('backward-send-forward-recv', log_level=2).start() + input_tensor, _ = 
_communicate( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=True, + recv_next=False, + tensor_shape=tensor_shape) + if timers is not None: + timers('backward-send-forward-recv').stop() + return input_tensor + + +def send_forward_recv_forward(output_tensor, recv_prev, tensor_shape=None, timers=None): + """Batched recv from previous rank and send to next rank in pipeline.""" + if timers is not None: + timers('forward-send-forward-recv', log_level=2).start() + input_tensor, _ = _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=recv_prev, + recv_next=False, + tensor_shape=tensor_shape) + if timers is not None: + timers('forward-send-forward-recv').stop() + return input_tensor + + +def send_backward_recv_backward(input_tensor_grad, recv_next, tensor_shape=None, timers=None): + """Batched recv from next rank and send to previous rank in pipeline.""" + if timers is not None: + timers('backward-send-backward-recv', log_level=2).start() + _, output_tensor_grad = _communicate( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=False, + recv_next=recv_next, + tensor_shape=tensor_shape) + if timers is not None: + timers('backward-send-backward-recv').stop() + return output_tensor_grad + + +def send_forward_backward_recv_forward_backward( + output_tensor, input_tensor_grad, recv_prev, + recv_next, tensor_shape=None, timers=None): + """Batched send and recv with previous and next ranks in pipeline.""" + if timers is not None: + timers('forward-backward-send-forward-backward-recv', + log_level=2).start() + input_tensor, output_tensor_grad = _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=input_tensor_grad, + recv_prev=recv_prev, + recv_next=recv_next, + tensor_shape=tensor_shape) + if timers is not None: + timers('forward-backward-send-forward-backward-recv').stop() + return input_tensor, output_tensor_grad diff --git a/multilinguality_megatron/megatron/schedules.py b/multilinguality_megatron/megatron/schedules.py new file mode 100644 index 0000000000000000000000000000000000000000..2034b753ea9a0e89ab560d83fb913543e411a3c2 --- /dev/null +++ b/multilinguality_megatron/megatron/schedules.py @@ -0,0 +1,722 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
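# Illustrative sketch (not from the Megatron source, though it mirrors the
# deallocate_output_tensor / custom_backward pair defined in this file): once a
# stage's output has been sent downstream, only its grad_fn is still needed, so
# its '.data' can be replaced by a 1-element placeholder, and the backward pass
# is then driven through the C++ engine, which skips the shape checks that
# torch.autograd.backward would apply.
import torch
from torch.autograd.variable import Variable

x = torch.randn(4, requires_grad=True)
out = (3.0 * x).sum()                    # stand-in for a pipeline stage's output
grad_from_next_stage = torch.ones(1)     # what recv_backward would hand us

out.data = torch.empty((1,), device=out.device, dtype=out.dtype)   # pseudo-free

Variable._execution_engine.run_backward(
    tensors=(out,),
    grad_tensors=(grad_from_next_stage,),
    keep_graph=False,
    create_graph=False,
    inputs=tuple(),
    allow_unreachable=True,
    accumulate_grad=True,
)
print(x.grad)                            # tensor([3., 3., 3., 3.])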
+ +from contextlib import contextmanager +import torch +from torch.autograd.variable import Variable +from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP + +from megatron import get_args +from megatron import get_num_microbatches +from megatron import p2p_communication +from megatron.core import mpu +from megatron.utils import unwrap_model +from megatron.model import DistributedDataParallel as LocalDDP +from megatron.model import Float16Module +from megatron.model import ModelType + + +def get_forward_backward_func(): + args = get_args() + if mpu.get_pipeline_model_parallel_world_size() > 1: + if args.virtual_pipeline_model_parallel_size is not None: + forward_backward_func = forward_backward_pipelining_with_interleaving + assert get_num_microbatches() % \ + args.pipeline_model_parallel_size == 0, \ + 'number of microbatches (%d) is not divisible by pipeline-' \ + 'model-parallel-size (%d) when using interleaved schedule' % ( + get_num_microbatches(), + args.pipeline_model_parallel_size, + ) + else: + forward_backward_func = forward_backward_pipelining_without_interleaving + else: + forward_backward_func = forward_backward_no_pipelining + return forward_backward_func + +def deallocate_output_tensor(out): + '''Pseudo-deallocate (i.e., set to scalar) the output tensor's '.data' field. + + This method should be called right after the output tensor has been + sent to the next pipeline stage. At this point, the output tensor is + only useful for its '.grad_fn' field, and not its '.data'. + ''' + if out is None: + return + assert isinstance(out, torch.Tensor), \ + "expected Tensor, found %s." % type(out).__name__ + assert out._base is None, \ + "counter-productive to free a view of another tensor." + out.data = torch.empty( + (1,), + device = out.device, + dtype = out.dtype, + ) + +def custom_backward(output, grad_output): + '''Directly call C++ autograd engine. + + To make the 'deallocate_output_tensor' (above) optimization work, the C++ + autograd engine must be called directly, bypassing Pytorch's + torch.autograd.backward. Pytorch's 'backward' checks that the output and + grad have the same shape, while C++'s 'backward' does not. + ''' + + assert output.numel() == 1, \ + "output should be pseudo-'freed' in schedule, to optimize memory" + assert isinstance(output, torch.Tensor), \ + "output == '%s'." % type(output).__name__ + assert isinstance(grad_output, (torch.Tensor, type(None))), \ + "grad_output == '%s'." % type(grad_output).__name__ + + # Handle scalar output + if grad_output is None: + assert output.numel() == 1, "implicit grad requires scalar output." + grad_output = torch.ones_like( + output, + memory_format = torch.preserve_format, + ) + + # Call c++ engine [ see torch/csrc/autograd/python_engine.cpp ] + Variable._execution_engine.run_backward( + tensors = (output,), + grad_tensors = (grad_output,), + keep_graph = False, + create_graph = False, + inputs = tuple(), + allow_unreachable=True, + accumulate_grad=True, + ) + + +def forward_step(forward_step_func, + data_iterator, + model, + input_tensor, + forward_data_store, + timers, + collect_non_loss_data=False): + """Forward step for passed-in model. + + If first stage, input tensor is obtained from data_iterator, otherwise + passed-in input_tensor is used. 
+ + Returns output tensor.""" + args = get_args() + + if timers is not None: + timers('forward-compute', log_level=2).start() + unwrapped_model = unwrap_model( + model, (torchDDP, LocalDDP, Float16Module)) + + unwrap_output_tensor = False + if not isinstance(input_tensor, list): + input_tensor = [input_tensor] + unwrap_output_tensor = True + + unwrapped_model.set_input_tensor(input_tensor) + output_tensor, loss_func = forward_step_func(data_iterator, model) + if mpu.is_pipeline_last_stage(): + if not collect_non_loss_data: + output_tensor = loss_func(output_tensor) + loss, loss_reduced = output_tensor + output_tensor = loss / get_num_microbatches() + forward_data_store.append(loss_reduced) + else: + data = loss_func(output_tensor, non_loss_data=True) + forward_data_store.append(data) + + if timers is not None: + timers('forward-compute').stop() + + # If T5 model (or other model with encoder and decoder) + # and in decoder stack, then send encoder_hidden_state + # downstream as well. + if mpu.is_pipeline_stage_after_split() and \ + args.model_type == ModelType.encoder_and_decoder: + return [output_tensor, input_tensor[-1]] + if unwrap_output_tensor: + return output_tensor + return [output_tensor] + + +def backward_step(optimizer, input_tensor, output_tensor, + output_tensor_grad, timers): + """Backward step through passed-in output tensor. + + If last stage, output_tensor_grad is None, otherwise gradient of loss + with respect to stage's output tensor. + + Returns gradient of loss with respect to input tensor (None if first + stage).""" + + # NOTE: This code currently can handle at most one skip connection. It + # needs to be modified slightly to support arbitrary numbers of skip + # connections. + args = get_args() + + if timers is not None: + timers('backward-compute', log_level=2).start() + + # Retain the grad on the input_tensor. + unwrap_input_tensor_grad = False + if not isinstance(input_tensor, list): + input_tensor = [input_tensor] + unwrap_input_tensor_grad = True + for x in input_tensor: + if x is not None: + x.retain_grad() + + if not isinstance(output_tensor, list): + output_tensor = [output_tensor] + if not isinstance(output_tensor_grad, list): + output_tensor_grad = [output_tensor_grad] + + # Backward pass. + if output_tensor_grad[0] is None: + output_tensor = optimizer.scale_loss(output_tensor[0]) + custom_backward(output_tensor[0], output_tensor_grad[0]) + + # Collect the grad of the input_tensor. + input_tensor_grad = [None] + if input_tensor is not None: + input_tensor_grad = [] + for x in input_tensor: + if x is None: + input_tensor_grad.append(None) + else: + input_tensor_grad.append(x.grad) + + # Handle single skip connection if it exists (encoder_hidden_state in + # model with encoder and decoder). + if mpu.get_pipeline_model_parallel_world_size() > 1 and \ + mpu.is_pipeline_stage_after_split() and \ + args.model_type == ModelType.encoder_and_decoder: + if output_tensor_grad[1] is not None: + input_tensor_grad[-1].add_(output_tensor_grad[1]) + if unwrap_input_tensor_grad: + input_tensor_grad = input_tensor_grad[0] + + if timers is not None: + timers('backward-compute').stop() + + return input_tensor_grad + + +@contextmanager +def dummy_handler(): + try: + yield + finally: + pass + + +def forward_backward_no_pipelining(forward_step_func, + data_iterator, model, + optimizer, + timers, + forward_only, + collect_non_loss_data=False): + """Run forward and backward passes with no pipeline parallelism + (no inter-stage communication). 
+ + Returns dictionary with losses.""" + assert len(model) == 1 + model = model[0] + + context_handler = dummy_handler + if isinstance(model, torchDDP): + context_handler = model.no_sync + + forward_data_store = [] + input_tensor, output_tensor_grad = None, None + with context_handler(): + for i in range(get_num_microbatches() - 1): + output_tensor = forward_step(forward_step_func, data_iterator, + model, input_tensor, forward_data_store, + timers, collect_non_loss_data) + if not forward_only: + backward_step(optimizer, input_tensor, output_tensor, + output_tensor_grad, timers) + + # Run computation for last microbatch out of context handler (want to + # synchronize gradients). + output_tensor = forward_step(forward_step_func, data_iterator, + model, input_tensor, forward_data_store, + timers, collect_non_loss_data) + if not forward_only: + backward_step(optimizer, input_tensor, output_tensor, + output_tensor_grad, timers) + + return forward_data_store + + +def forward_backward_pipelining_with_interleaving(forward_step_func, + data_iterator, model, + optimizer, + timers, + forward_only, + collect_non_loss_data=False): + """Run interleaved 1F1B schedule (model split into model chunks), with + communication between pipeline stages as needed. + + Returns dictionary with losses if the last stage, empty dict otherwise.""" + + args = get_args() + + input_tensors = [[] for _ in range(len(model))] + output_tensors = [[] for _ in range(len(model))] + forward_data_store = [] + if not forward_only: + output_tensor_grads = [[] for _ in range(len(model))] + + pipeline_parallel_size = mpu.get_pipeline_model_parallel_world_size() + pipeline_parallel_rank = mpu.get_pipeline_model_parallel_rank() + + if args.sequence_parallel: + seq_length = args.seq_length // mpu.get_tensor_model_parallel_world_size() + else: + seq_length = args.seq_length + tensor_shape = (seq_length, args.micro_batch_size, args.hidden_size) + + # Compute number of warmup and remaining microbatches. + num_model_chunks = len(model) + num_microbatches = get_num_microbatches() * num_model_chunks + all_warmup_microbatches = False + if forward_only: + num_warmup_microbatches = num_microbatches + else: + # Run all forward passes and then all backward passes if number of + # microbatches is just the number of pipeline stages. + # Otherwise, perform (num_model_chunks-1)*pipeline_parallel_size on + # all workers, followed by more microbatches after depending on + # stage ID (more forward passes for earlier stages, later stages can + # immediately start with 1F1B). 
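+        # For example (illustrative numbers only): with pipeline_parallel_size=4,
+        # pipeline_parallel_rank=1, num_model_chunks=2 and get_num_microbatches()=8,
+        # the else-branch below gives (4 - 1 - 1) * 2 + (2 - 1) * 4 = 8 warmup
+        # microbatches out of num_microbatches = 8 * 2 = 16, leaving 8 for the
+        # steady-state 1F1B loop.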
+ if get_num_microbatches() == pipeline_parallel_size: + num_warmup_microbatches = num_microbatches + all_warmup_microbatches = True + else: + num_warmup_microbatches = \ + (pipeline_parallel_size - pipeline_parallel_rank - 1) * 2 + num_warmup_microbatches += ( + num_model_chunks - 1) * pipeline_parallel_size + num_warmup_microbatches = min(num_warmup_microbatches, + num_microbatches) + num_microbatches_remaining = \ + num_microbatches - num_warmup_microbatches + + def get_model_chunk_id(microbatch_id, forward): + """Helper method to get the model chunk ID given the iteration number.""" + microbatch_id_in_group = microbatch_id % (pipeline_parallel_size * num_model_chunks) + model_chunk_id = microbatch_id_in_group // pipeline_parallel_size + if not forward: + model_chunk_id = (num_model_chunks - model_chunk_id - 1) + return model_chunk_id + + def forward_step_helper(microbatch_id): + """Helper method to run forward step with model split into chunks + (run set_virtual_pipeline_model_parallel_rank() before calling + forward_step()).""" + model_chunk_id = get_model_chunk_id(microbatch_id, forward=True) + mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id) + + # forward step + if mpu.is_pipeline_first_stage(): + if len(input_tensors[model_chunk_id]) == \ + len(output_tensors[model_chunk_id]): + input_tensors[model_chunk_id].append(None) + input_tensor = input_tensors[model_chunk_id][-1] + output_tensor = forward_step(forward_step_func, + data_iterator[model_chunk_id], + model[model_chunk_id], + input_tensor, + forward_data_store, + timers, + collect_non_loss_data) + output_tensors[model_chunk_id].append(output_tensor) + + # if forward-only, no need to save tensors for a backward pass + if forward_only: + input_tensors[model_chunk_id].pop() + output_tensors[model_chunk_id].pop() + + return output_tensor + + def backward_step_helper(microbatch_id): + """Helper method to run backward step with model split into chunks + (run set_virtual_pipeline_model_parallel_rank() before calling + backward_step()).""" + model_chunk_id = get_model_chunk_id(microbatch_id, forward=False) + mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id) + + if mpu.is_pipeline_last_stage(): + if len(output_tensor_grads[model_chunk_id]) == 0: + output_tensor_grads[model_chunk_id].append(None) + input_tensor = input_tensors[model_chunk_id].pop(0) + output_tensor = output_tensors[model_chunk_id].pop(0) + output_tensor_grad = output_tensor_grads[model_chunk_id].pop(0) + input_tensor_grad = \ + backward_step(optimizer, + input_tensor, + output_tensor, + output_tensor_grad, + timers) + + return input_tensor_grad + + # Run warmup forward passes. + mpu.set_virtual_pipeline_model_parallel_rank(0) + input_tensors[0].append( + p2p_communication.recv_forward(tensor_shape, timers=timers)) + for k in range(num_warmup_microbatches): + output_tensor = forward_step_helper(k) + + # Determine if tensor should be received from previous stage. + next_forward_model_chunk_id = get_model_chunk_id(k+1, forward=True) + recv_prev = True + if mpu.is_pipeline_first_stage(ignore_virtual=True): + if next_forward_model_chunk_id == 0: + recv_prev = False + if k == (num_microbatches - 1): + recv_prev = False + + # Don't send tensor downstream if on last stage. + if mpu.is_pipeline_last_stage(): + output_tensor = None + + # Send and receive tensors as appropriate (send tensors computed + # in this iteration; receive tensors for next iteration). 
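+        # On the final warmup step (unless every microbatch is a warmup step),
+        # the combined send/recv below also posts the first backward receive,
+        # so the steady-state 1F1B loop can start without an extra round trip.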
+ if k == (num_warmup_microbatches - 1) and not forward_only and \ + not all_warmup_microbatches: + input_tensor_grad = None + recv_next = True + if mpu.is_pipeline_last_stage(ignore_virtual=True): + recv_next = False + input_tensor, output_tensor_grad = \ + p2p_communication.send_forward_backward_recv_forward_backward( + output_tensor, input_tensor_grad, + recv_prev=recv_prev, recv_next=recv_next, + tensor_shape=tensor_shape, + timers=timers) + output_tensor_grads[num_model_chunks-1].append(output_tensor_grad) + else: + input_tensor = \ + p2p_communication.send_forward_recv_forward( + output_tensor, recv_prev=recv_prev, + tensor_shape=tensor_shape, + timers=timers) + input_tensors[next_forward_model_chunk_id].append(input_tensor) + deallocate_output_tensor(output_tensor) + + # Run 1F1B in steady state. + for k in range(num_microbatches_remaining): + # Forward pass. + forward_k = k + num_warmup_microbatches + output_tensor = forward_step_helper(forward_k) + + # Backward pass. + backward_k = k + input_tensor_grad = backward_step_helper(backward_k) + + # Send output_tensor and input_tensor_grad, receive input_tensor + # and output_tensor_grad. + + # Determine if current stage has anything to send in either direction, + # otherwise set tensor to None. + forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True) + mpu.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id) + if mpu.is_pipeline_last_stage(): + output_tensor = None + + backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False) + mpu.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id) + if mpu.is_pipeline_first_stage(): + input_tensor_grad = None + + # Determine if peers are sending, and where in data structure to put + # received tensors. + recv_prev = True + if mpu.is_pipeline_first_stage(ignore_virtual=True): + # First stage is ahead of last stage by (pipeline_parallel_size - 1). + next_forward_model_chunk_id = get_model_chunk_id( + forward_k - (pipeline_parallel_size - 1), forward=True) + if next_forward_model_chunk_id == (num_model_chunks - 1): + recv_prev = False + next_forward_model_chunk_id += 1 + else: + next_forward_model_chunk_id = get_model_chunk_id(forward_k + 1, + forward=True) + + recv_next = True + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Last stage is ahead of first stage by (pipeline_parallel_size - 1). + next_backward_model_chunk_id = get_model_chunk_id( + backward_k - (pipeline_parallel_size - 1), forward=False) + if next_backward_model_chunk_id == 0: + recv_next = False + next_backward_model_chunk_id -= 1 + else: + next_backward_model_chunk_id = get_model_chunk_id(backward_k + 1, + forward=False) + + # If last iteration, don't receive; we already received one extra + # before the start of the for loop. + if k == (num_microbatches_remaining - 1): + recv_prev = False + + # Communicate tensors. + input_tensor, output_tensor_grad = \ + p2p_communication.send_forward_backward_recv_forward_backward( + output_tensor, input_tensor_grad, + recv_prev=recv_prev, recv_next=recv_next, + tensor_shape=tensor_shape, timers=timers) + deallocate_output_tensor(output_tensor) + + # Put input_tensor and output_tensor_grad in data structures in the + # right location. + if recv_prev: + input_tensors[next_forward_model_chunk_id].append(input_tensor) + if recv_next: + output_tensor_grads[next_backward_model_chunk_id].append( + output_tensor_grad) + + # Run cooldown backward passes (flush out pipeline). 
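+    # In the non-forward-only case the schedule has now finished warmup (extra
+    # forward passes) and the steady-state 1F1B loop; the cooldown below drains
+    # the backward passes that are still outstanding.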
+ if not forward_only: + if all_warmup_microbatches: + output_tensor_grads[num_model_chunks-1].append( + p2p_communication.recv_backward(tensor_shape, timers=timers)) + for k in range(num_microbatches_remaining, num_microbatches): + input_tensor_grad = backward_step_helper(k) + next_backward_model_chunk_id = get_model_chunk_id(k+1, forward=False) + recv_next = True + if mpu.is_pipeline_last_stage(ignore_virtual=True): + if next_backward_model_chunk_id == (num_model_chunks - 1): + recv_next = False + if k == (num_microbatches - 1): + recv_next = False + output_tensor_grads[next_backward_model_chunk_id].append( + p2p_communication.send_backward_recv_backward( + input_tensor_grad, recv_next=recv_next, + tensor_shape=tensor_shape, + timers=timers)) + + return forward_data_store + + +def get_tensor_shapes(rank, model_type: ModelType): + # Determine right tensor sizes (based on position of rank with respect to split + # rank) and model size. + # Send two tensors if model is T5 and rank is in decoder stage: + # first tensor is decoder (pre-transpose), + # second tensor is encoder (post-transpose). + # If model is T5 and rank is at the boundary: + # send one tensor (post-transpose from encoder). + # Otherwise, send one tensor (pre-transpose). + args = get_args() + tensor_shapes = [] + + if args.sequence_parallel: + seq_length = args.seq_length // mpu.get_tensor_model_parallel_world_size() + else: + seq_length = args.seq_length + + if model_type == ModelType.encoder_and_decoder: + if args.sequence_parallel: + decoder_seq_length = args.decoder_seq_length // mpu.get_tensor_model_parallel_world_size() + else: + decoder_seq_length = args.decoder_seq_length + + if mpu.is_pipeline_stage_before_split(rank): + tensor_shapes.append((seq_length, args.micro_batch_size, args.hidden_size)) + else: + tensor_shapes.append((decoder_seq_length, args.micro_batch_size, args.hidden_size)) + tensor_shapes.append((seq_length, args.micro_batch_size, args.hidden_size)) + else: + tensor_shapes.append((seq_length, args.micro_batch_size, args.hidden_size)) + return tensor_shapes + + +def recv_forward(tensor_shapes, timers): + input_tensors = [] + for tensor_shape in tensor_shapes: + if tensor_shape is None: + input_tensors.append(None) + else: + input_tensors.append(p2p_communication.recv_forward(tensor_shape, + timers=timers)) + return input_tensors + + +def recv_backward(tensor_shapes, timers): + output_tensor_grads = [] + for tensor_shape in tensor_shapes: + if tensor_shape is None: + output_tensor_grads.append(None) + else: + output_tensor_grads.append(p2p_communication.recv_backward(tensor_shape, + timers=timers)) + return output_tensor_grads + + +def send_forward(output_tensors, tensor_shapes, timers): + if not isinstance(output_tensors, list): + output_tensors = [output_tensors] + for (output_tensor, tensor_shape) in zip(output_tensors, tensor_shapes): + if tensor_shape is None: + continue + p2p_communication.send_forward(output_tensor, tensor_shape, timers=timers) + + +def send_backward(input_tensor_grads, tensor_shapes, timers): + if not isinstance(input_tensor_grads, list): + input_tensor_grads = [input_tensor_grads] + for (input_tensor_grad, tensor_shape) in zip(input_tensor_grads, tensor_shapes): + if tensor_shape is None: + continue + p2p_communication.send_backward(input_tensor_grad, tensor_shape, timers=timers) + + +def send_forward_recv_backward(output_tensors, tensor_shapes, timers): + if not isinstance(output_tensors, list): + output_tensors = [output_tensors] + output_tensor_grads = [] + for 
(output_tensor, tensor_shape) in zip(output_tensors, tensor_shapes): + if tensor_shape is None: + output_tensor_grads.append(None) + continue + output_tensor_grad = p2p_communication.send_forward_recv_backward( + output_tensor, tensor_shape, timers=timers) + output_tensor_grads.append(output_tensor_grad) + return output_tensor_grads + + +def send_backward_recv_forward(input_tensor_grads, tensor_shapes, timers): + if not isinstance(input_tensor_grads, list): + input_tensor_grads = [input_tensor_grads] + input_tensors = [] + for (input_tensor_grad, tensor_shape) in zip(input_tensor_grads, tensor_shapes): + if tensor_shape is None: + input_tensors.append(None) + continue + input_tensor = p2p_communication.send_backward_recv_forward( + input_tensor_grad, tensor_shape, timers=timers) + input_tensors.append(input_tensor) + return input_tensors + + +def forward_backward_pipelining_without_interleaving(forward_step_func, + data_iterator, + model, + optimizer, + timers, + forward_only, + collect_non_loss_data=False): + """Run non-interleaved 1F1B schedule, with communication between pipeline + stages. + + Returns dictionary with losses if the last stage, empty dict otherwise.""" + args = get_args() + + assert len(model) == 1 + model = model[0] + + # Compute number of warmup microbatches. + num_microbatches = get_num_microbatches() + num_warmup_microbatches = \ + (mpu.get_pipeline_model_parallel_world_size() - + mpu.get_pipeline_model_parallel_rank() - 1) + num_warmup_microbatches = min( + num_warmup_microbatches, + num_microbatches) + num_microbatches_remaining = \ + num_microbatches - num_warmup_microbatches + + unwrapped_model = unwrap_model( + model, (torchDDP, LocalDDP, Float16Module)) + model_type = unwrapped_model.model_type + rank = mpu.get_pipeline_model_parallel_rank() + recv_tensor_shapes = get_tensor_shapes(rank-1, model_type) + send_tensor_shapes = get_tensor_shapes(rank, model_type) + + # Input, output tensors only need to be saved when doing backward passes + input_tensors = None + output_tensors = None + if not forward_only: + input_tensors = [] + output_tensors = [] + forward_data_store = [] + + # Run warmup forward passes. + for i in range(num_warmup_microbatches): + input_tensor = recv_forward(recv_tensor_shapes, timers=timers) + output_tensor = forward_step(forward_step_func, data_iterator, model, + input_tensor, forward_data_store, + timers, collect_non_loss_data) + send_forward(output_tensor, send_tensor_shapes, timers=timers) + + if not forward_only: + input_tensors.append(input_tensor) + output_tensors.append(output_tensor) + deallocate_output_tensor(output_tensor[0]) + + # Before running 1F1B, need to receive first forward tensor. + # If all microbatches are run in warmup / cooldown phase, then no need to + # receive this tensor here. + if num_microbatches_remaining > 0: + input_tensor = recv_forward(recv_tensor_shapes, timers=timers) + + # Run 1F1B in steady state. + for i in range(num_microbatches_remaining): + last_iteration = (i == (num_microbatches_remaining - 1)) + + output_tensor = forward_step(forward_step_func, data_iterator, model, + input_tensor, forward_data_store, + timers, collect_non_loss_data) + if forward_only: + send_forward(output_tensor, send_tensor_shapes, timers=timers) + + if not last_iteration: + input_tensor = recv_forward(recv_tensor_shapes, timers=timers) + + else: + output_tensor_grad = \ + send_forward_recv_backward(output_tensor, + send_tensor_shapes, + timers=timers) + + # Add input_tensor and output_tensor to end of list. 
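+            # The saved pairs are consumed FIFO below: the oldest pair is popped
+            # for the backward pass, which keeps the number of live activations
+            # per stage bounded (roughly the warmup depth) rather than the full
+            # microbatch count.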
+ input_tensors.append(input_tensor) + output_tensors.append(output_tensor) + deallocate_output_tensor(output_tensor[0]) + + # Pop input_tensor and output_tensor from the start of the list for + # the backward pass. + input_tensor = input_tensors.pop(0) + output_tensor = output_tensors.pop(0) + + input_tensor_grad = \ + backward_step(optimizer, input_tensor, output_tensor, + output_tensor_grad, timers) + + if last_iteration: + input_tensor = None + send_backward(input_tensor_grad, recv_tensor_shapes, timers=timers) + else: + input_tensor = \ + send_backward_recv_forward( + input_tensor_grad, recv_tensor_shapes, timers=timers) + + # Run cooldown backward passes. + if not forward_only: + for i in range(num_warmup_microbatches): + input_tensor = input_tensors.pop(0) + output_tensor = output_tensors.pop(0) + + output_tensor_grad = recv_backward(send_tensor_shapes, timers=timers) + + input_tensor_grad = \ + backward_step(optimizer, input_tensor, output_tensor, + output_tensor_grad, timers) + + send_backward(input_tensor_grad, recv_tensor_shapes, timers=timers) + + return forward_data_store diff --git a/multilinguality_megatron/megatron/static/index.html b/multilinguality_megatron/megatron/static/index.html new file mode 100644 index 0000000000000000000000000000000000000000..806287955bcc02e2d4148855af5ddb36ba94ae72 --- /dev/null +++ b/multilinguality_megatron/megatron/static/index.html @@ -0,0 +1,124 @@ + + + + + + + +Megatron + + + +
+[index.html page body: "Prompt Megatron" prompt box with a 0 / 1000 character counter]
+ + + + + diff --git a/multilinguality_megatron/megatron/text_generation/__init__.py b/multilinguality_megatron/megatron/text_generation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77da7be30ae4d02bd7ab1e4bae86afc8923d4e23 --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +from .api import ( + generate, + generate_and_post_process, + beam_search_and_post_process) diff --git a/multilinguality_megatron/megatron/text_generation/api.py b/multilinguality_megatron/megatron/text_generation/api.py new file mode 100644 index 0000000000000000000000000000000000000000..714f76c8cb74b087b6807570662ee2ac7660437d --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/api.py @@ -0,0 +1,201 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Inference API.""" + + +import torch + +from megatron.core import mpu +from .communication import broadcast_float_list +from .generation import ( + generate_tokens_probs_and_return_on_first_stage, + score_and_return_on_first_stage, + beam_search_and_return_on_first_stage) +from .tokenization import ( + tokenize_prompts, + detokenize_generations) + + +def generate_and_post_process(model, + prompts=None, + tokens_to_generate=0, + return_output_log_probs=False, + top_k_sampling=0, + top_p_sampling=0.0, + top_p_decay=0.0, + top_p_bound=0.0, + temperature=1.0, + add_BOS=False, + use_eod_token_for_early_termination=True, + stop_on_double_eol=False, + stop_on_eol=False, + prevent_newline_after_colon=False, + random_seed=-1): + """Run inference and post-process outputs, i.e., detokenize, + move to cpu and convert to list.""" + + # Main inference. + tokens, lengths, output_log_probs = generate( + model, + prompts=prompts, + tokens_to_generate=tokens_to_generate, + return_output_log_probs=return_output_log_probs, + top_k_sampling=top_k_sampling, + top_p_sampling=top_p_sampling, + top_p_decay=top_p_decay, + top_p_bound=top_p_bound, + temperature=temperature, + add_BOS=add_BOS, + use_eod_token_for_early_termination=use_eod_token_for_early_termination, + stop_on_double_eol=stop_on_double_eol, + stop_on_eol=stop_on_eol, + prevent_newline_after_colon=prevent_newline_after_colon, + random_seed=random_seed) + + # Only post-process on first stage. + if mpu.is_pipeline_first_stage(): + tokens, prompts_plus_generations, prompts_plus_generations_segments = \ + detokenize_generations(tokens, lengths, True) + + if return_output_log_probs: + output_log_probs = output_log_probs.cpu().numpy().tolist() + for i, (prob, seg) in enumerate(zip(output_log_probs, prompts_plus_generations_segments)): + output_log_probs[i] = prob[:len(seg)-1] + + return prompts_plus_generations, prompts_plus_generations_segments, \ + output_log_probs, tokens + + return None + +def generate(model, + prompts=None, + tokens_to_generate=0, + return_output_log_probs=False, + top_k_sampling=0, + top_p_sampling=0.0, + top_p_decay=0.0, + top_p_bound=0.0, + temperature=1.0, + add_BOS=False, + use_eod_token_for_early_termination=True, + stop_on_double_eol=False, + stop_on_eol=False, + prevent_newline_after_colon=False, + random_seed=-1): + """Given prompts and input parameters, run inference and return: + tokens: prompts plus the generated tokens. + lengths: length of the prompt + generations. Note that we can + discard tokens in the tokens tensor that are after the + corresponding length. + output_log_probs: log probs of the tokens. 
+ """ + + # Make sure input params are avaialble to all ranks. + values = [tokens_to_generate, + return_output_log_probs, + top_k_sampling, top_p_sampling, top_p_decay, top_p_bound, + temperature, add_BOS, use_eod_token_for_early_termination, + stop_on_double_eol, + stop_on_eol, + prevent_newline_after_colon, + random_seed] + values_float_tensor = broadcast_float_list(len(values), float_list=values) + tokens_to_generate = int(values_float_tensor[0].item()) + return_output_log_probs = bool(values_float_tensor[1].item()) + top_k_sampling = int(values_float_tensor[2].item()) + top_p_sampling = values_float_tensor[3].item() + top_p_decay = values_float_tensor[4].item() + top_p_bound = values_float_tensor[5].item() + temperature = values_float_tensor[6].item() + add_BOS = bool(values_float_tensor[7].item()) + use_eod_token_for_early_termination = bool(values_float_tensor[8].item()) + stop_on_double_eol = bool(values_float_tensor[9].item()) + stop_on_eol = bool(values_float_tensor[10].item()) + prevent_newline_after_colon = bool(values_float_tensor[11].item()) + random_seed = int(values_float_tensor[12].item()) + + if random_seed != -1: + torch.random.manual_seed(random_seed) + + # Tokenize prompts and get the batch. + # Note that these tensors are broadcaseted to all ranks. + if torch.distributed.get_rank() == 0: + assert prompts is not None + + context_tokens_tensor, context_length_tensor = tokenize_prompts( + prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS) + + if tokens_to_generate == 0: + return score_and_return_on_first_stage( + model, context_tokens_tensor, context_length_tensor) + + # Main inference function. + # Note that the outputs are available on the first stage. + return generate_tokens_probs_and_return_on_first_stage( + model, context_tokens_tensor, context_length_tensor, + return_output_log_probs=return_output_log_probs, + top_k=top_k_sampling, + top_p=top_p_sampling, + top_p_decay=top_p_decay, + top_p_bound=top_p_bound, + temperature=temperature, + use_eod_token_for_early_termination=use_eod_token_for_early_termination, + stop_on_double_eol=stop_on_double_eol, + stop_on_eol=stop_on_eol, + prevent_newline_after_colon=prevent_newline_after_colon) + +def beam_search_and_post_process(model, + prompts=None, + tokens_to_generate=0, + beam_size=0, + add_BOS=False, + stop_token=50256, + num_return_gen=1, + length_penalty=1, + prevent_newline_after_colon=False): + """Run beam search and post-process outputs, i.e., detokenize, + move to cpu and convert to list.""" + + # Main inference. + tokens, scores = beam_search(model, + prompts=prompts, + tokens_to_generate=tokens_to_generate, + beam_size=beam_size, + add_BOS=add_BOS, + stop_token=stop_token, + num_return_gen=num_return_gen, + length_penalty=length_penalty, + prevent_newline_after_colon=prevent_newline_after_colon) + # Only post-process on first stage. + if mpu.is_pipeline_first_stage(): + lengths = tokens.size(1)*torch.ones(beam_size, dtype=torch.int64, device=torch.cuda.current_device()) + tokens, prompts_plus_generations, prompts_plus_generations_segments = detokenize_generations(tokens, lengths, True) + scores = scores.cpu().numpy().tolist() + return prompts_plus_generations, prompts_plus_generations_segments, scores + + return None + +def beam_search(model, prompts=None, tokens_to_generate=0, beam_size=0, add_BOS=False, stop_token=50256, num_return_gen=1, length_penalty=1, prevent_newline_after_colon=False): + # Make sure input params are avaialble to all ranks. 
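+    # All scalar parameters are packed into a single float tensor, broadcast
+    # from rank 0, and then unpacked and cast back on every rank, so only
+    # rank 0 needs to receive the request arguments.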
+ values = [tokens_to_generate, + beam_size, + add_BOS, + stop_token, + num_return_gen, + length_penalty, + prevent_newline_after_colon] + values_float_tensor = broadcast_float_list(len(values), float_list=values) + tokens_to_generate = int(values_float_tensor[0].item()) + beam_size = int(values_float_tensor[1].item()) + add_BOS = bool(values_float_tensor[2].item()) + stop_token = int(values_float_tensor[3].item()) + num_return_gen = int(values_float_tensor[4].item()) + length_penalty = values_float_tensor[5].item() + prevent_newline_after_colon = values_float_tensor[6].item() + + context_tokens_tensor, context_length_tensor = tokenize_prompts( + prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS) + + return beam_search_and_return_on_first_stage(model, context_tokens_tensor, context_length_tensor, + beam_size, stop_token=stop_token, num_return_gen=num_return_gen, length_penalty=length_penalty, + prevent_newline_after_colon=prevent_newline_after_colon) diff --git a/multilinguality_megatron/megatron/text_generation/beam_utils.py b/multilinguality_megatron/megatron/text_generation/beam_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..911a64143a86c8521abd9741df22de528a82f692 --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/beam_utils.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +## from huggingface beam search +class BeamHypotheses(object): + def __init__(self, num_beams, length_penalty=1.0, early_stopping=False): + """ + Initialize n-best list of hypotheses. + """ + self.length_penalty = length_penalty + self.early_stopping = early_stopping + self.num_beams = num_beams + self.beams = [] + self.worst_score = 1e9 + + def __len__(self): + """ + Number of hypotheses in the list. + """ + return len(self.beams) + + def add(self, hyp, sum_logprobs, length): + """ + Add a new hypothesis to the list. + """ + score = sum_logprobs / length ** self.length_penalty + if len(self) < self.num_beams or score > self.worst_score: + self.beams.append((score, hyp)) + if len(self) > self.num_beams: + sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) + del self.beams[sorted_scores[0][1]] + self.worst_score = sorted_scores[1][0] + else: + self.worst_score = min(score, self.worst_score) + + def is_done(self, best_sum_logprobs, cur_len): + """ + If there are enough hypotheses and that none of the hypotheses being generated + can become better than the worst one in the heap, then we are done with this sentence. 
+ """ + + if len(self) < self.num_beams: + return False + elif self.early_stopping: + return True + else: + cur_score = best_sum_logprobs / cur_len ** self.length_penalty + ret = self.worst_score >= cur_score + return ret + diff --git a/multilinguality_megatron/megatron/text_generation/communication.py b/multilinguality_megatron/megatron/text_generation/communication.py new file mode 100644 index 0000000000000000000000000000000000000000..dee32077f34904f7585fab0f5180a5d014f7829f --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/communication.py @@ -0,0 +1,185 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Communications utilities.""" + + +import torch + +from megatron.core import mpu + + + +# TODO: use functions from megatron/p2p +def recv_from_prev_pipeline_rank_(recv_buffer=None): + """Receive from previous pipeline stage and update the + input buffer inplace.""" + if not mpu.is_pipeline_first_stage(): + assert recv_buffer is not None + recv_prev_op = torch.distributed.P2POp( + torch.distributed.irecv, recv_buffer, + mpu.get_pipeline_model_parallel_prev_rank()) + reqs = torch.distributed.batch_isend_irecv([recv_prev_op]) + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv(). + torch.cuda.synchronize() + + + +# TODO: use functions from megatron/p2p +def send_to_next_pipeline_rank(tensor=None): + """Send output to the next pipeline stage.""" + if not mpu.is_pipeline_last_stage(): + assert tensor is not None + send_next_op = torch.distributed.P2POp( + torch.distributed.isend, tensor, + mpu.get_pipeline_model_parallel_next_rank()) + reqs = torch.distributed.batch_isend_irecv([send_next_op]) + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv(). + torch.cuda.synchronize() + + + +def _is_cuda(tensor): + """Check if a tensor is not none and is cuda.""" + assert tensor is not None + assert tensor.is_cuda + + + +def _is_cuda_contiguous(tensor): + """Check if a tensor is not none, is cuda, and is contiguous.""" + _is_cuda(tensor) + assert tensor.is_contiguous() + + + +def broadcast_from_last_pipeline_stage(size, dtype, tensor=None): + """Broadcast a tensor from last pipeline stage to all ranks.""" + + is_last_stage = mpu.is_pipeline_last_stage() + # If first stage and last state are the same, then there is no + # pipeline parallelism and no need to communicate. + if mpu.is_pipeline_first_stage() and is_last_stage: + return tensor + + if is_last_stage: + _is_cuda_contiguous(tensor) + else: + tensor = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + # Get the group and corresponding source rank. + src = mpu.get_pipeline_model_parallel_last_rank() + group = mpu.get_pipeline_model_parallel_group() + torch.distributed.broadcast(tensor, src, group) + + return tensor + + + +def broadcast_from_last_to_first_pipeline_stage(size, dtype, tensor=None): + """Broadcast tensor values from last stage into the first stage.""" + + is_last_stage = mpu.is_pipeline_last_stage() + is_first_stage = mpu.is_pipeline_first_stage() + # If first stage and last state are the same, then there is no + # pipeline parallelism and no need to communicate. + if is_first_stage and is_last_stage: + return tensor + # Only first and last stage pipeline stages need to be involved. 
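+    # The broadcast below uses the embedding group, which typically contains
+    # just the first and last pipeline stages, so intermediate stages can skip
+    # the collective entirely.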
+ if is_last_stage or is_first_stage: + if is_last_stage: + _is_cuda_contiguous(tensor) + else: + tensor = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + src = mpu.get_pipeline_model_parallel_last_rank() + group = mpu.get_embedding_group() + # Broadcast from last stage into the first stage. + torch.distributed.broadcast(tensor, src, group) + else: + tensor = None + + return tensor + + + +def copy_from_last_to_first_pipeline_stage(size, dtype, tensor=None): + """Copy tensor values from last stage into the first stage. + Note that the input tensor is updated in place.""" + + is_last_stage = mpu.is_pipeline_last_stage() + is_first_stage = mpu.is_pipeline_first_stage() + # If first stage and last state are the same, then there is no + # pipeline parallelism and no need to communicate. + if is_first_stage and is_last_stage: + return + # Only first and last stage pipeline stages need to be involved. + if is_last_stage or is_first_stage: + _is_cuda(tensor) + is_contiguous = tensor.is_contiguous() + src = mpu.get_pipeline_model_parallel_last_rank() + group = mpu.get_embedding_group() + if is_contiguous: + tensor_ = tensor + else: + if is_last_stage: + tensor_ = tensor.contiguous() + else: + tensor_ = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + # Broadcast from last stage into the first stage. + torch.distributed.broadcast(tensor_, src, group) + # Update the first stage tensor + if is_first_stage and not is_contiguous: + tensor[...] = tensor_ + + + +def broadcast_tensor(size, dtype, tensor=None, rank=0): + """ Given size and type of a tensor on all ranks and the tensor value + only on a specific rank, broadcast from that rank to all other ranks. + """ + + if torch.distributed.get_rank() == rank: + _is_cuda_contiguous(tensor) + else: + tensor = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + + torch.distributed.broadcast(tensor, rank) + + return tensor + + + +def broadcast_list(size, dtype, list_values=None, rank=0): + """Broadcast a list of values with a given type.""" + + tensor = None + if torch.distributed.get_rank() == rank: + tensor = torch.tensor(list_values, dtype=dtype, + device=torch.cuda.current_device()) + + return broadcast_tensor(size, dtype, tensor=tensor, rank=rank) + + + +def broadcast_int_list(size, int_list=None, rank=0): + """Broadcast a list of interger values.""" + + return broadcast_list(size, torch.int64, list_values=int_list, rank=rank) + + + +def broadcast_float_list(size, float_list=None, rank=0): + """Broadcast a list of float values.""" + + return broadcast_list(size, torch.float32, list_values=float_list, + rank=rank) diff --git a/multilinguality_megatron/megatron/text_generation/forward_step.py b/multilinguality_megatron/megatron/text_generation/forward_step.py new file mode 100644 index 0000000000000000000000000000000000000000..7ccb80cf9e551f39e8818e3e135522b4eb6abde6 --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/forward_step.py @@ -0,0 +1,204 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""Forward step utilities.""" + +from collections.abc import Iterable + +import torch + +from megatron import get_args +from megatron.core import mpu +from .communication import ( + send_to_next_pipeline_rank, + recv_from_prev_pipeline_rank_) + + + +class InferenceParams: + """Inference parameters that are passed to the main model in order + to efficienly calculate and store the context during inference.""" + + def __init__(self, max_batch_size, max_sequence_len): + """Note that offsets are set to zero and we always set the + flag to allocate memory. After the first call, make sure to + set this flag to False.""" + self.max_sequence_len = max_sequence_len + self.max_batch_size = max_batch_size + self.sequence_len_offset = 0 + self.batch_size_offset = 0 + self.key_value_memory_dict = {} + + def swap_key_value_dict(self, batch_idx): + "swap between batches" + if len(self.key_value_memory_dict) == 0: + raise ValueError("should not swap when dict in empty") + + for layer_number in self.key_value_memory_dict.keys(): + inference_key_memory, inference_value_memory = self.key_value_memory_dict[layer_number] + assert len(batch_idx) == inference_key_memory.shape[1] ## make sure batch size is the same + new_inference_key_memory = inference_key_memory[:, batch_idx] + new_inference_value_memory = inference_value_memory[:, batch_idx] + self.key_value_memory_dict[layer_number] = ( + new_inference_key_memory, new_inference_value_memory) + +class ForwardStep: + """Forward step function with all the communications. + We use a class here to hide the inference parameters + from the outside caller.""" + + def __init__(self, model, max_batch_size, max_sequence_len): + """Set values so we don't need to do it multiple times.""" + # Make sure model is in eval mode. + assert not isinstance(model, Iterable), \ + 'interleaving schedule is not supported for inference' + model.eval() + self.model = model + # Initialize inference parameters. + self.inference_params = InferenceParams(max_batch_size, + max_sequence_len) + # Pipelining arguments. + args = get_args() + self.pipeline_size_larger_than_one = ( + args.pipeline_model_parallel_size > 1) + # Threshold of pipelining. + self.pipelining_batch_x_seqlen = \ + args.inference_batch_times_seqlen_threshold + + def __call__(self, tokens, position_ids, attention_mask): + """Invocation of the forward methods. Note that self.inference_params + is being modified by the forward step.""" + # Pipelining case. 
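+        # Illustrative numbers only: with inference_batch_times_seqlen_threshold=512,
+        # a [8, 128] token batch gives 8 * 128 = 1024 >= 512, so the input is
+        # split into micro batches of size max(1, 512 // 128) = 4 below.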
+ if self.pipeline_size_larger_than_one: + current_batch_x_seqlen = tokens.size(0) * tokens.size(1) + if current_batch_x_seqlen >= self.pipelining_batch_x_seqlen: + micro_batch_size = \ + max(1, self.pipelining_batch_x_seqlen // tokens.size(1)) + return _with_pipelining_forward_step(self.model, + tokens, + position_ids, + attention_mask, + self.inference_params, + micro_batch_size) + + return _no_pipelining_forward_step(self.model, + tokens, + position_ids, + attention_mask, + self.inference_params) + + + +def _get_recv_buffer_dtype(args): + """Receive happens between the layers.""" + if args.fp32_residual_connection: + return torch.float + return args.params_dtype + + + +def _allocate_recv_buffer(batch_size, sequence_length): + """Receive happens between the layers with size [s, b, h].""" + if mpu.is_pipeline_first_stage(): + return None + args = get_args() + recv_size = (sequence_length, batch_size, args.hidden_size) + return torch.empty(recv_size, + dtype=_get_recv_buffer_dtype(args), + device=torch.cuda.current_device()) + + + +def _forward_step_helper(model, tokens, position_ids, attention_mask, + inference_params, recv_buffer=None): + """Single forward step. Update the allocate memory flag so + only the first time the memory is allocated.""" + batch_size = tokens.size(0) + sequence_length = tokens.size(1) + if recv_buffer is None: + recv_buffer = _allocate_recv_buffer(batch_size, sequence_length) + + # Receive from previous stage. + recv_from_prev_pipeline_rank_(recv_buffer) + + # Forward pass through the model. + model.set_input_tensor(recv_buffer) + output_tensor = model(tokens, position_ids, attention_mask, + inference_params=inference_params) + + # Send output to the next stage. + send_to_next_pipeline_rank(output_tensor) + + return output_tensor + + + +def _no_pipelining_forward_step(model, tokens, position_ids, attention_mask, + inference_params, recv_buffer=None): + """If recv_buffer is none, we will allocate one on the fly.""" + # Run a simple forward pass. + output_tensor = _forward_step_helper(model, tokens, position_ids, + attention_mask, inference_params, + recv_buffer=recv_buffer) + # Update the sequence length offset. + inference_params.sequence_len_offset += tokens.size(1) + + logits = None + if mpu.is_pipeline_last_stage(): + logits = output_tensor + + return logits + + + +def _with_pipelining_forward_step(model, tokens, position_ids, attention_mask, + inference_params, micro_batch_size): + """No interleaving is supported.""" + sequence_length = tokens.size(1) + batch_size = tokens.size(0) + + # Divide the batch dimension into micro batches. + num_micro_batches, last_chunk = divmod(batch_size, + micro_batch_size) + if last_chunk > 0: + num_micro_batches += 1 + + # Preallocate memory for output logits. + logits = None + if mpu.is_pipeline_last_stage(): + args = get_args() + logits = torch.empty( + (batch_size, sequence_length, args.padded_vocab_size), + dtype=torch.float32, device=torch.cuda.current_device()) + + # Preallocate recv buffer. + recv_buffer = _allocate_recv_buffer(micro_batch_size, sequence_length) + + for micro_batch_index in range(num_micro_batches): + # Slice among the batch dimenion. + start = micro_batch_index * micro_batch_size + end = min(start + micro_batch_size, batch_size) + this_micro_batch_size = end - start + tokens2use = tokens[start:end, ...] + position_ids2use = position_ids[start:end, ...] + + # Run a simple forward pass. 
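+        # A short final chunk cannot reuse the preallocated receive buffer
+        # (its batch dimension is micro_batch_size), so fall back to an
+        # on-the-fly allocation for that last, smaller micro batch.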
+ if this_micro_batch_size != micro_batch_size: + recv_buffer = None + output = _forward_step_helper(model, tokens2use, position_ids2use, + attention_mask, inference_params, + recv_buffer=recv_buffer) + + # Adjust the batch size offset to account for the micro-batch. + inference_params.batch_size_offset += this_micro_batch_size + + # Copy logits. + if mpu.is_pipeline_last_stage(): + logits[start:end, ...] = output + + # Once we are done with all the micro-batches, we can + # adjust the sequence length offset. + inference_params.sequence_len_offset += sequence_length + # and reset the batch size offset + inference_params.batch_size_offset = 0 + + return logits diff --git a/multilinguality_megatron/megatron/text_generation/generation.py b/multilinguality_megatron/megatron/text_generation/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..a8dca2bd8f689c1877ea0ebaf18f375f964f077f --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/generation.py @@ -0,0 +1,429 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Generation utilities.""" + +import torch +import torch.nn.functional as F + +from megatron import get_args, get_tokenizer +from megatron.core import mpu +from megatron.utils import get_ltor_masks_and_position_ids +from .communication import ( + copy_from_last_to_first_pipeline_stage, + broadcast_from_last_pipeline_stage, + broadcast_from_last_to_first_pipeline_stage) +from .forward_step import ForwardStep +from .sampling import sample +from .beam_utils import BeamHypotheses + + +def score_and_return_on_first_stage(model, tokens, lengths): + """Function for just scoring. + Arguments: + model: no interleaving is supported. + tokens: prompt tokens extended to be of size [b, max_prompt_length] + lengths: original prompt length, size: [b] + Note: Outside of model, other parameters only need to be available on + rank 0. + Outputs: + output_log_probs: log probability of the selected tokens. size: [b, s] + """ + + args = get_args() + + batch_size = tokens.size(0) + max_prompt_length = lengths.max().item() + assert max_prompt_length == tokens.size(1) + + if max_prompt_length > args.max_position_embeddings: + raise ValueError("Length of prompt + tokens_to_generate longer than allowed") + + if max_prompt_length * batch_size > args.max_tokens_to_oom: + raise ValueError("Too many tokens. " + str(max_prompt_length*batch_size)+ " is greater than "+str(args.max_tokens_to_oom)) + + # forward step. + forward_step = ForwardStep(model, batch_size, max_prompt_length) + + # =================== + # Pre-allocate memory + # =================== + + # Log probability of the sequence (prompt + generated tokens). + output_log_probs = None + output_log_probs_size = (batch_size, max_prompt_length - 1) + + if mpu.is_pipeline_last_stage(): + output_log_probs = torch.empty(output_log_probs_size, + dtype=torch.float32, + device=torch.cuda.current_device()) + + # ============= + # Run infernece + # ============= + with torch.no_grad(): + attention_mask, position_ids = _build_attention_mask_and_position_ids(tokens) + + # logits will be meanigful only in the last pipeline stage. + logits = forward_step(tokens, position_ids, attention_mask) + + if mpu.is_pipeline_last_stage(): + # Always the last stage should have an output. + assert logits is not None + log_probs = F.log_softmax(logits, dim=2) + + # Pick the tokens that we need to get the log + # probabilities for. 
Note that next input token is + # the token which we selected in the current logits, + # so shift by 1. + indices = torch.unsqueeze(tokens[:, 1:], 2) + output_log_probs = torch.gather(log_probs, 2, indices).squeeze(2) + + # ====================================== + # Broadcast to the first pipeline stage. + # ====================================== + output_log_probs = broadcast_from_last_to_first_pipeline_stage( + output_log_probs_size, torch.float32, output_log_probs) + + return tokens, lengths, output_log_probs + +def generate_tokens_probs_and_return_on_first_stage( + model, tokens, lengths, + return_output_log_probs=False, + top_k=0, top_p=0.0, top_p_decay=0.0, top_p_bound=0.0, + temperature=1.0, + use_eod_token_for_early_termination=True, + stop_on_double_eol=False, + stop_on_eol=False, + prevent_newline_after_colon=True + ): + """Main token generation function. + Arguments: + model: no interleaving is supported. + tokens: prompt tokens extended to be of size [b, max-sequence-length] + lengths: original prompt length, size: [b] + return_output_log_probs: flag to calculate the log probability of + the generated tokens. Note that the log probability is the one + from the original logit. + top_k, top_p: top-k and top-p sampling parameters. + Note that top-k = 1 is gready. Also, these paramters are + exclusive meaning that: + if top-k > 0 then we expect top-p=0. + if top-p > 0 then we check for top-k=0. + temperature: sampling temperature. + use_eod_token_for_early_termination: if True, do early termination if + all the sequences have reached this token. + prevent_newline_after_colon: if True, it will disable generating new line \n after : + Note: Outside of model, other parameters only need to be available on + rank 0. + Outputs: Note that is size is adjusted to a lower value than + max-sequence-length if generation is terminated early. + tokens: prompt and generated tokens. size: [b, :] + generated_sequence_lengths: total length (including prompt) of + the generated sequence. size: [b] + output_log_probs: log probability of the selected tokens. size: [b, s] + """ + + args = get_args() + tokenizer = get_tokenizer() + + batch_size = tokens.size(0) + min_prompt_length = lengths.min().item() + max_sequence_length = tokens.size(1) + + if max_sequence_length > args.max_position_embeddings: + raise ValueError("Length of prompt + tokens_to_generate longer than allowed") + + if max_sequence_length * batch_size > args.max_tokens_to_oom: + raise ValueError("Too many tokens. " + str(max_sequence_length*batch_size)+ " is greater than "+str(args.max_tokens_to_oom)) + + # forward step. + forward_step = ForwardStep(model, batch_size, max_sequence_length) + + # Added termination_id to support the case that we want to terminate the + # generation once that id is generated. + if hasattr(args, 'eos_id'): + termination_id = args.eos_id + else: + termination_id = tokenizer.eod + + # =================== + # Pre-allocate memory + # =================== + + # Log probability of the sequence (prompt + generated tokens). + output_log_probs = None + output_log_probs_size = (batch_size, max_sequence_length - 1) + # Lengths of generated seuquence including including prompts. 
+ generated_sequence_lengths = None + if mpu.is_pipeline_last_stage(): + if return_output_log_probs: + output_log_probs = torch.empty(output_log_probs_size, + dtype=torch.float32, + device=torch.cuda.current_device()) + generated_sequence_lengths = torch.ones( + batch_size, dtype=torch.int64, + device=torch.cuda.current_device()) * max_sequence_length + + # Whether we have reached a termination id. + is_generation_done = torch.zeros(batch_size, dtype=torch.uint8, + device=torch.cuda.current_device()) + + # ============= + # Run infernece + # ============= + + with torch.no_grad(): + attention_mask, position_ids = _build_attention_mask_and_position_ids( + tokens) + prev_context_length = 0 + for context_length in range(min_prompt_length, max_sequence_length): + + # Pick the slice that we need to pass through the network. + tokens2use = tokens[:, prev_context_length:context_length] + positions2use = position_ids[:, prev_context_length:context_length] + attention_mask2use = attention_mask[ + ..., prev_context_length:context_length, :context_length] + + # logits will be meanigful only in the last pipeline stage. + logits = forward_step(tokens2use, positions2use, attention_mask2use) + + if mpu.is_pipeline_last_stage(): + if prevent_newline_after_colon: + logits[tokens2use[:, -1] == tokenizer.tokenize(':')[0], -1, tokenizer.tokenize('\n')[0]] = -1e10 # disable "\n" after ":" + # Always the last stage should have an output. + assert logits is not None + + # Sample. + last_token_logits = logits[:, -1, :] + new_sample = sample(last_token_logits, + top_k=top_k, + top_p=top_p, + temperature=temperature, + vocab_size=tokenizer.vocab_size) + if top_p > 0.0 and top_p_decay > 0.0: + top_p = top_p * top_p_decay + if top_p_bound > 0.0: + top_p = max(top_p, top_p_bound) + + # If a prompt length is smaller or equal th current context + # length, it means we have started generating tokens + started = lengths <= context_length + # Update the tokens. + tokens[started, context_length] = new_sample[started] + + # Calculate the log probabilities. + if return_output_log_probs: + log_probs = F.log_softmax(logits, dim=2) + if return_output_log_probs: + # Pick the tokens that we need to get the log + # probabilities for. Note that next input token is + # the token which we selected in the current logits, + # so shift by 1. + indices = torch.unsqueeze( + tokens[ + :, + (prev_context_length + 1):(context_length + 1)], + 2) + output_log_probs[:, + prev_context_length:context_length] = \ + torch.gather(log_probs, 2, indices).squeeze(2) + + # Update the tokens on the first stage so the next input to + # the network is correct. + copy_from_last_to_first_pipeline_stage(batch_size, torch.int64, + tokens[:, context_length]) + + # Update the context length for the next token generation. + prev_context_length = context_length + + # Check if all the sequences have hit the termination_id. 
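+            # The hard-coded ids below assume the GPT-2 BPE vocabulary, where
+            # 628 decodes to "\n\n" and 198 to "\n"; with a different tokenizer
+            # these stop-on-eol heuristics will not match anything meaningful
+            # (see the TODO below).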
+ done = None + if mpu.is_pipeline_last_stage(): + # TODO(rprenger) These stopping methods are tokenizer dependent + # instead tokenization should be in the inference loop so stop sequences can be used + if stop_on_double_eol: + hit_double_eol = (new_sample == 628).byte() & started.byte() + hit_two_eols = (new_sample == 198).byte() & (tokens[:, context_length-1] == 198).byte() & started.byte() + done_token = hit_double_eol | hit_two_eols + elif stop_on_eol: + hit_double_eol = (new_sample == 628).byte() & started.byte() + hit_eol = (new_sample == 198).byte() & started.byte() + done_token = hit_double_eol | hit_eol + else: + done_token = (new_sample == termination_id).byte() & \ + started.byte() + + just_finished = (done_token & ~is_generation_done).bool() + generated_sequence_lengths[just_finished.view(-1)] = \ + context_length + 1 + is_generation_done = is_generation_done | done_token + done = torch.all(is_generation_done) + done = broadcast_from_last_pipeline_stage(1, torch.uint8, + tensor=done) + if use_eod_token_for_early_termination and done: + break + + # =================================================== + # Update the length of based on max generated length. + # =================================================== + + tokens = tokens[:, :(context_length + 1)] + if mpu.is_pipeline_last_stage(): + if return_output_log_probs: + output_log_probs = output_log_probs[:, :context_length] + + # ====================================== + # Broadcast to the first pipeline stage. + # ====================================== + + generated_sequence_lengths = broadcast_from_last_to_first_pipeline_stage( + batch_size, torch.int64, generated_sequence_lengths) + if return_output_log_probs: + output_log_probs_size = (batch_size, context_length) + output_log_probs = broadcast_from_last_to_first_pipeline_stage( + output_log_probs_size, torch.float32, output_log_probs) + + return tokens, generated_sequence_lengths, output_log_probs + +def beam_search_and_return_on_first_stage(model, tokens, lengths, beam_size, stop_token, num_return_gen, length_penalty, prevent_newline_after_colon=True): + args = get_args() + tokenizer = get_tokenizer() + + batch_size = tokens.size(0) + assert(batch_size == 1) + prompt_length = lengths.item() + final_sequence_length = tokens.size(1) + final_sequence_length = min(final_sequence_length, args.max_position_embeddings) + + # If the context is too big, this happens + if prompt_length >= final_sequence_length: + raise ValueError("context length + tokens_to_generate too large") + + # forward step. + forward_step = ForwardStep(model, beam_size, final_sequence_length) + + beam_hyp = BeamHypotheses(beam_size, length_penalty) + best_batches = None + done = torch.zeros(1, dtype=torch.uint8, device=torch.cuda.current_device()) + scores = torch.zeros(beam_size, + dtype=torch.float32, + device=torch.cuda.current_device()).unsqueeze(1) + scores_size_tensor, tokens_size_tensor = None, None + # ============= + # Run infernece + # ============= + with torch.no_grad(): + tokens = tokens.repeat(beam_size, 1) + attention_mask, position_ids = _build_attention_mask_and_position_ids(tokens) + prev_context_length = 0 + for context_length in range(prompt_length, final_sequence_length): + + # Pick the slice that we need to pass through the network. 
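+            # Incremental decoding: only the tokens produced since the previous
+            # step are fed through the network; earlier positions are served
+            # from the key/value cache held in forward_step.inference_params.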
+ tokens2use = tokens[:, prev_context_length:context_length] + positions2use = position_ids[:, prev_context_length:context_length] + attention_mask2use = attention_mask[ + ..., prev_context_length:context_length, :context_length] + + # logits will be meanigful only in the last pipeline stage. + logits = forward_step(tokens2use, positions2use, attention_mask2use) + + if mpu.is_pipeline_last_stage(): + if prevent_newline_after_colon: + logits[tokens2use[:, -1] == tokenizer.tokenize(':')[0], -1, tokenizer.tokenize('\n')[0]] = -1e10 # disable "\n" after ":" + vocab_size = logits.size(2) + log_probs = F.log_softmax(logits, dim=2) + new_scores = log_probs[:, -1, :] + scores + + if context_length == prompt_length: # if this is the first one + sorted_scores, indices = torch.sort(new_scores[0,:], descending=True) + else: + sorted_scores, indices = torch.sort(new_scores.view(-1), descending=True) + + best_beam_ids = torch.div(indices[: 2 * beam_size], vocab_size).trunc().long() + best_words = indices[:2 * beam_size] % vocab_size + best_scores = sorted_scores[: 2 * beam_size] + + next_beams = [] + for beam_token_rank, (token_id, beam_score, beam_id) in enumerate( + zip(best_words, best_scores, best_beam_ids) + ): + if token_id.item() == stop_token: + # if beam_token does not belong to top num_beams tokens, it should not be added + is_beam_token_worse_than_top_num_beams = beam_token_rank >= beam_size + if is_beam_token_worse_than_top_num_beams: + continue + beam_hyp.add( + tokens[beam_id].clone(), + beam_score, + context_length + 1 - prompt_length + ) + else: + # add next predicted token since it is not eos_token + next_beams.append((token_id, beam_score, beam_id)) + + if len(next_beams) == beam_size: + break + + if beam_hyp.is_done(best_scores.max().item(), context_length + 1 - prompt_length): + done = torch.ones(1, dtype=torch.uint8, device=torch.cuda.current_device()) + + best_batches = tokens.new([item[2] for item in next_beams]) + tokens = tokens[best_batches,:] + tokens[:, context_length] = tokens.new([item[0] for item in next_beams]) + scores = scores.new([item[1] for item in next_beams]).unsqueeze(1) + + # torch.distributed.barrier() + done = broadcast_from_last_pipeline_stage(1, torch.uint8, done) + if done: + break + + # Update the tokens on the first stage so the next input to + # the network is correct. + copy_from_last_to_first_pipeline_stage(tokens.size(), torch.int64, + tokens) + + # set inference key values to make it consistent with best beam index + best_batches = broadcast_from_last_pipeline_stage(beam_size, torch.int64, best_batches) + forward_step.inference_params.swap_key_value_dict(best_batches) + + # Update the context length for the next token generation. 
+ prev_context_length = context_length + + if mpu.is_pipeline_last_stage(): + # if cannot find stop token, add open beams to hyps + if not done: + for beam_id in range(beam_size): + beam_hyp.add(tokens[beam_id].clone(), scores[beam_id].squeeze(), context_length + 1 - prompt_length) + + # rank based on scores + sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0], reverse=True) + num_return_gen = min(num_return_gen, len(sorted_hyps)) + scores = [sorted_hyps[i][0] for i in range(num_return_gen)] + tokens = [sorted_hyps[i][1] for i in range(num_return_gen)] + scores = torch.stack(scores, dim=0) + tokens = torch.stack(tokens, dim=0) + scores_size_tensor = torch.tensor(scores.shape, dtype=torch.int64, device=torch.cuda.current_device()) + tokens_size_tensor = torch.tensor(tokens.shape, dtype=torch.int64, device=torch.cuda.current_device()) + + scores_size_tensor = broadcast_from_last_pipeline_stage(1, torch.int64, scores_size_tensor) + tokens_size_tensor = broadcast_from_last_pipeline_stage(2, torch.int64, tokens_size_tensor) + + scores = broadcast_from_last_to_first_pipeline_stage(tuple(scores_size_tensor), torch.float32, scores) + tokens = broadcast_from_last_to_first_pipeline_stage(tuple(tokens_size_tensor), torch.int64, tokens) + + return tokens, scores + + +def _build_attention_mask_and_position_ids(tokens): + """Build the attention mask and postition ids for the input tokens.""" + + # Since we are not interested in loss-mask and reset attention/position + # is also False, eod_token is not used so it is safe to set it to None. + attention_mask, _, position_ids = get_ltor_masks_and_position_ids( + data=tokens, + eod_token=None, + reset_position_ids=False, + reset_attention_mask=False, + eod_mask_loss=False) + + return attention_mask, position_ids diff --git a/multilinguality_megatron/megatron/text_generation/sampling.py b/multilinguality_megatron/megatron/text_generation/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..370773a36c087d01e75731e38724cfb35d4acd74 --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/sampling.py @@ -0,0 +1,93 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Sampling utilities. +Part of this code is inspired by: + - https://github.com/ari-holtzman/degen/blob/master/gen.py + - https://huggingface.co/transformers/_modules/transformers/generation_logits_process.html +""" + + +import torch + + + +def modify_logits_for_top_k_filtering(logits, top_k): + """Set the logits for none top-k values to -inf.""" + + filter_ = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits.masked_fill_(filter_, float('-Inf')) + + + +def modify_logits_for_top_p_filtering(logits, top_p): + """Set the logits for none top-p values to -inf.""" + + # First sort and calculate cumulative sum of probabilities. + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + + # Filteration based on the cumulative sum. + filter_ = cumulative_probs > top_p + # This shift by 1 is weird and I cannot justify it. This existed + # in the original implementation: + # https://github.com/ari-holtzman/degen/blob/master/gen.py + # and I guess it is needed so keeping it for now. + filter_[:, 1:] = filter_[:, :-1].clone() + # Make sure we at least have one token to select from. 
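+    # Worked example (illustrative): with sorted probabilities
+    # [0.5, 0.3, 0.15, 0.05] and top_p=0.7, the cumulative sums are
+    # [0.5, 0.8, 0.95, 1.0]; "cumulative > top_p" marks the last three, and the
+    # shift above keeps the first token past the threshold, so the nucleus is
+    # {0.5, 0.3}. The line below additionally guarantees the top token is
+    # never masked.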
+ filter_[..., 0] = 0 + + # Fill in the filtered part + filter_ = filter_.scatter(1, sorted_indices, filter_) + logits.masked_fill_(filter_, float('-Inf')) + + + +def sample(logits, top_k=0, top_p=0.0, temperature=1.0, vocab_size=None): + """ Sample and generate a token. + Note: logits has the dimension [b, v] where b is the batch size + and v is the vocabulary size. + If vocab_size is provided, we will make sure the sample that is + generated is in [0, vocab-size). This will avoid out of vocabulary + generations due to padding. + """ + + # Check logits for consistency. + assert logits.ndim == 2, 'expected the logits to be of [b, v] shape.' + assert logits.type() == 'torch.cuda.FloatTensor', \ + 'input logits should be floats.' + + + # Greedy is just simple argmax. + if top_k == 1: + assert top_p == 0.0, 'cannot set both greedy and top-p samplings.' + samples = torch.argmax(logits, dim=-1) + + # Top-k or top-p sampling. + else: + # Clone so we do not modify the inputs, + logits = logits.clone() + # Apply temperature in place. + if temperature != 1.0: + logits.div_(temperature) + + if top_k > 1: + assert top_p == 0.0, 'cannot set both top-k and top-p samplings.' + assert top_k <= logits.size(1), 'top-k is larger than logit size.' + if vocab_size: + assert top_k < vocab_size, 'top-k is larger than vocab size.' + modify_logits_for_top_k_filtering(logits, top_k) + + elif top_p > 0.0: + assert top_p <= 1.0, 'top-p should be in (0, 1].' + modify_logits_for_top_p_filtering(logits, top_p) + + # After filtering, we need to recalculate the distribution. + probs = logits.softmax(dim=-1) + samples = torch.multinomial(probs, num_samples=1).view(-1) + + # If vocab size is provided, make sure the samples are in + # in the range [0, vocab-size). + if vocab_size: + samples = torch.clamp(samples, min=0, max=(vocab_size - 1)) + + return samples diff --git a/multilinguality_megatron/megatron/text_generation/tokenization.py b/multilinguality_megatron/megatron/text_generation/tokenization.py new file mode 100644 index 0000000000000000000000000000000000000000..157967523a6fc7bccd328a84e0e69444517a60c4 --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation/tokenization.py @@ -0,0 +1,118 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
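The top-p ("nucleus") filter in sampling.py above keeps the smallest set of highest-probability tokens whose cumulative probability reaches top_p, shifting the cutoff mask by one position so the token that crosses the threshold is still kept. Since sample() asserts CUDA float logits, the following is a CPU-only toy walk-through of the same filtering steps (an illustrative sketch, not part of the patch):

    import torch

    logits = torch.tensor([[2.0, 1.5, 1.0, 0.2, -1.0, -3.0]])   # one row, 6-token vocabulary
    top_p = 0.8

    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
    filter_ = cumulative_probs > top_p
    filter_[:, 1:] = filter_[:, :-1].clone()   # shift: keep the token that crosses the threshold
    filter_[..., 0] = 0                        # always keep at least one token
    filter_ = filter_.scatter(1, sorted_indices, filter_)

    probs = logits.masked_fill(filter_, float('-Inf')).softmax(dim=-1)
    print(probs)   # only the 3 most likely tokens survive; the rest get probability 0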
+ +"""Tokenization utilities.""" + + +import torch + + +from megatron import get_tokenizer +from .communication import broadcast_int_list, broadcast_tensor + + +def detokenize_generations(tokens_gpu_tensor, + lengths_gpu_tensor, + return_segments): + """Detokenize the generated tokens.""" + + tokenizer = get_tokenizer() + + prompts_plus_generations = [] + if return_segments: + prompts_plus_generations_segments = [] + + tokens = tokens_gpu_tensor.cpu().numpy().tolist() + lengths = lengths_gpu_tensor.cpu().numpy().tolist() + for sequence_tokens, length in zip(tokens, lengths): + sequence_tokens = sequence_tokens[:length] + prompts_plus_generations.append( + tokenizer.detokenize(sequence_tokens)) + if return_segments: + words = [] + for token in sequence_tokens: + word = tokenizer.tokenizer.decoder[token] + word = bytearray( + [tokenizer.tokenizer.byte_decoder[c] for c in word]).decode( + 'utf-8', errors='replace') + words.append(word) + prompts_plus_generations_segments.append(words) + + if return_segments: + return tokens, prompts_plus_generations, \ + prompts_plus_generations_segments + + return tokens, prompts_plus_generations + + +def tokenize_prompts(prompts=None, tokens_to_generate=None, + add_BOS=None, rank=0): + """Tokenize prompts and make them avaiable on all ranks.""" + + # On all ranks set to None so we can pass them to functions + sizes_list = None + prompts_tokens_cuda_long_tensor = None + prompts_length_cuda_long_tensor = None + + # On the specified rank, build the above. + if torch.distributed.get_rank() == rank: + assert prompts is not None + assert tokens_to_generate is not None + # Tensor of tokens padded and their unpadded length. + prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor = \ + _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS) + # We need the sizes of these tensors for the boradcast + sizes_list = [prompts_tokens_cuda_long_tensor.size(0), # Batch size + prompts_tokens_cuda_long_tensor.size(1)] # Sequence lenght + + # First, broadcast the sizes. + sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=rank) + + # Now that we have the sizes, we can boradcast the tokens + # and length tensors. + sizes = sizes_tensor.tolist() + prompts_tokens_cuda_long_tensor = broadcast_tensor( + sizes, torch.int64, tensor=prompts_tokens_cuda_long_tensor, rank=rank) + prompts_length_cuda_long_tensor = broadcast_tensor( + sizes[0], torch.int64, tensor=prompts_length_cuda_long_tensor, + rank=rank) + + return prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor + + +def _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS): + """Given a set of prompts and number of tokens to generate: + - tokenize prompts + - set the sequence length to be the max of length of prompts + plus the number of tokens we would like to generate + - pad all the sequences to this length so we can convert them + into a 2D tensor. + """ + + # Tokenize all the prompts. + tokenizer = get_tokenizer() + if add_BOS: + prompts_tokens = [[tokenizer.eod] + tokenizer.tokenize(prompt) + for prompt in prompts] + else: + prompts_tokens = [tokenizer.tokenize(prompt) for prompt in prompts] + + # Now we have a list of list of tokens which each list has a different + # size. We want to extend this list to: + # - incorporate the tokens that need to be generated + # - make all the sequences equal length. + # Get the prompts length. + prompts_length = [len(prompt_tokens) for prompt_tokens in prompts_tokens] + # Get the max prompts length. 
+ max_prompt_len = max(prompts_length) + # Number of tokens in the each sample of the batch. + samples_length = max_prompt_len + tokens_to_generate + # Now update the list of list to be of the same size: samples_length. + for prompt_tokens, prompt_length in zip(prompts_tokens, prompts_length): + padding_size = samples_length - prompt_length + prompt_tokens.extend([tokenizer.eod] * padding_size) + + # Now we are in a structured format, we can convert to tensors. + prompts_tokens_tensor = torch.cuda.LongTensor(prompts_tokens) + prompts_length_tensor = torch.cuda.LongTensor(prompts_length) + + return prompts_tokens_tensor, prompts_length_tensor diff --git a/multilinguality_megatron/megatron/text_generation_server.py b/multilinguality_megatron/megatron/text_generation_server.py new file mode 100644 index 0000000000000000000000000000000000000000..58550f2e631e5df5116e2211042e246663bc428e --- /dev/null +++ b/multilinguality_megatron/megatron/text_generation_server.py @@ -0,0 +1,241 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +import datetime +import torch +import json +import threading +from flask import Flask, request, jsonify, current_app +from flask_restful import Resource, Api +from megatron import get_args +from megatron.text_generation import generate_and_post_process +from megatron.text_generation import beam_search_and_post_process + + +GENERATE_NUM = 0 +BEAM_NUM = 1 +lock = threading.Lock() + +class MegatronGenerate(Resource): + def __init__(self, model): + self.model = model + + @staticmethod + def send_do_generate(): + choice = torch.cuda.LongTensor([GENERATE_NUM]) + torch.distributed.broadcast(choice, 0) + + @staticmethod + def send_do_beam_search(): + choice = torch.cuda.LongTensor([BEAM_NUM]) + torch.distributed.broadcast(choice, 0) + + def put(self): + args = get_args() + + if not "prompts" in request.get_json(): + return "prompts argument required", 400 + + if "max_len" in request.get_json(): + return "max_len is no longer used. Replace with tokens_to_generate", 400 + + if "sentences" in request.get_json(): + return "sentences is no longer used. Replace with prompts", 400 + + prompts = request.get_json()["prompts"] + if not isinstance(prompts, list): + return "prompts is not a list of strings", 400 + + if len(prompts) == 0: + return "prompts is empty", 400 + + if len(prompts) > 128: + return "Maximum number of prompts is 128", 400 + + tokens_to_generate = 64 # Choosing hopefully sane default. 
Full sequence is slow + if "tokens_to_generate" in request.get_json(): + tokens_to_generate = request.get_json()["tokens_to_generate"] + if not isinstance(tokens_to_generate, int): + return "tokens_to_generate must be an integer greater than 0" + if tokens_to_generate < 0: + return "tokens_to_generate must be an integer greater than or equal to 0" + + logprobs = False + if "logprobs" in request.get_json(): + logprobs = request.get_json()["logprobs"] + if not isinstance(logprobs, bool): + return "logprobs must be a boolean value" + + if tokens_to_generate == 0 and not logprobs: + return "tokens_to_generate=0 implies logprobs should be True" + + temperature = 1.0 + if "temperature" in request.get_json(): + temperature = request.get_json()["temperature"] + if not (type(temperature) == int or type(temperature) == float): + return "temperature must be a positive number less than or equal to 100.0" + if not (0.0 < temperature <= 100.0): + return "temperature must be a positive number less than or equal to 100.0" + + top_k = 0.0 + if "top_k" in request.get_json(): + top_k = request.get_json()["top_k"] + if not (type(top_k) == int): + return "top_k must be an integer equal to or greater than 0 and less than or equal to 1000" + if not (0 <= top_k <= 1000): + return "top_k must be equal to or greater than 0 and less than or equal to 1000" + + top_p = 0.0 + if "top_p" in request.get_json(): + top_p = request.get_json()["top_p"] + if not (type(top_p) == float): + return "top_p must be a positive float less than or equal to 1.0" + if top_p > 0.0 and top_k > 0.0: + return "cannot set both top-k and top-p samplings." + if not (0 <= top_p <= 1.0): + return "top_p must be less than or equal to 1.0" + + top_p_decay = 0.0 + if "top_p_decay" in request.get_json(): + top_p_decay = request.get_json()["top_p_decay"] + if not (type(top_p_decay) == float): + return "top_p_decay must be a positive float less than or equal to 1.0" + if top_p == 0.0: + return "top_p_decay cannot be set without top_p" + if not (0 <= top_p_decay <= 1.0): + return "top_p_decay must be less than or equal to 1.0" + + top_p_bound = 0.0 + if "top_p_bound" in request.get_json(): + top_p_bound = request.get_json()["top_p_bound"] + if not (type(top_p_bound) == float): + return "top_p_bound must be a positive float less than or equal to top_p" + if top_p == 0.0: + return "top_p_bound cannot be set without top_p" + if not (0.0 < top_p_bound <= top_p): + return "top_p_bound must be greater than 0 and less than top_p" + + add_BOS = False + if "add_BOS" in request.get_json(): + add_BOS = request.get_json()["add_BOS"] + if not isinstance(add_BOS, bool): + return "add_BOS must be a boolean value" + + if any([len(prompt) == 0 for prompt in prompts]) and not add_BOS: + return "Empty prompts require add_BOS=true" + + stop_on_double_eol = False + if "stop_on_double_eol" in request.get_json(): + stop_on_double_eol = request.get_json()["stop_on_double_eol"] + if not isinstance(stop_on_double_eol, bool): + return "stop_on_double_eol must be a boolean value" + + stop_on_eol = False + if "stop_on_eol" in request.get_json(): + stop_on_eol = request.get_json()["stop_on_eol"] + if not isinstance(stop_on_eol, bool): + return "stop_on_eol must be a boolean value" + + prevent_newline_after_colon = False + if "prevent_newline_after_colon" in request.get_json(): + prevent_newline_after_colon = request.get_json()["prevent_newline_after_colon"] + if not isinstance(prevent_newline_after_colon, bool): + return "prevent_newline_after_colon must be a boolean value" + + 
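The checks up to this point already pin down how the sampling knobs may be combined: top_k and top_p are mutually exclusive, temperature must lie in (0, 100], and top_p_decay/top_p_bound only make sense when top_p is set. Some hypothetical request bodies measured against these rules (illustrative only, not part of the patch):

    ok_greedy = {"prompts": ["Hello"], "tokens_to_generate": 32, "top_k": 1}              # greedy decoding
    ok_top_k  = {"prompts": ["Hello"], "tokens_to_generate": 32, "top_k": 40, "temperature": 0.8}
    ok_top_p  = {"prompts": ["Hello"], "tokens_to_generate": 32, "top_p": 0.9, "top_p_decay": 0.99}
    bad_both  = {"prompts": ["Hello"], "top_k": 40, "top_p": 0.9}   # rejected: cannot set both top-k and top-p
    bad_temp  = {"prompts": ["Hello"], "temperature": 0}            # rejected: temperature must be > 0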
random_seed = -1 + if "random_seed" in request.get_json(): + random_seed = request.get_json()["random_seed"] + if not isinstance(random_seed, int): + return "random_seed must be integer" + if random_seed < 0: + return "random_seed must be a positive integer" + + no_log = False + if "no_log" in request.get_json(): + no_log = request.get_json()["no_log"] + if not isinstance(no_log, bool): + return "no_log must be a boolean value" + + beam_width = None + if "beam_width" in request.get_json(): + beam_width = request.get_json()["beam_width"] + if not isinstance(beam_width, int): + return "beam_width must be integer" + if beam_width < 1: + return "beam_width must be an integer > 1" + if len(prompts) > 1: + return "When doing beam_search, batch size must be 1" + + stop_token=50256 + if "stop_token" in request.get_json(): + stop_token = request.get_json()["stop_token"] + if not isinstance(stop_token, int): + return "stop_token must be an integer" + + length_penalty = 1 + if "length_penalty" in request.get_json(): + length_penalty = request.get_json()["length_penalty"] + if not isinstance(length_penalty, float): + return "length_penalty must be a float" + + with lock: # Need to get lock to keep multiple threads from hitting code + + if not no_log: + print("request IP: " + str(request.remote_addr)) + print(json.dumps(request.get_json()),flush=True) + print("start time: ", datetime.datetime.now()) + + try: + if beam_width is not None: + MegatronGenerate.send_do_beam_search() # Tell other ranks we're doing beam_search + response, response_seg, response_scores = \ + beam_search_and_post_process( + self.model, + prompts=prompts, + tokens_to_generate=tokens_to_generate, + beam_size = beam_width, + add_BOS=add_BOS, + stop_token=stop_token, + num_return_gen=beam_width, # Returning whole beam + length_penalty=length_penalty, + prevent_newline_after_colon=prevent_newline_after_colon + ) + + return jsonify({"text": response, + "segments": response_seg, + "scores": response_scores}) + else: + MegatronGenerate.send_do_generate() # Tell other ranks we're doing generate + response, response_seg, response_logprobs, _ = \ + generate_and_post_process( + self.model, + prompts=prompts, + tokens_to_generate=tokens_to_generate, + return_output_log_probs=logprobs, + top_k_sampling=top_k, + top_p_sampling=top_p, + top_p_decay=top_p_decay, + top_p_bound=top_p_bound, + temperature=temperature, + add_BOS=add_BOS, + use_eod_token_for_early_termination=True, + stop_on_double_eol=stop_on_double_eol, + stop_on_eol=stop_on_eol, + prevent_newline_after_colon=prevent_newline_after_colon, + random_seed=random_seed) + + return jsonify({"text": response, + "segments": response_seg, + "logprobs": response_logprobs}) + + except ValueError as ve: + return ve.args[0] + print("end time: ", datetime.datetime.now()) + + +class MegatronServer(object): + def __init__(self, model): + self.app = Flask(__name__, static_url_path='') + api = Api(self.app) + api.add_resource(MegatronGenerate, '/api', resource_class_args=[model]) + + def run(self, url): + self.app.run(url, threaded=True, debug=False) diff --git a/multilinguality_megatron/megatron/timers.py b/multilinguality_megatron/megatron/timers.py new file mode 100644 index 0000000000000000000000000000000000000000..a9478fa014b3a01dd514f74005a4b86294328dc2 --- /dev/null +++ b/multilinguality_megatron/megatron/timers.py @@ -0,0 +1,304 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
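Once the server is running, the '/api' resource registered by MegatronServer in text_generation_server.py above accepts PUT requests carrying these JSON fields. A minimal client-side sketch (the host and port are assumptions; Flask defaults to port 5000 unless configured otherwise):

    import requests

    resp = requests.put(
        "http://localhost:5000/api",                  # assumed host/port
        json={
            "prompts": ["The capital of Portugal is"],
            "tokens_to_generate": 16,
            "top_p": 0.9,
            "temperature": 0.8,
        },
    )
    print(resp.json()["text"])   # one string per prompt (prompt plus generated continuation)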
+ +"""Megatron timers.""" + +from abc import ABC +from abc import abstractmethod +import time + +import torch + + + +class TimerBase(ABC): + + def __init__(self, name): + self.name = name + + @abstractmethod + def start(self, barrier=False): + pass + + @abstractmethod + def stop(self, barrier=False): + pass + + @abstractmethod + def reset(self): + pass + + @abstractmethod + def elapsed(self, reset=True, barrier=False): + pass + + + +class DummyTimer(TimerBase): + + def __init__(self): + super().__init__('dummy timer') + + def start(self, barrier=False): + return + + def stop(self, barrier=False): + return + + def reset(self): + return + + def elapsed(self, reset=True, barrier=False): + raise Exception('dummy timer should not be used to ' + 'calculate elapsed time') + + + +class Timer(TimerBase): + """ + Comment on using `barrier`: If this flag is passed, then all + the caller processes will wait till all reach the timing routine. + It is up to the user to make sure all the ranks in `barrier_group` + call it otherwise, it will result in a hang. + Comment on `barrier_group`: By default it is set to None which + in torch distributed land, it will result in the global communicator. + """ + + def __init__(self, name): + super().__init__(name) + self._elapsed = 0.0 + self._started = False + # Note that None will default to the global process group + self._barrier_group = None + self._start_time = time.time() + + + def set_barrier_group(self, barrier_group): + self._barrier_group = barrier_group + + + def start(self, barrier=False): + """Start the timer.""" + assert not self._started, 'timer has already been started' + if barrier: + torch.distributed.barrier(group=self._barrier_group) + torch.cuda.synchronize() + self._start_time = time.time() + self._started = True + + + def stop(self, barrier=False): + """Stop the timer.""" + assert self._started, 'timer is not started' + if barrier: + torch.distributed.barrier(group=self._barrier_group) + torch.cuda.synchronize() + self._elapsed += (time.time() - self._start_time) + self._started = False + + + def reset(self): + """Reset timer.""" + self._elapsed = 0.0 + self._started = False + + + def elapsed(self, reset=True, barrier=False): + """Calculate the elapsed time.""" + _started = self._started + # If the timing in progress, end it first. + if self._started: + self.stop(barrier=barrier) + # Get the elapsed time. + _elapsed = self._elapsed + # Reset the elapsed time + if reset: + self.reset() + # If timing was in progress, set it back. + if _started: + self.start(barrier=barrier) + return _elapsed + + + +class Timers: + """Group of timers.""" + + def __init__(self, log_level, log_option): + self._log_level = log_level + self._log_option = log_option + self._timers = {} + self._log_levels = {} + self._dummy_timer = DummyTimer() + self._max_log_level = 2 + + + def __call__(self, name, log_level=None): + # If the timer has already been set, then check if the log-level + # is provided, it matches the one that the timer was created with. + if name in self._timers: + if log_level is not None: + assert log_level == self._log_levels[name], \ + 'input log level {} does not match already existing '\ + 'log level {} for {} timer'.format( + log_level, self._log_levels[name], name) + return self._timers[name] + # If timer does not exist and no log level is provided, + # set it to the max log level which is 2. 
+ if log_level is None: + log_level = self._max_log_level + assert log_level <= self._max_log_level, \ + 'log level {} is larger than max supported log level {}'.format( + log_level, self._max_log_level) + # Now if the input log level is larger than the one set for + # the timers class, just ignore it and return a dummy timer. + if log_level > self._log_level: + return self._dummy_timer + # Otherwise, initalize the timer and set the level. + self._timers[name] = Timer(name) + self._log_levels[name] = log_level + return self._timers[name] + + + def _get_elapsed_time_all_ranks(self, names, reset, barrier): + """ + Assumptions: + - All the ranks call this function. + - `names` are identical on all ranks. + If the above assumptions are not met, calling this function will + result in hang. + Arguments: + - names: list of timer names + - reset: reset the timer after recording the elapsed time + - barrier: if set, do a global barrier before time measurments + """ + + # First make sure all the callers are in sync. + if barrier: + torch.distributed.barrier() + + world_size = torch.distributed.get_world_size() + rank = torch.distributed.get_rank() + + # Here we can use gather on the rank we want to print the + # timing, however, there is no gather_base support in + # pytorch yet. It is simpler to deal with a single tensor + # and since we are only gathering a small amount of data, + # it should be ok to use all-gather instead of gather. + rank_name_to_time = torch.zeros((world_size, len(names)), + dtype=torch.float, + device=torch.cuda.current_device()) + for i, name in enumerate(names): + if name in self._timers: + # Here we don't need to pass the barrier flag as all + # the processes are already in sync. This avoids the + # issue of different timers having different barrier + # groups inside their class. + rank_name_to_time[rank, i] = self._timers[name].elapsed( + reset=reset) + + # See the note above for why we are not using gather. 
+ torch.distributed._all_gather_base(rank_name_to_time.view(-1), + rank_name_to_time[rank, :].view(-1)) + + return rank_name_to_time + + + def _get_global_min_max_time(self, names, reset, barrier, normalizer): + """Report only min and max times across all ranks.""" + + rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, + barrier) + name_to_min_max_time = {} + for i, name in enumerate(names): + rank_to_time = rank_name_to_time[:, i] + # filter out the ones we did not have any timings for + rank_to_time = rank_to_time[rank_to_time > 0.0] + # If the timer exists: + if rank_to_time.numel() > 0: + name_to_min_max_time[name] = ( + rank_to_time.min().item() / normalizer, + rank_to_time.max().item() / normalizer) + return name_to_min_max_time + + + def _get_global_min_max_time_string(self, names, reset, barrier, + normalizer, max_only): + name_to_min_max_time = self._get_global_min_max_time( + names, reset, barrier, normalizer) + if not name_to_min_max_time: + return None + output_string = '(min, max) time across ranks (ms):' + for name in name_to_min_max_time: + min_time, max_time = name_to_min_max_time[name] + if max_only: + output_string += '\n {}: {:.2f}'.format( + (name+' ').ljust(48, '.'), max_time) + else: + output_string += '\n {}: ({:.2f}, {:.2f})'.format( + (name+' ').ljust(48, '.'), min_time, max_time) + return output_string + + + def _get_all_ranks_time_string(self, names, reset, barrier, normalizer): + """Report times across all ranks.""" + rank_name_to_time = self._get_elapsed_time_all_ranks(names, reset, + barrier) + + output_string = 'times across ranks (ms):' + no_reported_timing = True + for i, name in enumerate(names): + not_yet_found = True + for rank in range(torch.distributed.get_world_size()): + if rank_name_to_time[rank, i] > 0: + no_reported_timing = False + if not_yet_found: + not_yet_found = False + output_string += '\n {}:'.format(name) + output_string += '\n rank {:2d}: {:.2f}'.format( + rank, rank_name_to_time[rank, i] / normalizer) + if no_reported_timing: + return None + return output_string + + + def log(self, names, rank=None, normalizer=1.0, reset=True, barrier=False): + """Log a group of timers.""" + + # Print. + assert normalizer > 0.0 + if self._log_option in ['max', 'minmax']: + max_only = False + if self._log_option == 'max': + max_only = True + output_string = self._get_global_min_max_time_string( + names, reset, barrier, normalizer/1000.0, max_only) + elif self._log_option == 'all': + output_string = self._get_all_ranks_time_string(names, + reset, barrier, + normalizer/1000.0) + else: + raise Exception('unknown timing log option {}'.format( + self._log_option)) + + # If no input rank is provided, log on last rank. + if rank is None: + rank = torch.distributed.get_world_size() - 1 + if rank == torch.distributed.get_rank() and output_string is not None: + print(output_string, flush=True) + + + def write(self, names, writer, iteration, normalizer=1.0, + reset=False, barrier=False): + """Write timers to a tensorboard writer + Note that we only report maximum time across ranks to tensorboard. 
+ """ + # currently when using add_scalars, + # torch.utils.add_scalars makes each timer its own run, which + # polutes the runs list, so we just add each as a scalar + assert normalizer > 0.0 + name_to_min_max_time = self._get_global_min_max_time( + names, reset, barrier, normalizer) + if writer is not None: + for name in name_to_min_max_time: + _, max_time = name_to_min_max_time[name] + writer.add_scalar(name + '-time', max_time, iteration) diff --git a/multilinguality_megatron/megatron/tokenizer/__init__.py b/multilinguality_megatron/megatron/tokenizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59ceb33865a384b4d5f12efee2d31944c79ff109 --- /dev/null +++ b/multilinguality_megatron/megatron/tokenizer/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +from .tokenizer import build_tokenizer diff --git a/multilinguality_megatron/megatron/tokenizer/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/megatron/tokenizer/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffc38d8f79cd04c7856b7016b4953899aefe0e67 Binary files /dev/null and b/multilinguality_megatron/megatron/tokenizer/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/tokenizer/__pycache__/bert_tokenization.cpython-39.pyc b/multilinguality_megatron/megatron/tokenizer/__pycache__/bert_tokenization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62fd61201b9801eebc2eeaec365e5aaa377bc1c4 Binary files /dev/null and b/multilinguality_megatron/megatron/tokenizer/__pycache__/bert_tokenization.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/tokenizer/__pycache__/gpt2_tokenization.cpython-39.pyc b/multilinguality_megatron/megatron/tokenizer/__pycache__/gpt2_tokenization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73be2249cd13df21dd4f1d3e58a5a4a8073f91a0 Binary files /dev/null and b/multilinguality_megatron/megatron/tokenizer/__pycache__/gpt2_tokenization.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/tokenizer/__pycache__/tokenizer.cpython-39.pyc b/multilinguality_megatron/megatron/tokenizer/__pycache__/tokenizer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bb0d914f1f9b3025c396d67d0fef2fe53acb05a Binary files /dev/null and b/multilinguality_megatron/megatron/tokenizer/__pycache__/tokenizer.cpython-39.pyc differ diff --git a/multilinguality_megatron/megatron/tokenizer/bert_tokenization.py b/multilinguality_megatron/megatron/tokenizer/bert_tokenization.py new file mode 100644 index 0000000000000000000000000000000000000000..99f9a87958d93bc96b7e48cd75dba67ddaf7e038 --- /dev/null +++ b/multilinguality_megatron/megatron/tokenizer/bert_tokenization.py @@ -0,0 +1,431 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tokenization classes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import unicodedata +import six + + +def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): + """Checks whether the casing config is consistent with the checkpoint name.""" + + # The casing has to be passed in by the user and there is no explicit check + # as to whether it matches the checkpoint. The casing information probably + # should have been stored in the bert_config.json file, but it's not, so + # we have to heuristically detect it to validate. + + if not init_checkpoint: + return + + m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) + if m is None: + return + + model_name = m.group(1) + + lower_models = [ + "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", + "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" + ] + + cased_models = [ + "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", + "multi_cased_L-12_H-768_A-12" + ] + + is_bad_config = False + if model_name in lower_models and not do_lower_case: + is_bad_config = True + actual_flag = "False" + case_name = "lowercased" + opposite_flag = "True" + + if model_name in cased_models and do_lower_case: + is_bad_config = True + actual_flag = "True" + case_name = "cased" + opposite_flag = "False" + + if is_bad_config: + raise ValueError( + "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " + "However, `%s` seems to be a %s model, so you " + "should pass in `--do_lower_case=%s` so that the fine-tuning matches " + "how the model was pre-training. If this error is wrong, please " + "just comment out this check." % (actual_flag, init_checkpoint, + model_name, case_name, opposite_flag)) + + +def convert_to_unicode(text): + """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def printable_text(text): + """Returns text encoded in a way suitable for print or `tf.logging`.""" + + # These functions want `str` for both Python2 and Python3, but in one case + # it's a Unicode string and in the other it's a byte string. 
+ if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return text.encode("utf-8") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + index = 0 + with open(vocab_file, "r") as reader: + while True: + token = convert_to_unicode(reader.readline()) + if not token: + break + token = token.strip() + vocab[token] = index + index += 1 + return vocab + + +def convert_by_vocab(vocab, items): + """Converts a sequence of [tokens|ids] using the vocab.""" + output = [] + for item in items: + output.append(vocab[item]) + return output + + +def convert_tokens_to_ids(vocab, tokens): + return convert_by_vocab(vocab, tokens) + + +def convert_ids_to_tokens(inv_vocab, ids): + return convert_by_vocab(inv_vocab, ids) + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class FullTokenizer(object): + """Runs end-to-end tokenziation.""" + + def __init__(self, vocab_file, do_lower_case=True): + self.vocab = load_vocab(vocab_file) + self.inv_vocab = {v: k for k, v in self.vocab.items()} + self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + + return split_tokens + + def convert_tokens_to_ids(self, tokens): + return convert_by_vocab(self.vocab, tokens) + + def convert_ids_to_tokens(self, ids): + return convert_by_vocab(self.inv_vocab, ids) + + @staticmethod + def convert_tokens_to_string(tokens, clean_up_tokenization_spaces=True): + """ Converts a sequence of tokens (string) in a single string. """ + + def clean_up_tokenization(out_string): + """ Clean up a list of simple English tokenization artifacts + like spaces before punctuations and abreviated forms. + """ + out_string = ( + out_string.replace(" .", ".") + .replace(" ?", "?") + .replace(" !", "!") + .replace(" ,", ",") + .replace(" ' ", "'") + .replace(" n't", "n't") + .replace(" 'm", "'m") + .replace(" 's", "'s") + .replace(" 've", "'ve") + .replace(" 're", "'re") + ) + return out_string + + text = ' '.join(tokens).replace(' ##', '').strip() + if clean_up_tokenization_spaces: + clean_text = clean_up_tokenization(text) + return clean_text + else: + return text + + def vocab_size(self): + return len(self.vocab) + + +class BasicTokenizer(object): + """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" + + def __init__(self, do_lower_case=True): + """Constructs a BasicTokenizer. + + Args: + do_lower_case: Whether to lower case the input. + """ + self.do_lower_case = do_lower_case + + def tokenize(self, text): + """Tokenizes a piece of text.""" + text = convert_to_unicode(text) + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. 
This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + text = self._tokenize_chinese_chars(text) + + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if self.do_lower_case: + token = token.lower() + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text): + """Splits punctuation on a piece of text.""" + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ((cp >= 0x4E00 and cp <= 0x9FFF) or # + (cp >= 0x3400 and cp <= 0x4DBF) or # + (cp >= 0x20000 and cp <= 0x2A6DF) or # + (cp >= 0x2A700 and cp <= 0x2B73F) or # + (cp >= 0x2B740 and cp <= 0x2B81F) or # + (cp >= 0x2B820 and cp <= 0x2CEAF) or + (cp >= 0xF900 and cp <= 0xFAFF) or # + (cp >= 0x2F800 and cp <= 0x2FA1F)): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xfffd or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenziation.""" + + def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """Tokenizes a piece of text into its word pieces. + + This uses a greedy longest-match-first algorithm to perform tokenization + using the given vocabulary. + + For example: + input = "unaffable" + output = ["un", "##aff", "##able"] + + Args: + text: A single token or whitespace separated tokens. 
This should have + already been passed through `BasicTokenizer. + + Returns: + A list of wordpiece tokens. + """ + + text = convert_to_unicode(text) + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens + + +def _is_whitespace(char): + """Checks whether `chars` is a whitespace character.""" + # \t, \n, and \r are technically contorl characters but we treat them + # as whitespace since they are generally considered as such. + if char == " " or char == "\t" or char == "\n" or char == "\r": + return True + cat = unicodedata.category(char) + if cat == "Zs": + return True + return False + + +def _is_control(char): + """Checks whether `chars` is a control character.""" + # These are technically control characters but we count them as whitespace + # characters. + if char == "\t" or char == "\n" or char == "\r": + return False + cat = unicodedata.category(char) + if cat in ("Cc", "Cf"): + return True + return False + + +def _is_punctuation(char): + """Checks whether `chars` is a punctuation character.""" + cp = ord(char) + # We treat all non-letter/number ASCII as punctuation. + # Characters such as "^", "$", and "`" are not in the Unicode + # Punctuation class but we treat them as punctuation anyways, for + # consistency. + if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or + (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + return True + cat = unicodedata.category(char) + if cat.startswith("P"): + return True + return False diff --git a/multilinguality_megatron/megatron/tokenizer/gpt2_tokenization.py b/multilinguality_megatron/megatron/tokenizer/gpt2_tokenization.py new file mode 100644 index 0000000000000000000000000000000000000000..3f37e449089b8f779c93ac7f034085fd7607bfb0 --- /dev/null +++ b/multilinguality_megatron/megatron/tokenizer/gpt2_tokenization.py @@ -0,0 +1,321 @@ +# coding=utf-8 +# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tokenization classes for OpenAI GPT.""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import sys +import json +import logging +import os +import regex as re +from io import open + +try: + from functools import lru_cache +except ImportError: + # Just a dummy decorator to get the checks to run on python2 + # because honestly I don't want to support a byte-level unicode BPE + # tokenizer on python 2 right now. 
+ def lru_cache(): + return lambda func: func + + +logger = logging.getLogger(__name__) + +PRETRAINED_VOCAB_ARCHIVE_MAP = { + 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json", +} +PRETRAINED_MERGES_ARCHIVE_MAP = { + 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt", +} +PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { + 'gpt2': 1024, +} +VOCAB_NAME = 'vocab.json' +MERGES_NAME = 'merges.txt' +SPECIAL_TOKENS_NAME = 'special_tokens.txt' + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + _chr = unichr if sys.version_info[0] == 2 else chr + bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + \ + list(range(ord("®"), ord("ÿ") + 1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [_chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class GPT2Tokenizer(object): + """ + GPT-2 BPE tokenizer. Peculiarities: + - Byte-level BPE + """ + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): + """ + Instantiate a PreTrainedBertModel from a pre-trained model file. + Download and cache the pre-trained model file if needed. + """ + if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: + vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] + merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path] + special_tokens_file = None + else: + vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME) + merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME) + special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME) + if not os.path.exists(special_tokens_file): + special_tokens_file = None + else: + logger.info("loading special tokens file {}".format(special_tokens_file)) + # redirect to the cache, if necessary + try: + from .file_utils import cached_path + resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) + resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir) + except EnvironmentError: + logger.error( + "Model name '{}' was not found in model name list ({}). 
" + "We assumed '{}' was a path or url but couldn't find files {} and {} " + "at this path or url.".format( + pretrained_model_name_or_path, + ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), + pretrained_model_name_or_path, + vocab_file, merges_file)) + return None + if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file: + logger.info("loading vocabulary file {}".format(vocab_file)) + logger.info("loading merges file {}".format(merges_file)) + else: + logger.info("loading vocabulary file {} from cache at {}".format( + vocab_file, resolved_vocab_file)) + logger.info("loading merges file {} from cache at {}".format( + merges_file, resolved_merges_file)) + if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: + # if we're using a pretrained model, ensure the tokenizer wont index sequences longer + # than the number of positional embeddings + max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] + kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) + # Instantiate tokenizer. + if special_tokens_file and 'special_tokens' not in kwargs: + special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1] + else: + special_tokens = kwargs.pop('special_tokens', []) + tokenizer = cls( + resolved_vocab_file, + resolved_merges_file, + special_tokens=special_tokens, + *inputs, + **kwargs) + return tokenizer + + def __init__(self, vocab_file, merges_file, errors='replace', + special_tokens=None, max_len=None): + self.max_len = max_len if max_len is not None else int(1e12) + self.encoder = json.load(open(vocab_file)) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_data] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + + # Should haved added re.IGNORECASE so BPE merges can happen for + # capitalized versions of contractions + self.pat = re.compile( + r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + self.special_tokens = {} + self.special_tokens_decoder = {} + self.set_special_tokens(special_tokens) + + def __len__(self): + return len(self.encoder) + len(self.special_tokens) + + def set_special_tokens(self, special_tokens): + """ Add a list of additional tokens to the encoder. + The additional tokens are indexed starting from the last index of the + current vocabulary in the order of the `special_tokens` list. 
+ """ + if not special_tokens: + self.special_tokens = {} + self.special_tokens_decoder = {} + return + self.special_tokens = dict((tok, len(self.encoder) + i) + for i, tok in enumerate(special_tokens)) + self.special_tokens_decoder = {v: k for k, v in self.special_tokens.items()} + logger.info("Special tokens {}".format(self.special_tokens)) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except BaseException: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def tokenize(self, text): + """ Tokenize a string. """ + bpe_tokens = [] + for token in re.findall(self.pat, text): + if sys.version_info[0] == 2: + token = ''.join(self.byte_encoder[ord(b)] for b in token) + else: + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def convert_tokens_to_ids(self, tokens): + """ Converts a sequence of tokens into ids using the vocab. """ + ids = [] + if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)): + if tokens in self.special_tokens: + return self.special_tokens[tokens] + else: + return self.encoder.get(tokens, 0) + for token in tokens: + if token in self.special_tokens: + ids.append(self.special_tokens[token]) + else: + ids.append(self.encoder.get(token, 0)) + if len(ids) > self.max_len: + logger.warning( + "Token indices sequence length is longer than the specified maximum " + " sequence length for this OpenAI GPT model ({} > {}). 
Running this" + " sequence through the model will result in indexing errors".format( + len(ids), self.max_len) + ) + return ids + + def convert_ids_to_tokens(self, ids, skip_special_tokens=False): + """Converts a sequence of ids in BPE tokens using the vocab.""" + tokens = [] + for i in ids: + if i in self.special_tokens_decoder: + if not skip_special_tokens: + tokens.append(self.special_tokens_decoder[i]) + else: + tokens.append(self.decoder[i]) + return tokens + + def encode(self, text): + return self.convert_tokens_to_ids(self.tokenize(text)) + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) + return text + + def save_vocabulary(self, vocab_path): + """Save the tokenizer vocabulary and merge files to a directory.""" + if not os.path.isdir(vocab_path): + logger.error("Vocabulary path ({}) should be a directory".format(vocab_path)) + return + vocab_file = os.path.join(vocab_path, VOCAB_NAME) + merge_file = os.path.join(vocab_path, MERGES_NAME) + special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME) + + with open(vocab_file, 'w', encoding='utf-8') as f: + f.write(json.dumps(self.encoder, ensure_ascii=False)) + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write(u'#version: 0.2\n') + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!".format(merge_file)) + index = token_index + writer.write(' '.join(bpe_tokens) + u'\n') + index += 1 + + index = len(self.encoder) + with open(special_tokens_file, 'w', encoding='utf-8') as writer: + for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive." + " Please check that the tokenizer is not corrupted!".format(special_tokens_file)) + index = token_index + writer.write(token + u'\n') + index += 1 + + return vocab_file, merge_file, special_tokens_file diff --git a/multilinguality_megatron/megatron/tokenizer/tokenizer.py b/multilinguality_megatron/megatron/tokenizer/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c43238d81f104b4172847f23add60a88a79e5c44 --- /dev/null +++ b/multilinguality_megatron/megatron/tokenizer/tokenizer.py @@ -0,0 +1,612 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Megatron tokenizers.""" + +from abc import ABC +from abc import abstractmethod + +from .bert_tokenization import FullTokenizer as FullBertTokenizer +from .gpt2_tokenization import GPT2Tokenizer + + +def build_tokenizer(args): + """Initialize tokenizer.""" + if args.rank == 0: + print('> building {} tokenizer ...'.format(args.tokenizer_type), + flush=True) + + if args.tokenizer_type != 'FalconTokenizer': + assert args.vocab_file is not None + + # Select and instantiate the tokenizer. 
+ if args.tokenizer_type == 'BertWordPieceLowerCase': + tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file, + lower_case=True, + vocab_extra_ids=args.vocab_extra_ids) + elif args.tokenizer_type == 'BertWordPieceCase': + tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file, + lower_case=False, + vocab_extra_ids=args.vocab_extra_ids) + elif args.tokenizer_type == 'GPT2BPETokenizer': + assert args.merge_file is not None + tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file) + elif args.tokenizer_type == 'SentencePieceTokenizer': + tokenizer = _SentencePieceTokenizer(args.vocab_file, vocab_extra_ids=args.vocab_extra_ids, + vocab_extra_ids_list=args.vocab_extra_ids_list, new_tokens=args.new_tokens) + elif args.tokenizer_type == 'FalconTokenizer': + tokenizer = _FalconTokenizer(vocab_extra_ids_list=args.vocab_extra_ids_list, new_tokens=args.new_tokens) + elif args.tokenizer_type == "PretrainedFromHF": + assert args.vocab_file is not None + + # prevent transformers from logging info and warnings on each rank + import transformers + import logging + if args.rank == 0: + transformers.utils.logging.set_verbosity(logging.INFO) + else: + # shut the warnings on replicas + transformers.utils.logging.set_verbosity(logging.ERROR) + + if args.rank == 0: + print(" vocab file is un-used. loading tokenizer from pre-trained model") + tokenizer = _AutoTokenizer(args.vocab_file, vocab_extra_ids=args.vocab_extra_ids) + else: + raise NotImplementedError('{} tokenizer is not ' + 'implemented.'.format(args.tokenizer_type)) + + # Add vocab size. + args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, + args) + + return tokenizer + + +def _vocab_size_with_padding(orig_vocab_size, args): + """Pad vocab size so it is divisible by model parallel size and + still having GPU friendly size.""" + + after = orig_vocab_size + multiple = args.make_vocab_size_divisible_by * \ + args.tensor_model_parallel_size + while (after % multiple) != 0: + after += 1 + if args.rank == 0: + print(' > padded vocab (size: {}) with {} dummy tokens ' + '(new size: {})'.format( + orig_vocab_size, after - orig_vocab_size, after), flush=True) + return after + + +class AbstractTokenizer(ABC): + """Abstract class for tokenizer.""" + + def __init__(self, name): + self.name = name + super().__init__() + + @property + @abstractmethod + def vocab_size(self): + pass + + @property + @abstractmethod + def vocab(self): + """Dictionary from vocab text token to id token.""" + pass + + @property + @abstractmethod + def inv_vocab(self): + """Dictionary from vocab id token to text token.""" + pass + + @abstractmethod + def tokenize(self, text): + pass + + def detokenize(self, token_ids): + raise NotImplementedError('detokenizer is not implemented for {} ' + 'tokenizer'.format(self.name)) + + @property + def cls(self): + raise NotImplementedError('CLS is not provided for {} ' + 'tokenizer'.format(self.name)) + + @property + def sep(self): + raise NotImplementedError('SEP is not provided for {} ' + 'tokenizer'.format(self.name)) + + @property + def pad(self): + raise NotImplementedError('PAD is not provided for {} ' + 'tokenizer'.format(self.name)) + + @property + def eod(self): + raise NotImplementedError('EOD is not provided for {} ' + 'tokenizer'.format(self.name)) + + @property + def mask(self): + raise NotImplementedError('MASK is not provided for {} ' + 'tokenizer'.format(self.name)) + + +class _BertWordPieceTokenizer(AbstractTokenizer): + """Original BERT wordpiece tokenizer.""" + + def __init__(self, 
vocab_file, lower_case=True, vocab_extra_ids=0): + if lower_case: + name = 'BERT Lower Case' + else: + name = 'BERT Upper Case' + super().__init__(name) + self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case) + self.cls_id = self.tokenizer.vocab['[CLS]'] + self.sep_id = self.tokenizer.vocab['[SEP]'] + self.pad_id = self.tokenizer.vocab['[PAD]'] + self.mask_id = self.tokenizer.vocab['[MASK]'] + self._additional_special_tokens = [] + + # (dsachan) Add BOS and EOS tokens + SPECIAL_TOKENS = {'eos_token': '[EOS]', + 'bos_token': '[BOS]'} + self._bos_token = '[BOS]' + self.add_token(self._bos_token) + self._bos_token_id = self.vocab.get(self._bos_token) + + self._eos_token = '[EOS]' + self.add_token(self._eos_token) + self._eos_token_id = self.vocab.get(self._eos_token) + + # (dsachan) Add additional special tokens + # These can be used as sentinel tokens in T5 model inputs + additional_special_tokens = [] + additional_special_tokens.extend( + ["".format(i) for i in range(vocab_extra_ids)]) + self.add_additional_special_tokens(additional_special_tokens) + + def add_token(self, token): + if token not in self.vocab: + self.inv_vocab[self.vocab_size] = token + # self.vocab_size comes from len(vocab) + # and it will increase as we add elements + self.vocab[token] = self.vocab_size + + def add_additional_special_tokens(self, tokens_list): + setattr(self, "additional_special_tokens", tokens_list) + for value in tokens_list: + self.add_token(value) + + @property + def vocab_size(self): + return self.tokenizer.vocab_size() + + @property + def vocab(self): + return self.tokenizer.vocab + + @property + def inv_vocab(self): + return self.tokenizer.inv_vocab + + def tokenize(self, text): + text_tokens = self.tokenizer.tokenize(text) + return self.tokenizer.convert_tokens_to_ids(text_tokens) + + def decode(self, ids): + tokens = self.tokenizer.convert_ids_to_tokens(ids) + return self.tokenizer.convert_tokens_to_string(tokens) + + def decode_token_ids(self, token_ids): + tokens = self.tokenizer.convert_ids_to_tokens(token_ids) + exclude_list = ['[PAD]', '[CLS]'] + non_pads = [t for t in tokens if t not in exclude_list] + + result = "" + for s in non_pads: + if s.startswith("##"): + result += s[2:] + else: + result += " " + s + + return result + + @property + def cls(self): + return self.cls_id + + @property + def sep(self): + return self.sep_id + + @property + def pad(self): + return self.pad_id + + @property + def mask(self): + return self.mask_id + + @property + def bos_token(self): + """ Beginning of sentence token id """ + return self._bos_token + + @property + def eos_token(self): + """ End of sentence token id """ + return self._eos_token + + @property + def additional_special_tokens(self): + """ All the additional special tokens you may want to use (list of strings).""" + return self._additional_special_tokens + + @property + def bos_token_id(self): + """ Id of the beginning of sentence token in the vocabulary.""" + return self._bos_token_id + + @property + def eos_token_id(self): + """ Id of the end of sentence token in the vocabulary.""" + return self._eos_token_id + + @property + def additional_special_tokens_ids(self): + """ Ids of all the additional special tokens in the vocabulary (list of integers).""" + return [self.vocab.get(token) for token in self._additional_special_tokens] + + @additional_special_tokens.setter + def additional_special_tokens(self, value): + self._additional_special_tokens = value + + +class _GPT2BPETokenizer(AbstractTokenizer): + """Original GPT2 BPE 
tokenizer.""" + + def __init__(self, vocab_file, merge_file): + name = 'GPT2 BPE' + super().__init__(name) + + self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace', + special_tokens=[], max_len=None) + self.eod_id = self.tokenizer.encoder['<|endoftext|>'] + + @property + def vocab_size(self): + return len(self.tokenizer.encoder) + + @property + def vocab(self): + return self.tokenizer.encoder + + @property + def inv_vocab(self): + return self.tokenizer.decoder + + def tokenize(self, text): + return self.tokenizer.encode(text) + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + return self.eod_id + + +class _FalconTokenizer(AbstractTokenizer): + """Wrapper of huggingface tokenizer.""" + + def __init__(self, vocab_extra_ids_list=None, new_tokens=True): + name = 'FalconTokenizer' + super().__init__(name) + from transformers import AutoTokenizer + self.tokenizer = AutoTokenizer.from_pretrained('tiiuae/falcon-40b') + self._eod = self.tokenizer.vocab['<|endoftext|>'] + + if vocab_extra_ids_list and new_tokens: + self.tokenizer.add_special_tokens({'additional_special_tokens': self.tokenizer.additional_special_tokens + vocab_extra_ids_list.split(",")}) + + self._inv_vocab = {idx: token for token, idx in self.tokenizer.vocab.items()} + + @property + def vocab_size(self): + return len(self.tokenizer.vocab) + + @property + def vocab(self): + return self.tokenizer.vocab + + def tokenize(self, text): + return self.tokenizer.encode(text) + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def inv_vocab(self): + return self._inv_vocab + + @property + def eod(self): + return self._eod + + +class _SentencePieceTokenizer(AbstractTokenizer): + """SentencePieceTokenizer-Megatron wrapper""" + + def __init__(self, model_file, vocab_extra_ids=0, vocab_extra_ids_list=None, new_tokens=True): + name = 'SentencePieceTokenizer' + super().__init__(name) + + import sentencepiece + self._tokenizer = sentencepiece.SentencePieceProcessor(model_file=model_file) + + self._initalize(vocab_extra_ids, vocab_extra_ids_list, new_tokens) + + def _initalize(self, vocab_extra_ids, vocab_extra_ids_list, new_tokens): + self._vocab = {} + self._inv_vocab = {} + + self._special_tokens = {} + self._inv_special_tokens = {} + + self._t5_tokens = [] + + for i in range(len(self._tokenizer)): + t = self._tokenizer.id_to_piece(i) + self._inv_vocab[i] = t + self._vocab[t] = i + + def _add_special_token(t): + if t not in self.vocab and not new_tokens: + return + if t not in self._vocab: + next_id = len(self._vocab) + self._vocab[t] = next_id + self._inv_vocab[next_id] = t + self._special_tokens[t] = self._vocab[t] + self._inv_special_tokens[self._vocab[t]] = t + + _add_special_token('') + self._cls_id = self._vocab.get('') + _add_special_token('') + self._sep_id = self._vocab.get('') + _add_special_token('') + self._eod_id = self._vocab.get('') + _add_special_token('') + self._mask_id = self._vocab.get('') + + pad_id = self._tokenizer.pad_id() + try: + pad_token = self._tokenizer.id_to_piece(pad_id) + except IndexError: + pad_token = '' + _add_special_token(pad_token) + self._pad_id = self._vocab.get(pad_token) + + bos_id = self._tokenizer.bos_id() + try: + bos_token = self._tokenizer.id_to_piece(bos_id) + except IndexError: + bos_token = '' + _add_special_token(bos_token) + self._bos_id = self._vocab.get(bos_token) + + eos_id = self._tokenizer.eos_id() + try: + eos_token = self._tokenizer.id_to_piece(eos_id) + except 
IndexError: + eos_token = '' + _add_special_token(eos_token) + self._eos_id = self._vocab.get(eos_token) + + for i in range(vocab_extra_ids): + t = "".format(i) + _add_special_token(t) + self._t5_tokens += [t] + if vocab_extra_ids_list: + for t in vocab_extra_ids_list.split(","): + _add_special_token(t) + print("Special tokens: {}".format(self._special_tokens)) + + @property + def vocab_size(self): + return len(self._vocab) + + @property + def vocab(self): + return self._vocab + + @property + def inv_vocab(self): + return self._inv_vocab + + # From: + # https://github.com/NVIDIA/NeMo/blob/c8fa217e811d60d11d014827c7f3845ff6c99ae7/nemo/collections/common/tokenizers/sentencepiece_tokenizer.py#L89 + def tokenize(self, text): + ids = [] + idx = 0 + + while 1: + indices = {} + for token in self._special_tokens: + try: + indices[token] = text[idx:].index(token) + except ValueError: + continue + if len(indices) == 0: + break + + next_token = min(indices, key=indices.get) + next_idx = idx + indices[next_token] + + ids.extend(self._tokenizer.encode_as_ids(text[idx:next_idx])) + ids.append(self._special_tokens[next_token]) + idx = next_idx + len(next_token) + + ids.extend(self._tokenizer.encode_as_ids(text[idx:])) + return ids + + # From: + # https://github.com/NVIDIA/NeMo/blob/c8fa217e811d60d11d014827c7f3845ff6c99ae7/nemo/collections/common/tokenizers/sentencepiece_tokenizer.py#L125 + def detokenize(self, ids): + text = "" + last_i = 0 + + for i, id in enumerate(ids): + if id in self._inv_special_tokens: + text += self._tokenizer.decode_ids(ids[last_i:i]) + " " + text += self._inv_special_tokens[id] + " " + last_i = i + 1 + text += self._tokenizer.decode_ids(ids[last_i:]) + return text.strip() + + @property + def cls(self): + return self._cls_id + + @property + def sep(self): + return self._sep_id + + @property + def pad(self): + return self._pad_id + + @property + def bos_token_id(self): + return self._bos_id + + @property + def bos(self): + return self._bos_id + + @property + def eod(self): + if self._eod_id is not None: + return self._eod_id + return self._eos_id # in case noe eod we can patch this up with an eos + + @property + def eos_token_id(self): + if self._eod_id is not None: + return self._eod_id + return self._eos_id + + @property + def eos(self): + return self._eos_id + + @property + def mask(self): + return self._mask_id + + @property + def additional_special_tokens_ids(self): + return [self.vocab[k] for k in self._t5_tokens] + +class _AutoTokenizer(AbstractTokenizer): + """AutoTokenizer for Hf Pretrained model loading.""" + + def __init__(self, tokenizer_name_or_path, vocab_extra_ids): + from transformers import AutoTokenizer + name = tokenizer_name_or_path + super().__init__(name) + hf_tokenizer_kwargs = {} + if vocab_extra_ids > 0: + # TODO @thomasw21 we might need to concatenate to a pre-existing list? + hf_tokenizer_kwargs["additional_special_tokens"] = [f"" for _id in range(vocab_extra_ids)] + self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, **hf_tokenizer_kwargs) + self.encoder = self.tokenizer.get_vocab() + self.decoder = {v: k for k, v in self.encoder.items()} + + @property + def vocab_size(self): + return len(self.tokenizer) # vocab_size doesn't contain additional tokens + + @property + def vocab(self): + # TODO @thomasw21 make sure that special tokens don't collapse with vocab tokens. 
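# --- Editor's note: illustrative sketch, not part of the patch. ---
# _SentencePieceTokenizer.tokenize() above repeatedly finds the earliest
# special token in the remaining text, encodes the plain span before it with
# sentencepiece, then emits the special token's id. The standalone version
# below shows that splitting logic with a stand-in encoder; the toy vocab and
# token ids are assumptions made up for the illustration.

def split_on_special_tokens(text, special_tokens, encode_plain):
    """Encode `text`, emitting each special-token string as a single id."""
    ids = []
    idx = 0
    while True:
        # Offset (relative to idx) of the first occurrence of each special
        # token still present in the remaining text.
        offsets = {}
        for token in special_tokens:
            found = text.find(token, idx)
            if found != -1:
                offsets[token] = found - idx
        if not offsets:
            break
        next_token = min(offsets, key=offsets.get)
        next_idx = idx + offsets[next_token]
        ids.extend(encode_plain(text[idx:next_idx]))  # plain span before it
        ids.append(special_tokens[next_token])        # the special id itself
        idx = next_idx + len(next_token)
    ids.extend(encode_plain(text[idx:]))              # trailing plain span
    return ids


if __name__ == "__main__":
    # Stand-in for SentencePieceProcessor.encode_as_ids: one id per word.
    vocab = {}
    def encode_plain(span):
        return [vocab.setdefault(w, 100 + len(vocab)) for w in span.split()]

    specials = {"<extra_id_0>": 0, "<eod>": 1}
    print(split_on_special_tokens("translate this <extra_id_0> then stop <eod>",
                                  specials, encode_plain))
# --- end editor's note ---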
+ return { + **{special_token: self.tokenizer.convert_tokens_to_ids(special_token) for special_token in self.tokenizer.additional_special_tokens}, + **self.tokenizer.vocab, + } + + @property + def inv_vocab(self): + return {v: k for k, v in self.vocab.items()} + + def tokenize(self, text): + # HACK: this was hanging for very large inputs (>1M chars) + # chunking it into 100k chars max seems to make it much faster + # WARNING: this might be breaking tokenization every once in a while + CHUNK_MAX = 100000 + if len(text) > CHUNK_MAX: + tokens = [] + for i in range(0, len(text), CHUNK_MAX): + tokens += self.tokenizer.encode(text[i:i+CHUNK_MAX], add_special_tokens=False) + # add special tokens to beginning and end + if self.tokenizer.bos_token: + tokens = [self.tokenizer.bos_token_id] + tokens + if self.tokenizer.eos_token_id: + tokens = tokens + [self.tokenizer.eos_token_id] + return tokens + else: + return self.tokenizer.encode(text) + + def detokenize(self, token_ids): + # extract string from HF + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + # TODO @thomasw21 might conflict with + return self.eos + + @property + def cls(self): + candidate = self.tokenizer.cls_token_id + return self._check_token_candidate(candidate) + + @property + def sep(self): + candidate = self.tokenizer.sep_token_id + return self._check_token_candidate(candidate) + + @property + def pad(self): + candidate = self.tokenizer.pad_token_id + return self._check_token_candidate(candidate) + + @property + def mask(self): + candidate = self.tokenizer.mask_token_id + return self._check_token_candidate(candidate) + + @property + def bos(self): + raise NotImplementedError("Missing ") + + @property + def eos(self): + # TODO @thomasw21 might conflict with the notion of + candidate = self.tokenizer.eos_token_id + return self._check_token_candidate(candidate) + + @property + def additional_special_tokens_ids(self): + """ All the additional special tokens you may want to use (list of strings).""" + return self.tokenizer.additional_special_tokens_ids + + @staticmethod + def _check_token_candidate(candidate): + if candidate is None: + raise AttributeError("Token doesn't exist") + return candidate \ No newline at end of file diff --git a/multilinguality_megatron/megatron/training.py b/multilinguality_megatron/megatron/training.py new file mode 100644 index 0000000000000000000000000000000000000000..3e87b91f8f8ca674ee4c9ecb52dfcc6f40357294 --- /dev/null +++ b/multilinguality_megatron/megatron/training.py @@ -0,0 +1,961 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Pretrain utilities.""" +import argparse +from datetime import datetime +import math +import sys +import time +from typing import Callable + +# The earliest we can measure the start time. 
+_TRAIN_START_TIME = time.time() +import torch +from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP + +import megatron +from megatron import get_args +from megatron import get_signal_handler +from megatron import get_timers +from megatron import get_counters +from megatron import get_tensorboard_writer +from megatron import get_current_global_batch_size +from megatron import get_num_microbatches +from megatron import is_last_rank +from megatron import update_num_microbatches +from megatron.core import mpu, tensor_parallel +from megatron import print_rank_0 +from megatron import print_rank_last +from megatron import print_all_nodes +from megatron.checkpointing import load_checkpoint +from megatron.checkpointing import save_checkpoint +from megatron.model import Float16Module +from megatron.model import ModelType +from megatron.model import GPTModel +from megatron.optimizer import get_megatron_optimizer +import megatron.initialize + +from megatron.optimizer_param_scheduler import OptimizerParamScheduler +from megatron.model import DistributedDataParallel as LocalDDP +import megatron.utils +from megatron.utils import unwrap_model +from megatron.data.data_samplers import build_pretraining_data_loader +from megatron.utils import calc_params_l2_norm +from megatron.schedules import get_forward_backward_func +from megatron.utils import report_memory + + +def print_datetime(string): + """Note that this call will sync across all ranks.""" + torch.distributed.barrier() + time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + print_rank_0('[' + string + '] datetime: {} '.format(time_str)) + + +def pretrain(args, + train_valid_test_dataset_provider, + model_provider_func, + model_type: ModelType, + forward_step_func, + process_non_loss_data_func=None, + collate_fn=None): + """Main training program. + + This function will run the followings in the order provided: + 1) initialize Megatron. + 2) setup model, optimizer and lr schedule using the model_provider_func. + 3) call train_val_test_data_provider to get train/val/test datasets. + 4) train the modle using the forward_step_func. + + Arguments: + train_valid_test_dataset_provider: a function that takes the size of + train/valid/test dataset and returns `train, valid, test` datasets. + model_provider_func: a function that returns a vanilla version of the + model. By vanilla we mean a simple model on cpu with no fp16 or ddp. + model_type: an enum that specifies the type of model being trained. + forward_step_func: a function that takes a `data iterator` and `model`, + and returns a `loss` scalar with a dictionary with key:values being + the info we would like to monitor during training, for example + `lm-loss: value`. We also require that this function add + `batch generator` to the timers class. + process_non_loss_data_func: a function to post process outputs of the + network. It can be used for dumping output tensors (e.g images) to + tensorboard. It takes `collected data`(list of tensors), + `current iteration index` and `tensorboard writer` as arguments. + extra_args_provider: a function that takes a parser and adds arguments + to it. It is used for programs to add their own arguments. + args_defaults: a dictionary from argument-name to argument-value. It + to set already parse arguments. + """ + # # Initalize and get arguments, timers, and Tensorboard writer. + # Set pytorch JIT layer fusion options and warmup JIT functions. 
+ megatron.initialize.set_jit_fusion_options(args) + + # Adjust the startup time so it reflects the largest value. + # This will be closer to what scheduler will see (outside of + # image ... launches. + global _TRAIN_START_TIME + start_time_tensor = torch.tensor([_TRAIN_START_TIME], dtype=torch.float64, device='cuda') + torch.distributed.all_reduce(start_time_tensor, + op=torch.distributed.ReduceOp.MIN) + _TRAIN_START_TIME = start_time_tensor.item() + print_rank_0('time to initialize megatron (seconds): {:.3f}'.format(time.time() - _TRAIN_START_TIME)) + print_datetime('after megatron is initialized') + timers = get_timers() + + # Model, optimizer, and learning rate. + timers('model-and-optimizer-setup', log_level=0).start(barrier=True) + model, optimizer, opt_param_scheduler = _setup_model_and_optimizer( + model_provider_func, model_type, args=args) + timers('model-and-optimizer-setup').stop() + print_datetime('after model, optimizer, and learning rate scheduler are built') + + # Data stuff. + timers('train/valid/test-data-iterators-setup', log_level=0).start( + barrier=True) + if args.virtual_pipeline_model_parallel_size is not None: + all_data_iterators = [ + build_train_valid_test_data_iterators( + train_valid_test_dataset_provider, args, collate_fn=collate_fn + ) for _ in range(len(model)) + ] + train_data_iterator = [di[0] for di in all_data_iterators] + valid_data_iterator = [di[1] for di in all_data_iterators] + test_data_iterator = [di[2] for di in all_data_iterators] + else: + train_data_iterator, valid_data_iterator, test_data_iterator \ + = build_train_valid_test_data_iterators( + train_valid_test_dataset_provider, args, collate_fn=collate_fn) + timers('train/valid/test-data-iterators-setup').stop() + print_datetime('after dataloaders are built') + + # Print setup timing. + print_rank_0('done with setup ...') + timers.log(['model-and-optimizer-setup', 'train/valid/test-data-iterators-setup'], barrier=True) + print_rank_0('training ...') + + iteration = 0 + if args.do_train and args.train_iters > 0: + iteration = _train(args, + forward_step_func, + model, + optimizer, + opt_param_scheduler, + train_data_iterator, + valid_data_iterator, + process_non_loss_data_func) + print_datetime('after training is done') + + if args.do_valid: + prefix = 'the end of training for val data' + evaluate_and_print_results(prefix, + forward_step_func, + valid_data_iterator, + model, + iteration, + process_non_loss_data_func, + verbose=False, + args=args) + + if args.save and iteration != 0: + save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + + if args.do_test: + # Run on test data. + prefix = 'the end of training for test data' + evaluate_and_print_results(prefix, forward_step_func, + test_data_iterator, model, + 0, process_non_loss_data_func, + verbose=True, args=args) + + +def _update_train_iters(args): + # For iteration-based training, we don't need to do anything + if args.train_iters: + return + + # Constant batch size with sample-based training. + if args.rampup_batch_size is None: + args.train_iters = args.train_samples // args.global_batch_size + else: + # Sample based training with rampup batch size. + iterations = 0 + consumed_samples = 0 + # Rampup phase. 
+ while consumed_samples <= int(args.rampup_batch_size[2]): + update_num_microbatches(consumed_samples, consistency_check=False) + consumed_samples += get_current_global_batch_size() + iterations += 1 + # Reset + update_num_microbatches(0, consistency_check=False) + # Constant phase + # Note that we throw away any partial last batch. + iterations += (args.train_samples - consumed_samples) // \ + args.global_batch_size + args.train_iters = iterations + print_rank_0('setting training iterations to {}'.format(args.train_iters)) + + +def get_model(model_provider_func: Callable, + model_type=ModelType.encoder_or_decoder, + wrap_with_ddp: bool=True, + args=None): + """Build the model.""" + assert args is not None + # Build model. + if mpu.get_pipeline_model_parallel_world_size() > 1 and \ + args.virtual_pipeline_model_parallel_size is not None: + assert model_type != ModelType.encoder_and_decoder, \ + "Interleaved schedule not supported for model with both encoder and decoder" + model = [] + for i in range(args.virtual_pipeline_model_parallel_size): + mpu.set_virtual_pipeline_model_parallel_rank(i) + # Set pre_process and post_process only after virtual rank is set. + pre_process = mpu.is_pipeline_first_stage() + post_process = mpu.is_pipeline_last_stage() + this_model = model_provider_func( + pre_process=pre_process, + post_process=post_process + ) + this_model.model_type = model_type + model.append(this_model) + else: + pre_process = mpu.is_pipeline_first_stage() + post_process = mpu.is_pipeline_last_stage() + add_encoder = True + add_decoder = True + if model_type == ModelType.encoder_and_decoder: + if mpu.get_pipeline_model_parallel_world_size() > 1: + assert args.pipeline_model_parallel_split_rank is not None, \ + "Split rank needs to be specified for model with both encoder and decoder" + rank = mpu.get_pipeline_model_parallel_rank() + split_rank = args.pipeline_model_parallel_split_rank + world_size = mpu.get_pipeline_model_parallel_world_size() + pre_process = rank == 0 or rank == split_rank + post_process = (rank == (split_rank - 1)) or ( + rank == (world_size - 1)) + add_encoder = mpu.is_pipeline_stage_before_split() + add_decoder = mpu.is_pipeline_stage_after_split() + model = model_provider_func( + pre_process=pre_process, + post_process=post_process, + add_encoder=add_encoder, + add_decoder=add_decoder) + else: + model = model_provider_func( + pre_process=pre_process, + post_process=post_process + ) + model.model_type = model_type + + if not isinstance(model, list): + model = [model] + + # Disallow training and inference with Transformer Engine + # for non-GPT models + allow_transformer_engine = all([type(m) == GPTModel for m in model]) + assert allow_transformer_engine or args.transformer_impl == 'local', \ + 'Transformer Engine is only approved for GPT models' + + # Set tensor model parallel attributes if not set. + # Only parameters that are already tensor model parallel have these + # attributes set for them. We should make sure the default attributes + # are set for all params so the optimizer can use them. + for model_module in model: + for param in model_module.parameters(): + tensor_parallel.set_defaults_if_not_set_tensor_model_parallel_attributes(param) + + # Print number of parameters. 
+ if mpu.get_data_parallel_rank() == 0: + print(' > number of parameters on (tensor, pipeline) ' + 'model parallel rank ({}, {}): {}'.format( + mpu.get_tensor_model_parallel_rank(), + mpu.get_pipeline_model_parallel_rank(), + sum([sum([p.nelement() for p in model_module.parameters()]) + for model_module in model])), flush=True) + + # GPU allocation. + for model_module in model: + model_module.cuda(torch.cuda.current_device()) + + # Fp16 conversion. + if args.fp16 or args.bf16: + model = [Float16Module(model_module, args) for model_module in model] + + if wrap_with_ddp: + if args.DDP_impl == 'torch': + i = torch.cuda.current_device() + model = [torchDDP(model_module, device_ids=[i], output_device=i, + process_group=mpu.get_data_parallel_group()) + for model_module in model] + elif args.DDP_impl == 'local': + model = [LocalDDP(model_module, + args.accumulate_allreduce_grads_in_fp32, + args.use_contiguous_buffers_in_local_ddp) + for model_module in model] + # broad cast params from data parallel src rank to other data parallel ranks + if args.data_parallel_random_init: + for model_module in model: + model_module.broadcast_params() + else: + raise NotImplementedError('Unknown DDP implementation specified: ' + '{}. Exiting.'.format(args.DDP_impl)) + + return model + + +def _get_optimizer_param_scheduler(optimizer, args): + """Build the learning rate scheduler.""" + # Iteration-based training. + if args.train_iters: + if args.lr_decay_iters is None: + args.lr_decay_iters = args.train_iters + lr_decay_steps = args.lr_decay_iters * args.global_batch_size + wd_incr_steps = args.train_iters * args.global_batch_size + if args.lr_warmup_fraction is not None: + lr_warmup_steps = args.lr_warmup_fraction * lr_decay_steps + else: + lr_warmup_steps = args.lr_warmup_iters * args.global_batch_size + # Sample-based training. + elif args.train_samples: + # We need to set training iters for later use. Technically + # we need to adjust the training samples too (due to last + # batch being incomplete) but we leave it as is for now. 
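# --- Editor's note: illustrative sketch, not part of the patch. ---
# _get_optimizer_param_scheduler (above/below) sizes the schedule in samples:
# the decay and warmup lengths are iteration counts multiplied by the global
# batch size (or taken directly from the *-samples arguments). The helper
# below reproduces that arithmetic and evaluates a linear-warmup plus
# cosine-decay curve at a given consumed-sample count; the cosine shape is
# only an assumption here, the real shape comes from --lr-decay-style.

import math

def lr_at(consumed_samples, max_lr, min_lr,
          train_iters, global_batch_size,
          lr_warmup_fraction=0.01, lr_decay_iters=None):
    if lr_decay_iters is None:
        lr_decay_iters = train_iters
    decay_steps = lr_decay_iters * global_batch_size
    warmup_steps = lr_warmup_fraction * decay_steps
    if consumed_samples < warmup_steps:
        return max_lr * consumed_samples / warmup_steps
    progress = min(1.0, (consumed_samples - warmup_steps)
                   / max(1.0, decay_steps - warmup_steps))
    coeff = 0.5 * (1.0 + math.cos(math.pi * progress))
    return min_lr + (max_lr - min_lr) * coeff


if __name__ == "__main__":
    for s in (0, 5_000, 500_000, 2_000_000):
        print(s, lr_at(s, 3e-4, 3e-5, train_iters=10_000, global_batch_size=256))
# --- end editor's note ---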
+ _update_train_iters(args) + if args.lr_decay_samples is None: + args.lr_decay_samples = args.train_samples + lr_decay_steps = args.lr_decay_samples + wd_incr_steps = args.train_samples + if args.lr_warmup_fraction is not None: + lr_warmup_steps = args.lr_warmup_fraction * lr_decay_steps + else: + lr_warmup_steps = args.lr_warmup_samples + else: + raise Exception( + 'either train_iters or train_samples should be provided.') + + opt_param_scheduler = OptimizerParamScheduler( + optimizer, + max_lr=args.lr, + min_lr=args.min_lr, + lr_warmup_steps=lr_warmup_steps, + lr_decay_steps=lr_decay_steps, + lr_decay_style=args.lr_decay_style, + start_wd=args.start_weight_decay, + end_wd=args.end_weight_decay, + wd_incr_steps=wd_incr_steps, + wd_incr_style=args.weight_decay_incr_style, + use_checkpoint_opt_param_scheduler=args.use_checkpoint_opt_param_scheduler, + override_opt_param_scheduler=args.override_opt_param_scheduler) + return opt_param_scheduler + + +def _setup_model_and_optimizer(model_provider_func, + model_type, + no_wd_decay_cond=None, + scale_lr_cond=None, + lr_mult=1.0, + args=None): + assert args is not None + model = get_model(model_provider_func, model_type, args=args) + unwrapped_model = unwrap_model(model, + (torchDDP, LocalDDP, Float16Module)) + + optimizer = get_megatron_optimizer(model, no_wd_decay_cond, + scale_lr_cond, lr_mult) + opt_param_scheduler = _get_optimizer_param_scheduler(optimizer, args) + + if args.load is not None: + timers = get_timers() + timers('load-checkpoint', log_level=0).start(barrier=True) + args.iteration = load_checkpoint(model, optimizer, opt_param_scheduler) + timers('load-checkpoint').stop(barrier=True) + timers.log(['load-checkpoint']) + optimizer.reload_model_params() + else: + args.iteration = 0 + + # We only support local DDP with multiple micro-batches. + if len(model) > 1 or mpu.get_pipeline_model_parallel_world_size() > 1: + assert args.DDP_impl == 'local' + + # get model without FP16 and/or TorchDDP wrappers + if args.iteration == 0 and len(unwrapped_model) == 1 \ + and hasattr(unwrapped_model[0], 'init_state_dict_from_bert'): + print_rank_0("Initializing ICT from pretrained BERT model") + unwrapped_model[0].init_state_dict_from_bert() + if args.fp16: + optimizer.reload_model_params() + + return model, optimizer, opt_param_scheduler + + +def train_step(forward_step_func, data_iterator, + model, optimizer, opt_param_scheduler, iteration, args): + """Single training step.""" + timers = get_timers() + skip_iter = iteration in args.skip_iters + if skip_iter: + print_all_nodes("IMPORTANT! Skipping backprop for this iteration!") + + # Set grad to zero. + if args.DDP_impl == 'local' and args.use_contiguous_buffers_in_local_ddp: + for partition in model: + partition.zero_grad_buffer() + optimizer.zero_grad() + + # Forward pass. + timers('forward-backward', log_level=1).start( + barrier=args.barrier_with_L1_time) + forward_backward_func = get_forward_backward_func() + fwd_bwd_timers = timers if args.timing_log_level > 1 else None + losses_reduced = forward_backward_func( + forward_step_func, data_iterator, model, + optimizer, fwd_bwd_timers, forward_only=skip_iter) + timers('forward-backward').stop() + + # Empty unused memory. + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + # Reduce gradients. + if skip_iter: + update_successful = False + grad_norm = None + num_zeros_in_grad = None + else: + optimizer.reduce_model_grads(args, timers) + + # Update parameters. 
+ timers('optimizer', log_level=1).start(barrier=args.barrier_with_L1_time) + update_successful, grad_norm, num_zeros_in_grad = optimizer.step(args, timers) + timers('optimizer').stop() + + # Gather params. + if update_successful: + optimizer.gather_model_params(args, timers) + + # Update learning rate. + if update_successful: + increment = get_num_microbatches() * \ + args.micro_batch_size * \ + args.data_parallel_size + opt_param_scheduler.step(increment=increment) + skipped_iter = 0 + else: + skipped_iter = 1 + + # Empty unused memory. + if args.empty_unused_memory_level >= 2: + torch.cuda.empty_cache() + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Average loss across microbatches. + loss_reduced = {} + for key in losses_reduced[0]: + losses_reduced_for_key = [x[key] for x in losses_reduced] + loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) + return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad + return {}, skipped_iter, grad_norm, num_zeros_in_grad + + +def training_log(loss_dict, total_loss_dict, learning_rate, iteration, + loss_scale, report_memory_flag, skipped_iter, + grad_norm, params_norm, num_zeros_in_grad): + """Log training information such as losses, timing, ....""" + args = get_args() + timers = get_timers() + writer = get_tensorboard_writer() + + # Advanced, skipped, and Nan iterations. + advanced_iters_key = 'advanced iterations' + skipped_iters_key = 'skipped iterations' + nan_iters_key = 'nan iterations' + # Advanced iterations. + if not skipped_iter: + total_loss_dict[advanced_iters_key] = total_loss_dict.get( + advanced_iters_key, 0) + 1 + else: + if advanced_iters_key not in total_loss_dict: + total_loss_dict[advanced_iters_key] = 0 + # Skipped iterations. + total_loss_dict[skipped_iters_key] = total_loss_dict.get( + skipped_iters_key, 0) + skipped_iter + # Update losses and set nan iterations + got_nan = False + for key in loss_dict: + if not skipped_iter: + total_loss_dict[key] = total_loss_dict.get( + key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] + else: + value = loss_dict[key].float().sum().item() + is_nan = value == float('inf') or \ + value == -float('inf') or \ + value != value + got_nan = got_nan or is_nan + total_loss_dict[nan_iters_key] = total_loss_dict.get( + nan_iters_key, 0) + int(got_nan) + + # Logging. + timers_to_log = [ + 'forward-backward', + 'forward-compute', + 'backward-compute', + 'batch-generator', + 'forward-recv', + 'forward-send', + 'backward-recv', + 'backward-send', + 'forward-send-forward-recv', + 'forward-send-backward-recv', + 'backward-send-forward-recv', + 'backward-send-backward-recv', + 'forward-backward-send-forward-backward-recv', + 'layernorm-grads-all-reduce', + 'embedding-grads-all-reduce', + 'grads-all-reduce', + 'grads-reduce-scatter', + 'params-all-gather', + 'optimizer-copy-to-main-grad', + 'optimizer-unscale-and-check-inf', + 'optimizer-clip-main-grad', + 'optimizer-count-zeros', + 'optimizer-inner-step', + 'optimizer-copy-main-to-model-params', + 'optimizer'] + + # Calculate batch size. + batch_size = args.micro_batch_size * args.data_parallel_size * \ + get_num_microbatches() + + total_iterations = total_loss_dict[advanced_iters_key] + \ + total_loss_dict[skipped_iters_key] + + # Tensorboard values. + # Timer requires all the ranks to call. 
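# --- Editor's note: illustrative sketch, not part of the patch. ---
# Several places above use the same identity: one optimizer step consumes
#   global_batch = micro_batch_size * data_parallel_size * num_microbatches
# samples, and that product is also the increment applied to
# consumed_train_samples and to the sample-based LR scheduler.

def global_batch(micro_batch_size, data_parallel_size, num_microbatches):
    return micro_batch_size * data_parallel_size * num_microbatches

if __name__ == "__main__":
    # e.g. 4 per GPU, 8 data-parallel replicas, 16 gradient-accumulation steps
    assert global_batch(4, 8, 16) == 512
    steps = 1_000_000_000 // global_batch(4, 8, 16)  # samples -> optimizer steps
    print(global_batch(4, 8, 16), steps)
# --- end editor's note ---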
+ if args.log_timers_to_tensorboard and \ + (iteration % args.tensorboard_log_interval == 0): + timers.write(timers_to_log, writer, iteration, + normalizer=total_iterations) + if writer and (iteration % args.tensorboard_log_interval == 0): + if args.log_learning_rate_to_tensorboard: + writer.add_scalar('learning_rate', learning_rate, iteration) + writer.add_scalar('learning_rate vs samples', learning_rate, + args.consumed_train_samples) + if args.log_batch_size_to_tensorboard: + writer.add_scalar('batch_size', batch_size, iteration) + writer.add_scalar('batch_size vs samples', batch_size, + args.consumed_train_samples) + for key in loss_dict: + writer.add_scalar(key, loss_dict[key], iteration) + writer.add_scalar(key + ' vs samples', loss_dict[key], + args.consumed_train_samples) + if args.log_loss_scale_to_tensorboard: + writer.add_scalar('loss_scale', loss_scale, iteration) + writer.add_scalar('loss_scale vs samples', loss_scale, + args.consumed_train_samples) + if args.log_world_size_to_tensorboard: + writer.add_scalar('world_size', args.world_size, iteration) + writer.add_scalar('world_size vs samples', args.world_size, + args.consumed_train_samples) + if grad_norm is not None: + writer.add_scalar('grad_norm', grad_norm, iteration) + writer.add_scalar('grad_norm vs samples', grad_norm, + args.consumed_train_samples) + if num_zeros_in_grad is not None: + writer.add_scalar('num_zeros', num_zeros_in_grad, iteration) + writer.add_scalar('num_zeros vs samples', num_zeros_in_grad, + args.consumed_train_samples) + if params_norm is not None: + writer.add_scalar('params_norm', params_norm, iteration) + writer.add_scalar('params_norm vs samples', params_norm, + args.consumed_train_samples) + if args.log_memory_to_tensorboard: + mem_stats = torch.cuda.memory_stats() + writer.add_scalar( + "mem-reserved-bytes", + mem_stats["reserved_bytes.all.current"], + iteration, + ) + writer.add_scalar( + "mem-allocated-bytes", + mem_stats["allocated_bytes.all.current"], + iteration, + ) + writer.add_scalar( + "mem-allocated-count", + mem_stats["allocation.all.current"], + iteration, + ) + + if iteration % args.log_interval == 0: + elapsed_time = timers('interval-time').elapsed(barrier=True) + elapsed_time_per_iteration = elapsed_time / total_iterations + counters = get_counters() + tokens = counters.pop('tokens') # reset counter for future iterations + tokens_per_sec = tokens/(elapsed_time) + if writer: + if args.log_timers_to_tensorboard: + writer.add_scalar('iteration-time', + elapsed_time_per_iteration, iteration) + writer.add_scalar('tokens-per-sec', tokens_per_sec, iteration) + + log_string = ' iteration {:8d}/{:8d} |'.format( + iteration, args.train_iters) + log_string += ' consumed samples: {:12d} |'.format( + args.consumed_train_samples) + log_string += ' elapsed time per iteration (ms): {:.1f} |'.format( + elapsed_time_per_iteration * 1000.0) + log_string += f' rate (tokens/sec): {tokens_per_sec:.2f} |' + log_string += ' learning rate: {:.3E} |'.format(learning_rate) + log_string += ' global batch size: {:5d} |'.format(batch_size) + for key in total_loss_dict: + if key not in [advanced_iters_key, skipped_iters_key, + nan_iters_key]: + avg = total_loss_dict[key].item() / \ + float(max(1, total_loss_dict[advanced_iters_key])) + if avg > 0.0: + log_string += ' {}: {:.6E} |'.format(key, avg) + total_loss_dict[key] = torch.cuda.FloatTensor([0.0]) + log_string += ' loss scale: {:.1f} |'.format(loss_scale) + if grad_norm is not None: + log_string += ' grad norm: {:.3f} |'.format(grad_norm) + if 
num_zeros_in_grad is not None: + log_string += ' num zeros: {:.1f} |'.format(num_zeros_in_grad) + if params_norm is not None: + log_string += ' params norm: {:.3f} |'.format(params_norm) + log_string += ' number of skipped iterations: {:3d} |'.format( + total_loss_dict[skipped_iters_key]) + log_string += ' number of nan iterations: {:3d} |'.format( + total_loss_dict[nan_iters_key]) + total_loss_dict[advanced_iters_key] = 0 + total_loss_dict[skipped_iters_key] = 0 + total_loss_dict[nan_iters_key] = 0 + print_all_nodes(log_string) + if report_memory_flag and learning_rate > 0.: + # Report memory after optimizer state has been initialized. + report_memory('(after {} iterations)'.format(iteration)) + report_memory_flag = False + timers.log(timers_to_log, normalizer=args.log_interval) + + return report_memory_flag + + +def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler): + timers = get_timers() + # Extra barrier is added to make sure + # all ranks report the max time. + timers('save-checkpoint', log_level=0).start(barrier=True) + save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + timers('save-checkpoint').stop(barrier=True) + timers.log(['save-checkpoint']) + + +def _train(args, forward_step_func, + model, + optimizer, + opt_param_scheduler, + train_data_iterator, + valid_data_iterator, + process_non_loss_data_func): + """Train the model function.""" + timers = get_timers() + + # Write args to tensorboard + megatron.initialize.write_args_to_tensorboard(args) + + # Turn on training mode which enables dropout. + for model_module in model: + model_module.train() + + # Tracking loss. + total_loss_dict = {} + + # Iterations. + iteration = args.iteration + + counters = get_counters() + timers('interval-time', log_level=0).start(barrier=True) + print_datetime('before the start of training step') + report_memory_flag = True + while iteration < args.train_iters: + update_num_microbatches(args.consumed_train_samples) + args.curr_iteration = iteration + loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \ + train_step(forward_step_func, + train_data_iterator, + model, + optimizer, + opt_param_scheduler, iteration, args) + iteration += 1 + args.consumed_train_samples += mpu.get_data_parallel_world_size() * \ + args.micro_batch_size * \ + get_num_microbatches() + + # Logging. 
+ loss_scale = optimizer.get_loss_scale().item() + params_norm = None + if args.log_params_norm: + params_norm = calc_params_l2_norm(model) + report_memory_flag = training_log(loss_dict, total_loss_dict, + optimizer.param_groups[0]['lr'], + iteration, loss_scale, + report_memory_flag, skipped_iter, + grad_norm, params_norm, num_zeros_in_grad) + + # Autoresume + if args.adlr_autoresume and \ + (iteration % args.adlr_autoresume_interval == 0): + megatron.utils.check_adlr_autoresume_termination(iteration, model, optimizer, + opt_param_scheduler, args) + + # Evaluation + if args.eval_interval and iteration % args.eval_interval == 0 and \ + args.do_valid: + prefix = 'iteration {}'.format(iteration) + current_tokens = counters['tokens'] + evaluate_and_print_results(prefix, forward_step_func, + valid_data_iterator, model, + iteration, process_non_loss_data_func, + verbose=False, args=args) + counters['tokens'] = current_tokens + + + # if using wandb writer, flush the stats of train_step & potentially evaluate + writer = get_tensorboard_writer() + if hasattr(writer, "flush_all"): + writer.flush_all() + + # Checkpointing + saved_checkpoint = False + if args.exit_signal_handler: + signal_handler = get_signal_handler() + if any(signal_handler.signals_received()): + save_checkpoint_and_time(iteration, model, optimizer, + opt_param_scheduler) + print_datetime('exiting program after receiving SIGTERM.') + sys.exit() + + if args.save and args.save_interval and \ + iteration % args.save_interval == 0: + save_checkpoint_and_time(iteration, model, optimizer, + opt_param_scheduler) + saved_checkpoint = True + + # Exiting based on duration + if args.exit_duration_in_mins: + train_time = (time.time() - _TRAIN_START_TIME) / 60.0 + done_cuda = torch.cuda.IntTensor( + [train_time > args.exit_duration_in_mins]) + torch.distributed.all_reduce( + done_cuda, op=torch.distributed.ReduceOp.MAX) + done = done_cuda.item() + if done: + if not saved_checkpoint: + save_checkpoint_and_time(iteration, model, optimizer, + opt_param_scheduler) + print_datetime('exiting program after {} minutes'.format(train_time)) + sys.exit() + + # Exiting based on iterations + if args.exit_interval and iteration % args.exit_interval == 0: + if not saved_checkpoint: + save_checkpoint_and_time(iteration, model, optimizer, + opt_param_scheduler) + torch.distributed.barrier() + print_datetime('exiting program at iteration {}'.format(iteration)) + sys.exit() + + + return iteration + + +def evaluate(forward_step_func, + data_iterator, + model, + process_non_loss_data_func, + verbose=False): + """Evaluation.""" + args = get_args() + + # Turn on evaluation mode which disables dropout. + for model_module in model: + model_module.eval() + + total_loss_dict = {} + + with torch.no_grad(): + iteration = 0 + while iteration < args.eval_iters: + iteration += 1 + if verbose and iteration % args.log_interval == 0: + print_rank_0('Evaluating iter {}/{}'.format(iteration, + args.eval_iters)) + + forward_backward_func = get_forward_backward_func() + loss_dicts = forward_backward_func( + forward_step_func, data_iterator, model, optimizer=None, + timers=None, forward_only=True) + + # Empty unused memory + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Reduce across processes. 
+ for loss_dict in loss_dicts: + for key in loss_dict: + total_loss_dict[key] = total_loss_dict.get( + key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] + + args.consumed_valid_samples += mpu.get_data_parallel_world_size() \ + * args.micro_batch_size \ + * get_num_microbatches() + collected_non_loss_data = None + if process_non_loss_data_func is not None and is_last_rank(): + collected_non_loss_data = forward_backward_func( + forward_step_func, data_iterator, model, optimizer=None, + timers=None, forward_only=True, collect_non_loss_data=True) + + # Move model back to the train mode. + for model_module in model: + model_module.train() + + for key in total_loss_dict: + total_loss_dict[key] /= args.eval_iters * get_num_microbatches() + return total_loss_dict, collected_non_loss_data + + +def evaluate_and_print_results(prefix, + forward_step_func, + data_iterator, model, + iteration, process_non_loss_data_func, + verbose=False, + args=None): + """Helper function to evaluate and dump results on screen.""" + writer = get_tensorboard_writer() + + total_loss_dict, collected_non_loss_data = evaluate( + forward_step_func, data_iterator, model, + process_non_loss_data_func, verbose) + string = ' validation loss at {} | '.format(prefix) + for key in total_loss_dict: + string += '{} value: {:.6E} | '.format(key, total_loss_dict[key].item()) + if "lm loss" in key: + ppl = math.exp(min(20, total_loss_dict[key].item())) + string += '{} PPL: {:.6E} | '.format(key, ppl) + else: + ppl = None + if writer: + writer.add_scalar('{} validation'.format(key), + total_loss_dict[key].item(), + iteration) + writer.add_scalar('{} validation vs samples'.format(key), + total_loss_dict[key].item(), + args.consumed_train_samples) + if args.log_validation_ppl_to_tensorboard and ppl is not None: + writer.add_scalar('{} validation ppl'.format(key), ppl, + iteration) + writer.add_scalar('{} validation ppl vs samples'.format(key), + ppl, args.consumed_train_samples) + + if process_non_loss_data_func is not None and writer and is_last_rank(): + process_non_loss_data_func(collected_non_loss_data, iteration, writer) + + length = len(string) + 1 + print_rank_last('-' * length) + print_rank_last(string) + print_rank_last('-' * length) + + +def cyclic_iter(iter): + while True: + for x in iter: + yield x + + +def build_train_valid_test_data_iterators(build_train_valid_test_datasets_provider: Callable, + args: argparse.Namespace, collate_fn=None): + (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None) + print_rank_0('> building train, validation, and test datasets ...') + + # Backward compatibility, assume fixed batch size. + if args.iteration > 0 and args.consumed_train_samples == 0: + assert args.train_samples is None, \ + 'only backward compatiblity support for iteration-based training' + args.consumed_train_samples = args.iteration * args.global_batch_size + if args.iteration > 0 and args.consumed_valid_samples == 0: + if args.train_samples is None: + args.consumed_valid_samples = (args.iteration // args.eval_interval) * \ + args.eval_iters * args.global_batch_size + + # Data loader only on rank 0 of each model parallel group. + if mpu.get_tensor_model_parallel_rank() == 0: + # Number of train/valid/test samples. 
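# --- Editor's note: illustrative sketch, not part of the patch. ---
# evaluate_and_print_results (above) reports validation perplexity as
# exp(lm_loss), with the loss clamped at 20 so a diverged run prints a large
# but finite number instead of overflowing. Tiny reproduction:

import math

def loss_to_ppl(lm_loss, clamp=20.0):
    return math.exp(min(clamp, lm_loss))

if __name__ == "__main__":
    print(loss_to_ppl(2.0))   # ~7.39
    print(loss_to_ppl(50.0))  # clamped: e**20, not e**50
# --- end editor's note ---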
+ if args.train_samples: + train_samples = args.train_samples + else: + train_samples = args.train_iters * args.global_batch_size + eval_iters = (args.train_iters // args.eval_interval + 1) * \ + args.eval_iters + test_iters = args.eval_iters + train_val_test_num_samples = [train_samples, + eval_iters * args.global_batch_size, + test_iters * args.global_batch_size] + print_rank_0(' > datasets target sizes (minimum size):') + print_rank_0(' train: {}'.format(train_val_test_num_samples[0])) + print_rank_0(' validation: {}'.format(train_val_test_num_samples[1])) + print_rank_0(' test: {}'.format(train_val_test_num_samples[2])) + + # Build the datasets. + train_ds, valid_ds, test_ds = build_train_valid_test_datasets_provider( + train_val_test_num_samples) + + # Build dataloders. + train_dataloader = build_pretraining_data_loader( + train_ds, args.consumed_train_samples, collate_fn=collate_fn) + valid_dataloader = build_pretraining_data_loader( + valid_ds, args.consumed_valid_samples, collate_fn=collate_fn) + test_dataloader = build_pretraining_data_loader(test_ds, 0, collate_fn=collate_fn) + + # Flags to know if we need to do training/validation/testing. + do_train = train_dataloader is not None and args.train_iters > 0 + do_valid = valid_dataloader is not None and args.eval_iters > 0 + do_test = test_dataloader is not None and args.eval_iters > 0 + # Need to broadcast num_tokens and num_type_tokens. + flags = torch.cuda.LongTensor( + [int(do_train), int(do_valid), int(do_test)]) + else: + flags = torch.cuda.LongTensor([0, 0, 0]) + + # Broadcast num tokens. + torch.distributed.broadcast(flags, + mpu.get_tensor_model_parallel_src_rank(), + group=mpu.get_tensor_model_parallel_group()) + args.do_train = flags[0].item() + args.do_valid = flags[1].item() + args.do_test = flags[2].item() + + # Build iterators. + dl_type = args.dataloader_type + assert dl_type in ['single', 'cyclic'] + + if train_dataloader is not None: + train_data_iterator = iter(train_dataloader) if dl_type == 'single' \ + else iter(cyclic_iter(train_dataloader)) + else: + train_data_iterator = None + + if valid_dataloader is not None: + valid_data_iterator = iter(valid_dataloader) if dl_type == 'single' \ + else iter(cyclic_iter(valid_dataloader)) + else: + valid_data_iterator = None + + if test_dataloader is not None: + test_data_iterator = iter(test_dataloader) if dl_type == 'single' \ + else iter(cyclic_iter(test_dataloader)) + else: + test_data_iterator = None + + return train_data_iterator, valid_data_iterator, test_data_iterator diff --git a/multilinguality_megatron/megatron/utils.py b/multilinguality_megatron/megatron/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f9cd1184540092312a0a1c59ac6e07b8942c42dd --- /dev/null +++ b/multilinguality_megatron/megatron/utils.py @@ -0,0 +1,224 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
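# --- Editor's note: illustrative sketch, not part of the patch. ---
# build_train_valid_test_data_iterators (in training.py above) sizes the
# three datasets as: train = train_iters * global_batch, validation = one
# eval pass per eval_interval plus one final pass, test = a single eval pass.
# The same arithmetic as plain code:

def train_val_test_num_samples(train_iters, global_batch_size,
                               eval_interval, eval_iters):
    eval_passes = train_iters // eval_interval + 1
    return [
        train_iters * global_batch_size,               # train
        eval_passes * eval_iters * global_batch_size,  # validation
        eval_iters * global_batch_size,                # test
    ]

if __name__ == "__main__":
    print(train_val_test_num_samples(10_000, 256, eval_interval=500, eval_iters=100))
# --- end editor's note ---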
+ +"""General utilities.""" + +import os +import sys + +import torch +from torch.nn.parallel import DistributedDataParallel as torchDDP + +from apex.multi_tensor_apply import multi_tensor_applier +import amp_C + +from megatron import ( + get_args, + get_adlr_autoresume, +) +from megatron.core import mpu +from megatron.core.tensor_parallel import param_is_not_tensor_parallel_duplicate +from megatron.model.module import param_is_not_shared + + +def unwrap_model(model, module_instances=(torchDDP)): + return_list = True + if not isinstance(model, list): + model = [model] + return_list = False + unwrapped_model = [] + for model_module in model: + while isinstance(model_module, module_instances): + model_module = model_module.module + unwrapped_model.append(model_module) + if not return_list: + return unwrapped_model[0] + return unwrapped_model + + +def calc_params_l2_norm(model): + """Calculate l2 norm of parameters""" + args = get_args() + if not isinstance(model, list): + model = [model] + # Remove duplicate params. + params_data = [] + for model_ in model: + for param in model_.parameters(): + is_not_shared = param_is_not_shared(param) + is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param) + if is_not_shared and is_not_tp_duplicate: + if args.bf16: + params_data.append(param.data.float()) + else: + params_data.append(param.data) + # Calculate norm + dummy_overflow_buf = torch.cuda.IntTensor([0]) + norm, _ = multi_tensor_applier( + amp_C.multi_tensor_l2norm, + dummy_overflow_buf, + [params_data], + False, # no per-parameter norm + ) + norm_2 = norm * norm + # Sum across all model-parallel GPUs. + torch.distributed.all_reduce( + norm_2, op=torch.distributed.ReduceOp.SUM, group=mpu.get_model_parallel_group() + ) + return norm_2.item() ** 0.5 + + +def average_losses_across_data_parallel_group(losses): + """Reduce a tensor of losses across all GPUs.""" + averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses]) + torch.distributed.all_reduce(averaged_losses, group=mpu.get_data_parallel_group()) + averaged_losses = averaged_losses / torch.distributed.get_world_size( + group=mpu.get_data_parallel_group() + ) + + return averaged_losses + + +def report_memory(name): + """Simple GPU memory report.""" + mega_bytes = 1024.0 * 1024.0 + string = name + " memory (MB)" + string += " | allocated: {}".format(torch.cuda.memory_allocated() / mega_bytes) + string += " | max allocated: {}".format( + torch.cuda.max_memory_allocated() / mega_bytes + ) + string += " | reserved: {}".format(torch.cuda.memory_reserved() / mega_bytes) + string += " | max reserved: {}".format( + torch.cuda.max_memory_reserved() / mega_bytes + ) + if mpu.get_data_parallel_rank() == 0: + print("[Rank {}] {}".format(torch.distributed.get_rank(), string), flush=True) + + +def print_params_min_max_norm(optimizer, iteration): + """Print min, max, and norm of all parameters.""" + index = 0 + rank = torch.distributed.get_rank() + string = "iteration, rank, index, tensor-model-parallel, min, max, norm\n" + optimizer_ = optimizer.optimizer + for param_group in optimizer_.param_groups: + for param in param_group["params"]: + index += 1 + min_ = param.data.min() + max_ = param.data.max() + norm = torch.linalg.norm(param.data) + string += "{:7d}, {:4d}, {:4d}, {:2d}, ".format( + iteration, rank, index, int(param.tensor_model_parallel) + ) + string += "{:.6E}, {:.6E}, {:.6E}\n".format(min_, max_, norm) + print(string, flush=True) + + +def check_adlr_autoresume_termination( + iteration, model, optimizer, 
opt_param_scheduler, args +): + """Check for autoresume signal and exit if it is received.""" + from megatron.checkpointing import save_checkpoint + + autoresume = get_adlr_autoresume() + # Add barrier to ensure consistnecy. + torch.distributed.barrier() + if autoresume.termination_requested(): + if args.save: + save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + print_rank_0(">>> autoresume termination request found!") + if torch.distributed.get_rank() == 0: + autoresume.request_resume() + print_rank_0(">>> training terminated. Returning") + sys.exit(0) + + +def get_ltor_masks_and_position_ids( + data, eod_token, reset_position_ids, reset_attention_mask, eod_mask_loss +): + """Build masks and position id for left to right model.""" + + # Extract batch size and sequence length. + micro_batch_size, seq_length = data.size() + + # Attention mask (lower triangular). + if reset_attention_mask: + att_mask_batch = micro_batch_size + else: + att_mask_batch = 1 + attention_mask = torch.tril( + torch.ones((att_mask_batch, seq_length, seq_length), device=data.device) + ).view(att_mask_batch, 1, seq_length, seq_length) + + # Loss mask. + loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device) + if eod_mask_loss: + loss_mask[data == eod_token] = 0.0 + + # Position ids. + position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device) + position_ids = position_ids.unsqueeze(0).expand_as(data) + # We need to clone as the ids will be modifed based on batch index. + if reset_position_ids: + position_ids = position_ids.clone() + + if reset_position_ids or reset_attention_mask: + # Loop through the batches: + for b in range(micro_batch_size): + + # Find indecies where EOD token is. + eod_index = position_ids[b, data[b] == eod_token] + # Detach indecies from positions if going to modify positions. + if reset_position_ids: + eod_index = eod_index.clone() + + # Loop through EOD indecies: + prev_index = 0 + for j in range(eod_index.size()[0]): + i = eod_index[j] + # Mask attention loss. + if reset_attention_mask: + attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0 + # Reset positions. 
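# --- Editor's note: illustrative sketch, not part of the patch. ---
# What the EOD handling in get_ltor_masks_and_position_ids (this function)
# does on a toy packed sequence: after each EOD token the position ids
# restart from 0 and, with reset_attention_mask, tokens cannot attend across
# the document boundary. The token values below are made up for illustration.

import torch

def toy_masks(data, eod_token):
    bsz, seqlen = data.size()
    attn = torch.tril(torch.ones(bsz, seqlen, seqlen))
    pos = torch.arange(seqlen).unsqueeze(0).expand_as(data).clone()
    for b in range(bsz):
        prev = 0
        for i in (data[b] == eod_token).nonzero(as_tuple=True)[0].tolist():
            attn[b, i + 1:, : i + 1] = 0    # block attention across the EOD
            pos[b, i + 1:] -= i + 1 - prev  # restart positions after the EOD
            prev = i + 1
    return attn < 0.5, pos                  # True = masked, as in the code above

if __name__ == "__main__":
    data = torch.tensor([[5, 6, 0, 7, 8, 9]])  # 0 plays the EOD id here
    mask, pos = toy_masks(data, eod_token=0)
    print(pos)  # tensor([[0, 1, 2, 0, 1, 2]])
# --- end editor's note ---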
+ if reset_position_ids: + position_ids[b, (i + 1) :] -= i + 1 - prev_index + prev_index = i + 1 + + # Convert attention mask to binary: + attention_mask = attention_mask < 0.5 + + return attention_mask, loss_mask, position_ids + + +def print_rank_0(message): + """If distributed is initialized, print only on rank 0.""" + if torch.distributed.is_initialized(): + if torch.distributed.get_rank() == 0: + print(message, flush=True) + else: + print(message, flush=True) + + +def is_last_rank(): + return torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1) + + +def print_rank_last(message): + """If distributed is initialized, print only on last rank.""" + if torch.distributed.is_initialized(): + if is_last_rank(): + print(message, flush=True) + else: + print(message, flush=True) + + +def is_last_local_rank(): + return get_args().local_rank == (int(os.environ["LOCAL_WORLD_SIZE"]) - 1) + + +def print_all_nodes(*args, **kwargs): + """If distributed is initialized, print on the last rank in all nodes.""" + if torch.distributed.is_initialized(): + if is_last_local_rank(): + print(*args, **kwargs) + else: + print(*args, **kwargs) diff --git a/multilinguality_megatron/megatron/wandb_logger.py b/multilinguality_megatron/megatron/wandb_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..a41563fe949332f8340384a402aa3e03603df2c2 --- /dev/null +++ b/multilinguality_megatron/megatron/wandb_logger.py @@ -0,0 +1,173 @@ +from dataclasses import dataclass,field +from typing import Optional,List,Dict,Tuple +import torch +from typing import Union +import wandb +from argparse import Namespace +from collections import defaultdict +import os +from warnings import warn + +#TODO: do we want to use the watch? https://docs.wandb.ai/ref/python/watch +@dataclass +class WandBConfig(object): + # compatibility to previous logger + # config to be logged in wandb + config:Optional[Namespace]=field(default_factory=lambda :None) + # NOTE: these are ignored by wandb + # filled from `tensorboard_log_interval`, frequency of logging + log_interval:int=field(default=1) + # filled from `tensorboard_queue_size`, ignored by wandb + queue_size:int=field(default=int(1e3)) + # if set, log locally into this dir, gets filled from tensorboard_dir + local_dir:Optional[str]=field(default=None) + + # wandb specific config + # filled from args + #filled with kwargs + entity:str=field(default="meditron") + #wandb project to log to + project:str=field(default="meditron") + # save the code to the notebook? + save_code:bool=field(default=False) + # stuff to filter by + tags:Optional[List[str]]=field(default=None) + # short descriptive name to quickly find things + name:Optional[str]=field(default=None) + # long form notes and info to store + notes:Optional[str]=field(default=None) + # TODO: discuss how we want to do this, do we want to resume logging? + resume:str=field(default="allow") + # TODO: can fill this from the environment variable `WANDB_RUN_ID` ? 
+ # globally unique id, if passed + run_id:Optional[str]=field(default=None) + # "magic" auto instrumentation, see https://docs.wandb.ai/ref/python/init + magic:bool=field(default=False) + api_key:Optional[str]=field(default=None) + with_tensorboard:bool=field(default=True) + try_catch_guard:bool=field(default=True) + + @staticmethod + def default(project:str,run_id:str=run_id): + return WandBConfig(project=project,run_id=run_id) + + @staticmethod + def from_args(args)->'WandBConfig': + assert args.rank==args.world_size-1, f"Only supposed to launch on rank {args.rank+1}, but got {args.rank}" + # following the megatron setup for now, could also do groups instead: https://docs.wandb.ai/guides/track/log/distributed-training + return WandBConfig(local_dir=args.tensorboard_dir, + queue_size=args.tensorboard_queue_size, + log_interval=args.log_interval, + config=args,entity=args.wandb_entity, + project=args.wandb_project, + run_id=args.wandb_id, + resume=args.wandb_resume, + api_key=args.wandb_api_key, + try_catch_guard=False, + with_tensorboard=True) + +import functools +# dummy_named just because of bare * +def try_catch_guard(_func=None,*,dummy_named=None,**decorator_kwargs): + def decorator_try_catch_guard(func): + @functools.wraps(func) + def try_catch_wrapper(*args,**kwargs): + s=args[0] + if s.cfg.try_catch_guard: + try: + return func(*args,**kwargs) + except BaseException as e: + warn(f"Ignoring error {e} in WandbTBShim") + else: + return func(*args,**kwargs) + return try_catch_wrapper + if _func is None: + return decorator_try_catch_guard + else: + return decorator_try_catch_guard(_func) + + +class WandbTBShim(object): + """ + Shim class that holds the configuration we want the wandb wrapper to use + (e.g. to control sampling, delay upload etc) and that translates the API + """ + def __init__(self, config:WandBConfig): + super().__init__() + self.cfg=config + if os.environ.get("WANDB_API_KEY") is None: + if self.cfg.api_key is None: + raise ValueError("WANDB_API_KEY is not set, nor passed as an argument") + else: + os.environ["WANDB_API_KEY"]=self.cfg.api_key + wandb.init(config=config.config, + entity=config.entity, + project=config.project, + save_code=config.save_code, + tags=config.tags, + name=config.name, + notes=config.notes, + resume=config.resume, + id=config.run_id, + dir=config.local_dir + ) + self._last_step = None + self._log_accum = {} + if self.cfg.with_tensorboard: + try: + from torch.utils.tensorboard import SummaryWriter + print('> setting tensorboard ...') + self.tb_writer = SummaryWriter( + log_dir=config.local_dir, + max_queue=config.queue_size) + except ModuleNotFoundError: + print('WARNING: TensorBoard writing requested but is not ' + 'available (are you using PyTorch 1.1.0 or later?), ' + 'no TensorBoard logs will be written.', flush=True) + else: + self.tb_writer=None + + @try_catch_guard + def add_scalar(self, name: str, var: Union[float, int, torch.Tensor], step: int): + if isinstance(var, torch.Tensor): + var = var.item() + if self.tb_writer is not None: + self.tb_writer.add_scalar(name, var, global_step=step) + if " vs " in name: + # wandb does not allow logging to previous steps and the ' vs ' + # scalars are usually a lot of steps forward compared to the rest + # of the scalars (as they count per sample, not per batch) so we + # just ignore them and rely on tensorboard to log them + warn(f"Ignoring wandb log for {name}") + return + + if self._last_step is not None and step > self._last_step: + self.flush_all() + + self._last_step = step + 
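# --- Editor's note: illustrative sketch, not part of the patch. ---
# WandbTBShim.add_scalar / flush_all (this class) batch every scalar logged
# for one step into a single wandb.log(..., commit=True) call, because wandb
# cannot write to earlier steps once a later one is committed. A minimal
# standalone version of that accumulate-then-commit pattern; the `sink`
# callable stands in for wandb.log.

class StepAccumulator:
    def __init__(self, sink):
        self._sink = sink
        self._accum = {}
        self._last_step = None

    def add_scalar(self, name, value, step):
        if self._last_step is not None and step > self._last_step:
            self.flush()          # a new step arrived: commit the old one
        self._last_step = step
        self._accum[name] = value

    def flush(self):
        if self._accum:
            self._sink(self._accum, step=self._last_step)
            self._accum = {}
            self._last_step = None

if __name__ == "__main__":
    acc = StepAccumulator(lambda d, step: print(step, d))
    acc.add_scalar("loss", 2.3, step=1)
    acc.add_scalar("lr", 3e-4, step=1)
    acc.add_scalar("loss", 2.1, step=2)  # triggers commit of step 1
    acc.flush()                          # commit step 2
# --- end editor's note ---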
self._log_accum[name] = var + + @try_catch_guard + def flush_all(self): + if len(self._log_accum) > 0: + wandb.log(self._log_accum, step=self._last_step, commit=True) + self._log_accum = {} + self._last_step = None + + @try_catch_guard + def add_text(self,name:str,value:str): + # we log this on the creation of the wandb object, hence no need to log it here + if self.tb_writer is not None: + self.tb_writer.add_text(name,value) + +def toy_test(writer): + for i,l in zip(range(10),range(10,20)): + r=10-i + for k in range(5): + writer.add_scalar(f"forward{k}",l,i) + writer.add_scalar(f"backward{k}",r,i) + writer.add_scalar(f"forward{k} vs forward",l,i) + writer.add_scalar(f"forward{k} vs backward",i,r) +if __name__=="__main__": + writer=WandbTBShim(WandBConfig.default("wandb-toy-test",run_id="meditron-wandb-test")) + toy_test(writer) diff --git a/multilinguality_megatron/model_sharding.sh b/multilinguality_megatron/model_sharding.sh new file mode 100644 index 0000000000000000000000000000000000000000..72e14791d3212e419f8d572e980c960e67dd2526 --- /dev/null +++ b/multilinguality_megatron/model_sharding.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +megatron_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/megatron_model" +sharded_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/shards" +tp="2" +pp="1" +repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron" +vocab_size="37005" + +# Parse command-line arguments +for arg in "$@" +do + case $arg in + --help) + echo "Usage: ./script.sh [OPTIONS]" + echo "Options:" + echo " --megatron_model=PATH Path to sharded megatron model" + echo " --sharded_model=PATH Path to save sharded model." + echo " --tp=NUMBER Number of shards to divide model in." 
+ echo " --pp=NUMBER Pipeline parallel (default is 1)" + echo " --repo=PATH Path to repo" + echo " --vocab_size=NUMBER Vocab size of model without padding" + exit 0 + ;; + --megatron_model=*) + megatron_model="${arg#*=}" + shift + ;; + --sharded_model=*) + sharded_model="${arg#*=}" + shift + ;; + --tp=*) + tp="${arg#*=}" + shift + ;; + --pp=*) + pp="${arg#*=}" + shift + ;; + --repo=*) + repo="${arg#*=}" + shift + ;; + --vocab_size=*) + vocab_size="${arg#*=}" + shift + ;; + esac +done + +python $repo/tools/checkpoint_util.py \ + --target_tensor_parallel_size $tp \ + --target_pipeline_parallel_size $pp \ + --load_dir $megatron_model \ + --save_dir $sharded_model \ + --model_type llama \ + --true_vocab_size $vocab_size \ + --bf16 diff --git a/multilinguality_megatron/new_monolingual_data.py b/multilinguality_megatron/new_monolingual_data.py new file mode 100644 index 0000000000000000000000000000000000000000..dbc4d18413cd4e25339123380d19addd31a4f7a4 --- /dev/null +++ b/multilinguality_megatron/new_monolingual_data.py @@ -0,0 +1,149 @@ +import gzip +import json +import sys + +import pandas as pd +from pathlib import Path + +l = sys.argv[-1] + + +def _close_when_exhausted(file): + with file: + for line in file: + yield json.loads(line) + + +def open_read_cleaned(filename): + file: TextIO = gzip.open(filename, "rt") # type: ignore + return _close_when_exhausted(file) + + +def write_json_lines_to_gzip(filename: str, data): + try: + with gzip.open(filename, "wt") as f: + for item in data: + json_line = json.dumps(item) + f.write(json_line + "\n") + finally: + f.close() # Ensure file is closed even if an exception occurs + + +def write_json_lines(filename: str, data): + try: + with open(filename, "w") as f: + for item in data: + json_line = json.dumps(item) + f.write(json_line + "\n") + finally: + f.close() # Ensure file is closed even if an exception occurs + + +TEST_SIZE = 10000 +TRAIN_LEN = 2_000_000 # 2 million instances is likely enough, since 3.8M yields 9.6G italian tokens + +# red pajama (en, de, es, fr, it) + +root_dir = "/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered" +# l_datasets = { +# "it": { +# "train": [ +# "filtered_it_2023-06_head_documents.jsonl.gz", +# "filtered_it_2022-49_head_documents.jsonl.gz", +# "filtered_it_2022-40_head_documents.jsonl.gz", +# ], +# "test": "filtered_it_2023-14_head_documents.jsonl.gz", +# }, +# "es": { +# "train": [ +# "filtered_es_2023-06_head_documents.jsonl.gz", +# "filtered_es_2022-49_head_documents.jsonl.gz", +# ], +# "test": "filtered_es_2023-14_head_documents.jsonl.gz", +# }, +# "de": { +# "train": [ +# "filtered_de_2023-06_head_documents.jsonl.gz", +# "filtered_de_2022-49_head_documents.jsonl.gz", +# ], +# "test": "filtered_de_2023-14_head_documents.jsonl.gz", +# }, +# "fr": { +# "train": [ +# "filtered_fr_2023-06_head_documents.jsonl.gz", +# "filtered_fr_2022-49_head_documents.jsonl.gz", +# ], +# "test": "filtered_fr_2023-14_head_documents.jsonl.gz", +# }, +# "en": { +# "train": [ +# "filtered_en_2023-06_head_documents.jsonl.gz", +# ], +# "test": "filtered_en_2023-14_head_documents.jsonl.gz", +# }, +# } + +obs = [] +# train +# append = True +# for d in l_datasets[l]["train"]: +# if append: +# for o in open_read_cleaned(f"{root_dir}/{l}/{d}"): +# obs.append(o) +# print(f"Selected {len(obs)} instances...") +# if len(obs) == TRAIN_LEN: +# append = False +# break + +# print("Saving") +# write_json_lines_to_gzip(f"{root_dir}/{l}/train.jsonl.gz", obs) +# test +# obs = [] +# for o in 
open_read_cleaned(f'{root_dir}/{l}/{l_datasets[l]["test"]}'): +# obs.append(o) +# test = pd.DataFrame(obs) +# test = test.sample(n=TEST_SIZE, random_state=42).reset_index(drop=True) +# test.to_json( +# f"/mnt/data/jpombal/tower-results/raw_data/monolingual/red_pajama_filtered.{l}/test.jsonl", +# orient="records", +# lines=True, +# ) + +# number of words that exceeds by far the number of words for the training data; +# this way we ensure the test data does not overlap +n_words_dict = { + "nl": 933333330, + "pt": 933333330, + "ru": 600000000, + "zh": 33888888, + "ko": 350000000, +} + +corpus = open_read_cleaned( + f"/mnt/data/shared/tower_llm_data/webcorpus/{l}/0000.json.gz" +) + +n_words = 0 +rows = 0 +data = [] +for doc in corpus: + if l == "zh": + n_words += len(doc["text"]) + else: + n_words += len(doc["text"].split(" ")) + if n_words >= n_words_dict[l]: + data.append({"text": doc["text"]}) + rows += 1 + if rows == TEST_SIZE: + break + +Path(f"/mnt/data/jpombal/tower-results/raw_data/monolingual/webcorpus.{l}").mkdir( + exist_ok=True, parents=True +) + +write_json_lines( + f"/mnt/data/jpombal/tower-results/raw_data/monolingual/webcorpus.{l}/test.jsonl", + data, +) + +print("done") diff --git a/multilinguality_megatron/notebooks/data_perplexity_analysis.ipynb b/multilinguality_megatron/notebooks/data_perplexity_analysis.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..2e0b300991f8acb8fb36e21f4887cfffa4f995f0 --- /dev/null +++ b/multilinguality_megatron/notebooks/data_perplexity_analysis.ipynb @@ -0,0 +1,814 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import (\n", + " Callable,\n", + " Dict,\n", + " Iterable,\n", + " Iterator,\n", + " List,\n", + " Optional,\n", + " Sequence,\n", + " TextIO,\n", + " Tuple,\n", + " Union,\n", + ")\n", + "import gzip\n", + "import json\n", + "\n", + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def _close_when_exhausted(file: TextIO) -> Iterable[str]:\n", + " with file:\n", + " for line in file:\n", + " yield json.loads(line)\n", + "\n", + "def open_read_cleaned(filename) -> Iterable[str]:\n", + " file: TextIO = gzip.open(filename, \"rt\") # type: ignore\n", + " return _close_when_exhausted(file)\n", + "\n", + "def get_perplexities_and_text(corpus: Iterable[str], n: float = None) -> List[float]:\n", + " perplexities = []\n", + " texts = []\n", + " if n is None:\n", + " for entry in enumerate(corpus):\n", + " perplexities.append(entry['perplexity'])\n", + " texts.append(entry['text'])\n", + " else:\n", + " for i, entry in enumerate(corpus):\n", + " if i >= n:\n", + " break\n", + " perplexities.append(entry['perplexity'])\n", + " texts.append(entry['text'])\n", + "\n", + " return perplexities, texts\n", + "\n", + "def describe_perplexities(perplexities: List[float]):\n", + " print(pd.Series(perplexities).describe())\n", + "\n", + "def perplexity_histogram(perplexities: List[float]):\n", + " series = pd.Series(perplexities)\n", + " series.hist(bins=1000, weights=np.zeros_like(series) + 1. 
/ len(series), cumulative=True)\n", + " plt.xlim(0, 5000)\n", + " plt.xticks(np.arange(0, 5000, 200), rotation=90)\n", + " plt.yticks(np.arange(0, 1.1, 0.1))\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Perplexity descriptive stats per language" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [], + "source": [ + "def perplexities_langs_stats(langs: list[str], n: int) -> None:\n", + " for lang in langs:\n", + " print(lang)\n", + " if lang == \"de\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/2/0000.json.gz\"\n", + " elif lang == \"es\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/3/0000.json.gz\"\n", + " elif lang == \"fr\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/1/0000.json.gz\"\n", + " elif lang == \"ru\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/6/0000.json.gz\"\n", + " else:\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/0000.json.gz\"\n", + " train_corpus = open_read_cleaned(corpus)\n", + " perplexities, _ = get_perplexities_and_text(train_corpus, n=n)\n", + " describe_perplexities(perplexities)\n", + " print(\"\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "de\n", + "count 100000.000000\n", + "mean 878.973901\n", + "std 960.759983\n", + "min 14.200000\n", + "25% 395.300000\n", + "50% 611.800000\n", + "75% 1019.800000\n", + "max 46451.000000\n", + "dtype: float64\n", + "es\n", + "count 100000.000000\n", + "mean 419.436802\n", + "std 492.000302\n", + "min 7.700000\n", + "25% 184.800000\n", + "50% 275.700000\n", + "75% 470.800000\n", + "max 28396.800000\n", + "dtype: float64\n", + "fr\n", + "count 100000.000000\n", + "mean 586.847523\n", + "std 959.302055\n", + "min 4.300000\n", + "25% 189.200000\n", + "50% 322.000000\n", + "75% 622.300000\n", + "max 70893.900000\n", + "dtype: float64\n", + "it\n", + "count 100000.000000\n", + "mean 522.677809\n", + "std 725.482231\n", + "min 1.800000\n", + "25% 224.100000\n", + "50% 332.900000\n", + "75% 562.500000\n", + "max 86453.800000\n", + "dtype: float64\n", + "ko\n", + "count 100000.000000\n", + "mean 193.209064\n", + "std 349.575732\n", + "min 1.500000\n", + "25% 46.100000\n", + "50% 72.800000\n", + "75% 198.000000\n", + "max 55450.000000\n", + "dtype: float64\n", + "nl\n", + "count 100000.000000\n", + "mean 920.457715\n", + "std 1077.452949\n", + "min 8.500000\n", + "25% 445.000000\n", + "50% 649.200000\n", + "75% 1016.900000\n", + "max 64355.100000\n", + "dtype: float64\n", + "pt\n", + "count 100000.000000\n", + "mean 393.057294\n", + "std 476.371983\n", + "min 4.100000\n", + "25% 169.900000\n", + "50% 257.500000\n", + "75% 441.225000\n", + "max 20750.100000\n", + "dtype: float64\n", + "ru\n", + "count 100000.000000\n", + "mean 474.163654\n", + "std 896.298660\n", + "min 6.100000\n", + "25% 228.500000\n", + "50% 334.300000\n", + "75% 532.400000\n", + "max 193807.100000\n", + "dtype: float64\n", + "zh\n", + "count 100000.000000\n", + "mean 2985.985015\n", + "std 2826.177214\n", + "min 6.800000\n", + "25% 1488.900000\n", + "50% 2041.600000\n", + "75% 3437.200000\n", + "max 121231.600000\n", + "dtype: float64\n" + ] + } + ], + "source": [ + "perplexities_langs_stats(\n", + " [\"en\", \"de\", \"es\", \"fr\", \"it\", \"ko\", \"nl\", \"pt\", \"ru\", \"zh\"],\n", + " n=100_000\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "# Perplexity histograms per language" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "metadata": {}, + "outputs": [], + "source": [ + "def perplexities_langs_histograms(langs: list[str], n: int) -> None:\n", + " for lang in langs:\n", + " print(lang)\n", + " if lang == \"de\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/2/0000.json.gz\"\n", + " elif lang == \"es\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/3/0000.json.gz\"\n", + " elif lang == \"fr\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/1/0000.json.gz\"\n", + " elif lang == \"ru\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/6/0000.json.gz\"\n", + " else:\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/0000.json.gz\"\n", + " train_corpus = open_read_cleaned(corpus)\n", + " perplexities, _ = get_perplexities_and_text(train_corpus, n=n)\n", + " perplexity_histogram(perplexities)\n", + " print(\"\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "de\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGyCAYAAAA2+MTKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABDoklEQVR4nO3de1xUBfo/8M/MMMyIiiioKKGjaSnrBYMgrPXSYqRml23LosKlsivfsvluBJsKZkm3JSzZcE26s7ltrrWLQURh60KyYlqbZlkaJYG0FmPwdRiZ5/eHP6YmZnRucA7web9e86I5l895YOZ4ns5VIyICIiIiIoVolS6AiIiI+jc2I0RERKQoNiNERESkKDYjREREpCg2I0RERKQoNiNERESkKDYjREREpKggpQvwhN1uR0NDAwYPHgyNRqN0OUREROQBEcGxY8cwevRoaLXu93/0imakoaEB0dHRSpdBREREPvjqq69wxhlnuB3fK5qRwYMHAwAOHjyIYcOG+Z1ns9nw1ltv4aKLLoJer1dNFvPUlafm2pjHz5Z5PZOn5tp6Q57FYkF0dLRjO+5Or2hGOg/NDB48GKGhoX7n2Ww2hISEIDQ0NCBf1EBlMU9deWqujXn8bJnXM3lqrq035HU63SkWPIGViIiIFMVmhIiIiBTFZoSIiIgUxWaEiIiIFMVmhIiIiBTFZoSIiIgUxWaEiIiIFMVmhIiIiBTFZoSIiIgUxWaEiIiIFMVmhIiIiBTldTPy3nvvYdGiRRg9ejQ0Gg22bNly2nmqqqpwzjnnwGAwYMKECXjuued8KJWIiIj6Iq+bkdbWVkyfPh2FhYUeTX/w4EEsXLgQc+fOxe7du7Fs2TLcfPPNKC8v97pYIiIi6nu8fmrv/PnzMX/+fI+nLyoqwrhx4/CHP/wBADB58mRs374dTzzxBFJSUrxdPBEREfUxXjcj3qqpqUFycrLTsJSUFCxbtsztPFarFVar1fHeYrEAOPloY5vN5ndNnRlqy2KeuvLUXBvz1JPFvL6dp+baelPe6WhERHxdiEajwd/+9jdcfvnlbqc566yzkJ6ejuzsbMewrVu3YuHChWhra8OAAQO6zJObm4tVq1Z1GV5SUoKQkBBfyyUiIqIe1NbWhtTUVLS0tCA0NNTtdN2+Z8QX2dnZMJvNjvcWiwXR0dGYO3cuwsPD/c632WyoqKjAvHnzoNfrVZPFPHXlqbk25vGzZV7P5Km5tt6Q13lk43S6vRmJjIxEU1OT07CmpiaEhoa63CsCAAaDAQaDoctwvV4fkD9Od+SpuTbmqSeLeerKU3NtzFNXnpprU3Oepxnd3owkJSVh69atTsMqKiqQlJTU3YsmIiLqk0xZpQAAg07waAIwJbcc1g6N37mBzrNb2zyazutm5IcffsCBAwcc7w8ePIjdu3dj2LBhGDNmDLKzs3H48GG88MILAIDbbrsN69atQ2ZmJm688Ua88847+Mtf/oLS0lJvF01ERKQItW7s+wqvm5GdO3di7ty5jved53YsWbIEzz33HL755hvU19c7xo8bNw6lpaW45557sHbtWpxxxhl45plneFkvERGdVuceAG8EcoPfmUXdy+tmZM6cOTjVBTiu7q46Z84cfPDBB94uioiIVGhKbrmqDw1Q76PKq2mIiMg/P92jEPjzCvyOIHLCZoSISCW666REIrVjM0JE1A18OdeBqL9iM0JEdAqeNBXck0HkHzYjRNRvsXkgUgc2I0TU63l7SISXaxKpC5sRIlKt7rwihIjUg80IEakGT/ok6p/YjBBRj2CjQUTusBkhooAzZZXysAoReYzNCBF5hXs4iCjQ2IwQkVtsPIioJ7AZISInPKxCRD2NzQhRP/XzvR689wYRKYXNCFE/wMMtRKRmbEaI+iA2H0TUm7AZIerleI4HEfV2bEaIepnOvR48x4OI+go2I0QqxsMtRNQfaJUugIiIiPo37hkhUgnuBSGi/orNCJFC2HwQEZ3EwzRERESkKO4ZIeoBfIotEZF7Pu0ZKSwshMlkgtFoRGJiImpra91Oa7PZ8MADD+DMM8+E0WjE9OnTUVZW5nPBRL2BKavU6UVERO553Yxs2rQJZrMZOTk52LVrF6ZPn46UlBQcOXLE5fTLly/H+vXr8dRTT2Hv3r247bbbcMUVV+CDDz7wu3giIiLq/bw+TJOfn4+lS5ciPT0dAFBUVITS0lIUFxcjKyury/Qvvvgi7r//fixYsAAAcPvtt+Ptt9/GH/7wB7z00kt+lk+kPO75ICLyj1fNSHt7O+rq6pCdne0YptVqkZycjJqaGpfzWK1WGI1Gp2EDBgzA9u3b3S7HarXCarU63lssFgAnD/nYbDZvSnapM0N
tWcxTV56nWQadeJRn0IrTT38xTx1ZzOvbeWqurTfkdXj476NGRDxeYkNDA6KiolBdXY2kpCTH8MzMTGzbtg07duzoMk9qair27NmDLVu24Mwzz0RlZSUuu+wydHR0ODUcP5Wbm4tVq1Z1GV5SUoKQkBBPyyUiIiIFtbW1ITU1FS0tLQgNDXU7XbdfTbN27VosXboUkyZNgkajwZlnnon09HQUFxe7nSc7Oxtms9nx3mKxIDo6GnPnzkV4eLjfNdlsNlRUVGDevHnQ6/WqyWKeuvJcZU3JLfc5z6AVrI63Y8VOLax2/6+mYZ46spjXt/PUXFtvyOuw6jyazqtmJCIiAjqdDk1NTU7Dm5qaEBkZ6XKe4cOHY8uWLTh+/Dj++9//YvTo0cjKysL48ePdLsdgMMBgMHQZrtfrA7LB6o48NdfGPP/MeOidn1yK6//KabVrAnppL/PUkcW8vp2n5trUnGf3MMOrq2mCg4MRFxeHysrKHxdkt6OystLpsI0rRqMRUVFROHHiBF577TVcdtll3iyaiIiI+iivD9OYzWYsWbIE8fHxSEhIQEFBAVpbWx1X16SlpSEqKgp5eXkAgB07duDw4cOIjY3F4cOHkZubC7vdjszMzMD+JkR++vlVMZ03KSMiou7ldTOyePFiNDc3Y+XKlWhsbERsbCzKysowcuRIAEB9fT202h93uBw/fhzLly/HF198gUGDBmHBggV48cUXERYWFrBfgoiIiHovn05gzcjIQEZGhstxVVVVTu9nz56NvXv3+rIYIiIi6gf4bBrqt3izMiIideBTe4mIiEhR3DNC/QL3ghARqRf3jBAREZGi2IwQERGRoniYhvqkKbnlAb0bIRERdR/uGSEiIiJFsRkhIiIiRfEwDfV6P71ShrdwJyLqfbhnhIiIiBTFZoSIiIgUxcM01Kvw5mVERH0P94wQERGRotiMEBERkaLYjBAREZGieM4IqRrPESEi6vu4Z4SIiIgUxWaEiIiIFMVmhIiIiBTFc0ZINXh+CBFR/8Q9I0RERKQoNiNERESkKDYjREREpCieM0KK6Dw/xKATPJoATMktB6BRtigiIlKET3tGCgsLYTKZYDQakZiYiNra2lNOX1BQgLPPPhsDBgxAdHQ07rnnHhw/ftyngomIiKhv8boZ2bRpE8xmM3JycrBr1y5Mnz4dKSkpOHLkiMvpS0pKkJWVhZycHOzbtw8bN27Epk2b8Pvf/97v4omIiKj387oZyc/Px9KlS5Geno6YmBgUFRUhJCQExcXFLqevrq7G+eefj9TUVJhMJlx00UW49tprT7s3hYiIiPoHr84ZaW9vR11dHbKzsx3DtFotkpOTUVNT43KemTNn4qWXXkJtbS0SEhLwxRdfYOvWrbjhhhvcLsdqtcJqtTreWywWAIDNZoPNZvOmZJc6M9SW1ZfzTp4T8iOD7v//1IrTT38FMk/NtTFPPVnM69t5aq6tN+R16DzL0YiIx0tsaGhAVFQUqqurkZSU5BiemZmJbdu2YceOHS7ne/LJJ/G73/0OIoITJ07gtttuw9NPP+12Obm5uVi1alWX4SUlJQgJCfG0XCIiIlJQW1sbUlNT0dLSgtDQULfTdfvVNFVVVVizZg3++Mc/IjExEQcOHMDdd9+N1atXY8WKFS7nyc7Ohtlsdry3WCyIjo7G3LlzER4e7ndNNpsNFRUVmDdvHvR6vWqy+nLez/eMdDJoBavj7VixUwur3f+raQKZp+bamMfPlnk9k6fm2npDXodV59F0XjUjERER0Ol0aGpqchre1NSEyMhIl/OsWLECN9xwA26++WYAwNSpU9Ha2opbbrkF999/P7TarqetGAwGGAyGLsP1en1ANqjdkafm2tSQZ+049ZfaatecdhpvBDJPzbUxTz1ZzOvbeWquTc15dg8zvDqBNTg4GHFxcaisrPxxQXY7KisrnQ7b/FRbW1uXhkOnO9kpeXGEiIiIiPoorw/TmM1mLFmyBPHx8UhISEBBQQFaW1uRnp4OAEhLS0NUVBTy8vIAAIsWLUJ+fj5mzJjhOEyzYsUKLFq0yNGUUN/CB94REZE3vG5GFi9ejObmZqxcuRKNjY2IjY1FWVkZRo4cCQCor6932hOyfPlyaDQaLF++HIcPH8bw4cOxaNEiPPTQQ4H7LYiIiKjX8ukE1oyMDGRkZLgcV1VV5byAoCDk5OQgJyfHl0URERFRH8cH5REREZGi2IwQERGRotiMEBERkaK6/aZn1LfxyhkiIvIX94wQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaJ4NQ15ZUpuOR5NOPkzkE+IJCKi/ot7RoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUbyahtxy9dwZg06BQoiIqE/jnhEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFO8zQg6u7itCRETU3bhnhIiIiBTlUzNSWFgIk8kEo9GIxMRE1NbWup12zpw50Gg0XV4LFy70uWgiIiLqO7xuRjZt2gSz2YycnBzs2rUL06dPR0pKCo4cOeJy+s2bN+Obb75xvP7zn/9Ap9Phqquu8rt4IiIi6v28bkby8/OxdOlSpKenIyYmBkVFRQgJCUFxcbHL6YcNG4bIyEjHq6KiAiEhIWxGiIiICICXJ7C2t7ejrq4O2dnZjmFarRbJycmoqanxKGPjxo245pprMHDgQLfTWK1WWK1Wx3uLxQIAsNlssNls3pTsUmeG2rKUzjPo5PTTaMXpp7/UnKfm2pinnizm9e08NdfWG/I6PNiuAIBGRDxeYkNDA6KiolBdXY2kpCTH8MzMTGzbtg07duw45fy1tbVITEzEjh07kJCQ4Ha63NxcrFq1qsvwkpIShISEeFouERERKaitrQ2pqaloaWlBaGio2+l69NLejRs3YurUqadsRAAgOzsbZrPZ8d5isSA6Ohpz585FeHi433XYbDZUVFRg3rx50Ov1qslSOm9Kbvlp8wxawep4O1bs1MJq1/hdn5rz1Fwb8/jZMq9n8tRcW2/I67DqPJrOq2YkIiICOp0OTU1NTsObmpoQGRl5ynlbW1vxyiuv4IEHHjjtcgwGAwwGQ5fher0+IBvo7shTc22u8lzfU8TzL57VroG1w/8vam/IU3NtzFNPFvP6dp6aa1Nznt3DDK9OYA0ODkZcXBwqKyt/XJDdjsrKSqfDNq68+uqrsFqtuP76671ZJBEREfVxXh+mMZvNWLJkCeLj45GQkICCggK0trYiPT0dAJCWloaoqCjk5eU5zbdx40ZcfvnlATnMQkRERH2H183I4sWL0dzcjJUrV6KxsRGxsbEoKyvDyJEjAQD19fXQap13uOzfvx/bt2/HW2+9FZiqiYiIqM/w6QTWjIwMZGRkuBxXVVXVZdjZZ58NLy7aISIion6Ez6YhIiIiRbEZISIiIkWxGSEiIiJF9ehNz0g5U3LLA3oNOhERUaBwzwgREREpis0IERERKYrNCBERESmKzQgREREpis0IERERKYrNCB
ERESmKzQgREREpis0IERERKYrNCBERESmKd2Dtg0xZpY7/NugEjyYoWAwREdFpcM8IERERKYrNCBERESmKzQgREREpis0IERERKYrNCBERESmKzQgREREpis0IERERKYrNCBERESmKzQgREREpindg7eV+erdVIiKi3sinPSOFhYUwmUwwGo1ITExEbW3tKaf//vvvceedd2LUqFEwGAw466yzsHXrVp8KJiIior7F6z0jmzZtgtlsRlFRERITE1FQUICUlBTs378fI0aM6DJ9e3s75s2bhxEjRuCvf/0roqKi8OWXXyIsLCwQ9RMREVEv53Uzkp+fj6VLlyI9PR0AUFRUhNLSUhQXFyMrK6vL9MXFxTh69Ciqq6uh1+sBACaTyb+qiYiIqM/wqhlpb29HXV0dsrOzHcO0Wi2Sk5NRU1Pjcp433ngDSUlJuPPOO/H6669j+PDhSE1NxX333QedTudyHqvVCqvV6nhvsVgAADabDTabzZuSXerMUFuWL3kGnZx6vFacfvqrP+WpuTbmqSeLeX07T8219Ya8jtNsozppRMTjJTY0NCAqKgrV1dVISkpyDM/MzMS2bduwY8eOLvNMmjQJhw4dwnXXXYc77rgDBw4cwB133IG77roLOTk5LpeTm5uLVatWdRleUlKCkJAQT8slIiIiBbW1tSE1NRUtLS0IDQ11O123X01jt9sxYsQI/OlPf4JOp0NcXBwOHz6Mxx57zG0zkp2dDbPZ7HhvsVgQHR2NuXPnIjw83O+abDYbKioqMG/ePMehIzVk+ZI3Jbf8lOMNWsHqeDtW7NTCatf4XV9/ylNzbczjZ8u8nslTc229Ia/D6voIyM951YxERERAp9OhqanJaXhTUxMiIyNdzjNq1Cjo9XqnQzKTJ09GY2Mj2tvbERwc3GUeg8EAg8HQZbherw/IBr878pSqzdrh2ZfFatd4PC3zui+LeerKU3NtzFNXnpprU3Oe3cMMry7tDQ4ORlxcHCorK39ckN2OyspKp8M2P3X++efjwIEDsNvtjmGffvopRo0a5bIRISIiov7F6/uMmM1mbNiwAc8//zz27duH22+/Ha2trY6ra9LS0pxOcL399ttx9OhR3H333fj0009RWlqKNWvW4M477wzcb0FERES9ltfnjCxevBjNzc1YuXIlGhsbERsbi7KyMowcORIAUF9fD632xx4nOjoa5eXluOeeezBt2jRERUXh7rvvxn333Re434KIiIh6LZ9OYM3IyEBGRobLcVVVVV2GJSUl4f333/dlUfQzvP07ERH1NXxQHhERESmKzQgREREpis0IERERKYrNCBERESmKzQgREREpis0IERERKYrNCBERESmKzQgREREpis0IERERKYrNCBERESmKzQgREREpis0IERERKcqnB+VRz+BD8YiIqD/gnhEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUpRPzUhhYSFMJhOMRiMSExNRW1vrdtrnnnsOGo3G6WU0Gn0umIiIiPoWr5uRTZs2wWw2IycnB7t27cL06dORkpKCI0eOuJ0nNDQU33zzjeP15Zdf+lU0ERER9R1eP7U3Pz8fS5cuRXp6OgCgqKgIpaWlKC4uRlZWlst5NBoNIiMj/au0j+t8Qq9BJ3g0AZiSWw5Ao2xRREREPcCrZqS9vR11dXXIzs52DNNqtUhOTkZNTY3b+X744QeMHTsWdrsd55xzDtasWYNf/OIXbqe3Wq2wWq2O9xaLBQBgs9lgs9m8Kdmlzgw1ZRl0cvKn1vmnv5injizmqStPzbUxT115aq6tN+R16DzL0YiIx0tsaGhAVFQUqqurkZSU5BiemZmJbdu2YceOHV3mqampwWeffYZp06ahpaUFjz/+ON577z18/PHHOOOMM1wuJzc3F6tWreoyvKSkBCEhIZ6WS0RERApqa2tDamoqWlpaEBoa6nY6rw/TeCspKcmpcZk5cyYmT56M9evXY/Xq1S7nyc7Ohtlsdry3WCyIjo7G3LlzER4e7ndNNpsNFRUVmDdvHvR6vSqyTh6WOdmNro63Y8VOLax2/w/TME8dWcxTV56aa2OeuvLUXFtvyOuw6jyazqtmJCIiAjqdDk1NTU7Dm5qaPD4nRK/XY8aMGThw4IDbaQwGAwwGg8t5/W0euivP3yxrh/OHbrVrugzzB/PUkcU8deWpuTbmqStPzbWpOc/uYYZXV9MEBwcjLi4OlZWVPy7IbkdlZaXT3o9T6ejowEcffYRRo0Z5s2giIiLqo7w+TGM2m7FkyRLEx8cjISEBBQUFaG1tdVxdk5aWhqioKOTl5QEAHnjgAZx33nmYMGECvv/+ezz22GP48ssvcfPNNwf2NyEiIqJeyetmZPHixWhubsbKlSvR2NiI2NhYlJWVYeTIkQCA+vp6aLU/7nD57rvvsHTpUjQ2NmLo0KGIi4tDdXU1YmJiAvdbEBERUa/l0wmsGRkZyMjIcDmuqqrK6f0TTzyBJ554wpfFEBERUT/AZ9MQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGiuv3ZNOSaKatU6RKIiIhUgXtGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFBShfQH5iySpUugYiISLV82jNSWFgIk8kEo9GIxMRE1NbWejTfK6+8Ao1Gg8svv9yXxRIREVEf5HUzsmnTJpjNZuTk5GDXrl2YPn06UlJScOTIkVPOd+jQIfzud7/DL3/5S5+LJSIior7H62YkPz8fS5cuRXp6OmJiYlBUVISQkBAUFxe7naejowPXXXcdVq1ahfHjx/tVMBEREfUtXp0z0t7ejrq6OmRnZzuGabVaJCcno6amxu18DzzwAEaMGIGbbroJ//znP0+7HKvVCqvV6nhvsVgAADabDTabzZuSXerM6Kksg048zjNoxemnv5injizmqStPzbUxT115aq6tN+R1eLj904iIx0tsaGhAVFQUqqurkZSU5BiemZmJbdu2YceOHV3m2b59O6655hrs3r0bERER+O1vf4vvv/8eW7Zscbuc3NxcrFq1qsvwkpIShISEeFouERERKaitrQ2pqaloaWlBaGio2+m69WqaY8eO4YYbbsCGDRsQERHh8XzZ2dkwm82O9xaLBdHR0Zg7dy7Cw8P9rstms6GiogLz5s2DXq/v9qwpueUe5xm0gtXxdqzYqYXVrvGrNub5l6fm2pjHz5Z5PZOn5tp6Q16HVefRdF41IxEREdDpdGhqanIa3tTUhMjIyC7Tf/755zh06BAWLVrkGGa3208uOCgI+/fvx5lnntllPoPBAIPB0GW4Xq/3u3norrxTZVk7vP9ArXaNT/MxL/B5aq6NeerJYl7fzlNzbWrOs
3uY4dUJrMHBwYiLi0NlZeWPC7LbUVlZ6XTYptOkSZPw0UcfYffu3Y7XpZdeirlz52L37t2Ijo72ZvFERETUB3l9mMZsNmPJkiWIj49HQkICCgoK0NraivT0dABAWloaoqKikJeXB6PRiClTpjjNHxYWBgBdhhMREVH/5HUzsnjxYjQ3N2PlypVobGxEbGwsysrKMHLkSABAfX09tFreZZ6IiIg849MJrBkZGcjIyHA5rqqq6pTzPvfcc74skoiIiPoo7sIgIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRXXrg/L6K1NWqdIlEBER9RrcM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESK8qkZKSwshMlkgtFoRGJiImpra91Ou3nzZsTHxyMsLAwDBw5EbGwsXnzxRZ8LJiIior7F62Zk06ZNMJvNyMnJwa5duzB9+nSkpKTgyJEjLqcfNmwY7r//ftTU1ODDDz9Eeno60tPTUV5e7nfxRERE1Pt53Yzk5+dj6dKlSE9PR0xMDIqKihASEoLi4mKX08+ZMwdXXHEFJk+ejDPPPBN33303pk2bhu3bt/tdPBEREfV+Qd5M3N7ejrq6OmRnZzuGabVaJCcno6am5rTziwjeeecd7N+/H4888ojb6axWK6xWq+O9xWIBANhsNthsNm9Kdqkzo7uyDDrxOc+gFaef/mKeOrKYp648NdfGPHXlqbm23pDX4eH2UCMiHi+xoaEBUVFRqK6uRlJSkmN4ZmYmtm3bhh07dricr6WlBVFRUbBardDpdPjjH/+IG2+80e1ycnNzsWrVqi7DS0pKEBIS4mm5REREpKC2tjakpqaipaUFoaGhbqfzas+IrwYPHozdu3fjhx9+QGVlJcxmM8aPH485c+a4nD47Oxtms9nx3mKxIDo6GnPnzkV4eLjf9dhsNlRUVGDevHnQ6/V+ZcU9UIbV8Xas2KmF1a7xuzaDVpinkjw118Y8frbM65k8NdfWG/I6rDqPpvOqGYmIiIBOp0NTU5PT8KamJkRGRrqdT6vVYsKECQCA2NhY7Nu3D3l5eW6bEYPBAIPB0GW4Xq/3u3kIdF7nh2W1a2Dt8P+D+2ku89SRp+bamKeeLOb17Tw116bmPLuHGV6dwBocHIy4uDhUVlb+uCC7HZWVlU6HbU5bnN3udE4IERER9V9eH6Yxm81YsmQJ4uPjkZCQgIKCArS2tiI9PR0AkJaWhqioKOTl5QEA8vLyEB8fjzPPPBNWqxVbt27Fiy++iKeffjqwvwkRERH1Sl43I4sXL0ZzczNWrlyJxsZGxMbGoqysDCNHjgQA1NfXQ6v9cYdLa2sr7rjjDnz99dcYMGAAJk2ahJdeegmLFy8O3G9BREREvZZPJ7BmZGQgIyPD5biqqiqn9w8++CAefPBBXxZDRERE/QCfTUNERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKClK6gN7ElFXaZZhBp0AhREREfQj3jBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRonxqRgoLC2EymWA0GpGYmIja2lq3027YsAG//OUvMXToUAwdOhTJycmnnJ6IiIj6F6+bkU2bNsFsNiMnJwe7du3C9OnTkZKSgiNHjricvqqqCtdeey3effdd1NTUIDo6GhdddBEOHz7sd/FERETU+3ndjOTn52Pp0qVIT09HTEwMioqKEBISguLiYpfTv/zyy7jjjjsQGxuLSZMm4ZlnnoHdbkdlZaXfxRMREVHv59Wzadrb21FXV4fs7GzHMK1Wi+TkZNTU1HiU0dbWBpvNhmHDhrmdxmq1wmq1Ot5bLBYAgM1mg81m86ZklzozvM0y6KTrMK04/fQX89STp+bamKeeLOb17Tw119Yb8jpcbDdd0YiIx0tsaGhAVFQUqqurkZSU5BiemZmJbdu2YceOHafNuOOOO1BeXo6PP/4YRqPR5TS5ublYtWpVl+ElJSUICQnxtFwiIiJSUFtbG1JTU9HS0oLQ0FC30/XoU3sffvhhvPLKK6iqqnLbiABAdnY2zGaz473FYkF0dDTmzp2L8PBwv+uw2WyoqKjAvHnzoNfrPZ5vSm55l2EGrWB1vB0rdmphtWv8ro156slTc23M42fLvJ7JU3NtvSGvw+rZo+29akYiIiKg0+nQ1NTkNLypqQmRkZGnnPfxxx/Hww8/jLfffhvTpk075bQGgwEGg6HLcL1e71XzcDre5lk73H8wVrvmlOO9xTz15Km5NuapJ4t5fTtPzbWpOc/uYYZXJ7AGBwcjLi7O6eTTzpNRf3rY5uceffRRrF69GmVlZYiPj/dmkURERNTHeX2Yxmw2Y8mSJYiPj0dCQgIKCgrQ2tqK9PR0AEBaWhqioqKQl5cHAHjkkUewcuVKlJSUwGQyobGxEQAwaNAgDBo0KIC/ChEREfVGXjcjixcvRnNzM1auXInGxkbExsairKwMI0eOBADU19dDq/1xh8vTTz+N9vZ2/OY3v3HKycnJQW5urn/VExERUa/n0wmsGRkZyMjIcDmuqqrK6f2hQ4d8WQQRERH1E3w2DRERESmKzQgREREpis0IERERKapHb3rW25iySpUugYiIqM/jnhEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUpRPzUhhYSFMJhOMRiMSExNRW1vrdtqPP/4YV155JUwmEzQaDQoKCnytlYiIiPqgIG9n2LRpE8xmM4qKipCYmIiCggKkpKRg//79GDFiRJfp29raMH78eFx11VW45557AlJ0dzBllSpdAhERUb/k9Z6R/Px8LF26FOnp6YiJiUFRURFCQkJQXFzscvpzzz0Xjz32GK655hoYDAa/CyYiIqK+xas9I+3t7airq0N2drZjmFarRXJyMmpqagJWlNVqhdVqdby3WCwAAJvNBpvN5nd+Z8ZPsww68SnLoBWnn/5innry1Fwb89STxby+nafm2npDXoeH21aNiHi8xIaGBkRFRaG6uhpJSUmO4ZmZmdi2bRt27NhxyvlNJhOWLVuGZcuWnXK63NxcrFq1qsvwkpIShISEeFouERERKaitrQ2pqaloaWlBaGio2+m8PmekJ2RnZ8NsNjveWywWREdHY+7cuQgPD/c732az
oaKiAvPmzYNerwcATMkt9ynLoBWsjrdjxU4trHaN37UxTz15aq6NefxsmdczeWqurTfkdVh1Hk3nVTMSEREBnU6HpqYmp+FNTU2IjIz0JuqUDAaDy/NL9Hq9o3kIhJ/mWTv8+6Nb7Rq/M5inzjw118Y89WQxr2/nqbk2NefZPczw6gTW4OBgxMXFobKy8scF2e2orKx0OmxDRERE5CmvD9OYzWYsWbIE8fHxSEhIQEFBAVpbW5Geng4ASEtLQ1RUFPLy8gCcPOl17969jv8+fPgwdu/ejUGDBmHChAkB/FWIiIioN/K6GVm8eDGam5uxcuVKNDY2IjY2FmVlZRg5ciQAoL6+HlrtjztcGhoaMGPGDMf7xx9/HI8//jhmz56Nqqoq/38DIiIi6tV8OoE1IyMDGRkZLsf9vMEwmUzw4oIdIiIi6mf4bBoiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUpQqn9rbU6bklgf0wUJERETkPe4ZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkX1i9vBm7JKnd4bdIJHExQqhoiIiJxwzwgREREpis0IERERKYrNCBERESmKzQgREREpyqdmpLCwECaTCUajEYmJiaitrT3l9K+++iomTZoEo9GIqVOnYuvWrT4VS0RERH2P11fTbNq0CWazGUVFRUhMTERBQQFSUlKwf/9+jBgxosv01dXVuPbaa5GXl4dLLrkEJSUluPzyy7Fr1y5MmTIlIL/ET/38yhkiIiJSN6/3jOTn52Pp0qVIT09HTEwMioqKEBISguLiYpfTr127FhdffDHuvfdeTJ48GatXr8Y555yDdevW+V08ERER9X5e7Rlpb29HXV0dsrOzHcO0Wi2Sk5NRU1Pjcp6amhqYzWanYSkpKdiyZYvb5VitVlitVsf7lpYWAMDRo0e7TJuYV+n03pNfKMguaGuzI8imRYdd48EcPZPFPHXlqbk25vGzZV7P5Km5tt6QZz/RBgAQkVNPKF44fPiwAJDq6mqn4ffee68kJCS4nEev10tJSYnTsMLCQhkxYoTb5eTk5AgAvvjiiy+++OKrD7y++uqrU/YXqrwDa3Z2ttPelO+//x5jx45FfX09hgwZ4ne+xWJBdHQ0vvrqK4SGhqomi3nqylNzbczjZ8u8nslTc229IU9EcOzYMYwePfqU03nVjERERECn06GpqclpeFNTEyIjI13OExkZ6dX0AGAwGGAwGLoMHzJkSED+OJ1CQ0MDlhfILOapK0/NtTFPPVnM69t5aq5N7Xme7ETw6gTW4OBgxMXFobLyx/M07HY7KisrkZSU5HKepKQkp+kBoKKiwu30RERE1L94fZjGbDZjyZIliI+PR0JCAgoKCtDa2or09HQAQFpaGqKiopCXlwcAuPvuuzF79mz84Q9/wMKFC/HKK69g586d+NOf/hTY34SIiIh6Ja+bkcWLF6O5uRkrV65EY2MjYmNjUVZWhpEjRwIA6uvrodX+uMNl5syZKCkpwfLly/H73/8eEydOxJYtW7y6x4jBYEBOTo7LQze+CGSemmtjnnqymKeuPDXXxjx15am5tt6Q5ymNyOmutyEiIiLqPnw2DRERESmKzQgREREpis0IERERKYrNCBERESmKzQgREREpSpW3g//2229RXFyMmpoaNDY2Ajh5J9eZM2fit7/9LYYPH65whUTK6nyQZKAuv1NzXktLi9O/A/48EiKQWaQ+av4eBzov0N9lpdcN1e0Z+fe//42zzjoLTz75JIYMGYJZs2Zh1qxZGDJkCJ588klMmjQJO3fu9Dr3xIkT2LNnD8rLy1FeXo49e/bAZrP5VGMgswCgsbERr7/+OtavX4/169fj9ddfd3wp1JAHnPyi7t+/H/v373c8Rbkv56lRRUUFFixYgKFDhyIkJAQhISEYOnQoFixYgLfffrvP5T3zzDOIiYnBsGHDEBMT4/TfGzduVCzr537+lHF/qTlPjeut2r/Hal4vuiPPZ548rbcnJSYmyi233CJ2u73LOLvdLrfccoucd955Hud1dHTI/fffL2FhYaLRaJxeYWFhsnz5cuno6OjxLBGRH374Qa677jrR6XQSFBQkI0aMkBEjRkhQUJDodDq5/vrrpbW1VbE8EZENGzbI5MmTRavVOr0mT54szzzzjFdZvSFPRMRms8nu3bulrKxMysrKZPfu3dLe3u5TVqDynnvuOQkKCpJrrrlGnn32Wdm6dats3bpVnn32Wbn22mtFr9fLCy+80GfyHn30UQkJCZGsrCx59913Ze/evbJ371559913JTs7WwYOHCiPPfZYj2d1euutt2T+/PkSFhbm+M6FhYXJ/PnzpaKiwqus3pCn1vVW7d9jNa8X3ZHnD9U1I0ajUfbt2+d2/L59+8RoNHqcd++998rw4cOlqKhIDh48KG1tbdLW1iYHDx6U9evXy4gRIyQzM7PHs0REbrrpJpk4caKUlZXJiRMnHMNPnDgh5eXlctZZZ8nNN9+sWJ7av/iBzgt0sxnIvIkTJ8q6devcji8sLJQJEyZ4XJva88aMGSObNm1yO/6VV16R6OjoHs8SUf8Gqz9tANX+PVbzetEdef5QXTNiMpnk+eefdzv++eefl7Fjx3qcN3LkSCkrK3M7vqysTEaMGNHjWSIiYWFh8q9//cvt+O3bt0tYWJhieWr/4gc6L9DNZiDzDAaDfPLJJ27Hf/LJJ1416WrPMxqNsnfvXrfjP/74YxkwYECPZ4mof4PVnzaAav8eq3m96I48f6iuGVm3bp0YDAa566675PXXX5f3339f3n//fXn99dflrrvukgEDBkhhYaHHeSEhIfLhhx+6Hb9nzx4ZOHBgj2eJiISGhsq///1vt+Nra2slNDRUsTy1f/EDnRfoZjOQeeecc47ce++9bsdnZmbKOeec43Ftas/75S9/KWlpaWKz2bqMO3HihKSlpcmsWbN6PEtE/Rus/rQBVPv3WM3rRXfk+UOVz6bZtGkTnnjiCdTV1aGjowMAoNPpEBcXB7PZjKuvvtrjrIULF+LEiRN4+eWXERER4TTu22+/xQ033ACdTod//OMfPZoFANdddx327duHjRs3YsaMGU7jPvjgAyxduhSTJk3CSy+9pEjerFmzMG7cOGzcuBFBQc4XXnV0dODGG2/EoUOHsG3btj6RN3DgQLz//vuYOnWqy/EffvghZs6ciR9++KHH86qqqnDJJZdg/PjxSE5OdjyYsqmpCZWVlfjiiy9QWlqKWbNmeVSb2vM+/PBDpKSkwGazYdasWU557733HoKDg/HWW2959MDNQGYBQFxcHH71q1/h0UcfdTn+vvvuw9tvv426uro+kafm9Vbt32M1rxfdkecPVTYjnWw2G7799lsAQEREBPR6vdcZX331FRYsWIBPPvkEU6dOdfpjf/TRR4iJicE//vEPREdH92gWAHz33XdITU1FeXk5hg4dihEjRgA
Ajhw5gu+//x4pKSkoKSlBWFiYInlq/+IHOi/QzWag8w4dOoSnn34a77//vtMleElJSbjttttgMpk8yukteceOHcNLL73kMi81NRWhoaGKZKl9g9XfNoBq/x6reb3ojjxfqboZCRS73Y7y8nKXf+yLLroIWq3nVzgHMqvTvn37XOZNmjTJ66xA56n9ix/IvEA3m4HOI/VQ+waLG0DqbfpFM0LkqUA3m4HOO3HiBD7++GNH1qhRozB58mSf9hr2hrzGxkbs2LHDKS8hIQGRkZGKZpG6qP17rOb1ojvyfNFvmpHa2lqXd3Q999xzFc1qb2/Hli1bXOZddtllCA4OVjQPUP8XXw0rUnez2+1YuXIlCgsLu9wcasiQIcjIyMCqVas8bm7Untfa2opbb70Vr7zyCjQaDYYNGwYAOHr0KEQE1157LdavX4+QkJAezfoptW+w+sMGUO3fYzWvF92R55ceOU1WQU1NTXLBBReIRqORsWPHSkJCgiQkJMjYsWNFo9HIBRdcIE1NTT2eJSLy2Wefyfjx48VoNMrs2bPl6quvlquvvlpmz54tRqNRJkyYIJ999plieWq/KVt33ORNRGTHjh1SUFAgWVlZkpWVJQUFBVJbW+t1TiDz1HzZcXfkBfKeOYG+/46a70fTHXlqXm/V/j1W83rRHXn+6PPNyJVXXilJSUkuL3X75JNPZObMmfKb3/ymx7NERJKTk+Wyyy6TlpaWLuNaWlrksssuk4suukixPLV/8QOdF+hmM5B5ar7suDvyAnnPnEDff0ftG6z+tAFU+/dYzetFd+T5o883I4MGDZJdu3a5Hb9z504ZNGhQj2eJiAwYMEA++ugjt+M//PBDr67fD3Se2r/4gc4LdLMZyLxA3+NG7XmBvGdOoO+/o/YNVn/aAKr9e6zm9aI78vyhugflBZrBYIDFYnE7/tixYx4/QTGQWQAQFhaGQ4cOuR1/6NAhjy/D7Y48u91+ynNMgoODYbfb+0xeeXk5CgsLcfbZZ3cZd/bZZ+PJJ59EWVmZInlz5szB7373O8el7j/17bff4r777sOcOXM8rk3teZdccgluueUWfPDBB13GffDBB7j99tuxaNGiHs8CTq7no0ePdjt+1KhRaG1t7TN5al5v1f49VvN60R15fumRlkdBd9xxh4wdO1Y2b97sdPiipaVFNm/eLCaTSTIyMno8S0RkxYoVMnToUMnPz5c9e/ZIY2OjNDY2yp49eyQ/P1+GDRsmOTk5iuWlpqbKjBkzXO4N2rVrl8TFxcl1113XZ/LCw8OlqqrK7fh3331XwsPDFcmrr6+XKVOmSFBQkMyYMUMuvvhiufjii2XGjBkSFBQk06ZNk/r6eo9rU3ve0aNH5eKLLxaNRiPDhg2TSZMmyaRJk2TYsGGi1Wpl/vz58t133/V4lojIggUL5KKLLpLm5uYu45qbm+Xiiy+WhQsX9pk8Na+3av8eq3m96I48f/T5q2msViuWLVuG4uJinDhxwtGRt7e3IygoCDfddBOeeOIJj/ZouMuyWq3Q6/VeZXV65JFHsHbtWjQ2NkKj0QAARASRkZFYtmwZMjMzvfp9A5mn9puyBTrvzjvvRGlpKZ544gn86le/ctzrwGKxoLKyEmazGZdccgmeeuopRfLUftlxd9yD55NPPulyZZiv98wJ1P131H4/mv52c0a1f4/Vvl4Agb/XlS/6fDPSyWKxoK6uzumPHRcX59PNdSwWC3bu3ImmpiYAwMiRIxEfH+/XjXoOHjzoVNu4ceN8zgp0nppvygYEbsUMZOPaHXmkHmrfYHEDSL1Nv2lGulNwcDD27NmDyZMnK10KBUAgG9dA5wXyHjfAyY2Wqw2T3W7H119/jTFjxviU2+nCCy/Es88+i7Fjx3o1n9VqhVarddwT4/PPP0dxcTHq6+sxduxY3HTTTV412Hv27EFdXR3mzJmD8ePH4+OPP0ZhYSHsdjuuuOIKpKSkeFUfqQvXC9/WC0A960a/aEb+7//+D3V1dRg2bBhiYmKcxh0/fhx/+ctfkJaWdtocs9nscvjatWtx/fXXIzw8HACQn5/vUV27du3C0KFDHV+eF198EUVFRY4vVkZGBq655hqPsjqtW7cOtbW1WLBgAa655hq8+OKLyMvLg91ux69//Ws88MADXR5OdSrdcRO1r7/+GmFhYRg0aJDTcJvNhpqaGo+fmeHO+PHjUV5ejokTJ/qVoyZHjhzBlVdeiX/9618YM2aM0673+vp6nH/++Xjttdccu7xPx2Kx4Oabb8bf//53hIaG4tZbb0VOTg50Op0jd/To0Y4HVZ7OG2+84XL4r3/9a6xdu9ZxSODSSy/1KG/OnDnIyMjAb37zG/zrX//Cr371K5x99tmYPHkyPv30U+zfvx9vv/02kpKSTpu1efNmXH311QgLC4PVasXf/vY3XHXVVYiPj4dOp8Pbb7+NF154AampqR7V1okbQOU3gFwvfF8vgO5bN3zSI2emKGj//v2O+zpotVqZNWuWHD582DG+sbFRtFqtR1kajUZiY2Nlzpw5Ti+NRiPnnnuuzJkzR+bOnetxbdOmTZOKigoREdmwYYMMGDBA7rrrLnn66adl2bJlMmjQINm4caPHeatXr5bBgwfLlVdeKZGRkfLwww9LeHi4PPjgg7JmzRoZPny4rFy50uO8QN9EraGhQc4991zRarWi0+nkhhtukGPHjjnGe/NZiIisXbvW5Uun00l2drbjvae++uorp5P+3nvvPUlNTZULLrhArrvuOqmurvY4q9Pf//53WbFihWzfvl1ERCorK2X+/PmSkpIi69ev9zgn0Jcd33XXXXLWWWfJq6++Khs2bJCxY8fKwoULxWq1isjJz0Kj0Xic17l+/fwmWz99efPZhoaGyqeffioiIrNnz5Z77rnHafzy5cvl/PPP9yjrnHPOkQcffFBERP785z9LWFiYPPDAA47xjz/+uMTGxnpcW6DvR9PS0iJXXXWVGI1GGTFihKxYscLp/hverhevv/66y5dOp5N169Y53ntq9uzZ8uqrr4rIyctuDQaDTJs2TRYvXiwzZsyQkJAQr9aN1157TXQ6nYSHh8ugQYOkoqJCwsLCJDk5WVJSUkSn08nLL7/sURbXC9/XC5HArxv+6PPNyOWXXy4LFy6U5uZm+eyzz2ThwoUybtw4+fLLL0XEuxU9Ly9Pxo0bJ5WVlU7Dg4KC5OOPP/a6tgEDBsihQ4dERGTGjBnypz/9yWn8yy+/LDExMR7nnXnmmfLaa6+JiMju3btFp9PJSy+95Bi/efNmmTBhgsd5gb6JWlpamiQmJsq///1vqaiokLi4OImPj5ejR4+KiG8r+hlnnCEmk8nppdFoJCoqSkwmk4wbN87jvISEBPn73/8uIiJbtmwRrVYrl156qdx3331yxRVXiF6vd4z3RFFRkQQFBUlcXJyEhobKiy++KIMHD5abb75Zbr31VhkwYIAUFBR4lBXoe9yMGTNG3n33Xcf75uZmSUhIkIsuukiOHz/u9Qaw8wqNn2+EfV03Bg4cKPv27RORk/fN2L17t9P4AwcOeP
z7Dhw4UA4ePCgiIna7XfR6vdO9Hz7//HOv/nbcAKpnA8j1wvf1ojMvkOuGP/p8MzJixAinP67dbpfbbrtNxowZI59//rnXX67a2lo566yz5H//93+lvb1dRHz/YoWHh8vOnTsddbr6Ynl707POJktERK/Xy3/+8x/H+0OHDklISIhXeYG8idro0aNlx44djvfHjx+XRYsWSWxsrPz3v//1+rO49dZbJTY2Vvbu3es03J8V/YsvvhARkcTERHn44Yedxj/11FMyY8YMj/NiYmIcDeY777wjRqNRCgsLHeOfffZZmTx5skdZgb7seMCAAY7ftZPFYpGkpCS58MIL5YsvvvDqsxARyc/Pl+joaKeGzdfP4sILL5RHH31URERmzpwpzz//vNP4v/71rzJmzBiPsiIjIx3r2dGjR0Wj0ThtcGprayUyMtLj2rgBVM8GkOuF7+uFSODXDX/0+WZk8ODBXTZWIiJ33nmnnHHGGfLee+95/eU6duyYpKWlybRp0+Sjjz4SvV7v0xfr+uuvl5tuuklERK666ipZvny50/g1a9bI1KlTPc4bN26cvPnmmyIi8umnn4pWq5W//OUvjvGlpaViMpk8zhs1atQp9wS88cYbMmrUKI/zBg4c6Pg/rE42m00uv/xymTZtmnz44YdefxabN2+W6OhoeeqppxzDfF3RhwwZInv27BGRk81h5393OnDggNfN3M+bw582dwcPHvQ4L9D3uDn77LOltLS0y/Bjx45JUlKSTJ8+3evPQkTkgw8+kJiYGLnllluktbXV58+iurpahgwZIjk5OfLUU09JRESELF++XF5++WVZuXKlhIWFySOPPOJR1vXXXy+JiYny0ksvyaJFiyQlJUXOO+882bdvn3zyyScye/Zsr/ZkcAOong0g1wvf1wuRwK8b/ujzzci5554rL7zwgstxd955p4SFhfn05RI5uYtx5MiRotVqffpiHT58WEwmk8yaNUvMZrMMGDBALrjgAlm6dKnMmjVLgoODXa4Y7ixfvlyGDx8uN998s4wbN06ysrJkzJgx8vTTT0tRUZFER0d32cV6KoG+idrUqVPlr3/9a5fhnQ3JmDFjfPosvv76a7nwwgvl4osvlm+++cbnFf3SSy+VrKwsERFJSUnpcr7Jhg0bZOLEiR7ndTa7Iic/a41G4/R5VlVVyRlnnOFR1vHjx+W2226T4OBg0Wq1YjQaxWg0ilarleDgYLn99tvl+PHjHtf2P//zP27/kbFYLJKYmOjzetHW1ia33nqrTJw4UXQ6nU+fhcjJf3jPO++8LocYoqKiPD68JXLyMMe8efNk0KBBkpKSIt9//71kZGQ4DldMnDhRDhw44HEeN4Dq2QC6Wy80Gg3XCw8Eet3wR59vRtasWSPz5893O/7222/36njsz3311VeyZcsW+eGHH3ya/7vvvpP77rtPYmJixGg0SnBwsIwdO1ZSU1NP+cwAVzo6OuShhx6SSy65RNasWSN2u13+/Oc/S3R0tISHh8tvf/tbr+t8+OGHZdSoUY4vZ+ex6FGjRnn1D5CISGZmpttzTGw2m1x66aU+fxZ2u13WrFkjkZGRPq/oe/fulfDwcElLS5PVq1fLoEGD5Prrr5eHHnpI0tLSxGAwyLPPPutx3p133ikTJ06UBx98UBISEmTJkiUyadIkefPNN6WsrEymTp0qN954o1c1trS0yDvvvCMlJSVSUlIi77zzjstzek7n6NGjTofwfs5isZzy//498frrr8uyZcu8OpnTlSNHjsj7778v1dXVjt37gfD555/LRx99JDabzav52Bh23wZQo9H4tAFsaWmRyspKx3pRWVkZ0PXCbreLSODWi7vuuiug68XP96z5y9d1wx/94tJe8l8gbqJ24sQJtLW1ub2/xokTJ3D48GGvLz38qbq6Omzfvh1paWkYOnSo1/N//vnnWL58OUpLS/HDDz8AAIKCgnDuuefi3nvvxeWXX+5xVmtrK+655x7U1NRg5syZeOqpp/Dkk0/i/vvvh81mw+zZs7Fp0yaPLzskdQnU/WO+++47NDQ04Be/+IXL8ceOHcOuXbswe/Zsn2t944038O677yI7O9uv71tzczO++OIL2O12jBo1CiaTyeesn/viiy/Q1taGSZMmeXX7AVcCfe8n5nU/NiPks6+++go5OTkoLi7uc3kigiNHjsButyMiIsJxj4VAOH78OGw2GwYPHuzVfIG6X05/zAt0bZ13D+28Y+gnn3yCtWvXwmq14vrrr8eFF17ocVZP5RUUFKC9vd2vvJkzZ+Lss88OWH3+5gX63k/M8y/PLz22D4b6nN27d/u8+7g35tXX10t6eroieYG8X467vIaGhj6ZF+ja3nzzTQkODpZhw4aJ0WiUN998U4YPHy7Jycly4YUXik6n63L5P/O6Jy/Q935inn95/mAzQm65u3lS5+uJJ54IyM2Y1JJ3Oko2S4G8X05/ywt0bUlJSXL//feLyMmT2IcOHSq///3vHeOzsrJk3rx5zOuBvEDf+4l5/uX5g80IuRXomyepPU/NzVKg75fTn/ICXVtoaKjjzsMdHR0SFBTkdN+Rjz76SEaOHMm8HsoL5L2fmOd/nq/YjJBbo0ePli1btrgd/8EHH3j1j7ja89TcLAX6fjn9KS/QtYWGhjpd7TFo0CD5/PPPHe8PHTokRqOReT2UJxK4ez8xLzB5vvD+OdLUb8TFxaGurs7teI1GA/Hi/Ge1540aNQqbN2+G3W53+dq1a5fHWYHOmzRpEnbu3Nll+Lp163DZZZd5/KCt/pgX6NpMJhM+++wzx/uamhqnh9jV19dj1KhRzOuhPAAYNGgQnn/+eWRnZyM5OdnjB9kxr3vyfMFmhNy69957MXPmTLfjJ0yYgHfffbfP5Km5Wbriiivw5z//2eW4devW4dprr/Wqtv6UF+jabr/9dqd/rKdMmeJ0Keqbb77p1dUlzPMv76euueYa7Ny5E5s3b/brFgHMC0yeN3hpL9H/989//hOtra24+OKLXY5vbW3Fzp07Pb7fQ6DziIj6KjYjREREpCgepiEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJF/T8QbGgbJzIxmgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "es\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGyCAYAAAA2+MTKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABC7ElEQVR4nO3de1yUdf7//+fMCINkhGKKEYp2UrcSgyCs9dBqVHawbcuiwmXLjnzK5rOZlIpmm221Zgc228rObG7larsWZpS1rSSrprWlHTVKA+1TgcHXYWTevz/6OTUBNjMMXBfwuN9uc6O5Ds/rBTOX16vr6DDGGAEAAFjEaXUBAACge6MZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAluphdQGh8Pv92rFjhw488EA5HA6rywEAACEwxmj37t065JBD5HS2vv+jUzQjO3bsUGpqqtVlAACACHz++ec69NBDWx3fKZqRAw88UJK0detW9enTp815Pp9PL7/8sk455RTFxMTYJos8e+XZuTby+GzJ65g8O9fWGfLq6uqUmpoa2I63plM0I/sOzRx44IFKSEhoc57P51N8fLwSEhKi8kWNVhZ59sqzc23k8dmS1zF5dq6tM+Tt83OnWHACKwAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsFTYzcgbb7yhM888U4cccogcDoeWLVv2s/OsXr1axx13nNxutw4//HA99thjEZQKAAC6orCbkfr6eo0YMUIlJSUhTb9161ZNnDhR48aN08aNGzVt2jRddtllWrlyZdjFAgCArifsp/aedtppOu2000KeftGiRRo8eLD+9Kc/SZKGDRumN998U3fffbdyc3PDXTwAAOhiwm5GwlVRUaHx48cHDcvNzdW0adNancfr9crr9Qbe19XVSfr+0cY+n6/NNe3LsFsWefbKs3Nt5Nkni7yunWfn2jpT3s9xGGNMpAtxOBz6+9//rkmTJrU6zZFHHqmCggIVFRUFhr344ouaOHGiGhoa1LNnz2bzzJkzR3Pnzm02vLS0VPHx8ZGWCwAAOlBDQ4Py8vJUW1urhISEVqdr9z0jkSgqKpLH4wm8r6urU2pqqsaNG6ekpKQ25/t8Pq1atUoTJkxQTEyMbbLIs1eenWsjr2t/thm3lGlepl+z1jnl9TvanOd2GvJskNUd85q8rpCma/dmJDk5WTU1NUHDampqlJCQ0OJeEUlyu91yu93NhsfExERlRW+PPDvXRp59ssiLbl7ajBURZbhdRndkSSP/8Kq8TVH4Bzzqed9neP2OqOTtQ549srpTnj/EjHZvRnJycvTiiy8GDVu1apVycnLae9EAOkBrDcG+DfTRc1ZGdYMfrTwA9hF2M/Ldd9/p448/DrzfunWrNm7cqD59+mjgwIEqKirS9u3b9cQTT0iSrrzySt1///2aPn26fve73+nVV1/V3/72N61YEdn/0QCIzM/tRWBjD8AqYTcj69at07hx4wLv953bMWXKFD322GP68ssvVVVVFRg/ePBgrVixQtdff73uueceHXrooXr44Ye5rBcIQ0uNBM0DgK4i7GZk7Nix2t8FOC3dXXXs2LF6++23w10U0CXRPABAMFteTQN0JqGeRLlvTwYAIBjNCPAz2JMBAO2LZgTd2v72arAnAwA6Bs0IuoVI70cBAGh/NCPoUo6es5IrTACgk6EZQafV8uWuFhQCAGgTmhF0KhxuAYCuh2YEtkTTAQDdB80IbIUmBAC6H5oR2AInnAJA90UzAkvs2wPCvTwAADQj6DAcggEAtIRmBO2KBgQA8HNoRtAuaEIAAKGiGUHU0IAAACJBM4I2oQEBALQVzQgiwqW4AIBocVpdAAAA6N7YM4KQcF8QAEB7oRlBqzgfBADQEThMAwAALMWeETTDHhEAQEeiGYEkGhAAgHU4TAMAACwVUTNSUlKitLQ0xcXFKTs7W5WVla1O6/P5dMstt+iwww5TXFycRowYobKysogLRvSkzVgReAEAYJWwm5ElS5bI4/GouLhYGzZs0IgRI5Sbm6udO3e2OP3MmTP14IMP6r777tP777+vK6+8Uuecc47efvvtNhcPAAA6v7CbkQULFmjq1KkqKCjQ8OHDtWjRIsXHx2vx4sUtTv/kk0/qpptu0umnn64hQ4boqquu0umnn64//elPbS4e4WNvCADAbsI6gbWxsVHr169XUVFRYJjT6dT48eNVUVHR4jxer1dxcXFBw3r27Kk333yz1eV4vV55vd7A+7q6OknfH/Lx+XzhlNyifRl2y+qIPLfLtCnP7TRBP9vKznl2ro08+2SR17Xz7FxbZ8hrCnGb4zDGhLzEHTt2KCUlRWvWrFFOTk5g+PTp0/X6669r7dq1zebJy8vTpk2btGzZMh122GEqLy/X2WefraampqCG48fmzJmjuXPnNhteWlqq+Pj4UMsFAAAWamhoUF5enmpra5WQkNDqdO1+ae8999yjqVOnaujQoXI4HDrssMNUUFDQ6mEdSSoqKpLH4wm8r6urU2pqqsaNG6ekpKQ21+Tz+bRq1SpNmDBBMTExtslqz7xZ65zy+tv+YDu302hepr9b5Nm5NvL4bMnrmDw719YZ8pq8rpCmC6sZ6du3r1wul2pqaoKG19TUKDk5ucV5Dj74YC1btkx79uzR//3f/+mQQw7RjBkzNGTIkFaX43a75Xa7mw2PiYmJyga6PfLsWFvajBWBZ8l4/Y6oPmW3O+XZuTby7JNFXtfOs3Ntds7zh5gR1gmssbGxysjIUHl5+Q8L8vtVXl4edNimJXFxcUpJSdHevXv1/PPP6+yzzw5n0QAAoIsK+zCNx+PRlClTlJmZqaysLC1cuFD19fUqKCiQJOXn5yslJUXz58+XJK1du1bbt29Xenq6tm/frjlz5sjv92v69OnR/U0AAECnFHYzMnnyZO3atUuzZ89WdXW10tPTVVZWpv79+0uSqqqq5HT+sMNlz549mjlzpj799FP16tVLp59+up588kklJiZG7ZfAD7hkFwDQ2UR0AmthYaEKCwtbHLd69eqg92PGjNH7778fyWIAAEA3wIPyugD2hgAAOjMelAcAACxFMwIAACzFYZpOikMzAICugj0jAADAUjQjAADAUhym6UQ4NAMA6IrYMwIAACxFMwIAACxFMwIAACzFOSM2x3kiAICujj0jAADAUjQjAADAUjQjAADAUpwzYlNHz1kpb5PD6jIAAGh37BkBAACWohkBAACWohkBAACW4pwRG0mbsUJul9EdWVZXAgBAx2HPCAAAsBTNCAAAsBTNCAAAsBTN
CAAAsBQnsFqMB+EBALo79owAAABLRdSMlJSUKC0tTXFxccrOzlZlZeV+p1+4cKGOOuoo9ezZU6mpqbr++uu1Z8+eiAoGAABdS9jNyJIlS+TxeFRcXKwNGzZoxIgRys3N1c6dO1ucvrS0VDNmzFBxcbE2b96sRx55REuWLNFNN93U5uIBAEDnF/Y5IwsWLNDUqVNVUFAgSVq0aJFWrFihxYsXa8aMGc2mX7NmjU488UTl5eVJktLS0nThhRdq7dq1rS7D6/XK6/UG3tfV1UmSfD6ffD5fuCU3sy/DDllulwl+7zRBP9uKPHtkkWevPDvXRp698uxcW2fIa3KFluMwxoS8xMbGRsXHx+u5557TpEmTAsOnTJmib7/9VsuXL282T2lpqa6++mq9/PLLysrK0qeffqqJEyfqkksuaXXvyJw5czR37twWs+Lj40MtFwAAWKihoUF5eXmqra1VQkJCq9OFtWfkq6++UlNTk/r37x80vH///tqyZUuL8+Tl5emrr77SSSedJGOM9u7dqyuvvHK/h2mKiork8XgC7+vq6pSamqpx48YpKSkpnJJb5PP5tGrVKk2YMEExMTEdnnX0nJWtjnM7jeZl+jVrnVNev6NNtZHXtjw710Yeny15HZNn59o6Q16T1xXSdO1+ae/q1at122236c9//rOys7P18ccf67rrrtO8efM0a9asFudxu91yu93NhsfExLS5eWivvHCyvE0//wF7/Y6QpgsVefbIIs9eeXaujTx75dm5Njvn+UPMCKsZ6du3r1wul2pqaoKG19TUKDk5ucV5Zs2apUsuuUSXXXaZJOmYY45RfX29Lr/8ct18881yOrm6GACA7iysTiA2NlYZGRkqLy8PDPP7/SovL1dOTk6L8zQ0NDRrOFyu73fbhHG6CgAA6KLCPkzj8Xg0ZcoUZWZmKisrSwsXLlR9fX3g6pr8/HylpKRo/vz5kqQzzzxTCxYs0MiRIwOHaWbNmqUzzzwz0JQAAIDuK+xmZPLkydq1a5dmz56t6upqpaenq6ysLHBSa1VVVdCekJkzZ8rhcGjmzJnavn27Dj74YJ155pn6wx/+EL3fohPgtu8AALQsohNYCwsLVVhY2OK41atXBy+gRw8VFxeruLg4kkUBAIAujrNHAQCApWhGAACApWhGAACApWhGAACApWhGAACApdr9dvDdHZf0AgCwf+wZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAluLS3nbA5bwAAISOPSMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBS3PQsSo6es1LeJofVZQAA0OmwZwQAAFiKZgQAAFgqomakpKREaWlpiouLU3Z2tiorK1udduzYsXI4HM1eEydOjLhoAADQdYTdjCxZskQej0fFxcXasGGDRowYodzcXO3cubPF6ZcuXaovv/wy8Prvf/8rl8ul8847r83FAwCAzi/sZmTBggWaOnWqCgoKNHz4cC1atEjx8fFavHhxi9P36dNHycnJgdeqVasUHx9PMwIAACSFeTVNY2Oj1q9fr6KiosAwp9Op8ePHq6KiIqSMRx55RBdccIEOOOCAVqfxer3yer2B93V1dZIkn88nn88XTskt2pcRzSy307Q568c55FmfZ+fayLNPFnldO8/OtXWGvCZXaDkOY0zIS9yxY4dSUlK0Zs0a5eTkBIZPnz5dr7/+utauXbvf+SsrK5Wdna21a9cqKyur1enmzJmjuXPnNhteWlqq+Pj4UMsFAAAWamhoUF5enmpra5WQkNDqdB16n5FHHnlExxxzzH4bEUkqKiqSx+MJvK+rq1NqaqrGjRunpKSkNtfh8/m0atUqTZgwQTExMVHJmrXOKa+/7fcZcTuN5mX6ybNBnp1rI4/PlryOybNzbZ0hr8nrCmm6sJqRvn37yuVyqaamJmh4TU2NkpOT9ztvfX29nnnmGd1yyy0/uxy32y23291seExMTJubh2jmpc1YIbfL6I4syet3RPWmZ+TZJ8/OtZFnnyzyunaenWuzc54/xIywTmCNjY1VRkaGysvLf1iQ36/y8vKgwzYtefbZZ+X1enXxxReHs0gAANDFhX2YxuPxaMqUKcrMzFRWVpYWLlyo+vp6FRQUSJLy8/OVkpKi+fPnB833yCOPaNKkSVE5zAIAALqOsJuRyZMna9euXZo9e7aqq6uVnp6usrIy9e/fX5JUVVUlpzN4h8sHH3ygN998Uy+//HJ0qgYAAF1GRCewFhYWqrCwsMVxq1evbjbsqKOOUhgX7QAAgG6EZ9MAAABL0YwAAABL0YwAAABL0YwAAABL0YwAAABL0YwAAABLdeizabqCtBkrrC4BAIAuhT0jAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjwoLwQ8HA8AgPbDnhEAAGApmhEAAGApmhEAAGApmhEAAGCpiJqRkpISpaWlKS4uTtnZ2aqsrNzv9N9++62uueYaDRgwQG63W0ceeaRefPHFiAoGAABdS9hX0yxZskQej0eLFi1Sdna2Fi5cqNzcXH3wwQfq169fs+kbGxs1YcIE9evXT88995xSUlL02WefKTExMRr1AwCATi7sZmTBggWaOnWqCgoKJEmLFi3SihUrtHjxYs2YMaPZ9IsXL9bXX3+tNWvWKCYmRpKUlpbWtqoBAECXEVYz0tjYqPXr16uoqCgwzOl0avz48aqoqGhxnhdeeEE5OTm65pprtHz5ch188MHKy8vTjTfeKJfL1eI8Xq9XXq838L6urk6S5PP55PP5wim5RfsyQs1yu0zr45wm6GdbkWefPDvXRp59ssjr2nl2rq0z5DXtZ/v5Yw5jTMhL3LFjh1JSUrRmzRrl5OQEhk+fPl2vv/661q5d22yeoUOHatu2bbrooot09dVX6+OPP9bVV1+ta6+9VsXFxS0uZ86cOZo7d26z4aWlpYqPjw+1XAAAYKGGhgbl5eWptrZWCQkJrU7X7ndg9fv96tevn/7yl7/I5XIpIyND27dv15133tlqM1JUVCSPxxN4X1dXp9TUVI0bN05JSUltrsnn82nVqlWaMGFC4NDR/hw9Z2Wr49xOo3mZfs1a55TX72hzbeTZJ8/OtZHHZ0tex+TZubbOkNfkbfkIyE+F1Yz07dtXLpdLNTU1QcNramqUnJzc4jwDBgxQTExM0CGZYcOGqbq6Wo2NjYqNjW02j9vtltvtbjY8JiYmpOYhVKHmeZt+/gPx+h0hTRcq8uyTZ+fayLNPFnldO8/Otdk5zx9iRliX9sbGxiojI0Pl5eU/LMjvV3l5edBhmx878cQT9fHHH8vv9weGffjhhxowYECLjQgAAOhewr7PiMfj0UMPPaTHH39cmzdv1lVXXaX6+vrA1TX5+flBJ7heddVV+vrrr3Xdddfpww8/1IoVK3Tbbbfpmmuuid5vAQAAOq2wzxmZPHmydu3apdmzZ6u6ulrp6ekqKyt
T//79JUlVVVVyOn/ocVJTU7Vy5Updf/31OvbYY5WSkqLrrrtON954Y/R+CwAA0GlFdAJrYWGhCgsLWxy3evXqZsNycnL01ltvRbIoS6XNWGF1CQAAdHk8mwYAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiqh9UF2E3ajBVWlwAAQLfCnhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGCpiJqRkpISpaWlKS4uTtnZ2aqsrGx12scee0wOhyPoFRcXF3HBAACgawm7GVmyZIk8Ho+Ki4u1YcMGjRgxQrm5udq5c2er8yQkJOjLL78MvD777LM2FQ0AALqOsJuRBQsWaOrUqSooKNDw4cO1aNEixcfHa/Hixa3O43A4lJycHHj179+/TUUDAICuI6w7sDY2Nmr9+vUqKioKDHM6nRo/frwqKipane+7777ToEGD5Pf7ddxxx+m2227TL37xi1an93q98nq9gfd1dXWSJJ/PJ5/PF07JLdqX0VKW22XCynI7TdDPtiLPPnl2ro08+2SR17Xz7FxbZ8hrCnGb6jDGhLzEHTt2KCUlRWvWrFFOTk5g+PTp0/X6669r7dq1zeapqKjQRx99pGOPPVa1tbW666679MYbb+i9997ToYce2uJy5syZo7lz5zYbXlpaqvj4+FDLBQAAFmpoaFBeXp5qa2uVkJDQ6nTt/myanJycoMZl1KhRGjZsmB588EHNmzevxXmKiork8XgC7+vq6pSamqpx48YpKSmpzTX5fD6tWrVKEyZMUExMTNC4o+esDCvL7TSal+nXrHVOef2ONtdGnn3y7FwbeXy25HVMnp1r6wx5TV5XSNOF1Yz07dtXLpdLNTU1QcNramqUnJwcUkZMTIxGjhypjz/+uNVp3G633G53i/P+tHloi5byvE2R/fG9fkfE85Jn7zw710aefbLI69p5dq7Nznn+EDPCOoE1NjZWGRkZKi8v/2FBfr/Ky8uD9n7sT1NTk959910NGDAgnEUDAIAuKuzDNB6PR1OmTFFmZqaysrK0cOFC1dfXq6CgQJKUn5+vlJQUzZ8/X5J0yy236IQTTtDhhx+ub7/9Vnfeeac+++wzXXbZZdH9TQAAQKcUdjMyefJk7dq1S7Nnz1Z1dbXS09NVVlYWuFy3qqpKTucPO1y++eYbTZ06VdXV1erdu7cyMjK0Zs0aDR8+PHq/BQAA6LQiOoG1sLBQhYWFLY5bvXp10Pu7775bd999dySLAQAA3QDPpgEAAJaiGQEAAJaiGQEAAJZq95uedQZpM1ZYXQIAAN0We0YAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClaEYAAIClImpGSkpKlJaWpri4OGVnZ6uysjKk+Z555hk5HA5NmjQpksUCAIAuKOxmZMmSJfJ4PCouLtaGDRs0YsQI5ebmaufOnfudb9u2bfr973+vX/7ylxEXCwAAup6wm5EFCxZo6tSpKigo0PDhw7Vo0SLFx8dr8eLFrc7T1NSkiy66SHPnztWQIUPaVDAAAOhaeoQzcWNjo9avX6+ioqLAMKfTqfHjx6uioqLV+W655Rb169dPl156qf71r3/97HK8Xq+8Xm/gfV1dnSTJ5/PJ5/OFU3KL9mXs++l2mYiz3E4T9LOtyLNPnp1rI88+WeR17Tw719YZ8ppC3L46jDEhL3HHjh1KSUnRmjVrlJOTExg+ffp0vf7661q7dm2zed58801dcMEF2rhxo/r27avf/va3+vbbb7Vs2bJWlzNnzhzNnTu32fDS0lLFx8eHWi4AALBQQ0OD8vLyVFtbq4SEhFanC2vPSLh2796tSy65RA899JD69u0b8nxFRUXyeDyB93V1dUpNTdW4ceOUlJTU5rp8Pp9WrVqlWeuc8vodbcpyO43mZfqjkkWevfLsXBt5fLbkdUyenWvrDHlNXldI04XVjPTt21cul0s1NTVBw2tqapScnNxs+k8++UTbtm3TmWeeGRjm9/u/X3CPHvrggw902GGHNZvP7XbL7XY3Gx4TE6OYmJhwSt4vr98hb1Pb/9jRziLPXnl2ro08+2SR17Xz7FybnfP8IWaEdQJrbGysMjIyVF5e/sOC/H6Vl5cHHbbZZ+jQoXr33Xe1cePGwOuss87SuHHjtHHjRqWmpoazeAAA0AWFfZjG4/FoypQpyszMVFZWlhYuXKj6+noVFBRIkvLz85WSkqL58+crLi5ORx99dND8iYmJktRsOAAA6J7CbkYmT56sXbt2afbs2aqurlZ6errKysrUv39/SVJVVZWcTm7sCgAAQhPRCayFhYUqLCxscdzq1av3O+9jjz0WySIBAEAXxS4MAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgqR5WF9CR0maskCS5XUZ3ZFlcDAAAkMSeEQAAYDGaEQAAYKmImpGSkhKlpaUpLi5O2dnZqqysbHXapUuXKjMzU4mJiTrggAOUnp6uJ598MuKCAQBA1xJ2M7JkyRJ5PB4VFxdrw4YNGjFihHJzc7Vz584Wp+/Tp49uvvlmVVRU6J133lFBQYEKCgq0cuXKNhcPAAA6v7BPYF2wYIGmTp2qgoICSdKiRYu0YsUKLV68WDNmzGg2/dixY4PeX3fddXr88cf15ptvKjc3t8VleL1eeb3ewPu6ujpJks/nk8/nC7fkALfLfP/TGfyzLaKZRZ698uxcG3n2ySKva+fZubbOkNfkCi3HYYwJeYmNjY2Kj4/Xc889p0mTJgWGT5kyRd9++62WL1++3/mNMXr11Vd11llnadmyZZowYUKL082ZM0dz585tNry0tFTx8fGhlgsAACzU0NCgvLw81dbWKiEhodXpwtoz8tVXX6mpqUn9+/cPGt6/f39t2bKl1flqa2uVkpIir9crl8ulP//5z602IpJUVFQkj8cTeF9XV6fU1FSNGzdOSUlJ4ZQc5Og53x8acjuN5mX6NWudU16/I+K8aGeRZ688O9dGHp8teR2TZ+faOkNek9cV0nQdcp+RAw88UBs3btR3332n8vJyeTweDRkypNkhnH3cbrfcbnez4TExMYqJiYm4Dm9T8B/W63c0GxZxdhSzyLNXnp1rI88+WeR17Tw712bnPH+IGWE1I3379pXL5VJNTU3Q8JqaGiUnJ7c6n9Pp1OGHHy5JSk9P1+bNmzV//vxWmxEAANB9hHU1TWxsrDIyMlReXh4Y5v
f7VV5erpycnJBz/H5/0AmqAACg+wr7MI3H49GUKVOUmZmprKwsLVy4UPX19YGra/Lz85WSkqL58+dLkubPn6/MzEwddthh8nq9evHFF/Xkk0/qgQceiO5vAgAAOqWwm5HJkydr165dmj17tqqrq5Wenq6ysrLASa1VVVVyOn/Y4VJfX6+rr75aX3zxhXr27KmhQ4fqqaee0uTJk6P3WwAAgE4rohNYCwsLVVhY2OK41atXB72/9dZbdeutt0ayGAAA0A3wbBoAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGCpHlYX0BHSZqywugQAANAK9owAAABL0YwAAABL0YwAAABL0YwAAABL0YwAAABLRdSMlJSUKC0tTXFxccrOzlZlZWWr0z700EP65S9/qd69e6t3794aP378fqcHAADdS9jNyJIlS+TxeFRcXKwNGzZoxIgRys3N1c6dO1ucfvXq1brwwgv12muvqaKiQqmpqTrllFO0ffv2NhcPAAA6v7CbkQULFmjq1KkqKCjQ8OHDtWjRIsXHx2vx4sUtTv/000/r6quvVnp6uoYOHaqHH35Yfr9f5eXlbS4eAAB0fmHd9KyxsVHr169XUVFRYJjT6dT48eNVUVERUkZDQ4N8Pp/69OnT6jRer1derzfwvq6uTpLk8/nk8/nCKVmS5HaZ4PdOE/SzLaKZRZ698uxcG3n2ySKva+fZubbOkNfkCi3HYYwJeYk7duxQSkqK1qxZo5ycnMDw6dOn6/XXX9fatWt/NuPqq6/WypUr9d577ykuLq7FaebMmaO5c+c2G15aWqr4+PhQywUAABZqaGhQXl6eamtrlZCQ0Op0HXo7+Ntvv13PPPOMVq9e3WojIklFRUXyeDyB93V1dUpNTdW4ceOUlJQU9nKPnrMy6L3baTQv069Z65zy+h1h57VXFnn2yrNzbeTx2ZLXMXl2rq0z5DV5XSFNF1Yz0rdvX7lcLtXU1AQNr6mpUXJy8n7nveuuu3T77bfrlVde0bHHHrvfad1ut9xud7PhMTExiomJCadkSZK3qeU/qNfvaHVc2MuIYhZ59sqzc23k2SeLvK6dZ+fa7JznDzEjrBNYY2NjlZGREXTy6b6TUX982Oan7rjjDs2bN09lZWXKzMwMZ5EAAKCLC/swjcfj0ZQpU5SZmamsrCwtXLhQ9fX1KigokCTl5+crJSVF8+fPlyT98Y9/1OzZs1VaWqq0tDRVV1dLknr16qVevXpF8VcBAACdUdjNyOTJk7Vr1y7Nnj1b1dXVSk9PV1lZmfr37y9JqqqqktP5ww6XBx54QI2NjfrNb34TlFNcXKw5c+a0rXoAANDpRXQCa2FhoQoLC1sct3r16qD327Zti2QRAACgm+DZNAAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI0IwAAwFI9rC6gvaTNWGF1CQAAIATsGQEAAJaiGQEAAJaiGQEAAJaiGQEAAJaiGQEAAJaiGQEAAJaiGQEAAJaiGQEAAJaKqBkpKSlRWlqa4uLilJ2drcrKylanfe+993TuuecqLS1NDodDCxcujLRWAADQBYXdjCxZskQej0fFxcXasGGDRowYodzcXO3cubPF6RsaGjRkyBDdfvvtSk5ObnPBAACgawm7GVmwYIGmTp2qgoICDR8+XIsWLVJ8fLwWL17c4vTHH3+87rzzTl1wwQVyu91tLhgAAHQtYT2bprGxUevXr1dRUVFgmNPp1Pjx41VRURG1orxer7xeb+B9XV2dJMnn88nn84WU4XaZ1sc5TdDPtohmFnn2yrNzbeTZJ4u8rp1n59o6Q17TfrbFP+YwxoS8xB07diglJUVr1qxRTk5OYPj06dP1+uuva+3atfudPy0tTdOmTdO0adP2O92cOXM0d+7cZsNLS0sVHx8farkAAMBCDQ0NysvLU21trRISElqdzpZP7S0qKpLH4wm8r6urU2pqqsaNG6ekpKSQMo6es7LVcW6n0bxMv2atc8rrd7Sp1mhmkWevPDvXRh6fLXkdk2fn2jpDXpPXFdJ0YTUjffv2lcvlUk1NTdDwmpqaqJ6c6na7Wzy/JCYmRjExMSFleJt+/o/o9TtCmi6k5UUxizx75dm5NvLsk0Ve186zc212zvOHmBHWCayxsbHKyMhQeXn5Dwvy+1VeXh502AYAACBUYR+m8Xg8mjJlijIzM5WVlaWFCxeqvr5eBQUFkqT8/HylpKRo/vz5kr4/6fX9998P/Pf27du1ceNG9erVS4cffngUfxUAANAZhd2MTJ48Wbt27dLs2bNVXV2t9PR0lZWVqX///pKkqqoqOZ0/7HDZsWOHRo4cGXh/11136a677tKYMWO0evXqtv8GAACgU4voBNbCwkIVFha2OO6nDUZaWprCuGAHAAB0MzybBgAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWMqWD8qLVNqMFVaXAAAAwsSeEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYCmaEQAAYKlO/9RentQLAEDnxp4RAABgKZoRAABgqYiakZKSEqWlpSkuLk7Z2dmqrKzc7/TPPvushg4dqri4OB1zzDF68cUXIyoWAAB0PWE3I0uWLJHH41FxcbE2bNigESNGKDc3Vzt37mxx+jVr1ujCCy/UpZdeqrfffluTJk3SpEmT9N///rfNxQMAgM4v7GZkwYIFmjp1qgoKCjR8+HAtWrRI8fHxWrx4cYvT33PPPTr11FN1ww03aNiwYZo3b56OO+443X///W0uHgAAdH5hXU3T2Nio9evXq6ioKDDM6XRq/PjxqqioaHGeiooKeTyeoGG5ublatmxZq8vxer3yer2B97W1tZKkr7/+WpKUPb88sl9g3zx+o4YGv3r4nGryOyJIaJ8s8uyVZ+fayOOzJa9j8uxcW2fI8+9tkCQZY/Y/oQnD9u3bjSSzZs2aoOE33HCDycrKanGemJgYU1paGjSspKTE9OvXr9XlFBcXG0m8ePHixYsXry7w+vzzz/fbX9jyPiNFRUVBe1O+/fZbDRo0SFVVVTrooIPanF9XV6fU1FR9/vnnSkhIsE0WefbKs3Nt5PHZktcxeXaurTPkGWO0e/duHXLIIfudLqxmpG/fvnK5XKqpqQkaXlNTo+Tk5BbnSU5ODmt6SXK73XK73c2GH3TQQVH54+yTkJAQtbxoZpFnrzw710aefbLI69p5dq7N7nmh7EQI6wTW2NhYZWRkqLz8h3M2/H6/ysvLlZOT0+I8OTk5QdNL0qpVq1qdHgAAdC9hH6bxeDyaMmWKMjMzlZWVpYULF
6q+vl4FBQWSpPz8fKWkpGj+/PmSpOuuu05jxozRn/70J02cOFHPPPOM1q1bp7/85S/R/U0AAECnFHYzMnnyZO3atUuzZ89WdXW10tPTVVZWpv79+0uSqqqq5HT+sMNl1KhRKi0t1cyZM3XTTTfpiCOO0LJly3T00UeHvEy3263i4uIWD91EIpp5dq6NPPtkkWevPDvXRp698uxcW2fIC5XDmJ+73gYAAKD98GwaAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKVveDv6rr77S4sWLVVFRoerqaknf38l11KhR+u1vf6uDDz7Y4goBa+17kGS0Lr+zc15tbW3QvwNteSRENLNgP3b+Hkc7L9rfZavXDdvtGfnPf/6jI488Uvfee68OOuggjR49WqNHj9ZBBx2ke++9V0OHDtW6devCzt27d682bdqklStXauXKldq0aZN8Pl9ENUYzS5Kqq6u1fPlyPfjgg3rwwQe1fPnywJfCDnnS91/UDz74QB988EHgKcpdOc+OVq1apdNPP129e/dWfHy84uPj1bt3b51++ul65ZVXulzeww8/rOHDh6tPnz4aPnx40H8/8sgjlmX91E+fMt5Wds6z43pr9++xndeL9siLWChP6+1I2dnZ5vLLLzd+v7/ZOL/fby6//HJzwgknhJzX1NRkbr75ZpOYmGgcDkfQKzEx0cycOdM0NTV1eJYxxnz33XfmoosuMi6Xy/To0cP069fP9OvXz/To0cO4XC5z8cUXm/r6esvyjDHmoYceMsOGDTNOpzPoNWzYMPPwww+HldUZ8owxxufzmY0bN5qysjJTVlZmNm7caBobGyPKilbeY489Znr06GEuuOAC8+ijj5oXX3zRvPjii+bRRx81F154oYmJiTFPPPFEl8m74447THx8vJkxY4Z57bXXzPvvv2/ef/9989prr5mioiJzwAEHmDvvvLPDs/Z5+eWXzWmnnWYSExMD37nExERz2mmnmVWrVoWV1Rny7Lre2v17bOf1oj3y2sJ2zUhcXJzZvHlzq+M3b95s4uLiQs674YYbzMEHH2wWLVpktm7dahoaGkxDQ4PZunWrefDBB02/fv3M9OnTOzzLGGMuvfRSc8QRR5iysjKzd+/ewPC9e/ealStXmiOPPNJcdtllluXZ/Ysf7bxoN5vRzDviiCPM/fff3+r4kpISc/jhh4dcm93zBg4caJYsWdLq+GeeecakpqZ2eJYx9t9gdacNoN2/x3ZeL9ojry1s14ykpaWZxx9/vNXxjz/+uBk0aFDIef379zdlZWWtji8rKzP9+vXr8CxjjElMTDT//ve/Wx3/5ptvmsTERMvy7P7Fj3ZetJvNaOa53W6zZcuWVsdv2bIlrCbd7nlxcXHm/fffb3X8e++9Z3r27NnhWcbYf4PVnTaAdv8e23m9aI+8trBdM3L//fcbt9ttrr32WrN8+XLz1ltvmbfeesssX77cXHvttaZnz56mpKQk5Lz4+HjzzjvvtDp+06ZN5oADDujwLGOMSUhIMP/5z39aHV9ZWWkSEhIsy7P7Fz/aedFuNqOZd9xxx5kbbrih1fHTp083xx13XMi12T3vl7/8pcnPzzc+n6/ZuL1795r8/HwzevToDs8yxv4brO60AbT799jO60V75LWFLZ9Ns2TJEt19991av369mpqaJEkul0sZGRnyeDw6//zzQ86aOHGi9u7dq6efflp9+/YNGvfVV1/pkksukcvl0j//+c8OzZKkiy66SJs3b9YjjzyikSNHBo17++23NXXqVA0dOlRPPfWUJXmjR4/W4MGD9cgjj6hHj+ALr5qamvS73/1O27Zt0+uvv94l8g444AC99dZbOuaYY1oc/84772jUqFH67rvvOjxv9erVOuOMMzRkyBCNHz8+8GDKmpoalZeX69NPP9WKFSs0evTokGqze94777yj3Nxc+Xw+jR49OijvjTfeUGxsrF5++eWQHrgZzSxJysjI0K9+9SvdcccdLY6/8cYb9corr2j9+vVdIs/O663dv8d2Xi/aI68tbNmM7OPz+fTVV19Jkvr27auYmJiwMz7//HOdfvrp2rJli4455pigP/a7776r4cOH65///KdSU1M7NEuSvvnmG+Xl5WnlypXq3bu3+vXrJ0nauXOnvv32W+Xm5qq0tFSJiYmW5Nn9ix/tvGg3m9HO27Ztmx544AG99dZbQZfg5eTk6Morr1RaWlpIOZ0lb/fu3XrqqadazMvLy1NCQoIlWXbfYHW3DaDdv8d2Xi/aIy9Stm5GosXv92vlypUt/rFPOeUUOZ2hX+Eczax9Nm/e3GLe0KFDw86Kdp7dv/jRzIt2sxntPNiH3TdYbADR2XSLZgQIVbSbzWjn7d27V++9914ga8CAARo2bFhEew07Q151dbXWrl0blJeVlaXk5GRLs2Avdv8e23m9aI+8SHSbZqSysrLFO7oef/zxlmY1NjZq2bJlLeadffbZio2NtTRPsv8X3w4rUnvz+/2aPXu2SkpKmt0c6qCDDlJhYaHmzp0bcnNj97z6+npdccUVeuaZZ+RwONSnTx9J0tdffy1jjC688EI9+OCDio+P79CsH7P7Bqs7bADt/j2283rRHnlt0iGnyVqopqbGnHTSScbhcJhBgwaZrKwsk5WVZQYNGmQcDoc56aSTTE1NTYdnGWPMRx99ZIYMGWLi4uLMmDFjzPnnn2/OP/98M2bMGBMXF2cOP/xw89FHH1mWZ/ebsrXHTd6MMWbt2rVm4cKFZsaMGWbGjBlm4cKFprKyMuycaObZ+bLj9siL5j1zon3/HTvfj6Y98uy83tr9e2zn9aI98tqiyzcj5557rsnJyWnxUrctW7aYUaNGmd/85jcdnmWMMePHjzdnn322qa2tbTautrbWnH322eaUU06xLM/uX/xo50W72Yxmnp0vO26PvGjeMyfa99+x+warO20A7f49tvN60R55bdHlm5FevXqZDRs2tDp+3bp1plevXh2eZYwxPXv2NO+++26r4995552wrt+Pdp7dv/jRzot2sxnNvGjf48buedG8Z060779j9w1Wd9oA2v17bOf1oj3y2sJ2D8qLNrfbrbq6ulbH7969O+QnKEYzS5ISExO1bdu2Vsdv27Yt5Mtw2yPP7/fv9xyT2NhY+f3+LpO3cuVKlZSU6Kijjmo27qijjtK9996rsrIyS/LGjh2r3//+94FL3X/sq6++0o033qixY8eGXJvd88444wxdfvnlevvtt5uNe/vtt3XVVVfpzDPP7PAs6fv1/JBDDml1/IABA1RfX99l8uy83tr9e2zn9aI98tqkQ1oeC1199dVm0KBBZunSpUGHL2pra83SpUtNWlqaKSws7PAsY4yZNWuW6d27t1mwYIHZtGmTqa6uNtXV1WbTpk1mwYIFpk+fPqa4uNiyvLy8PDNy5MgW9wZt2LDBZGRkmIsuuqjL5CUlJZnVq1e3Ov61114zSUlJluRVVVWZo48+2vTo0cOMHDnSnHrqqebUU081I0eOND169DDHHnusqaqqCrk2u+d9/fXX5tRTTzUOh8P06dPH
+    [notebook output omitted: base64-encoded matplotlib PNG figures, one per language; the interleaved stdout streams label the figures "fr", "it", and "ko"]
BihX//61+rWrZvFIwSsdfSLJEP18Ts755WVldX670BTvhIilFmwHzu/jkOdF+rXstX7hu3OjPz73//WKaecoocfflgnnniiRo4cqZEjR+rEE0/Uww8/rIEDB2rTpk1B5x45ckTbt2/X6tWrtXr1am3fvl3V1dWNGmMosySppKREK1eu1GOPPabHHntMK1eu9L8o7JAnffdC/eCDD/TBBx/4v0W5LefZ0Zo1azRu3Dh16tRJkZGRioyMVKdOnTRu3Di98cYbbS7viSee0ODBg9W5c2cNHjy41v9esmSJZVk/9uNvGW8qO+fZcb+1++vYzvtFc+Q1WiDf1tuSUlJSzPXXX298Pl+deT6fz1x//fXmzDPPDDivpqbG3H333SY6Oto4HI5aP9HR0Wb69OmmpqamxbOMMebbb781V111lXG5XKZdu3ame/fupnv37qZdu3bG5XKZq6++2lRUVFiWZ4wxjz/+uBk0aJBxOp21fgYNGmSeeOKJoLJaQ54xxlRXV5tt27aZ/Px8k5+fb7Zt22aqqqoalRWqvKeeesq0a9fOXHHFFebJJ580r776qnn11VfNk08+aa688koTFhZmnnnmmTaTd//995vIyEgzbdo089Zbb5kdO3aYHTt2mLfeestkZ2ebDh06mAceeKDFs456/fXXzQUXXGCio6P9r7no6GhzwQUXmDVr1gSV1Rry7Lrf2v11bOf9ojnymsJ2ZSQiIsLs3Lmzwfk7d+40ERERAefdeeedplu3bmbx4sVmz549prKy0lRWVpo9e/aYxx57zHTv3t1MnTq1xbOMMebaa681AwYMMPn5+ebIkSP+6UeOHDGrV682p5xyirnuuussy7P7Cz/UeaEum6HMGzBggFm0aFGD83Nzc03//v0DHpvd83r37m2WLVvW4PwXXnjBxMXFtXiWMfY/YB1PB0C7v47tvF80R15T2K6MxMfHm6effrrB+U8//bTp06dPwHk9evQw+fn5Dc7Pz8833bt3b/EsY4yJjo42//rXvxqcv379ehMdHW1Znt1f+KHOC3XZDGWe2+02u3btanD+rl27girpds+LiIgwO3bsaHD++++/b9q3b9/iWcbY/4B1PB0A7f46tvN+0Rx5TWG7MrJo0SLjdrvNrbfealauXGneeecd884775iVK1eaW2+91bRv397k5uYGnBcZGWnefffdBudv377ddOjQocWzjDEmKirK/Pvf/25wflFRkYmKirIsz+4v/FDnhbpshjLv9NNPN3feeWeD86dOnWpOP/30gMdm97yf//znJiMjw1RXV9eZd+TIEZORkWFGjhzZ4lnG2P+AdTwdAO3+OrbzftEceU1hy++mWbZsmR566CFt3rxZNTU1kiSXy6XExER5PB5dfvnlAWeNHz9eR44c0fPPP6+uXbvWmvfll1/qmmuukcvl0j/+8Y8WzZKkq666Sjt37tSSJUs0fPjwWvO2bt2qKVOmaODAgXruuecsyRs5cqT69u2rJUuWqF272h+8qqmp0W9+8xvt3btX69ataxN5HTp00DvvvKMhQ4bUO//dd9/ViBEj9O2337Z43tq1a3XhhReqX79+Gjt2rP+LKUtLS1VQUKBPPvlEq1at0siRIwMam93z3n33XaWlpam6ulojR46slff2228rPDxcr7/+ekBfuBnKLElKTEzUL37xC91///31zr/rrrv0xhtvaPPmzW0iz877rd1fx3beL5ojrylsWUaOqq6u1pdffilJ6tq1q8LCwoLO+OyzzzRu3Djt2rVLQ4YMqfXHfu+99zR48GD94x//UFxcXItmSdLXX3+t9PR0rV69Wp06dVL37t0lSQcOHNA333yjtLQ05eXlKTo62pI8u7/wQ50X6rIZ6ry9e/fq0Ucf1TvvvFPrI3ipqam68cYbFR8fH1BOa8k7dOiQnnvuuXrz0tPTFRUVZUmW3Q9Yx9sB0O6vYzvvF82R11i2LiOh4vP5tHr16nr/2Oedd56czsA/4RzKrKN27txZb97AgQODzgp1nt1f+KHMC3XZDHUe7MPuBywOgGhtjosyAgQq1GUz1HlHjhzR+++/78/q2bOnBg0a1Kizhq0hr6SkRBs3bqyVl5ycrJiYGEuzYC92fx3beb9ojrzGOG7KSFFRUb13dD3jjDMszaqqqtKKFSvqzbv44osVHh5uaZ5k/xe+HXak5ubz+TRz5kzl5ubWuTnUiSeeqKysLM2ePTvgcmP3vIqKCt1www164YUX5HA41LlzZ0nSV199JWOMrrzySj322GOKjIxs0awfsvsB63g4ANr9dWzn/aI58pqkRS6TtVBpaak5++yzjcPhMH369DHJyckmOTnZ9OnTxzgcDnP22Web0tLSFs8yxpiPPvrI9OvXz0RERJhRo0aZyy+/3Fx++eVm1KhRJiIiwvTv39989NFHluXZ/aZszXGTN2OM2bhxo1mwYIGZNm2amTZtmlmwYIEpKioKOieUeXb+2HFz5IXynjmhvv+One9H0xx5dt5v7f46tvN+0Rx5TdHmy8ill15qUlNT6/2o265du8yIESPMr371qxbPMsaYsWPHmosvvtiUlZXVmVdWVmYuvvhic95551mWZ/cXfqjzQl02Q5ln548dN0deKO+ZE+r779j9gHU8HQDt/jq2837RHHlN0ebLSMeOHc2WLVsanL9p0ybTsWPHFs8yxpj27dub9957r8H57777blCf3w91nt1f+KHOC3XZDGVeqO9xY/e8UN4zJ9T337H7Aet4OgDa/XVs5/2iOfKawnZflBdqbrdb5eXlDc4/dOhQwN+gGMosSYqOjtbevXsbnL93796AP4bbHHk+n++Y15iEh4fL5/O1mbzVq1crNzdXp556ap15p556qh5++GHl5+dbkjd69Gj99re/9X/U/Ye+/PJL3XXXXRo9enTAY7N73oUXXqjrr79eW7durTNv69atuummmzRhwoQWz5K+28979erV4PyePXuqoqKizeTZeb+1++vYzvtFc+Q1SYtUHgvdfPPNpk+fPmb58uW13r4oKyszy5cvN/Hx8SYrK6vFs4wxZsaMGaZTp05m/vz5Zvv27aakpMSUlJSY7du3m/nz55vOnTubnJwcy/LS09PN8OHD6z0btGXLFpOYmGiuuuqqNpPXpUsXs3bt2gbnv/XWW6ZLly6W5BUXF5vTTjvNtGvXzgwfPtycf/755vzzzzfDhw837dq1M0OHDjXFxcUBj83ueV999ZU5//zzjcPhMJ07dzYDBw40AwcONJ07dzZOp9NccMEF5uuvv27xLGOMGTdunDnvvPPMwYMH68w7ePCgOf/888348ePbTJ6d91u7v47tvF80R15TtPlP03i9Xt1+++1aunSpjhw54m/kVVVVateuna699lo99NBDAZ3RaCjL6/UqLCwsqKyj/vCHP2jhwoUqKSmRw+GQJBljFBMTo9tvv11Tp04N6vcNZZ7db8oW6rxbbrlFq1at0kMPPaRf/OIX/nsdlJeXq6CgQB6PRxdeeKEeeeQRS/Ls/rHj5rgHz65du+p8Mqyx98wJ1f137H4/muPt
5ox2fx3bfb+QQn+vq8Zo82XkqPLycm3evLnWHzsxMbFRN9cpLy/Xpk2bVFpaKknq0aOHkpKSmnSjnj179tQaW9++fRudFeo8O9+UTQrdjhnK4tocebAPux+wOACitTluykhzCg8P1/bt2zVo0CCrh4IQCGVxDXVeKO9xI3130KrvwOTz+fT555+rd+/ejco96pxzztGTTz6pPn36BLWe1+uV0+n03xPj448/1tKlS1VcXKw+ffro2muvDapgb9++XZs3b9bo0aPVr18/vf/++8rNzZXP59Mll1yitLS0oMYHe2G/aNx+Idln3zguysj//d//afPmzercubMGDx5ca97hw4f117/+VRkZGT+Z4/F46p2+cOFCXX311erSpYskaf78+QGNa8uWLerUqZP/xfPss89q8eLF/hdWVlaWrrjiioCyjlq0aJGKioo0btw4XXHFFXr22Wc1b948+Xw+/fKXv9Q999xT58upjqU5bqL2+eefKzo6Wh07dqw1vbq6WoWFhQF/Z0ZD+vXrp9WrV2vAgAFNyrGTAwcO6NJLL9W//vUv9e7du9ap9+LiYp111ll6+eWX/ae8f0p5ebmuu+46/f3vf1dUVJRuuOEG5eTkyOVy+XN79erl/6LKn/LKK6/UO/2Xv/ylFi5c6H9L4KKLLgoob/To0crKytKvfvUr/etf/9IvfvELnXrqqRo0aJA+/PBDffDBB3rjjTeUmpr6k1nLly/X5ZdfrujoaHm9Xv3tb3/TZZddpqSkJLlcLr3xxht65plnlJ6eHtDYjuIAaP0BkP2i8fuF1Hz7RqO0yJUpFvrggw/893VwOp1m5MiRZt++ff75JSUlxul0BpTlcDhMQkKCGT16dK0fh8NhzjjjDDN69GgzZsyYgMc2dOhQs2bNGmOMMY8//rhp3769ufXWW82jjz5qbr/9dtOxY0ezZMmSgPPmzJljTjjhBHPppZeamJgYc99995kuXbqYe++918ydO9d069bNzJw5M+C8UN9Ebf/+/eaMM84wTqfTuFwuc80115hDhw755wfzXBhjzMKFC+v9cblcJjs72/84UJ999lmti/7efvttk56ebs4++2xz1VVXmQ0bNgScddTf//53M2PGDLN+/XpjjDEFBQXmggsuMGlpaeaxxx4LOCfUHzu+9dZbzSmnnGJefPFF8/jjj5s+ffqY8ePHG6/Xa4z57rlwOBwB5x3dv358k60f/gTz3EZFRZkPP/zQGGPMqFGjzB133FFr/vTp081ZZ50VUNbpp59u7r33XmOMMX/5y19MdHS0ueeee/zzH3zwQZOQkBDw2EJ9P5qysjJz2WWXmYiICNO9e3czY8aMWvffCHa/WLlyZb0/LpfLLFq0yP84UKNGjTIvvviiMea7j9263W4zdOhQM2nSJDN8+HATGRkZ1L7x8ssvG5fLZbp06WI6duxo1qxZY6Kjo83YsWNNWlqacblc5vnnnw8oi/2i8fuFMaHfN5qizZeRiRMnmvHjx5uDBw+ajz76yIwfP9707dvXfPrpp8aY4Hb0efPmmb59+5qCgoJa09u1a2fef//9oMfWvn17s3fvXmOMMcOHDzd//vOfa81//vnnzeDBgwPOO/nkk83LL79sjDFm27ZtxuVymeeee84/f/ny5aZ///4B54X6JmoZGRkmJSXF/Pvf/zZr1qwxiYmJJikpyXz11VfGmMbt6CeddJKJj4+v9eNwOExsbKyJj483ffv2DTgvOTnZ/P3vfzfGGLNixQrjdDrNRRddZO666y5zySWXmLCwMP/8QCxevNi0a9fOJCYmmqioKPPss8+aE044wVx33XXmhhtuMO3btzcLFiwIKCvU97jp3bu3eeutt/yPDx48aJKTk815551nDh8+HPQB8OgnNH58EG7svtGhQwezc+dOY8x3983Ytm1brfm7d+8O+Pft0KGD2bNnjzHGGJ/PZ8LCwmrd++Hjjz8O6m/HAdA+B0D2i8bvF0fzQrlvNEWbLyPdu3ev9cf1+XzmxhtvNL179zYff/xx0C+uoqIic8opp5j//d//NVVVVcaYxr+wunTpYjZt2uQfZ30vrGBvena0ZBljTFhYmPnPf/7jf7x3714TGRkZVF4ob6LWq1cvs3HjRv/jw4cPmwkTJpiEhATz3//+N+jn4oYbbjAJCQlmx44dtaY3ZUf/5JNPjDHGpKSkmPvuu6/W/EceecQMHz484LzBgwf7C+abb75pIiIiTG5urn/+k08+aQYNGhRQVqg/dty+fXv/73pUeXm5SU1NNeecc4755JNPgnoujDFm/vz5Ji4urlZha+xzcc4555j777/fGGPMiBEjzNNPP11r/ksvvWR69+4dUFZMTIx/P/vqq6+Mw+GodcApKioyMTExAY+NA6B9DoDsF43fL4wJ/b7RFG2+jJxwwgl1DlbGGHPLLbeYk046ybz99ttBv7gOHTpkMjIyzNChQ817771nwsLCGvXCuvrqq821115rjDHmsssuM9OnT681f+7cuWbIkCEB5/Xt29e89tprxhhjPvzwQ+N0Os1f//pX//xVq1aZ+Pj4gPN69ux5zDMBr7zyiunZs2fAeR06dPD/P6yjqqurzcSJE83QoUPNu+++G/RzsXz5chMXF2ceeeQR/7TG7ugnnnii2b59uzHmu3J49H8ftXv37qDL3I/L4Q/L3Z49ewLOC/U9bk499VSzatWqOtMPHTpkUlNTzbBhw4J+LowxZuvWrWbw4MHm+uuvNxUVFY1+LjZs2GBOPPFEk5OTYx555BHTtWtXM336dPP888+bmTNnmujoaPOHP/whoKyrr77apKSkmOeee85MmDDBpKWlmTPPPNPs3LnT7Nq1y4waNSqoMxkcAO1zAGS/aPx+YUzo942maPNl5IwzzjDPPPNMvfNuueUWEx0d3agXlzHfnWLs0aOHcTqdjXph7du3z8THx5uRI0caj8dj2rdvb84++2wzZcoUM3LkSBMeHl7vjtGQ6dOnm27dupnrrrvO9O3b10ybNs307t3bPProo2bx4sUmLi6uzinWYwn1TdSGDBliXnrppTrTjxaS3r17N+q5+Pzzz80555xjzj//fPPFF180eke/6KKLzLRp04wxxqSlpdW53uTxxx83AwYMCDjvaNk15rvn2uFw1Ho+165da0466aSAsg4fPmxuvPFGEx4ebpxOp4mIiDARERHG6XSa8PBwc9NNN5nDhw8HPLb/+Z//afA/MuXl5SYlJaXR+0VlZaW54YYbzIABA4zL5WrUc2HMd//hPfPMM+u8xRAbGxvw21vGfPc2x7nnnms6duxo0tLSzDfffGOysrL8b1cMGDDA7N69O+A8DoD2OQA2tF84HA72iwCEet9oijZfRubOnWsuuOCCBuffdNNNQb0f+2OfffaZWbFihfn2228btf7XX39t7rrrLjN48GATERFhwsPDTZ8+fUx6evoxvzOgPjU1Neb3v/+9ufDCC83cuXONz+czf/nLX0xcXJzp0qWL+fWvfx30OO+77z7Ts2dP/4vz6HvRPXv2DOo/QMYYM3Xq1AavMamurjY
XXXRRo58Ln89n5s6da2JiYhq9o+/YscN06dLFZGRkmDlz5piOHTuaq6++2vz+9783GRkZxu12myeffDLgvFtuucUMGDDA3HvvvSY5OdlMnjzZDBw40Lz22msmPz/fDBkyxPzmN78JaoxlZWXmzTffNHl5eSYvL8+8+eab9V7T81O++uqrWm/h/Vh5efkx/99/IFauXGluv/32oC7mrM+BAwfMO++8YzZs2OA/vR8KH3/8sXnvvfdMdXV1UOtRDJvvAOhwOBp1ACwrKzMFBQX+/aKgoCCk+4XP5zPGhG6/uPXWW0O6X/z4zFpTNXbfaIrj4qO9aLpQ3ETtyJEjqqysbPD+GkeOHNG+ffuC/ujhD23evFnr169XRkaGOnXqFPT6H3/8saZPn65Vq1bp22+/lSS1a9dOZ5xxhu68805NnDgx4KyKigrdcccdKiws1IgRI/TII4/o4Ycf1t13363q6mqNGjVKy5YtC/hjh7CXUN0/5uuvv9b+/fv1s5/9rN75hw4d0pYtWzRq1KhGj/WVV17RW2+9pezs7Ca93g4ePKhPPvlEPp9PPXv2VHx8fKOzfuyTTz5RZWWlBg4cGNTtB+oT6ns/kdf8KCNotM8++0w5OTlaunRpm8szxujAgQPy+Xzq2rWr/x4LoXD48GFVV1frhBNOCGq9UN0v53jMC/XYjt499OgdQ3ft2qWFCxfK6/Xq6quv1jnnnBNwVkvlLViwQFVVVU3KGzFihE499dSQja+peaG+9xN5TctrkhY7B4M2Z9u2bY0+fdwa84qLi01mZqYleaG8X05Defv372+TeaEe22uvvWbCw8NN586dTUREhHnttddMt27dzNixY80555xjXC5XnY//k9c8eaG+9xN5TctrCsoIGtTQzZOO/jz00EMhuRmTXfJ+ipVlKZT3yzne8kI9ttTUVHP33XcbY767iL1Tp07md7/7nX/+tGnTzLnnnkteC+SF+t5P5DUtrykoI2hQqG+eZPc8O5elUN8v53jKC/XYoqKi/HcerqmpMe3atat135H33nvP9OjRg7wWygvlvZ/Ia3peY1FG0KBevXqZFStWNDh/69atQf1H3O55di5Lob5fzvGUF+qxRUVF1fq0R8eOHc3HH3/sf7x3714TERFBXgvlGRO6ez+RF5q8xgj+e6Rx3EhMTNTmzZsbnO9wOGSCuP7Z7nk9e/bU8uXL5fP56v3ZsmVLwFmhzhs4cKA2bdpUZ/qiRYt08cUXB/xFW8djXqjHFh8fr48++sj/uLCwsNaX2BUXF6tnz57ktVCeJHXs2FFPP/20srOzNXbs2IC/yI685slrDMoIGnTnnXdqxIgRDc7v37+/3nrrrTaTZ+eydMkll+gvf/lLvfMWLVqkK6+8MqixHU95oR7bTTfdVOs/1qeddlqtj6K+9tprQX26hLym5f3QFVdcoU2bNmn58uVNukUAeaHJCwYf7QX+v3/+85+qqKjQ+eefX+/8iooKbdq0KeD7PYQ6DwDaKsoIAACwFG/TAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAs9f8AwrEji6G13boAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "nl\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGyCAYAAAA2+MTKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABDU0lEQVR4nO3de1xUdf4/8NfMMMyIiigoKKGjaV7WCwZBWOulxUjNsm3LosJly658y+a7GWwqmK221RKWbLgmXSw2t3K1XQojClsXkhUvtXkpTaM0ENdkFL4OI/P+/eGPqYkZnRucA7yejwcPm3N5nTfOHM+7c/mMRkQERERERArRKl0AERERdW9sRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFBShfgCbvdjqNHj6J3797QaDRKl0NEREQeEBGcOnUKgwYNglbr/vxHp2hGjh49ipiYGKXLICIiIh988803uOiii9zO7xTNSO/evQEAhw4dQr9+/fzOs9lseP/993H11VdDr9erJot56spTc23M43vLvI7JU3NtnSHPYrEgJibGcRx3p1M0I62XZnr37o3Q0FC/82w2G0JCQhAaGhqQD2qgspinrjw118Y8vrfM65g8NdfWGfJaXegWC97ASkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESK8roZ+fjjjzF79mwMGjQIGo0GGzduvOA65eXluPTSS2EwGDB8+HC8/PLLPpRKREREXZHXzUhjYyMmTJiA/Px8j5Y/dOgQZs2ahWnTpmHXrl1YsGAB7rrrLmzevNnrYomIiKjr8fpbe2fMmIEZM2Z4vHxBQQGGDh2KP/7xjwCA0aNHY+vWrXj22WeRkpLi7eaJiIioi/G6GfFWZWUlkpOTnaalpKRgwYIFbtexWq2wWq2O1xaLBcC5rza22Wx+19SaobYs5qkrT821MU89Wczr2nlqrq0z5V2IRkTE141oNBr87W9/w5w5c9wuc8kllyA9PR1ZWVmOae+++y5mzZqFpqYm9OjRo806OTk5WLp0aZvpRUVFCAkJ8bVcIiIi6kBNTU1ITU1FQ0MDQkND3S7X7mdGfJGVlQWz2ex4bbFYEBMTg2nTpiE8PNzvfJvNhtLSUkyfPh16vV41WcxTV56aa2Me31vmdUyemmvrDHmtVzYupN2bkaioKNTV1TlNq6urQ2hoqMuzIgBgMBhgMBjaTNfr9QH5y2mPPDXXxjz1ZDFPXXlqro156spTc21qzvM0o92bkaSkJLz77rtO00pLS5GUlNTemyYiIqJ2YsosvuAydmuTR1leNyOnT5/GgQMHHK8PHTqEXbt2oV+/fhg8eDCysrJw5MgRvPrqqwCAe++9F6tWrcLChQvxm9/8Bh9++CH++te/orj4wr8EERFRVzI2ZzOeSjj3p7VF43eeQSeqzvOU183I9u3bMW3aNMfr1ns75s2bh5dffhnfffcdampqHPOHDh2K4uJiPPzww1i5ciUuuugivPjii3ysl4iIOpwn/zf/Y4E/2Psd0SV53YxMnToV53sAx9XoqlOnTsXOnTu93RQREXUxrpqBrvJ/9+Q7VT5NQ0RE6vHTBoIHewo0NiNERF0MmwfqbNiMEBGpwI8bCDYP1N2wGSEiagfe3ihJ1J2xGSEiuoALNRY8k0HkHzYjRNStmDKL2TwQqQybESLqkniZhKjzYDNCRKrHsxlEXRubESJSDZ7NIOqe2IwQUYdiw0FEP8VmhIjaRaC/EIyIui42I0Tks/Od5eAXghGRp9iMENEF8dIKEbUnNiNE5ISXVYioo7EZIepm3J3laH10loioo7EZIeqieGmFiDoLrdIFEBERUffGMyNEnRzv8SCizo7NCFEn4OqSC+/xIKKugpdpiIiISFE8M0KkIrzplIi6I54ZISIiIkXxzAhRB+MNp0REznhmhIiIiBTlUzOSn58Pk8kEo9GIxMREVFVVuV3WZrPh8ccfx8UXXwyj0YgJEyagpKTE54KJOgtTZrHTz9iczUqXRESkSl43I+vXr4fZbEZ2djZ27NiBCRMmICUlBceOHXO5/KJFi7B69Wo8//zz2LNnD+69917ccMMN2Llzp9/FExERUefn9T0jubm5mD9/PtLT0wEABQUFKC4uRmFhITIzM9ssv27dOjz22GOYOXMmAOC+++7DBx98gD/+8Y947bXX/CyfSHl8AoaIyD9eNSPNzc2orq5GVlaWY5pWq0VycjIqKytdrmO1WmE0Gp2m9ejRA1u3bnW7HavVCqvV6nhtsVgAnLvkY7PZvCnZpdYMtWUxT115nmYZdOJRnkErTn/6i3nqyGJe185Tc22dIa/Fw38fNSLi8RaPHj2K6OhoVFRUICkpyTF94cKF2LJlC7Zt29ZmndTUVOzevRsbN27ExRdfjLKyMlx//fVoaWlxajh+LCcnB0uXLm0zvaioCCEhIZ6WS0RERApqampCamoqGhoaEBoa6na5dn+0d+XKlZg/fz5GjRoFjUaDiy++GOnp6SgsLHS7TlZWFsxms+O1xWJBTEwMpk2bhvDwcL9rstlsKC0txfTp06HX61WTxTx15f04a+LvP/S7NoNWsCzejsXbtbDa/X+0l3nqyGJe185Tc22dIa/FqvNoOa+akYiICOh0OtTV1TlNr6urQ1RUlMt1+vfvj40bN+LMmTP473//i0GDBiEzMxPDhg1zux2DwQCDwdBmul6vD8gBqz3y1Fwb8/zPCuS4IFa7hnkqyVNzbcxTV56aa1Nznt3DDK+akeDgYMTFxaGsrAxz5sw5tyG7HWVlZcjIyDjvukajEdHR0bDZbHj77bdx8803e7Npog7x45tRW7+I7twjuRykjIiovXh9mcZsNmPevHmIj49HQkIC8vLy0NjY6Hi6Ji0tDdHR0VixYgUAYNu2bThy5AhiY2Nx5MgR5OTkwG63Y+HChYH9TYiIiKhT8roZmTt3Lurr67FkyRLU1tYiNjYWJSUliIyMBADU1NRAq/1h+JIzZ85g0aJF+Oqrr9CrVy/MnDkT69atQ1hYWMB+CSIiIuq8fLqBNSMjw+1lmfLycqfXU6ZMwZ49e3zZDFG74LggRETqwu+mISIiIkWxGSEiIiJFsRkhIiIiRbX7oGdESuG9IUREnQPPjBAREZGi2IwQERGRotiMEBERkaJ4zwh1ehzCnYioc+OZESIiIlIUmxEiIiJSFJsRIiIiUhTvGaFOgWOGEBF1XTwzQkRERIpiM0JERESKYjNCREREimIzQkRERIriDaykKrxRlYio++GZESIiIlIU
mxEiIiJSFJsRIiIiUhSbESIiIlIUb2ClDufuW3atLfyWXSKi7ohnRoiIiEhRbEaIiIhIUWxGiIiISFE+NSP5+fkwmUwwGo1ITExEVVXVeZfPy8vDyJEj0aNHD8TExODhhx/GmTNnfCqYiIiIuhavm5H169fDbDYjOzsbO3bswIQJE5CSkoJjx465XL6oqAiZmZnIzs7G3r17sXbtWqxfvx6/+93v/C6eiIiIOj+vn6bJzc3F/PnzkZ6eDgAoKChAcXExCgsLkZmZ2Wb5iooKXHHFFUhNTQUAmEwm3Hrrrdi2bZvbbVitVlitVsdri8UCALDZbLDZbN6W3EZrhtqyukueQSc//LdWnP70VyDz1Fwb89STxbyunafm2jpDXovOsxyNiHi8xebmZoSEhOCtt97CnDlzHNPnzZuHkydPYtOmTW3WKSoqwv3334/3338fCQkJ+OqrrzBr1izccccdbs+O5OTkYOnSpS6zQkJCPC2XiIiIFNTU1ITU1FQ0NDQgNDTU7XJenRk5fvw4WlpaEBkZ6TQ9MjIS+/btc7lOamoqjh8/jiuvvBIigrNnz+Lee+8972WarKwsmM1mx2uLxYKYmBhMmzYN4eHh3pTsks1mQ2lpKaZPnw69Xq+arK6WNzZn8wXzDFrBsng7Fm/Xwmr3f5yRQOapuTbm8b1lXsfkqbm2zpDXYtV5tFy7D3pWXl6O5cuX409/+hMSExNx4MABPPTQQ1i2bBkWL17sch2DwQCDwdBmul6vD8gBtT3y1FybUnneDGJmtWsCOuhZIPPUXBvz1JPFvK6dp+ba1Jxn9zDDq2YkIiICOp0OdXV1TtPr6uoQFRXlcp3FixfjjjvuwF133QUAGDduHBobG3H33Xfjscceg1bLp4uJiIi6M686geDgYMTFxaGsrMwxzW63o6ysDElJSS7XaWpqatNw6HTnTtt4cbsKERERdVFeX6Yxm82YN28e4uPjkZCQgLy8PDQ2NjqerklLS0N0dDRWrFgBAJg9ezZyc3MxceJEx2WaxYsXY/bs2Y6mhIiIiLovr5uRuXPnor6+HkuWLEFtbS1iY2NRUlLiuKm1pqbG6UzIokWLoNFosGjRIhw5cgT9+/fH7Nmz8fvf/z5wvwURERF1Wj7dwJqRkYGMjAyX88rLy503EBSE7OxsZGdn+7Ip6gR+/C28RERE3uLdo0RERKQoNiNERESkKDYjREREpCg2I0RERKQoNiNERESkKDYjREREpCg2I0RERKSodv+iPOoaWscSMegETyWc+0beQH4pExERdV88M0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIriCKzkpHWkVSIioo7CMyNERESkKDYjREREpCg2I0RERKQoNiNERESkKDYjREREpCg2I0RERKQon5qR/Px8mEwmGI1GJCYmoqqqyu2yU6dOhUajafMza9Ysn4smIiKirsPrZmT9+vUwm83Izs7Gjh07MGHCBKSkpODYsWMul9+wYQO+++47x89//vMf6HQ63HTTTX4XT0RERJ2f181Ibm4u5s+fj/T0dIwZMwYFBQUICQlBYWGhy+X79euHqKgox09paSlCQkLYjBAREREAL0dgbW5uRnV1NbKyshzTtFotkpOTUVlZ6VHG2rVrccstt6Bnz55ul7FarbBarY7XFosFAGCz2WCz2bwp2aXWDLVlqSHPoJPzz9eK05/+UnOemmtjnnqymNe189RcW2fIa7nAMaWVRkQ83uLRo0cRHR2NiooKJCUlOaYvXLgQW7ZswbZt2867flVVFRITE7Ft2zYkJCS4XS4nJwdLly5tM72oqAghISGelktEREQKampqQmpqKhoaGhAaGup2uQ79bpq1a9di3Lhx521EACArKwtms9nx2mKxICYmBtOmTUN4eLjfddhsNpSWlmL69OnQ6/WqyerIvLE5m33KM2gFy+LtWLxdC6td43d9as5Tc23M43vLvI7JU3NtnSGvxarzaDmvmpGIiAjodDrU1dU5Ta+rq0NUVNR5121sbMQbb7yBxx9//ILbMRgMMBgMbabr9fqAHKDbI0/NtbnKs7b49yGz2jV+Z3SWPDXXxjz1ZDGva+epuTY159k9zPDqBtbg4GDExcWhrKzshw3Z7SgrK3O6bOPKm2++CavVittvv92bTRIREVEX5/VlGrPZjHnz5iE+Ph4JCQnIy8tDY2Mj0tPTAQBpaWmIjo7GihUrnNZbu3Yt5syZE5DLLERERNR1eN2MzJ07F/X19ViyZAlqa2sRGxuLkpISREZGAgBqamqg1TqfcNm/fz+2bt2K999/PzBVExERUZfh0w2sGRkZyMjIcDmvvLy8zbSRI0fCi4d2iIiIqBvhd9MQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRojr0i/KoY5kyi2HQCZ5KOPfFeIH83gIiIqJA4ZkRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlJUkNIFkP9MmcVKl0BEROQznhkhIiIiRfnUjOTn58NkMsFoNCIxMRFVVVXnXf7kyZN44IEHMHDgQBgMBlxyySV49913fSqYiIiIuhavL9OsX78eZrMZBQUFSExMRF5eHlJSUrB//34MGDCgzfLNzc2YPn06BgwYgLfeegvR0dH4+uuvERYWFoj6iYiIqJPzuhnJzc3F/PnzkZ6eDgAoKChAcXExCgsLkZmZ2Wb5wsJCnDhxAhUVFdDr9QAAk8nkX9VERETUZXjVjDQ3N6O6uhpZWVmOaVqtFsnJyaisrHS5zjvvvIOkpCQ88MAD2LRpE/r374/U1FQ8+uij0Ol0LtexWq2wWq2O1xaLBQBgs9lgs9m8Kdml1gy1ZfmaZ9CJ+3lacfrTX90pT821MU89Wczr2nlqrq0z5LWc5/j0YxoR8XiLR48eRXR0NCoqKpCUlOSYvnDhQmzZsgXbtm1rs86oUaNw+PBh3Hbbbbj//vtx4MAB3H///XjwwQeRnZ3tcjs5OTlYunRpm+lFRUUICQnxtFwiIiJSUFNTE1JTU9HQ0IDQ0FC3y7X7o712ux0DBgzAn//8Z+h0OsTFxeHIkSN4+umn3TYjWVlZMJvNjtcWiwUxMTGYNm0awsPD/a7JZrOhtLQU06dPd1w6UkOWr3ljcza7nWfQCpbF27F4uxZWu8bv+rpTnpprYx7fW+Z1TJ6aa+sMeS1W11dAfsqrZiQiIgI6nQ51dXVO0+vq6hAVFeVynYEDB0Kv1ztdkhk9ejRqa2vR3NyM4ODgNusYDAYYDIY20/V6fUAO+O2Rp2Rt1pYLf2Csdo1Hy3mqO+WpuTbmqSeLeV07T821qTnP7mGGV4/2BgcHIy4uDmVlZT9syG5HWVmZ02WbH7viiitw4MAB2O12x7QvvvgCAwcOdNmIEBE
RUffi9TgjZrMZa9aswSuvvIK9e/fivvvuQ2Njo+PpmrS0NKcbXO+77z6cOHECDz30EL744gsUFxdj+fLleOCBBwL3WxAREVGn5fU9I3PnzkV9fT2WLFmC2tpaxMbGoqSkBJGRkQCAmpoaaLU/9DgxMTHYvHkzHn74YYwfPx7R0dF46KGH8OijjwbutyAiIqJOy6cbWDMyMpCRkeFyXnl5eZtpSUlJ+OSTT3zZFBEREXVx/G4aIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSVLt/Nw0FhimzWOkSiIiI2gXPjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRonxqRvLz82EymWA0GpGYmIiqqiq3y7788svQaDROP0aj0eeCiYiIqGvxuhlZv349zGYzsrOzsWPHDkyYMAEpKSk4duyY23VCQ0Px3XffOX6+/vprv4omIiKirsPrZiQ3Nxfz589Heno6xowZg4KCAoSEhKCwsNDtOhqNBlFRUY6fyMhIv4omIiKiriPIm4Wbm5tRXV2NrKwsxzStVovk5GRUVla6Xe/06dMYMmQI7HY7Lr30Uixfvhw/+9nP3C5vtVphtVodry0WCwDAZrPBZrN5U7JLrRlqywKAuMdLsCz+3J9Wu8Yx3aDzLc+gFac//dWd8tRcG/PUk8W8rp2n5to6Q16LzrMcjYh4vMWjR48iOjoaFRUVSEpKckxfuHAhtmzZgm3btrVZp7KyEl9++SXGjx+PhoYGPPPMM/j444/x+eef46KLLnK5nZycHCxdurTN9KKiIoSEhHhaLhERESmoqakJqampaGhoQGhoqNvlvDoz4oukpCSnxmXSpEkYPXo0Vq9ejWXLlrlcJysrC2az2fHaYrEgJiYG06ZNQ3h4uN812Ww2lJaWYvr06dDr9arJAlrPjNixeLvW6cyIrwxaYZ4Kspinrjw118Y8deWpubbOkNdi9ey0vlfNSEREBHQ6Herq6pym19XVISoqyqMMvV6PiRMn4sCBA26XMRgMMBgMLtcNxAG/PfICldX65lvtGlhb/P8g/DiXecpnMU9deWqujXnqylNzbWrOs3uY4dUNrMHBwYiLi0NZWdkPG7LbUVZW5nT243xaWlrw2WefYeDAgd5smoiIiLoory/TmM1mzJs3D/Hx8UhISEBeXh4aGxuRnp4OAEhLS0N0dDRWrFgBAHj88cdx+eWXY/jw4Th58iSefvppfP3117jrrrsC+5sQERFRp+R1MzJ37lzU19djyZIlqK2tRWxsLEpKShyP69bU1ECr/eGEy/fff4/58+ejtrYWffv2RVxcHCoqKjBmzJjA/RZERETUafl0A2tGRgYyMjJczisvL3d6/eyzz+LZZ5/1ZTNERETUDfC7aYiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRQUoX0F2ZMotdTjfoOrgQIiIihfHMCBERESmKzQgREREpis0IERERKYrNCBERESmKzQgREREpis0IERERKYrNCBERESnKp2YkPz8fJpMJRqMRiYmJqKqq8mi9N954AxqNBnPmzPFls0RERNQFed2MrF+/HmazGdnZ2dixYwcmTJiAlJQUHDt27LzrHT58GL/97W/x85//3OdiiYiIqOvxuhnJzc3F/PnzkZ6ejjFjxqCgoAAhISEoLCx0u05LSwtuu+02LF26FMOGDfOrYCIiIupavBoOvrm5GdXV1cjKynJM02q1SE5ORmVlpdv1Hn/8cQwYMAB33nkn/vnPf15wO1arFVar1fHaYrEAAGw2G2w2mzclu9SaoWSWQSeup2vF6U9/MU8dWcxTV56aa2OeuvLUXFtnyGtxc6z7KY2IeLzFo0ePIjo6GhUVFUhKSnJMX7hwIbZs2YJt27a1WWfr1q245ZZbsGvXLkRERODXv/41Tp48iY0bN7rdTk5ODpYuXdpmelFREUJCQjwtl4iIiBTU1NSE1NRUNDQ0IDQ01O1y7fpFeadOncIdd9yBNWvWICIiwuP1srKyYDabHa8tFgtiYmIwbdo0hIeH+12XzWZDaWkppk+fDr1er0jW2JzNLqcbtIJl8XYs3q6F1a7xqzbm+Zen5tqYx/eWeR2Tp+baOkNei9Wzb3/1qhmJiIiATqdDXV2d0/S6ujpERUW1Wf7gwYM4fPgwZs+e7Zhmt9vPbTgoCPv378fFF1/cZj2DwQCDwdBmul6v97t5aK88b7OsLed/k612zQWX8Qbz1JHFPHXlqbk25qkrT821qTnP7mGGVzewBgcHIy4uDmVlZT9syG5HWVmZ02WbVqNGjcJnn32GXbt2OX6uu+46TJs2Dbt27UJMTIw3myciIqIuyOvLNGazGfPmzUN8fDwSEhKQl5eHxsZGpKenAwDS0tIQHR2NFStWwGg0YuzYsU7rh4WFAUCb6URERNQ9ed2MzJ07F/X19ViyZAlqa2sRGxuLkpISREZGAgBqamqg1XJgVyIiIvKMTzewZmRkICMjw+W88vLy86778ssv+7JJIiIi6qJ4CoOIiIgUxWaEiIiIFMVmhIiIiBTFZoSIiIgUxWaEiIiIFMVmhIiIiBTFZoSIiIgUxWaEiIiIFMVmhIiIiBTl0wis5BlTZrHSJRAREakez4wQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGi2IwQERGRotiMEBERkaLYjBAREZGifGpG8vPzYTKZYDQakZiYiKqqKrfLbtiwAfHx8QgLC0PPnj0RGxuLdevW+VwwERERdS1eNyPr16+H2WxGdnY2duzYgQkTJiAlJQXHjh1zuXy/fv3w2GOPobKyEp9++inS09ORnp6OzZs3+108ERERdX5B3q6Qm5uL+fPnIz09HQBQUFCA4uJiFBYWIjMzs83yU6dOdXr90EMP4ZVXXsHWrVuRkpLichtWqxVWq9Xx2mKxAABsNhtsNpu3JbfRmtHeWQadeJ1n0IrTn/5injqymKeuPDXXxjx15am5ts6Q1+LhcVAjIh5vsbm5GSEhIXjrrbcwZ84cx/R58+bh5MmT2LRp03nXFxF8+OGHuO6667Bx40ZMnz7d5XI5OTlYunRpm+lFRUUICQnxtFwiIiJSUFNTE1JTU9HQ0IDQ0FC3y3l1ZuT48eNoaWlBZGSk0/TIyEjs27fP7XoNDQ2Ijo6G1WqFTqfDn/70J7eNCABkZWXBbDY7XlssFsTExGDatGkIDw/3pmSXbDYbSktLMX36dOj1+nbLGpvj/aUog1
awLN6Oxdu1sNo1ftXGPP/y1Fwb8/jeMq9j8tRcW2fIa7HqPFrO68s0vujduzd27dqF06dPo6ysDGazGcOGDWtzCaeVwWCAwWBoM12v1/vdPLRXnqssa4vvb6TVrvFrfeYFLk/NtTFPPVnM69p5aq5NzXl2DzO8akYiIiKg0+lQV1fnNL2urg5RUVFu19NqtRg+fDgAIDY2Fnv37sWKFSvcNiNERETUfXj1NE1wcDDi4uJQVlbmmGa321FWVoakpCSPc+x2u9MNqkRERNR9eX2Zxmw2Y968eYiPj0dCQgLy8vLQ2NjoeLomLS0N0dHRWLFiBQBgxYoViI+Px8UXXwyr1Yp3330X69atwwsvvBDY34SIiIg6Ja+bkblz56K+vh5LlixBbW0tYmNjUVJS4riptaamBlrtDydcGhsbcf/99+Pbb79Fjx49MGrUKLz22muYO3du4H4LIiIi6rR8uoE1IyMDGRkZLueVl5c7vX7iiSfwxBNP+LIZIiIi6gb43TRERESkKDYjREREpCg2I0RERKSoDhn0rDsYm7M5oAPOEBERdRc8M0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREimIzQkRERIpiM0JERESKYjNCREREivKpGcnPz4fJZILRaERiYiKqqqrcLrtmzRr8/Oc/R9++fdG3b18kJyefd3kiIiLqXrxuRtavXw+z2Yzs7Gzs2LEDEyZMQEpKCo4dO+Zy+fLyctx666346KOPUFlZiZiYGFx99dU4cuSI38UTERFR5+d1M5Kbm4v58+cjPT0dY8aMQUFBAUJCQlBYWOhy+ddffx33338/YmNjMWrUKLz44ouw2+0oKyvzu3giIiLq/IK8Wbi5uRnV1dXIyspyTNNqtUhOTkZlZaVHGU1NTbDZbOjXr5/bZaxWK6xWq+O1xWIBANhsNthsNm9Kdqk1I5BZBq34nfXjHOYpn6fm2pinnizmde08NdfWGfJadJ7laETE4y0ePXoU0dHRqKioQFJSkmP6woULsWXLFmzbtu2CGffffz82b96Mzz//HEaj0eUyOTk5WLp0aZvpRUVFCAkJ8bRcIiIiUlBTUxNSU1PR0NCA0NBQt8t5dWbEX08++STeeOMNlJeXu21EACArKwtms9nx2mKxICYmBtOmTUN4eLjfddhsNpSWlmL69OnQ6/UByVq8XQurXeN3bQatYFm8nXkqyFNzbczje8u8jslTc22dIa/FqvNoOa+akYiICOh0OtTV1TlNr6urQ1RU1HnXfeaZZ/Dkk0/igw8+wPjx48+7rMFggMFgaDNdr9f73Ty0V57VroG1xf83jnnqy1NzbcxTTxbzunaemmtTc57dwwyvbmANDg5GXFyc082nrTej/viyzU899dRTWLZsGUpKShAfH+/NJomIiKiL8/oyjdlsxrx58xAfH4+EhATk5eWhsbER6enpAIC0tDRER0djxYoVAIA//OEPWLJkCYqKimAymVBbWwsA6NWrF3r16hXAX4WIiIg6I6+bkblz56K+vh5LlixBbW0tYmNjUVJSgsjISABATU0NtNofTri88MILaG5uxq9+9SunnOzsbOTk5PhXPREREXV6Pt3AmpGRgYyMDJfzysvLnV4fPnzYl02ojimz2OV0g07wVEIHF0NERNSF8LtpiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUT41I/n5+TCZTDAajUhMTERVVZXbZT///HPceOONMJlM0Gg0yMvL87VWIiIi6oK8bkbWr18Ps9mM7Oxs7NixAxMmTEBKSgqOHTvmcvmmpiYMGzYMTz75JKKiovwumIiIiLoWr5uR3NxczJ8/H+np6RgzZgwKCgoQEhKCwsJCl8tfdtllePrpp3HLLbfAYDD4XTARERF1LUHeLNzc3Izq6mpkZWU5pmm1WiQnJ6OysjJgRVmtVlitVsdri8UCALDZbLDZbH7nt2a4yhqbs9nlOgad6yyDVpz+9Bfz1JOn5tqYp54s5nXtPDXX1hnyWnSe5WhExOMtHj16FNHR0aioqEBSUpJj+sKFC7FlyxZs27btvOubTCYsWLAACxYsOO9yOTk5WLp0aZvpRUVFCAkJ8bRcIiIiUlBTUxNSU1PR0NCA0NBQt8t5dWako2RlZcFsNjteWywWxMTEYNq0aQgPD/c732azobS0FNOnT4der3ea5+7MiDsGrWBZvB2Lt2thtWv8ro156slTc23M43vLvI7JU3NtnSGvxermssJPeNWMREREQKfToa6uzml6XV1dQG9ONRgMLu8v0ev1bZoHf7jKs7b49pdvtWt8Xpd56s5Tc23MU08W87p2npprU3Oe3cMMr25gDQ4ORlxcHMrKyn7YkN2OsrIyp8s2RERERJ7y+jKN2WzGvHnzEB8fj4SEBOTl5aGxsRHp6ekAgLS0NERHR2PFihUAzt30umfPHsd/HzlyBLt27UKvXr0wfPjwAP4qRERE1Bl53YzMnTsX9fX1WLJkCWpraxEbG4uSkhJERkYCAGpqaqDV/nDC5ejRo5g4caLj9TPPPINnnnkGU6ZMQXl5uf+/AREREXVqPt3AmpGRgYyMDJfzftpgmEwmePHADhEREXUz/G4aIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSFJsRIiIiUhSbESIiIlIUmxEiIiJSlE9flNdVjM3ZDGuLRukyiIiIujWeGSEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkV16RFYTZnFLqcbdIKnEjq4GCIiInKJZ0aIiIhIUT41I/n5+TCZTDAajUhMTERVVdV5l3/zzTcxatQoGI1GjBs3Du+++65PxRIREVHX43Uzsn79epjNZmRnZ2PHjh2YMGECUlJScOzYMZfLV1RU4NZbb8Wdd96JnTt3Ys6cOZgzZw7+85//+F08ERERdX5e3zOSm5uL+fPnIz09HQBQUFCA4uJiFBYWIjMzs83yK1euxDXXXINHHnkEALBs2TKUlpZi1apVKCgo8LP8c9zdG0JERETq51Uz0tzcjOrqamRlZTmmabVaJCcno7Ky0uU6lZWVMJvNTtNSUlKwceNGt9uxWq2wWq2O1w0NDQCAEydOuFw+6Gyjp7/CueXtgqYmO4JsWrTYNV6t255ZzFNXnpprYx7fW+Z1TJ6aa+sMefazT
QAAETn/guKFI0eOCACpqKhwmv7II49IQkKCy3X0er0UFRU5TcvPz5cBAwa43U52drYA4A9/+MMf/vCHP13g55tvvjlvf6HKR3uzsrKczqacPHkSQ4YMQU1NDfr06eN3vsViQUxMDL755huEhoaqJot56spTc23M43vLvI7JU3NtnSFPRHDq1CkMGjTovMt51YxERERAp9Ohrq7OaXpdXR2ioqJcrhMVFeXV8gBgMBhgMBjaTO/Tp09A/nJahYaGBiwvkFnMU1eemmtjnnqymNe189Rcm9rzPDmJ4NXTNMHBwYiLi0NZWZljmt1uR1lZGZKSklyuk5SU5LQ8AJSWlrpdnoiIiLoXry/TmM1mzJs3D/Hx8UhISEBeXh4aGxsdT9ekpaUhOjoaK1asAAA89NBDmDJlCv74xz9i1qxZeOONN7B9+3b8+c9/DuxvQkRERJ2S183I3LlzUV9fjyVLlqC2thaxsbEoKSlBZGQkAKCmpgZa7Q8nXCZNmoSioiIsWrQIv/vd7zBixAhs3LgRY8eO9XibBoMB2dnZLi/d+CKQeWqujXnqyWKeuvLUXBvz1JWn5to6Q56nNCIXet6GiIiIqP3wu2mIiIhIUWxGiIiISFFsRoiIiEhRbEaIiIhIUWxGiIiISFGqHA7++PHjKCwsRGVlJWprawGcG8l10qRJ+PWvf43+/fsrXCGRslq/SDJQj9+pOa+hocHp3wF/vhIikFmkPmr+HAc6L9CfZaX3DdWdGfn3v/+NSy65BM899xz69OmDyZMnY/LkyejTpw+ee+45jBo1Ctu3b/c69+zZs9i9ezc2b96MzZs3Y/fu3bDZbD7VGMgsAKitrcWmTZuwevVqrF69Gps2bXJ8KNSQB5z7oO7fvx/79+93fItyV85To9LSUsycORN9+/ZFSEgIQkJC0LdvX8ycORMffPBBl8t78cUXMWbMGPTr1w9jxoxx+u+1a9cqlvVTP/2WcX+pOU+N+63aP8dq3i/aI89nnnxbb0dKTEyUu+++W+x2e5t5drtd7r77brn88ss9zmtpaZHHHntMwsLCRKPROP2EhYXJokWLpKWlpcOzREROnz4tt912m+h0OgkKCpIBAwbIgAEDJCgoSHQ6ndx+++3S2NioWJ6IyJo1a2T06NGi1WqdfkaPHi0vvviiV1mdIU9ExGazya5du6SkpERKSkpk165d0tzc7FNWoPJefvllCQoKkltuuUVeeukleffdd+Xdd9+Vl156SW699VbR6/Xy6quvdpm8p556SkJCQiQzM1M++ugj2bNnj+zZs0c++ugjycrKkp49e8rTTz/d4Vmt3n//fZkxY4aEhYU5PnNhYWEyY8YMKS0t9SqrM+Spdb9V++dYzftFe+T5Q3XNiNFolL1797qdv3fvXjEajR7nPfLII9K/f38pKCiQQ4cOSVNTkzQ1NcmhQ4dk9erVMmDAAFm4cGGHZ4mI3HnnnTJixAgpKSmRs2fPOqafPXtWNm/eLJdcconcddddiuWp/YMf6LxAN5uBzBsxYoSsWrXK7fz8/HwZPny4x7WpPW/w4MGyfv16t/PfeOMNiYmJ6fAsEfUfsLrTAVDtn2M17xftkecP1TUjJpNJXnnlFbfzX3nlFRkyZIjHeZGRkVJSUuJ2fklJiQwYMKDDs0REwsLC5F//+pfb+Vu3bpWwsDDF8tT+wQ90XqCbzUDmGQwG2bdvn9v5+/bt86pJV3ue0WiUPXv2uJ3/+eefS48ePTo8S0T9B6zudABU++dYzftFe+T5Q3XNyKpVq8RgMMiDDz4omzZtkk8++UQ++eQT2bRpkzz44IPSo0cPyc/P9zgvJCREPv30U7fzd+/eLT179uzwLBGR0NBQ+fe//+12flVVlYSGhiqWp/YPfqDzAt1sBjLv0ksvlUceecTt/IULF8qll17qcW1qz/v5z38uaWlpYrPZ2sw7e/aspKWlyeTJkzs8S0T9B6zudABU++dYzftFe+T5Q5XfTbN+/Xo8++yzqK6uRktLCwBAp9MhLi4OZrMZN998s8dZs2bNwtmzZ/H6668jIiLCad7x48dxxx13QKfT4R//+EeHZgHAbbfdhr1792Lt2rWYOHGi07ydO3di/vz5GDVqFF577TVF8iZPnoyhQ4di7dq1CApyfvCqpaUFv/nNb3D48GFs2bKlS+T17NkTn3zyCcaNG+dy/qeffopJkybh9OnTHZ5XXl6Oa6+9FsOGDUNycrLjiynr6upQVlaGr776CsXFxZg8ebJHtak979NPP0VKSgpsNhsmT57slPfxxx8jODgY77//vkdfuBnILACIi4vDL37xCzz11FMu5z/66KP44IMPUF1d3SXy1Lzfqv1zrOb9oj3y/KHKZqSVzWbD8ePHAQARERHQ6/VeZ3zzzTeYOXMm9u3bh3Hjxjn9ZX/22WcYM2YM/vGPfyAmJqZDswDg+++/R2pqKjZv3oy+fftiwIABAIBjx47h5MmTSElJQVFREcLCwhTJU/sHP9B5gW42A513+PBhvPDCC/jkk0+cHsFLSkrCvffeC5PJ5FFOZ8k7deoUXnvtNZd5qampCA0NVSRL7Qes7nYAVPvnWM37RXvk+UrVzUig2O12bN682eVf9tVXXw2t1vMnnAOZ1Wrv3r0u80aNGuV1VqDz1P7BD2ReoJvNQOeReqj9gMUDIHU23aIZIfJUoJvNQOedPXsWn3/+uSNr4MCBGD16tE9nDTtDXm1tLbZt2+aUl5CQgKioKEWzSF3U/jlW837RHnm+6DbNSFVVlcsRXS+77DJFs5qbm7Fx40aXeddffz2Cg4MVzQPU/8FXw47U3ux2O5YsWYL8/Pw2g0P16dMHGRkZWLp0qcfNjdrzGhsbcc899+CNN96ARqNBv379AAAnTpyAiODWW2/F6tWrERIS0qFZP6b2A1Z3OACq/XOs5v2iPfL80iG3ySqorq5OrrzyStFoNDJkyBBJSEiQhIQEGTJkiGg0Grnyyiulrq6uw7NERL788ksZNmyYGI1GmTJlitx8881y8803y5QpU8RoNMrw4cPlyy+/VCxP7YOytccgbyIi27Ztk7y8PMnMzJTMzEzJy8uTqqoqr3MCmafmx47bIy+QY+YEevwdNY9H0x55at5v1f45VvN+0R55/ujyzciNN94oSUlJLh9127dvn0yaNEl+9atfdXiWiEhycrJcf/310tDQ0GZeQ0ODXH/99XL11Vcrlqf2D36g8wLdbAYyT82PHbdHXiDHzAn0+DtqP2B1pwOg2j/Hat4v2iPPH12+GenVq5fs2LHD7fzt27dLr169OjxLRKRHjx7y2WefuZ3/6aefevX8fqDz1P7BD3ReoJvNQOYFeowbtecFcsycQI+/o/YDVnc6AKr9c6zm/aI98vyhui/KCzSDwQCLxeJ2/qlTpzz+BsVAZgFAWFgYDh8+7Hb+4cOHPX4Mtz3y7Hb7ee8xCQ4Oht1u7zJ5mzdvRn5+PkaOHNlm3siRI/Hcc8+hpKREkbypU6fit7/9reNR9x87fvw4Hn30UUydOtXj2tSed+211+Luu+/Gzp0728zb
uXMn7rvvPsyePbvDs4Bz+/mgQYPczh84cCAaGxu7TJ6a91u1f47VvF+0R55fOqTlUdD9998vQ4YMkQ0bNjhdvmhoaJANGzaIyWSSjIyMDs8SEVm8eLH07dtXcnNzZffu3VJbWyu1tbWye/duyc3NlX79+kl2drZieampqTJx4kSXZ4N27NghcXFxctttt3WZvPDwcCkvL3c7/6OPPpLw8HBF8mpqamTs2LESFBQkEydOlGuuuUauueYamThxogQFBcn48eOlpqbG49rUnnfixAm55pprRKPRSL9+/WTUqFEyatQo6devn2i1WpkxY4Z8//33HZ4lIjJz5ky5+uqrpb6+vs28+vp6ueaaa2TWrFldJk/N+63aP8dq3i/aI88fXf5pGqvVigULFqCwsBBnz551dOTNzc0ICgrCnXfeiWeffdajMxrusqxWK/R6vVdZrf7whz9g5cqVqK2thUajAQCICKKiorBgwQIsXLjQq983kHlqH5Qt0HkPPPAAiouL8eyzz+IXv/iFY6wDi8WCsrIymM1mXHvttXj++ecVyVP7Y8ftMQbPvn372jwZ5uuYOYEaf0ft49F0t8EZ1f45Vvt+AQR+rCtfdPlmpJXFYkF1dbXTX3ZcXJxPg+tYLBZs374ddXV1AIDIyEjEx8f7NVDPoUOHnGobOnSoz1mBzlPzoGxA4HbMQDau7ZFH6qH2AxYPgNTZdJtmpD0FBwdj9+7dGD16tNKlUAAEsnENdF4gx7gBzh20XB2Y7HY7vv32WwwePNin3FZXXXUVXnrpJQwZMsSr9axWK7RarWNMjIMHD6KwsBA1NTUYMmQI7rzzTq8a7N27d6O6uhpTp07FsGHD8PnnnyM/Px92ux033HADUlJSvKqP1IX7hW/7BaCefaNbNCP/93//h+rqavTr1w9jxoxxmnfmzBn89a9/RVpa2gVzzGazy+krV67E7bffjvDwcABAbm6uR3Xt2LEDffv2dXx41q1bh4KCAscHKyMjA7fccotHWa1WrVqFqqoqzJw5E7fccgvWrVuHFStWwG6345e//CUef/zxNl9OdT7tMYjat99+i7CwMPTq1ctpus1mQ2VlpcffmeHOsGHDsHnzZowYMcKvHDU5duwYbrzxRvzrX//C4MGDnU6919TU4IorrsDbb7/tOOV9IRaLBXfddRf+/ve/IzQ0FPfccw+ys7Oh0+kcuYMGDXJ8UeWFvPPOOy6n//KXv8TKlSsdlwSuu+46j/KmTp2KjIwM/OpXv8K//vUv/OIXv8DIkSMxevRofPHFF9i/fz8++OADJCUlXTBrw4YNuPnmmxEWFgar1Yq//e1vuOmmmxAfHw+dTocPPvgAr776KlJTUz2qrRUPgMofALlf+L5fAO23b/ikQ+5MUdD+/fsd4zpotVqZPHmyHDlyxDG/trZWtFqtR1kajUZiY2Nl6tSpTj8ajUYuu+wymTp1qkybNs3j2saPHy+lpaUiIrJmzRrp0aOHPPjgg/LCCy/IggULpFevXrJ27VqP85YtWya9e/eWG2+8UaKiouTJJ5+U8PBweeKJJ2T58uXSv39/WbJkicd5gR5E7ejRo3LZZZeJVqsVnU4nd9xxh5w6dcox35v3QkRk5cqVLn90Op1kZWU5Xnvqm2++cbrp7+OPP5bU1FS58sor5bbbbpOKigqPs1r9/e9/l8WLF8vWrVtFRKSsrExmzJghKSkpsnr1ao9zAv3Y8YMPPiiXXHKJvPnmm7JmzRoZMmSIzJo1S6xWq4icey80Go3Hea37108H2frxjzfvbWhoqHzxxRciIjJlyhR5+OGHneYvWrRIrrjiCo+yLr30UnniiSdEROQvf/mLhIWFyeOPP+6Y/8wzz0hsbKzHtQV6PJqGhga56aabxGg0yoABA2Tx4sVO4294u19s2rTJ5Y9Op5NVq1Y5XntqypQp8uabb4rIucduDQaDjB8/XubOnSsTJ06UkJAQr/aNt99+W3Q6nYSHh0uvXr2ktLRUwsLCJDk5WVJSUkSn08nrr7/uURb3C9/3C5HA7xv+6PLNyJw5c2TWrFlSX18vX375pcyaNUuGDh0qX3/9tYh4t6OvWLFChg4dKmVlZU7Tg4KC5PPPP/e6th49esjhw4dFRGTixIny5z//2Wn+66+/LmPGjPE47+KLL5a3335bRER27dolOp1OXnvtNcf8DRs2yPDhwz3OC/QgamlpaZKYmCj//ve/pbS0VOLi4iQ+Pl5OnDghIr7t6BdddJGYTCanH41GI9HR0WIymWTo0KEe5yUkJMjf//53ERHZuHGjaLVaue666+TRRx+VG264QfR6vWO+JwoKCiQoKEji4uIkNDRU1q1bJ71795a77rpL7rnnHunRo4fk5eV5lBXoMW4GDx4sH330keN1fX29JCQkyNVXXy1nzpzx+gDY+oTGTw/Cvu4bPXv2lL1794rIuXEzdu3a5TT/wIEDHv++PXv2lEOHDomIiN1uF71e7zT2w8GDB736u+MBUD0HQO4Xvu8XrXmB3Df80eWbkQEDBjj95drtdrn33ntl8ODBcvDgQa8/XFVVVXLJJZfI//7v/0pzc7OI+P7BCg8Pl+3btzvqdPXB8nbQs9YmS0REr9fLf/7zH8frw4cPS0hIiFd5gRxEbdCgQbJt2zbH6zNnzsjs2bMlNjZW/vvf/3r9Xtxzzz0SGxsre/bscZruz47+1VdfiYhIYmKiPPnkk07zn3/+eZk4caLHeWPGjHE0mB9++KEYjUbJz893zH/ppZdk9OjRHmUF+rHjHj16OH7XVhaLRZKSkuSqq66Sr776yqv3QkQkNzdXYmJinBo2X9+Lq666Sp566ikREZk0aZK88sorTvPfeustGTx4sEdZUVFRjv3sxIkTotFonA44VVVVEhUV5XFtPACq5wDI/cL3/UIk8PuGP7p8M9K7d+82BysRkQceeEAuuugi+fjjj73+cJ06dUrS0tJk/Pjx8tlnn4ler/fpg3X77bfLnXfeKSIiN910kyxatMhp/vLly2XcuHEe5w0dOlTee+89ERH54osvRKvVyl//+lfH/OLiYjGZTB7nDRw48LxnAt555x0ZOHCgx3k9e/Z0/B9WK5vNJnPmzJHx48fLp59+6vV7sWHDBomJiZHnn3/eMc3XHb1Pnz6ye/duETnXHLb+d6sDBw543cz9tDn8cXN36NAhj/MCPcbNyJEjpbi4uM30U6dOSVJSkkyYMMHr90JEZOfOnTJmzBi5++67pbGx0ef3oqKiQvr06SPZ2dny/PPPS0REhCxatEhef/11WbJkiYSFhckf/vAHj7Juv/12SUxMlNdee01mz54tKSkpcvnll8vevXtl3759MmXKFK/OZPAAqJ4DIPcL3/cLkcDvG/7o8s3IZZddJq+++qrLeQ888ICEhYX59OESOXeKMTIyUrRarU8frCNHjojJZJLJkyeL2WyWHj16yJVXXinz58+XyZMnS3BwsMsdw51FixZJ//795a677pKhQ4dKZmamDB48WF544QUpKCiQmJiYNqdYzyfQg6iNGzdO3nrrrTbTWxuSwYM
H+/RefPvtt3LVVVfJNddcI999953PO/p1110nmZmZIiKSkpLS5n6TNWvWyIgRIzzOa212Rc691xqNxun9LC8vl4suusijrDNnzsi9994rwcHBotVqxWg0itFoFK1WK8HBwXLffffJmTNnPK7tf/7nf9z+I2OxWCQxMdHn/aKpqUnuueceGTFihOh0Op/eC5Fz//BefvnlbS4xREdHe3x5S+TcZY7p06dLr169JCUlRU6ePCkZGRmOyxUjRoyQAwcOeJzHA6B6DoDu9guNRsP9wgOB3jf80eWbkeXLl8uMGTPczr/vvvu8uh77U998841s3LhRTp8+7dP633//vTz66KMyZswYMRqNEhwcLEOGDJHU1NTzfmeAKy0tLfL73/9err32Wlm+fLnY7Xb5y1/+IjExMRIeHi6//vWvva7zySeflIEDBzo+nK3XogcOHOjVP0AiIgsXLnR7j4nNZpPrrrvO5/fCbrfL8uXLJSoqyucdfc+ePRIeHi5paWmybNky6dWrl9x+++3y+9//XtLS0sRgMMhLL73kcd4DDzwgI0aMkCeeeEISEhJk3rx5MmrUKHnvvfekpKRExo0bJ7/5zW+8qrGhoUE+/PBDKSoqkqKiIvnwww9d3tNzISdOnHC6hPdTFovlvP/374lNmzbJggULvLqZ05Vjx47JJ598IhUVFY7T+4Fw8OBB+eyzz8Rms3m1HhvD9jsAajQanw6ADQ0NUlZW5tgvysrKArpf2O12EQncfvHggw8GdL/46Zk1f/m6b/ijWzzaS/4LxCBqZ8+eRVNTk9vxNc6ePYsjR454/ejhj1VXV2Pr1q1IS0tD3759vV7/4MGDWLRoEYqLi3H69GkAQFBQEC677DI88sgjmDNnjsdZjY2NePjhh1FZWYlJkybh+eefx3PPPYfHHnsMNpsNU6ZMwfr16z1+7JDUJVDjx3z//fc4evQofvazn7mcf+rUKezYsQNTpkzxudZ33nkHH330EbKysvz6vNXX1+Orr76C3W7HwIEDYTKZfM76qa+++gpNTU0YNWqUV8MPuBLosZ+Y1/7YjJDPvvnmG2RnZ6OwsLDL5YkIjh07BrvdjoiICMcYC4Fw5swZ2Gw29O7d26v1AjVeTnfMC3RtraOHto4Yum/fPqxcuRJWqxW33347rrrqKo+zOiovLy8Pzc3NfuVNmjQJI0eODFh9/uYFeuwn5vmX55cOOwdDXc6uXbt8Pn3cGfNqamokPT1dkbxAjpfjLu/o0aNdMi/Qtb333nsSHBws/fr1E6PRKO+99570799fkpOT5aqrrhKdTtfm8X/mtU9eoMd+Yp5/ef5gM0JuuRs8qfXn2WefDchgTGrJuxAlm6VAjpfT3fICXVtSUpI89thjInLuJva+ffvK7373O8f8zMxMmT59OvM6IC/QYz8xz788f7AZIbcCPXiS2vPU3CwFeryc7pQX6NpCQ0MdIw+3tLRIUFCQ07gjn332mURGRjKvg/ICOfYT8/zP8xWbEXJr0KBBsnHjRrfzd+7c6dU/4mrPU3OzFOjxcrpTXqBrCw0NdXrao1evXnLw4EHH68OHD4vRaGReB+WJBG7sJ+YFJs8X3n+PNHUbcXFxqK6udjtfo9FAvLj/We15AwcOxIYNG2C3213+7Nixw+OsQOeNGjUK27dvbzN91apVuP766z3+oq3umBfo2kwmE7788kvH68rKSqcvsaupqcHAgQOZ10F5ANCrVy+88soryMrKQnJyssdfZMe89snzBZsRcuuRRx7BpEmT3M4fPnw4Pvrooy6Tp+Zm6YYbbsBf/vIXl/NWrVqFW2+91avaulNeoGu77777nP6xHjt2rNOjqO+9955XT5cwz7+8H7vllluwfft2bNiwwa8hApgXmDxv8NFeov/vn//8JxobG3HNNde4nN/Y2Ijt27d7PN5DoPOIiLoqNiNERESkKF6mISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkWxGSEiIiJFsRkhIiIiRbEZISIiIkX9P6VEZtaCkkDEAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "pt\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGyCAYAAAA2+MTKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABDLUlEQVR4nO3deXxU9b3/8ffMkEyIGANBCMZAwA1yEYKJSUNbFhuMSlW8XdCooanimusyv4qkAglixVqLuOSKtaJWTaULF9uLhmIUvDaRlCBoFXdpLDSBXjXB5MdkyHx/f/hj7JgEZkvOSXg9H495xDnL+3ySmcP5eFaHMcYIAADAIk6rCwAAAEc3mhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGApmhEAAGCpQVYXEAq/3689e/bo2GOPlcPhsLocAAAQAmOM9u/frxNOOEFOZ8/7P/pFM7Jnzx6lp6dbXQYAAIjAxx9/rBNPPLHH8f2iGTn22GMlSR999JGGDRsWdZ7P59Of/vQnnX322YqLi7NNFnn2yrNzbeTx2ZLXN3l2rq0/5LW2tio9PT2wHe9Jv2hGDh2aOfbYY5WUlBR1ns/nU2JiopKSkmLyRY1VFnn2yrNzbeTx2ZLXN3l2rq0/5B1ypFMsOIEVAABYimYEAABYimYEAABYimYEAABYimYEAABYimYEAABYimYEAABYimYEAABYimYEAABYimYEAABYimYEAABYKuxm5OWXX9b555+vE044QQ6HQ+vWrTviPJs2bdIZZ5wht9utk08+WY8//ngEpQIAgIEo7Gakra1NkydPVmVlZUjTf/TRR5o9e7Zmzpyp7du366abbtKVV16pDRs2hF0sAAAYeMJ+au+5556rc889N+TpV61apbFjx+rnP/+5JGnChAl65ZVXdO+996qwsDDcxQMAgAEm7GYkXHV1dSooKAgaVlhYqJtuuqnHebxer7xeb+B9a2urpC8ebezz+aKu6VCG3bLIs1eenWsjzz5Z5A3sPDvX1p/yjsRhjDGRLsThcOi//uu/NGfOnB6nOfXUU1VSUqKysrLAsOeee06zZ89We3u7Bg8e3GWeiooKLV26tMvwqqoqJSYmRlouAADoQ+3t7SoqKlJLS4uSkpJ6nK7X94xEoqysTB6PJ/C+tbVV6enpmjlzplJSUqLO9/l82rhxo2bNmqW4uDjbZJFnrzw710Ze//9sJ1b0fN6c22m0LMevxVud8vodUddHnj2yjsa8Tq8rpOl6vRlJTU1Vc3Nz0LDm5mYlJSV1u1dEktxut9xud5fhcXFxMfmHozfy7FwbefbJIi90GQvXB713u4zuzpWm/ORFeTuj3MDEMCvyvCNP5/U7YlIfefbKOpry/CFm9Hozkp+fr+eeey5o2MaNG5Wfn9/biwYQgYkVG2K6gY5VHoCBK+xm5PPPP9f7778feP/RRx9p+/btGjZsmEaPHq2ysjLt3r1bv/rVryRJ11xzjR588EEtWLBAP/zhD/Xiiy/qN7/5jdavX9/TIgCEIdbNAwD0tbCbka1bt2rmzJmB94fO7Zg3b54ef/xx/eMf/1BjY2Ng/NixY7V+/XrdfPPNuu+++3TiiSfql7/8JZf14qhF8wAAwcJuRmbMmKHDXYDT3d1VZ8yYoddeey3cRQH9wlfPbegJzQMAdM+WV9MAdsI5DwDQu2hGcFQKZW8GezIAoG/QjGBAC/UQCgDAOjQjGBAONR1cTgoA/Q/NCPoV9nQAwMBDMwJbo/kAgIGPZgS2QvMBAEcfmhFYJmPhes7xAADQjKDvsNcDANAdmhH0GpoPAEAoaEYQczQhAIBw0IwgZmhCAACRoBlBxGg+AACx4LS6APRPEys2WF0CAGCAYM8IQvavl+ICABArNCM4Ig7HAAB6E80IekQTAgDoCzQj6IImBADQl2hGEEATAgCwAs0IaEIAAJbi0l4AAGAp9owcxdgjAgCwA5qRowwNCADAbjhMAwAALBVRM1JZWamMjAwlJCQoLy9P9fX1PU7r8/l0++2366STTlJCQoImT56s6urqiAtGZCZWbGCvCADAlsJuRtasWSOPx6Py8nJt27ZNkydPVmFhofbu3dvt9IsWLdLDDz+sBx54QG+99ZauueYaXXTRRXrttdeiLh5HxjNkAAB2F3YzsmLFCs2fP18lJSXKzMzUqlWrlJiYqNWrV3c7/ZNPPqkf//jHOu+88zRu3Dhde+21Ou+88/Tzn/886uIBAED/F9YJrB0dHWpoaFBZWVlgmNPpVEFBgerq6rqdx+v1KiEhIWjY4MGD9corr/S4HK/XK6/XG3jf2toq6YtDPj6fL5ySu3Uow25Zscw7tEfE7TRBP6N1NOXZuTby7JNF3sDOs3Nt/SGv0xVajsMYE/IS9+zZo7S0NNXW1io/Pz8wfMGCBdq8ebO2bNnSZZ6ioiLt2LFD69at00knnaSamhpdeOGF6uzsDGo4/lVFRYWWLl3aZXhVVZUSExNDLRcAAFiovb1dRUVFamlpUVJSUo/T9fqlvffdd5/mz5+v8ePHy+Fw6KSTTlJJSUmPh3UkqaysTB6PJ/C+tbVV6enpmjlzplJSUqKuyefzaePGjZo1a5bi4uJskxWLvK+eI+J2Gi3L8WvxVqe8fkfU9R1NeXaujTw+W/L6Js/OtfWHvE6vK6TpwmpGhg8fLpfLpebm5qDhzc3NSk1N7Xae448/XuvWrdOBAwf0v//7vzrhhBO0cOFCjRs3rsfluN1uud3uLsPj4uJissHvjTyra/vySpnuvzxev0Pezui/WEdjnp1rI88+WeQN7Dw712bnPH+IGWGdwBofH6/s7GzV1NR8uSC/XzU1NUGHbbqTkJCgtLQ0HTx4UL///e914YUXhrNoAAAwQIV9mMbj8WjevHnKyclRbm6uVq5cqba2NpWUlEiSiouLlZaWpuXLl0uStmzZot27dysrK0u7d+9WRUWF/H6/FixYENvf5CjFvUMAAP1d2M3I3LlztW/fPi1ZskRNTU3KyspSdXW1Ro4cKUlqbGyU0/nlDpcDBw5o0aJF+vDDDzVkyBCdd955evLJJ5WcnByzXwIAAPRfEZ3AWlpaqtLS0m7Hbdq0Kej99OnT9dZbb0WyGAAAcBTgQXn9FIdnAAADBQ/KAwAAlmLPSD/DHhEAwEDDnhEAAGAp9oz0E+wRAQAMVOwZAQAAlqIZAQAAluIwjc1xeAYAMNCxZwQAAFiKPSM2NbFiQ0yfwAgAgF2xZwQAAFiKZgQAAFiKZsRmJlZssLoEAAD6FM0IAACwFM0IAACwFFfT2MSh+4m4XRYXAgBAH2PPCAAAsBTNCAAAsBTNCAAAsBTnjFiMZ88AAI527BkBAACW
ohkBAACWohkBAACW4pwRi3CuCAAAX4hoz0hlZaUyMjKUkJCgvLw81dfXH3b6lStX6rTTTtPgwYOVnp6um2++WQcOHIioYAAAMLCE3YysWbNGHo9H5eXl2rZtmyZPnqzCwkLt3bu32+mrqqq0cOFClZeXa+fOnXr00Ue1Zs0a/fjHP466eAAA0P+FfZhmxYoVmj9/vkpKSiRJq1at0vr167V69WotXLiwy/S1tbX6+te/rqKiIklSRkaGLrnkEm3ZsiXK0vsnDs8AABAsrGako6NDDQ0NKisrCwxzOp0qKChQXV1dt/NMnTpVTz31lOrr65Wbm6sPP/xQzz33nC6//PIel+P1euX1egPvW1tbJUk+n08+ny+ckrt1KMOKLLfLHH680wT9jBZ59sgiz155dq6NPHvl2bm2/pDXeYRt3iEOY0zIS9yzZ4/S0tJUW1ur/Pz8wPAFCxZo8+bNPe7tuP/++/WjH/1IxhgdPHhQ11xzjR566KEel1NRUaGlS5d2GV5VVaXExMRQywUAABZqb29XUVGRWlpalJSU1ON0vX41zaZNm3TnnXfqP//zP5WXl6f3339fN954o5YtW6bFixd3O09ZWZk8Hk/gfWtrq9LT0zVz5kylpKREXZPP59PGjRs1a9YsxcXF9WnWxIoNhx3vdhoty/Fr8VanvH5HVLWRF12enWsjj8+WvL7Js3Nt/SGv0xvao+jDakaGDx8ul8ul5ubmoOHNzc1KTU3tdp7Fixfr8ssv15VXXilJOv3009XW1qarrrpKt912m5zOrufQut1uud3uLsPj4uKibh56Ky/ULG9naB+u1+8IeVryejfPzrWRZ58s8gZ2np1rs3OeP8SMsJqR+Ph4ZWdnq6amRnPmzPliQX6/ampqVFpa2u087e3tXRoOl+uLTimMI0T9GietAgDQs7AP03g8Hs2bN085OTnKzc3VypUr1dbWFri6pri4WGlpaVq+fLkk6fzzz9eKFSs0ZcqUwGGaxYsX6/zzzw80JQAA4OgVdjMyd+5c7du3T0uWLFFTU5OysrJUXV2tkSNHSpIaGxuD9oQsWrRIDodDixYt0u7du3X88cfr/PPP109+8pPY/RYAAKDfiugE1tLS0h4Py2zatCl4AYMGqby8XOXl5ZEsCgAADHA8m6YXca4IAABHxlN7AQCApWhGAACApWhGAACApWhGAACApWhGAACApbiaphdwFQ0AAKFjzwgAALAUzQgAALAUzQgAALAUzQgAALAUJ7DGECeuAgAQPvaMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMxMjEig1WlwAAQL9EMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACwVUTNSWVmpjIwMJSQkKC8vT/X19T1OO2PGDDkcji6v2bNnR1y0XWQsXM8lvQAARCnsZmTNmjXyeDwqLy/Xtm3bNHnyZBUWFmrv3r3dTr927Vr94x//CLz++te/yuVy6Xvf+17UxQMAgP4v7GZkxYoVmj9/vkpKSpSZmalVq1YpMTFRq1ev7nb6YcOGKTU1NfDauHGjEhMTaUYAAIAkaVA4E3d0dKihoUFlZWWBYU6nUwUFBaqrqwsp49FHH9XFF1+sY445psdpvF6vvF5v4H1ra6skyefzyefzhVNytw5lRJvldhm5neaL//7/P6NFnn3y7FwbefbJIm9g59m5tv6Q1+kKLcdhjAl5iXv27FFaWppqa2uVn58fGL5gwQJt3rxZW7ZsOez89fX1ysvL05YtW5Sbm9vjdBUVFVq6dGmX4VVVVUpMTAy1XAAAYKH29nYVFRWppaVFSUlJPU4X1p6RaD366KM6/fTTD9uISFJZWZk8Hk/gfWtrq9LT0zVz5kylpKREXYfP59PGjRs1a9YsxcXFRZwzsWKD3E6jZTl+Ld7qlNfviLo28uyTZ+fayOOzJa9v8uxcW3/I6/S6QpourGZk+PDhcrlcam5uDhre3Nys1NTUw87b1tamZ555RrfffvsRl+N2u+V2u7sMj4uLi6p5iHWet/PLD8rrdwS9jxZ59smzc23k2SeLvIGdZ+fa7JznDzEjrBNY4+PjlZ2drZqami8X5PerpqYm6LBNd37729/K6/XqsssuC2eRtpSxcL0yFq63ugwAAAaEsA/TeDwezZs3Tzk5OcrNzdXKlSvV1tamkpISSVJxcbHS0tK0fPnyoPkeffRRzZkzJyaHWQAAwMARdjMyd+5c7du3T0uWLFFTU5OysrJUXV2tkSNHSpIaGxvldAbvcHnnnXf0yiuv6E9/+lNsqgYAAANGRCewlpaWqrS0tNtxmzZt6jLstNNOUxgX7QAAgKMIz6YBAACWohkBAACWohkBAACWohkBAACWohkBAACW6tPbwfd33OgMAIDYY88IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFDc9CwE3OwMAoPewZwQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFgqomaksrJSGRkZSkhIUF5enurr6w87/Weffabrr79eo0aNktvt1qmnnqrnnnsuooIBAMDAEvbt4NesWSOPx6NVq1YpLy9PK1euVGFhod555x2NGDGiy/QdHR2aNWuWRowYod/97ndKS0vT3/72NyUnJ8ei/l7FbeABAOh9YTcjK1as0Pz581VSUiJJWrVqldavX6/Vq1dr4cKFXaZfvXq1PvnkE9XW1iouLk6SlJGREV3VAABgwAirGeno6FBDQ4PKysoCw5xOpwoKClRXV9ftPH/4wx+Un5+v66+/Xs8++6yOP/54FRUV6dZbb5XL5ep2Hq/XK6/XG3jf2toqSfL5fPL5fOGU3K1DGUfKcrvMEbPcThP0M1rk2SfPzrWRZ58s8gZ2np1r6w95nSFsRyXJYYwJeYl79uxRWlqaamtrlZ+fHxi+YMECbd68WVu2bOkyz/jx47Vr1y5deumluu666/T+++/ruuuu0w033KDy8vJul1NRUaGlS5d2GV5VVaXExMRQywUAABZqb29XUVGRWlpalJSU1ON0YR+mCZff79eIESP0i1/8Qi6XS9nZ2dq9e7d+9rOf9diMlJWVyePxBN63trYqPT1dM2fOVEpKStQ1+Xw+bdy4UbNmzQocOurOxIoNR8xyO42W5fi1eKtTXr8j6trIs0+enWsjj8+WvL7Js3Nt/SGv09v9EZCvCqsZGT58uFwul5qbm4OGNzc3KzU1tdt5Ro0apbi4uKBDMhMmTFBTU5M6OjoUHx/fZR632y23291leFxc3GGbh3AdKc/bGfoH4fU7wpqevP6TZ+fayLNPFnkDO8/Otdk5zx9iRliX9sbHxys7O1s1NTVfLsjvV01NTdBhm3/19a9/Xe+//778fn9g2LvvvqtRo0Z124gAAICjS9j3GfF4PHrkkUf0xBNPaOfOnbr22mvV1tYWuLqmuLg46ATXa6+9Vp988oluvPFGvfvuu1q/fr3uvPNOXX/
99bH7LQAAQL8V9jkjc+fO1b59+7RkyRI1NTUpKytL1dXVGjlypCSpsbFRTueXPU56ero2bNigm2++WZMmTVJaWppuvPFG3XrrrbH7LQAAQL8V0QmspaWlKi0t7Xbcpk2bugzLz8/Xq6++GsmiAADAAMezaQAAgKVoRgAAgKV6/T4j/RHPpAEAoO+wZwQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKZgQAAFiKp/b+C57WCwBA32PPCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsBTNCAAAsFREzUhlZaUyMjKUkJCgvLw81dfX9zjt448/LofDEfRKSEiIuGAAADCwhN2MrFmzRh6PR+Xl5dq2bZsmT56swsJC7d27t8d5kpKS9I9//CPw+tvf/hZV0QAAYOAIuxlZsWKF5s+fr5KSEmVmZmrVqlVKTEzU6tWre5zH4XAoNTU18Bo5cmRURQMAgIEjrNvBd3R0qKGhQWVlZYFhTqdTBQUFqqur63G+zz//XGPGjJHf79cZZ5yhO++8U//2b//W4/Rer1derzfwvrW1VZLk8/nk8/nCKblbhzK+muV2mbCz3E4T9DNa5Nknz861kWefLPIGdp6da+sPeZ0hblcdxpiQl7hnzx6lpaWptrZW+fn5geELFizQ5s2btWXLli7z1NXV6b333tOkSZPU0tKie+65Ry+//LLefPNNnXjiid0up6KiQkuXLu0yvKqqSomJiaGWCwAALNTe3q6ioiK1tLQoKSmpx+l6/UF5+fn5QY3L1KlTNWHCBD388MNatmxZt/OUlZXJ4/EE3re2tio9PV0zZ85USkpK1DX5fD5t3LhRs2bNUlxcXGD4xIoNYWe5nUbLcvxavNUpr98RdW3k2SfPzrWRx2dLXt/k2bm2/pDX6XWFNF1Yzcjw4cPlcrnU3NwcNLy5uVmpqakhZcTFxWnKlCl6//33e5zG7XbL7XZ3O++/Ng/R+mqetzPyP7zX74hqfvLsm2fn2sizTxZ5AzvPzrXZOc8fYkZYJ7DGx8crOztbNTU1Xy7I71dNTU3Q3o/D6ezs1BtvvKFRo0aFs2gAADBAhX2YxuPxaN68ecrJyVFubq5WrlyptrY2lZSUSJKKi4uVlpam5cuXS5Juv/12fe1rX9PJJ5+szz77TD/72c/0t7/9TVdeeWVsf5MoZCxcb3UJAAActcJuRubOnat9+/ZpyZIlampqUlZWlqqrqwOX6zY2Nsrp/HKHy6effqr58+erqalJQ4cOVXZ2tmpra5WZmRm73wIAAPRbEZ3AWlpaqtLS0m7Hbdq0Kej9vffeq3vvvTeSxQAAgKMAz6YBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWGmR1AVaaWLFB3k6H1WUAAHBUY88IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwVETNSGVlpTIyMpSQkKC8vDzV19eHNN8zzzwjh8OhOXPmRLJYAAAwAIXdjKxZs0Yej0fl5eXatm2bJk+erMLCQu3du/ew8+3atUs/+tGP9M1vfjPiYgEAwMATdjOyYsUKzZ8/XyUlJcrMzNSqVauUmJio1atX9zhPZ2enLr30Ui1dulTjxo2LqmAAADCwhPWgvI6ODjU0NKisrCwwzOl0qqCgQHV1dT3Od/vtt2vEiBG64oor9D//8z9HXI7X65XX6w28b21tlST5fD75fL5wSu7WoQy300SddSgjFlnk2SvPzrWRZ58s8gZ2np1r6w95na7QchzGmJCXuGfPHqWlpam2tlb5+fmB4QsWLNDmzZu1ZcuWLvO88soruvjii7V9+3YNHz5cP/jBD/TZZ59p3bp1PS6noqJCS5cu7TK8qqpKiYmJoZYLAAAs1N7erqKiIrW0tCgpKanH6cLaMxKu/fv36/LLL9cjjzyi4cOHhzxfWVmZPB5P4H1ra6vS09M1c+ZMpaSkRF2Xz+fTxo0btXirU16/I6ost9NoWY4/Jlnk2SvPzrWRx2dLXt/k2bm2/pDX6XWFNF1Yzcjw4cPlcrnU3NwcNLy5uVmpqaldpv/ggw+0a9cunX/++YFhfr//iwUPGqR33nlHJ510Upf53G633G53l+FxcXGKi4sLp+TD8vod8nZG/8eOdRZ59sqzc23k2SeLvIGdZ+fa7JznDzEjrBNY4+PjlZ2drZqami8X5PerpqYm6LDNIePHj9cbb7yh7du3B14XXHCBZs6cqe3btys9PT2cxQMAgAEo7MM0Ho9H8+bNU05OjnJzc7Vy5Uq1tbWppKREklRcXKy0tDQtX75cCQkJmjhxYtD8ycnJktRlOAAAODqF3YzMnTtX+/bt05IlS9TU1KSsrCxVV1dr5MiRkqTGxkY5ndzYFQAAhCaiE1hLS0tVWlra7bhNmzYddt7HH388kkXG1MSKDbo71+oqAACAxLNpAACAxWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApQZZXUBfyli4XpLkdllcCAAACGDPCAAAsFREzUhlZaUyMjKUkJCgvLw81dfX9zjt2rVrlZOTo+TkZB1zzDHKysrSk08+GXHBAABgYAm7GVmzZo08Ho/Ky8u1bds2TZ48WYWFhdq7d2+30w8bNky33Xab6urq9Prrr6ukpEQlJSXasGFD1MUDAID+L+xmZMWKFZo/f75KSkqUmZmpVatWKTExUatXr+52+hkzZuiiiy7ShAkTdNJJJ+nGG2/UpEmT9Morr0RdPAAA6P/COoG1o6NDDQ0NKisrCwxzOp0qKChQXV3dEec3xujFF1/UO++8o5/+9Kc9Tuf1euX1egPvW1tbJUk+n08+ny+ckoO4XeaLn87gn9GIZRZ59sqzc23k2SeLvIGdZ+fa+kNepyu0HIcxJuQl7tmzR2lpaaqtrVV+fn5g+IIFC7R582Zt2bKl2/laWlqUlpYmr9crl8ul//zP/9QPf/jDHpdTUVGhpUuXdhleVVWlxMTEUMsFAAAWam9vV1FRkVpaWpSUlNTjdH1yae+xxx6r7du36/PPP1dNTY08Ho/GjRunGTNmdDt9WVmZPB5P4H1ra6vS09M1c+ZMpaSkRFzHxIovzlNxO42W5fi1eKtTXr8j4rxYZ5Fnrzw710Yeny15fZNn59r6Q16nN7R7aYTVjAwfPlwul0vNzc1Bw5ubm5WamtrjfE6nUyeffLIkKSsrSzt37tTy5ct7bEbcbrfcbneX4XFxcYqLiwun5CDezuA/rNfv6DIs4uwYZpFnrz
w710aefbLIG9h5dq7Nznn+EDPCOoE1Pj5e2dnZqqmp+XJBfr9qamqCDtscsTi/P+icEAAAcPQK+zCNx+PRvHnzlJOTo9zcXK1cuVJtbW0qKSmRJBUXFystLU3Lly+XJC1fvlw5OTk66aST5PV69dxzz+nJJ5/UQw89FNvfBAAA9EthNyNz587Vvn37tGTJEjU1NSkrK0vV1dUaOXKkJKmxsVFO55c7XNra2nTdddfp73//uwYPHqzx48frqaee0ty5c2P3WwAAgH4rohNYS0tLVVpa2u24TZs2Bb2/4447dMcdd0SyGAAAcBTg2TQAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSET2bpr/JWLje6hIAAEAP2DMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsFVEzUllZqYyMDCUkJCgvL0/19fU9TvvII4/om9/8poYOHaqhQ4eqoKDgsNMDAICjS9jNyJo1a+TxeFReXq5t27Zp8uTJKiws1N69e7udftOmTbrkkkv00ksvqa6uTunp6Tr77LO1e/fuqIsHAAD9X9jNyIoVKzR//nyVlJQoMzNTq1atUmJiolavXt3t9E8//bSuu+46ZWVlafz48frlL38pv9+vmpqaqIsHAAD936BwJu7o6FBDQ4PKysoCw5xOpwoKClRXVxdSRnt7u3w+n4YNG9bjNF6vV16vN/C+tbVVkuTz+eTz+cIpWZLkdpng904T9DMascwiz155dq6NPPtkkTew8+xcW3/I63SFluMwxoS8xD179igtLU21tbXKz88PDF+wYIE2b96sLVu2HDHjuuuu04YNG/Tmm28qISGh22kqKiq0dOnSLsOrqqqUmJgYarkAAMBC7e3tKioqUktLi5KSknqcLqw9I9G666679Mwzz2jTpk09NiKSVFZWJo/HE3jf2tqq9PR0zZw5UykpKWEvd2LFhqD3bqfRshy/Fm91yut3hJ3XW1nk2SvPzrWRx2dLXt/k2bm2/pDX6XWFNF1Yzcjw4cPlcrnU3NwcNLy5uVmpqamHnfeee+7RXXfdpRdeeEGTJk067LRut1tut7vL8Li4OMXFxYVTsiTJ29n9H9Trd/Q4LuxlxDCLPHvl2bk28uyTRd7AzrNzbXbO84eYEdYJrPHx8crOzg46+fTQyaj/etjmq+6++24tW7ZM1dXVysnJCWeRAABggAv7MI3H49G8efOUk5Oj3NxcrVy5Um1tbSopKZEkFRcXKy0tTcuXL5ck/fSnP9WSJUtUVVWljIwMNTU1SZKGDBmiIUOGxPBXAQAA/VHYzcjcuXO1b98+LVmyRE1NTcrKylJ1dbVGjhwpSWpsbJTT+eUOl4ceekgdHR367ne/G5RTXl6uioqK6KoHAAD9XkQnsJaWlqq0tLTbcZs2bQp6v2vXrkgWEbWMhestWS4AAAgPz6YBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACWGmR1AbGWsXC91SUAAIAwRLRnpLKyUhkZGUpISFBeXp7q6+t7nPbNN9/Ud77zHWVkZMjhcGjlypWR1goAAAagsJuRNWvWyOPxqLy8XNu2bdPkyZNVWFiovXv3djt9e3u7xo0bp7vuukupqalRFwwAAAaWsJuRFStWaP78+SopKVFmZqZWrVqlxMRErV69utvpzzzzTP3sZz/TxRdfLLfbHXXBAABgYAnrnJGOjg41NDSorKwsMMzpdKqgoEB1dXUxK8rr9crr9Qbet7a2SpJ8Pp98Pt9h53W7zBHz3U4T9DMascwiz155dq6NPPtkkTew8+xcW3/I6wxhmyxJDmNMyEvcs2eP0tLSVFtbq/z8/MDwBQsWaPPmzdqyZcth58/IyNBNN92km2666bDTVVRUaOnSpV2GV1VVKTExMdRyAQCAhdrb21VUVKSWlhYlJSX1OJ0tr6YpKyuTx+MJvG9tbVV6erpmzpyplJSUw847sWLDEfPdTqNlOX4t3uqU1++IqtZYZpFnrzw710Yeny15fZNn59r6Q16n1xXSdGE1I8OHD5fL5VJzc3PQ8Obm5pienOp2u7s9vyQuLk5xcXGHndfbGfofz+t3hDV9X2WRZ688O9dGnn2yyBvYeXauzc55/hAzwjqBNT4+XtnZ2aqpqflyQX6/ampqgg7bAAAAhCrswzQej0fz5s1TTk6OcnNztXLlSrW1tamkpESSVFxcrLS0NC1fvlzSFye9vvXWW4H/3r17t7Zv364hQ4bo5JNPjuGvAgAA+qOwm5G5c+dq3759WrJkiZqampSVlaXq6mqNHDlSktTY2Cin88sdLnv27NGUKVMC7++55x7dc889mj59ujZt2hT9bwAAAPq1iE5gLS0tVWlpabfjvtpgZGRkKIwLdgAAwFGGB+UBAABL0YwAAABL0YwAAABL0YwAAABL0YwAAABL0YwAAABL0YwAAABL2fJBeZHIWLje6hIAAEAE2DMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAsRTMCAAAs1e+fTcMzaQAA6N/YMwIAACxFMwIAACxFMwIAACxFMwIAACwVUTNSWVmpjIwMJSQkKC8vT/X19Yed/re//a3Gjx+vhIQEnX766XruueciKhYAAAw8YTcja9askcfjUXl5ubZt26bJkyersLBQe/fu7Xb62tpaXXLJJbriiiv02muvac6cOZozZ47++te/RlV4xsL1XEkDAMAAEHYzsmLFCs2fP18lJSXKzMzUqlWrlJiYqNWrV3c7/X333adzzjlHt9xyiyZMmKBly5bpjDPO0IMPPhh18QAAoP8L6z4jHR0damhoUFlZWWCY0+lUQUGB6urqup2nrq5OHo8naFhhYaHWrVvX43K8Xq+8Xm/gfUtLiyTpk08+Ud7ymvAL/4pBfqP2dr8G+Zzq9DuiSIptFnn2yrNzbeTx2ZLXN3l2rq0/5PkPtkuSjDGHn9CEYffu3UaSqa2tDRp+yy23mNzc3G7niYuLM1VVVUHDKisrzYgRI3pcTnl5uZHEixcvXrx48RoAr48//viw/YUt78BaVlYWtDfls88+05gxY9TY2Kjjjjsu6vzW1lalp6fr448/VlJSkm2yyLNXnp1rI4/Plry+ybNzbf0hzxij/fv364QTTjjsdGE1I8OHD5fL5VJzc3PQ8ObmZqWmpnY7T2pqaljTS5Lb7Zbb7e4y/LjjjovJH+eQpKSkmOXFMos8e+XZuTby7JNF3sDOs3Ntds8LZSdCWCewxsfHKzs7WzU1NYFhf
r9fNTU1ys/P73ae/Pz8oOklaePGjT1ODwAAji5hH6bxeDyaN2+ecnJylJubq5UrV6qtrU0lJSWSpOLiYqWlpWn58uWSpBtvvFHTp0/Xz3/+c82ePVvPPPOMtm7dql/84hex/U0AAEC/FHYzMnfuXO3bt09LlixRU1OTsrKyVF1drZEjR0qSGhsb5XR+ucNl6tSpqqqq0qJFi/TjH/9Yp5xyitatW6eJEyeGvEy3263y8vJuD91EIpZ5dq6NPPtkkWevPDvXRp698uxcW3/IC5XDmCNdbwMAANB7eDYNAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwlC1vB//Pf/5Tq1evVl1dnZqamiR9cSfXqVOn6gc/+IGOP/54iysErHXoQZKxuvzOznktLS1B/w5E80iIWGbBfuz8PY51Xqy/y1avG7bbM/KXv/xFp556qu6//34dd9xxmjZtmqZNm6bjjjtO999/v8aPH6+tW7eGnXvw4EHt2LFDGzZs0IYNG7Rjxw75fL6IaoxlliQ1NTXp2Wef1cMPP6yHH35Yzz77bOBLYYc86Ysv6jvvvKN33nkn8BTlgZxnRxs3btR5552noUOHKjExUYmJiRo6dKjOO+88vfDCCwMu75e//KUyMzM1bNgwZWZmBv33o48+alnWV331KePRsnOeHddbu3+P7bxe9EZexEJ5Wm9fysvLM1dddZXx+/1dxvn9fnPVVVeZr33tayHndXZ2mttuu80kJycbh8MR9EpOTjaLFi0ynZ2dfZ5ljDGff/65ufTSS43L5TKDBg0yI0aMMCNGjDCDBg0yLpfLXHbZZaatrc2yPGOMeeSRR8yECROM0+kMek2YMMH88pe/DCurP+QZY4zP5zPbt2831dXVprq62mzfvt10dHRElBWrvMcff9wMGjTIXHzxxeaxxx4zzz33nHnuuefMY489Zi655BITFxdnfvWrXw2YvLvvvtskJiaahQsXmpdeesm89dZb5q233jIvvfSSKSsrM8ccc4z52c9+1udZh/zpT38y5557rklOTg5855KTk825555rNm7cGFZWf8iz63pr9++xndeL3siLhu2akYSEBLNz584ex+/cudMkJCSEnHfLLbeY448/3qxatcp89NFHpr293bS3t5uPPvrIPPzww2bEiBFmwYIFfZ5ljDFXXHGFOeWUU0x1dbU5ePBgYPjBgwfNhg0bzKmnnmquvPJKy/Ls/sWPdV6sm81Y5p1yyinmwQcf7HF8ZWWlOfnkk0Ouze55o0ePNmvWrOlx/DPPPGPS09P7PMsY+2+wjqYNoN2/x3ZeL3ojLxq2a0YyMjLME0880eP4J554wowZMybkvJEjR5rq6uoex1dXV5sRI0b0eZYxxiQnJ5s///nPPY5/5ZVXTHJysmV5dv/ixzov1s1mLPPcbrd5++23exz/9ttvh9Wk2z0vISHBvPXWWz2Of/PNN83gwYP7PMsY+2+wjqYNoN2/x3ZeL3ojLxq2a0YefPBB43a7zQ033GCeffZZ8+qrr5pXX33VPPvss+aGG24wgwcPNpWVlSHnJSYmmtdff73H8Tt27DDHHHNMn2cZY0xSUpL5y1/+0uP4+vp6k5SUZFme3b/4sc6LdbMZy7wzzjjD3HLLLT2OX7BggTnjjDNCrs3ued/85jdNcXGx8fl8XcYdPHjQFBcXm2nTpvV5ljH232AdTRtAu3+P7bxe9EZeNGz5bJo1a9bo3nvvVUNDgzo7OyVJLpdL2dnZ8ng8+v73vx9y1uzZs3Xw4EE9/fTTGj58eNC4f/7zn7r88svlcrn03//9332aJUmXXnqpdu7cqUcffVRTpkwJGvfaa69p/vz5Gj9+vJ566ilL8qZNm6axY8fq0Ucf1aBBwRdedXZ26oc//KF27dqlzZs3D4i8Y445Rq+++qpOP/30bse//vrrmjp1qj7//PM+z9u0aZO+/e1va9y4cSooKAg8mLK5uVk1NTX68MMPtX79ek2bNi2k2uye9/rrr6uwsFA+n0/Tpk0Lynv55ZcVHx+vP/3pTyE9cDOWWZKUnZ2tb33rW7r77ru7HX/rrbfqhRdeUENDw4DIs/N6a/fvsZ3Xi97Ii4Ytm5FDfD6f/vnPf0qShg8frri4uLAzPv74Y5133nl6++23dfrppwf9sd944w1lZmbqv//7v5Went6nWZL06aefqqioSBs2bNDQoUM1YsQISdLevXv12WefqbCwUFVVVUpOTrYkz+5f/FjnxbrZjHXerl279NBDD+nVV18NugQvPz9f11xzjTIyMkLK6S95+/fv11NPPdVtXlFRkZKSkizJsvsG62jbANr9e2zn9aI38iJl62YkVvx+vzZs2NDtH/vss8+W0xn6Fc6xzDpk586d3eaNHz8+7KxY59n9ix/LvFg3m7HOg33YfYPFBhD9zVHRjAChinWzGeu8gwcP6s033wxkjRo1ShMmTIhor2F/yGtqatKWLVuC8nJzc5WammppFuzF7t9jO68XvZEXiaOmGamvr+/2jq5nnnmmpVkdHR1at25dt3kXXnih4uPjLc2T7P/Ft8OK1Nv8fr+WLFmiysrKLjeHOu6441RaWqqlS5eG3NzYPa+trU1XX321nnnmGTkcDg0bNkyS9Mknn8gYo0suuUQPP/ywEhMT+zTrX9l9g3U0bADt/j2283rRG3lR6ZPTZC3U3NxsvvGNbxiHw2HGjBljcnNzTW5urhkzZoxxOBzmG9/4hmlubu7zLGOMee+998y4ceNMQkKCmT59uvn+979vvv/975vp06ebhIQEc/LJJ5v33nvPsjy735StN27yZowxW7ZsMStXrjQLFy40CxcuNCtXrjT19fVh58Qyz86XHfdGXizvmRPr++/Y+X40vZFn5/XW7t9jO68XvZEXjQHfjHznO98x+fn53V7q9vbbb5upU6ea7373u32eZYwxBQUF5sILLzQtLS1dxrW0tJgLL7zQnH322Zbl2f2LH+u8WDebscyz82XHvZEXy3vmxPr+O3bfYB1NG0C7f4/tvF70Rl40BnwzMmTIELNt27Yex2/dutUMGTKkz7OMMWbw4MHmjTfe6HH866+/Htb1+7HOs/sXP9Z5sW42Y5kX63vc2D0vlvfMifX9d+y+wTqaNoB2/x7beb3ojbxo2O5BebHmdrvV2tra4/j9+/eH/ATFWGZJUnJysnbt2tXj+F27doV8GW5v5Pn9/sOeYxIfHy+/3z9g8jZs2KDKykqddtppXcaddtppuv/++1VdXW1J3owZM/SjH/0ocKn7v/rnP/+pW2+9VTNmzAi5Nrvnffvb39ZVV12l1157rcu41157Tddee63OP//8Ps+SvljPTzjhhB7Hjxo1Sm1tbQMmz87rrd2/x3ZeL3ojLyp90vJY6LrrrjNjxowxa9euDTp80dLSYtauXWsyMjJMaWlpn2cZY8zixYvN0KFDzYoVK8yOHTtMU1OTaWpqMjt27DArVqwww4YNM+Xl5ZblFRUVmSlTpnS7N2jbtm0mOzvbXHrppQMmLyUlxWzatKnH8S+99JJJSUmxJK+xsdFM
[base64-encoded PNG data omitted: matplotlib perplexity histogram output preceding the ru figure]", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "ru\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGyCAYAAAA2+MTKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABB2UlEQVR4nO3de1xUdeL/8ffMAINohGKKEop2U7cSgyCs9dJqlHaxbcuiwmXLrnzL5ruZbCqarbbVGpZstpXd2dzK1XYtjChrW0nWa21pZWmUBtpWYPB1GJnP749+ThFgMzB6Dvh6Ph48as7lfT4wczrvzpw54zDGGAEAAFjEafUAAADA4Y0yAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgqQirBxAMv9+vnTt36ogjjpDD4bB6OAAAIAjGGO3Zs0d9+/aV09n6+Y8OUUZ27typpKQkq4cBAADa4LPPPtPRRx/d6vwOUUaOOOIISdK2bdvUo0ePduf5fD698sorOuussxQZGWmbLPLslWfnsZHHc0veocmz89g6Ql5tba2SkpICx/HWdIgysv+tmSOOOEKxsbHtzvP5fIqJiVFsbGxYXqjhyiLPXnl2Hht5PLfkHZo8O4+tI+Tt91OXWHABKwAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsFXIZefPNN3Xeeeepb9++cjgcWrZs2U+us2rVKp1yyilyu9069thj9fjjj7dhqAAAoDMKuYzU1dVp6NChKioqCmr5bdu2afz48Ro9erQ2btyoKVOm6Oqrr9bKlStDHiwAAOh8Qv7W3nPOOUfnnHNO0MsvWrRIAwYM0B//+EdJ0uDBg/XWW2/pvvvuU1ZWVqibBwAAnUzIZSRU5eXlGjNmTJNpWVlZmjJlSqvreL1eeb3ewOPa2lpJ3321sc/na/eY9mfYLYs8e+XZeWzk2SeLvM6dZ+exdaS8n+Iwxpi2bsThcOhvf/ubJkyY0Ooyxx9/vHJzc5Wfnx+Y9tJLL2n8+PGqr69Xly5dmq0za9YszZ49u9n04uJixcTEtHW4AADgEKqvr1d2drZqamoUGxvb6nIH/cxIW+Tn58vj8QQe19bWKikpSaNHj1Z8fHy7830+n0pLSzV27FhFRkbaJos8e+XZeWzk8dzud+Ksn77+zu00mpPm14y1Tnn9jnaP73DKs/PYOkJeo9cV1HIHvYwkJCSourq6ybTq6mrFxsa2eFZEktxut9xud7PpkZGRYdnRD0aencdGnn2yyDt0ecnTVrQ6z+0yujtdGvb71+RtDMN/wC3NC357Xr8jLOM7HPPsPDY75/mDzDjoZSQzM1MvvfRSk2mlpaXKzMw82JsGcAi0dtDff0A9cdbKsB6gw5UHwD5CLiPffvuttm7dGni8bds2bdy4UT169FC/fv2Un5+vHTt26Mknn5QkXXfddVq4cKGmTp2q3/zmN3rttdf017/+VStWtP5/LQAOrpYKBAd7AFYJuYysXbtWo0ePDjzef23HpEmT9Pjjj+uLL75QZWVlYP6AAQO0YsUK3XLLLVqwYIGOPvpoPfLII3ysFwjRjwsE5QFAZxFyGRk1apQO9AGclu6uOmrUKG3YsCHUTQEdGuUBAILDd9MAAABL2fKjvYBVfng2gzMZAHBocGYEAABYijMj6PA4mwEAHRtnRgAAgKUoIwAAwFK8TQNL7H9rhbdVAACcGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCk+TYOg8OkXAMDBwpkRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBS3PSsE0uetoKblAEAbI8zIwAAwFKUEQAAYKk2lZGioiIlJycrOjpaGRkZqqioaHVZn8+nO+64Q8ccc4yio6M1dOhQlZSUtHnAAACgcwm5jCxZskQej0cFBQVav369hg4dqqysLO3atavF5adPn66HHnpIDzzwgN5//31dd911uvDCC7Vhw4Z2Dx4AAHR8IZeR+fPna/LkycrNzdWQIUO0aNEixcTEaPHixS0u/9RTT+l3v/udxo0bp4EDB+r666/XuHHj9Mc//rHdgwcAAB1fSJ+maWho0Lp165Sfnx+Y5nQ6NWbMGJWXl7e4jtfrVXR0dJNpXbp00VtvvdXqdrxer7xeb+BxbW2tpO/e8vH5fKEMuUX7M+yWFe48t8vI7TTf/fv//2e7Mw+jPDuPjTz7ZJHXufPsPLaOkNfoCi7HYYwJeos7d+5UYmKiVq9erczMzMD0qVOn6o033tCaNWuarZOdna1NmzZp2bJlOuaYY1RWVqYLLrhAjY2NTQrHD82aNUuzZ89uNr24uFgxMTHBDhcAAFiovr5e2dnZqqmpUWxsbKvLHfT7jCxYsECTJ0/WoEGD5HA4dMwxxyg3N7fVt3UkKT8/Xx6PJ/C4trZWSUlJGj16tOLj49s9Jp/Pp9LSUo0dO1aRkZG2yQp33omzVsrtNJqT5teMtU55/e2/z8jhlGfnsZHHc0veocmz89g6Ql6j1xXUciGVkZ49e8rlcqm6urrJ9OrqaiUkJLS4zlFHHaVly5Zp7969+u9//6u+fftq2rRpGjhwYKvbcbvdcrvdzaZHRkaG5YB/MPLsOLYf3uTM63eE9aZnh1OencdGnn2yyOvceXYem53z/EFmhHQBa1RUlFJTU1VWVvb9hvx+lZWVNXnbpiXR0dFKTEzUvn379MILL+iCCy4IZdMAAKCTCvltGo/Ho0mTJiktLU3p6ekqLCxUXV2dcnNzJUk5OTlKTEzUvHnzJElr1qzRjh07lJKSoh07dmjWrFny+/2aOnVqeH8TAADQIYVcRiZOnKjdu3dr5syZqqqqUkpKikpKStS7d29JUmVlpZzO70+47N27V9OnT9cnn3yibt26ady4cXrqqacUFxcXtl8CAAB0XG26gDUvL095eXktzlu1alWTxyNHjtT777/fls0AAIDDAN9NAwAALHXQP9qL0Jw4a6XuTv/un+G8MhoAALvizAgAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSbSojRUVFSk5OVnR0tDIyMlRRUXHA5QsLC3XCCSeoS5cuSkpK0i233KK9e/e2acAAAKBzCbmMLFmyRB6PRwUFBVq/fr2GDh2qrKws7dq1q8Xl
i4uLNW3aNBUUFGjz5s169NFHtWTJEv3ud79r9+ABAEDHFxHqCvPnz9fkyZOVm5srSVq0aJFWrFihxYsXa9q0ac2WX716tU4//XRlZ2dLkpKTk3XZZZdpzZo1rW7D6/XK6/UGHtfW1kqSfD6ffD5fqENuZn+G3bIkye00Tf5JnnV5dh4befbJIq9z59l5bB0hr9EVXI7DGBP0FhsaGhQTE6Pnn39eEyZMCEyfNGmSvvnmGy1fvrzZOsXFxbrhhhv0yiuvKD09XZ988onGjx+vK6+8stWzI7NmzdLs2bNbzIqJiQl2uAAAwEL19fXKzs5WTU2NYmNjW10upDMjX375pRobG9W7d+8m03v37q0tW7a0uE52dra+/PJLnXHGGTLGaN++fbruuusO+DZNfn6+PB5P4HFtba2SkpI0evRoxcfHhzLkFvl8PpWWlmrs2LGKjIy0TZYkpd5Rojlpfs1Y65TX72h3nttpyLNBFnn2yrPz2MizV56dx9YR8hq9rqCWC/ltmlCtWrVKc+fO1Z/+9CdlZGRo69atuvnmmzVnzhzNmDGjxXXcbrfcbnez6ZGRkWE54B+MvHBl7X/yvX6HvI3tfyH8MJc867PIs1eencdGnr3y7Dw2O+f5g8wIqYz07NlTLpdL1dXVTaZXV1crISGhxXVmzJihK6+8UldffbUk6aSTTlJdXZ2uueYa3X777XI6+XQxAACHs5DKSFRUlFJTU1VWVha4ZsTv96usrEx5eXktrlNfX9+scLhc3522CeFyFds7cdbKsLRId3BntAAA6DRCfpvG4/Fo0qRJSktLU3p6ugoLC1VXVxf4dE1OTo4SExM1b948SdJ5552n+fPna9iwYYG3aWbMmKHzzjsvUEoAAMDhK+QyMnHiRO3evVszZ85UVVWVUlJSVFJSEriotbKyssmZkOnTp8vhcGj69OnasWOHjjrqKJ133nn6/e9/H77fAgAAdFhtuoA1Ly+v1bdlVq1a1XQDEREqKChQQUFBWzYFAAA6Oa4eBQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALNWmMlJUVKTk5GRFR0crIyNDFRUVrS47atQoORyOZj/jx49v86ABAEDnEXIZWbJkiTwejwoKCrR+/XoNHTpUWVlZ2rVrV4vLL126VF988UXg5z//+Y9cLpcuvvjidg8eAAB0fCGXkfnz52vy5MnKzc3VkCFDtGjRIsXExGjx4sUtLt+jRw8lJCQEfkpLSxUTE0MZAQAAkqSIUBZuaGjQunXrlJ+fH5jmdDo1ZswYlZeXB5Xx6KOP6tJLL1XXrl1bXcbr9crr9QYe19bWSpJ8Pp98Pl8oQ27R/oxwZrmdpt1ZP8whz/o8O4+NPPtkkde58+w8to6Q1+gKLsdhjAl6izt37lRiYqJWr16tzMzMwPSpU6fqjTfe0Jo1aw64fkVFhTIyMrRmzRqlp6e3utysWbM0e/bsZtOLi4sVExMT7HABAICF6uvrlZ2drZqaGsXGxra6XEhnRtrr0Ucf1UknnXTAIiJJ+fn58ng8gce1tbVKSkrS6NGjFR8f3+5x+Hw+lZaWauzYsYqMjAxL1oy1Tnn9jnaPze00mpPmJ88GeXYeG3k8t+Qdmjw7j60j5DV6XUEtF1IZ6dmzp1wul6qrq5tMr66uVkJCwgHXraur07PPPqs77rjjJ7fjdrvldrubTY+MjGx3eThYeV6/Q97G9j9x5Nkvz85jI88+WeR17jw7j83Oef4gM0K6gDUqKkqpqakqKyv7fkN+v8rKypq8bdOS5557Tl6vV1dccUUomwQAAJ1cyG/TeDweTZo0SWlpaUpPT1dhYaHq6uqUm5srScrJyVFiYqLmzZvXZL1HH31UEyZMCMvbLAAAoPMIuYxMnDhRu3fv1syZM1VVVaWUlBSVlJSod+/ekqTKyko5nU1PuHzwwQd666239Morr4Rn1AAAoNNo0wWseXl5ysvLa3HeqlWrmk074YQTFMKHdgAAwGGE76YBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAs1aYyUlRUpOTkZEVHRysjI0MVFRUHXP6bb77RjTfeqD59+sjtduv444/XSy+91KYBAwCAziUi1BWWLFkij8ejRYsWKSMjQ4WFhcrKytIHH3ygXr16NVu+oaFBY8eOVa9evfT8888rMTFRn376qeLi4sIxfgAA0MGFXEbmz5+vyZMnKzc3V5K0aNEirVixQosXL9a0adOaLb948WJ99dVXWr16tSIjIyVJycnJ7Rs1AADoNEIqIw0NDVq3bp3y8/MD05xOp8aMGaPy8vIW13nxxReVmZmpG2+8UcuXL9dRRx2l7Oxs3XbbbXK5XC2u4/V65fV6A49ra2slST6fTz6fL5Qht2h/Rjiz3E7T7qwf5pBnfZ6dx0aefbLI69x5dh5bR8hrdAWX4zDGBL3FnTt3KjExUatXr1ZmZmZg+tSpU/XGG29ozZo1zdYZNGiQtm/frssvv1w33HCDtm7dqhtuuEE33XSTCgoKWtzOrFmzNHv27GbTi4uLFRMTE+xwAQCAherr65Wdna2amhrFxsa2ulzIb9OEyu/3q1evXvrzn/8sl8ul1NRU7dixQ/fcc0+rZSQ/P18ejyfwuLa2VklJSRo9erTi4+PbPSafz6fS0lLNWOuU1+9oV5bbaTQnzR+WLPLslWfnsZHHc0veocmz89g6Ql6jt+V3QH4spDLSs2dPuVwuVVdXN5leXV2thISEFtfp06ePIiMjm7wlM3jwYFVVVamhoUFRUVHN1nG73XK73c2mR0ZGBq47CQev3yFvY/v/2OHOIs9eeXYeG3n2ySKvc+fZeWx2zvMHmRHSR3ujoqKUmpqqsrKy7zfk96usrKzJ2zY/dPrpp2vr1q3y+/2BaR9++KH69OnTYhEBAACHl5DvM+LxePTwww/riSee0ObNm3X99derrq4u8OmanJycJhe4Xn/99frqq690880368MPP9SKFSs0d+5c3XjjjeH7LQAAQIcV8jUjEydO1O7duzVz5kxVVVUpJSVFJSUl6t27tySpsrJSTuf3HScpKUkrV67ULbfcopNPPlmJiYm6+eabddttt4XvtwAAAB1Wmy5gzcvLU15eXovzVq1a1WxaZmam3n777bZsCgAAdHJ8Nw0AALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAAC
ApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGCpNpWRoqIiJScnKzo6WhkZGaqoqGh12ccff1wOh6PJT3R0dJsHDAAAOpeQy8iSJUvk8XhUUFCg9evXa+jQocrKytKuXbtaXSc2NlZffPFF4OfTTz9t16ABAEDnEXIZmT9/viZPnqzc3FwNGTJEixYtUkxMjBYvXtzqOg6HQwkJCYGf3r17t2vQAACg84gIZeGGhgatW7dO+fn5gWlOp1NjxoxReXl5q+t9++236t+/v/x+v0455RTNnTtXP/vZz1pd3uv1yuv1Bh7X1tZKknw+n3w+XyhDbtH+DLfTtDtrf0Y4ssizV56dx0aefbLI69x5dh5bR8hrdAWX4zDGBL3FnTt3KjExUatXr1ZmZmZg+tSpU/XGG29ozZo1zdYpLy/XRx99pJNPPlk1NTW699579eabb+q9997T0Ucf3eJ2Zs2apdmzZzebXlxcrJiYmGCHCwAALFRfX6/s7GzV1NQoNja21eVCOjPSFpmZmU2Ky/DhwzV48GA99NBDmjNnTovr5Ofny+PxBB7X1tYqKSlJo0ePVnx8fLvH5PP5VFpaqhlrnfL6He3KcjuN5qT5w5JFnr3y7Dw28nhuyTs0eXYeW0fIa/S6gloupDLSs2dPuVwuVVdXN5leXV2thISEoDIiIyM1bNgwbd26tdVl3G633G53i+tGRkaGMuQD8vod8ja2/48d7izy7JVn57GRZ58s8jp3np3HZuc8f5AZIV3AGhUVpdTUVJWVlX2/Ib9fZWVlTc5+HEhjY6Peffdd9enTJ5RNAwCATirkt2k8Ho8mTZqktLQ0paenq7CwUHV1dcrNzZUk5eTkKDExUfPmzZMk3XHHHTrttNN07LHH6ptvvtE999yjTz/9VFdffXV4fxMAANAhhVxGJk6cqN27d2vmzJmqqqpSSkqKSkpKAh/XrayslNP5/QmXr7/+WpMnT1ZVVZW6d++u1NRUrV69WkOGDAnfbwEAADqsNl3AmpeXp7y8vBbnrVq1qsnj++67T/fdd19bNgMAAA4DfDcNAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgqTaVkaKiIiUnJys6OloZGRmqqKgIar1nn31WDodDEyZMaMtmAQBAJxRyGVmyZIk8Ho8KCgq0fv16DR06VFlZWdq1a9cB19u+fbt++9vf6uc//3mbBwsAADqfkMvI/PnzNXnyZOXm5mrIkCFatGiRYmJitHjx4lbXaWxs1OWXX67Zs2dr4MCB7RowAADoXCJCWbihoUHr1q1Tfn5+YJrT6dSYMWNUXl7e6np33HGHevXqpauuukr//Oc/f3I7Xq9XXq838Li2tlaS5PP55PP5Qhlyi/ZnuJ2m3Vn7M8KRRZ698uw8NvLsk0Ve586z89g6Ql6jK7gchzEm6C3u3LlTiYmJWr16tTIzMwPTp06dqjfeeENr1qxpts5bb72lSy+9VBs3blTPnj3161//Wt98842WLVvW6nZmzZql2bNnN5teXFysmJiYYIcLAAAsVF9fr+zsbNXU1Cg2NrbV5UI6MxKqPXv26Morr9TDDz+snj17Br1efn6+PB5P4HFtba2SkpI0evRoxcfHt3tcPp9PpaWlmrHWKa/f0a4st9NoTpo/LFnk2SvPzmMjj+eWvEOTZ+exdYS8Rq8rqOVCKiM9e/aUy+VSdXV1k+nV1dVKSEhotvzHH3+s7du367zzzgtM8/v93204IkIffPCBjjnmmGbrud1uud3uZtMjIyMVGRkZypAPyOt3yNvY/j92uLPIs1eencdGnn2yyOvceXYem53z/EFmhHQBa1RUlFJTU1VWVvb9hvx+lZWVNXnbZr9Bgwbp3Xff1caNGwM/559/vkaPHq2NGzcqKSkplM0DAIBOKOS3aTwejyZNmqS0tDSlp6ersLBQdXV1ys3NlSTl5OQoMTFR8+bNU3R0tE488cQm68fFxUlSs+kAAODwFHIZmThxonbv3q2ZM2eqqqpKKSkpKikpUe/evSVJlZWVcjq5sSsAAAhOmy5gzcvLU15eXovzVq1adcB1H3/88bZsEgAAdFKcwgAAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJZqUxkpKipScnKyoqOjlZGRoYqKilaXXbp0qdLS0hQXF6euXbsqJSVFTz31VJsHDAAAOpeQy8iSJUvk8XhUUFCg9evXa+jQocrKytKuXbtaXL5Hjx66/fbbVV5ernfeeUe5ubnKzc3VypUr2z14AADQ8UWEusL8+fM1efJk5ebmSpIWLVqkFStWaPHixZo2bVqz5UeNGtXk8c0336wnnnhCb731lrKyslrchtfrldfrDTyura2VJPl8Pvl8vlCH3Mz+DLfTtDtrf0Y4ssizV56dx0aefbLI69x5dh5bR8hrdAWX4zDGBL3FhoYGxcTE6Pnnn9eECRMC0ydNmqRvvvlGy5cvP+D6xhi99tprOv/887Vs2TKNHTu2xeVmzZql2bNnN5teXFysmJiYYIcLAAAsVF9fr+zsbNXU1Cg2NrbV5UI6M/Lll1+qsbFRvXv3bjK9d+/e2rJlS6vr1dTUKDExUV6vVy6XS3/6059aLSKSlJ+fL4/HE3hcW1urpKQkjR49WvHx8aEMuUU+n0+lpaWasdYpr9/Rriy302hOmj8sWeTZK8/OYyOP55a8Q5Nn57F1hLxGryuo5UJ+m6YtjjjiCG3cuFHffvutysrK5PF4NHDgwGZv4ezndrvldrubTY+MjFRkZGTYxuX1O+RtbP8fO9xZ5Nkrz85jI88+WeR17jw7j83Oef4gM0IqIz179pTL5VJ1dXWT6dXV1UpISGh1PafTqWOPPVaSlJKSos2bN2vevHmtlhEAAHD4COnTNFFRUUpNTVVZWVlgmt/vV1lZmTIzM4PO8fv9TS5QBQAAh6+Q36bxeDyaNGmS0tLSlJ6ersLCQtXV1QU+XZOTk6PExETNmzdPkjRv3jylpaXpmGOOkdfr1UsvvaSnnnpKDz74YHh/EwAA0CGFXEYmTpyo3bt3a+bMmaqqqlJKSopKSkoCF7VWVlbK6fz+hEtdXZ1uuOEGff755+rSpYsGDRqkp59+WhMnTgzfbwEAADqsNl3AmpeXp7y8vBbnrVq1qsnjO++8U3feeWdbNgMAAA4DfDcNAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUU
YAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgqTaVkaKiIiUnJys6OloZGRmqqKhoddmHH35YP//5z9W9e3d1795dY8aMOeDyAADg8BJyGVmyZIk8Ho8KCgq0fv16DR06VFlZWdq1a1eLy69atUqXXXaZXn/9dZWXlyspKUlnnXWWduzY0e7BAwCAji/kMjJ//nxNnjxZubm5GjJkiBYtWqSYmBgtXry4xeWfeeYZ3XDDDUpJSdGgQYP0yCOPyO/3q6ysrN2DBwAAHV9EKAs3NDRo3bp1ys/PD0xzOp0aM2aMysvLg8qor6+Xz+dTjx49Wl3G6/XK6/UGHtfW1kqSfD6ffD5fKENu0f4Mt9O0O2t/RjiyyLNXnp3HRp59ssjr3Hl2HltHyGt0BZfjMMYEvcWdO3cqMTFRq1evVmZmZmD61KlT9cYbb2jNmjU/mXHDDTdo5cqVeu+99xQdHd3iMrNmzdLs2bObTS8uLlZMTEywwwUAABaqr69Xdna2ampqFBsb2+pyIZ0Zaa+77rpLzz77rFatWtVqEZGk/Px8eTyewOPa2lolJSVp9OjRio+Pb/c4fD6fSktLNWOtU16/o11ZbqfRnDR/WLLIs1eencdGHs8teYcmz85j6wh5jV5XUMuFVEZ69uwpl8ul6urqJtOrq6uVkJBwwHXvvfde3XXXXXr11Vd18sknH3BZt9stt9vdbHpkZKQiIyNDGfIBef0OeRvb/8cOdxZ59sqz89jIs08WeZ07z85js3OeP8iMkC5gjYqKUmpqapOLT/dfjPrDt21+7O6779acOXNUUlKitLS0UDYJAAA6uZDfpvF4PJo0aZLS0tKUnp6uwsJC1dXVKTc3V5KUk5OjxMREzZs3T5L0hz/8QTNnzlRxcbGSk5NVVVUlSerWrZu6desWxl8FAAB0RCGXkYkTJ2r37t2aOXOmqqqqlJKSopKSEvXu3VuSVFlZKafz+xMuDz74oBoaGvSrX/2qSU5BQYFmzZrVvtEDAIAOr00XsObl5SkvL6/FeatWrWryePv27W3ZBAAAOEzw3TQAAMBSh/Sjve2VMa9M+yK6tjvH7TK6Oz0MAwIAAO3GmREAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLtamMFBUVKTk5WdHR0crIyFBFRUWry7733nu66KKLlJycLIfDocLCwraOFQAAdEIhl5ElS5bI4/GooKBA69ev19ChQ5WVlaVdu3a1uHx9fb0GDhyou+66SwkJCe0eMAAA6FxCLiPz58/X5MmTlZubqyFDhmjRokWKiYnR4sWLW1z+1FNP1T333KNLL71Ubre73QMGAACdS0QoCzc0NGjdunXKz88PTHM6nRozZozKy8vDNiiv1yuv1xt4XFtbK0lyO41cLtPufLfTNPmnXbLIs1eencdGnn2yyOvceXYeW0fIawzymO0wxgS9xZ07dyoxMVGrV69WZmZmYPrUqVP1xhtvaM2aNQdcPzk5WVOmTNGUKVMOuNysWbM0e/bsZtOLi4sVExMT7HABAICF6uvrlZ2drZqaGsXGxra6XEhnRg6V/Px8eTyewOPa2lolJSXpzg1O7Yt0tTvf7TSak+bXjLVOef0O22SRZ688O4+NPJ5b8g5Nnp3H1hHyGr3BHbNDKiM9e/aUy+VSdXV1k+nV1dVhvTjV7Xa3eH2J1+/Qvsb2/3F+mOcNU144s8izV56dx0aefbLI69x5dh6bnfP8QWaEdAFrVFSUUlNTVVZW9v2G/H6VlZU1edsGAAAgWCG/TePxeDRp0iSlpaUpPT1dhYWFqqurU25uriQpJydHiYmJmjdvnqTvLnp9//33A/++Y8cObdy4Ud26ddOxxx4bxl8FAAB0RCGXkYkTJ2r37t2aOXOmqqqqlJKSopKSEvXu3VuSVFlZKafz+xMuO3fu1LBhwwKP7733Xt17770aOXKkVq1a1f7fAAAAdGhtuoA1Ly9PeXl5Lc77ccFITk5WCB/YAQAAhxm+mwYAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALBUm8pIUVGRkpOTFR0drYyMDFVUVBxw+eeee06DBg1SdHS0TjrpJL300kttGiwAAOh8Qi4jS5YskcfjUUFBgdavX6+hQ4cqKytLu3btanH51atX67LLLtNVV12lDRs2aMKECZowYYL+85//tHvwAACg4wu5jMyfP1+TJ09Wbm6uhgwZokWLFikmJkaLFy9ucfkFCxbo7LPP1q233qrBgwdrzpw5OuWUU7Rw4cJ2Dx4AAHR8EaEs3NDQoHXr1ik/Pz8wzel0asyYMSovL29xnfLycnk8nibTsrKytGzZsla34/V65fV6A49ramq+G6yvLpThtirCb1Rf71eEz6lGv8M2WeTZK8/OYyOP55a8Q5Nn57F1hDz/vnpJkjHmwAuaEOzYscNIMqtXr24y/dZbbzXp6ektrhMZGWmKi4ubTCsqKjK9evVqdTsFBQVGEj/88MMPP/zw0wl+PvvsswP2i5DOjBwq+fn5Tc6mfPPNN+rfv78qKyt15JFHtju/trZWSUlJ+uyzzxQbG2ubLPLslWfnsZHHc0veocmz89g6Qp4xRnv27FHfvn0PuFxIZaRnz55yuVyqrq5uMr26uloJCQktrpOQkBDS8pLkdrvldrubTT/yyCPD8sfZLzY2Nmx54cwiz155dh4befbJIq9z59l5bHbPC+YkQkgXsEZFRSk1NVVlZWWBaX6/X2VlZcrMzGxxnczMzCbLS1JpaWmrywMAgMNLyG/TeDweTZo0SWlpaUpPT1dhYaHq6uqUm5srScrJyVFiYqLmzZsnSbr55ps1cuRI/fGPf9T48eP17LPPau3atfrzn/8c3t8EAAB0SCGXkYkTJ2r37t2aOXOmqqqqlJKSopKSEvXu3VuSVFlZKafz+xMuw4cPV3FxsaZPn67f/e53Ou6447Rs2TKdeOKJQW/T7XaroKCgxbdu2iKceXYeG3n2ySLPXnl2Hht59sqz89g6Ql6wHMb81OdtAAAADh6+mwYAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKVseTv4L7/8UosXL1Z5ebmqqqokfXcn1+HDh+vXv/61jjrqKItHCFhr/xdJhuvjd3bOq6mpafLfgfZ8JUQ4s2A/dn4dhzsv3K9lq/cN2
50Z+fe//63jjz9e999/v4488kiNGDFCI0aM0JFHHqn7779fgwYN0tq1a0PO3bdvnzZt2qSVK1dq5cqV2rRpk3w+X5vGGM4sSaqqqtLy5cv10EMP6aGHHtLy5csDLwo75EnfvVA/+OADffDBB4FvUe7MeXZUWlqqcePGqXv37oqJiVFMTIy6d++ucePG6dVXX+10eY888oiGDBmiHj16aMiQIU3+/dFHH7Us68d+/C3j7WXnPDvut3Z/Hdt5vzgYeW0WzLf1HkoZGRnmmmuuMX6/v9k8v99vrrnmGnPaaacFndfY2Ghuv/12ExcXZxwOR5OfuLg4M336dNPY2HjIs4wx5ttvvzWXX365cblcJiIiwvTq1cv06tXLREREGJfLZa644gpTV1dnWZ4xxjz88MNm8ODBxul0NvkZPHiweeSRR0LK6gh5xhjj8/nMxo0bTUlJiSkpKTEbN240DQ0NbcoKV97jjz9uIiIizKWXXmoee+wx89JLL5mXXnrJPPbYY+ayyy4zkZGR5sknn+w0eXfffbeJiYkx06ZNM6+//rp5//33zfvvv29ef/11k5+fb7p27WruueeeQ5613yuvvGLOOeccExcXF3jNxcXFmXPOOceUlpaGlNUR8uy639r9dWzn/eJg5LWH7cpIdHS02bx5c6vzN2/ebKKjo4POu/XWW81RRx1lFi1aZLZt22bq6+tNfX292bZtm3nooYdMr169zNSpUw95ljHGXHXVVea4444zJSUlZt++fYHp+/btMytXrjTHH3+8ufrqqy3Ls/sLP9x54S6b4cw77rjjzMKFC1udX1RUZI499tigx2b3vH79+pklS5a0Ov/ZZ581SUlJhzzLGPsfsA6nA6DdX8d23i8ORl572K6MJCcnmyeeeKLV+U888YTp379/0Hm9e/c2JSUlrc4vKSkxvXr1OuRZxhgTFxdn/vWvf7U6/6233jJxcXGW5dn9hR/uvHCXzXDmud1us2XLllbnb9myJaSSbve86Oho8/7777c6/7333jNdunQ55FnG2P+AdTgdAO3+OrbzfnEw8trDdmVk4cKFxu12m5tuusksX77cvP322+btt982y5cvNzfddJPp0qWLKSoqCjovJibGvPPOO63O37Rpk+nateshzzLGmNjYWPPvf/+71fkVFRUmNjbWsjy7v/DDnRfushnOvFNOOcXceuutrc6fOnWqOeWUU4Iem93zfv7zn5ucnBzj8/mazdu3b5/JyckxI0aMOORZxtj/gHU4HQDt/jq2835xMPLaw5bfTbNkyRLdd999WrdunRobGyVJLpdLqamp8ng8uuSSS4LOGj9+vPbt26dnnnlGPXv2bDLvyy+/1JVXXimXy6V//OMfhzRLki6//HJt3rxZjz76qIYNG9Zk3oYNGzR58mQNGjRITz/9tCV5I0aM0IABA/Too48qIqLpB68aGxv1m9/8Rtu3b9cbb7zRKfK6du2qt99+WyeddFKL89955x0NHz5c33777SHPW7Vqlc4991wNHDhQY8aMCXwxZXV1tcrKyvTJJ59oxYoVGjFiRFBjs3veO++8o6ysLPl8Po0YMaJJ3ptvvqmoqCi98sorQX3hZjizJCk1NVW/+MUvdPfdd7c4/7bbbtOrr76qdevWdYo8O++3dn8d23m/OBh57WHLMrKfz+fTl19+KUnq2bOnIiMjQ8747LPPNG7cOG3ZskUnnXRSkz/2u+++qyFDhugf//iHkpKSDmmWJH399dfKzs7WypUr1b17d/Xq1UuStGvXLn3zzTfKyspScXGx4uLiLMmz+ws/3HnhLpvhztu+fbsefPBBvf32200+gpeZmanrrrtOycnJQeV0lLw9e/bo6aefbjEvOztbsbGxlmTZ/YB1uB0A7f46tvN+cTDy2srWZSRc/H6/Vq5c2eIf+6yzzpLTGfwnnMOZtd/mzZtbzBs0aFDIWeHOs/sLP5x54S6b4c6Dfdj9gMUBEB3NYVFGgGCFu2yGO2/fvn167733All9+vTR4MGD23TWsCPkVVVVac2aNU3y0tPTlZCQYGkW7MXur2M77xcHI68tDpsyUlFR0eIdXU899VRLsxoaGrRs2bIW8y644AJFRUVZmifZ/4Vvhx3pYPP7/Zo5c6aKioqa3RzqyCOPVF5enmbPnh10ubF7Xl1dna699lo9++yzcjgc6tGjhyTpq6++kjFGl112mR566CHFxMQc0qwfsvsB63A4ANr9dWzn/eJg5LXLIblM1kLV1dXmjDPOMA6Hw/Tv39+kp6eb9PR0079/f+NwOMwZZ5xhqqurD3mWMcZ89NFHZuDAgSY6OtqMHDnSXHLJJeaSSy4xI0eONNHR0ebYY481H330kWV5dr8p28G4yZsxxqxZs8YUFhaaadOmmWnTppnCwkJTUVERck448+z8seODkRfOe+aE+/47dr4fzcHIs/N+a/fXsZ33i4OR1x6dvoxcdNFFJjMzs8WPum3ZssUMHz7c/OpXvzrkWcYYM2bMGHPBBReYmpqaZvNqamrMBRdcYM466yzL8uz+wg93XrjLZjjz7Pyx44ORF8575oT7/jt2P2AdTgdAu7+O7bxfHIy89uj0ZaRbt25m/fr1rc5fu3at6dat2yHPMsaYLl26mHfffbfV+e+8805In98Pd57dX/jhzgt32QxnXrjvcWP3vHDeMyfc99+x+wHrcDoA2v11bOf94mDktYftvigv3Nxut2pra1udv2fPnqC/QTGcWZIUFxen7du3tzp/+/btQX8M92Dk+f3+A15jEhUVJb/f32nyVq5cqaKiIp1wwgnN5p1wwgm6//77VVJSYkneqFGj9Nvf/jbwUfcf+vLLL3Xbbbdp1KhRQY/N7nnnnnuurrnmGm3YsKHZvA0bNuj666/Xeeedd8izpO/28759+7Y6v0+fPqqrq+s0eXbeb+3+OrbzfnEw8trlkFQeC91www2mf//+ZunSpU3evqipqTFLly41ycnJJi8v75BnGWPMjBkzTPfu3c38+fPNpk2bTFVVlamqqjKbNm0y8+fPNz169DAFBQWW5WVnZ5thw4a1eDZo/fr1JjU11Vx++eWdJi8+Pt6sWrWq1fmvv/66iY+PtySvsrLSnHjiiSYiIsIMGzbMnH322ebss882w4YNMxEREebkk082lZWVQY/N7nlfffWVOfvss43D4TA9evQwgwYNMoMGDTI9evQwTqfTnHPOOebrr78+5FnGGDNu3Dhz1llnmd27dzebt3v3bnP22Web8ePHd5o8O++3dn8d23m/OBh57dHpP03j9Xo1ZcoULV68WPv27Qs08oaGBkVEROiqq67SfffdF9QZjdayvF6vIiMjQ8ra7w9/+IMWLFigqqoqORwOSZIxRgkJCZoyZYqmTp0a0u8bzjy735Qt3Hk33nijVqxYofvuu0+/+MUvAvc6qK2tVVlZmTwej84991w98MADluTZ/WPHB+MePFu2bGn2ybC23jMnXPffsfv9aA63mzPa/XVs9/1CCv+9rtqi05eR/Wpra7Vu3bomf+zU1NQ23VyntrZWa9euVXV1tSSp
d+/eSktLa9eNerZt29ZkbAMGDGhzVrjz7HxTNil8O2Y4i+vByIN92P2AxQEQHc1hU0YOpqioKG3atEmDBw+2eigIg3AW13DnhfMeN9J3B62WDkx+v1+ff/65+vXr16bc/c4880w99thj6t+/f0jreb1eOZ3OwD0xPv74Yy1evFiVlZXq37+/rrrqqpAK9qZNm7Ru3TqNGjVKAwcO1HvvvaeioiL5/X5deOGFysrKCml8sBf2i7btF5J99o3Dooz83//9n9atW6cePXpoyJAhTebt3btXf/3rX5WTk/OTOR6Pp8XpCxYs0BVXXKH4+HhJ0vz584Ma1/r169W9e/fAi+epp57SokWLAi+svLw8XXrppUFl7bdw4UJVVFRo3LhxuvTSS/XUU09p3rx58vv9+uUvf6k77rij2ZdTHcjBuIna559/rri4OHXr1q3JdJ/Pp/Ly8qC/M6M1AwcO1MqVK3Xccce1K8dOdu3apYsuukj/+te/1K9fvyan3isrK3X66afrhRdeCJzy/im1tbW6+uqr9fe//12xsbG69tprVVBQIJfLFcjt27dv4Isqf8qLL77Y4vRf/vKXWrBgQeAtgfPPPz+ovFGjRikvL0+/+tWv9K9//Uu/+MUvdMIJJ2jw4MH68MMP9cEHH+jVV19VZmbmT2YtXbpUl1xyieLi4uT1evW3v/1NF198sdLS0uRyufTqq6/qySefVHZ2dlBj248DoPUHQPaLtu8X0sHbN9rkkFyZYqEPPvggcF8Hp9NpRowYYXbs2BGYX1VVZZxOZ1BZDofDpKSkmFGjRjX5cTgc5tRTTzWjRo0yo0ePDnpsJ598siktLTXGGPPwww+bLl26mJtuusk8+OCDZsqUKaZbt27m0UcfDTpvzpw55ogjjjAXXXSRSUhIMHfddZeJj483d955p5k7d6456qijzMyZM4POC/dN1Hbu3GlOPfVU43Q6jcvlMldeeaXZs2dPYH4oz4UxxixYsKDFH5fLZfLz8wOPg/XZZ581uejvzTffNNnZ2eaMM84wl19+uVm9enXQWfv9/e9/NzNmzDBvvfWWMcaYsrIyc84555isrCzz0EMPBZ0T7o8d33TTTeb44483zz33nHn44YdN//79zfjx443X6zXGfPdcOByOoPP2718/vsnWD39CeW5jY2PNhx9+aIwxZuTIkeaWW25pMn/69Onm9NNPDyrrlFNOMXfeeacxxpi//OUvJi4uztxxxx2B+ffee69JSUkJemzhvh9NTU2Nufjii010dLTp1auXmTFjRpP7b4S6XyxfvrzFH5fLZRYuXBh4HKyRI0ea5557zhjz3cdu3W63Ofnkk83EiRPNsGHDTExMTEj7xgsvvGBcLpeJj4833bp1M6WlpSYuLs6MGTPGZGVlGZfLZZ555pmgstgv2r5fGBP+faM9On0ZmTBhghk/frzZvXu3+eijj8z48ePNgAEDzKeffmqMCW1HnzdvnhkwYIApKytrMj0iIsK89957IY+tS5cuZvv27cYYY4YNG2b+/Oc/N5n/zDPPmCFDhgSdd8wxx5gXXnjBGGPMxo0bjcvlMk8//XRg/tKlS82xxx4bdF64b6KWk5NjMjIyzL///W9TWlpqUlNTTVpamvnqq6+MMW3b0Y8++miTnJzc5MfhcJjExESTnJxsBgwYEHReenq6+fvf/26MMWbZsmXG6XSa888/39x2223mwgsvNJGRkYH5wVi0aJGJiIgwqampJjY21jz11FPmiCOOMFdffbW59tprTZcuXUxhYWFQWeG+x02/fv3M66+/Hni8e/duk56ebs466yyzd+/ekA+A+z+h8eODcFv3ja5du5rNmzcbY767b8bGjRubzN+6dWvQv2/Xrl3Ntm3bjDHG+P1+ExkZ2eTeDx9//HFIfzsOgPY5ALJftH2/2J8Xzn2jPTp9GenVq1eTP67f7zfXXXed6devn/n4449DfnFVVFSY448/3vzv//6vaWhoMMa0/YUVHx9v1q5dGxhnSy+sUG96tr9kGWNMZGSk+c9//hN4vH37dhMTExNSXjhvota3b1+zZs2awOO9e/ea8847z6SkpJj//ve/IT8X1157rUlJSTHvv/9+k+nt2dE/+eQTY4wxGRkZ5q677moy/4EHHjDDhg0LOm/IkCGBgvnaa6+Z6OhoU1RUFJj/2GOPmcGDBweVFe6PHXfp0iXwu+5XW1trMjMzzZlnnmk++eSTkJ4LY4yZP3++SUpKalLY2vpcnHnmmebuu+82xhgzfPhw88QTTzSZ//zzz5t+/foFlZWQkBDYz7766ivjcDiaHHAqKipMQkJC0GPjAGifAyD7Rdv3C2PCv2+0R6cvI0cccUSzg5Uxxtx4443m6KOPNm+++WbIL649e/aYnJwcc/LJJ5t3333XREZGtumFdcUVV5irrrrKGGPMxRdfbKZPn95k/ty5c81JJ50UdN6AAQPMyy+/bIwx5sMPPzROp9P89a9/DcxfsWKFSU5ODjqvT58+BzwT8OKLL5o+ffoEnde1a9fA/2Ht5/P5zIQJE8zJJ59s3nnnnZCfi6VLl5qkpCTzwAMPBKa1dUc/8sgjzaZNm4wx35XD/f++39atW0Mucz8uhz8sd9u2bQs6L9z3uDnhhBPMihUrmk3fs2ePyczMNEOHDg35uTDGmA0bNpghQ4aYa665xtTV1bX5uVi9erU58sgjTUFBgXnggQdMz549zfTp080zzzxjZs6caeLi4swf/vCHoLKuuOIKk5GRYZ5++mlz3nnnmaysLHPaaaeZzZs3my1btpiRI0eGdCaDA6B9DoDsF23fL4wJ/77RHp2+jJx66qnmySefbHHejTfeaOLi4tr04jLmu1OMvXv3Nk6ns00vrB07dpjk5GQzYsQI4/F4TJcuXcwZZ5xhJk+ebEaMGGGioqJa3DFaM336dHPUUUeZq6++2gwYMMBMmzbN9OvXzzz44INm0aJFJikpqdkp1gMJ903UTjrpJPP88883m76/kPTr169Nz8Xnn39uzjzzTHP22WebL774os07+vnnn2+mTZtmjDEmKyur2fUmDz/8sDnuuOOCzttfdo357rl2OBxNns9Vq1aZo48+OqisvXv3muuuu85ERUUZp9NpoqOjTXR0tHE6nSYqKspcf/31Zu/evUGP7X/+539a/Y9MbW2tycjIaPN+UV9fb6699lpz3HHHGZfL1abnwpjv/sN72mmnNXuLITExMei3t4z57m2OsWPHmm7dupmsrCzzzTffmLy8vMDbFccdd5zZunVr0HkcAO1zAGxtv3A4HOwXQQj3vtEenb6MzJ0715xzzjmtzr/++utDej/2xz777DOzbNky8+2337Zp/a+//trcdtttZsiQISY6OtpERUWZ/v37m+zs7AN+Z0BLGhsbze9//3tz7rnnmrlz5xq/32/+8pe/mKSkJBMfH29+/etfhzzOu+66y/Tp0yfw4tz/XnSfPn1C+g+QMcZMnTq11WtMfD6fOf/889v8XPj9fjN37lyTkJDQ5h39/fffN/Hx8SYnJ8fMmTPHdOvWzVxxxRXm97/
/vcnJyTFut9s89thjQefdeOON5rjjjjN33nmnSU9PN5MmTTKDBg0yL7/8sikpKTEnnXSS+c1vfhPSGGtqasxrr71miouLTXFxsXnttddavKbnp3z11VdN3sL7sdra2gP+338wli9fbqZMmRLSxZwt2bVrl3n77bfN6tWrA6f3w+Hjjz827777rvH5fCGtRzE8eAdAh8PRpgNgTU2NKSsrC+wXZWVlYd0v/H6/MSZ8+8VNN90U1v3ix2fW2qut+0Z7HBYf7UX7heMmavv27VN9fX2r99fYt2+fduzYEfJHD39o3bp1euutt5STk6Pu3buHvP7HH3+s6dOna8WKFfr2228lSRERETr11FN16623asKECUFn1dXV6ZZbblF5ebmGDx+uBx54QPfff79uv/12+Xw+jRw5UkuWLAn6Y4ewl3DdP+brr7/Wzp079bOf/azF+Xv27NH69es1cuTINo/1xRdf1Ouvv678/Px2vd52796tTz75RH6/X3369FFycnKbs37sk08+UX19vQYNGhTS7QdaEu57P5F38FFG0GafffaZCgoKtHjx4k6XZ4zRrl275Pf71bNnz8A9FsJh79698vl8OuKII0JaL1z3yzkc88I9tv13D91/x9AtW7ZowYIF8nq9uuKKK3TmmWcGnXWo8goLC9XQ0NCuvOHDh+uEE04I2/jamxfuez+R1768djlk52DQ6WzcuLHNp487Yl5lZaXJzc21JC+c98tpLW/nzp2dMi/cY3v55ZdNVFSU6dGjh4mOjjYvv/yyOeqoo8yYMWPMmWeeaVwuV7OP/5N3cPLCfe8n8tqX1x6UEbSqtZsn7f+57777wnIzJrvk/RQry1I475dzuOWFe2yZmZnm9ttvN8Z8dxF79+7dze9+97vA/GnTppmxY8eSdwjywn3vJ/Lal9celBG0Ktw3T7J7np3LUrjvl3M45YV7bLGxsYE7Dzc2NpqIiIgm9x159913Te/evck7RHnhvPcTee3PayvKCFrVt29fs2zZslbnb9iwIaT/iNs9z85lKdz3yzmc8sI9ttjY2Caf9ujWrZv5+OOPA4+3b99uoqOjyTtEecaE795P5IUnry1C/x5pHDZSU1O1bt26Vuc7HA6ZEK5/tntenz59tHTpUvn9/hZ/1q9fH3RWuPMGDRqktWvXNpu+cOFCXXDBBUF/0dbhmBfusSUnJ+ujjz4KPC4vL2/yJXaVlZXq06cPeYcoT5K6deumJ554Qvn5+RozZkzQX2RH3sHJawvKCFp16623avjw4a3OP/bYY/X66693mjw7l6ULL7xQf/nLX1qct3DhQl122WUhje1wygv32K6//vom/7E+8cQTm3wU9eWXXw7p0yXktS/vhy699FKtXbtWS5cubdctAsgLT14o+Ggv8P/985//VF1dnc4+++wW59fV1Wnt2rVB3+8h3HkA0FlRRgAAgKV4mwYAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYKn/B/RbJggBMCCSAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "zh\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGyCAYAAAA2+MTKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABDa0lEQVR4nO3df1zV9f3///s5RzhIRiimGJFov9SZYhAMt/mjYZT9XtssKhwr+8m7H+e9TJYKZst+Gv1g0SqrVSz3w9k2C0eUtgbJxLTeZfbT0XSgWwUGHw9HzvP7R19OEaDnwIHXC7hdLxcudl4/7q8HnvPyPHr9eL4cxhgjAAAAizitLgAAAAxuNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSNCMAAMBSQ6wuIBh+v1+7d+/W4YcfLofDYXU5AAAgCMYY7du3T0cddZSczq6Pf/SLZmT37t1KTEy0ugwAANANn3zyiY4++ugu5/eLZuTwww+XJH388ccaMWJEj/N8Pp/++te/6rTTTlNERIRtssizV56dayOP95a8vsmzc239Ia+xsVGJiYmB7/Gu9ItmpO3UzOGHH66YmJge5/l8PkVHRysmJiYsH9RwZZFnrzw710Ye7y15fZNn59r6Q16bQ11iwQWsAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUjQjAADAUiE3I6+++qrOPvtsHXXUUXI4HFq7du0h19mwYYNOPvlkud1uHXfccXryySe7USoAABiIQm5GmpqaNHXqVBUXFwe1/Mcff6wzzzxTs2fP1tatW3XDDTfo8ssv1/r160MuFgAADDwhP7X3jDPO0BlnnBH08iUlJRo3bpzuvfdeSdLEiRP12muv6b777lNWVlaomwcAAANMyM1IqKqqqpSZmdluWlZWlm644YYu1/F6vfJ6vYHXjY2Nkr58tLHP5+txTW0Zdssiz155dq6NPPtkkTew8+xcW3/KOxSHMcZ0dyMOh0N//OMfdd5553W5zAknnKDc3Fzl5+cHpr3wwgs688wz1dzcrKFDh3ZYp7CwUMuWLeswvbS0VNHR0d0tFwAA9KHm5mZlZ2eroaFBMTExXS7X60dGuiM/P18ejyfwurGxUYmJiZo9e7bi4uJ6nO/z+VReXq45c+YoIiLCNlnk2SvPzrWRx3tLXt/k2bm2/pDXdmbjUHq9GYmPj1d9fX27afX19YqJien0qIgkud1uud3uDtMjIiLC8pfTG3l2ro08+2SRZ688O9dGnr3y7FybnfOCzej1cUYyMjJUUVHRblp5ebkyMjJ6e9MAAKAfCLkZ+eKLL7R161Zt3bpV0pe37m7dulW1tbWSvjzFkpOTE1j+qquu0kcffaSFCxfq3Xff1S9/+Uv99re/1Y033hie3wAAAPRrITcjmzdv1rRp0zRt2jRJksfj0bRp07R06VJJ0r///e9AYyJJ48aN07p161ReXq6pU6fq3nvv1WOPPcZtvQAAQFI3rhmZNWuWDnYDTmejq86aNUtvvPFGqJsCAACDAM+mAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlqIZAQAAlupWM1JcXKykpCRFRUUpPT1d1dXVXS7r8/l066236thjj1VUVJSmTp2qsrKybhcMAAAGlpCbkdWrV8vj8aigoEBbtmzR1KlTlZWVpT179nS6/OLFi/XII4/owQcf1DvvvKOrrrpK559/vt54440eFw8AAPq/kJuRlStXasGCBcrNzdWkSZNUUlKi6OhorVq1qtPln376af385z/X3LlzNX78eF199dWaO3eu7r333h4XDwAA+r8hoSzc0tKimpoa5efnB6Y5nU5lZmaqqqqq03W8Xq+ioqLaTRs6dKhee+21Lrfj9Xrl9XoDrxsbGyV9ecrH5/OFUnKn2jLslkWevfLsXBt59skib2Dn2bm2/pR3KA5jjAk2dPfu3UpISFBlZaUyMjIC0xcuXKiNGzdq06ZNHdbJzs7Wtm3btHbtWh177LGqqKjQueeeq9bW1nYNx9cVFhZq2bJlHaaXlpYqOjo62HIBAICFmpublZ2drYaGBsXExHS5XEhHRrrj/vvv14IFCzRhwgQ5HA4de+yxys3N7fK0jiTl5+fL4/EEXjc2NioxMVGzZ89WXFxcj2vy+XwqLy/XnDlzFBERYZss8uyVZ+fayOO9Ja9v8uxcW3/IazuzcSghNSMjR46Uy+VSfX19u+n19fWKj4/vdJ0jjzxSa9eu1f79+/Xf//5XRx11lBYtWqTx48d3uR232y23291hekRERFj+cnojz861kWefLPLslWfn2sizV56da7NzXrAZIV3AGhkZqZSUFFVUVASm+f1+VVRUtDtt05moqCglJCTowIED+sMf/qBzzz03lE0DAIABKuTTNB6PR/Pnz1dqaqrS0tJUVFSkpqYm5ebmSpJycnKUkJCgFStWSJI2bdqkXbt2KTk5Wbt27VJhYaH8fr8WLlwY3t8EAAD0SyE3I/PmzdPevXu1dOlS1dXVKTk5WWVlZRo9erQkqba2Vk7nVwdc9u/fr8WLF+ujjz7SsGHDNHfuXD399NOKjY0N2y8BAAD6r25dwJqXl6e8vLxO523YsKHd65kzZ+qdd97pzmYAAMAgwLNpAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApXr92TQAAGBwSVq0TpLk9zYHtTxHRgAAgKU4MgIAAA6q7UhHb+HICAAAsBTNCAAAsBSnaQAAGGS6Ou3idhndlSZNLlwvb6ujz+rhyAgAALAUR0YAAOjn+vpIRrhxZAQAAFiKZgQAAFiK0zQAANhMsON6tF1w2t9xZAQAAFiKIyMAAPSytiMdVt06a3ccGQEAAJaiGQEAAJbiNA0AACHq7QfHDTYcGQEAAJbiyAgAYNCz27NaBptuHRkpLi5WUlKSoqKilJ6erurq6oMuX1RUpBNPPFFDhw5VYmKibrzxRu3fv79bBQMAgIEl5GZk9erV8ng8Kigo0JYtWzR16lRlZWVpz549nS5fWlqqRYsWqaCgQNu3b9fjjz+u1atX6+c//3mPiwcAAP1fyKdpVq5cqQULFig3N1eSVFJSonXr1mnVqlVatGhRh+UrKyv1ne98R9nZ2ZKkpKQkXXTRRdq0aVMPSwcAoGucWuk/QmpGWlpa
VFNTo/z8/MA0p9OpzMxMVVVVdbrO9OnT9cwzz6i6ulppaWn66KOP9MILL+jSSy/tcjter1derzfwurGxUZLk8/nk8/lCKblTbRl2yyLPXnl2ro08+2SRZ988t9P0OKstIxxZgzGv1RVcjsMYE/QWd+/erYSEBFVWViojIyMwfeHChdq4cWOXRzseeOAB/exnP5MxRgcOHNBVV12lhx9+uMvtFBYWatmyZR2ml5aWKjo6OthyAQCAhZqbm5Wdna2GhgbFxMR0uVyv302zYcMG3X777frlL3+p9PR0ffDBB7r++uu1fPlyLVmypNN18vPz5fF4Aq8bGxuVmJio2bNnKy4ursc1+Xw+lZeXa86cOYqIiLBNFnn2yrNzbeTx3pJ36Lwlm53y+nt2msbtNFqe6g9L1mDMa/W6gloupGZk5MiRcrlcqq+vbze9vr5e8fHxna6zZMkSXXrppbr88sslSSeddJKampp0xRVX6JZbbpHT2fEaWrfbLbfb3WF6REREWD6ovZFn59rIs08WefbKs3Nt5LUXyiBjbbfjev2OsF0zEs6swZTnDzIjpLtpIiMjlZKSooqKiq825PeroqKi3Wmbr2tubu7QcLhcX3ZKIZwhAgAAA1TIp2k8Ho/mz5+v1NRUpaWlqaioSE1NTYG7a3JycpSQkKAVK1ZIks4++2ytXLlS06ZNC5ymWbJkic4+++xAUwIAAAavkJuRefPmae/evVq6dKnq6uqUnJyssrIyjR49WpJUW1vb7kjI4sWL5XA4tHjxYu3atUtHHnmkzj77bP3iF78I328BAOhXvn7ahVFO0a0LWPPy8pSXl9fpvA0bNrTfwJAhKigoUEFBQXc2BQAABjgelAcAACzFg/IAAD0Wyt0uwDdxZAQAAFiKZgQAAFiKZgQAAFiKa0YAAB0c7BoQbsVFuHFkBAAAWIpmBAAAWIrTNAAwSHBaBXbFkREAAGApmhEAAGApmhEAAGAprhkBgH4olOHX227FBeyKIyMAAMBSNCMAAMBSNCMAAMBSXDMCADbw9WtAGG4dgw1HRgAAgKVoRgAAgKVoRgAAgKW4ZgQAekEo44AAgx1HRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKW6dQFrcXGx7r77btXV1Wnq1Kl68MEHlZbW+VOYZs2apY0bN3aYPnfuXK1bxwVeAPqHg12QyiBlQM+EfGRk9erV8ng8Kigo0JYtWzR16lRlZWVpz549nS6/Zs0a/fvf/w78/N///Z9cLpd+9KMf9bh4AADQ/4XcjKxcuVILFixQbm6uJk2apJKSEkVHR2vVqlWdLj9ixAjFx8cHfsrLyxUdHU0zAgAAJIV4mqalpUU1NTXKz88PTHM6ncrMzFRVVVVQGY8//rguvPBCHXbYYV0u4/V65fV6A68bGxslST6fTz6fL5SSO9WWYbcs8uyVZ+fayOv7LLfLdD3Padr92VPk2SfPzrX1h7zWg+w3X+cwxgS9xd27dyshIUGVlZXKyMgITF+4cKE2btyoTZs2HXT96upqpaena9OmTV1eYyJJhYWFWrZsWYfppaWlio6ODrZcAABgoebmZmVnZ6uhoUExMTFdLtenI7A+/vjjOumkkw7aiEhSfn6+PB5P4HVjY6MSExM1e/ZsxcXF9bgOn8+n8vJyzZkzRxEREbbJIs9eeXaujby+f28nF67vcp7babQ81a8lm53y+nt+ASt59smzc239Ia/V6wpquZCakZEjR8rlcqm+vr7d9Pr6esXHxx903aamJj333HO69dZbD7kdt9stt9vdYXpERERY/lHrjTw710aefbLIs1fetF+8HMLdL4dezut3hPVuGvLsk2fn2uyc5w8yI6QLWCMjI5WSkqKKioqvNuT3q6Kiot1pm8787ne/k9fr1SWXXBLKJgEAwAAX8mkaj8ej+fPnKzU1VWlpaSoqKlJTU5Nyc3MlSTk5OUpISNCKFSvarff444/rvPPOC8tpFgAAMHCE3IzMmzdPe/fu1dKlS1VXV6fk5GSVlZVp9OjRkqTa2lo5ne0PuOzYsUOvvfaa/vrXv4anagAAMGB06wLWvLw85eXldTpvw4YNHaadeOKJCuGmHQAAMIjwbBoAAGCpPr21FwB6y8GeHfNNbc+SAWAPHBkBAACWohkBAACWohkBAACWohkBAACWohkBAACWohkBAACW4tZeALb0zVt1227HnVy4PqwPBANgPY6MAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAASzHOCIA+8c1xQwCgDUdGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApWhGAACApbrVjBQXFyspKUlRUVFKT09XdXX1QZf//PPPde2112rMmDFyu9064YQT9MILL3SrYAAAMLCEPOjZ6tWr5fF4VFJSovT0dBUVFSkrK0s7duzQqFGjOizf0tKiOXPmaNSoUfr973+vhIQE/fOf/1RsbGw46gdgkUMNYuZ2Gd2VJk0uXC9vq6OPqgLQH4XcjKxcuVILFixQbm6uJKmkpETr1q3TqlWrtGjRog7Lr1q1Sp9++qkqKysVEREhSUpKSupZ1QAAYMAIqRlpaWlRTU2N8vPzA9OcTqcyMzNVVVXV6Tp/+tOflJGRoWuvvVbPP/+8jjzySGVnZ+vmm2+Wy+XqdB2v1yuv1xt43djYKEny+Xzy+XyhlNyptgy7ZZFnrzw712aHPLfLHHy+07T7s6fCmWfn2sizV56da+sPea2H+HeijcMYE/QWd+/erYSEBFVWViojIyMwfeHChdq4caM2bdrUYZ0JEyZo586duvjii3XNNdfogw8+0DXXXKPrrrtOBQUFnW6nsLBQy5Yt6zC9tLRU0dHRwZYLAAAs1NzcrOzsbDU0NCgmJqbL5Xr9QXl+v1+jRo3Sr371K7lcLqWkpGjXrl26++67u2xG8vPz5fF4Aq8bGxuVmJio2bNnKy4ursc1+Xw+lZeXa86cOYFTR3bIIs9eeXauzQ55kwvXH3S+22m0PNWvJZud8vp7fs1IOPPsXBt59sqzc239Ia/V2/kZkG8KqRkZOXKkXC6X6uvr202vr69XfHx8p+uMGTNGERER7U7JTJw4UXV1dWppaVFkZGSHddxut9xud4fpERERYflHtzfy7FwbefbJGkh5wV6U6vU7wnoBazjz7FwbefbKs3Ntds7zB5kR0q29kZGRSklJUUVFxVcb8vtVUVHR7rTN133nO9/RBx98IL/fH5j23nvvacyYMZ02IgAAYHAJeZwRj8ejRx99VE899ZS2b9+uq6++Wk1NTYG7a3Jyctpd4Hr11Vfr008/1fXXX6/33ntP69at0+23365rr702fL8FAADot0K+ZmTevHnau3evli5dqrq6OiUnJ6usrEyjR4+WJNXW1srp/KrHSUxM1Pr163XjjTdqypQpSkhI0PXXX6+bb745fL8FAADot7p1AWteXp7y8vI6nbdhw4YO0zIyMvT66693Z1MAAGCA6/W7aQD0D5ML1zNiKgBL8KA8AABgKZoRAABgKZoRAAB
gKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZoRAABgKZ5NAwxQSYvWhbS829VLhQDAIXBkBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWKpbzUhxcbGSkpIUFRWl9PR0VVdXd7nsk08+KYfD0e4nKiqq2wUDAICBJeQH5a1evVoej0clJSVKT09XUVGRsrKytGPHDo0aNarTdWJiYrRjx47Aa4fD0f2KgUHqmw++c7uM7kqTJheul7eVfQpA/xXykZGVK1dqwYIFys3N1aRJk1RSUqLo6GitWrWqy3UcDofi4+MDP6NHj+5R0QAAYOAI6chIS0uLampqlJ+fH5jmdDqVmZmpqqqqLtf74osvNHbsWPn9fp188sm6/fbb9a1vfavL5b1er7xeb+B1Y2OjJMnn88nn84VScqfaMuyWRZ698uxWm9tl2r92mnZ/9tRgyrNzbeTZK8/OtfWHvFZXcDkOY0zQW9y9e7cSEhJUWVmpjIyMwPSFCxdq48aN2rRpU4d1qqqq9P7772vKlClqaGjQPffco1dffVVvv/22jj766E63U1hYqGXLlnWYXlpaqujo6GDLBQAAFmpublZ2drYaGhoUExPT5XIhXzMSqoyMjHaNy/Tp0zVx4kQ98sgjWr58eafr5Ofny+PxBF43NjYqMTFRs2fPVlxcXI9r8vl8Ki8v15w5cxQREWGbLPLslWe32iYXrm/32u00Wp7q15LNTnn9Pb9mZDDl2bk28uyVZ+fa+kNeq9cV1HIhNSMjR46Uy+VSfX19u+n19fWKj48PKiMiIkLTpk3TBx980OUybrdbbre703XD8aXQG3l2ro08+2T1JK+ri1S9fkdYL2AdTHl2ro08e+XZuTY75/mDzAjpAtbIyEilpKSooqLiqw35/aqoqGh39ONgWltb9dZbb2nMmDGhbBoAAAxQIZ+m8Xg8mj9/vlJTU5WWlqaioiI1NTUpNzdXkpSTk6OEhAStWLFCknTrrbfq29/+to477jh9/vnnuvvuu/XPf/5Tl19+eXh/EwAA0C+F3IzMmzdPe/fu1dKlS1VXV6fk5GSVlZUFbtetra2V0/nVAZfPPvtMCxYsUF1dnYYPH66UlBRVVlZq0qRJ4fstAABAv9WtC1jz8vKUl5fX6bwNGza0e33ffffpvvvu685mAADAIMCzaQAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKV6fTh4AJ1LWrTO6hIAwBY4MgIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACxFMwIAACw1xOoCgIEiadG6g853u4zuSpMmF66Xt9XRR1UBgP1168hIcXGxkpKSFBUVpfT0dFVXVwe13nPPPSeHw6HzzjuvO5sFAAADUMjNyOrVq+XxeFRQUKAtW7Zo6tSpysrK0p49ew663s6dO/Wzn/1M3/ve97pdLAAAGHhCbkZWrlypBQsWKDc3V5MmTVJJSYmio6O1atWqLtdpbW3VxRdfrGXLlmn8+PE9KhgAAAwsIV0z0tLSopqaGuXn5wemOZ1OZWZmqqqqqsv1br31Vo0aNUqXXXaZ/va3vx1yO16vV16vN/C6sbFRkuTz+eTz+UIpuVNtGXbLIs9eeaFmuV3m4POdpt2fPUWePbLIG9h5dq6tP+S1HuLfxTYOY0zQW9y9e7cSEhJUWVmpjIyMwPSFCxdq48aN2rRpU4d1XnvtNV144YXaunWrRo4cqZ/85Cf6/PPPtXbt2i63U1hYqGXLlnWYXlpaqujo6GDLBQAAFmpublZ2drYaGhoUExPT5XK9ejfNvn37dOmll+rRRx/VyJEjg14vPz9fHo8n8LqxsVGJiYmaPXu24uLielyXz+dTeXm55syZo4iICNtkkWevvFCzJheuP+h8t9NoeapfSzY75fX3/G4a8uyRRd7AzrNzbf0hr9XrCmq5kJqRkSNHyuVyqb6+vt30+vp6xcfHd1j+ww8/1M6dO3X22WcHpvn9/i83PGSIduzYoWOPPbbDem63W263u8P0iIiIsHxh9UaenWsjr2+ygr1d1+t3hPXWXvLskUXewM6zc212zvMHmRHSBayRkZFKSUlRRUXFVxvy+1VRUdHutE2bCRMm6K233tLWrVsDP+ecc45mz56trVu3KjExMZTNAwCAASjk0zQej0fz589Xamqq0tLSVFRUpKamJuXm5kqScnJylJCQoBUrVigqKkqTJ09ut35sbKwkdZgOAAAGp5CbkXnz5mnv3r1aunSp6urqlJycrLKyMo0ePVqSVFtbK6eTUeYBAEBwunUBa15envLy8jqdt2HDhoOu++STT3ZnkwAAYIDiEAYAALAUzQgAALAUzQgAALAUzQgAALAUzQgAALAUzQgAALAUzQgAALBUrz4oD+jPJheu111pX/4Zzmc+AADa48gIAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwFM0IAACwVLeakeLiYiUlJSkqKkrp6emqrq7uctk1a9YoNTVVsbGxOuyww5ScnKynn3662wUDAICBJeRmZPXq1fJ4PCooKNCWLVs0depUZWVlac+ePZ0uP2LECN1yyy2qqqrSm2++qdzcXOXm5mr9+vU9Lh4AAPR/ITcjK1eu1IIFC5Sbm6tJkyappKRE0dHRWrVqVafLz5o1S+eff74mTpyoY489Vtdff72mTJmi1157rcfFAwCA/m9IKAu3tLSopqZG+fn5gWlOp1OZmZmqqqo65PrGGL388svasWOH7rzzzi6X83q98nq9gdeNjY2SJJ/PJ5/PF0rJnWrLsFsWefbKcztNuz/JGzh5dq6NPHvl2bm2/pDX6goux2GMCXqLu3fvVkJCgiorK5WRkRGYvnDhQm3cuFGbNm3qdL2GhgYlJCTI6/XK5XLpl7/8pX760592uZ3CwkItW7asw/TS0lJFR0cHWy4AALBQc3OzsrOz1dDQoJiYmC6XC+nISHcdfvjh2rp1q7744gtVVFTI4/Fo/PjxmjVrVqfL5+fny+PxBF43NjYqMTFRs2fPVlxcXI/r8fl8Ki8v15w5cxQREWGbLPJ6N29yYWjXKbmdRstT/Vqy2Smv39Hj2sizT56dayPPXnl2rq0/5LV6XUEtF1IzMnLkSLlcLtXX17ebXl9fr/j4+C7XczqdOu644yRJycnJ2r59u1asWNFlM+J2u+V2uztMj4iICMsXVm/k2bk28r7kbe3ejuX1O7q9Lnn2zrNzbeTZK8/Otdk5zx9kRkgXsEZGRiolJUUVFRVfbcjvV0VFRbvTNocszu9vd00IAAAYvEI+TePxeDR//nylpqYqLS1NRUVFam
pqUm5uriQpJydHCQkJWrFihSRpxYoVSk1N1bHHHiuv16sXXnhBTz/9tB5++OHw/iYAAKBfCrkZmTdvnvbu3aulS5eqrq5OycnJKisr0+jRoyVJtbW1cjq/OuDS1NSka665Rv/61780dOhQTZgwQc8884zmzZsXvt8CAAD0W926gDUvL095eXmdztuwYUO717fddptuu+227mwGAAAMAjybBgAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWIpmBAAAWGqI1QUA3ZW0aF2HaW6X0V1p0uTC9fK2OiyoCgAQKo6MAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS9GMAAAAS3WrGSkuLlZSUpKioqKUnp6u6urqLpd99NFH9b3vfU/Dhw/X8OHDlZmZedDlAQDA4BJyM7J69Wp5PB4VFBRoy5Ytmjp1qrKysrRnz55Ol9+wYYMuuugivfLKK6qqqlJiYqJOO+007dq1q8fFAwCA/i/kZmTlypVasGCBcnNzNWnSJJWUlCg6OlqrVq3qdPlnn31W11xzjZKTkzVhwgQ99thj8vv9qqio6HHxAACg/wvp2TQtLS2qqalRfn5+YJrT6VRmZqaqqqqCymhubpbP59OIESO6XMbr9crr9QZeNzY2SpJ8Pp98Pl8oJXeqLcNuWeSFxu0yHac5Tbs/eyKcWeTZK8/OtZFnrzw719Yf8lo7+Xe6Mw5jTNBb3L17txISElRZWamMjIzA9IULF2rjxo3atGnTITOuueYarV+/Xm+//baioqI6XaawsFDLli3rML20tFTR0dHBlgsAACzU3Nys7OxsNTQ0KCYmpsvl+vSpvXfccYeee+45bdiwoctGRJLy8/Pl8XgCrxsbG5WYmKjZs2crLi6ux3X4fD6Vl5drzpw5ioiIsE0WeaHlTS5c32Ga22m0PNWvJZud8vp79tTecGaRZ688O9dGnr3y7Fxbf8hr9bqCWi6kZmTkyJFyuVyqr69vN72+vl7x8fEHXfeee+7RHXfcoZdeeklTpkw56LJut1tut7vD9IiIiLB8AfZGnp1rG6h53taudxSv33HQ+SFtJ4xZ5Nkrz861kWevPDvXZuc8f5AZIV3AGhkZqZSUlHYXn7ZdjPr10zbfdNddd2n58uUqKytTampqKJsEAAADXMinaTwej+bPn6/U1FSlpaWpqKhITU1Nys3NlSTl5OQoISFBK1askCTdeeedWrp0qUpLS5WUlKS6ujpJ0rBhwzRs2LAw/ioAAKA/CrkZmTdvnvbu3aulS5eqrq5OycnJKisr0+jRoyVJtbW1cjq/OuDy8MMPq6WlRT/84Q/b5RQUFKiwsLBn1QMAgH6vWxew5uXlKS8vr9N5GzZsaPd6586d3dkEAAAYJHg2DQAAsBTNCAAAsFSfjjMCHEzSonVWlwAAsABHRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKWGWF0ABq6kResOuYzbZXRXmjS5cL0kR+8XBQCwHY6MAAAAS9GMAAAAS3WrGSkuLlZSUpKioqKUnp6u6urqLpd9++23dcEFFygpKUkOh0NFRUXdrRUAAAxAITcjq1evlsfjUUFBgbZs2aKpU6cqKytLe/bs6XT55uZmjR8/XnfccYfi4+N7XDAAABhYQm5GVq5cqQULFig3N1eTJk1SSUmJoqOjtWrVqk6XP+WUU3T33XfrwgsvlNvt7nHBAABgYAnpbpqWlhbV1NQoPz8/MM3pdCozM1NVVVVhK8rr9crr9QZeNzY2SpJ8Pp98Pl+P89sy7JY10PLcLnPoZZym3Z89Fc48O9dGnn2yyBvYeXaurT/ktQbxPSBJDmNM0FvcvXu3EhISVFlZqYyMjMD0hQsXauPGjdq0adNB109KStINN9ygG2644aDLFRYWatmyZR2ml5aWKjo6OthyAQCAhZqbm5Wdna2GhgbFxMR0uZwtxxnJz8+Xx+MJvG5sbFRiYqJmz56tuLi4Huf7fD6Vl5drzpw5ioiIsE3WQMv7cuyQg3M7jZan+rVks1Nef8/HGQlnnp1rI4/3lry+ybNzbf0hr9XrCmq5kJqRkSNHyuVyqb6+vt30+vr6sF6c6na7O72+JCIiIixfqL2RZ+farMrztgb/Qfb6HSEt35d5dq6NPPtkkTew8+xcm53z/EFmhHQBa2RkpFJSUlRRUfHVhvx+VVRUtDttAwAAEKyQT9N4PB7Nnz9fqampSktLU1FRkZqampSbmytJysnJUUJCglasWCHpy4te33nnncB/79q1S1u3btWwYcN03HHHhfFXAQAA/VHIzci8efO0d+9eLV26VHV1dUpOTlZZWZlGjx4tSaqtrZXT+dUBl927d2vatGmB1/fcc4/uuecezZw5Uxs2bOj5bwAAAPq1bl3AmpeXp7y8vE7nfbPBSEpKUgg37AAAgEGGZ9MAAABL2fLWXthT0qJ1cruM7kr78rbdcF65DQAYvDgyAgAALEUzAgAALEUzAgAALEUzAgAALEUzAgAALEUzAgAALEUzAgAALEUzAgAALEUzAgAALEUzAgAALMVw8INY0qJ1VpcAAABHRgAAgLVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKW4m2YA6ezuGLfL6K40aXLhenlbHRZUBQDAwXFkBAAAWIpmBAAAWIpmBAAAWIprRmyMEVIBAIMBR0YAAIClutWMFBcXKykpSVFRUUpPT1d1dfVBl//d736nCRMmKCoqSieddJJeeOGFbhULAAAGnpBP06xevVoej0clJSVKT09XUVGRsrKytGPHDo0aNarD8pWVlbrooou0YsUKnXXWWSotLdV5552nLVu2aPLkyWH5JfqDYE+5fP1WXIlbcQEAA1/IzcjKlSu1YMEC5ebmSpJKSkq0bt06rVq1SosWLeqw/P3336/TTz9dN910kyRp+fLlKi8v10MPPaSSkpIelm+dtuaCcTwAAOiZkJqRlpYW1dTUKD8/PzDN6XQqMzNTVVVVna5TVVUlj8fTblpWVpbWrl3b5Xa8Xq+8Xm/gdUNDgyTp008/DaXcLvl8PjU3N+u///2vIiIiJEnpKypCymj7ixviN2pu9muIz6lWf8+bEfLsk2fn2sjjvSWvb/LsXFt/yPMfaJYkGWMOvqAJwa5du4wkU1lZ2W76TTfdZNLS0jpdJyIiwpSWlrabVlxcbEaNGtXldgoKCowkfvjhhx9++OFnAPx88sknB+0vbHlrb35+frujKZ9//rnGjh2r2tpaHXHEET3Ob2xsVGJioj755BPFxMTYJos8e+XZuTbyeG/J65s8O9fWH/KMMdq3b5+OOuqogy4XUjMycuRIuVwu1dfXt5teX1+v+Pj4TteJj
48PaXlJcrvdcrvdHaYfccQRYfnLaRMTExO2vHBmkWevPDvXRp59ssgb2Hl2rs3uecEcRAjp1t7IyEilpKSoouKr6yv8fr8qKiqUkZHR6ToZGRntlpek8vLyLpcHAACDS8inaTwej+bPn6/U1FSlpaWpqKhITU1NgbtrcnJylJCQoBUrVkiSrr/+es2cOVP33nuvzjzzTD333HPavHmzfvWrX4X3NwEAAP1SyM3IvHnztHfvXi1dulR1dXVKTk5WWVmZRo8eLUmqra2V0/nVAZfp06ertLRUixcv1s9//nMdf/zxWrt2bUhjjLjdbhUUFHR66qY7wpln59rIs08WefbKs3Nt5Nkrz8619Ye8YDmMOdT9NgAAAL2HZ9MAAABL0YwAAABL0YwAAABL0YwAAABL0YwAAABL2XI4+P/85z9atWqVqqqqVFdXJ+nLkVynT5+un/zkJzryyCMtrhCwVtuDJMN1+52d8xoaGtr9O9CTR0KEMwv2Y+fPcbjzwv1ZtnrfsN2RkX/84x864YQT9MADD+iII47QjBkzNGPGDB1xxBF64IEHNGHCBG3evDnk3AMHDmjbtm1av3691q9fr23btsnn83WrxnBmSVJdXZ2ef/55PfLII3rkkUf0/PPPBz4UdsiTvvyg7tixQzt27Ag8RXkg59lReXm55s6dq+HDhys6OlrR0dEaPny45s6dq5deemnA5T322GOaNGmSRowYoUmTJrX778cff9yyrG/65lPGe8rOeXbcb+3+ObbzftEbed0WzNN6+1J6erq54oorjN/v7zDP7/ebK664wnz7298OOq+1tdXccsstJjY21jgcjnY/sbGxZvHixaa1tbXPs4wx5osvvjAXX3yxcblcZsiQIWbUqFFm1KhRZsiQIcblcplLLrnENDU1WZZnjDGPPvqomThxonE6ne1+Jk6caB577LGQsvpDnjHG+Hw+s3XrVlNWVmbKysrM1q1bTUtLS7eywpX35JNPmiFDhpgLL7zQPPHEE+aFF14wL7zwgnniiSfMRRddZCIiIsyvf/3rAZN31113mejoaLNo0SLzyiuvmHfeece888475pVXXjH5+fnmsMMOM3fffXefZ7X561//as444wwTGxsb+MzFxsaaM844w5SXl4eU1R/y7Lrf2v1zbOf9ojfyesJ2zUhUVJTZvn17l/O3b99uoqKigs676aabzJFHHmlKSkrMxx9/bJqbm01zc7P5+OOPzSOPPGJGjRplFi5c2OdZxhhz2WWXmeOPP96UlZWZAwcOBKYfOHDArF+/3pxwwgnm8ssvtyzP7h/8cOeFu9kMZ97xxx9vHnrooS7nFxcXm+OOOy7o2uyed8wxx5jVq1d3Of+5554ziYmJfZ5ljP2/sAbTF6DdP8d23i96I68nbNeMJCUlmaeeeqrL+U899ZQZO3Zs0HmjR482ZWVlXc4vKyszo0aN6vMsY4yJjY01f//737uc/9prr5nY2FjL8uz+wQ93XribzXDmud1u8+6773Y5/9133w2pSbd7XlRUlHnnnXe6nP/222+boUOH9nmWMfb/whpMX4B2/xzbeb/ojbyesF0z8tBDDxm3222uu+468/zzz5vXX3/dvP766+b555831113nRk6dKgpLi4OOi86Otq8+eabXc7ftm2bOeyww/o8yxhjYmJizD/+8Y8u51dXV5uYmBjL8uz+wQ93XribzXDmnXzyyeamm27qcv7ChQvNySefHHRtds/73ve+Z3JycozP5+sw78CBAyYnJ8fMmDGjz7OMsf8X1mD6ArT759jO+0Vv5PWELZ9Ns3r1at13332qqalRa2urJMnlciklJUUej0c//vGPg84688wzdeDAAT377LMaOXJku3n/+c9/dOmll8rlcukvf/lLn2ZJ0sUXX6zt27fr8ccf17Rp09rNe+ONN7RgwQJNmDBBzzzzjCV5M2bM0Lhx4/T4449ryJD2N161trbqpz/9qXbu3KmNGzcOiLzDDjtMr7/+uk466aRO57/55puaPn26vvjiiz7P27Bhg8466yyNHz9emZmZgQdT1tfXq6KiQh999JHWrVunGTNmBFWb3fPefPNNZWVlyefzacaMGe3yXn31VUVGRuqvf/1rUA/cDGeWJKWkpOj73/++7rrrrk7n33zzzXrppZdUU1MzIPLsvN/a/XNs5/2iN/J6wpbNSBufz6f//Oc/kqSRI0cqIiIi5IxPPvlEc+fO1bvvvquTTjqp3V/2W2+9pUmTJukvf/mLEhMT+zRLkj777DNlZ2dr/fr1Gj58uEaNGiVJ2rNnjz7//HNlZWWptLRUsbGxluTZ/YMf7rxwN5vhztu5c6cefvhhvf766+1uwcvIyNBVV12lpKSkoHL6S96+ffv0zDPPdJqXnZ2tmJgYS7Ls/oU12L4A7f45tvN+0Rt53WXrZiRc/H6/1q9f3+lf9mmnnSanM/g7nMOZ1Wb79u2d5k2YMCHkrHDn2f2DH868cDeb4c6Dfdj9C4svQPQ3g6IZAYIV7mYz3HkHDhzQ22+/HcgaM2aMJk6c2K2jhv0hr66uTps2bWqXl5aWpvj4eEuzYC92/xzbeb/ojbzuGDTNSHV1dacjup5yyimWZrW0tGjt2rWd5p177rmKjIy0NE+y/wffDjtSb/P7/Vq6dKmKi4s7DA51xBFHKC8vT8uWLQu6ubF7XlNTk6688ko999xzcjgcGjFihCTp008/lTFGF110kR555BFFR0f3adbX2f0LazB8Adr9c2zn/aI38nqkTy6TtVB9fb357ne/axwOhxk7dqxJS0szaWlpZuzYscbhcJjvfve7pr6+vs+zjDHm/fffN+PHjzdRUVFm5syZ5sc//rH58Y9/bGbOnGmioqLMcccdZ95//33L8uw+KFtvDPJmjDGbNm0yRUVFZtGiRWbRokWmqKjIVFdXh5wTzjw733bcG3nhHDMn3OPv2Hk8mt7Is/N+a/fPsZ33i97I64kB34xccMEFJiMjo9Nb3d59910zffp088Mf/rDPs4wxJjMz05x77rmmoaGhw7yGhgZz7rnnmtNOO82yPLt/8MOdF+5mM5x5dr7tuDfywjlmTrjH37H7F9Zg+gK0++fYzvtFb+T1xIBvRoYNG2a2bNnS5fzNmzebYcOG9XmWMcYMHTrUvPXWW13Of/PNN0O6fz/ceXb/4Ic7L9zNZjjzwj3Gjd3zwjlmTrjH37H7F9Zg+gK0++fYzvtFb+T1hO0elBdubrdbjY2NXc7ft29f0E9QDGeWJMXGxmrnzp1dzt+5c2fQt+H2Rp7f7z/oNSaRkZHy+/0DJm/9+vUqLi7WiSee2GHeiSeeqAceeEBlZWWW5M2aNUs/+9nPAre6f91//vMf3XzzzZo1a1bQtdk976yzztIVV1yhN954o8O8N954Q1dffbXOPvvsPs+SvtzPjzrqqC7njxkzRk1NTQMmz877rd0/x3beL3ojr0f6pOWx0DXXXGPGjh1r1qxZ0+70RUNDg1mzZo1JSkoyeXl5fZ5ljDFLliwxw4cPNytXrjTbtm0zdXV1pq6uzmzbts2s
XLnSjBgxwhQUFFiWl52dbaZNm9bp0aAtW7aYlJQUc/HFFw+YvLi4OLNhw4Yu57/yyismLi7Okrza2lozefJkM2TIEDNt2jRz+umnm9NPP91MmzbNDBkyxEyZMsXU1tYGXZvd8z799FNz+umnG4fDYUaMGGEmTJhgJkyYYEaMGGGcTqc544wzzGeffdbnWcYYM3fuXHPaaaeZvXv3dpi3d+9ec/rpp5szzzxzwOTZeb+1++fYzvtFb+T1xIC/m8br9eqGG27QqlWrdODAgUBH3tLSoiFDhuiyyy7TfffdF9QRja6yvF6vIiIiQspqc+edd+r+++9XXV2dHA6HJMkYo/j4eN1www1auHBhSL9vOPPsPihbuPOuvfZarVu3Tvfdd5++//3vB8Y6aGxsVEVFhTwej8466yw9+OCDluTZ/bbj3hiD59133+1wZ1h3x8wJ1/g7dh+PZrANzmj3z7Hd9wsp/GNddceAb0baNDY2qqampt1fdkpKSrcG12lsbNTmzZtVX18vSRo9erRSU1N7NFDPxx9/3K62cePGdTsr3Hl2HpRNCt+OGc7GtTfyYB92/8LiCxD9zaBpRnpTZGSktm3bpokTJ1pdCsIgnI1ruPPCOcaN9OWXVmdfTH6/X//61790zDHHdCu3zamnnqonnnhCY8eODWk9r9crp9MZGBPjww8/1KpVq1RbW6uxY8fqsssuC6nB3rZtm2pqajRr1iyNHz9eb7/9toqLi+X3+3X++ecrKysrpPpgL+wX3dsvJPvsG4OiGfl//+//qaamRiNGjNCkSZPazdu/f79++9vfKicn55A5Ho+n0+n333+/LrnkEsXFxUmSVq5cGVRdW7Zs0fDhwwMfnqefflolJSWBD1ZeXp4uvPDCoLLaPPTQQ6qurtbcuXN14YUX6umnn9aKFSvk9/v1gx/8QLfeemuHh1MdTG8Movavf/1LsbGxGjZsWLvpPp9PVVVVQT8zoyvjx4/X+vXrdfzxx/cox0727NmjCy64QH//+991zDHHtDv0Xltbq+985zv6wx/+EDjkfSiNjY26/PLL9ec//1kxMTG68sorVVBQIJfLFcg96qijAg+qPJQ//elPnU7/wQ9+oPvvvz9wSuCcc84JKm/WrFnKy8vTD3/4Q/3973/X97//fZ144omaOHGi3nvvPe3YsUMvvfSSMjIyDpm1Zs0a/fjHP1ZsbKy8Xq/++Mc/6kc/+pFSU1Plcrn00ksv6de//rWys7ODqq0NX4DWfwGyX3R/v5B6b9/olj65MsVCO3bsCIzr4HQ6zYwZM8yuXbsC8+vq6ozT6Qwqy+FwmOTkZDNr1qx2Pw6Hw5xyyilm1qxZZvbs2UHXNmXKFFNeXm6MMebRRx81Q4cONdddd515+OGHzQ033GCGDRtmHn/88aDzli9fbg4//HBzwQUXmPj4eHPHHXeYuLg4c9ttt5nbb7/dHHnkkWbp0qVB54V7ELXdu3ebU045xTidTuNyucyll15q9u3bF5gfynthjDH3339/pz8ul8vk5+cHXgfrk08+aXfR36uvvmqys7PNd7/7XXPxxRebysrKoLPa/PnPfzZLliwxr732mjHGmIqKCnPGGWeYrKws88gjjwSdE+7bjq+77jpzwgknmN/97nfm0UcfNWPHjjVnnnmm8Xq9xpgv3wuHwxF0Xtv+9c1Btr7+E8p7GxMTY9577z1jjDEzZ840N954Y7v5ixcvNt/5zneCyjr55JPNbbfdZowx5je/+Y2JjY01t956a2D+PffcY5KTk4OuLdzj0TQ0NJgf/ehHJioqyowaNcosWbKk3fgboe4Xzz//fKc/LpfLPPTQQ4HXwZo5c6b53e9+Z4z58rZbt9ttpkyZYubNm2emTZtmoqOjQ9o3/vCHPxiXy2Xi4uLMsGHDTHl5uYmNjTWZmZkmKyvLuFwu8+yzzwaVxX7R/f3CmPDvGz0x4JuR8847z5x55plm79695v333zdnnnmmGTdunPnnP/9pjAltR1+xYoUZN26cqaioaDd9yJAh5u233w65tqFDh5qdO3caY4yZNm2a+dWvftVu/rPPPmsmTZoUdN6xxx5r/vCHPxhjjNm6datxuVzmmWeeCcxfs2aNOe6444LOC/cgajk5OSY9Pd384x//MOXl5SYlJcWkpqaaTz/91BjTvR396KOPNklJSe1+HA6HSUhIMElJSWbcuHFB56WlpZk///nPxhhj1q5da5xOpznnnHPMzTffbM4//3wTERERmB+MkpISM2TIEJOSkmJiYmLM008/bQ4//HBz+eWXmyuvvNIMHTrUFBUVBZUV7jFujjnmGPPKK68EXu/du9ekpaWZ0047zezfvz/kL8C2OzS++SXc3X3jsMMOM9u3bzfGfDluxtatW9vN/+CDD4L+fQ877DDz8ccfG2OM8fv9JiIiot3YDx9++GFIf3d8AdrnC5D9ovv7RVteOPeNnhjwzcioUaPa/eX6/X5z1VVXmWOOOcZ8+OGHIX+4qqurzQknnGD+93//17S0tBhjuv/BiouLM5s3bw7U2dkHK9RBz9qaLGOMiYiIMP/3f/8XeL1z504THR0dUl44B1E76qijzKZNmwKv9+/fb84++2yTnJxs/vvf/4b8Xlx55ZUmOTnZvPPOO+2m92RH/+ijj4wxxqSnp5s77rij3fwHH3zQTJs2Lei8SZMmBRrMl19+2URFRZni4uLA/CeeeMJMnDgxqKxw33Y8dOjQwO/aprGx0WRkZJhTTz3VfPTRRyG9F8YYs3LlSpOYmNiuYevue3Hqqaeau+66yxhjzPTp081TTz3Vbv7vf/97c8wxxwSVFR8fH9jPPv30U+NwONp94VRXV5v4+Piga+ML0D5fgOwX3d8vjAn/vtETA74ZOfzwwzt8WRljzLXXXmuOPvpo8+qrr4b84dq3b5/JyckxU6ZMMW+99ZaJiIjo1gfrkksuMZdddpkxxpgf/ehHZvHixe3m33777eakk04KOm/cuHHmxRdfNMYY89577xmn02l++9vfBuavW7fOJCUlBZ03ZsyYgx4J+NOf/mTGjBkTdN5hhx0W+D+sNj6fz5x33nlmypQp5s033wz5vVizZo1JTEw0Dz74YGBad3f0I444wmzbts0Y82Vz2PbfbT744IOQm7lvNodfb+4+/vjjoPPCPcbNiSeeaNatW9dh+r59+0xGRoaZOnVqyO+FMca88cYbZtKkSeaKK64wTU1N3X4vKisrzRFHHGEKCgrMgw8+aEaOHGkWL15snn32WbN06VITGxtr7rzzzqCyLrnkEpOenm6eeeYZc/bZZ5usrCzz7W9/22zfvt28++67ZubMmSEdyeAL0D5fgOwX3d8vjAn/vtETA74ZOeWUU8yvf/3rTudde+21JjY2tlsfLmO+PMQ4evRo43Q6u/XB2rVrl0lKSjIzZswwHo/HDB061Hz3u981CxYsMDNmzDCRkZGd7hhdWbx4sTnyyCPN5ZdfbsaNG2cWLVpkjjnmGPPwww+bkpISk5iY2OEQ68GEexC1k046yfz+97/vML2tITnmmGO69V7861/
/Mqeeeqo5/fTTzb///e9u7+jnnHOOWbRokTHGmKysrA7Xmzz66KPm+OOPDzqvrdk15sv32uFwtHs/N2zYYI4++uigsvbv32+uuuoqExkZaZxOp4mKijJRUVHG6XSayMhIc/XVV5v9+/cHXdv//M//dPmPTGNjo0lPT+/2ftHc3GyuvPJKc/zxxxuXy9Wt98KYL//h/fa3v93hFENCQkLQp7eM+fI0x5w5c8ywYcNMVlaW+fzzz01eXl7gdMXxxx9vPvjgg6Dz+AK0zxdgV/uFw+FgvwhCuPeNnhjwzcjtt99uzjjjjC7nX3311SGdj/2mTz75xKxdu9Z88cUX3Vr/s88+MzfffLOZNGmSiYqKMpGRkWbs2LEmOzv7oM8M6Exra6v5xS9+Yc466yxz++23G7/fb37zm9+YxMREExcXZ37yk5+EXOcdd9xhxowZE/hwtp2LHjNmTEj/ABljzMKFC7u8xsTn85lzzjmn2++F3+83t99+u4mPj+/2jv7OO++YuLg4k5OTY5YvX26GDRtmLrnkEvOLX/zC5OTkGLfbbZ544omg86699lpz/PHHm9tuu82kpaWZ+fPnmwkTJpgXX3zRlJWVmZNOOsn89Kc/DanGhoYG8/LLL5vS0lJTWlpqXn755U6v6TmUTz/9tN0pvG9qbGw86P/9B+P55583N9xwQ0gXc3Zmz5495vXXXzeVlZWBw/vh8OGHH5q33nrL+Hy+kNajMey9L0CHw9GtL8CGhgZTUVER2C8qKirCul/4/X5jTPj2i+uuuy6s+8U3j6z1VHf3jZ4YFLf2oufCMYjagQMH1Nzc3OX4GgcOHNCuXbtCvvXw62pqavTaa68pJydHw4cPD3n9Dz/8UIsXL9a6dev0xRdfSJKGDBmiU045RTfddJPOO++8oLOampp04403qqqqStOnT9eDDz6oBx54QLfccot8Pp9mzpyp1atXB33bIewlXOPHfPbZZ9q9e7e+9a1vdTp/37592rJli2bOnNntWv/0pz/plVdeUX5+fo8+b3v37tVHH30kv9+vMWPGKCkpqdtZ3/TRRx+publZEyZMCGn4gc6Ee+wn8nofzQi67ZNPPlFBQYFWrVo14PKMMdqzZ4/8fr9GjhwZGGMhHPbv3y+fz6fDDz88pPXCNV7OYMwLd21to4e2jRj67rvv6v7775fX69Ull1yiU089NeisvsorKipSS0tLj/KmT5+uE088MWz19TQv3GM/kdezvB7ps2MwGHC2bt3a7cPH/TGvtrbW5ObmWpIXzvFyusrbvXv3gMwLd20vvviiiYyMNCNGjDBRUVHmxRdfNEceeaTJzMw0p556qnG5XB1u/yevd/LCPfYTeT3L6wmaEXSpq8GT2n7uu+++sAzGZJe8Q7GyWQrneDmDLS/ctWVkZJhbbrnFGPPlRezDhw83P//5zwPzFy1aZObMmUNeH+SFe+wn8nqW1xM0I+hSuAdPsnuenZulcI+XM5jywl1bTExMYOTh1tZWM2TIkHbjjrz11ltm9OjR5PVRXjjHfiKv53ndRTOCLh111FFm7dq1Xc5/4403QvpH3O55dm6Wwj1ezmDKC3dtMTEx7e72GDZsmPnwww8Dr3fu3GmioqLI66M8Y8I39hN54cnrjtCfI41BIyUlRTU1NV3OdzgcMiFc/2z3vDFjxmjNmjXy+/2d/mzZsiXorHDnTZgwQZs3b+4w/aGHHtK5554b9IO2BmNeuGtLSkrS+++/H3hdVVXV7iF2tbW1GjNmDHl9lCdJw4YN01NPPaX8/HxlZmYG/SA78nonrztoRtClm266SdOnT+9y/nHHHadXXnllwOTZuVk6//zz9Zvf/KbTeQ899JAuuuiikGobTHnhru3qq69u94/15MmT292K+uKLL4Z0dwl5Pcv7ugsvvFCbN2/WmjVrejREAHnhyQsFt/YC/7+//e1vampq0umnn97p/KamJm3evDno8R7CnQcAAxXNCAAAsBSnaQAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKVoRgAAgKX+P9p0SfXh+o84AAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n" + ] + } + ], + "source": [ + "perplexities_langs_histograms(\n", + " [\"en\", \"de\", \"es\", \"fr\", \"it\", \"ko\", \"nl\", \"pt\", \"ru\", \"zh\"],\n", + " n=100_000\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Aggregate stats" + ] + }, + { + "cell_type": "code", + "execution_count": 103, + "metadata": {}, + "outputs": [], + "source": [ + "def perplexities_langs_stats(langs: list[str], n: int) -> None:\n", + " means = []\n", + " maxes = []\n", + " mins = []\n", + " medians = []\n", + " std_devs = []\n", + " for lang in langs:\n", + " print(f\"On {lang}\")\n", + " if lang == \"de\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/2/0000.json.gz\"\n", + " elif lang == \"es\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/3/0000.json.gz\"\n", + " elif lang == \"fr\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/1/0000.json.gz\"\n", + " elif lang == \"ru\":\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/6/0000.json.gz\"\n", + " else:\n", + " corpus = f\"/mnt/data_2/shared/tower_llm_data/{lang}/0000.json.gz\"\n", + " train_corpus = open_read_cleaned(corpus)\n", + " perplexities, _ = get_perplexities_and_text(train_corpus, n=n)\n", + " means.append(np.mean(perplexities)) \n", + " maxes.append(np.max(perplexities))\n", + " mins.append(np.min(perplexities))\n", + " medians.append(np.median(perplexities))\n", + " std_devs.append(np.std(perplexities))\n", + " print(f\"Macro-avg mean: {np.mean(means)}\")\n", + " print(f\"Macro-avg max: {np.mean(maxes)}\")\n", + " print(f\"Macro-avg min: {np.mean(mins)}\")\n", + " print(f\"Macro-avg median: {np.mean(medians)}\")\n", + " print(f\"Macro-avg std_dev: {np.mean(std_devs)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 104, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "On de\n", + "On es\n", + "On fr\n", + "On it\n", + "On ko\n", + "On nl\n", + "On pt\n", + "On ru\n", + "On zh\n", + "Macro-avg mean: 819.4231974444444\n", + "Macro-avg max: 76421.04444444444\n", + "Macro-avg min: 6.111111111111111\n", + "Macro-avg median: 544.1999999999999\n", + "Macro-avg std_dev: 973.7085881455251\n" + ] + } + ], + "source": [ + "perplexities_langs_stats(\n", + " [\"en\", \"de\", \"es\", \"fr\", \"it\", \"ko\", \"nl\", \"pt\", \"ru\", \"zh\"],\n", + " n=100_000\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Text examples with perplexity ranges" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def get_text_within_perplexity_range(\n", + " corpus_path: str,\n", + " perplexity_range: Tuple[float, float],\n", + " n_max: int = 20\n", + ") -> List[str]:\n", + " corpus = open_read_cleaned(corpus_path)\n", + " ranged_texts = []\n", + " while len(ranged_texts) < n_max:\n", + " entry = next(corpus)\n", + " if perplexity_range[0] <= entry['perplexity'] <= perplexity_range[1]:\n", + " ranged_texts.append(entry['text'])\n", + " return ranged_texts" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "On perplexity range: (0, 50)\n", + "ko\n", + "On perplexity range: (50, 100)\n", + "ko\n", + "On perplexity range: (100, 200)\n", + "ko\n", + "On perplexity range: (200, 
300)\n", + "ko\n", + "On perplexity range: (300, 400)\n", + "ko\n", + "On perplexity range: (400, 500)\n", + "ko\n", + "On perplexity range: (500, 600)\n", + "ko\n", + "On perplexity range: (600, 700)\n", + "ko\n", + "On perplexity range: (700, 800)\n", + "ko\n" + ] + } + ], + "source": [ + "for perplexity_range in [(0, 50), (50, 100), (100, 200), (200, 300), (300, 400), (400, 500), (500, 600), (600, 700), (700, 800)]:\n", + " print(\"On perplexity range:\", perplexity_range)\n", + " for lang in [\"ko\"]:\n", + " print(lang)\n", + " if lang == \"de\":\n", + " path = f\"/mnt/data_2/shared/tower_llm_data/{lang}/2/0000.json.gz\"\n", + " elif lang == \"es\":\n", + " path = f\"/mnt/data_2/shared/tower_llm_data/{lang}/3/0000.json.gz\"\n", + " else:\n", + " path = f\"/mnt/data_2/shared/tower_llm_data/{lang}/0000.json.gz\"\n", + " texts = get_text_within_perplexity_range(\n", + " corpus_path=path,\n", + " perplexity_range=perplexity_range,\n", + " n_max=50\n", + " )\n", + " with open(f\"/mnt/data/jpombal/multilinguality_megatron/perplexity_texts/perplexity_texts_range_{lang}_{perplexity_range}.txt\", \"w\") as f:\n", + " f.write(\"\\n\".join(texts))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# English" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import datasets" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "data = datasets.load_from_disk('/mnt/data_2/shared/tower_llm_data/en/data')" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'text': 'Beginners BBQ Class Taking Place in Missoula!\\nDo you want to get better at making delicious BBQ? You will have the opportunity, put this on your calendar now. Thursday, September 22nd join World Class BBQ Champion, Tony Balay from Lonestar Smoke Rangers. He will be teaching a beginner level class for everyone who wants to get better with their culinary skills.\\nHe will teach you everything you need to know to compete in a KCBS BBQ competition, including techniques, recipes, timelines, meat selection and trimming, plus smoker and fire information.\\nThe cost to be in the class is $35 per person, and for spectators it is free. 
Included in the cost will be either a t-shirt or apron and you will be tasting samples of each meat that is prepared.',\n", + " 'timestamp': '2019-04-25T12:57:54Z',\n", + " 'url': None,\n", + " 'perplexity_score': 478.8,\n", + " 'text_length': 747,\n", + " 'domain': None,\n", + " 'dup_ratio': 0.0,\n", + " 'pairs': [],\n", + " 'repetitions': [],\n", + " 'included_in_dedup': True,\n", + " 'cluster': []}" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "data[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "On perplexity range: (0, 50)\n", + "On perplexity range: (50, 100)\n", + "On perplexity range: (100, 200)\n", + "On perplexity range: (200, 300)\n", + "On perplexity range: (300, 400)\n", + "On perplexity range: (400, 500)\n", + "On perplexity range: (500, 600)\n", + "On perplexity range: (600, 700)\n", + "On perplexity range: (700, 800)\n" + ] + } + ], + "source": [ + "def get_text_within_perplexity_range(\n", + " corpus,\n", + " perplexity_range: Tuple[float, float],\n", + " n_max: int = 20\n", + ") -> List[str]:\n", + " ranged_texts = []\n", + " i = 0\n", + " while len(ranged_texts) < n_max:\n", + " entry = corpus[i]\n", + " if perplexity_range[0] <= entry['perplexity_score'] <= perplexity_range[1]:\n", + " ranged_texts.append(entry['text'])\n", + " i += 1\n", + " return ranged_texts\n", + "\n", + "for perplexity_range in [(0, 50), (50, 100), (100, 200), (200, 300), (300, 400), (400, 500), (500, 600), (600, 700), (700, 800)]:\n", + " print(\"On perplexity range:\", perplexity_range)\n", + " texts = get_text_within_perplexity_range(\n", + " corpus=data,\n", + " perplexity_range=perplexity_range,\n", + " n_max=50\n", + " )\n", + " with open(f\"/mnt/data/jpombal/multilinguality_tower/notebooks/perplexity_texts/perplexity_texts_range_en_{perplexity_range}.txt\", \"w\") as f:\n", + " f.write(\"\\n\".join(texts))" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiMAAAGyCAYAAAA2+MTKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABCp0lEQVR4nO3dfVxUdeL+/2tmgEE0QjHFCMVu1a3EIAhrvWk1KruxbcuiwmXLbvl0M5/NZFPRbLWt1uyGzbayeza3crVdCyPK2laSVdPa0u6N0kD7VGDwdRiZ9++Pfk47ATbDDJwDvp6PBw+bc3PNG2eO5+qcOWccxhgjAAAAizitHgAAANi/UUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACwVY/UAQuH3+7V9+3YdcMABcjgcVg8HAACEwBijXbt26eCDD5bT2f7xj25RRrZv3660tDSrhwEAADrg888/1yGHHNLu/G5RRg444ABJ0qeffqp+/fpFnOfz+fTSSy/plFNOUWxsrG2yyLNXnp3HRh6vLXldk2fnsXWHvIaGBqWlpQX24+3pFmVk76mZAw44QImJiRHn+Xw+JSQkKDExMSpv1GhlkWevPDuPjTxeW/K6Js/OY+sOeXv91Ecs+AArAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACwVdhl5/fXXdeaZZ+rggw+Ww+HQ8uXLf3Kd1atX67jjjpPb7dbhhx+uRx99tANDBQAAPVHYZaSxsVEjR45UaWlpSMt/+umnmjRpksaPH6+NGzfq+uuv12WXXaZVq1aFPVgAANDzhP2tvaeddppOO+20kJdfvHixhg4dqj/+8Y+SpOHDh+uNN97QXXfdpby8vHCfHgAA9DBhl5FwVVVVacKECUHT8vLydP3117e7jtfrldfrDTxuaGiQ9P1XG/t8vojHtDfDblnk2SvPzmMjzz5Z5PXsPDuPrTvl/RSHMcZ09EkcDof+9re/afLkye0uc+SRR6qwsFDFxcWBaS+88IImTZqkpqYm9erVq9U6c+bM0dy5c1tNLysrU0JCQkeHCwAAulBTU5Py8/NVX1+vxMTEdpfr9CMjHVFcXCyPxxN43NDQoLS0NI0fP17JyckR5/t8PlVUVGjixImKjY21TRZ59sqz89jI47Ulr2vy7Dy27pC398zGT+n0MpKSkqK6urqgaXV1dUpMTGzzqIgkud1uud3uVtNjY2Oj8pfTGXl2Hht59skiz155dh4befbKs/PY7JwXakan32ckNzdXlZWVQdMqKiqUm5vb2U8NAAC6gbCPjHz33Xf66KOPAo8//fRTbdy4Uf369dPgwYNVXFysbdu26fHHH5ckXXnllbrvvvs0ffp0/eY3v9Err7yiv/71r1q5cmX0fgsAABC29BnB+2K3y+j2bOnoOavkbXFEnO/3NoW0XNhlZN26dRo/fnzg8d7PdkydOlWPPvqovvzyS9XU1ATmDx06VCtXrtQNN9ygu+++W4cccogeeughLusFAHRrP96RhyLaO/to51kl7DIybtw47esCnLburjpu3Di99dZb4T4VAKAH6uz/G49mXk/Z2dudLa+mAQBYI5T/22cHjWijjABAN2KHUwNAtFFGACBK7HiaAegOOv3SXgAAgH3hyAiA/cJ/n97orA9MAugYyggAy/GhSWD/xmkaAABgKY6MANinjly9IXEkA0DoODICAAAsRRkBAACW4jQN0M1xbwsA3R1HRgAAgKUoIwAAwFKUEQAAYCk+MwJ0Iu76CQA/jSMjAADAUpQRAABgKcoIAACwFGUEAABYig+wYr/1U9+5wk3AAKBrcGQEAABYijICAAAsRRkBAACWoowAAABLdaiMlJaWKj09XfHx8crJyVF1dXW7y/p8Pt1yyy067LDDFB8fr5EjR6q8vLzDAwYAAD1L2GVk6dKl8ng8Kikp0YYNGzRy5Ejl5eVpx44dbS4/c+ZMPfDAA7r33nv13nvv6corr9Q555yjt956K+LBAwCA7i/sS3sXLlyoadOmqbCwUJK0ePFirVy5UkuWLNGMGTNaLf/EE0/o5ptv1umnny5Juuqqq/Tyyy/rj3/8o5588skIh4+e6qcuu20Pl+MCQPcTVhlpbm7W+vXrVVxcHJjmdDo1YcIEVVVVtbmO1+tVfHx80LRevXrpjTfeaPd5vF6vvF5v4HFDQ4Ok70/5+Hy+cIbcpr0Zdssi7wdul+lQnttpgv6MRDSzyLNXnp3HRp698uw8tu6Q1xLiv+UOY0zIz7h9+3alpqZqzZo1ys3NDUyfPn26XnvtNa1du7bVOvn5+dq0aZOWL1+uww47TJWVlTr77LPV0tISVDj+25w5czR37txW08vKypSQkBDqcAEAgIWampqUn5+v+vp6JSYmtrtcp9+B9e6779a0adM0bNgwORwOHXbYYSosLNSSJUvaXae4uFgejyfwuKGhQWlpaRo/frySk5MjHpPP51NFRYUmTpyo2NhY22SR90Pe0XNWdSjP7TSal+XXrHVOef2RnaaJZhZ59sqz89jIs1eencfWHfJavK6QlgurjPTv318ul0t1dXVB0+vq6pSSktLmOgcddJCWL1+u3bt36//+7/908MEHa8aMGTr00EPbfR632y23291qemxsbFR2gJ2RZ+exdce8SD/v4fU7ovaZkWhmkWevPDuPjTx75dl5bHbO84eYEdbVNHFxccrMzFRlZeUPT+T3q7KyMui0TVvi4+OVmpqqPXv26LnnntPZZ58dzlMDAIAeKuzTNB6PR1OnTlVWVpays7O1aNEiNTY2Bq6uKSgoUGpqqhYsWCBJWrt2rbZt26aMjAxt27ZNc+bMkd/v1/Tp06P7mwAAgG4p7DIyZcoU7dy5U7Nnz1Ztba0yMjJUXl6ugQMHSpJqamrkdP5wwGX37t2aOXOmPvnkE/Xp00enn366nnjiCSUlJUXtlwAAAN1Xhz7AWlRUpKKiojbnrV69Oujx2LFj9d5773XkaQAAwH6A76YBAACWoowAAABLdfp9RtAzRet269y+HQDAkREAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwVIzVA0D0pc9YGfhvt8vo9mzp6Dmr5G1xRJy9Nw8AgGjhyAgAALAUZQQAAFiKMgIAACzVoTJSWlqq9PR0xcfHKycnR9XV1ftcftGiRTrqqKPUq1cvpaWl6YYbbtDu3bs7NGAAANCzhF1Gli5dKo/Ho5KSEm3YsEEjR45UXl6eduzY0ebyZWVlmjFjhkpKSrR582Y9/PDDWrp0qX73u99FPHgAAND9hX01zcKFCzVt2jQVFhZKkhYvXqyVK1dqyZIlmjFjRqvl16xZoxNPPFH5+fmSpPT0dF144YVau3Ztu8/h9Xrl9XoDjxsaGiRJPp9PPp8v3C
G3sjfDblnRynO7zA//7TRBf0Zqf8qz89jIs08WeT07z85j6w55La7QchzGmJCfsbm5WQkJCXr22Wc1efLkwPSpU6fq22+/1YoVK1qtU1ZWpquvvlovvfSSsrOz9cknn2jSpEm65JJL2j06MmfOHM2dO7fNrISEhFCHCwAALNTU1KT8/HzV19crMTGx3eXCOjLy1VdfqaWlRQMHDgyaPnDgQG3ZsqXNdfLz8/XVV1/ppJNOkjFGe/bs0ZVXXrnP0zTFxcXyeDyBxw0NDUpLS9P48eOVnJwczpDb5PP5VFFRoYkTJyo2NtY2WdHKO3rOqsB/u51G87L8mrXOKa8/CvcZ2Y/y7Dw28nhtyeuaPDuPrTvktXhdIS3X6Tc9W716tebPn68//elPysnJ0UcffaTrrrtO8+bN06xZs9pcx+12y+12t5oeGxsblR1+Z+TZaWxt3dzM63dE5aZn+2OencdGnn2yyOvZeXYem53z/CFmhFVG+vfvL5fLpbq6uqDpdXV1SklJaXOdWbNm6ZJLLtFll10mSTrmmGPU2Nioyy+/XDfffLOcTq4uBgBgfxZWE4iLi1NmZqYqKysD0/x+vyorK5Wbm9vmOk1NTa0Kh8v1/WGbMD6uAgAAeqiwT9N4PB5NnTpVWVlZys7O1qJFi9TY2Bi4uqagoECpqalasGCBJOnMM8/UwoULNWrUqMBpmlmzZunMM88MlBIAALD/CruMTJkyRTt37tTs2bNVW1urjIwMlZeXBz7UWlNTE3QkZObMmXI4HJo5c6a2bdumgw46SGeeeaZ+//vfR++3AAAA3VaHPsBaVFSkoqKiNuetXr06+AliYlRSUqKSkpKOPBUAAOjh+PQoAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS8VYPYD9TfqMlfuc73YZ3Z4tHT1nlbwtji4aFQAA1uHICAAAsBRlBAAAWKpDZaS0tFTp6emKj49XTk6Oqqur21123LhxcjgcrX4mTZrU4UEDAICeI+wysnTpUnk8HpWUlGjDhg0aOXKk8vLytGPHjjaXX7Zsmb788svAz3/+8x+5XC6dd955EQ8eAAB0f2GXkYULF2ratGkqLCzUiBEjtHjxYiUkJGjJkiVtLt+vXz+lpKQEfioqKpSQkEAZAQAAksK8mqa5uVnr169XcXFxYJrT6dSECRNUVVUVUsbDDz+sCy64QL179253Ga/XK6/XG3jc0NAgSfL5fPL5fOEMuU17M6zIcrvMvuc7TdCfkSLPHlnk2SvPzmMjz155dh5bd8hr+Yl93l4OY0zIz7h9+3alpqZqzZo1ys3NDUyfPn26XnvtNa1du3af61dXVysnJ0dr165VdnZ2u8vNmTNHc+fObTW9rKxMCQkJoQ4XAABYqKmpSfn5+aqvr1diYmK7y3XpfUYefvhhHXPMMfssIpJUXFwsj8cTeNzQ0KC0tDSNHz9eycnJEY/D5/OpoqJCEydOVGxsbJdmHT1n1T7nu51G87L8mrXOKa8/8vuMkGePLPLslWfnsZFnrzw7j6075LV4XSEtF1YZ6d+/v1wul+rq6oKm19XVKSUlZZ/rNjY26umnn9Ytt9zyk8/jdrvldrtbTY+NjY24PHRWXqhZod7IzOt3RPWmZ+TZI4s8e+XZeWzk2SvPzmOzc54/xIywPsAaFxenzMxMVVZW/vBEfr8qKyuDTtu05ZlnnpHX69XFF18czlMCAIAeLuzTNB6PR1OnTlVWVpays7O1aNEiNTY2qrCwUJJUUFCg1NRULViwIGi9hx9+WJMnT47KaRYAANBzhF1GpkyZop07d2r27Nmqra1VRkaGysvLNXDgQElSTU2NnM7gAy7vv/++3njjDb300kvRGTUAAOgxOvQB1qKiIhUVFbU5b/Xq1a2mHXXUUQrjoh0AALAf4btpAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS3WojJSWlio9PV3x8fHKyclRdXX1Ppf/9ttvdc0112jQoEFyu9068sgj9cILL3RowAAAoGeJCXeFpUuXyuPxaPHixcrJydGiRYuUl5en999/XwMGDGi1fHNzsyZOnKgBAwbo2WefVWpqqj777DMlJSVFY/wAAKCbC7uMLFy4UNOmTVNhYaEkafHixVq5cqWWLFmiGTNmtFp+yZIl+vrrr7VmzRrFxsZKktLT0yMbNQAA6DHCKiPNzc1av369iouLA9OcTqcmTJigqqqqNtd5/vnnlZubq2uuuUYrVqzQQQcdpPz8fN10001yuVxtruP1euX1egOPGxoaJEk+n08+ny+cIbdpb4YVWW6X2fd8pwn6M1Lk2SOLPHvl2Xls5Nkrz85j6w55LT+xz9vLYYwJ+Rm3b9+u1NRUrVmzRrm5uYHp06dP12uvvaa1a9e2WmfYsGHaunWrLrroIl199dX66KOPdPXVV+vaa69VSUlJm88zZ84czZ07t9X0srIyJSQkhDpcAABgoaamJuXn56u+vl6JiYntLhf2aZpw+f1+DRgwQH/+85/lcrmUmZmpbdu26Y477mi3jBQXF8vj8QQeNzQ0KC0tTePHj1dycnLEY/L5fKqoqNDEiRMDp466KuvoOav2Od/tNJqX5desdU55/Y6IxkZeZHl2Hht5vLbkdU2encfWHfJavG2fAfmxsMpI//795XK5VFdXFzS9rq5OKSkpba4zaNAgxcbGBp2SGT58uGpra9Xc3Ky4uLhW67jdbrnd7lbTY2NjIy4PnZUXapa3JbQX1+t3hLwseZ2bZ+exkWefLPJ6dp6dx2bnPH+IGWFd2hsXF6fMzExVVlb+8ER+vyorK4NO2/y3E088UR999JH8fn9g2gcffKBBgwa1WUQAAMD+Jez7jHg8Hj344IN67LHHtHnzZl111VVqbGwMXF1TUFAQ9AHXq666Sl9//bWuu+46ffDBB1q5cqXmz5+va665Jnq/BQAA6LbC/szIlClTtHPnTs2ePVu1tbXKyMhQeXm5Bg4cKEmqqamR0/lDx0lLS9OqVat0ww036Nhjj1Vqaqquu+463XTTTdH7LQAAQLfVoQ+wFhUVqaioqM15q1evbjUtNzdXb775ZkeeCgAA9HB8Nw0AALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGCpD
pWR0tJSpaenKz4+Xjk5Oaqurm532UcffVQOhyPoJz4+vsMDBgAAPUvYZWTp0qXyeDwqKSnRhg0bNHLkSOXl5WnHjh3trpOYmKgvv/wy8PPZZ59FNGgAANBzhF1GFi5cqGnTpqmwsFAjRozQ4sWLlZCQoCVLlrS7jsPhUEpKSuBn4MCBEQ0aAAD0HDHhLNzc3Kz169eruLg4MM3pdGrChAmqqqpqd73vvvtOQ4YMkd/v13HHHaf58+frZz/7WbvLe71eeb3ewOOGhgZJks/nk8/nC2fIbdqbYUWW22X2Pd9pgv6MFHn2yCLPXnl2Hht59sqz89i6Q17LT+zz9nIYY0J+xu3btys1NVVr1qxRbm5uYPr06dP12muvae3ata3Wqaqq0ocffqhjjz1W9fX1uvPOO/X666/r3Xff1SGHHNLm88yZM0dz585tNb2srEwJCQmhDhcAAFioqalJ+fn5qq+vV2JiYrvLhXVkpCNyc3ODisvo0aM1fPhwPfDAA5o3b16b6xQXF8vj8QQeNzQ0KC0tTePHj1dycnLEY/L5fKqoqNDEiRMVGxvbpVlHz1m1z/lup9G8LL9mrXPK63dENDbyIsuz89jI47Ulr2vy7Dy27pDX4nWFtFxYZaR///5yuVyqq6sLml5XV6eUlJSQMmJjYzVq1Ch99NFH7S7jdrvldrvbXDfS8tBZeaFmeVtCe3G9fkfIy5LXuXl2Hht59skir2fn2Xlsds7zh5gR1gdY4+LilJmZqcrKyh+eyO9XZWVl0NGPfWlpadE777yjQYMGhfPUAACghwr7NI3H49HUqVOVlZWl7OxsLVq0SI2NjSosLJQkFRQUKDU1VQsWLJAk3XLLLTrhhBN0+OGH69tvv9Udd9yhzz77TJdddll0f5MoSJ+xMux13C6j27O/P/0SzVYKAMD+IuwyMmXKFO3cuVOzZ89WbW2tMjIyVF5eHrhct6amRk7nDwdcvvnmG02bNk21tbXq27evMjMztWbNGo0YMSJ6vwUAAOi2OvQB1qKiIhUVFbU5b/Xq1UGP77rrLt11110deRoAALAf4LtpAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS3WojJSWlio9PV3x8fHKyclRdXV1SOs9/fTTcjgcmjx5ckeeFgAA9EBhl5GlS5fK4/GopKREGzZs0MiRI5WXl6cdO3bsc72tW7fqt7/9rX7+8593eLAAAKDnCbuMLFy4UNOmTVNhYaFGjBihxYsXKyEhQUuWLGl3nZaWFl100UWaO3euDj300IgGDAAAepaYcBZubm7W+vXrVVxcHJjmdDo1YcIEVVVVtbveLbfcogEDBujSSy/VP//5z598Hq/XK6/XG3jc0NAgSfL5fPL5fOEMuU17M36c5XaZsLPcThP0Z6TIs0+encdGnn2yyOvZeXYeW3fIawlxv+owxoT8jNu3b1dqaqrWrFmj3NzcwPTp06frtdde09q1a1ut88Ybb+iCCy7Qxo0b1b9/f/3617/Wt99+q+XLl7f7PHPmzNHcuXNbTS8rK1NCQkKowwUAABZqampSfn6+6uvrlZiY2O5yYR0ZCdeuXbt0ySWX6MEHH1T//v1DXq+4uFgejyfwuKGhQWlpaRo/frySk5MjHpfP51NFRYUmTpyo2NjYwPSj56wKO8vtNJqX5desdU55/Y6Ix0aeffLsPDbyeG3J65o8O4+tO+S1eF0hLRdWGenfv79cLpfq6uqCptfV1SklJaXV8h9//LG2bt2qM888MzDN7/d//8QxMXr//fd12GGHtVrP7XbL7Xa3mh4bGxtUHiL14zxvS8f/4r1+R0Trk2ffPDuPjTz7ZJHXs/PsPDY75/lDzAjrA6xxcXHKzMxUZWXlD0/k96uysjLotM1ew4YN0zvvvKONGzcGfs466yyNHz9eGzduVFpaWjhPDwAAeqCwT9N4PB5NnTpVWVlZys7O1qJFi9TY2KjCwkJJUkFBgVJTU7VgwQLFx8fr6KOPDlo/KSlJklpNBwAA+6ewy8iUKVO0c+dOzZ49W7W1tcrIyFB5ebkGDhwoSaqpqZHTyY1dAQBAaDr0AdaioiIVFRW1OW/16tX7XPfRRx/tyFMCAIAeikMYAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUh0qI6WlpUpPT1d8fLxycnJUXV3d7rLLli1TVlaWkpKS1Lt3b2VkZOiJJ57o8IABAEDPEnYZWbp0qTwej0pKSrRhwwaNHDlSeXl52rFjR5vL9+vXTzfffLOqqqr09ttvq7CwUIWFhVq1alXEgwcAAN1fTLgrLFy4UNOmTVNhYaEkafHixVq5cqWWLFmiGTNmtFp+3LhxQY+vu+46PfbYY3rjjTeUl5fX5nN4vV55vd7A44aGBkmSz+eTz+cLd8it7M34cZbbZcLOcjtN0J+RIs8+eXYeG3n2ySKvZ+fZeWzdIa8lxP2qwxgT8jM2NzcrISFBzz77rCZPnhyYPnXqVH377bdasWLFPtc3xuiVV17RWWedpeXLl2vixIltLjdnzhzNnTu31fSysjIlJCSEOlwAAGChpqYm5efnq76+XomJie0uF9aRka+++kotLS0aOHBg0PSBAwdqy5Yt7a5XX1+v1NRUeb1euVwu/elPf2q3iEhScXGxPB5P4HFDQ4PS0tI0fvx4JScnhzPkNvl8PlVUVGjixImKjY0NTD96TvinjtxOo3lZfs1a55TX74h4bOTZJ8/OYyOP15a8rsmz89i6Q16L1xXScmGfpumIAw44QBs3btR3332nyspKeTweHXrooa1O4ezldrvldrtbTY+NjQ0qD5H6cZ63peN/8V6/I6L1ybNvnp3HRp59ssjr2Xl2Hpud8/whZoRVRvr37y+Xy6W6urqg6XV1dUpJSWl3PafTqcMPP1ySlJGRoc2bN2vBggXtlhEAALD/COtqmri4OGVmZqqysjIwze/3q7KyUrm5uSHn+P3+oA+oAgCA/VfYp2k8Ho+mTp2qrKwsZWdna9GiRWpsbAxcXVNQUKDU1FQtWLBAkrRgwQJlZWXpsMMOk9fr1QsvvKAnnnhC999/f3R/EwAA0C2FXUamTJminTt3avbs2aqtrVVGRobKy8sDH2qtqamR0/nDAZfGxkZdffXV+uKLL9SrVy8NGzZMTz75pKZMmRK93wIAAHRbHfoAa1FRkYqKitqct3r16qDHt956q2699daOPA0AANgP8N00AADAUpQRAABgKcoIAACwFGUEAABY
ijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApTpURkpLS5Wenq74+Hjl5OSourq63WUffPBB/fznP1ffvn3Vt29fTZgwYZ/LAwCA/UvYZWTp0qXyeDwqKSnRhg0bNHLkSOXl5WnHjh1tLr969WpdeOGFevXVV1VVVaW0tDSdcsop2rZtW8SDBwAA3V/YZWThwoWaNm2aCgsLNWLECC1evFgJCQlasmRJm8s/9dRTuvrqq5WRkaFhw4bpoYcekt/vV2VlZcSDBwAA3V9MOAs3Nzdr/fr1Ki4uDkxzOp2aMGGCqqqqQspoamqSz+dTv3792l3G6/XK6/UGHjc0NEiSfD6ffD5fOENu096MH2e5XSbsLLfTBP0ZKfLsk2fnsZFnnyzyenaencfWHfJaQtyvOowxIT/j9u3blZqaqjVr1ig3Nzcwffr06Xrttde0du3an8y4+uqrtWrVKr377ruKj49vc5k5c+Zo7ty5raaXlZUpISEh1OECAAALNTU1KT8/X/X19UpMTGx3ubCOjETqtttu09NPP63Vq1e3W0Qkqbi4WB6PJ/C4oaFBaWlpGj9+vJKTkyMeh8/nU0VFhSZOnKjY2NjA9KPnrAo7y+00mpfl16x1Tnn9jojHRp598uw8NvJ4bcnrmjw7j6075LV4XSEtF1YZ6d+/v1wul+rq6oKm19XVKSUlZZ/r3nnnnbrtttv08ssv69hjj93nsm63W263u9X02NjYoPIQqR/neVs6/hfv9TsiWp88++bZeWzk2SeLvJ6dZ+ex2TnPH2JGWB9gjYuLU2ZmZtCHT/d+GPW/T9v82O2336558+apvLxcWVlZ4TwlAADo4cI+TePxeDR16lRlZWUpOztbixYtUmNjowoLCyVJBQUFSk1N1YIFCyRJf/jDHzR79myVlZUpPT1dtbW1kqQ+ffqoT58+UfxVAABAdxR2GZkyZYp27typ2bNnq7a2VhkZGSovL9fAgQMlSTU1NXI6fzjgcv/996u5uVm/+tWvgnJKSko0Z86cyEYPAAC6vQ59gLWoqEhFRUVtzlu9enXQ461bt3bkKQAAwH6C76YBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJaijAAAAEtRRgAAgKUoIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAs1aEyUlpaqvT0dMXHxysnJ0fV1dXtLvvuu+/q3HPPVXp6uhwOhxYtWtTRsQIAgB4o7DKydOlSeTwelZSUaMOGDRo5cqTy8vK0Y8eONpdvamrSoYceqttuu00pKSkRDxgAAPQsYZeRhQsXatq0aSosLNSIESO0ePFiJSQkaMmSJW0uf/zxx+uOO+7QBRdcILfbHfGAAQBAzxITzsLNzc1av369iouLA9OcTqcmTJigqqqqqA3K6/XK6/UGHjc0NEiSfD6ffD5fxPl7M36c5XaZsLPcThP0Z6TIs0+encdGnn2yyOvZeXYeW3fIawlxv+owxoT8jNu3b1dqaqrWrFmj3NzcwPTp06frtdde09q1a/e5fnp6uq6//npdf/31+1xuzpw5mjt3bqvpZWVlSkhICHW4AADAQk1NTcrPz1d9fb0SExPbXS6sIyNdpbi4WB6PJ/C4oaFBaWlpGj9+vJKTkyPO9/l8qqio0MSJExUbGxuYfvScVWFnuZ1G87L8mrXOKa/fEfHYyLNPnp3HRh6vLXldk2fnsXWHvBavK6Tlwioj/fv3l8vlUl1dXdD0urq6qH441e12t/n5ktjY2KDyEKkf53lbOv4X7/U7IlqfPPvm2Xls5Nkni7yenWfnsdk5zx9iRlgfYI2Li1NmZqYqKyt/eCK/X5WVlUGnbQAAAEIV9mkaj8ejqVOnKisrS9nZ2Vq0aJEaGxtVWFgoSSooKFBqaqoWLFgg6fsPvb733nuB/962bZs2btyoPn366PDDD4/irwIAALqjsMvIlClTtHPnTs2ePVu1tbXKyMhQeXm5Bg4cKEmqqamR0/nDAZft27dr1KhRgcd33nmn7rzzTo0dO1arV6+O/DcAAADdWoc+wFpUVKSioqI25/24YKSnpyuMC3YAAMB+hu+mAQAAlqKMAAAAS9nyPiPtyVlQqT0xvSPOcbuMbs/+/r4i0bwUCgAAhI8jIwAAwFKUEQAAYCnKCAAAsBRlBAAAWIoyAgAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACwFGUEAABYijICAAAsRRkBAACWoowAAABLUUYAAIClKCMAAMBSlBEAAGApyggAALAUZQQAAFiKMgIAACzVoTJSWlqq9PR0xcfHKycnR9XV1ftc/plnntGwYcMUHx+vY445Ri+88EKHBgsAAHqesMvI0qVL5fF4VFJSog0bNmjkyJHKy8vTjh072lx+zZo1uvDCC3XppZfqrbfe0uTJkzV58mT95z//iXjwAACg+wu7jCxcuFDTpk1TYWGhRowYocWLFyshIUFLlixpc/m7775bp556qm688UYNHz5c8+bN03HHHaf77rsv4sEDAIDuLyachZubm7V+/XoVFxcHpjmdTk2YMEFVVVVtrlNVVSWPxxM0LS8vT8uXL2/3ebxer7xeb+BxfX3994P1NYYz3HbF+I2amvyK8TnV4nfYJos8e+XZeWzk8dqS1zV5dh5bd8jz72mSJBlj9r2gCcO2bduMJLNmzZqg6TfeeKPJzs5uc53Y2FhTVlYWNK20tNQMGDCg3ecpKSkxkvjhhx9++OGHnx7w8/nnn++zX4R1ZKSrFBcXBx1N+fbbbzVkyBDV1NTowAMPjDi/oaFBaWlp+vzzz5WYmGibLPLslWfnsZHHa0te1+TZeWzdIc8Yo127dunggw/e53JhlZH+/fvL5XKprq4uaHpdXZ1SUlLaXCclJSWs5SXJ7XbL7Xa3mn7ggQdG5S9nr8TExKjlRTOLPHvl2Xls5Nkni7yenWfnsdk9L5SDCGF9gDUuLk6ZmZmqrKwMTPP7/aqsrFRubm6b6+Tm5gYtL0kVFRXtLg8AAPYvYZ+m8Xg8mjp1qrKyspSdna1FixapsbFRhYWFkqSCggKlpqZqwYIFkqTrrrtOY8eO1R//+EdNmjRJTz/9tNatW6c///nP0f1NAABAtxR2GZkyZYp27typ2bNnq7a2VhkZGSovL9fAgQMlSTU1NXI6fzjgMnr0aJWVlWnmzJn63e9+pyOOOELLly/X0UcfHfJzut1ulZSUtHnqpiOimWfnsZFnnyzy7JVn57GRZ688O4+tO+SFymHMT11vAwAA0Hn4bhoAAGApyggAALAUZQQAAFiKMgIAACxFGQEAAJay5e3gv/rqKy1ZskRVVVW
qra2V9P2dXEePHq1f//rXOuiggyweIWCtvV8kGa3L7+ycV19fH/TvQCRfCRHNLNiPnd/H0c6L9nvZ6m3DdkdG/v3vf+vII4/UPffcowMPPFBjxozRmDFjdOCBB+qee+7RsGHDtG7durBz9+zZo02bNmnVqlVatWqVNm3aJJ/P16ExRjNLkmpra7VixQo98MADeuCBB7RixYrAm8IOedL3b9T3339f77//fuBblHtynh1VVFTo9NNPV9++fZWQkKCEhAT17dtXp59+ul5++eUel/fQQw9pxIgR6tevn0aMGBH03w8//LBlWT/2428Zj5Sd8+y43dr9fWzn7aIz8joslG/r7Uo5OTnm8ssvN36/v9U8v99vLr/8cnPCCSeEnNfS0mJuvvlmk5SUZBwOR9BPUlKSmTlzpmlpaenyLGOM+e6778xFF11kXC6XiYmJMQMGDDADBgwwMTExxuVymYsvvtg0NjZalmeMMQ8++KAZPny4cTqdQT/Dhw83Dz30UFhZ3SHPGGN8Pp/ZuHGjKS8vN+Xl5Wbjxo2mubm5Q1nRynv00UdNTEyMueCCC8wjjzxiXnjhBfPCCy+YRx55xFx44YUmNjbWPP744z0m7/bbbzcJCQlmxowZ5tVXXzXvvfeeee+998yrr75qiouLTe/evc0dd9zR5Vl7vfTSS+a0004zSUlJgfdcUlKSOe2000xFRUVYWd0hz67brd3fx3beLjojLxK2KyPx8fFm8+bN7c7fvHmziY+PDznvxhtvNAcddJBZvHix+fTTT01TU5Npamoyn376qXnggQfMgAEDzPTp07s8yxhjLr30UnPEEUeY8vJys2fPnsD0PXv2mFWrVpkjjzzSXHbZZZbl2f2NH+28aJfNaOYdccQR5r777mt3fmlpqTn88MNDHpvd8wYPHmyWLl3a7vynn37apKWldXmWMfbfYe1PO0C7v4/tvF10Rl4kbFdG0tPTzWOPPdbu/Mcee8wMGTIk5LyBAwea8vLydueXl5ebAQMGdHmWMcYkJSWZf/3rX+3Of+ONN0xSUpJleXZ/40c7L9plM5p5brfbbNmypd35W7ZsCauk2z0vPj7evPfee+3Of/fdd02vXr26PMsY+++w9qcdoN3fx3beLjojLxK2KyP33Xefcbvd5tprrzUrVqwwb775pnnzzTfNihUrzLXXXmt69eplSktLQ85LSEgwb7/9drvzN23aZHr37t3lWcYYk5iYaP7973+3O7+6utokJiZalmf3N36086JdNqOZd9xxx5kbb7yx3fnTp083xx13XMhjs3vez3/+c1NQUGB8Pl+reXv27DEFBQVmzJgxXZ5ljP13WPvTDtDu72M7bxedkRcJW343zdKlS3XXXXdp/fr1amlpkSS5XC5lZmbK4/Ho/PPPDzlr0qRJ2rNnj5566in1798/aN5XX32lSy65RC6XS//4xz+6NEuSLrroIm3evFkPP/ywRo0aFTTvrbfe0rRp0zRs2DA9+eSTluSNGTNGQ4cO1cMPP6yYmOALr1paWvSb3/xGW7du1WuvvdYj8nr37q0333xTxxxzTJvz3377bY0ePVrfffddl+etXr1aZ5xxhg499FBNmDAh8MWUdXV1qqys1CeffKKVK1dqzJgxIY3N7nlvv/228vLy5PP5NGbMmKC8119/XXFxcXrppZdC+sLNaGZJUmZmpn7xi1/o9ttvb3P+TTfdpJdfflnr16/vEXl23m7t/j6283bRGXmRsGUZ2cvn8+mrr76SJPXv31+xsbFhZ3z++ec6/fTTtWXLFh1zzDFBf9nvvPOORowYoX/84x9KS0vr0ixJ+uabb5Sfn69Vq1apb9++GjBggCRpx44d+vbbb5WXl6eysjIlJSVZkmf3N36086JdNqOdt3XrVt1///168803gy7By83N1ZVXXqn09PSQcrpL3q5du/Tkk0+2mZefn6/ExERLsuy+w9rfdoB2fx/bebvojLyOsnUZiRa/369Vq1a1+Zd9yimnyOkM/QrnaGbttXnz5jbzhg0bFnZWtPPs/saPZl60y2a082Afdt9hsQNEd7NflBEgVNEum9HO27Nnj959991A1qBBgzR8+PAOHTXsDnm1tbVau3ZtUF52drZSUlIszYK92P19bOftojPyOmK/KSPV1dVt3tH1+OOPtzSrublZy5cvbzPv7LPPVlxcnKV5kv3f+HbYkDqb3+/X7NmzVVpa2urmUAceeKCKioo0d+7ckMuN3fMaGxt1xRVX6Omnn5bD4VC/fv0kSV9//bWMMbrwwgv1wAMPKCEhoUuz/pvdd1j7ww7Q7u9jO28XnZEXkS75mKyF6urqzEknnWQcDocZMmSIyc7ONtnZ2WbIkCHG4XCYk046ydTV1XV5ljHGfPjhh+bQQw818fHxZuzYseb88883559/vhk7dqyJj483hx9+uPnwww8ty7P7Tdk64yZvxhizdu1as2jRIjNjxgwzY8YMs2jRIlNdXR12TjTz7HzZcWfkRfOeOdG+/46d70fTGXl23m7t/j6283bRGXmR6PFl5NxzzzW5ubltXuq2ZcsWM3r0aPOrX/2qy7OMMWbChAnm7LPPNvX19a3m1dfXm7PPPtuccsopluXZ/Y0f7bxol81o5tn5suPOyIvmPXOiff8du++w9qcdoN3fx3beLjojLxI9voz06dPHbNiwod3569atM3369OnyLGOM6dWrl3nnnXfanf/222+Hdf1+tPPs/saPdl60y2Y086J9jxu750XznjnRvv+O3XdY+9MO0O7vYztvF52RFwnbfVFetLndbjU0NLQ7f9euXSF/g2I0syQpKSlJW7dubXf+1q1bQ74MtzPy/H7/Pj9jEhcXJ7/f32PyVq1apdLSUh111FGt5h111FG65557VF5ebkneuHHj9Nvf/jZwqft/++qrr3TTTTdp3LhxIY/N7nlnnHGGLr/8cr311lut5r311lu66qqrdOaZZ3Z5lvT9dn7wwQe3O3/QoEFqbGzsMXl23m7t/j6283bRGXkR6ZLKY6Grr77aDBkyxCxbtizo9EV9fb1ZtmyZSU9PN0VFRV2eZYwxs2bNMn379jULFy40mzZtMrW1taa2ttZs2rTJLFy40PTr18+UlJRYlpefn29GjRrV5tGgDRs2mMzMTHPRRRf1mLzk5GSzevXqdue/+uqrJjk52ZK8mpoac/TRR5uYmBgzatQoc+qpp5pTTz3VjBo1ysTExJhjjz3W1NTUhDw2u+d9/fXX5tRTTzUOh8P069fPDBs2zAwbNsz069fPOJ1Oc9ppp5lvvvmmy7OMMeb00083p5xyitm5c2ereTt37jSnnnqqmTRpUo/Js/N2a/f3sZ23i87Ii0SPv5rG6/Xq+uuv15IlS7Rnz55AI29ublZMTIwuvfRS3XXXXSEd0Wgvy+v1KjY2Nqysvf7whz/o7rvvVm1trRwOhyTJGKOUlBRdf/31mj59eli/bzTz7H5TtmjnXXPNNVq5cqXuuusu/eIXvwjc66ChoUGVlZXyeDw644wzdO+991qSZ/fLjjvjHjxbtmxpdWVYR+
+ZE63779j9fjT7280Z7f4+tvt2IUX/Xlcd0ePLyF4NDQ1av3590F92ZmZmh26u09DQoHXr1qmurk6SNHDgQGVlZUV0o55PP/00aGxDhw7tcFa08+x8UzYpehtmNItrZ+TBPuy+w2IHiO5mvykjnSkuLk6bNm3S8OHDrR4KoiCaxTXaedG8x430/U6rrR2T3+/XF198ocGDB3cod6+TTz5ZjzzyiIYMGRLWel6vV06nM3BPjI8//lhLlixRTU2NhgwZoksvvTSsgr1p0yatX79e48aN06GHHqp3331XpaWl8vv9Ouecc5SXlxfW+GAvbBcd2y4k+2wb+0UZ+X//7/9p/fr16tevn0aMGBE0b/fu3frrX/+qgoKCn8zxeDxtTr/77rt18cUXKzk5WZK0cOHCkMa1YcMG9e3bN/DmeeKJJ7R48eLAG6uoqEgXXHBBSFl73Xfffaqurtbpp5+uCy64QE888YQWLFggv9+vX/7yl7rllltafTnVvnTGTdS++OILJSUlqU+fPkHTfT6fqqqqQv7OjPYceuihWrVqlY444oiIcuxkx44dOvfcc/Wvf/1LgwcPDjr0XlNToxNPPFHPPfdc4JD3T2loaNBll12mv//970pMTNQVV1yhkpISuVyuQO7BBx8c+KLKn/L888+3Of2Xv/yl7r777sApgbPOOiukvHHjxqmoqEi/+tWv9K9//Uu/+MUvdNRRR2n48OH64IMP9P777+vll19Wbm7uT2YtW7ZM559/vpKSkuT1evW3v/1N5513nrKysuRyufTyyy/r8ccfV35+fkhj24sdoPU7QLaLjm8XUudtGx3SJZ9MsdD7778fuK+D0+k0Y8aMMdu2bQvMr62tNU6nM6Qsh8NhMjIyzLhx44J+HA6HOf744824cePM+PHjQx7bscceayoqKowxxjz44IOmV69e5tprrzX333+/uf76602fPn3Mww8/HHLevHnzzAEHHGDOPfdck5KSYm677TaTnJxsbr31VjN//nxz0EEHmdmzZ4ecF+2bqG3fvt0cf/zxxul0GpfLZS655BKza9euwPxwXgtjjLn77rvb/HG5XKa4uDjwOFSff/550If+Xn/9dZOfn29OOukkc9FFF5k1a9aEnLXX3//+dzNr1izzxhtvGGOMqaysNKeddprJy8szDzzwQMg50b7s+NprrzVHHnmkeeaZZ8yDDz5ohgwZYiZNmmS8Xq8x5vvXwuFwhJy3d/v68U22/vsnnNc2MTHRfPDBB8YYY8aOHWtuuOGGoPkzZ840J554YkhZxx13nLn11luNMcb85S9/MUlJSeaWW24JzL/zzjtNRkZGyGOL9v1o6uvrzXnnnWfi4+PNgAEDzKxZs4LuvxHudrFixYo2f1wul7nvvvsCj0M1duxY88wzzxhjvr/s1u12m2OPPdZMmTLFjBo1yiQkJIS1bTz33HPG5XKZ5ORk06dPH1NRUWGSkpLMhAkTTF5ennG5XOapp54KKYvtouPbhTHR3zYi0ePLyOTJk82kSZPMzp07zYcffmgmTZpkhg4daj777DNjTHgb+oIFC8zQoUNNZWVl0PSYmBjz7rvvhj22Xr16ma1btxpjjBk1apT585//HDT/qaeeMiNGjAg577DDDjPPPfecMcaYjRs3GpfLZZ588snA/GXLlpnDDz885Lxo30StoKDA5OTkmH//+9+moqLCZGZmmqysLPP1118bYzq2oR9yyCEmPT096MfhcJjU1FSTnp5uhg4dGnJedna2+fvf/26MMWb58uXG6XSas846y9x0003mnHPOMbGxsYH5oVi8eLGJiYkxmZmZJjEx0TzxxBPmgAMOMJdddpm54oorTK9evcyiRYtCyor2PW4GDx5sXn311cDjnTt3muzsbHPKKaeY3bt3h70D3HuFxo93wh3dNnr37m02b95sjPn+vhkbN24Mmv/RRx+F/Pv27t3bfPrpp8YYY/x+v4mNjQ2698PHH38c1t8dO0D77ADZLjq+XezNi+a2EYkeX0YGDBgQ9Jfr9/vNlVdeaQYPHmw+/vjjsN9c1dXV5sgjjzT/+7//a5qbm40xHX9jJScnm3Xr1gXG2dYbK9ybnu0tWcYYExsba/7zn/8EHm/dutUkJCSElRfNm6gdfPDBZu3atYHHu3fvNmeeeabJyMgw//d//xf2a3HFFVeYjIwM89577wVNj2RD/+STT4wxxuTk5JjbbrstaP69995rRo0aFXLeiBEjAgXzlVdeMfHx8aa0tDQw/5FHHjHDhw8PKSvalx336tUr8Lvu1dDQYHJzc83JJ59sPvnkk7BeC2OMWbhwoUlLSwsqbB19LU4++WRz++23G2OMGT16tHnssceC5j/77LNm8ODBIWWlpKQEtrOvv/7aOByOoB1OdXW1SUlJCXls7ADtswNku+j4dmFM9LeNSPT4MnLAAQe02lkZY8w111xjDjnkEPP666+H/ebatWuXKSgoMMcee6x55513TGxsbIfeWBdffLG59NJLjTHGnHfeeWbmzJlB8+fPn2+OOeaYkPOGDh1qXnzxRWOMMR988IFxOp3mr3/9a2D+ypUrTXp6esh5gwYN2ueRgOeff94MGjQo5LzevXsH/g9rL5/PZyZPnmyOPfZY8/bbb4f9WixbtsykpaWZe++9NzCtoxv6gQceaDZt2mSM+b4c7v3vvT766KOwy9yPy+F/l7tPP/005Lxo3+PmqKOOMitXrmw1fdeuXSY3N9eMHDky7NfCGGPeeustM2LECHP55ZebxsbGDr8Wa9asMQceeKApKSkx9957r+nfv7+ZOXOmeeqpp8zs2bNNUlKS+cMf/hBS1sUXX2xycnLMk08+ac4880yTl5dnTjjhBLN582azZcsWM3bs2LCOZLADtM8OkO2i49uFMdHfNiLR48vI8ccfbx5//PE2511zzTUmKSmpQ28uY74/xDhw4EDjdDo79Mbatm2bSU9PN2PGjDEej8f06tXLnHTSSWbatGlmzJgxJi4urs0Noz0zZ840Bx10kLnsssvM0KFDzYwZM8zgwYPN/fffbxYvXmzS0tJaHWLdl2jfRO2YY44xzz77bKvpewvJ4MGDO/RafPHFF+bkk082p556qvnyyy87vKGfddZZZsaMGcYYY/Ly8lp93uTBBx80RxxxRMh5e8uuMd+/1g6HI+j1XL16tTnkkENCytq9e7e58sorTVxcnHE6nSY+Pt7Ex8cbp9Np4uLizFVXXWV2794d8tj+53/+p91/ZBoaGkxOTk6Ht4umpiZzxRVXmCOOOMK4XK4OvRbGfP8P7wknnNDqFENqamrIp7eM+f40x8SJE02fPn1MXl6e+fbbb01RUVHgdMURRxxhPvroo5Dz2AHaZwfY3nbhcDjYLkIQ7W0jEj2+jMyfP9+cdtpp7c6/6qqrwjof+2Off/65Wb58ufnuu+86tP4333xjbrrpJjNixAgTHx9v4uLizJAhQ0x+fv4+vzOgLS0tLeb3v/+9OeOMM8z8+fON3+83f/nLX0xaWppJTk42v/71r8Me52233WYGDRoUeHPuPRc9aNCgs
P4BMsaY6dOnt/sZE5/PZ84666wOvxZ+v9/Mnz/fpKSkdHhDf++990xycrIpKCgw8+bNM3369DEXX3yx+f3vf28KCgqM2+02jzzySMh511xzjTniiCPMrbfearKzs83UqVPNsGHDzIsvvmjKy8vNMcccY37zm9+ENcb6+nrzyiuvmLKyMlNWVmZeeeWVNj/T81O+/vrroFN4P9bQ0LDP//sPxYoVK8z1118f1oc527Jjxw7z5ptvmjVr1gQO70fDxx9/bN555x3j8/nCWo9i2Hk7QIfD0aEdYH19vamsrAxsF5WVlVHdLvx+vzEmetvFtddeG9Xt4sdH1iLV0W0jEvvFpb2IXDRuorZnzx41NTW1e3+NPXv2aNu2bWFfevjf1q9frzfeeEMFBQXq27dv2Ot//PHHmjlzplauXKnvvvtOkhQTE6Pjjz9eN954oyZPnhxyVmNjo2644QZVVVVp9OjRuvfee3XPPffo5ptvls/n09ixY7V06dKQLzuEvUTr/jHffPONtm/frp/97Gdtzt+1a5c2bNigsWPHdniszz//vF599VUVFxdH9H7buXOnPvnkE/n9fg0aNEjp6ekdzvqxTz75RE1NTRo2bFhYtx9oS7Tv/URe56OMoMM+//xzlZSUaMmSJT0uzxijHTt2yO/3q3///oF7LETD7t275fP5dMABB4S1XrTul7M/5kV7bHvvHrr3jqFbtmzR3XffLa/Xq4svvlgnn3xyyFldlbdo0SI1NzdHlDd69GgdddRRURtfpHnRvvcTeZHlRaTLjsGgx9m4cWOHDx93x7yamhpTWFhoSV4075fTXt727dt7ZF60x/biiy+auLg4069fPxMfH29efPFFc9BBB5kJEyaYk08+2bhcrlaX/5PXOXnRvvcTeZHlRYIygna1d/OkvT933XVXVG7GZJe8n2JlWYrm/XL2t7xojy03N9fcfPPNxpjvP8Tet29f87vf/S4wf8aMGWbixInkdUFetO/9RF5keZGgjKBd0b55kt3z7FyWon2/nP0pL9pjS0xMDNx5uKWlxcTExATdd+Sdd94xAwcOJK+L8qJ57yfyIs/rKMoI2nXwwQeb5cuXtzv/rbfeCusfcbvn2bksRft+OftTXrTHlpiYGHS1R58+fczHH38ceLx161YTHx9PXhflGRO9ez+RF528jgj/e6Sx38jMzNT69evbne9wOGTC+Pyz3fMGDRqkZcuWye/3t/mzYcOGkLOinTds2DCtW7eu1fT77rtPZ599dshftLU/5kV7bOnp6frwww8Dj6uqqoK+xK6mpkaDBg0ir4vyJKlPnz567LHHVFxcrAkTJoT8RXbkdU5eR1BG0K4bb7xRo0ePbnf+4YcfrldffbXH5Nm5LJ1zzjn6y1/+0ua8++67TxdeeGFYY9uf8qI9tquuuiroH+ujjz466FLUF198MayrS8iLLO+/XXDBBVq3bp2WLVsW0S0CyItOXji4tBf4//3zn/9UY2OjTj311DbnNzY2at26dSHf7yHaeQDQU1FGAACApThNAwAALEUZAQAAlqKMAAAAS1FGAACApSgjAADAUpQRAABgKcoIAACw1P8HhsZBdQbHzdIAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n" + ] + } + ], + "source": [ + "def perplexity_histogram(perplexities: List[float]):\n", + " series = pd.Series(perplexities)\n", + " series.hist(bins=1000, weights=np.zeros_like(series) + 1. / len(series), cumulative=True)\n", + " plt.xlim(0, 5000)\n", + " plt.xticks(np.arange(0, 5000, 200), rotation=90)\n", + " plt.yticks(np.arange(0, 1.1, 0.1))\n", + " plt.show()\n", + "\n", + "def get_perplexities_and_text(corpus: Iterable[str], n: float = None) -> List[float]:\n", + " perplexities = []\n", + " texts = []\n", + " if n is None:\n", + " for entry in enumerate(corpus):\n", + " perplexities.append(entry['perplexity_score'])\n", + " texts.append(entry['text'])\n", + " else:\n", + " for i, entry in enumerate(corpus):\n", + " if i >= n:\n", + " break\n", + " perplexities.append(entry['perplexity_score'])\n", + " texts.append(entry['text'])\n", + "\n", + " return perplexities, texts\n", + "\n", + "def describe_perplexities(perplexities: List[float]):\n", + " print(pd.Series(perplexities).describe())\n", + "\n", + "perplexities, _ = get_perplexities_and_text(data, n=100_000)\n", + "perplexity_histogram(perplexities)\n", + "print(\"\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def describe_perplexities(perplexities: List[float]):\n", + " print(pd.Series(perplexities).describe())" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "count 100000.000000\n", + "mean 811.207134\n", + "std 1455.270563\n", + "min 7.200000\n", + "25% 364.100000\n", + "50% 516.300000\n", + "75% 817.100000\n", + "max 249779.100000\n", + "dtype: float64\n" + ] + } + ], + "source": [ + "describe_perplexities(perplexities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flan_instruction_tuning_venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/multilinguality_megatron/prepare_data.py b/multilinguality_megatron/prepare_data.py new file mode 100644 index 0000000000000000000000000000000000000000..744aaf24afeb1b6e266a6b60d9f0676af90c35de --- /dev/null +++ b/multilinguality_megatron/prepare_data.py @@ -0,0 +1,275 @@ +import argparse +import gzip +import json +from pathlib import Path +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + TextIO, + Tuple, + Union, +) + +import datasets +import numpy as np +import pandas as pd + +TO_REMOVE = [ + "meta", + "perplexity_score", + "text_length", + "url", + "domain", + "dup_ratio", + "pairs", + "repetitions", + "included_in_dedup", + "cluster", + "id", +] + +L_TO_NAME = { + "en": "English", + "de": "German", + "fr": "French", + "es": "Spanish", + "it": "Italian", + "ru": "Russian", + "zh": "Chinese", + "ko": "Korean", + "pt": "Portuguese", + "nl": "Dutch", + "pl": "Polish", + "sv": "Swedish", +} + + +def gen(l): + for x in l: + yield x + + +def 
_close_when_exhausted(file: TextIO) -> Iterable[str]: + with file: + for line in file: + yield json.loads(line) + + +def _close_when_exhausted_txt(file: TextIO) -> Iterable[str]: + with file: + for line in file: + yield line[:-1] # ignore new line + + +def open_read_cleaned(filename) -> Iterable[str]: + file: TextIO = gzip.open(filename, "rt") # type: ignore + return _close_when_exhausted(file) + + +def open_gzip_txt(filename) -> Iterable[str]: + file: TextIO = gzip.open(filename, "rt") # type: ignore + return _close_when_exhausted_txt(file) + + +def read_parallel_corpus(dir: str, lp: str) -> Tuple[Iterable[str], Iterable[str]]: + src_l, tgt_l = lp.split("-") + if src_l != "en": + lp_path = f"{tgt_l}-{src_l}" + else: + lp_path = lp + src_path = Path(dir) / f"cometkiwi_data.{lp_path}.{src_l}" + tgt_path = Path(dir) / f"cometkiwi_data.{lp_path}.{tgt_l}" + src_corpus = open_gzip_txt(src_path) + tgt_corpus = open_gzip_txt(tgt_path) + return src_corpus, tgt_corpus + + +def unroll_chat(chat): + chat_str = "" + for i, turn in enumerate(chat): + if type(turn["value"]) != str: + pass + else: + chat_str += turn["value"] + return chat_str + + +parser = argparse.ArgumentParser() +parser.add_argument("--dataset_path", type=str, required=True) +parser.add_argument("--output", type=str, required=True) +parser.add_argument("--is_hf_dataset", type=str, required=True, default=False) +parser.add_argument("--n_tokens", type=int, required=False, default=None) +parser.add_argument("--threshold", type=int, required=False, default=None) +parser.add_argument("--min_perplexity", type=int, required=False, default=None) +parser.add_argument("--wikipedia", type=str, required=False, default=False) +parser.add_argument("--posterior_tokens", type=str, required=False, default=False) +parser.add_argument("--n_posterior_tokens", type=int, required=False, default=None) +parser.add_argument("--is_parallel", type=str, required=False, default=False) +parser.add_argument("--lp", type=str, required=False) + +args = parser.parse_args() +if args.posterior_tokens == "False": + if args.wikipedia == "True": + print("on wikipedia") + data = [] + dataset_paths = [p for p in Path(args.dataset_path).iterdir()] + dfs = [] + for dataset_path in dataset_paths: + print("on path", dataset_path) + corpus = open_read_cleaned(dataset_path) + + for doc in corpus: + data.append({"text": doc["text"]}) + + print(dataset_path) + + sub_df = pd.DataFrame(data=data) + dfs.append(sub_df) + + df = pd.concat(dfs, ignore_index=True) + dataset = datasets.Dataset.from_pandas(df) + dataset.to_json(args.output, lines=True) + + else: + if args.is_hf_dataset == "True": + if args.dataset_path == "Unbabel/TowerBlocks-v0.1": + df = datasets.load_dataset( + "Unbabel/TowerBlocks-v0.1", split="train" + ).to_pandas() + dataset = pd.DataFrame() + dataset["text"] = df["conversations"].apply(unroll_chat) + dataset = datasets.Dataset.from_pandas(dataset) + else: + dataset = datasets.load_from_disk(args.dataset_path) + instances_to_select = [] + n_words = 0 + for idx in range(len(dataset)): + perplexity = dataset[int(idx)]["perplexity_score"] + if perplexity < args.threshold and perplexity > args.min_perplexity: + instances_to_select.append(idx) + n_words += len(dataset[int(idx)]["text"].split(" ")) + print(f"Selected {n_words} of {args.n_tokens} tokens.") + if n_words >= args.n_tokens: + break + + dataset = dataset.select(instances_to_select) + + # Remove columns if they exist + for column in TO_REMOVE: + if column in dataset.column_names: + dataset = 
dataset.remove_columns(column) + + print("English") + print("n words", n_words) + + elif args.is_parallel == "False": + data = [] + corpus = open_read_cleaned(args.dataset_path) + + n_words = 0 + for doc in corpus: + perplexity = doc["perplexity"] + if perplexity < args.threshold and perplexity > args.min_perplexity: + if args.lp == "zh": + n_words += len(doc["text"]) + else: + n_words += len(doc["text"].split(" ")) + data.append({"text": doc["text"]}) + if n_words >= args.n_tokens: + break + + print(args.dataset_path) + print("n words", n_words) + + dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=data)) + + elif args.is_parallel == "True": + data = [] + src_data, tgt_data = read_parallel_corpus( + dir=f"{args.dataset_path}", lp=args.lp + ) + n_sents = 0 + for src, tgt in zip(src_data, tgt_data): + if n_sents >= args.n_tokens: + break + data.append( + { + "text": f"{L_TO_NAME[args.lp.split('-')[0]]}: {src}\n{L_TO_NAME[args.lp.split('-')[-1]]}: {tgt}" + } + ) + n_sents += 1 + if n_sents % 1000 == 0: + print(f"Selected {n_sents} of {args.n_tokens} sentences.") + data_len = len(data) + # if xx-en, take 1st half of data; otherwise, take 2nd half + if "-en" in args.lp: + data = data[: int(data_len / 2)] + else: + data = data[int(data_len / 2) :] + dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=data)) + + dataset.to_json(args.output, lines=True) + +else: + if args.is_hf_dataset: + dataset = datasets.load_from_disk(args.dataset_path) + instances_to_select = [] + n_words = 0 + surpassed = False + for idx in range(len(dataset)): + perplexity = dataset[int(idx)]["perplexity_score"] + if perplexity < args.threshold and perplexity > args.min_perplexity: + n_words += len(dataset[int(idx)]["text"].split(" ")) + if n_words >= args.n_tokens: + if surpassed: + instances_to_select.append(idx) + n_posterior_words += len(dataset[int(idx)]["text"].split(" ")) + if n_posterior_words >= args.n_posterior_tokens: + break + else: + n_posterior_words = 0 + surpassed = True + + dataset = dataset.select(instances_to_select) + + # Remove columns if they exist + for column in TO_REMOVE: + if column in dataset.column_names: + dataset = dataset.remove_columns(column) + + print("English") + print("n words", n_words) + + # here, we only start appending after the n_words threshold is satisfied once (this should be connected to another run) + else: + data = [] + corpus = open_read_cleaned(args.dataset_path) + + n_words = 0 + surpassed = False + for doc in corpus: + perplexity = doc["perplexity"] + if perplexity < args.threshold and perplexity > args.min_perplexity: + n_words += len(doc["text"].split(" ")) + # once we surpass the number of tokens, start appending on the next iteration + if n_words >= args.n_tokens: + if surpassed: + data.append({"text": doc["text"]}) + n_posterior_words += len(doc["text"].split(" ")) + if n_posterior_words >= args.n_posterior_tokens: + break + if not surpassed: + n_posterior_words = 0 + surpassed = True + + print(args.dataset_path) + print("n words", n_words) + + dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=data)) + + dataset.to_json(args.output, lines=True) diff --git a/multilinguality_megatron/preprocess_data.sh b/multilinguality_megatron/preprocess_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..f917ef98636acb2e801ef8763919c65bc8063b50 --- /dev/null +++ b/multilinguality_megatron/preprocess_data.sh @@ -0,0 +1,50 @@ +#!/bin/bash + 
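+# Tokenizes a JSONL dataset into Megatron's indexed binary format by calling
+# tools/preprocess_data.py. The paths below are machine-specific defaults;
+# override them via --dataset_json, --dataset_bin, --vocab_file and --repo.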
+dataset_json="/mnt/scratch-artemis/kshitij/oneB_experiment/new_data_wout_covost/combined/to_tokenize.jsonl" +dataset_bin="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b" +vocab_file="/mnt/scratch-artemis/kshitij/LLAMA/Megatron_LLM/temp/new_tokenizer/tokenizer.model" +repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron" + +# Parse command-line arguments +for arg in "$@" +do + case $arg in + --help) + echo "Usage: ./script.sh [OPTIONS]" + echo "Options:" + echo " --dataset_json=PATH Path to dataset json." + echo " --dataset_bin=PATH Path to save preprocessed data." + echo " --vocab_file=PATH Path to tokenizer.model file of HF model to be trained." + echo " --repo=PATH Path to repo." + exit 0 + ;; + --dataset_json=*) + dataset_json="${arg#*=}" + shift + ;; + --dataset_bin=*) + dataset_bin="${arg#*=}" + shift + ;; + --vocab_file=*) + vocab_file="${arg#*=}" + shift + ;; + --repo=*) + repo="${arg#*=}" + shift + ;; + esac +done + +echo $repo +mkdir -p $dataset_bin +python $repo/tools/preprocess_data.py \ + --input=$dataset_json \ + --output_prefix=$dataset_bin/data \ + --tokenizer_type=SentencePieceTokenizer \ + --vocab_file=$vocab_file \ + --chunk_size=64 \ + --workers=64 \ + --append_eod \ + --vocab_extra_ids 5000 diff --git a/multilinguality_megatron/pretrain_bert.py b/multilinguality_megatron/pretrain_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..95d54938dc24102790fd20510103b6edf9b94eeb --- /dev/null +++ b/multilinguality_megatron/pretrain_bert.py @@ -0,0 +1,146 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Pretrain BERT""" + +from functools import partial + +import torch +import torch.nn.functional as F + +import megatron.initialize +import megatron +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_timers +from megatron.core import tensor_parallel +from megatron.data.dataset_utils import build_train_valid_test_datasets +from megatron.model import BertModel, ModelType + +from megatron.utils import average_losses_across_data_parallel_group + + +def model_provider(pre_process=True, post_process=True): + """Build the model.""" + + print_rank_0('building BERT model ...') + + args = get_args() + num_tokentypes = 2 if args.bert_binary_head else 0 + + model_type_bert = ModelType.encoder_or_decoder + model = BertModel( + num_tokentypes=num_tokentypes, + add_binary_head=args.bert_binary_head, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + model_type=model_type_bert) + + return model + + +def get_batch(data_iterator): + """Build the batch.""" + + # Items and their type. + keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. 
+ tokens = data_b['text'].long() + types = data_b['types'].long() + sentence_order = data_b['is_random'].long() + loss_mask = data_b['loss_mask'].float() + lm_labels = data_b['labels'].long() + padding_mask = data_b['padding_mask'].long() + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + +def loss_func(loss_mask, sentence_order, output_tensor): + lm_loss_, sop_logits = output_tensor + + lm_loss_ = lm_loss_.float() + loss_mask = loss_mask.float() + lm_loss = torch.sum( + lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + if sop_logits is not None: + sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), + sentence_order.view(-1), + ignore_index=-1) + sop_loss = sop_loss.float() + loss = lm_loss + sop_loss + averaged_losses = average_losses_across_data_parallel_group( + [lm_loss, sop_loss]) + return loss, {'lm loss': averaged_losses[0], + 'sop loss': averaged_losses[1]} + + else: + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group( + [lm_loss]) + return loss, {'lm loss': averaged_losses[0]} + + +def forward_step(data_iterator, model): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. + timers('batch-generator', log_level=2).start() + tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch( + data_iterator) + timers('batch-generator').stop() + + if not args.bert_binary_head: + types = None + + # Forward pass through the model. + output_tensor = model(tokens, padding_mask, tokentype_ids=types, + lm_labels=lm_labels) + + return output_tensor, partial(loss_func, loss_mask, sentence_order) + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + print_rank_0('> building train, validation, and test datasets ' + 'for BERT ...') + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=args.data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + max_seq_length=args.seq_length, + masked_lm_prob=args.mask_prob, + short_seq_prob=args.short_seq_prob, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + binary_head=args.bert_binary_head) + print_rank_0("> finished creating BERT datasets ...") + + return train_ds, valid_ds, test_ds + + +if __name__ == "__main__": + model_type_bert = ModelType.encoder_or_decoder + args_defaults = {'tokenizer_type': 'BertWordPieceLowerCase'} + megatron.initialize.initialize_megatron(extra_args_provider=None, + args_defaults=args_defaults) + args = megatron.get_args() + megatron.training.pretrain(args, + train_valid_test_datasets_provider, + model_provider, + model_type_bert, + forward_step) diff --git a/multilinguality_megatron/pretrain_ict.py b/multilinguality_megatron/pretrain_ict.py new file mode 100644 index 0000000000000000000000000000000000000000..7143399b0ec69a80d1f2dcde0cf8ae6949b8f4a8 --- /dev/null +++ b/multilinguality_megatron/pretrain_ict.py @@ -0,0 +1,176 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""Pretrain BERT for Inverse Cloze Task""" + +from functools import partial +import math + +import torch +import torch.distributed as dist +import torch.nn.functional as F + +import megatron.training +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_timers +from megatron.core import mpu +from megatron.data.biencoder_dataset_utils import get_ict_batch +from megatron.data.dataset_utils import build_train_valid_test_datasets +from megatron.model import ModelType +import megatron.model.biencoder_model +from megatron.utils import average_losses_across_data_parallel_group + + +def pretrain_ict_model_provider(pre_process=True, post_process=True): + args = get_args() + + ict_model_type = ModelType.encoder_or_decoder + + model = megatron.model.biencoder_model.biencoder_model_provider( + only_context_model=False, + only_query_model=False, + biencoder_shared_query_context_model=args.biencoder_shared_query_context_model, + pre_process=pre_process, + post_process=post_process, + model_type=ict_model_type) + + return model + + +def get_group_world_size_rank(): + group = mpu.get_data_parallel_group() + rank = torch.distributed.get_rank(group=group) + world_size = torch.distributed.get_world_size(group=group) + return group, rank, world_size + + +class AllgatherFromDataParallelRegion(torch.autograd.Function): + + @staticmethod + def forward(ctx, input_): + assert input_.dim() == 2 + group, rank, world_size = get_group_world_size_rank() + + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + torch.distributed.all_gather(tensor_list, input_, group=group) + + output = torch.cat(tensor_list, dim=0).contiguous() + + return output + + + @staticmethod + def backward(ctx, grad_output): + group, rank, world_size = get_group_world_size_rank() + + assert grad_output.shape[0] % world_size == 0 + dim_size = grad_output.shape[0] // world_size + output_list = torch.split(grad_output, dim_size, dim=0) + + # get chunk from this rank + output = output_list[rank].contiguous() + return output + + +def loss_func(output_tensor): + args = get_args() + query_logits, context_logits = output_tensor + + micro_batch_size = query_logits.shape[0] + # recall we assert that tensor_model_parallel_size == 1 + assert mpu.get_tensor_model_parallel_world_size() == 1, \ + "Model parallel size > 1 not supported for ICT" + + global_batch_size = dist.get_world_size() * micro_batch_size + all_query_logits = AllgatherFromDataParallelRegion.apply(query_logits) + all_context_logits = AllgatherFromDataParallelRegion.apply(context_logits) + + # scores are inner products between query and context embeddings + retrieval_scores = torch.matmul(all_query_logits, + torch.transpose(all_context_logits, 0, 1)) + # scaling the retriever scores + if args.retriever_score_scaling: + retrieval_scores = retrieval_scores / math.sqrt(args.hidden_size) + + softmax_scores = F.log_softmax(retrieval_scores, dim=1) + sorted_vals, sorted_indices = torch.topk(softmax_scores, + k=softmax_scores.shape[1], sorted=True) + + def topk_accuracy(k): + return torch.cuda.FloatTensor([sum([int(i in sorted_indices[i, :k]) \ + for i in range(global_batch_size)]) / global_batch_size]) + + topk_accs = [topk_accuracy(int(k)) for k in args.retriever_report_topk_accuracies] + + labels = torch.arange(global_batch_size).long().cuda() + loss = F.nll_loss(softmax_scores, labels, reduction='mean') + reduced_losses = average_losses_across_data_parallel_group([loss, *topk_accs]) + + # Scale the retrieval loss 
+ loss = loss * mpu.get_data_parallel_world_size() + + # create stats_dict with retrieval loss and all specified top-k accuracies + topk_acc_dict = {'top{}_acc'.format(k): v * 100 for k, v in \ + zip(args.retriever_report_topk_accuracies, reduced_losses[1:])} + stats_dict = dict(loss=reduced_losses[0], **topk_acc_dict) + return loss, stats_dict + + +def forward_step(data_iterator, model): + """Forward step.""" + timers = get_timers() + + # Get the batch. + timers('batch-generator', log_level=2).start() + query_tokens, query_mask, \ + context_tokens, context_mask, context_indices = get_ict_batch(data_iterator) + timers('batch-generator').stop() + + # Query and Context Types + query_types = torch.cuda.LongTensor(*query_tokens.shape).fill_(0) + context_types = torch.cuda.LongTensor(*context_tokens.shape).fill_(0) + + # Forward model. + output_tensor = model(query_tokens, query_mask, query_types, context_tokens, + context_mask, context_types) + + return output_tensor, partial(loss_func) + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid and test datasets.""" + args = get_args() + print_rank_0('> building train, validation, and test datasets ' + 'for BERT ICT...') + + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=args.data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + max_seq_length=args.seq_length, + masked_lm_prob=args.mask_prob, + short_seq_prob=args.short_seq_prob, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + binary_head=False, + dataset_type='ict') + print_rank_0("> finished creating BERT ICT datasets ...") + + return train_ds, valid_ds, test_ds + + +if __name__ == "__main__": + ict_model_type = ModelType.encoder_or_decoder + args_defaults = {'tokenizer_type': 'BertWordPieceLowerCase'} + + megatron.initialize.initialize_megatron(extra_args_provider=None, + args_defaults=args_defaults) + args = megatron.get_args() + + megatron.training.pretrain(args, + train_valid_test_datasets_provider, + pretrain_ict_model_provider, + ict_model_type, + forward_step) diff --git a/multilinguality_megatron/pretrain_t5.py b/multilinguality_megatron/pretrain_t5.py new file mode 100644 index 0000000000000000000000000000000000000000..3d959e8de14c94fac8930737f478f3a56c12997b --- /dev/null +++ b/multilinguality_megatron/pretrain_t5.py @@ -0,0 +1,171 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Pretrain T5""" + +from functools import partial + +import torch + +from megatron import ( + get_args, + get_timers, + print_rank_0 +) +from megatron.core import tensor_parallel +from megatron.data.dataset_utils import build_train_valid_test_datasets +from megatron.model import ModelType +import megatron.model +from megatron.training import pretrain +from megatron.utils import average_losses_across_data_parallel_group + + +""" +Pipeline parallelism for T5 +=========================== + +T5 is a model architecture with both encoder and decoder blocks. +Consequently, pipeline parallelism is implemented slightly differently +compared to architectures like GPT and BERT. + +In particular, when pipeline_model_parallel_world_size > 1, each stage +either executes an encoder block or a decoder block. The +--pipeline_model_parallel_split_rank argument controls the rank at which +the split happens: all ranks lower than this argument execute the +encoder block, and all ranks equal to or higher than this argument value +execute the decoder block. 
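+
+For example, with pipeline_model_parallel_world_size = 4 and
+--pipeline_model_parallel_split_rank = 2, ranks 0 and 1 execute the
+encoder block while ranks 2 and 3 execute the decoder block.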
+ +In the encoder section of the model, only one tensor is sent downstream: +the intermediate encoder_hidden_state. In the decoder section of the +model, two tensors are sent downstream in the forward pass: the fully +computed encoder_hidden_state, and the intermediate decoder_hidden_state. + +In particular, these are the shapes of the tensors sent between +different workers: + If rank is in decoder section: + intermediate decoder_hidden_state (pre-transpose), + complete encoder_hidden_state (post-transpose). + If rank is at boundary between encoder and decoder sections: + complete encoder_hidden_state (post-transpose). + If rank is in encoder section: + intermediate encoder_hidden_state (pre-transpose). + +Additionally, we have code in the backward_step function in schedules.py +to accumulate the encoder_hidden_state gradient across skip connections +(encoder_hidden_state fed in as input to each layer in the decoder). +""" + + +def model_provider(pre_process=True, + post_process=True, + add_encoder=True, + add_decoder=True): + """Build the model.""" + print_rank_0('building T5 model ...') + + model_type_t5 = ModelType.encoder_and_decoder + model = megatron.model.T5Model(num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + add_encoder=add_encoder, + add_decoder=add_decoder, + model_type=model_type_t5) + return model + + +def get_batch(data_iterator): + """Build the batch.""" + + keys = ['text_enc', 'text_dec', 'labels', 'loss_mask', + 'enc_mask', 'dec_mask', 'enc_dec_mask'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. + tokens_enc = data_b['text_enc'].long() + tokens_dec = data_b['text_dec'].long() + labels = data_b['labels'].long() + loss_mask = data_b['loss_mask'].float() + + enc_mask = (data_b['enc_mask'] < 0.5) + dec_mask = (data_b['dec_mask'] < 0.5) + enc_dec_mask = (data_b['enc_dec_mask'] < 0.5) + + return tokens_enc, tokens_dec, loss_mask, labels, \ + enc_mask, dec_mask, enc_dec_mask + + +def loss_func(loss_mask, output_tensor): + lm_loss_ = output_tensor.float() + lm_loss = torch.sum( + lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss]) + + return loss, {'lm loss': averaged_losses[0]} + + +def forward_step(data_iterator, model): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. 
+ timers('batch generator', log_level=2).start() + tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask \ + = get_batch(data_iterator) + timers('batch generator').stop() + + # Forward model lm_labels + output_tensor = model(tokens_enc, + tokens_dec, + enc_mask, + dec_mask, + enc_dec_mask, + tokentype_ids=None, + lm_labels=lm_labels) + + return output_tensor, partial(loss_func, loss_mask) + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + print_rank_0('> building train, validation, and test datasets ' + 'for T5 ...') + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=args.data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + max_seq_length=args.encoder_seq_length, + max_seq_length_dec=args.decoder_seq_length, + masked_lm_prob=args.mask_prob, + short_seq_prob=args.short_seq_prob, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + dataset_type='t5') + print_rank_0("> finished creating T5 datasets ...") + return train_ds, valid_ds, test_ds + + +if __name__ == "__main__": + model_type_t5 = ModelType.encoder_and_decoder + args_defaults = {'tokenizer_type': 'BertWordPieceLowerCase'} + megatron.initialize.initialize_megatron(extra_args_provider=None, + args_defaults=args_defaults) + args = megatron.get_args() + + pretrain(args, + train_valid_test_datasets_provider, + model_provider, + model_type_t5, + forward_step) diff --git a/multilinguality_megatron/setup.py b/multilinguality_megatron/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..c5b18c1a6cb002fe625819028d4719e6ad18360a --- /dev/null +++ b/multilinguality_megatron/setup.py @@ -0,0 +1,10 @@ +from setuptools import setup, find_packages + +setup( + name="megatron.core", + version="0.1", + description="Core components of Megatron.", + packages=find_packages( + include=("megatron.core") + ) +) diff --git a/multilinguality_megatron/setup/conda.sh b/multilinguality_megatron/setup/conda.sh new file mode 100644 index 0000000000000000000000000000000000000000..85347c085df2df8ee61740a677a40e25c7189617 --- /dev/null +++ b/multilinguality_megatron/setup/conda.sh @@ -0,0 +1,62 @@ +#!/bin/zsh +# Script for setting up a conda environment with for launching servers +# It sidesteps system-wide installations by relying on conda for most packages +# and by building openssl from source +# TODO: only got it to work with a static build of OpenSSL, which is not ideal +ENV_NAME=towerllm-env + +# get the directory of this script, and go one up to get the root directory +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +DIR="$(dirname "$DIR")" + +set -eo pipefail + +# check if CONDA_HOME is set and create environment +if [ -z "$CONDA_HOME" ] +then + echo "Please set CONDA_HOME to the location of your conda installation" + exit 1 +fi +source ${CONDA_HOME}/etc/profile.d/conda.sh +# python can't handle this dependency madness, switch to C++ +conda install -y -c conda-forge mamba +mamba create -y -n ${ENV_NAME} python=3.9 +conda activate ${ENV_NAME} + +# install gcc, CUDA and set environment variables +mamba install -y -c conda-forge git +mamba install -y "gxx<10.0" -c conda-forge +mamba install -y -c "nvidia/label/cuda-11.8.0" cuda-toolkit cuda-nvcc cuda-cudart + +export PATH=${CONDA_HOME}/envs/${ENV_NAME}/bin:$PATH +export LD_LIBRARY_PATH=${CONDA_HOME}/envs/${ENV_NAME}/lib:$LD_LIBRARY_PATH 
+export CUDA_HOME=${CONDA_HOME}/envs/${ENV_NAME} + +# # install pytorch +mamba install -y pytorch torchvision torchaudio pytorch-cuda=11.8 \ + -c pytorch -c nvidia + +# install apex +pip install ninja packaging +rm -rf .apex && git clone https://github.com/NVIDIA/apex .apex +cd .apex +pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \ + --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./ + + +# install pdsh +# git clone git@github.com:chaos/pdsh.git .pdsh +# cd .pdsh +# autoreconf -i +# ./configure --with-ssh --prefix ${DIR}/.pdsh +# make -j 8 && make install + +# install other dependencies +cd $DIR +pip install --upgrade pip +pip install --no-build-isolation flash-attn +pip install -r /mnt/cephfs-nvme/jpombal/multilinguality_megatron/setup/pip_reps.txt + +conda env config vars set PATH=$PATH +conda env config vars set LD_LIBRARY_PATH=$LD_LIBRARY_PATH +conda env config vars set CUDA_HOME=$CUDA_HOME \ No newline at end of file diff --git a/multilinguality_megatron/setup/pip_reps.txt b/multilinguality_megatron/setup/pip_reps.txt new file mode 100644 index 0000000000000000000000000000000000000000..7622c5898081c484251389122ea16109283e52d5 --- /dev/null +++ b/multilinguality_megatron/setup/pip_reps.txt @@ -0,0 +1,6 @@ +datasets +nltk +pybind11 +deepspeed +deepspeed[autotune] +transformers \ No newline at end of file diff --git a/multilinguality_megatron/split_files.sh b/multilinguality_megatron/split_files.sh new file mode 100644 index 0000000000000000000000000000000000000000..bd1cabce0153638d89fa73b620ac4849b5e2da7d --- /dev/null +++ b/multilinguality_megatron/split_files.sh @@ -0,0 +1,26 @@ +#!/bin/bash +langs=(en_de en_es en_fr en_it en_nl en_pt en_ru en_zh en_ko de_en es_en fr_en it_en nl_en pt_en ru_en zh_en ko_en) +# Read the file name from the command line + +for lang in "${langs[@]}"; do + echo $lang + in_dir=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/DumpHFDataset_baseline/Dataset.${lang} + in_file=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/DumpHFDataset_baseline/Dataset.${lang}/dataset.json + out_dir_big=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/DumpHFDataset/Dataset.${lang}/ + out_dir_small=/mnt/data/shared/multilingual_llm/experiments_megatron/continue_pretraining_tinyllama_all_20B/DumpHFDataset_annealing/Dataset.${lang}/ + mkdir -p $out_dir_big + mkdir -p $out_dir_small + + # Calculate the total number of lines in the file + total_lines=$(wc -l < "$in_file") + + # Calculate the line number for 87.5% of the file + split_line=$(awk -v lines=$total_lines 'BEGIN {print int(lines * 0.875)}') + echo calced split_line at $split_line + + # Split the file into two at the calculated line number + awk -v split_lines=$split_line -v out_dir_big="$out_dir_big" -v out_dir_small="$out_dir_small" 'NR<=split_lines {print > (out_dir_big "/dataset.json")} NR>split_lines {print > (out_dir_small "/dataset.json")}' $in_file + + cp $in_dir/ducttape_exit_code.txt $in_dir/ducttape_stderr.txt $in_dir/ducttape_stdout.txt $in_dir/ducttape_task.sh $out_dir_big + cp $in_dir/ducttape_exit_code.txt $in_dir/ducttape_stderr.txt $in_dir/ducttape_stdout.txt $in_dir/ducttape_task.sh $out_dir_small +done \ No newline at end of file diff --git a/multilinguality_megatron/tasks/data_utils.py b/multilinguality_megatron/tasks/data_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..914acf10c3ff738151a7cc2a5a1c8e4d7707533d --- /dev/null +++ b/multilinguality_megatron/tasks/data_utils.py @@ -0,0 +1,105 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +""" Tasks data utility.""" + +import re +import numpy as np + + +def clean_text(text): + """Remove new lines and multiple spaces and adjust end of sentence dot.""" + + text = text.replace("\n", " ") + text = re.sub(r'\s+', ' ', text) + for _ in range(3): + text = text.replace(' . ', '. ') + + return text + + +def build_sample(ids, types, paddings, label, unique_id): + """Convert to numpy and return a sample consumed by the batch producer.""" + + ids_np = np.array(ids, dtype=np.int64) + types_np = np.array(types, dtype=np.int64) + paddings_np = np.array(paddings, dtype=np.int64) + sample = ({'text': ids_np, + 'types': types_np, + 'padding_mask': paddings_np, + 'label': int(label), + 'uid': int(unique_id)}) + + return sample + + +def build_tokens_types_paddings_from_text(text_a, text_b, + tokenizer, max_seq_length): + """Build token types and paddings, trim if needed, and pad if needed.""" + + text_a_ids = tokenizer.tokenize(text_a) + text_b_ids = None + if text_b is not None: + text_b_ids = tokenizer.tokenize(text_b) + + return build_tokens_types_paddings_from_ids(text_a_ids, text_b_ids, + max_seq_length, tokenizer.cls, + tokenizer.sep, tokenizer.pad) + + +def build_tokens_types_paddings_from_ids(text_a_ids, text_b_ids, max_seq_length, + cls_id, sep_id, pad_id): + """Build token types and paddings, trim if needed, and pad if needed.""" + + ids = [] + types = [] + paddings = [] + + # [CLS]. + ids.append(cls_id) + types.append(0) + paddings.append(1) + + # A. + len_text_a = len(text_a_ids) + ids.extend(text_a_ids) + types.extend([0] * len_text_a) + paddings.extend([1] * len_text_a) + + # [SEP]. + ids.append(sep_id) + types.append(0) + paddings.append(1) + + # B. + if text_b_ids is not None: + len_text_b = len(text_b_ids) + ids.extend(text_b_ids) + types.extend([1] * len_text_b) + paddings.extend([1] * len_text_b) + + # Cap the size. + trimmed = False + if len(ids) >= max_seq_length: + max_seq_length_m1 = max_seq_length - 1 + ids = ids[0:max_seq_length_m1] + types = types[0:max_seq_length_m1] + paddings = paddings[0:max_seq_length_m1] + trimmed = True + + # [SEP]. + if (text_b_ids is not None) or trimmed: + ids.append(sep_id) + if text_b_ids is None: + types.append(0) + else: + types.append(1) + paddings.append(1) + + # Padding. + padding_length = max_seq_length - len(ids) + if padding_length > 0: + ids.extend([pad_id] * padding_length) + types.extend([pad_id] * padding_length) + paddings.extend([0] * padding_length) + + return ids, types, paddings diff --git a/multilinguality_megatron/tasks/eval_utils.py b/multilinguality_megatron/tasks/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e43f36bac0eff199f239b431e48d6c48642ef12a --- /dev/null +++ b/multilinguality_megatron/tasks/eval_utils.py @@ -0,0 +1,181 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Evaluation utilities.""" + +import os +import time +from functools import partial + +import torch + +from megatron import get_args +from megatron import print_rank_last, is_last_rank +from megatron.core import mpu +from megatron.schedules import get_forward_backward_func +import tasks.finetune_utils + + +def accuracy_func_provider(single_dataset_provider): + """Provide function that calculates accuracies.""" + args = get_args() + + # Build dataloaders. 
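+    # One dataloader is built per path in args.valid_data; each is stored with
+    # its dataset_name so accuracy can be reported per validation set.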
+ datapaths = args.valid_data + dataloaders = [] + for datapath in datapaths: + dataset = single_dataset_provider(datapath) + dataloader = tasks.finetune_utils.build_data_loader( + dataset, args.orig_micro_batch_size, num_workers=args.num_workers, + drop_last=(mpu.get_data_parallel_world_size() > 1)) + dataloaders.append((dataset.dataset_name, dataloader)) + + def metrics_func(model, epoch, output_predictions=False): + print_rank_last('calculating metrics ...') + correct = 0 + total = 0 + if output_predictions: + assert mpu.get_data_parallel_world_size() == 1 + named_predictions = [] + names = 'predictions' + for name, dataloader in dataloaders: + output = calculate_correct_answers(name, model, dataloader, + epoch, output_predictions) + if not output_predictions: + correct_ans, total_count = output + else: + correct_ans, total_count, predictions = output + named_predictions.append((name, predictions)) + names += '_' + name + correct += correct_ans + total += total_count + if is_last_rank(): + percent = float(correct) * 100.0 / float(total) + print(' >> |epoch: {}| overall: correct / total = {} / {} = ' + '{:.4f} %'.format(epoch, correct, total, percent)) + + if output_predictions and is_last_rank(): + assert args.load is not None + filename = os.path.join(args.load, names + '.pt') + torch.save(named_predictions, filename) + + return metrics_func + + +def calculate_correct_answers(name, model, dataloader, + epoch, output_predictions): + """Calculate correct over total answers and return prediction if the + `output_predictions` is true.""" + args = get_args() + forward_backward_func = get_forward_backward_func() + start_time = time.time() + for m in model: + m.eval() + saved_micro_batch_size = args.micro_batch_size + saved_global_batch_size = args.global_batch_size + + ds = dataloader.dataset + if hasattr(ds, 'sample_multiplier'): + # If our dataset as a sample_multiplier attribute that means + # each "sample" from the dataset actually has multiple samples + # that will collapse into the batch dimension (for example in + # the RACE dataset that has several options), we need to + # account for that when setting the micro batch size. + sample_multiplier = ds.sample_multiplier + else: + sample_multiplier = 1 + micro_batch_size_times_data_parallel = args.orig_micro_batch_size * args.data_parallel_size + num_micro_batches = args.orig_global_batch_size // micro_batch_size_times_data_parallel + + def loss_func(output_predictions, labels, output_tensor): + logits = output_tensor + + loss_dict = {} + # Add output predictions. + if output_predictions: + assert False + loss_dict['softmaxes'] = torch.nn.Softmax(dim=-1)( + logits.float()).data.cpu().numpy().tolist() + loss_dict['labels'] = labels.data.cpu().numpy().tolist() + loss_dict['ids'] = batch['uid'].cpu().numpy().tolist() + # Compute the correct answers. + predicted = torch.argmax(logits, dim=-1) + corrects = (predicted == labels) + # Add to the counters. + loss_dict['total'] = labels.size(0) + loss_dict['correct'] = corrects.sum().item() + + return 0, loss_dict + + # defined inside to capture output_predictions + def correct_answers_forward_step(batch, model): + try: + batch_ = next(batch) + except BaseException: + batch_ = batch + tokens, types, labels, attention_mask = tasks.finetune_utils.process_batch(batch_, args.fp16) + + # Forward model. 
+ args = get_args() + output_tensor = model(tokens, attention_mask, tokentype_ids=types) + + return output_tensor, partial(loss_func, output_predictions, labels) + + with torch.no_grad(): + # For all the batches in the dataset. + total = 0 + correct = 0 + if output_predictions: + # This option is only possible when data parallel size is 1. + assert mpu.get_data_parallel_world_size() == 1 + softmaxes = [] + labels = [] + ids = [] + for _, batch in enumerate(dataloader): + # For evaluation only mode we use drop_last = False to get all the + # samples, which means we might not have a full batch, so we + # adjust batch_size here to actual batch size of data + actual_batch_size = len(batch['label']) + # ... applying sample_multiplier if necessary + args.micro_batch_size = actual_batch_size * sample_multiplier + args.global_batch_size = actual_batch_size * sample_multiplier * num_micro_batches + + loss_dicts = forward_backward_func(correct_answers_forward_step, batch, model, + optimizer=None, timers=None, forward_only=True) + + for loss_dict in loss_dicts: + if output_predictions: + softmaxes.extend(loss_dict['softmaxes']) + labels.extend(loss_dict['labels']) + ids.extend(loss_dict['ids']) + total += loss_dict['total'] + correct += loss_dict['correct'] + + + for m in model: + m.train() + args.micro_batch_size = saved_micro_batch_size + args.global_batch_size = saved_global_batch_size + + # Reduce. + if mpu.is_pipeline_last_stage(): + unreduced = torch.cuda.LongTensor([correct, total]) + torch.distributed.all_reduce(unreduced, + group=mpu.get_data_parallel_group()) + + # Print on screen. + + correct_ans = unreduced[0].item() + total_count = unreduced[1].item() + percent = float(correct_ans) * 100.0 / float(total_count) + elapsed_time = time.time() - start_time + print_rank_last(' > |epoch: {}| metrics for {}: correct / total ' + '= {} / {} = {:.4f} %, elapsed time (sec): {:.3f}'.format( + epoch, name, correct_ans, total_count, + percent, elapsed_time)) + + if output_predictions: + return correct_ans, total_count, (softmaxes, labels, ids) + return correct_ans, total_count + if output_predictions: + return 0, 0, () + return 0, 0 diff --git a/multilinguality_megatron/tasks/finetune_utils.py b/multilinguality_megatron/tasks/finetune_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b14740980e9101cfef7d0c6a3b8432ce5689be81 --- /dev/null +++ b/multilinguality_megatron/tasks/finetune_utils.py @@ -0,0 +1,309 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
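The counter reduction at the end of `calculate_correct_answers` above can be sketched in isolation as follows; this assumes a single-process `gloo` group purely for demonstration, whereas the real code all-reduces CUDA tensors over `mpu.get_data_parallel_group()` inside an already-initialized Megatron job:

```python
import torch
import torch.distributed as dist

# Single-process group for illustration only.
dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500",
                        rank=0, world_size=1)

correct, total = 37, 50                       # per-rank counters (made-up numbers)
unreduced = torch.tensor([correct, total], dtype=torch.long)
dist.all_reduce(unreduced)                    # defaults to SUM across all ranks

correct_ans, total_count = unreduced[0].item(), unreduced[1].item()
print("accuracy = {:.4f} %".format(100.0 * correct_ans / total_count))
dist.destroy_process_group()
```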
+ +"""Finetune utilities.""" + +from functools import partial +import sys +import torch + +from megatron import get_args, get_num_microbatches +from megatron import print_rank_0 +from megatron import get_timers +from megatron.core import mpu +from megatron.checkpointing import load_checkpoint +from megatron.checkpointing import save_checkpoint +from megatron.model import ModelType + +import megatron.training +import megatron.utils +from megatron.training import training_log +from megatron.utils import average_losses_across_data_parallel_group +from megatron.utils import calc_params_l2_norm + + +def process_batch(batch, is_fp16: bool): + """Process batch and produce inputs for the model.""" + tokens = batch['text'].long().cuda().contiguous() + types = batch['types'].long().cuda().contiguous() + labels = batch['label'].long().cuda().contiguous() + attention_mask = batch['padding_mask'].float().cuda().contiguous() + if is_fp16: + attention_mask = attention_mask.half() + + return tokens, types, labels, attention_mask + + +def cross_entropy_loss_func(labels, output_tensor): + logits = output_tensor + + # Cross-entropy loss. + loss_func = torch.nn.CrossEntropyLoss() + loss = loss_func(logits.contiguous().float(), labels) + + # Reduce loss for logging. + averaged_loss = average_losses_across_data_parallel_group([loss]) + return loss, {'lm loss': averaged_loss[0]} + + +def _cross_entropy_forward_step(batch, model): + """Simple forward step with cross-entropy loss.""" + timers = get_timers() + args = get_args() + # Get the batch. + timers('batch-generator', log_level=2).start() + try: + batch_ = next(batch) + except BaseException: + batch_ = batch + tokens, types, labels, attention_mask = process_batch(batch_, args.fp16) + timers('batch-generator').stop() + + # Forward model. + output_tensor = model(tokens, attention_mask, tokentype_ids=types) + + return output_tensor, partial(cross_entropy_loss_func, labels) + + +def build_data_loader(dataset, + micro_batch_size, num_workers, drop_last, + task_collate_fn=None): + """Data loader. Note that batch-size is the local (per GPU) batch-size.""" + + # Sampler. + world_size = mpu.get_data_parallel_world_size() + rank = mpu.get_data_parallel_rank() + sampler = torch.utils.data.distributed.DistributedSampler( + dataset, num_replicas=world_size, rank=rank) + + # Data loader. Note that batch size is the per GPU batch size. + data_loader = torch.utils.data.DataLoader(dataset, + batch_size=micro_batch_size, + sampler=sampler, + shuffle=False, + num_workers=num_workers, + drop_last=drop_last, + pin_memory=True, + collate_fn=task_collate_fn) + + return data_loader + + +def _build_infinite_size_dataloader(dataloader): + """Build a looped dataloader with infinite size.""" + + iterator = dataloader.__iter__() + while True: + try: + yield iterator.__next__() + except StopIteration: + iterator = dataloader.__iter__() + + +def _build_train_valid_dataloaders(train_dataset, valid_dataset, + task_collate_fn=None): + """Traing and validation dataloaders.""" + args = get_args() + + print_rank_0('building train and validation dataloaders ...') + # Training dataset. + train_dataloader = build_data_loader(train_dataset, args.micro_batch_size, + args.num_workers, not args.keep_last, + task_collate_fn) + # Set the training iterations. + args.train_iters_per_epoch = len(train_dataloader) + args.train_iters = args.epochs * args.train_iters_per_epoch + # Validation dataset. For this dataset, we do not need to set up + # shuffling so we can just use a simple infinite loop. 
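The `_build_infinite_size_dataloader` generator above simply restarts iteration whenever one pass over the loader finishes, so validation can keep drawing batches indefinitely. A self-contained sketch with a toy `TensorDataset` (values invented):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(10))
loader = DataLoader(dataset, batch_size=4)

def infinite(dataloader):
    """Restart the iterator whenever one epoch over the data finishes."""
    iterator = iter(dataloader)
    while True:
        try:
            yield next(iterator)
        except StopIteration:
            iterator = iter(dataloader)

stream = infinite(loader)
for _ in range(5):                     # happily runs past the 3 batches of one epoch
    print(next(stream)[0].tolist())
```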
+ valid_dataloader_ = build_data_loader(valid_dataset, args.micro_batch_size, + args.num_workers, not args.keep_last, + task_collate_fn) + valid_dataloader = _build_infinite_size_dataloader(valid_dataloader_) + + # Now that we've built the data loaders, set batch_size arguments + # to the actual batch size the model will see for this dataset. + # This is necessary so pipeline transfers know what size they are + # and the LR schedule, which is based on samples seen, gets set + # correctly. + args.orig_micro_batch_size = args.micro_batch_size + args.orig_global_batch_size = args.global_batch_size + if hasattr(train_dataset, 'sample_multiplier'): + # If our dataset as a sample_multiplier attribute that means + # each "sample" from the dataset actually has multiple samples + # that will collapse into the batch dimension (for example in + # the RACE dataset that has several options), we need to + # account for that when setting the micro batch size. + args.micro_batch_size *= train_dataset.sample_multiplier + args.global_batch_size *= train_dataset.sample_multiplier + + return train_dataloader, valid_dataloader + + +def _train(model, + optimizer, + opt_param_scheduler, + forward_step, + train_dataloader, + valid_dataloader, + end_of_epoch_callback, + args): + """Train the model.""" + timers = get_timers() + + assert get_num_microbatches() == 1, "finetuning with gradient accumulation doesn't currently work" + + # Turn on training mode which enables dropout. + for m in model: + m.train() + + # Tracking loss. + losses_dict_sum = {} + + # Starting epoch and iteration + start_epoch = args.iteration // args.train_iters_per_epoch + start_iteration = args.iteration % args.train_iters_per_epoch + iteration = args.iteration + + # Memory reporting flag. + report_memory_flag = True + + # For each remaining epoch + timers('interval-time', log_level=0).start(barrier=True) + for epoch in range(start_epoch, args.epochs): + print_rank_0('working on epoch {} ...'.format(epoch + 1)) + + # Set the data loader epoch to shuffle the index iterator. + train_dataloader.sampler.set_epoch(args.seed + epoch) + + # For all the batches in the dataset. + for iteration_, batch in enumerate(train_dataloader): + + # Ignore the iterations before starting value + if iteration_ < start_iteration: + continue + # Set to zero so the next epoch does not skip any batches. + start_iteration = 0 + + # Train for one step. + out = megatron.training.train_step(forward_step, batch, model, optimizer, opt_param_scheduler, args) + + losses_dict, skipped_iter, grad_norm, num_zeros_in_grad = out + iteration += 1 + + # Logging. 
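A worked example of the epoch/iteration resume arithmetic used in `_train` further below; the iteration counts are made up:

```python
train_iters_per_epoch = 250        # made-up value of args.train_iters_per_epoch
iteration = 620                    # made-up checkpoint iteration

start_epoch = iteration // train_iters_per_epoch       # 2   -> resume in the third epoch
start_iteration = iteration % train_iters_per_epoch    # 120 -> skip the first 120 batches
print(start_epoch, start_iteration)
```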
+ params_norm = None + if args.log_params_norm: + params_norm = calc_params_l2_norm(model) + report_memory_flag = training_log(losses_dict, + losses_dict_sum, + optimizer.param_groups[0]['lr'], + iteration, + optimizer.get_loss_scale().item(), + report_memory_flag, skipped_iter, + grad_norm, params_norm, num_zeros_in_grad) + + # Autoresume + if args.adlr_autoresume and \ + (iteration % args.adlr_autoresume_interval == 0): + megatron.utils.check_adlr_autoresume_termination(iteration, model, + optimizer, opt_param_scheduler, args) + + # Checkpointing + saved_checkpoint = False + if args.save and args.save_interval and \ + iteration % args.save_interval == 0: + save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + saved_checkpoint = True + + # Evaluation + if args.eval_interval and iteration % args.eval_interval == 0: + prefix = 'iteration {}'.format(iteration) + megatron.training.evaluate_and_print_results(prefix, forward_step, + valid_dataloader, model, + iteration, None, False, args=args) + + # Exiting based on iterations + if args.exit_interval and iteration % args.exit_interval == 0: + if not saved_checkpoint: + save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + torch.distributed.barrier() + print_rank_0('exiting program at iteration {}'.format(iteration)) + sys.exit() + + # Checkpointing at the end of each epoch. + if args.save: + save_checkpoint(iteration, model, optimizer, opt_param_scheduler) + + # Callback at the end of each epoch. + if end_of_epoch_callback is not None: + end_of_epoch_callback(model, epoch) + + +def finetune(train_valid_datasets_provider, + model_provider, + model_type=ModelType.encoder_or_decoder, + forward_step=_cross_entropy_forward_step, + end_of_epoch_callback_provider=None, + task_collate_fn=None): + """Main finetune function used across all tasks.""" + args = get_args() + timers = get_timers() + + assert args.rampup_batch_size is None, \ + 'batch size scaling is not supported for finetuning' + + # Train and validation data loaders. + timers('train/valid/test dataset/dataloder', log_level=0).start() + if args.epochs > 0: + train_dataset, valid_dataset = train_valid_datasets_provider() + train_dataloader, valid_dataloader = _build_train_valid_dataloaders( + train_dataset, valid_dataset, task_collate_fn) + else: + args.train_iters = 0 + timers('train/valid/test dataset/dataloder').stop() + + # Build calback function. + timers('callback function', log_level=0).start() + end_of_epoch_callback = None + if end_of_epoch_callback_provider is not None: + end_of_epoch_callback = end_of_epoch_callback_provider() + timers('callback function').stop() + + # Build model, optimizer and learning rate scheduler. + timers('model and optimizer', log_level=0).start() + model, optimizer, opt_param_scheduler = megatron.training.setup_model_and_optimizer(model_provider, model_type, args=args) + timers('model and optimizer').stop() + + # If pretrained checkpoint is provided and we have not trained for + # any iteration (i.e., iteration is zero), then load the pretrained + # checkpoint. + timers('pretrained checkpoint', log_level=0).start(barrier=True) + if args.iteration == 0 and args.pretrained_checkpoint is not None: + original_load = args.load + args.load = args.pretrained_checkpoint + original_rng = args.no_load_rng + args.no_load_rng = True + _ = load_checkpoint(model, None, None) + args.load = original_load + args.no_load_rng = original_rng + # This is critical when only model is loaded. We should make sure + # main parameters are also updated. 
+ optimizer.reload_model_params() + timers('pretrained checkpoint').stop() + + # Print setup timing. + print_rank_0('done with setups ...') + timers.log(['train/valid/test dataset/dataloder', 'callback function', + 'model and optimizer', 'pretrained checkpoint'], barrier=True) + print_rank_0('training ...') + + # Finetune the model. + if args.epochs > 0: + _train(model, + optimizer, opt_param_scheduler, forward_step, + train_dataloader, valid_dataloader, end_of_epoch_callback, args) + # Or just evaluate. + else: + if end_of_epoch_callback is not None: + print_rank_0('evaluation only mode, setting epoch to -1') + end_of_epoch_callback(model, epoch=-1, output_predictions=True) + print_rank_0('done :-)') diff --git a/multilinguality_megatron/tasks/glue/data.py b/multilinguality_megatron/tasks/glue/data.py new file mode 100644 index 0000000000000000000000000000000000000000..d96f6962d97cee1d1b4b7948b906b01af0724cfb --- /dev/null +++ b/multilinguality_megatron/tasks/glue/data.py @@ -0,0 +1,56 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""GLUE dataset.""" + +from abc import ABC +from abc import abstractmethod + +from torch.utils.data import Dataset + +from megatron import print_rank_0 +from tasks.data_utils import build_sample +from tasks.data_utils import build_tokens_types_paddings_from_text + + +class GLUEAbstractDataset(ABC, Dataset): + """GLUE base dataset class.""" + + def __init__(self, task_name, dataset_name, datapaths, + tokenizer, max_seq_length): + # Store inputs. + self.task_name = task_name + self.dataset_name = dataset_name + self.tokenizer = tokenizer + self.max_seq_length = max_seq_length + print_rank_0(' > building {} dataset for {}:'.format(self.task_name, + self.dataset_name)) + # Process the files. + string = ' > paths:' + for path in datapaths: + string += ' ' + path + print_rank_0(string) + self.samples = [] + for datapath in datapaths: + self.samples.extend(self.process_samples_from_single_path(datapath)) + print_rank_0(' >> total number of samples: {}'.format( + len(self.samples))) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + raw_sample = self.samples[idx] + ids, types, paddings = build_tokens_types_paddings_from_text( + raw_sample['text_a'], raw_sample['text_b'], + self.tokenizer, self.max_seq_length) + sample = build_sample(ids, types, paddings, + raw_sample['label'], raw_sample['uid']) + return sample + + @abstractmethod + def process_samples_from_single_path(self, datapath): + """Abstract method that takes a single path / filename and + returns a list of dataset samples, each sample being a dict of + {'text_a': string, 'text_b': string, 'label': int, 'uid': int} + """ + pass diff --git a/multilinguality_megatron/tasks/glue/finetune.py b/multilinguality_megatron/tasks/glue/finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..2dc2eb97e21d62f17489ed8f1ffc6bbbb7a7899b --- /dev/null +++ b/multilinguality_megatron/tasks/glue/finetune.py @@ -0,0 +1,93 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
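The pretrained-checkpoint loading in `finetune()` above temporarily redirects `args.load` and disables RNG-state loading, then restores both. A minimal sketch of that bookkeeping with a stand-in namespace and hypothetical paths (the actual `load_checkpoint` call is elided):

```python
from types import SimpleNamespace

# Hypothetical paths; only the save/redirect/restore pattern is shown.
args = SimpleNamespace(load="/ckpts/finetune", pretrained_checkpoint="/ckpts/pretrained",
                       no_load_rng=False, iteration=0)

if args.iteration == 0 and args.pretrained_checkpoint is not None:
    original_load, original_rng = args.load, args.no_load_rng
    args.load, args.no_load_rng = args.pretrained_checkpoint, True
    # load_checkpoint(model, None, None) would run here with args.load redirected
    args.load, args.no_load_rng = original_load, original_rng

print(args.load, args.no_load_rng)   # /ckpts/finetune False  (both restored)
```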
+ +"""GLUE finetuning/evaluation.""" + +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_tokenizer +import megatron.model.classification +from tasks.eval_utils import accuracy_func_provider +import tasks.finetune_utils +import megatron.initialize +from megatron.model.enums import ModelType + + +def _glue_classification(num_classes, + Dataset, + name_from_datapath_func): + + def train_valid_datasets_provider(): + """Build train and validation dataset.""" + args = get_args() + tokenizer = get_tokenizer() + + train_dataset = Dataset('training', args.train_data, + tokenizer, args.seq_length) + valid_dataset = Dataset('validation', args.valid_data, + tokenizer, args.seq_length) + + return train_dataset, valid_dataset + + def model_provider(pre_process=True, + post_process=True): + """Build the model.""" + args = get_args() + + print_rank_0('building classification model for {} ...'.format(args.task)) + + model_type_glue = ModelType.encoder_or_decoder + model = megatron.model.classification.Classification(num_classes=num_classes, + num_tokentypes=2, + pre_process=pre_process, + post_process=post_process, + model_type=model_type_glue) + return model + + def metrics_func_provider(): + """Privde metrics callback function.""" + def single_dataset_provider(datapath): + args = get_args() + tokenizer = get_tokenizer() + + name = name_from_datapath_func(datapath) + return Dataset(name, [datapath], tokenizer, args.seq_length) + return accuracy_func_provider(single_dataset_provider) + + """Finetune/evaluate.""" + model_type_glue = ModelType.encoder_or_decoder + tasks.finetune_utils.finetune(train_valid_datasets_provider, + model_provider, + model_type_glue, + end_of_epoch_callback_provider=metrics_func_provider) + + +def main(): + megatron.initialize.initialize_megatron(extra_args_provider=None) + args = get_args() + + if args.task == 'MNLI': + num_classes = 3 + from tasks.glue.mnli import MNLIDataset as Dataset + + def name_from_datapath(datapath): + return datapath.split('MNLI')[-1].strip( + '.tsv').strip('/').replace('_', '-') + + elif args.task == 'QQP': + + num_classes = 2 + from tasks.glue.qqp import QQPDataset as Dataset + + def name_from_datapath(datapath): + return datapath.split('QQP')[-1].strip( + '.tsv').strip('/').replace('_', '-') + + else: + raise NotImplementedError('GLUE task {} is not implemented.'.format( + args.task)) + + _glue_classification(num_classes, Dataset, name_from_datapath) + + +if __name__ == "__main__": + main() diff --git a/multilinguality_megatron/tasks/glue/mnli.py b/multilinguality_megatron/tasks/glue/mnli.py new file mode 100644 index 0000000000000000000000000000000000000000..8cecc5911ea3996d621d8d9f677375423fb3d6c8 --- /dev/null +++ b/multilinguality_megatron/tasks/glue/mnli.py @@ -0,0 +1,71 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
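For example, the `name_from_datapath` helpers in `main()` above turn a validation file path into a short dataset name; the path below is made up:

```python
datapath = "/data/glue/MNLI/dev_matched.tsv"        # made-up path
name = datapath.split('MNLI')[-1].strip('.tsv').strip('/').replace('_', '-')
print(name)                                         # dev-matched
```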
+ +"""MNLI dataset.""" + +from megatron import print_rank_0 +from tasks.data_utils import clean_text +from .data import GLUEAbstractDataset + + +LABELS = {'contradiction': 0, 'entailment': 1, 'neutral': 2} + + +class MNLIDataset(GLUEAbstractDataset): + + def __init__(self, name, datapaths, tokenizer, max_seq_length, + test_label='contradiction'): + self.test_label = test_label + super().__init__('MNLI', name, datapaths, + tokenizer, max_seq_length) + + def process_samples_from_single_path(self, filename): + """"Implement abstract method.""" + print_rank_0(' > Processing {} ...'.format(filename)) + + samples = [] + total = 0 + first = True + is_test = False + with open(filename, 'r') as f: + for line in f: + row = line.strip().split('\t') + if first: + first = False + if len(row) == 10: + is_test = True + print_rank_0( + ' reading {}, {} and {} columns and setting ' + 'labels to {}'.format( + row[0].strip(), row[8].strip(), + row[9].strip(), self.test_label)) + else: + print_rank_0(' reading {} , {}, {}, and {} columns ' + '...'.format( + row[0].strip(), row[8].strip(), + row[9].strip(), row[-1].strip())) + continue + + text_a = clean_text(row[8].strip()) + text_b = clean_text(row[9].strip()) + unique_id = int(row[0].strip()) + label = row[-1].strip() + if is_test: + label = self.test_label + + assert len(text_a) > 0 + assert len(text_b) > 0 + assert label in LABELS + assert unique_id >= 0 + + sample = {'text_a': text_a, + 'text_b': text_b, + 'label': LABELS[label], + 'uid': unique_id} + total += 1 + samples.append(sample) + + if total % 50000 == 0: + print_rank_0(' > processed {} so far ...'.format(total)) + + print_rank_0(' >> processed {} samples.'.format(len(samples))) + return samples diff --git a/multilinguality_megatron/tasks/glue/qqp.py b/multilinguality_megatron/tasks/glue/qqp.py new file mode 100644 index 0000000000000000000000000000000000000000..5409f5f74629a2b0dc161579565daa49e1a6a19b --- /dev/null +++ b/multilinguality_megatron/tasks/glue/qqp.py @@ -0,0 +1,88 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
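A toy walk-through of the MNLI row parsing in `tasks/glue/mnli.py` above, using a fabricated 11-column train-style row (`clean_text` is omitted for brevity):

```python
LABELS = {'contradiction': 0, 'entailment': 1, 'neutral': 2}

# Fabricated row: columns 1-7 are placeholders, column 0 is the id,
# columns 8/9 are the sentence pair, and the last column is the gold label.
line = ("42\tc1\tc2\tc3\tc4\tc5\tc6\tc7\t"
        "A man is sleeping.\tA man is awake.\tneutral")
row = line.strip().split('\t')

sample = {'text_a': row[8].strip(),
          'text_b': row[9].strip(),
          'label': LABELS[row[-1].strip()],
          'uid': int(row[0].strip())}
print(sample)  # {'text_a': 'A man is sleeping.', 'text_b': 'A man is awake.', 'label': 2, 'uid': 42}
```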
+ +"""QQP dataset.""" + +from megatron import print_rank_0 +from tasks.data_utils import clean_text +from .data import GLUEAbstractDataset + + +LABELS = [0, 1] + + +class QQPDataset(GLUEAbstractDataset): + + def __init__(self, name, datapaths, tokenizer, max_seq_length, + test_label=0): + self.test_label = test_label + super().__init__('QQP', name, datapaths, + tokenizer, max_seq_length) + + def process_samples_from_single_path(self, filename): + """"Implement abstract method.""" + print_rank_0(' > Processing {} ...'.format(filename)) + + samples = [] + total = 0 + first = True + is_test = False + with open(filename, 'r') as f: + for line in f: + row = line.strip().split('\t') + if first: + first = False + if len(row) == 3: + is_test = True + print_rank_0(' reading {}, {}, and {} columns and ' + 'setting labels to {}'.format( + row[0].strip(), row[1].strip(), + row[2].strip(), self.test_label)) + else: + assert len(row) == 6 + print_rank_0(' reading {}, {}, {}, and {} columns' + ' ...'.format( + row[0].strip(), row[3].strip(), + row[4].strip(), row[5].strip())) + continue + + if is_test: + assert len(row) == 3, 'expected length 3: {}'.format(row) + uid = int(row[0].strip()) + text_a = clean_text(row[1].strip()) + text_b = clean_text(row[2].strip()) + label = self.test_label + assert len(text_a) > 0 + assert len(text_b) > 0 + else: + if len(row) == 6: + uid = int(row[0].strip()) + text_a = clean_text(row[3].strip()) + text_b = clean_text(row[4].strip()) + label = int(row[5].strip()) + else: + print_rank_0('***WARNING*** index error, ' + 'skipping: {}'.format(row)) + continue + if len(text_a) == 0: + print_rank_0('***WARNING*** zero length a, ' + 'skipping: {}'.format(row)) + continue + if len(text_b) == 0: + print_rank_0('***WARNING*** zero length b, ' + 'skipping: {}'.format(row)) + continue + assert label in LABELS + assert uid >= 0 + + sample = {'uid': uid, + 'text_a': text_a, + 'text_b': text_b, + 'label': label} + total += 1 + samples.append(sample) + + if total % 50000 == 0: + print_rank_0(' > processed {} so far ...'.format(total)) + + print_rank_0(' >> processed {} samples.'.format(len(samples))) + return samples diff --git a/multilinguality_megatron/tasks/main.py b/multilinguality_megatron/tasks/main.py new file mode 100644 index 0000000000000000000000000000000000000000..2989e3723fef48dc0894b27abecaec6a27926778 --- /dev/null +++ b/multilinguality_megatron/tasks/main.py @@ -0,0 +1,96 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Main tasks functionality.""" + +import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from megatron import get_args +import megatron.initialize + + +def get_tasks_args(parser): + """Provide extra arguments required for tasks.""" + group = parser.add_argument_group(title='tasks') + + group.add_argument('--task', type=str, required=True, + help='Task name.') + group.add_argument('--epochs', type=int, default=None, + help='Number of finetunning epochs. 
Zero results in ' + 'evaluation only.') + group.add_argument('--pretrained_checkpoint', type=str, default=None, + help='Pretrained checkpoint used for finetunning.') + group.add_argument('--keep_last', action='store_true', + help='Keep the last batch (maybe incomplete) in' + 'the data loader') + group.add_argument('--train_data', nargs='+', default=None, + help='Whitespace separated paths or corpora names ' + 'for training.') + group.add_argument('--valid_data', nargs='*', default=None, + help='path(s) to the validation data.') + group.add_argument('--overlapping_eval', type=int, default=32, + help='Sliding window for overlapping evaluation.') + group.add_argument('--strict_lambada', action='store_true', + help='Use more difficult formulation of lambada.') + # Retriever args + group.add_argument('--qa_data_dev', type=str, default=None, + help='Path to the QA dataset dev file.') + group.add_argument('--qa_data_test', type=str, default=None, + help='Path to the QA dataset test file.') + + # Faiss arguments for retriever + group.add_argument('--faiss_use_gpu', action='store_true', + help='Whether create the FaissMIPSIndex on GPU') + group.add_argument('--faiss_match', type=str, default='string', \ + choices=['regex', 'string'], help="Answer matching '\ + 'logic type") + group.add_argument('--faiss_topk_retrievals', type=int, default=100, + help='Number of blocks to use as top-k during retrieval') + + # finetune for retriever + group.add_argument('--eval_micro_batch_size', type=int, default=None, + help='Eval Batch size per model instance (local batch ' + 'size). Global batch size is local batch size ' + 'times data parallel size.') + group.add_argument('--train_with_neg', action='store_true', + help='Whether to use negative examples during model training') + group.add_argument('--train_hard_neg', type=int, default=0, + help='Number of hard negative exmaples to use during ' + 'training') + + # parameters for Av.rank validation method + # Following options/arguments have been taken directly from DPR codebase + group.add_argument('--val_av_rank_hard_neg', type=int, default=30, + help='Av.rank validation: how many hard negatives to' + ' take from each question pool') + group.add_argument('--val_av_rank_other_neg', type=int, default=30, + help='Av.rank validation: how many other negatives to' + ' take from each question pool') + return parser + + +if __name__ == '__main__': + megatron.initialize.initialize_megatron(extra_args_provider=get_tasks_args) + args = get_args() + + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for downstream tasks.") + exit() + + if args.task == 'RACE': + from race.finetune import main + elif args.task in ['MNLI', 'QQP']: + from glue.finetune import main + elif args.task in ['LAMBADA', 'WIKITEXT103']: + from zeroshot_gpt.evaluate import main + elif args.task in ['ICT-ZEROSHOT-NQ', 'RETRIEVER-EVAL']: + from orqa.evaluate_orqa import main + elif args.task in ['RET-FINETUNE-NQ']: + from orqa.supervised.finetune import main + else: + raise NotImplementedError('Task {} is not implemented.'.format( + args.task)) + + main() diff --git a/multilinguality_megatron/tasks/msdp/README.md b/multilinguality_megatron/tasks/msdp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..27c8728eca146aea44c627a99d5f80184b6fbf84 --- /dev/null +++ b/multilinguality_megatron/tasks/msdp/README.md @@ -0,0 +1,19 @@ + +# Multi-Stage Prompting for Knowledgeable Dialogue Generation + +Below we present the 
steps to run our multi-stage dialogue prompting (MSDP) framework. + +## Multi-Stage Dialogue Prompting + +### Data Preparation +1. Dataset Download: [Wizard of Wikipedia](https://parl.ai/projects/wizard_of_wikipedia/) and [Wizard of Internet](https://parl.ai/projects/sea/) +2. Data Processing: We provide the script to run the [`data processing`](../../examples/msdp/data_processing.sh) of the datatsets. + +### Stage-1: Prompting for Knowledge Generation +1. We provide the script to perform the [`first-stage prompting`](../../examples/msdp/prompt_knwl_gen.sh) for the knowledge generation. +2. We provide the [`evaluation script`](../../examples/msdp/eval_knwl_generation.sh) for the automatic evaluation (i.e., F1, BLEU, METEOR, and ROUGE-L) of the knowledge generation. + +### Stage-2: Prompting for Response Generation +1. We provide the script to [`prepare the input file`](../../examples/msdp/prep_resp_gen.sh) for the response generation (based on the previously generated knowledge file). +2. We provide the script to perform the [`second-stage prompting`](../../examples/msdp/prompt_resp_gen.sh) for the response generation. +3. We provide the [`evaluation script`](../../examples/msdp/eval_resp_generation.sh) for the automatic evaluation (i.e., F1, KF1, BLEU, METEOR, and ROUGE-L) of the response generation. diff --git a/multilinguality_megatron/tasks/msdp/evaluate.py b/multilinguality_megatron/tasks/msdp/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..b0631d7b8f0a61d59b874e31cdf5d6a3e90fff3a --- /dev/null +++ b/multilinguality_megatron/tasks/msdp/evaluate.py @@ -0,0 +1,45 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Model evaluation""" + +from megatron import get_args +from megatron import print_rank_0 +from tasks.msdp.metrics import F1Metric +from tqdm import tqdm + + +def evaluate_f1(guess_file, answer_file): + """Evaluating F1 Score""" + + guess_list = [] + print_rank_0('reading %s' % guess_file) + with open(guess_file, "r") as f: + for i, line in enumerate(tqdm(f)): + line = line.strip() + if "<|endoftext|>" in line: + line = line.replace("<|endoftext|>", "") + guess_list.append(line) + + answer_list = [] + print_rank_0('reading %s' % answer_file) + with open(answer_file, "r") as f: + for i, line in enumerate(tqdm(f)): + line = line.strip() + if line == "no_passages_used": + line = "" + answer_list.append(line) + + assert len(guess_list) == len(answer_list), \ + "lengths of guess and answer are different!" + + precision, recall, f1 = F1Metric.compute_all_pairs(guess_list, answer_list) + print_rank_0('Precision: %.4f; recall: %.4f; f1: %.4f' % (precision, recall, f1)) + + print_rank_0('done :-)') + + +def main(): + args = get_args() + + evaluate_f1(args.guess_file, args.answer_file) + diff --git a/multilinguality_megatron/tasks/msdp/main.py b/multilinguality_megatron/tasks/msdp/main.py new file mode 100644 index 0000000000000000000000000000000000000000..a0c916d030b614d15b8f052bfd4e91b00d7d69c4 --- /dev/null +++ b/multilinguality_megatron/tasks/msdp/main.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
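The `MSDP-EVAL-F1` task above reports token-level F1. Worked by hand on two invented strings, the computation implemented in `tasks/msdp/metrics.py` further down reduces to:

```python
from collections import Counter

guess = "the eiffel tower is in paris"                      # invented prediction
answer = "the eiffel tower is located in paris france"      # invented reference

pred_items = guess.split()
gold_items = answer.split()

common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())             # 6 shared tokens
precision = num_same / len(pred_items)      # 6 / 6 = 1.0
recall = num_same / len(gold_items)         # 6 / 8 = 0.75
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 4))                         # 0.8571
```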
+ +"""Run multi-stage dialogue prompting (MSDP).""" + +import os +import sys + +sys.path.append(os.path.abspath(os.path.join( + os.path.join(os.path.dirname(__file__), os.path.pardir), os.path.pardir))) +from megatron import get_args +import megatron.initialize + + +def get_tasks_args(parser): + """Provide extra arguments required for tasks.""" + group = parser.add_argument_group(title='tasks') + + # parameters for the knowledgeable dialogue generation + group.add_argument('--task', type=str, required=True, + help='Task name.') + group.add_argument("--sample_input_file", type=str, default=None, + help='Get input from file instead of interactive mode, ' + 'each line is an input.') + group.add_argument("--sample_output_file", type=str, default=None, + help='Output file got from --sample_input_file') + group.add_argument('--prompt_file', type=str, default=None, + help='prompting file') + group.add_argument('--prompt_type', type=str, default=None, + choices=['knowledge', 'response'], + help='prompt type (knowledge or response)') + group.add_argument('--num_prompt_examples', type=int, default=10, + help='number of prompt examples') + group.add_argument('--guess_file', type=str, default=None, + help='datapath for generated sentences') + group.add_argument('--answer_file', type=str, default=None, + help='datapath for golden sentences') + group.add_argument('--out_seq_length', type=int, default=100, + help='output sequence length') + group.add_argument('--api_prompt', default=False, action="store_true", + help='setup model api for prompting') + group.add_argument('--megatron_api_url', type=str, default=None, + help='url of the megatron api') + return parser + + +if __name__ == '__main__': + megatron.initialize.initialize_megatron(extra_args_provider=get_tasks_args) + args = get_args() + + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for downstream tasks.") + exit() + + if args.task == 'MSDP-PROMPT': + from tasks.msdp.prompt import main + + elif args.task == 'MSDP-EVAL-F1': + from tasks.msdp.evaluate import main + + else: + raise NotImplementedError('Task {} is not implemented.'.format( + args.task)) + + main() diff --git a/multilinguality_megatron/tasks/msdp/metrics.py b/multilinguality_megatron/tasks/msdp/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..fc7ce5dd5ad986f4d985eb962629627ac2f834a3 --- /dev/null +++ b/multilinguality_megatron/tasks/msdp/metrics.py @@ -0,0 +1,77 @@ + +# The following code is adapted from +# https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/metrics.py, +# which is licensed under the MIT license. More details on the license can be +# found at https://github.com/facebookresearch/ParlAI/blob/master/LICENSE. + +"""Provides standard metric evaluations for dialog.""" + +from collections import Counter +from typing import List +import numpy as np +import re + +re_art = re.compile(r'\b(a|an|the)\b') +re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']') + + +def normalize_answer(s): + """ + Lower text and remove punctuation, articles and extra whitespace. + """ + s = s.lower() + s = re_punc.sub(' ', s) + s = re_art.sub(' ', s) + s = ' '.join(s.split()) + return s + + +class F1Metric: + """ + Helper class which computes token-level F1. + """ + + @staticmethod + def _prec_recall_f1_score(pred_items, gold_items): + """ + Compute precision, recall and f1 given a set of gold and prediction items. 
+ :param pred_items: iterable of predicted values + :param gold_items: iterable of gold values + :return: tuple (p, r, f1) for precision, recall, f1 + """ + common = Counter(gold_items) & Counter(pred_items) + num_same = sum(common.values()) + if num_same == 0: + return 0, 0, 0 + precision = 1.0 * num_same / len(pred_items) + recall = 1.0 * num_same / len(gold_items) + f1 = (2 * precision * recall) / (precision + recall) + return precision, recall, f1 + + @staticmethod + def compute_each_pair(guess: str, answer: str): + if answer == "": + return None, None, None + if guess == "": + return 0, 0, 0 + g_tokens = normalize_answer(guess).split() + a_tokens = normalize_answer(answer).split() + + precision, recall, f1 = F1Metric._prec_recall_f1_score(g_tokens, a_tokens) + return precision, recall, f1 + + @staticmethod + def compute_all_pairs(guesses: List[str], answers: List[str]): + # additional augment: + assert len(guesses) == len(answers) + + precision_list, recall_list, f1_list = [], [], [] + for guess, answer in zip(guesses, answers): + precision, recall, f1 = F1Metric.compute_each_pair(guess, answer) + if precision is None or recall is None or f1 is None: + continue + precision_list.append(precision) + recall_list.append(recall) + f1_list.append(f1) + + return np.mean(precision_list), np.mean(recall_list), np.mean(f1_list) diff --git a/multilinguality_megatron/tasks/msdp/preprocessing.py b/multilinguality_megatron/tasks/msdp/preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..330a7cfba0a22bd603b951e76f81422dabec8247 --- /dev/null +++ b/multilinguality_megatron/tasks/msdp/preprocessing.py @@ -0,0 +1,581 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Preprocessing for Wizard of Wikipedia and Wizard of Internet datasets""" + +import torch +import argparse +from nltk import word_tokenize +from tqdm import tqdm +import numpy as np +import json + +def get_args(): + parser = argparse.ArgumentParser(description="Preprocessing") + + parser.add_argument("--func", type=str, default=None, + help="choose to run which function") + parser.add_argument("--raw_file", type=str, default=None, + help="path of the input file") + parser.add_argument("--processed_file", type=str, default=None, + help="path of the output file") + parser.add_argument("--knwl_ref_file", type=str, default=None, + help="path of the knowledge reference file") + parser.add_argument("--resp_ref_file", type=str, default=None, + help="path of the knowledge reference file") + parser.add_argument("--knwl_gen_file", type=str, default=None, + help="path of the generated knowledge file") + parser.add_argument("--test_file", type=str, default=None, + help="path of the test file") + parser.add_argument("--train_file", type=str, default=None, + help="path of the train file") + parser.add_argument("--model_file", type=str, default=None, + help="path of the model file") + parser.add_argument("--data_type", type=str, default=None, + help="data types, choose one out of three types: \ + wow_seen, wow_unseen, and woi") + parser.add_argument("--seed", type=int, default=1234, + help="random seed") + args = parser.parse_args() + return args + + +def process_wow_dataset(raw_file, processed_file, knwl_ref_file, resp_ref_file): + """ + This is a function used for processing the wizard of wikipedia (wow) dataset + Expected processed format: + topic \t dialogue context \t golden knowledge \t golden response + """ + + # loading the raw data + print("> Loading data from %s" % raw_file) + with 
open(raw_file, "r") as fr: + dialog_data = json.load(fr) + + print("> Processing data ...") + fproc = open(processed_file, "w") + fknwl = open(knwl_ref_file, "w") if knwl_ref_file else None + fresp = open(resp_ref_file, "w") if resp_ref_file else None + + for i, sample in enumerate(tqdm(dialog_data)): + # get all the dialog data for a single dialog sample + dialog = sample["dialog"] + + turn_list = [] # collect the dialog history + # processing for each single dialog sample + for j, turn in enumerate(dialog): + # text of each turn + text = turn["text"] + if not (text.endswith("?") or text.endswith(".") or text.endswith("!")): + text = text + "." + + if j == 0: + # first turn + turn_list.append(text) + continue + + speaker = turn["speaker"].lower() + if "wizard" in speaker: + checked_sentence = list(turn["checked_sentence"].values()) # knowledge + checked_passage = list(turn["checked_passage"].values()) # topic + + assert len(checked_sentence) <= 1 + + # get the ground truth knowledge + if len(checked_sentence) > 0: + checked_sentence = checked_sentence[0] + else: + checked_sentence = "no_passages_used" + + if len(checked_passage) == 1: + checked_passage = checked_passage[0] + else: + checked_passage = "no_passages_used" + + # get the topic + if checked_passage != "no_passages_used": + topic = checked_passage + else: + topic = sample["chosen_topic"] + + dialog_context = " [SEP] ".join(turn_list) + knowledge = checked_sentence + response = text + # add the response into the dialog history + turn_list.append(response) + + # write to the output files + fproc.write(topic + "\t" + dialog_context + "\t" + \ + knowledge + "\t" + response + "\n") + + if fknwl: + fknwl.write(knowledge + "\n") + if fresp: + # tokenize for evaluation + response = " ".join(word_tokenize(response)) + fresp.write(response + "\n") + + else: + assert "apprentice" in speaker + turn_list.append(text) + + fproc.close() + if fknwl: + fknwl.close() + if fresp: + fresp.close() + + +def process_woi_dataset(raw_file, processed_file, knwl_ref_file, resp_ref_file): + """ + This is a function used for processing the wizard of internet (woi) dataset + Expected processed format: + topic \t dialogue context \t golden knowledge \t golden response + """ + + print("> Processing %s" % raw_file) + fproc = open(processed_file, "w") + fknwl = open(knwl_ref_file, "w") if knwl_ref_file else None + fresp = open(resp_ref_file, "w") if resp_ref_file else None + + with open(raw_file, "r") as fr: + for i, line in tqdm(enumerate(fr)): + # read line by line, each line uses json format + line = line.strip() + item_dict = json.loads(line) + + # item_dict is a dictionary + # its key is the data id, and its value contains all the data content + item_dict = item_dict.values() + item_dict = list(item_dict)[0] # len(item_dict) == 1 + + # get the whole dialog data for a single dialog sample + dialog_data = item_dict['dialog_history'] + length = len(dialog_data) + + turn_list = [] # collect the dialog history + search_text = "" + for i in range(length): + item = dialog_data[i] + action = item['action'] + + if action == "Wizard => SearchAgent": + search_text = item['text'] + + elif action == "Wizard => Apprentice": + if len(turn_list) == 0: + # first turn + turn = item['text'] + turn_list.append(turn) + continue + + # get the relevant content + contents = item["context"]["contents"] + selects = item["context"]["selected_contents"] + flag = selects[0][0] + selects = selects[1:] + assert len(selects) == len(contents) + + # get the topic + if flag: + # no knowledge 
sentence is used for the response + topic = "no_topic" + knwl_sent = "no_passages_used" + else: + # we consider the search text as the topic + topic = search_text + # get the knowledge sentence + knwl_sent = "" + for content, select in zip(contents, selects): + content = content['content'] + assert len(content) == len(select) + for c, s in zip(content, select): + if s: + knwl_sent = c + break + + if knwl_sent == "": + # no knowledge is used for the response + topic = "no_topic" + knwl_sent = "no_passages_used" + + # get dialogue context, knowledge, and response + dialog_context = " [SEP] ".join(turn_list) + response = item['text'] + + # processing + topic = topic.replace("\n", "").replace("\r", \ + "").replace("\t", "") + dialog_context = dialog_context.replace("\n", "").replace("\r", \ + "").replace("\t", "") + knwl_sent = knwl_sent.replace("\n", "").replace("\r", \ + "").replace("\t", "") + response = response.replace("\n", "").replace("\r", \ + "").replace("\t", "") + + if topic != "no_topic": + # write to the ouput files + fproc.write(topic + "\t" + dialog_context + "\t" + \ + knwl_sent + "\t" + response + "\n") + if fknwl: + fknwl.write(knwl_sent + "\n") + if fresp: + # tokenize for evaluation + response = " ".join(word_tokenize(response)) + fresp.write(response + "\n") + + turn_list.append(response) + + elif action == "Apprentice => Wizard": + turn = item['text'] + turn_list.append(turn) + + else: + assert action == "SearchAgent => Wizard", \ + "Please check whether you have used the correct data!" + + fproc.close() + if fknwl: + fknwl.close() + if fresp: + fresp.close() + + +def get_database(test_datapath, train_datapath, data_type): + """Get the database by topics""" + + assert data_type in ["wow_seen", "wow_unseen", "woi"], \ + "Please input a correct data type!!" 
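The processed line format described in the docstrings above (`topic \t dialogue context \t golden knowledge \t golden response`, with dialogue turns joined by ` [SEP] `) looks like this for an invented dialogue:

```python
# All strings are invented, for illustration of the output layout only.
turn_list = ["I love hiking in the mountains.", "Me too! Have you been to the Alps?"]
topic = "Hiking"
knowledge = "Hiking is a long, vigorous walk, usually on trails."   # hypothetical knowledge sentence
response = "Yes, and hiking there is wonderful."

dialog_context = " [SEP] ".join(turn_list)
line = "\t".join([topic, dialog_context, knowledge, response])
print(line)
```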
+ + # get test data topic dictionary + print("> reading test data from %s" % test_datapath) + test_topics = {} + with open(test_datapath, "r") as f: + for i, line in enumerate(f): + line = line.strip() + splits = line.split("\t") + topic = splits[0] + test_topics[topic] = True + + print("> reading data from %s" % train_datapath) + train_data_by_topic = {} + dialog_data_by_topic = {} + dialog_examples = [] + with open(train_datapath, "r") as f: + for i, line in enumerate(f): + line = line.strip() + splits = line.split("\t") + topic = splits[0] + turns = splits[1].split(" [SEP] ")[-3:] + knowledge = splits[2] + response = splits[3] + # filtering data samples + if knowledge == "no_passages_used": + # when no knowledge is used + continue + if data_type != "wow_seen" and ("(" in knowledge or ")" in knowledge): + # when bracket exists in the knowledge + continue + if data_type != "wow_seen" and topic not in knowledge: + # when topic does not exist in the knowledge + continue + + # get the instance + last_turn = turns[-1] + instance = "( " + last_turn + " ) " + topic + " => " + knowledge + + # construct dialog example + dialog_example = "" + if data_type != "wow_seen": + dialog_example += "( " + topic + " ) " + for i, turn in enumerate(turns): + if i != 0: + dialog_example += " " + dialog_example += turn + + # check overlaps + if topic in test_topics: + if topic not in train_data_by_topic: + train_data_by_topic[topic] = [instance] + else: + train_data_by_topic[topic].append(instance) + + if topic not in dialog_data_by_topic: + dialog_data_by_topic[topic] = [dialog_example] + else: + dialog_data_by_topic[topic].append(dialog_example) + + else: + # filtering data samples + if len(knowledge.split()) > 20: + # knowledge is too long + continue + if knowledge.startswith("It") or knowledge.startswith("it") or \ + knowledge.startswith("This") or knowledge.startswith("this"): + continue + + # append all the data into dialogue examples list + dialog_examples.append((topic, dialog_example, instance)) + + return train_data_by_topic, dialog_data_by_topic, dialog_examples + + +emb_dict = {} +def select_prompts_based_on_similarity( + query, dialog_list, prompt_list, topic, tokenizer, encoder, topk): + """Select samples based on the similarity""" + + with torch.no_grad(): + # get the query embeddings + query_ids = tokenizer.encode(query) + query_ids = torch.LongTensor([query_ids]).cuda() + query_emb = encoder(input_ids=query_ids).pooler_output + query_emb = query_emb[0] + + # calculate embeddings for the samples in the database + if topic in emb_dict: + example_embeddings = emb_dict[topic] + example_embeddings = example_embeddings.cuda() + else: + for idx, example in enumerate(dialog_list): + example_ids = tokenizer.encode(example) + example_ids = torch.LongTensor([example_ids]).cuda() + example_emb = encoder(input_ids=example_ids).pooler_output + if idx == 0: + example_embeddings = example_emb + else: + example_embeddings = torch.cat( + (example_embeddings, example_emb), dim=0) + emb_dict[topic] = example_embeddings.cpu() + + # compare the similarity and select the topk samples + similarity_list = example_embeddings.matmul(query_emb) + _, indices = torch.topk(similarity_list, k=topk) + + indices = indices.tolist() + indices = indices[::-1] # reverse the order + selected_prompts = [] + for index in indices: + # index = index.item() + selected_prompts.append(prompt_list[index]) + + return selected_prompts + + +def prompt_selection_for_knowledge_generation( + test_datapath, train_datapath, model_path, 
output_prompt_path, data_type): + """Selecting prompts for the knowledge generation""" + + print("> Selecting prompts for the knowledge generation") + + train_data_by_topic, dialog_data_by_topic, dialog_examples = \ + get_database(test_datapath, train_datapath, data_type) + + from transformers import DPRQuestionEncoderTokenizer + print("> loading tokenizer and encoder") + tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( + 'facebook/dpr-question_encoder-single-nq-base') + encoder = torch.load(model_path).cuda() + + print("> getting dialog embeddings") + with torch.no_grad(): + for idx, example in tqdm(enumerate(dialog_examples)): + dialog = example[1] + dialog_ids = tokenizer.encode(dialog) + dialog_ids = torch.LongTensor([dialog_ids]).cuda() + dialog_emb = encoder(input_ids=dialog_ids).pooler_output + + if idx == 0: + dialog_embeddings = dialog_emb + else: + dialog_embeddings = torch.cat((dialog_embeddings, dialog_emb), dim=0) + + print("> reading test data from %s" % test_datapath) + prompt_list_for_each_sample = [] + with open(test_datapath, "r") as f: + for i, line in tqdm(enumerate(f)): + line = line.strip() + + splits = line.split("\t") + topic = splits[0] + turns = splits[1].split(" [SEP] ")[-3:] + + # get the query sentence + query_sent = "" + if data_type != "seen": + query_sent += "( " + topic + " ) " + for i, turn in enumerate(turns): + if i != 0: + query_sent += " " + query_sent += turn + + if topic not in train_data_by_topic: + # get the query embedding + query_ids = tokenizer.encode(query_sent) + query_ids = torch.LongTensor([query_ids]).cuda() + query_emb = encoder(input_ids=query_ids).pooler_output + query_emb = query_emb[0] + + # calculate the similarity + similarity_list = dialog_embeddings.matmul(query_emb) + _, indices = torch.sort(similarity_list) + indices = indices.tolist() + selected_topics = {} + selected_prompts = [] + num_prompt = 0 + for index in indices: + example = dialog_examples[index] + topic_temp = example[0] + if topic_temp not in selected_topics: + selected_topics[topic_temp] = True + selected_prompts.append(example[2]) + num_prompt += 1 + if num_prompt == 10: + break + + # get the selected samples + example_list = selected_prompts[::-1] + key = topic + " " + turns[-1] + prompt_list_for_each_sample.append({key: example_list}) + + else: + num_data_sample = min(len(train_data_by_topic[topic]), 10) + total_example_list = train_data_by_topic[topic] + + dialog_list = dialog_data_by_topic[topic] + assert len(dialog_list) == len(train_data_by_topic[topic]) + + # calculate the similarity + example_list = select_prompts_based_on_similarity( + query_sent, dialog_list, total_example_list, + topic, tokenizer, encoder, topk=num_data_sample) + + key = topic + " " + turns[-1] + prompt_list_for_each_sample.append({key: example_list}) + + print("writing to %s" % output_prompt_path) + with open(output_prompt_path, "w") as f: + for instance in tqdm(prompt_list_for_each_sample): + json.dump(instance, f) + f.write("\n") + + +def prompt_selection_for_response_generation(input_path, output_path, seed): + """Selecting prompts for the response generation""" + + print("> Selecting prompts for the response generation") + print("> set random seed") + np.random.seed(seed) + + prompt_example_list = [] + print("> reading data from %s" % input_path) + with open(input_path, "r") as f: + for i, line in tqdm(enumerate(f)): + line = line.strip() + splits = line.split("\t") + + # get the topic, context, knowledge and response + topic = splits[0] + dialog_context = splits[1] + 
knowledge = splits[2] + response = splits[3] + turns = dialog_context.split(" [SEP] ")[-3:] + if knowledge == "no_passages_used": + continue + + # calculate the overlap ratio + from nltk import word_tokenize + knowledge_sent_token_list = word_tokenize(knowledge) + knowledge_sent_token_dict = {token: True for token in knowledge_sent_token_list} + knowledge_len = len(knowledge_sent_token_list) + response_token_list = word_tokenize(response) + response_len = len(response_token_list) + num_overlap_token = 0 + accumulator = 0 + for token in response_token_list: + if token in knowledge_sent_token_dict: + accumulator += 1 + else: + if accumulator >= 10: + num_overlap_token += accumulator + accumulator = 0 + if accumulator >= 10: + num_overlap_token += accumulator + + # filtering the data based on the ratio + if num_overlap_token > response_len * 0.9 or num_overlap_token < response_len * 0.6: + continue + if num_overlap_token < knowledge_len * 0.8: + continue + + last_turn = " ".join(word_tokenize(turns[-1])) + knowledge = " ".join(word_tokenize(knowledge)) + response = " ".join(word_tokenize(response)) + prompt_example = "" + # add dialog context + prompt_example += "Topic: " + topic + ". " + prompt_example += "User says: " + last_turn + " " + prompt_example += "We know that: " + knowledge + " " + prompt_example += "System replies: " + response + + prompt_example_list.append(prompt_example) + + # shuffle the prompt examples + np.random.shuffle(prompt_example_list) + + print("> writing to %s" % output_path) + with open(output_path, "w") as f: + # f.write("Generate the System's response based on the knowledge sentence:\n") + for i in tqdm(range(20)): + example = prompt_example_list[i] + f.write(example + "\n") + + +def prepare_input_for_response_generation(test_file, knwl_gen_file, processed_file): + """Preparing inputs for the response generation""" + + print("> Reading knowledge file from %s" % knwl_gen_file) + # get the knowledge list + with open(knwl_gen_file, "r") as f: + knowledge_list = f.readlines() + + print("> Processing ...") + with open(test_file, "r") as fr: + with open(processed_file, "w") as fw: + for line_num, line in enumerate(tqdm(fr)): + line = line.strip() + splits = line.split("\t") + # prepare topic, context, knowledge and response + topic = splits[0] + dialog_context = splits[1] + response = splits[3] + knowledge = knowledge_list[line_num] + knowledge = knowledge.strip() + if "<|endoftext|>" in knowledge: + knowledge = knowledge.replace("<|endoftext|>", "") + + # write to the output file + fw.write(topic + "\t" + dialog_context + "\t" \ + + knowledge + "\t" + response + "\n") + + +if __name__ == "__main__": + + args = get_args() + if args.func == "process_wow_dataset": + process_wow_dataset(args.raw_file, args.processed_file, args.knwl_ref_file, args.resp_ref_file) + + elif args.func == "process_woi_dataset": + process_woi_dataset(args.raw_file, args.processed_file, args.knwl_ref_file, args.resp_ref_file) + + elif args.func == "get_knwl_gen_prompts": + prompt_selection_for_knowledge_generation( + args.test_file, args.train_file, args.model_file, + args.processed_file, args.data_type) + + elif args.func == "get_resp_gen_prompts": + prompt_selection_for_response_generation( + args.train_file, args.processed_file, args.seed) + + elif args.func == "prepare_input": + prepare_input_for_response_generation( + args.test_file, args.knwl_gen_file, args.processed_file) diff --git a/multilinguality_megatron/tasks/msdp/prompt.py b/multilinguality_megatron/tasks/msdp/prompt.py new file 
mode 100644 index 0000000000000000000000000000000000000000..54cfd16a20ef8af01b0d596d29ad28254abb77c2 --- /dev/null +++ b/multilinguality_megatron/tasks/msdp/prompt.py @@ -0,0 +1,308 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Prompting the pretrained language model to generate knowledge/response""" + +import json +import torch +import requests +from nltk import word_tokenize +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_tokenizer +from megatron.core import mpu +from megatron.model import GPTModel +from megatron.training import get_model +from megatron.checkpointing import load_checkpoint +from megatron.text_generation import generate_and_post_process + + +def call_model_api(inputs, tokens_to_generate): + """Calling the model api to get the output generations""" + + args = get_args() + + # The following is an example of using the Megatron API + # You can also implement your own API function to place this part + headers = {'Content-Type': 'application/json; charset=UTF-8'} + data = {"prompts": [inputs], "tokens_to_generate": tokens_to_generate, "top_k": 1} + data_json = json.dumps(data) + outputs = requests.put(args.megatron_api_url, headers=headers, data=data_json).json()["text"][0] + + input_len = len(inputs) + outputs = outputs[input_len:] + outputs = outputs.split("\n")[0].strip() + + return outputs + + +def read_prompts(prompt_path, prompt_type, n_example): + """Read prompt data""" + + if prompt_type == "knowledge": + # prompts for the knowledge generation + prompt_examples_dict = {} + # read prompt_path + with open(prompt_path, "r") as f: + for i, line in enumerate(f): + line = line.strip() + line_dict = json.loads(line) + key = list(line_dict.keys())[0] + + if key not in prompt_examples_dict: + prompt_examples = line_dict[key] + prompt = "" + for instance in prompt_examples: + instance = instance.strip() + prompt += instance + " \n" + prompt_examples_dict[key] = prompt + + return prompt_examples_dict + + else: + # prompts for the response generation + # read prompt_path + prompt = "" + with open(prompt_path, "r") as f: + prompt_examples = f.readlines() + prompt_examples = prompt_examples[:n_example] + for instance in prompt_examples: + instance = instance.strip() + prompt += instance + " \n" + + return prompt + + +def generate_samples_by_calling_api(): + """ Generate outputs by calling""" + args = get_args() + assert args.prompt_type in ["knowledge", "response"], \ + "Please input a correct prompt type!" 
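A standalone sketch of the `call_model_api` request above; the endpoint URL and helper name are placeholders, while the payload fields mirror what the code sends to the Megatron text-generation API:

```python
import json
import requests

def query_megatron_api(prompt, tokens_to_generate=32,
                       url="http://localhost:5000/api"):      # placeholder endpoint
    headers = {'Content-Type': 'application/json; charset=UTF-8'}
    data = {"prompts": [prompt], "tokens_to_generate": tokens_to_generate, "top_k": 1}
    outputs = requests.put(url, headers=headers, data=json.dumps(data)).json()["text"][0]
    generation = outputs[len(prompt):]                # drop the echoed prompt
    return generation.split("\n")[0].strip()          # keep only the first generated line

# Example (requires a running text-generation server):
# print(query_megatron_api("( Have you visited it ? ) Eiffel Tower =>"))
```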
+ + if args.prompt_type == "knowledge": + # read knowledge generation prompts + knwl_gen_prompt_dict = read_prompts( + args.prompt_file, args.prompt_type, args.num_prompt_examples) + + else: + resp_gen_prompt = read_prompts( + args.prompt_file, args.prompt_type, args.num_prompt_examples) + + # read the test data + fname = open(args.sample_input_file, "r") + test_sample_list = fname.readlines() + # create output file + fname_out = open(args.sample_output_file, "w") + + # call the api to get the output generations + for test_sample in test_sample_list: + test_sample = test_sample.strip() + splits = test_sample.split("\t") + topic = splits[0] + + # prepare the inputs for the api + if args.prompt_type == "knowledge": + ## inputs = prompt + current test + # get the prompt + turns = splits[1].split(" [SEP] ") + last_turn = turns[-1] + key = topic + " " + last_turn + inputs = knwl_gen_prompt_dict[key] + + # add current test + inputs += "( " + last_turn + " ) " + topic + " =>" + + else: + # inputs = prompt + current test + # get the prompt + inputs = resp_gen_prompt + + # add current test + turns = splits[1].split(" [SEP] ") + knowledge = splits[2] + last_turn = turns[-1] + last_turn = " ".join(word_tokenize(last_turn)) + knowledge = " ".join(word_tokenize(knowledge)) + knowledge = knowledge.strip() + last_turn = last_turn.strip() + inputs += "Topic: " + topic + ". " + inputs += "User says: " + last_turn + " " + inputs += "We know that: " + knowledge + " " + inputs += "System replies:" + + # get the output generations from the api, + # and write to the output file + generations = call_model_api(inputs, args.out_seq_length) + fname_out.write(generations) + fname_out.write("\n") + + fname.close() + fname_out.close() + + +def model_provider(pre_process=True, post_process=True): + """Build the model.""" + + print_rank_0('building GPT model ...') + model = GPTModel( + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process + ) + return model + + +def generate_samples_by_prompting_input_from_file(model): + """Prompt a pretrained language model to generate knowledge/response""" + + # get tokenizer + args = get_args() + tokenizer = get_tokenizer() + + # Read the sample file and open the output file. + assert args.sample_input_file is not None, \ + 'sample input file is not provided.' + if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0: + fname = open(args.sample_input_file, "r") + all_raw_text = fname.readlines() + input_count = len(all_raw_text) + if args.sample_output_file is None: + sample_output_file = args.sample_input_file + ".out" + print('`sample_output_file` not specified, setting ' + 'it to {}'.format(sample_output_file)) + else: + sample_output_file = args.sample_output_file + + fname_out = open(sample_output_file, "w") + + # only two prompt types (i.e., knowledge and response) are allowed + assert args.prompt_type in ["knowledge", "response"], \ + "Please input a correct prompt type!" 
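For reference, the two query formats assembled above (knowledge generation and response generation) look like this for an invented test sample:

```python
# Invented example strings; these are appended after the selected prompt examples.
topic = "Eiffel Tower"
last_turn = "Have you ever visited it ?"
knowledge = "The Eiffel Tower is a wrought-iron lattice tower in Paris ."

# query appended to the retrieved knowledge-generation prompt
knowledge_query = "( " + last_turn + " ) " + topic + " =>"

# query appended to the fixed response-generation prompt
response_query = ("Topic: " + topic + ". "
                  + "User says: " + last_turn + " "
                  + "We know that: " + knowledge + " "
                  + "System replies:")

print(knowledge_query)
print(response_query)
```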
+ + # Read the prompt file + if args.prompt_type == "knowledge": + # read the prompts for the knowledge generation + prompt_examples_dict = {} + with open(args.prompt_file, "r") as f: + for i, line in enumerate(f): + line = line.strip() + line_dict = json.loads(line) + key = list(line_dict.keys())[0] + + # get the prompt examples based on the key + if key not in prompt_examples_dict: + prompt_examples = line_dict[key] + prompt = "" + for instance in prompt_examples: + instance = instance.strip() + prompt += instance + " \n" + prompt_examples_dict[key] = prompt + + else: + # read the prompts for the response generation + # prompts are fixed for all test samples + with open(args.prompt_file, "r") as f: + prompt_examples = f.readlines() + prompt_examples = prompt_examples[:args.num_prompt_examples] + + prompt = "" + for instance in prompt_examples: + instance = instance.strip() + prompt += instance + " \n" + + input_pos = 0 + model.eval() + # perform prompting + with torch.no_grad(): + while True: + raw_text_len = 0 + if mpu.is_pipeline_first_stage() \ + and mpu.get_tensor_model_parallel_rank() == 0: + input_str = all_raw_text[input_pos] + input_str = input_str.strip() + splits = input_str.split("\t") + topic = splits[0] + + if args.prompt_type == "knowledge": + # first add the prompt into the raw_text + turns = splits[1].split(" [SEP] ") + last_turn = turns[-1] + key = topic + " " + last_turn + raw_text = prompt_examples_dict[key] + + # construct inputs for knowledge generation + # then add the constructed inputs into the raw_text + raw_text += "( " + last_turn + " ) " + topic + " =>" + + else: + # first add the prompt into the raw_text + raw_text = prompt + + # construct inputs for response generation + # then add the constructed inputs into the raw_text + turns = splits[1].split(" [SEP] ") + knowledge = splits[2] + last_turn = turns[-1] + last_turn = " ".join(word_tokenize(last_turn)) + knowledge = " ".join(word_tokenize(knowledge)) + knowledge = knowledge.strip() + last_turn = last_turn.strip() + raw_text += "Topic: " + topic + ". " + raw_text += "User says: " + last_turn + " " + raw_text += "We know that: " + knowledge + " " + raw_text += "System replies:" + + input_pos += 1 + raw_text_len = len(raw_text) + + else: + raw_text = "EMPTY TEXT" + + if input_pos % 100 == 0: + print_rank_0("input_pos: %d" % input_pos) + + outputs = generate_and_post_process( + model=model, + prompts=[raw_text], + tokens_to_generate=args.out_seq_length, + top_k_sampling=1) + prompts_plus_generations = outputs[0] + prompts_plus_generations = prompts_plus_generations[0] + + # write the generated output to the output file + if mpu.get_tensor_model_parallel_rank() == 0: + if mpu.is_pipeline_first_stage(): + + generations = prompts_plus_generations[raw_text_len:] + generations = generations.split("\n")[0] + generations = generations.strip() + fname_out.write(generations) + fname_out.write("\n") + + raw_text = None + if input_pos == input_count: + return + + +def main(): + + args = get_args() + if args.api_prompt: + # obtain the generations by calling the api + generate_samples_by_calling_api() + return + + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for text generation.") + exit() + + # Set up model and load checkpoint. 
+ model = get_model(model_provider, wrap_with_ddp=False, args=args) + if args.load is not None: + _ = load_checkpoint(model, None, None) + + assert len(model) == 1, "Above condition should have caught this" + model = model[0] + + # perform the prompting + generate_samples_by_prompting_input_from_file(model) diff --git a/multilinguality_megatron/tasks/orqa/README.md b/multilinguality_megatron/tasks/orqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..172dc9a066f25e38ddc89a88975beb7b90ce275c --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/README.md @@ -0,0 +1,36 @@ +## End-to-End Training of Neural Retrievers for Open-Domain Question Answering + +Below we present the steps to run unsupervised and supervised training and evaluation of the retriever for [open domain question answering](https://arxiv.org/abs/2101.00408). + +## Retriever Training + +#### Unsupervised pretraining +1. Use `tools/preprocess_data.py` to preprocess the dataset for the Inverse Cloze Task (ICT), which we call unsupervised pretraining. This script takes as input a corpus in loose JSON format and creates fixed-size blocks of text as the fundamental units of data. For a corpus like Wikipedia, this will mean multiple sentences per block and multiple blocks per document. Run [`tools/preprocess_data.py`](../../tools/preprocess_data.py) to construct one or more indexed datasets with the `--split_sentences` argument to make sentences the basic unit. We construct two datasets, one with the title of every document and another with the body. + +
+python tools/preprocess_data.py \
+    --input /path/to/corpus.json \
+    --json_keys text title \
+    --split_sentences \
+    --tokenizer_type BertWordPieceLowerCase \
+    --vocab_file /path/to/vocab.txt \
+    --output_prefix corpus_indexed \
+    --workers 10
+
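+For reference, a minimal sketch of the loose JSON input that the command above expects: one JSON object per line, carrying the keys passed to `--json_keys` (here `text` and `title`). The file name and sample records below are placeholders, not part of the actual pipeline.
+
+<pre>
+import json
+
+# Write a tiny two-document corpus in loose JSON format (one object per line).
+# The keys must match the --json_keys passed to tools/preprocess_data.py.
+docs = [
+    {"title": "Ada Lovelace", "text": "Ada Lovelace was an English mathematician. She worked on the Analytical Engine."},
+    {"title": "Alan Turing", "text": "Alan Turing was a pioneer of theoretical computer science. He formalised the concept of the algorithm."},
+]
+
+with open("corpus.json", "w", encoding="utf-8") as f:
+    for doc in docs:
+        f.write(json.dumps(doc, ensure_ascii=False) + "\n")
+</pre>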
+ +2. The [`examples/pretrain_ict.sh`](../../examples/pretrain_ict.sh) script runs a single GPU 217M parameter biencoder model for ICT retriever training. Single GPU training is primarily intended for debugging purposes, as the code is developed for distributed training. The script uses a pretrained BERT model and we use a total batch size of 4096 for the ICT training. + +3. Evaluate the pretrained ICT model using [`examples/evaluate_retriever_nq.sh`](../../examples/evaluate_retriever_nq.sh) for [Google's Natural Questions Open dataset](https://arxiv.org/pdf/1906.00300.pdf). + +#### Supervised finetuning + +1. Use the above pretrained ICT model to finetune using [Google's Natural Questions Open dataset](https://github.com/google-research/language/tree/master/language/orqa). The script [`examples/finetune_retriever_distributed.sh`](../../examples/finetune_retriever_distributed.sh) provides an example of how to perform the training. Our finetuning process includes retriever score scaling and longer training (80 epochs) on top of [DPR training](https://arxiv.org/abs/2004.04906). + +2. Evaluate the finetuned model using the same evaluation script as mentioned above for the unsupervised model. + +More details on the retriever are available in [our paper](https://arxiv.org/abs/2101.00408). + +## Reader Training + +The reader component will be available soon. + diff --git a/multilinguality_megatron/tasks/orqa/evaluate_orqa.py b/multilinguality_megatron/tasks/orqa/evaluate_orqa.py new file mode 100644 index 0000000000000000000000000000000000000000..8dc0e328212382e3037574d88a9d9054ecc9675b --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/evaluate_orqa.py @@ -0,0 +1,38 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Main tasks functionality.""" + +from megatron import get_args, print_rank_0 +import megatron.indexer +import tasks.orqa.evaluate_utils + + +def main(): + """ + Main program + """ + + args = get_args() + + """ + Create a BlockData data structure by running an IndexBuilder over an + ICT Dataset and then evaluate on NQ task + """ + + print_rank_0("Starting index builder!") + + index_builder = megatron.indexer.IndexBuilder(args) + index_builder.build_and_save_index() + print_rank_0("Build and save indices: done!") + print_rank_0("Starting evaluations!") + + # Set up the model and evaluator + evaluator = tasks.orqa.evaluate_utils.ORQAEvaluator() + + # Run evaluation + if args.qa_data_dev is not None: + evaluator.evaluate(args.qa_data_dev, "DEV") + + if args.qa_data_test is not None: + evaluator.evaluate(args.qa_data_test, "TEST") + diff --git a/multilinguality_megatron/tasks/orqa/evaluate_utils.py b/multilinguality_megatron/tasks/orqa/evaluate_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8fbfd321909317dca186d5cfcb98bc1562a39620 --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/evaluate_utils.py @@ -0,0 +1,180 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +import torch + +from megatron import get_args, print_rank_0 +from megatron.data.orqa_wiki_dataset import get_open_retrieval_wiki_dataset +from megatron.data.realm_index import OpenRetreivalDataStore, FaissMIPSIndex +import megatron.model.biencoder_model +from megatron.training import get_model +from tasks.orqa.unsupervised.nq import get_nq_dataset +from tasks.orqa.unsupervised.nq import get_one_epoch_nq_dataloader +from tasks.orqa.unsupervised.nq import process_nq_batch +from tasks.orqa.unsupervised.qa_utils import calculate_matches + +from megatron.model import ModelType + + +class ORQAEvaluator(object): + def __init__(self): + args = get_args() + self.embedding_size = args.hidden_size + self.faiss_use_gpu = args.faiss_use_gpu + self.evidence_embedder_obj = None + self.evidence_dataset = None + self.mips_index = None + self.eval_dataset = None + + # Get Evidence (Wikipedia) dataset + self.get_evidence_dataset() + + # Load query encoder checkpoint + only_query_model = True + if args.biencoder_shared_query_context_model: + only_query_model = False + + model_type = ModelType.encoder_or_decoder + model_provider_func = megatron.model.biencoder_model.get_model_provider(only_query_model=only_query_model, + biencoder_shared_query_context_model=args.biencoder_shared_query_context_model, + model_type=model_type) + + wrap_with_ddp: bool = True + model = get_model(model_provider_func, model_type, wrap_with_ddp, args) + + self.model = megatron.checkpointing.load_biencoder_checkpoint(model, only_query_model=only_query_model) + + assert len(self.model) == 1 + self.model[0].eval() + + # Load faiss indexer + self.faiss_wrapper() + + def get_evidence_embedding(self): + # This will load the embedding from the embedding path + self.evidence_embedder_obj = OpenRetreivalDataStore(load_from_path=True) + + def get_evidence_dataset(self): + self.evidence_dataset = get_open_retrieval_wiki_dataset() + + def faiss_wrapper(self): + # Initialize FAISS wrapper on local rank = 0 as the evidence embeddings + # is distributed over all the GPUs in a node and FAISS is not + # thread-safe + args = get_args() + if args.local_rank == 0: + # Get evidence embeddings computed using context encoder + self.get_evidence_embedding() + + assert self.evidence_embedder_obj is not None + self.mips_index = FaissMIPSIndex(embed_size=self.embedding_size, + embed_data=self.evidence_embedder_obj, + use_gpu=self.faiss_use_gpu) + + # Wait for the FAISS index to be initialized in all the nodes + torch.distributed.barrier() + + def generate_query_vectors(self, qa_data, split): + + self.eval_dataset = get_nq_dataset(qa_data, split) + dataloader = get_one_epoch_nq_dataloader(self.eval_dataset) + + query_vectors = [] + reference_list = [] + + for batch in dataloader: + # batch also has query_tokens and query_pad_data + query_tokens, query_mask, query_types, \ + query_len, reference = process_nq_batch(batch) + + assert len(self.model) == 1 + unwrapped_model = self.model[0] + while not hasattr(unwrapped_model, 'embed_text'): + unwrapped_model = unwrapped_model.module + + with torch.no_grad(): + query_logits = unwrapped_model.embed_text( + unwrapped_model.query_model, query_tokens, + query_mask, query_types) + + reference_list.extend(reference) + query_vectors.extend(query_logits.split(1, dim=0)) + if len(query_vectors) % 100 == 0: + print_rank_0('Encoded queries {}'.format(len(query_vectors))) + + query_tensor = torch.cat(query_vectors, dim=0) + print_rank_0('Total encoded queries tensor {}'.format(query_tensor.size())) + + assert 
query_tensor.size(0) == len(self.eval_dataset) + return query_tensor, reference_list + + def evaluate(self, qa_data, split): + args = get_args() + query_tensor, reference_list = self.generate_query_vectors(qa_data, \ + split) + local_rank = args.local_rank + rank = torch.distributed.get_rank() + device_count = torch.cuda.device_count() + num_nodes = torch.distributed.get_world_size() // device_count + node_id = rank // device_count + + for node in range(num_nodes): + start_rank = node * device_count + end_rank = (node + 1) * device_count + ranks_list = list(range(start_rank, end_rank)) + node_group = torch.distributed.new_group(ranks=ranks_list) + + if node_id == node: + device_start_rank = start_rank + group = node_group + + input_ = torch.empty_like(query_tensor).copy_(query_tensor).detach_() + tensor_list = [torch.empty_like(input_) for _ in range(device_count)] + torch.distributed.all_gather(tensor_list, query_tensor, group=group) + + if local_rank == 0 and self.mips_index is not None: + all_query_tensor = torch.cat(tensor_list, dim=0).contiguous() + + distance, topkindex = self.mips_index.search_mips_index( + all_query_tensor, top_k=args.faiss_topk_retrievals, + reconstruct=False) + distance = torch.from_numpy(distance).cuda() + topkindex = torch.LongTensor(topkindex).cuda() + + if local_rank != 0: + distance = torch.empty(device_count * len(query_tensor), \ + args.faiss_topk_retrievals, dtype=torch.float32).cuda() + topkindex = torch.empty(device_count * len(query_tensor), \ + args.faiss_topk_retrievals, dtype=torch.int64).cuda() + + torch.distributed.broadcast(distance, src=device_start_rank, \ + group=group) + torch.distributed.broadcast(topkindex, src=device_start_rank, \ + group=group) + + distance = torch.split(distance, len(query_tensor), dim=0)\ + [local_rank] + topkindex = torch.split(topkindex, len(query_tensor), dim=0)\ + [local_rank] + + top_ids_and_scores = [] + for darray, topkarray in zip(distance, topkindex): + top_ids_and_scores.append((topkarray.tolist(), darray.tolist())) + + passages = self.evidence_dataset.id2text + match_stats = calculate_matches(passages, + reference_list, + top_ids_and_scores, + workers_num=args.num_workers, + match_type=args.faiss_match) + top_k_hits = match_stats.top_k_hits + + print_rank_0("{} SET RESULTS".format(split)) + print_rank_0("topk-{} documents hits {}".format( + args.faiss_topk_retrievals, top_k_hits)) + top_k_hits = [v / len(top_ids_and_scores) for v in top_k_hits] + print_rank_0("top-k documents hits accuracy {}".format(top_k_hits)) + + for i in args.retriever_report_topk_accuracies: + print_rank_0("top-{}: {:.2f}".format(i, top_k_hits[i-1] * 100)) + + return diff --git a/multilinguality_megatron/tasks/orqa/supervised/data.py b/multilinguality_megatron/tasks/orqa/supervised/data.py new file mode 100644 index 0000000000000000000000000000000000000000..eb99e2df824e220f712604414b58807013228cb9 --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/supervised/data.py @@ -0,0 +1,287 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""ORQA dataset.""" + +import json +import random +from abc import ABC +from abc import abstractmethod + +import numpy as np +from torch.utils.data import Dataset + +from megatron import print_rank_0, get_args +from megatron.data.biencoder_dataset_utils import make_attention_mask + +def build_token_types_from_context_list(ctx_list, tokenizer, max_seq_length): + ctx_id_list, ctx_types_list = [], [] + for context in ctx_list: + title_ids = tokenizer.tokenize(context['title']) + ctx_ids = tokenizer.tokenize(context['text']) + ctx_ids = title_ids + [tokenizer.sep_id] + ctx_ids + + ctx_ids, ctx_types, _ = build_tokens_types_paddings_from_ids(ctx_ids, + max_seq_length, tokenizer.cls, + tokenizer.sep, tokenizer.pad) + ctx_id_list.append(ctx_ids) + ctx_types_list.append(ctx_types) + + return ctx_id_list, ctx_types_list + + +def build_tokens_types_paddings_from_text(query, context, + tokenizer, max_seq_length): + """Build token types and paddings, trim if needed, and pad if needed.""" + + query_ids = tokenizer.tokenize(query) + query_ids, query_types, query_pad_mask = \ + build_tokens_types_paddings_from_ids(query_ids, max_seq_length, \ + tokenizer.cls, tokenizer.sep, tokenizer.pad) + + # Appending the title of the context at front + extended_ctx_ids = None + if context is not None: + title_ids = tokenizer.tokenize(context['title']) + ctx_ids = tokenizer.tokenize(context['text']) + extended_ctx_ids = title_ids + [tokenizer.sep] + ctx_ids + + ctx_ids, ctx_types, ctx_pad_mask = \ + build_tokens_types_paddings_from_ids(extended_ctx_ids, + max_seq_length, tokenizer.cls, tokenizer.sep, tokenizer.pad) + + return query_ids, query_types, query_pad_mask, \ + ctx_ids, ctx_types, ctx_pad_mask + + +# Similar code tasks/data_utils with some changes +def build_tokens_types_paddings_from_ids(text_ids, max_seq_length, + cls_id, sep_id, pad_id): + """Build token types and paddings, trim if needed, and pad if needed.""" + enc_ids = [] + tokentypes_enc = [] + + # [CLS]. + enc_ids.append(cls_id) + tokentypes_enc.append(0) + + # A. + len_src = len(text_ids) + enc_ids.extend(text_ids) + tokentypes_enc.extend([0] * len_src) + + # Cap the size. + if len(enc_ids) > max_seq_length - 1: + enc_ids = enc_ids[0: max_seq_length - 1] + tokentypes_enc = tokentypes_enc[0: max_seq_length - 1] + + # [SEP]. + enc_ids.append(sep_id) + tokentypes_enc.append(0) + + num_tokens_enc = len(enc_ids) + # Padding. 
+ padding_length = max_seq_length - len(enc_ids) + if padding_length > 0: + enc_ids.extend([pad_id] * padding_length) + tokentypes_enc.extend([pad_id] * padding_length) + + pad_mask = ([1] * num_tokens_enc) + ([0] * padding_length) + pad_mask = np.array(pad_mask, dtype=np.int64) + + return enc_ids, tokentypes_enc, pad_mask + + +def build_sample(query_ids, query_types, query_pad_mask, + ctx_ids, ctx_types, ctx_pad_mask, answers, + neg_ctx_id_list=None, neg_ctx_types_list=None, + include_neg=False): + """Convert to numpy and return a sample consumed by the batch producer.""" + + query_ids = np.array(query_ids, dtype=np.int64) + query_types = np.array(query_types, dtype=np.int64) + query_mask = make_attention_mask(query_ids, query_ids) + + ctx_ids = np.array(ctx_ids, dtype=np.int64) + ctx_types = np.array(ctx_types, dtype=np.int64) + ctx_mask = make_attention_mask(ctx_ids, ctx_ids) + + sample = ({ + 'query': query_ids, + 'query_mask': query_mask, + 'query_types': query_types, + 'query_pad_mask': query_pad_mask, + 'context': ctx_ids, + 'context_mask': ctx_mask, + 'context_types': ctx_types, + 'context_pad_mask': ctx_pad_mask, + 'reference': answers + }) + + if include_neg: + neg_ctx_ids = np.array(neg_ctx_id_list, dtype=np.int64) + neg_ctx_id_types = np.array(neg_ctx_types_list, dtype=np.int64) + neg_ctx_mask = np.array([make_attention_mask(ids, ids) \ + for ids in neg_ctx_ids], dtype=np.int64) + + sample['neg_context'] = neg_ctx_ids + sample['neg_context_types'] = neg_ctx_id_types + sample['neg_context_mask'] = neg_ctx_mask + + return sample + + +class OpenRetrievalAbstractDataset(ABC, Dataset): + """Open Retrieval base dataset class.""" + + def __init__(self, task_name, dataset_name, datapaths, tokenizer, \ + max_seq_length, evaluate=False): + # Store inputs. + args = get_args() + self.evaluate = evaluate + self.val_av_rank_hard_neg = args.val_av_rank_hard_neg + self.val_av_rank_other_neg = args.val_av_rank_other_neg + self.train_with_neg = args.train_with_neg + self.train_hard_neg = args.train_hard_neg + + self.task_name = task_name + self.dataset_name = dataset_name + self.tokenizer = tokenizer + self.max_seq_length = max_seq_length + print_rank_0(' > building {} dataset for {}:'.format(self.task_name, + self.dataset_name)) + # Process the files. 
+ string = ' > paths:' + for path in datapaths: + string += ' ' + path + print_rank_0(string) + self.samples = [] + for datapath in datapaths: + self.samples.extend(self.process_samples_from_single_path(datapath)) + + args = get_args() + if args.sample_rate < 1: # subsample + k = int(len(self.samples) * args.sample_rate) + self.samples = random.sample(self.samples, k) + + print_rank_0(' >> total number of samples: {}'.format( + len(self.samples))) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + raw_sample = self.samples[idx] + + query_ids, query_types, query_pad_mask, ctx_ids, ctx_types, \ + ctx_pad_mask = build_tokens_types_paddings_from_text( \ + raw_sample['question'], raw_sample['pos_context'], \ + self.tokenizer, self.max_seq_length) + + if self.evaluate: + neg_ctx_list = \ + raw_sample['negative_context'][:self.val_av_rank_other_neg] + \ + raw_sample['hard_negative_context'][:self.val_av_rank_hard_neg] + neg_ctx_id_list, neg_ctx_types_list = \ + build_token_types_from_context_list(neg_ctx_list, \ + self.tokenizer, self.max_seq_length) + + elif self.train_with_neg: + hard_negative_ctx = raw_sample['hard_negative_context'] + negative_ctx = raw_sample['negative_context'] + if True: # TODO: fix this or remove this condition + random.shuffle(hard_negative_ctx) + random.shuffle(negative_ctx) + + neg_ctx_list = hard_negative_ctx[:self.train_hard_neg] + # In the Google NQ dataset by DPR paper, there are around more than + # 50 missing hard negatives in training data. + # In those cases, substitute hard negatives by simple negatives. + if len(neg_ctx_list) < self.train_hard_neg: + neg_ctx_list += negative_ctx[:self.train_hard_neg - \ + len(neg_ctx_list)] + + neg_ctx_id_list, neg_ctx_types_list = \ + build_token_types_from_context_list(neg_ctx_list, + self.tokenizer, self.max_seq_length) + else: + neg_ctx_id_list = None + neg_ctx_types_list = None + + sample = build_sample(query_ids, query_types, query_pad_mask, + ctx_ids, ctx_types, ctx_pad_mask, + raw_sample['answers'], + neg_ctx_id_list, neg_ctx_types_list, + include_neg=self.evaluate or self.train_with_neg) + + return sample + + @staticmethod + @abstractmethod + def process_samples_from_single_path(filename): + """Abstract method that takes a filename and + returns a list of dataset samples, each sample being a dict of + {'text': string, 'text': string} + """ + pass + + + +def normalize_question(question): + if question[-1] == '?': + question = question[:-1] + return question + +# The following class reads the datasets for training retriever as +# prepared by the DPR codebase (https://github.com/facebookresearch/DPR) + +class NQSupervisedDataset(OpenRetrievalAbstractDataset): + + def __init__(self, name, datapaths, tokenizer, max_seq_length, \ + evaluate=False): + super().__init__('natural_questions_ret', + name, + datapaths, + tokenizer, + max_seq_length, + evaluate=evaluate) + + @staticmethod + def process_samples_from_single_path(filename): + """"Implement abstract method.""" + print_rank_0(' > Processing {} ...'.format(filename)) + samples = [] + total = 0 + + with open(filename, 'r', encoding="utf-8") as f: + data = json.load(f) + for row in data: + question = normalize_question(row['question']) + pos_context = row['positive_ctxs'][0] + + # Hard Negative Contexts + if len(row['hard_negative_ctxs']) > 0: + hard_neg_context = row['hard_negative_ctxs'] + else: + hard_neg_context = [] + + # Negative Contexts + if len(row['negative_ctxs']) > 0: + neg_context = row['negative_ctxs'] + else: + neg_context = 
[] + + answers = row['answers'] + sample = {'question': question, + 'pos_context': pos_context, + 'hard_negative_context': hard_neg_context, + 'negative_context': neg_context, + 'answers': answers} + total += 1 + samples.append(sample) + + if total % 5000 == 0: + print_rank_0(' > processed {} so far ...'.format(total)) + + print_rank_0(' >> processed {} samples.'.format(len(samples))) + return samples + diff --git a/multilinguality_megatron/tasks/orqa/supervised/eval_utils.py b/multilinguality_megatron/tasks/orqa/supervised/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6284045be6609a2b31907b8bd30eb2b062810fdc --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/supervised/eval_utils.py @@ -0,0 +1,192 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Evaluation utilities.""" +from collections import OrderedDict +import math +import numpy as np +import time +import torch +import torch.nn.functional as F + +from megatron import get_args, print_rank_0 +from megatron.core import mpu +from megatron.utils import average_losses_across_data_parallel_group +import tasks.finetune_utils + + +def task_collate_fn(batch_data): + # generate batch + batch_size = len(batch_data) + tensorized = OrderedDict() + for d in batch_data: + for k, v in d.items(): + tensorized.setdefault(k, []).append(v) + + tensorized['query'] = torch.LongTensor(tensorized['query']) + tensorized['query_mask'] = torch.LongTensor(tensorized['query_mask']) + tensorized['query_types'] = torch.LongTensor(tensorized['query_types']) + tensorized['query_pad_mask'] = \ + torch.LongTensor(tensorized['query_pad_mask']) + + tensorized['context'] = torch.LongTensor(tensorized['context']) + tensorized['context_mask'] = \ + torch.LongTensor(tensorized['context_mask']) + tensorized['context_types'] = \ + torch.LongTensor(tensorized['context_types']) + tensorized['context_pad_mask'] = \ + torch.LongTensor(tensorized['context_pad_mask']) + + if 'neg_context' in tensorized: + tensorized['neg_context'] = \ + torch.LongTensor(np.concatenate(tensorized['neg_context'])) + tensorized['neg_context_mask'] = \ + torch.LongTensor(np.concatenate(tensorized['neg_context_mask'])) + tensorized['neg_context_types'] = \ + torch.LongTensor(np.concatenate(tensorized['neg_context_types'])) + + return tensorized + + +def process_batch(batch): + """Process batch and produce inputs for the model.""" + query_tokens = batch['query'].long().cuda() + query_mask = (batch['query_mask'] < 0.5).cuda() + query_types = batch['query_types'].long().cuda() + query_pad_mask = batch['query_pad_mask'].long().cuda() + + context_tokens = batch['context'].long().cuda() + context_mask = (batch['context_mask'] < 0.5).cuda() + context_types = batch['context_types'].long().cuda() + context_pad_mask = batch['context_pad_mask'].long().cuda() + + if 'neg_context' in batch: + neg_context_tokens = batch['neg_context'].long().cuda() + neg_context_mask = (batch['neg_context_mask'] < 0.5).cuda() + neg_context_types = batch['neg_context_types'].long().cuda() + else: + neg_context_tokens = None + neg_context_mask = None + neg_context_types = None + + reference = batch['reference'] + + return query_tokens, query_mask, query_types, query_pad_mask, \ + context_tokens, context_mask, context_types, context_pad_mask, \ + neg_context_tokens, neg_context_mask, neg_context_types, reference + +def accuracy_func_provider(single_dataset_provider, rank0sampler=False): + """Provide function that calculates accuracies.""" + args = get_args() + + 
print_rank_0("accuracy_func_provider is CALLED") + + # Build dataloaders + datapath = args.valid_data + dataset = single_dataset_provider(datapath) + + drop_last = False + if mpu.get_data_parallel_world_size() > 1 and not rank0sampler: + drop_last = True + + print_rank_0(datapath) + print_rank_0(rank0sampler) + + dataloader = tasks.finetune_utils.build_data_loader(dataset, + args.eval_micro_batch_size, + num_workers=args.num_workers, + drop_last=drop_last, + task_collate_fn=task_collate_fn) + dataloaders = (dataset.dataset_name, dataloader) + + def metrics_func(model, epoch, output_predictions=False): + print_rank_0('calculating metrics by accuracy func in ORQA...') + + if output_predictions: + assert rank0sampler + names = 'predictions' + name, dataloader = dataloaders + if args.task == "RET-FINETUNE-NQ": + start_time = time.time() + output = retrieval_loss(model, dataloader) + stats_dict, total = output + format_string = "" + for k, v in stats_dict.items(): + format_string += "|{} = {:.2f}".format(k, v / total) + print_rank_0("epoch:{}{}".format(epoch, format_string)) + print_rank_0("taken time to calcuate metrics {:.3f}".format(\ + time.time() - start_time)) + else: + raise AssertionError("{} Task not supported".format(args.task)) + + return metrics_func + + +def retrieval_loss(model, dataloader): + args = get_args() + total = 0 + topk_stats_dict = {'top{}_acc'.format(k): 0 for k in \ + args.retriever_report_topk_accuracies} + stats_dict = dict(rank=0, **topk_stats_dict) + + assert len(model) == 1 + unwrapped_model = model[0] + unwrapped_model.eval() + + with torch.no_grad(): + # For all the batches in the dataset. + for batch in dataloader: + # Run the model forward. + query_tokens, query_mask, query_types, _, \ + context_tokens, context_mask, context_types, _, \ + neg_context_tokens, neg_context_mask, neg_context_types, \ + reference = process_batch(batch) + + query_logits, context_logits = unwrapped_model(query_tokens, + query_mask, query_types, + torch.cat([context_tokens, neg_context_tokens]), + torch.cat([context_mask, neg_context_mask]), + torch.cat([context_types, neg_context_types])) + + retrieval_scores = torch.matmul(query_logits, + torch.transpose(context_logits, 0, 1)) + + if args.retriever_score_scaling: + retrieval_scores = retrieval_scores / \ + math.sqrt(args.hidden_size) + + local_batch_size = query_logits.shape[0] + labels = torch.arange(local_batch_size).long().cuda() + + softmax_scores = F.softmax(retrieval_scores, dim=1) + sorted_vals, sorted_indices = torch.topk(softmax_scores, + k=softmax_scores.shape[1], + sorted=True) + + def topk_accuracy(k): + return torch.cuda.FloatTensor( + [sum([int(labels[i] in sorted_indices[i, :k]) for i in \ + range(local_batch_size)])]) + + def get_rank(): + return torch.cuda.FloatTensor( + [sum([torch.nonzero(labels[i] == sorted_indices[i])[0][0] \ + for i in range(local_batch_size)])]) + + topk_accs = [topk_accuracy(k) for k in \ + args.retriever_report_topk_accuracies] + rank = get_rank() + losses = average_losses_across_data_parallel_group([rank, \ + *topk_accs]) + + # create stats_dict with retrieval loss and all specified + # top-k accuracies + topk_acc_dict = {'top{}_acc'.format(k): v * 100 for k, v in \ + zip(args.retriever_report_topk_accuracies, losses[1:])} + temp_stats_dict = dict(rank=losses[0], **topk_acc_dict) + for k in stats_dict.keys(): + stats_dict[k] += temp_stats_dict[k] + total += local_batch_size + + unwrapped_model.train() + + return stats_dict, total diff --git 
a/multilinguality_megatron/tasks/orqa/supervised/finetune.py b/multilinguality_megatron/tasks/orqa/supervised/finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..64e84c27cbdfbe781625ebbe0850c364adbd5459 --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/supervised/finetune.py @@ -0,0 +1,243 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""ORQA finetuning/evaluation.""" + +from functools import partial +import sys + +import math +import torch +import torch.nn.functional as F + +from megatron import get_args, get_timers, get_tokenizer, print_rank_0 +from megatron.core import mpu +import megatron.model.biencoder_model +from megatron.utils import average_losses_across_data_parallel_group +from pretrain_ict import get_group_world_size_rank +import tasks.finetune_utils +from tasks.orqa.supervised.eval_utils import accuracy_func_provider +from tasks.orqa.supervised.eval_utils import process_batch, task_collate_fn + +from megatron.model import ModelType + +# input_ is a 2D tensor + + +def check_and_append_tensor_for_gather(group, rank, world_size, input_): + # gather the size of the first dimension of the tensor from all ranks + current_length = input_.size()[0] + first_dim = torch.tensor([[current_length]], + device=torch.cuda.current_device()) + input_list = [torch.empty_like(first_dim) for _ in range(world_size)] + input_list[rank].copy_(first_dim) + torch.distributed.all_gather(input_list, first_dim, group=group) + all_input_list = torch.cat(input_list, dim=0).contiguous() + max_length = torch.max(all_input_list) + + # if the size are different than the max, extend the tensor + # accordingly + if max_length > current_length: + padding=tuple([0] * (input_.dim() * 2 - 1)) + \ + tuple([max_length - current_length]) + input_ = F.pad(input=input_, pad=padding) + + return input_ + +def orqa(Dataset): + + def cross_entropy_forward_step(batch, model): + """Simple forward step with cross-entropy loss.""" + timers = get_timers() + tokenizer = get_tokenizer() + + # Get the batch. + timers('batch generator', log_level=2).start() + try: + batch_ = next(batch) + except BaseException: + batch_ = batch + + group, rank, world_size = get_group_world_size_rank() + + query_tokens, query_mask, query_types, query_pad_mask, \ + context_tokens, context_mask, context_types, context_pad_mask, \ + neg_context_tokens, neg_context_mask, neg_context_types, \ + reference = process_batch(batch_) + + timers('batch generator').stop() + local_batch_size = query_tokens.shape[0] + + # Text representation of query and context + query_list, context_list = [], [] + for i in range(local_batch_size): + query_list.append(tokenizer.decode(query_tokens[i].tolist())) + context_list.append(tokenizer.decode(context_tokens[i].tolist())) + + if neg_context_tokens is not None: + neg_context_tokens = check_and_append_tensor_for_gather(group, + rank, world_size, neg_context_tokens) + neg_context_mask = check_and_append_tensor_for_gather(group, + rank, world_size, neg_context_mask) + neg_context_types = check_and_append_tensor_for_gather(group, + rank, world_size, neg_context_types) + + if neg_context_tokens is not None: + context_tokens = torch.cat([context_tokens, neg_context_tokens]) + context_mask = torch.cat([context_mask, neg_context_mask]) + context_types = torch.cat([context_types, neg_context_types]) + + # Forward model. 
+ output_tensor = model(query_tokens, query_mask, + query_types, context_tokens, + context_mask, context_types) + return output_tensor, partial(cross_entropy_loss_func, query_tokens, context_tokens) + + + def cross_entropy_loss_func(query_tokens, context_tokens, output_tensor): + args = get_args() + + local_batch_size = query_tokens.shape[0] + group, rank, world_size = get_group_world_size_rank() + # recall we assert that model_parallel_size == 1 + global_batch_size = world_size * local_batch_size + + query_logits, context_logits = output_tensor + + if world_size > 1: + input_ = torch.empty_like(context_logits).copy_(\ + context_logits).detach_() + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank].copy_(input_) + torch.distributed.all_gather(tensor_list, input_, group=group) + + # Check if all-gather happens in order + assert tensor_list[rank].sum().item() == \ + context_logits.sum().item() + + # Preserves the gradient + tensor_list[rank] = context_logits + all_context_logits = torch.cat(tensor_list, dim=0).contiguous() + + # Query tensors + input_ = torch.empty_like(query_logits).copy_(\ + query_logits).detach_() + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank].copy_(input_) + torch.distributed.all_gather(tensor_list, input_, group=group) + + # Check if all-gather happens in order + assert tensor_list[rank].sum().item() == query_logits.sum().item() + + # Preserves the gradient + tensor_list[rank] = query_logits + all_query_logits = torch.cat(tensor_list, dim=0).contiguous() + else: + all_query_logits = query_logits + all_context_logits = context_logits + + retrieval_scores = torch.matmul(all_query_logits, + torch.transpose(all_context_logits, 0, 1)) + # Scaling the retrieval scores + if args.retriever_score_scaling: + retrieval_scores = retrieval_scores / math.sqrt(args.hidden_size) + + if args.train_with_neg: + # if the world size is 3, local batch size is 4, and + # local context size is 8, what we want is + # labels = [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19] + labels = [] + local_context_size = context_tokens.shape[0] + for i in range(world_size): + j = i * local_context_size + labels.extend(list(range(j, j + local_batch_size))) + labels = torch.LongTensor(labels).cuda() + assert len(labels) == global_batch_size + else: + labels = torch.arange(global_batch_size).long().cuda() + + # Cross-entropy loss. + softmax_scores = F.log_softmax(retrieval_scores, dim=1) + + loss = F.nll_loss(softmax_scores, labels, reduction='mean') + + max_score, max_idxs = torch.max(softmax_scores, 1) + correct_predictions_count = (max_idxs == labels).sum().float() + + # Reduce loss for logging. 
+ reduced_loss = average_losses_across_data_parallel_group([loss, \ + correct_predictions_count]) + + # Loss scaling for correct losses in Supervised Retrieval + loss = loss * mpu.get_data_parallel_world_size() + + return loss, {'lm loss': reduced_loss[0], + 'correct_prediction_count': reduced_loss[1]} + + + def train_valid_datasets_provider(): + """Build train and validation dataset.""" + args = get_args() + tokenizer = get_tokenizer() + + train_dataset = Dataset('training', + args.train_data, + tokenizer, + args.retriever_seq_length, + evaluate=False) + valid_dataset = Dataset('validation', + args.valid_data, + tokenizer, + args.retriever_seq_length, + evaluate=True) + return train_dataset, valid_dataset + + def model_provider(pre_process=True, post_process=True): + """Build the model.""" + args = get_args() + print_rank_0('building retriever model for {} ...'.format(args.task)) + + model_type_orqa = ModelType.encoder_or_decoder + model = megatron.model.biencoder_model.biencoder_model_provider(only_context_model=False, + only_query_model=False, + biencoder_shared_query_context_model=args.biencoder_shared_query_context_model, + pre_process=pre_process, + post_process=post_process, + model_type=model_type_orqa) + return model + + def single_dataset_provider(datapath): + args = get_args() + tokenizer = get_tokenizer() + + name = datapath[0].split('/')[-1].split('.')[0] + return Dataset(name, + datapath, + tokenizer, + args.retriever_seq_length, + evaluate=True) + + def metrics_func_provider(): + """Provide metrics callback function.""" + return accuracy_func_provider(single_dataset_provider) + + """Finetune/evaluate.""" + model_type_orqa = ModelType.encoder_or_decoder + tasks.finetune_utils.finetune(train_valid_datasets_provider, + model_provider, + model_type_orqa, + forward_step=cross_entropy_forward_step, + end_of_epoch_callback_provider=metrics_func_provider, + task_collate_fn=task_collate_fn) + + +def main(): + args = get_args() + + if args.task == 'RET-FINETUNE-NQ': + from tasks.orqa.supervised.data import NQSupervisedDataset as Dataset + else: + raise NotImplementedError('ORQA task {} is not implemented.'.format( + args.task)) + + orqa(Dataset) + diff --git a/multilinguality_megatron/tasks/orqa/unsupervised/nq.py b/multilinguality_megatron/tasks/orqa/unsupervised/nq.py new file mode 100644 index 0000000000000000000000000000000000000000..56fd77c12c920f86182554753d4f83e6cfa06030 --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/unsupervised/nq.py @@ -0,0 +1,215 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +""" + Data Loader for Google NQ dataset +""" + +from abc import ABC +import csv +from collections import OrderedDict +import numpy as np + +import torch +from torch.utils.data import DataLoader +from torch.utils.data import Dataset, BatchSampler + +from megatron import print_rank_0, get_args, get_tokenizer +from megatron.data.biencoder_dataset_utils import make_attention_mask + +def get_nq_dataset(qa_data, split): + args = get_args() + tokenizer = get_tokenizer() + + dataset = NQDataset('Google NQ {} Split'.format(split), + 'Google Natural Questions', + qa_data, + tokenizer, + args.retriever_seq_length) + return dataset + + +def process_nq_batch(batch): + query_tokens = batch['token_ids'].long().cuda() + query_mask = (batch['token_mask'] < 0.5).cuda() + query_types = batch['token_types'].long().cuda() + query_len = batch['seq_len'].long().cuda() + reference = batch['reference'] + + return query_tokens, query_mask, query_types, query_len, reference + + +class CustomDataLoader(DataLoader): + def __init__(self, dataset, eval=False, **kwargs): + if kwargs.get('collate_fn', None) is None: + kwargs['collate_fn'] = self._collate_fn + self.eval = eval + super().__init__(dataset, **kwargs) + + def _collate_fn(self, batch_data): + # generate batch + batch_size = len(batch_data) + tensorized = OrderedDict() + for d in batch_data: + for k, v in d.items(): + tensorized.setdefault(k, []).append(v) + assert len(tensorized) == 5 + + tensorized['token_ids'] = torch.LongTensor(tensorized['token_ids']) + tensorized['token_mask'] = torch.LongTensor(tensorized['token_mask']) + tensorized['token_types'] = torch.LongTensor(tensorized['token_types']) + tensorized['seq_len'] = torch.LongTensor(tensorized['seq_len']) + return tensorized + + +def get_one_epoch_nq_dataloader(dataset, micro_batch_size=None): + """Data loader. Note that batch-size is the local (per GPU) batch-size. + NOTE: This dataloader is not distributed !!! + """ + + args = get_args() + if micro_batch_size is None: + micro_batch_size = args.micro_batch_size + num_workers = args.num_workers + + sampler = torch.utils.data.SequentialSampler(dataset) + # importantly, drop_last must be False to get all the data. + batch_sampler = BatchSampler(sampler, + batch_size=micro_batch_size, + drop_last=False) + + # Data loader. Note that batch size is the per GPU batch size. + data_loader = CustomDataLoader(dataset, + batch_sampler=batch_sampler, + num_workers=num_workers, + pin_memory=True) + return data_loader + + +def build_tokens_types_paddings_from_text(src_text, tokenizer, max_seq_length): + """Build token types and paddings, trim if needed, and pad if needed.""" + + src_text_ids = tokenizer.tokenize(src_text) + + return build_tokens_types_paddings_from_ids(src_text_ids, + max_seq_length, + tokenizer.cls, + tokenizer.sep, + tokenizer.pad) + + +def build_tokens_types_paddings_from_ids(src_ids, max_seq_length, cls_id, \ + sep_id, pad_id): + """ + Build token types and paddings, trim if needed, and pad if needed. + + TODO: Design modular interface to reuse this function. This is getting + repeated multiple times in different tasks + """ + + enc_ids = [] + tokentypes_enc = [] + + # [CLS]. + enc_ids.append(cls_id) + tokentypes_enc.append(0) + + # A. + len_src = len(src_ids) + enc_ids.extend(src_ids) + tokentypes_enc.extend([0] * len_src) + + # Cap the size. + if len(enc_ids) > max_seq_length - 1: + enc_ids = enc_ids[0: max_seq_length - 1] + tokentypes_enc = tokentypes_enc[0: max_seq_length - 1] + + # [SEP]. 
+ enc_ids.append(sep_id) + tokentypes_enc.append(0) + + num_tokens_enc = len(enc_ids) + # Padding. + padding_length = max_seq_length - len(enc_ids) + if padding_length > 0: + enc_ids.extend([pad_id] * padding_length) + tokentypes_enc.extend([pad_id] * padding_length) + + return enc_ids, tokentypes_enc, num_tokens_enc + + +def build_sample(token_ids, token_types, num_tokens, reference): + """ + Convert to numpy and return a sample consumed by the + batch producer. + """ + + token_ids = np.array(token_ids, dtype=np.int64) + token_types = np.array(token_types, dtype=np.int64) + token_mask = make_attention_mask(token_ids, token_ids) + + sample = ({ + 'token_ids': token_ids, + 'token_mask': token_mask, + 'token_types': token_types, + 'seq_len': num_tokens, + 'reference': reference + }) + return sample + + +class NQDataset(ABC, Dataset): + """ + Open Retrieval Question Answering evaluation using Google NQ dataset. + """ + + def __init__(self, task_name, dataset_name, datapath, + tokenizer, max_seq_length): + # Store inputs. + self.task_name = task_name + self.dataset_name = dataset_name + self.tokenizer = tokenizer + self.max_seq_length = max_seq_length + print_rank_0(' > building {} dataset for {}:'.format(self.task_name, + self.dataset_name)) + print_rank_0(datapath) + self.samples = self.process_samples_from_single_path(datapath) + print_rank_0(' >> total number of samples: {}'.format(\ + len(self.samples))) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + raw_sample = self.samples[idx] + + ques_tokens, tokentypes_enc, num_tokens_ques = \ + build_tokens_types_paddings_from_text(raw_sample['question'], + self.tokenizer, self.max_seq_length) + + sample = build_sample(ques_tokens, + tokentypes_enc, + num_tokens_ques, + raw_sample['answers']) + return sample + + @staticmethod + def process_samples_from_single_path(filename): + print_rank_0(' > Processing {} ...'.format(filename)) + samples = [] + total = 0 + + with open(filename, 'r') as ifile: + reader = csv.reader(ifile, delimiter='\t') + for row in reader: + question = row[0] + answers = eval(row[1]) + + sample = {'question': question, 'answers': answers} + total += 1 + samples.append(sample) + + if total % 1000 == 0: + print_rank_0(' > processed {} so far ...'.format(total)) + + print_rank_0(' >> processed {} samples.'.format(len(samples))) + return samples diff --git a/multilinguality_megatron/tasks/orqa/unsupervised/qa_utils.py b/multilinguality_megatron/tasks/orqa/unsupervised/qa_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..811a05834a47ce1e9f9cca9bae9e0f77f937b588 --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/unsupervised/qa_utils.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# + +# The following code has been taken from +# https://github.com/facebookresearch/DPR, which is CC-BY-NC 4.0 +# licensed as of now. 
More details on the license can be found +# at https://github.com/facebookresearch/DPR/blob/master/LICENSE + +""" + Set of utilities for Q&A results validation tasks - Retriver passage + validation and Reader predicted answer validation +""" + +import collections +import logging +import string +import unicodedata +from functools import partial +from multiprocessing import Pool as ProcessPool +from typing import Tuple, List, Dict + +import regex as re +from tasks.orqa.unsupervised.tokenizers import SimpleTokenizer + +logger = logging.getLogger(__name__) + +QAMatchStats = collections.namedtuple('QAMatchStats', ['top_k_hits',\ + 'questions_doc_hits']) + +def calculate_matches(all_docs: Dict[object, Tuple[str, str]], + answers: List[List[str]], closest_docs: List[Tuple[List[object], + List[float]]], workers_num: int, match_type: str) -> QAMatchStats: + """ + Evaluates answers presence in the set of documents. This function is + supposed to be used with a large collection of documents and results. + It internally forks multiple sub-processes for evaluation and then + merges results + :param all_docs: dictionary of the entire documents database. + doc_id -> (doc_text, title) + :param answers: list of answers's list. One list per question + :param closest_docs: document ids of the top results along with their + scores + :param workers_num: amount of parallel threads to process data + :param match_type: type of answer matching. Refer to has_answer code for + available options + :return: matching information tuple. + top_k_hits - a list where the index is the amount of top documents retrieved + and the value is the total amount of valid matches across an entire + dataset. + questions_doc_hits - more detailed info with answer matches for every + question and every retrieved document + """ + global dpr_all_documents + dpr_all_documents = all_docs + + tok_opts = {} + tokenizer = SimpleTokenizer(**tok_opts) + + processes = ProcessPool( + processes=workers_num, + ) + + logger.info('Matching answers in top docs...') + + get_score_partial = partial(check_answer, match_type=match_type, + tokenizer=tokenizer) + + questions_answers_docs = zip(answers, closest_docs) + + scores = processes.map(get_score_partial, questions_answers_docs) + + logger.info('Per question validation results len=%d', len(scores)) + + n_docs = len(closest_docs[0][0]) + top_k_hits = [0] * n_docs + for question_hits in scores: + best_hit = next((i for i, x in enumerate(question_hits) if x), None) + if best_hit is not None: + top_k_hits[best_hit:] = [v + 1 for v in top_k_hits[best_hit:]] + + return QAMatchStats(top_k_hits, scores) + + +def check_answer(questions_answers_docs, tokenizer, match_type) -> List[bool]: + """ + Search through all the top docs to see if they have any of the answers. + """ + answers, (doc_ids, doc_scores) = questions_answers_docs + + global dpr_all_documents + hits = [] + + for i, doc_id in enumerate(doc_ids): + doc = dpr_all_documents[doc_id] + text = doc[0] + + answer_found = False + if text is None: # cannot find the document for some reason + logger.warning("no doc in db") + hits.append(False) + continue + + if has_answer(answers, text, tokenizer, match_type): + answer_found = True + hits.append(answer_found) + return hits + + +def has_answer(answers, text, tokenizer, match_type) -> bool: + """ + Check if a document contains an answer string. + If `match_type` is string, token matching is done between the text + and answer. + If `match_type` is regex, we search the whole text with the regex. 
+ """ + text = _normalize(text) + + if match_type == 'string': + # Answer is a list of possible strings + text = tokenizer.tokenize(text).words(uncased=True) + + for single_answer in answers: + single_answer = _normalize(single_answer) + single_answer = tokenizer.tokenize(single_answer) + single_answer = single_answer.words(uncased=True) + + for i in range(0, len(text) - len(single_answer) + 1): + if single_answer == text[i: i + len(single_answer)]: + return True + + elif match_type == 'regex': + # Answer is a regex + for single_answer in answers: + single_answer = _normalize(single_answer) + if regex_match(text, single_answer): + return True + return False + + +def regex_match(text, pattern): + """Test if a regex pattern is contained within a text.""" + try: + pattern = re.compile( + pattern, + flags=re.IGNORECASE + re.UNICODE + re.MULTILINE, + ) + except BaseException: + return False + return pattern.search(text) is not None + + +# function for the reader model answer validation +def exact_match_score(prediction, ground_truth): + return _normalize_answer(prediction) == _normalize_answer(ground_truth) + + +def _normalize_answer(s): + def remove_articles(text): + return re.sub(r'\b(a|an|the)\b', ' ', text) + + def white_space_fix(text): + return ' '.join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return ''.join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def _normalize(text): + return unicodedata.normalize('NFD', text) diff --git a/multilinguality_megatron/tasks/orqa/unsupervised/tokenizers.py b/multilinguality_megatron/tasks/orqa/unsupervised/tokenizers.py new file mode 100644 index 0000000000000000000000000000000000000000..fb23887ebdd43ca83b2a6746ddc77b2a69fc1dd8 --- /dev/null +++ b/multilinguality_megatron/tasks/orqa/unsupervised/tokenizers.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# + +# The following code has been taken from +# https://github.com/facebookresearch/DPR, which is CC-BY-NC 4.0 +# licensed as of now. 
More details on the license can be found +# at https://github.com/facebookresearch/DPR/blob/master/LICENSE + +""" +Most of the tokenizers code here is copied from DrQA codebase to avoid adding extra dependency +""" + +import copy +import logging + +import regex +import spacy + +logger = logging.getLogger(__name__) + + +class Tokens(object): + """A class to represent a list of tokenized text.""" + TEXT = 0 + TEXT_WS = 1 + SPAN = 2 + POS = 3 + LEMMA = 4 + NER = 5 + + def __init__(self, data, annotators, opts=None): + self.data = data + self.annotators = annotators + self.opts = opts or {} + + def __len__(self): + """The number of tokens.""" + return len(self.data) + + def slice(self, i=None, j=None): + """Return a view of the list of tokens from [i, j).""" + new_tokens = copy.copy(self) + new_tokens.data = self.data[i: j] + return new_tokens + + def untokenize(self): + """Returns the original text (with whitespace reinserted).""" + return ''.join([t[self.TEXT_WS] for t in self.data]).strip() + + def words(self, uncased=False): + """Returns a list of the text of each token + + Args: + uncased: lower cases text + """ + if uncased: + return [t[self.TEXT].lower() for t in self.data] + else: + return [t[self.TEXT] for t in self.data] + + def offsets(self): + """Returns a list of [start, end) character offsets of each token.""" + return [t[self.SPAN] for t in self.data] + + def pos(self): + """Returns a list of part-of-speech tags of each token. + Returns None if this annotation was not included. + """ + if 'pos' not in self.annotators: + return None + return [t[self.POS] for t in self.data] + + def lemmas(self): + """Returns a list of the lemmatized text of each token. + Returns None if this annotation was not included. + """ + if 'lemma' not in self.annotators: + return None + return [t[self.LEMMA] for t in self.data] + + def entities(self): + """Returns a list of named-entity-recognition tags of each token. + Returns None if this annotation was not included. + """ + if 'ner' not in self.annotators: + return None + return [t[self.NER] for t in self.data] + + def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True): + """Returns a list of all ngrams from length 1 to n. + + Args: + n: upper limit of ngram length + uncased: lower cases text + filter_fn: user function that takes in an ngram list and returns + True or False to keep or not keep the ngram + as_string: return the ngram as a string vs list + """ + + def _skip(gram): + if not filter_fn: + return False + return filter_fn(gram) + + words = self.words(uncased) + ngrams = [(s, e + 1) + for s in range(len(words)) + for e in range(s, min(s + n, len(words))) + if not _skip(words[s:e + 1])] + + # Concatenate into strings + if as_strings: + ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams] + + return ngrams + + def entity_groups(self): + """Group consecutive entity tokens with the same NER tag.""" + entities = self.entities() + if not entities: + return None + non_ent = self.opts.get('non_ent', 'O') + groups = [] + idx = 0 + while idx < len(entities): + ner_tag = entities[idx] + # Check for entity tag + if ner_tag != non_ent: + # Chomp the sequence + start = idx + while (idx < len(entities) and entities[idx] == ner_tag): + idx += 1 + groups.append((self.slice(start, idx).untokenize(), ner_tag)) + else: + idx += 1 + return groups + + +class Tokenizer(object): + """Base tokenizer class. + Tokenizers implement tokenize, which should return a Tokens class. 
+ """ + + def tokenize(self, text): + raise NotImplementedError + + def shutdown(self): + pass + + def __del__(self): + self.shutdown() + + +class SimpleTokenizer(Tokenizer): + ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+' + NON_WS = r'[^\p{Z}\p{C}]' + + def __init__(self, **kwargs): + """ + Args: + annotators: None or empty set (only tokenizes). + """ + self._regexp = regex.compile( + '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS), + flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE + ) + if len(kwargs.get('annotators', {})) > 0: + logger.warning('%s only tokenizes! Skipping annotators: %s' % + (type(self).__name__, kwargs.get('annotators'))) + self.annotators = set() + + def tokenize(self, text): + data = [] + matches = [m for m in self._regexp.finditer(text)] + for i in range(len(matches)): + # Get text + token = matches[i].group() + + # Get whitespace + span = matches[i].span() + start_ws = span[0] + if i + 1 < len(matches): + end_ws = matches[i + 1].span()[0] + else: + end_ws = span[1] + + # Format data + data.append(( + token, + text[start_ws: end_ws], + span, + )) + return Tokens(data, self.annotators) + + +class SpacyTokenizer(Tokenizer): + + def __init__(self, **kwargs): + """ + Args: + annotators: set that can include pos, lemma, and ner. + model: spaCy model to use (either path, or keyword like 'en'). + """ + model = kwargs.get('model', 'en') + self.annotators = copy.deepcopy(kwargs.get('annotators', set())) + nlp_kwargs = {'parser': False} + if not any([p in self.annotators for p in ['lemma', 'pos', 'ner']]): + nlp_kwargs['tagger'] = False + if 'ner' not in self.annotators: + nlp_kwargs['entity'] = False + self.nlp = spacy.load(model, **nlp_kwargs) + + def tokenize(self, text): + # We don't treat new lines as tokens. + clean_text = text.replace('\n', ' ') + tokens = self.nlp.tokenizer(clean_text) + if any([p in self.annotators for p in ['lemma', 'pos', 'ner']]): + self.nlp.tagger(tokens) + if 'ner' in self.annotators: + self.nlp.entity(tokens) + + data = [] + for i in range(len(tokens)): + # Get whitespace + start_ws = tokens[i].idx + if i + 1 < len(tokens): + end_ws = tokens[i + 1].idx + else: + end_ws = tokens[i].idx + len(tokens[i].text) + + data.append(( + tokens[i].text, + text[start_ws: end_ws], + (tokens[i].idx, tokens[i].idx + len(tokens[i].text)), + tokens[i].tag_, + tokens[i].lemma_, + tokens[i].ent_type_, + )) + + # Set special option for non-entity tag: '' vs 'O' in spaCy + return Tokens(data, self.annotators, opts={'non_ent': ''}) diff --git a/multilinguality_megatron/tasks/race/data.py b/multilinguality_megatron/tasks/race/data.py new file mode 100644 index 0000000000000000000000000000000000000000..c4967a0842fc35b6cbfa20dff49a3dc93342f073 --- /dev/null +++ b/multilinguality_megatron/tasks/race/data.py @@ -0,0 +1,135 @@ + +import glob +import json +import os +import time + +from torch.utils.data import Dataset + +from megatron import print_rank_0 +from tasks.data_utils import build_sample +from tasks.data_utils import build_tokens_types_paddings_from_ids +from tasks.data_utils import clean_text + + +NUM_CHOICES = 4 +MAX_QA_LENGTH = 128 + + +class RaceDataset(Dataset): + + def __init__(self, dataset_name, datapaths, tokenizer, max_seq_length, + max_qa_length=MAX_QA_LENGTH): + + self.dataset_name = dataset_name + print_rank_0(' > building RACE dataset for {}:'.format( + self.dataset_name)) + + string = ' > paths:' + for path in datapaths: + string += ' ' + path + print_rank_0(string) + + self.samples = [] + for datapath in datapaths: + 
self.samples.extend(process_single_datapath(datapath, tokenizer, + max_qa_length, + max_seq_length)) + + print_rank_0(' >> total number of samples: {}'.format( + len(self.samples))) + + # This indicates that each "sample" has multiple samples that + # will collapse into batch dimension + self.sample_multiplier = NUM_CHOICES + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + return self.samples[idx] + + +def process_single_datapath(datapath, tokenizer, max_qa_length, max_seq_length): + """Read in RACE files, combine, clean-up, tokenize, and convert to + samples.""" + + print_rank_0(' > working on {}'.format(datapath)) + start_time = time.time() + + # Get list of files. + filenames = glob.glob(os.path.join(datapath, '*.txt')) + + samples = [] + num_docs = 0 + num_questions = 0 + num_samples = 0 + # Load all the files + for filename in filenames: + with open(filename, 'r') as f: + for line in f: + data = json.loads(line) + num_docs += 1 + + context = data["article"] + questions = data["questions"] + choices = data["options"] + answers = data["answers"] + # Check the length. + assert len(questions) == len(answers) + assert len(questions) == len(choices) + + # Context: clean up and convert to ids. + context = clean_text(context) + context_ids = tokenizer.tokenize(context) + + # Loop over questions. + for qi, question in enumerate(questions): + num_questions += 1 + # Label. + label = ord(answers[qi]) - ord("A") + assert label >= 0 + assert label < NUM_CHOICES + assert len(choices[qi]) == NUM_CHOICES + + # For each question, build num-choices samples. + ids_list = [] + types_list = [] + paddings_list = [] + for ci in range(NUM_CHOICES): + choice = choices[qi][ci] + # Merge with choice. + if "_" in question: + qa = question.replace("_", choice) + else: + qa = " ".join([question, choice]) + # Clean QA. + qa = clean_text(qa) + # Tokenize. + qa_ids = tokenizer.tokenize(qa) + # Trim if needed. + if len(qa_ids) > max_qa_length: + qa_ids = qa_ids[0:max_qa_length] + + # Build the sample. + ids, types, paddings \ + = build_tokens_types_paddings_from_ids( + qa_ids, context_ids, max_seq_length, + tokenizer.cls, tokenizer.sep, tokenizer.pad) + + ids_list.append(ids) + types_list.append(types) + paddings_list.append(paddings) + + # Convert to numpy and add to samples + samples.append(build_sample(ids_list, types_list, + paddings_list, label, + num_samples)) + num_samples += 1 + + elapsed_time = time.time() - start_time + print_rank_0(' > processed {} document, {} questions, and {} samples' + ' in {:.2f} seconds'.format(num_docs, num_questions, + num_samples, elapsed_time)) + + return samples diff --git a/multilinguality_megatron/tasks/race/finetune.py b/multilinguality_megatron/tasks/race/finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..e1753a43e31524b77554a5d67c20ca5c84da9f9a --- /dev/null +++ b/multilinguality_megatron/tasks/race/finetune.py @@ -0,0 +1,58 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
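A note on the RACE sample construction in tasks/race/data.py above: each question expands into NUM_CHOICES candidate sequences that all share the same passage, the choice either fills the "_" blank or is appended to the question, and the label is the answer letter's offset from "A". The snippet below is a minimal, self-contained sketch of that expansion; the whitespace tokenizer is a stand-in, and the real build_tokens_types_paddings_from_ids / build_sample helpers are not reimplemented here.

# Hedged sketch of the RACE choice expansion; whitespace split stands in
# for the real Megatron tokenizer, and padding/type-id handling is omitted.
NUM_CHOICES = 4
MAX_QA_LENGTH = 128

def expand_question(question, choices, answer, tokenize=str.split,
                    max_qa_length=MAX_QA_LENGTH):
    """Return (NUM_CHOICES token lists, integer label) for one question."""
    assert len(choices) == NUM_CHOICES
    label = ord(answer) - ord("A")  # 'A' -> 0, ..., 'D' -> 3
    qa_token_lists = []
    for choice in choices:
        # Fill the blank if the question has one, otherwise append the choice.
        qa = question.replace("_", choice) if "_" in question else " ".join([question, choice])
        qa_token_lists.append(tokenize(qa)[:max_qa_length])
    return qa_token_lists, label

if __name__ == "__main__":
    qa_lists, label = expand_question("The author mainly wants to _ .",
                                      ["entertain", "warn", "inform", "argue"], "C")
    assert label == 2 and len(qa_lists) == NUM_CHOICES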
+ +"""Race.""" + +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_tokenizer +from megatron.model.multiple_choice import MultipleChoice +import tasks.eval_utils +import tasks.finetune_utils +from tasks.race.data import RaceDataset +from megatron.model import ModelType + + +def train_valid_datasets_provider(): + """Provide train and validation datasets.""" + args = get_args() + tokenizer = get_tokenizer() + + train_dataset = RaceDataset('training', args.train_data, + tokenizer, args.seq_length) + valid_dataset = RaceDataset('validation', args.valid_data, + tokenizer, args.seq_length) + + return train_dataset, valid_dataset + + +def model_provider(pre_process=True, + post_process=True): + """Build the model.""" + + model_type = ModelType.encoder_or_decoder + print_rank_0('building multichoice model for RACE ...') + model = MultipleChoice(num_tokentypes=2, + pre_process=pre_process, + post_process=post_process, + model_type=model_type) + return model + + +def metrics_func_provider(): + """Privde metrics callback function.""" + args = get_args() + tokenizer = get_tokenizer() + + def single_dataset_provider(datapath): + name = datapath.split('RACE')[-1].strip('/').replace('/', '-') + return RaceDataset(name, [datapath], tokenizer, args.seq_length) + + return tasks.eval_utils.accuracy_func_provider(single_dataset_provider) + + +def main(): + model_type = ModelType.encoder_or_decoder + tasks.finetune_utils.finetune(train_valid_datasets_provider, + model_provider, + model_type, + end_of_epoch_callback_provider=metrics_func_provider) diff --git a/multilinguality_megatron/tasks/zeroshot_gpt/datasets.py b/multilinguality_megatron/tasks/zeroshot_gpt/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..5ad42f87ee24d0c5667614dabb79631113508e08 --- /dev/null +++ b/multilinguality_megatron/tasks/zeroshot_gpt/datasets.py @@ -0,0 +1,147 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""Zero-shot datasets.""" + +import json +import math + +import numpy as np +import torch + +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_tokenizer +from .detokenizer import get_detokenizer + + +def build_dataset(task): + """Helper function to select and build dataset.""" + + if task == 'LAMBADA': + return _build_lambada_dataset() + if task == 'WIKITEXT103': + return _build_wikitext103_dataset() + raise NotImplementedError('dataset for {} task is not ' + 'implemented.'.format(task)) + + +class _LMDataset(torch.utils.data.Dataset): + + def __init__(self, tokens, seq_len, pad_idx, num_original_tokens, + num_tokenized_tokens, overalapping_eval=None): + self.tokens = tokens + self.seq_len = seq_len + self.pad_idx = pad_idx + self.overalapping_eval = overalapping_eval + if self.overalapping_eval is None: + self.overalapping_eval = self.seq_len + self.overalapping_eval = max(1, self.overalapping_eval) + self.num_original_tokens = num_original_tokens + self.num_tokenized_tokens = num_tokenized_tokens + self.total_targets = len(self.tokens) - 1 + # remove first sequence tokens + targets = max(self.total_targets - self.overalapping_eval, 0) + self.total_sequences = max( + math.ceil(targets / self.overalapping_eval) + 1, 1) + + def __len__(self): + return self.total_sequences + + def __getitem__(self, idx): + start_idx = idx * self.overalapping_eval + end_idx = start_idx + self.seq_len + tokens = self.tokens[start_idx:end_idx + 1] + num_tokens = len(tokens) + pad_mask = [1] * num_tokens + if num_tokens < self.seq_len + 1: + num_pad = (self.seq_len + 1 - num_tokens) + pad_mask += [0] * (num_pad) + tokens += [self.pad_idx] * num_pad + pad_mask = np.array(pad_mask[1:]) + if self.overalapping_eval != self.seq_len and idx != 0: + pad_mask[:-self.overalapping_eval] *= 0 + + return {'text': np.array(tokens), 'pad_mask': pad_mask} + + +class _LambadaDataset(torch.utils.data.Dataset): + + def __init__(self, path, pad_idx, tokenizer, seq_len, strict=False): + print_rank_0('> building lambada dataset from {} ...'.format(path)) + self.seq_len = seq_len + self.pad_idx = pad_idx + self.tokenizer = tokenizer + self.strict = strict + + self.tokens = [] + self.labels = [] + with open(path, 'r') as f: + for line in f.readlines(): + text = json.loads(line)['text'] + tokens, labels = self.get_tokens(text) + self.tokens.append(tokens) + self.labels.append(labels) + + def get_tokens(self, text): + if not self.strict: + tokens = self.tokenizer.tokenize(text) + return tokens[:-1], [tokens[-1]] + last_token = text.split()[-1] + start_idx = text.rfind(last_token) + beginning_tokens = self.tokenizer.tokenize(text[:start_idx].strip()) + last_token = self.tokenizer.tokenize(' ' + last_token) + return beginning_tokens, last_token + + def __len__(self): + return len(self.tokens) + + def __getitem__(self, idx): + tokens = self.tokens[idx] + num_tokens = len(tokens) + pad_mask = [0] * num_tokens + labels = self.labels[idx] + pad_mask += [1] * len(labels) + tokens = tokens + labels + num_tokens = len(tokens) + if num_tokens < self.seq_len + 1: + num_pad = (self.seq_len + 1 - num_tokens) + pad_mask += [0] * (num_pad) + tokens += [self.pad_idx] * num_pad + pad_mask = np.array(pad_mask[1:]) + + return {'text': np.array(tokens), 'pad_mask': pad_mask} + + +def _build_lambada_dataset(): + """Build lambada dataset.""" + args = get_args() + tokenizer = get_tokenizer() + + assert len(args.valid_data) == 1 + val_dataset = _LambadaDataset(args.valid_data[0], tokenizer.eod, tokenizer, + 
args.seq_length, args.strict_lambada) + print_rank_0(' > found {} samples.'.format(len(val_dataset))) + + return val_dataset + + +def _build_wikitext103_dataset(): + """""" + args = get_args() + tokenizer = get_tokenizer() + + assert len(args.valid_data) == 1 + with open(args.valid_data[0], "rb") as reader: + entire_data = reader.read().decode('utf-8') + num_original_tokens = len(entire_data.strip().split(" ")) + entire_data = get_detokenizer(args.valid_data[0])(entire_data) + tokenized_data = tokenizer.tokenize(entire_data) + num_tokenized_tokens = len(tokenized_data) + + val_dataset = _LMDataset(tokenized_data, args.seq_length, tokenizer.eod, + num_original_tokens, num_tokenized_tokens, + args.overlapping_eval) + print_rank_0(' > number of original tokens: {}, number of detokenized ' + 'tokens: {}'.format(num_original_tokens, num_tokenized_tokens)) + + return val_dataset diff --git a/multilinguality_megatron/tasks/zeroshot_gpt/detokenizer.py b/multilinguality_megatron/tasks/zeroshot_gpt/detokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..f7dfe4b775e0363b89ea4930317492a3cb1731b0 --- /dev/null +++ b/multilinguality_megatron/tasks/zeroshot_gpt/detokenizer.py @@ -0,0 +1,67 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Detokenization.""" + +import re + + +def ptb_detokenizer(string): + string = string.replace(" '", "'") + string = string.replace(" \n", "\n") + string = string.replace("\n ", "\n") + string = string.replace(" n't", "n't") + string = string.replace(" N ", "1 ") + string = string.replace("$ 1", "$1") + string = string.replace("# 1", "#1") + return string + + +def wikitext_detokenizer(string): + # contractions + string = string.replace("s '", "s'") + string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string) + # number separators + string = string.replace(" @-@ ", "-") + string = string.replace(" @,@ ", ",") + string = string.replace(" @.@ ", ".") + # punctuation + string = string.replace(" : ", ": ") + string = string.replace(" ; ", "; ") + string = string.replace(" . ", ". ") + string = string.replace(" ! ", "! ") + string = string.replace(" ? ", "? ") + string = string.replace(" , ", ", ") + # double brackets + string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string) + string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string) + string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string) + string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string) + string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string) + # miscellaneous + string = string.replace("= = = =", "====") + string = string.replace("= = =", "===") + string = string.replace("= =", "==") + string = string.replace(" " + chr(176) + " ", chr(176)) + string = string.replace(" \n", "\n") + string = string.replace("\n ", "\n") + string = string.replace(" N ", " 1 ") + string = string.replace(" 's", "'s") + + return string + + +def lambada_detokenizer(string): + return string + + +_DETOKENIZERS = { + 'ptb': ptb_detokenizer, + 'wiki': wikitext_detokenizer, + 'lambada': lambada_detokenizer, +} + + +def get_detokenizer(path): + for key in _DETOKENIZERS.keys(): + if key in path: + return _DETOKENIZERS[key] diff --git a/multilinguality_megatron/tasks/zeroshot_gpt/evaluate.py b/multilinguality_megatron/tasks/zeroshot_gpt/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..12fea66505c07a5397eeee73c6d69a39e107f94b --- /dev/null +++ b/multilinguality_megatron/tasks/zeroshot_gpt/evaluate.py @@ -0,0 +1,211 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. 
All rights reserved. + +"""GPT zero-shot evaluation.""" + +import math + +import torch + +from megatron import get_args +from megatron import print_rank_0, is_last_rank +from megatron import get_tokenizer +from megatron.core import parallel_state, tensor_parallel +from megatron.checkpointing import load_checkpoint +from megatron.model import GPTModel +from megatron.utils import get_ltor_masks_and_position_ids, unwrap_model +from megatron.p2p_communication import recv_forward, send_forward +import tasks.finetune_utils +import megatron.training + +from .datasets import build_dataset + +# These are needed to unwrap the model, would be nice to put these in megatron.utils if possible? +from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP +from megatron.model import DistributedDataParallel as LocalDDP +from megatron.model import Float16Module + + +def _get_model_provider(eval_metric): + """Based on evaluation metric set the parallel-output flag and + return the model provider.""" + + def model_provider(pre_process=True, post_process=True): + """Build the model.""" + + if eval_metric == 'loss': + parallel_output = True + elif eval_metric == 'accuracy': + parallel_output = False + else: + raise NotImplementedError('output type for {} evaluation metric ' + 'is not supported.'.format(eval_metric)) + + print_rank_0('building GPT model ...') + model = GPTModel(num_tokentypes=0, parallel_output=parallel_output, + pre_process=pre_process, post_process=post_process) + + return model + + return model_provider + + +def process_batch(batch): + """Process batch and produce inputs for the model.""" + args = get_args() + tokenizer = get_tokenizer() + + loss_mask = batch['pad_mask'].long().cuda().contiguous().byte() + tokens_ = batch['text'].long().cuda().contiguous() + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + # Get the masks and postition ids. + attention_mask, _, position_ids = get_ltor_masks_and_position_ids( + tokens, + tokenizer.eod, + args.reset_position_ids, + args.reset_attention_mask, + args.eod_mask_loss) + + return tokens, labels, attention_mask, position_ids, loss_mask + + +def forward_step(batch, model, eval_metric): + """Forward step.""" + + # Get the batch. + tokens, labels, attention_mask, position_ids, loss_mask = process_batch( + batch) + + # Tell the model what our actual batch size will be + args = get_args() + args.micro_batch_size = len(labels) + + input_tensor = recv_forward() + + # Forward pass through the model. + unwrapped_model = unwrap_model( + model, (torchDDP, LocalDDP, Float16Module)) + unwrapped_model.set_input_tensor(input_tensor) + output = model(tokens, position_ids, attention_mask) + + send_forward(output) + + if parallel_state.is_pipeline_last_stage(): + # For loss, return the unreduced loss. + if eval_metric == 'loss': + losses = tensor_parallel.vocab_parallel_cross_entropy( + output.contiguous().float(), labels.contiguous()) + loss = torch.sum( + losses.view(-1) * loss_mask.contiguous().view(-1).float()) + return loss + + # For accuracy, return the number of correctly predicted samples. 
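+ # (A sample counts as correct only if every position covered by the loss mask is predicted exactly: masked-out positions are forced to 1 before the product over the sequence dimension, so a single wrong target token zeroes the sample.)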
+ if eval_metric == 'accuracy': + outputs = torch.argmax(output, -1) + correct = (outputs == labels).float() + correct[(1 - loss_mask).bool()] = 1 + correct = correct.prod(-1) + return correct.sum() + + raise NotImplementedError('forward method for evaluation metric {} ' + 'is not implemented.'.format(eval_metric)) + return None + + +def evaluate(data_loader, model, eval_metric): + """Evaluation.""" + args = get_args() + + # Turn on evaluation mode which disables dropout. + model.eval() + + total_output = 0.0 + with torch.no_grad(): + # For all the batches in the dataset. + for iteration, batch in enumerate(data_loader): + if iteration % args.log_interval == 0: + print_rank_0('> working on iteration: {}'.format(iteration)) + # Forward evaluation. + output = forward_step(batch, model, eval_metric) + + # Reduce across processes. + if parallel_state.is_pipeline_last_stage(): + torch.distributed.all_reduce(output, + group=parallel_state.get_data_parallel_group()) + + total_output += output + + return total_output + + +def _evaluate_and_print_results(task, data_loader, model, eval_metric): + """Evaluate and print results on screen.""" + + # Evaluate and get results. + output = evaluate(data_loader, model, eval_metric) + + string = ' validation results on {} | '.format(task) + if is_last_rank(): + if eval_metric == 'loss': + num_tokenized_tokens = data_loader.dataset.num_tokenized_tokens + num_original_tokens = data_loader.dataset.num_original_tokens + val_loss = output / (num_tokenized_tokens - 1) + ppl = math.exp(min(20, val_loss)) + token_ratio = (num_tokenized_tokens - 1) / (num_original_tokens - 1) + adjusted_ppl = math.exp(min(20, val_loss * token_ratio)) + string += 'avg loss: {:.4E} | '.format(val_loss) + string += 'ppl: {:.4E} | '.format(ppl) + string += 'adjusted ppl: {:.4E} | '.format(adjusted_ppl) + string += 'token ratio: {} |'.format(token_ratio) + + elif eval_metric == 'accuracy': + num_examples = len(data_loader.dataset) + acc = output / num_examples + string += 'number correct: {:.4E} | '.format(output) + string += 'total examples: {:.4E} | '.format(num_examples) + string += 'avg accuracy: {:.4E}'.format(acc) + + else: + raise NotImplementedError('evaluation method for {} metric is not ' + 'implemented yet.'.format(eval_metric)) + + length = len(string) + 1 + print('-' * length) + print(string) + print('-' * length) + + +def main(): + """Main program.""" + args = get_args() + + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for text generation.") + exit() + + if args.task == 'LAMBADA': + eval_metric = 'accuracy' + elif args.task == 'WIKITEXT103': + eval_metric = 'loss' + else: + raise NotImplementedError('{} task is not implemented.'.format( + args.task)) + model_provider_func = _get_model_provider(eval_metric) + # Set up model and load checkpoint. + model = megatron.training.get_model(model_provider_func, wrap_with_ddp=False, args=args) + if args.load is not None: + _ = load_checkpoint(model, None, None) + + assert len(model) == 1, "Above condition should have caught this" + model = model[0] + + # Data stuff. + dataset = build_dataset(args.task) + dataloader = tasks.finetune_utils.build_data_loader(dataset, args.micro_batch_size, + args.num_workers, drop_last=False) + + # Run evaluation. 
+ _evaluate_and_print_results(args.task, dataloader, model, eval_metric) + + print_rank_0('done :-)') diff --git a/multilinguality_megatron/test_data.sh b/multilinguality_megatron/test_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..d245e9a7d86e6da3ee8098d1b318c8b9192eed7f --- /dev/null +++ b/multilinguality_megatron/test_data.sh @@ -0,0 +1,5 @@ +python new_monolingual_data.py nl & +python new_monolingual_data.py pt & +python new_monolingual_data.py ru & +python new_monolingual_data.py zh & +python new_monolingual_data.py ko & \ No newline at end of file diff --git a/multilinguality_megatron/tests/__init__.py b/multilinguality_megatron/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/multilinguality_megatron/tests/conftest.py b/multilinguality_megatron/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..796a342e0597a6cf796e394637a406df12bc6f1f --- /dev/null +++ b/multilinguality_megatron/tests/conftest.py @@ -0,0 +1,60 @@ +import pytest +from pathlib import Path + + +_test_failed_incremental: dict[str, dict[tuple[int, ...], str]] = {} + + +def pytest_addoption(parser): + parser.addoption("--cache_path", type=Path, + help="Huggingface cache path (optional)") + parser.addoption("--llama_path", type=Path, required=True, + help="Path where the raw 7B weights are located (llama1)") + parser.addoption("--llama2_path", type=Path, required=True, + help="Path where the raw llama-2-7b weights are located") + parser.addoption("--tmp_dir", type=Path, + help="Prefix of the tempdir to create (optional)") + parser.addoption("--data_path", type=Path, required=True, + help="Path where the megatron dataset is located") + parser.addoption("--vocab_path", type=Path, required=True, + help="Meta's vocabfile") + + +def pytest_runtest_makereport(item, call): + if "incremental" in item.keywords: + # incremental marker is used + if call.excinfo is not None: + # the test has failed + # retrieve the class name of the test + cls_name = str(item.cls) + # retrieve the index of the test (if parametrize is used in combination with incremental) + parametrize_index = ( + tuple(item.callspec.indices.values()) + if hasattr(item, "callspec") + else () + ) + # retrieve the name of the test function + test_name = item.originalname or item.name + # store in _test_failed_incremental the original name of the failed test + _test_failed_incremental.setdefault(cls_name, {}).setdefault( + parametrize_index, test_name + ) + + +def pytest_runtest_setup(item): + if "incremental" in item.keywords: + # retrieve the class name of the test + cls_name = str(item.cls) + # check if a previous test has failed for this class + if cls_name in _test_failed_incremental: + # retrieve the index of the test (if parametrize is used in combination with incremental) + parametrize_index = ( + tuple(item.callspec.indices.values()) + if hasattr(item, "callspec") + else () + ) + # retrieve the name of the first test function to fail for this class name and index + test_name = _test_failed_incremental[cls_name].get(parametrize_index, None) + # if name found, test has failed for the combination of class name & test name + if test_name is not None: + pytest.xfail("previous test failed ({})".format(test_name)) diff --git a/multilinguality_megatron/tests/pytest.ini b/multilinguality_megatron/tests/pytest.ini new file mode 100644 index 
0000000000000000000000000000000000000000..c4e0fdebef90a47a88eaa0dd7d318ba09919e450 --- /dev/null +++ b/multilinguality_megatron/tests/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +markers = + incremental: mark a test as incremental diff --git a/multilinguality_megatron/tests/tensor_parallel/test_cross_entropy.py b/multilinguality_megatron/tests/tensor_parallel/test_cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..2a725a2715b56c3ae7090c1e2a28e0e8b756ff29 --- /dev/null +++ b/multilinguality_megatron/tests/tensor_parallel/test_cross_entropy.py @@ -0,0 +1,14 @@ +from megatron.core.tensor_parallel.cross_entropy import vocab_parallel_cross_entropy +import torch +from tests.test_utilities import Utils +import numpy as np + +def test_vocab_parallel_cross_entropy(): + Utils.initialize_model_parallel(4,2) + vocab_parallel_logits = torch.range(0,7).repeat(16,4).cuda() + target = torch.arange(0,32,2).cuda() + output = vocab_parallel_cross_entropy(vocab_parallel_logits, target) + expected_output = torch.tensor([10.2309, 8.2309, 6.2309, 4.2309, 10.2309, 8.2309, 6.2309, 4.2309, + 10.2309, 8.2309, 6.2309, 4.2309, 10.2309, 8.2309, 6.2309, 4.2309]).cuda() + assert(torch.equal(torch.round(expected_output), torch.round(output))) + Utils.destroy_model_parallel() \ No newline at end of file diff --git a/multilinguality_megatron/tests/tensor_parallel/test_data.py b/multilinguality_megatron/tests/tensor_parallel/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..d7948474a79debcb11b5264af4fe26183826633e --- /dev/null +++ b/multilinguality_megatron/tests/tensor_parallel/test_data.py @@ -0,0 +1,21 @@ +from megatron.core.tensor_parallel.data import broadcast_data +import torch +from tests.test_utilities import Utils + +def test_broadcast_data(): + Utils.initialize_model_parallel(2,4) + input_data = { + 0 : torch.ones((8,8)).cuda() * 0.0, + 1 : torch.ones((8,8)).cuda() * 1.0, + 2 : torch.ones((8,8)).cuda() * 2.0, + 3 : torch.ones((8,8)).cuda() * 3.0, + 4 : torch.ones((8,8)).cuda() * 4.0, + 5 : torch.ones((8,8)).cuda() * 5.0, + 6 : torch.ones((8,8)).cuda() * 6.0, + 7 : torch.ones((8,8)).cuda() * 7.0 + } + dtype = torch.float32 + actual_output = broadcast_data([0,1],input_data, dtype) + assert(torch.equal(actual_output[0], input_data[0])) + assert(torch.equal(actual_output[1], input_data[1])) + Utils.destroy_model_parallel() \ No newline at end of file diff --git a/multilinguality_megatron/tests/tensor_parallel/test_mappings.py b/multilinguality_megatron/tests/tensor_parallel/test_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..52040a2edf8af47aca966fe919c31a44ac6e0aa5 --- /dev/null +++ b/multilinguality_megatron/tests/tensor_parallel/test_mappings.py @@ -0,0 +1,135 @@ +from megatron.core.tensor_parallel import mappings +from tests.test_utilities import Utils +import torch + +def test_CopyToModelParallelRegion(): + Utils.initialize_model_parallel(4,2) + input_data = torch.ones((1)).cuda()*Utils.rank + output_data = mappings._CopyToModelParallelRegion.backward(None, input_data) + result = torch.ones(1).cuda() + result = result * 22 if Utils.rank >= 4 else result * 6 + assert(torch.equal(output_data, result)) + assert(torch.equal(input_data, mappings.copy_to_tensor_model_parallel_region(input_data))) + assert(torch.equal(input_data, mappings._CopyToModelParallelRegion.symbolic(None, input_data))) + Utils.destroy_model_parallel() + +def test_ReduceFromModelParallelRegion(): + Utils.initialize_model_parallel(4,2) + input_data = 
torch.ones((1)).cuda()*Utils.rank + output_data = mappings._ReduceFromModelParallelRegion.symbolic(None, input_data) + result = torch.ones(1).cuda() + result = result * 22 if Utils.rank >= 4 else result * 6 + assert(torch.equal(output_data, result)) + input_data = torch.ones((1)).cuda()*Utils.rank + assert(torch.equal(mappings.reduce_from_tensor_model_parallel_region(input_data), result)) + assert(torch.equal(input_data, mappings._ReduceFromModelParallelRegion.backward(None, input_data))) + Utils.destroy_model_parallel() + +def test_ScatterToModelParallelRegion(): + Utils.initialize_model_parallel(4,2) + input_data = torch.rand((8,4)).cuda() + output_data = mappings.scatter_to_tensor_model_parallel_region(input_data) + req_dim = int(Utils.rank%(Utils.world_size/2)) + assert(torch.equal(output_data, input_data[:,req_dim].reshape((8,1)))) + output_data = mappings._ScatterToModelParallelRegion.symbolic(None, input_data) + assert(torch.equal(output_data, input_data[:, req_dim].reshape((8,1)))) + + input_data = torch.ones(8).cuda() * Utils.rank + actual_output_data = mappings._ScatterToModelParallelRegion.backward(None, input_data) + expected_output = torch.cat(( + torch.ones(8)*0, + torch.ones(8)*1, + torch.ones(8)*2, + torch.ones(8)*3)).cuda() + if (Utils.rank >= 4): + expected_output = expected_output + 4 + assert(torch.equal(actual_output_data, expected_output)) + Utils.destroy_model_parallel() + +def test_GatherFromModelParallelRegion(): + Utils.initialize_model_parallel(4,2) + input_data = torch.rand((8,4)).cuda() + req_dim = int(Utils.rank%(Utils.world_size/2)) + output_data = mappings._GatherFromModelParallelRegion.backward(None, input_data) + assert(torch.equal(output_data, input_data[:, req_dim].reshape((8,1)))) + input_data = torch.ones(8).cuda() * Utils.rank + actual_output_data = mappings.gather_from_tensor_model_parallel_region(input_data) + expected_output = torch.cat(( + torch.ones(8)*0, + torch.ones(8)*1, + torch.ones(8)*2, + torch.ones(8)*3)).cuda() + if (Utils.rank >= 4): + expected_output = expected_output + 4 + assert(torch.equal(actual_output_data, expected_output)) + assert(torch.equal(mappings._GatherFromModelParallelRegion.symbolic(None, input_data), expected_output)) + Utils.destroy_model_parallel() + +def test_ScatterToSequenceParallelRegion(): + Utils.initialize_model_parallel(4,2) + input_data = torch.rand((8,4)).cuda() + req_dim = int(Utils.rank%(Utils.world_size/2))*2 + output_data = mappings._ScatterToSequenceParallelRegion.symbolic(None, input_data) + assert(torch.equal(output_data, input_data[req_dim:req_dim+2, :])) + output_data = mappings.scatter_to_sequence_parallel_region(input_data) + assert(torch.equal(output_data, input_data[req_dim:req_dim+2, :])) + input_data = torch.ones(4).cuda() * Utils.rank + output_data = mappings._ScatterToModelParallelRegion.backward(None, input_data) + expected_output = torch.concat(( + torch.ones(4)*0, + torch.ones(4)*1, + torch.ones(4)*2, + torch.ones(4)*3)).cuda() + if (Utils.rank >= 4): + expected_output = expected_output + 4 + assert(torch.equal(output_data, expected_output)) + Utils.destroy_model_parallel() + +def test_GatherFromSequenceParallelRegion(): + Utils.initialize_model_parallel(4,2) + input_data = torch.ones(4).cuda() * Utils.rank + output_data = mappings.gather_from_sequence_parallel_region(input_data) + expected_output = torch.concat(( + torch.ones(4)*0, + torch.ones(4)*1, + torch.ones(4)*2, + torch.ones(4)*3)).cuda() + if (Utils.rank >= 4): + expected_output = expected_output + 4 + 
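+ # With initialize_model_parallel(4, 2) the tensor-parallel groups are ranks {0..3} and {4..7}; the gather concatenates each group member's ones(4) * rank along the sequence dimension, which is why an offset of 4 is expected on the second group.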
assert(torch.equal(output_data, expected_output)) + assert(torch.equal(mappings._GatherFromSequenceParallelRegion.symbolic(None, input_data), expected_output)) + input_data = torch.vstack(( + torch.ones(4)*0, + torch.ones(4)*1, + torch.ones(4)*2, + torch.ones(4)*3)).cuda() + class Ctx: + tensor_parallel_output_grad = True + output_data = mappings._GatherFromSequenceParallelRegion.backward(Ctx(), input_data) + expected_output = torch.ones((1,4)).cuda() * 4 * int(Utils.rank % 4) + assert(torch.equal(output_data[0], expected_output)) + Utils.destroy_model_parallel() + +def test_ReduceScatterToSequenceParallelRegion(): + Utils.initialize_model_parallel(4,2) + input_data = torch.vstack(( + torch.ones(4)*0, + torch.ones(4)*1, + torch.ones(4)*2, + torch.ones(4)*3)).cuda() + output_data = mappings.reduce_scatter_to_sequence_parallel_region(input_data) + expected_output = torch.ones(4).cuda() * 4 * int(Utils.rank % 4) + assert(torch.equal(output_data[0], expected_output)) + assert(torch.equal(mappings._ReduceScatterToSequenceParallelRegion.symbolic(None, input_data) , expected_output.reshape((1,4)))) + input_data = torch.ones(4).cuda() * Utils.rank + output_data = mappings._ReduceScatterToSequenceParallelRegion.backward(None,input_data) + expected_output = torch.concat(( + torch.ones(4)*0, + torch.ones(4)*1, + torch.ones(4)*2, + torch.ones(4)*3)).cuda() + if (Utils.rank >= 4): + expected_output = expected_output + 4 + assert(torch.equal(output_data, expected_output)) + Utils.destroy_model_parallel() + diff --git a/multilinguality_megatron/tests/tensor_parallel/test_random.py b/multilinguality_megatron/tests/tensor_parallel/test_random.py new file mode 100644 index 0000000000000000000000000000000000000000..82c6612d4620ee5e154819c543a0f4e0ab5bf6f0 --- /dev/null +++ b/multilinguality_megatron/tests/tensor_parallel/test_random.py @@ -0,0 +1,46 @@ +import megatron.core.tensor_parallel.random +from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed +from megatron.core.tensor_parallel.random import checkpoint +from tests.test_utilities import Utils +import pytest +import torch + + +def test_cuda_rng_states_tracker(): + rng_tracker = megatron.core.tensor_parallel.random.CudaRNGStatesTracker() + rng_tracker.set_states({"state1": 1234}) + assert(rng_tracker.get_states()["state1"] == 1234) + rng_tracker.reset() + assert(rng_tracker.get_states() == {}) + seed = 1111 + rng_tracker.add("state2", seed) + with pytest.raises(Exception): + assert(rng_tracker.add("state3", seed)) + with pytest.raises(Exception): + assert(rng_tracker.add("state2", 111)) + assert(rng_tracker.get_states()['state2'] is not None) + with pytest.raises(Exception): + assert() + + rng_tracker.fork("state2") + torch.cuda.manual_seed(seed) + rng_state = torch.cuda.get_rng_state() + assert torch.equal(rng_tracker.get_states()['state2'], rng_state) + + +def test_model_parallel_cuda_manual_seed(): + Utils.initialize_model_parallel(4,2) + model_parallel_cuda_manual_seed(0) + assert(megatron.core.tensor_parallel.random._CUDA_RNG_STATE_TRACKER.get_states()['model-parallel-rng'] is not None) + Utils.destroy_model_parallel() + + +def test_checkpoint(): + def test_forward(*input): + return input[0]+input[1] + assert(torch.equal(torch.ones(16)*3,checkpoint(test_forward, None, torch.ones(16), torch.ones(16)*2))) + Utils.initialize_model_parallel() + input1 = torch.ones((4,4)) + checkpoint(test_forward, True, input1, torch.ones((4,4))*2) + assert(torch.equal(torch.ones(input1.numel()).cuda(), input1)) + 
Utils.destroy_model_parallel() \ No newline at end of file diff --git a/multilinguality_megatron/tests/tensor_parallel/test_tensor_parallel_utils.py b/multilinguality_megatron/tests/tensor_parallel/test_tensor_parallel_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5aae470f4ff7df804e22a8175d979acf679b203f --- /dev/null +++ b/multilinguality_megatron/tests/tensor_parallel/test_tensor_parallel_utils.py @@ -0,0 +1,43 @@ +import torch +import megatron.core.tensor_parallel.utils as util +import megatron.core.parallel_state as ps +from tests.test_utilities import Utils + +rank = Utils.rank + +def test_split_tensor_along_last_dim(): + input_tensor = torch.rand((3,4)) + torch.equal(input_tensor[0:2,0:2], util.split_tensor_along_last_dim(input_tensor,2)[0]) + torch.equal(input_tensor[2:,2:], util.split_tensor_along_last_dim(input_tensor,2)[1]) + +def test_split_tensor_into_1d_equal_chunks(): + Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4) + input_tensor = torch.rand((3,4)) + output_tensor = util.split_tensor_into_1d_equal_chunks(input_tensor) + if rank % 2 == 0 : + start = 0 + end = int(input_tensor.numel()/2) + else : + start = int(input_tensor.numel()/2) + end = input_tensor.numel() + + assert torch.equal(output_tensor, input_tensor.flatten()[start:end]) + Utils.destroy_model_parallel() + +def test_gather_split_1d_tensor(): + Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4) + input_tensor = torch.ones((2,4)).cuda() * rank + actual_output_tensor = util.gather_split_1d_tensor(input_tensor) + if rank %2 == 0: + expected_output_tensor = torch.concat((input_tensor.flatten(), input_tensor.flatten() + 1)) + else : + expected_output_tensor = torch.concat((input_tensor.flatten() - 1, input_tensor.flatten())) + assert(torch.equal(actual_output_tensor, expected_output_tensor)) + Utils.destroy_model_parallel() + +def test_vocab(): + global_vocab_size = 1600 + per_partition_vocab_size = 1600 / Utils.world_size + assert((rank * per_partition_vocab_size, (rank + 1)* per_partition_vocab_size) == (util.VocabUtility.vocab_range_from_per_partition_vocab_size(global_vocab_size // Utils.world_size, rank, Utils.world_size))) + assert((rank * per_partition_vocab_size, (rank + 1)* per_partition_vocab_size) == (util.VocabUtility.vocab_range_from_global_vocab_size(global_vocab_size, rank, Utils.world_size))) + \ No newline at end of file diff --git a/multilinguality_megatron/tests/test_activations.py b/multilinguality_megatron/tests/test_activations.py new file mode 100644 index 0000000000000000000000000000000000000000..3963d65ad3c5cab3b72d62115cd83bc0856026c1 --- /dev/null +++ b/multilinguality_megatron/tests/test_activations.py @@ -0,0 +1,54 @@ +# Extracted from: https://github.com/bigscience-workshop/Megatron-DeepSpeed + +import random +import unittest + +import torch +from torch.nn import functional as F + +from megatron.model.glu_activations import GLU_ACTIVATIONS, geglu, liglu, reglu, swiglu + + +class TestActivations(unittest.TestCase): + def setUp(self): + """setup an input of reasonable size""" + + seed = 11 + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + self.batch_size = random.randint(2, 64) + self.seq_len = random.randint(256, 1025) + self.num_channels = random.randint(1, 384) * 2 + self.x = torch.randn(self.batch_size, self.seq_len, self.num_channels) + self.x1, self.x2 = self.x.chunk(2, dim=-1) + # glu should halve the last dimension + self.output_shape = 
[self.batch_size, self.seq_len, self.num_channels // 2] + + def test_shapes(self): + for activation_fn in GLU_ACTIVATIONS.values(): + output = activation_fn(self.x) + self.assertEqual(list(output.shape), self.output_shape) + + def test_liglu(self): + expected = self.x1 * self.x2 + assert torch.allclose(liglu(self.x), expected) + + def test_geglu(self): + expected = self.x1 * F.gelu(self.x2) + assert torch.allclose(geglu(self.x), expected) + + def test_reglu(self): + expected = self.x1 * F.relu(self.x2) + assert torch.allclose(reglu(self.x), expected) + + def test_swiglu(self): + expected = self.x1 * F.silu(self.x2) + assert torch.allclose(swiglu(self.x), expected) + + +if __name__ == "__main__": + ta = TestActivations() + ta.setUp() + ta.test_reglu() + diff --git a/multilinguality_megatron/tests/test_basic.py b/multilinguality_megatron/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..915d2c1001c3972b86b0b7662504bb18b4534f24 --- /dev/null +++ b/multilinguality_megatron/tests/test_basic.py @@ -0,0 +1,3 @@ +def test_import(): + import megatron + diff --git a/multilinguality_megatron/tests/test_layernorm_order.py b/multilinguality_megatron/tests/test_layernorm_order.py new file mode 100644 index 0000000000000000000000000000000000000000..d91be2f817c8f45b4237e12104c9b5f43f0dc322 --- /dev/null +++ b/multilinguality_megatron/tests/test_layernorm_order.py @@ -0,0 +1,105 @@ +import argparse +import os + +import torch + +import megatron +import megatron.initialize +import megatron.model.utils +import megatron.model.language_model +import megatron.arguments + +import megatron.core.tensor_parallel.random +import megatron.model.transformer + +from megatron.model.enums import AttnMaskType, ModelType, LayerType + + +init_method_std = .02 +num_layers = 2 +layer_number = 1 + +init_method = megatron.model.utils.init_method_normal(init_method_std) +output_layer_init_method = megatron.model.utils.scaled_init_method_normal(init_method_std, num_layers) +layer_type = LayerType.encoder + +""" +--use_bias +--micro_batch_size 2 +--num_layers 2 +--hidden_size 4 +--num_attention_heads 4 +--max_position_embeddings 4 +--encoder_seq_length 4 +--global_batch_size 128 +--train_iters 2000000 +--data_impl mmap +--split 80,10,10 +--distributed_backend nccl +--lr_decay_style constant +--lr 0.0001 +""" + + +if __name__ == "__main__": + # parser = argparse.ArgumentParser(description='Megatron-LM Arguments', + # allow_abbrev=False) + # extra_args_provider = get_the_parser_bro + # extra_args_provider = None + # args = megatron.get_args() + base_parser = megatron.arguments.build_base_parser() + args = base_parser.parse_args(["--micro_batch_size", "4"]) + args_defaults = {"micro_batch_size": 2, + "num_layers": 2, + "hidden_size": 4, + "num_attention_heads": 4, + "max_position_embeddings": 4, + "encoder_seq_length": 4 + } + + args.rank = int(os.getenv('RANK', '0')) + args.world_size = int(os.getenv("WORLD_SIZE", '1')) + + _MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng' + megatron.core.tensor_parallel.random._CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, + 111) + + # megatron.initialize.initialize_megatron(extra_args_provider=None, + # args_defaults=args_defaults) + # args = megatron.arguments.parse_args(extra_args_provider=None) + megatron.arguments.validate_args(args, args_defaults) + # megatron.initialize._compile_dependencies(args) + megatron.fused_kernels.load(args) + + device = torch.device("cuda") + world_size = 1 + # layer2 = 
megatron.model.transformer_matoba.ParallelTransformerLayer(init_method, + # output_layer_init_method, + # layer_number, + # layer_type, + # args=args) + layer1 = megatron.model.transformer.ParallelTransformerLayer(init_method, + output_layer_init_method, + layer_number, + layer_type, + world_size=world_size, + args=args).to(device) + + attention_mask = torch.tensor([[[[False, True, True, True], + [False, False, True, True], + [False, False, False, True], + [False, False, False, False]]]]).to(device) + hidden_states = torch.tensor([[[0.0000, 0.0334, -0.0528, -0.0357], + [-0.0061, -0.0052, 0.0041, -0.0000]], + [[0.0075, 0.0000, -0.0000, -0.0542], + [0.0196, 0.0000, -0.0114, -0.0205]], + [[0.0077, 0.0188, 0.0371, 0.0155], + [0.0009, 0.0042, 0.0135, 0.0034]], + [[-0.0073, -0.0129, 0.0069, 0.0060], + [-0.0000, -0.0000, 0.0174, 0.0210]]]).to(device) + y1 = layer1(hidden_states, attention_mask) + # y2 = layer2(hidden_states, attention_mask) + + # torch.testing.assert_allclose(y1, y2) + + diff --git a/multilinguality_megatron/tests/test_llama_weights.py b/multilinguality_megatron/tests/test_llama_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..0093b4163160b0e106690139820e4f85b21a5313 --- /dev/null +++ b/multilinguality_megatron/tests/test_llama_weights.py @@ -0,0 +1,201 @@ +import re +import pytest +import shutil +from pathlib import Path +from typing import Optional, Iterator +from tempfile import TemporaryDirectory +from subprocess import PIPE, Popen + + +# === +# = Arguments +# === +@pytest.fixture(scope="session", params=[1, 2]) +def llama_version(request) -> int: + return request.param + + +@pytest.fixture(scope="session") +def llama_meta(pytestconfig, llama_version: int ) -> Path: + if llama_version == 1: + return pytestconfig.getoption("llama_path") + return pytestconfig.getoption("llama2_path") + + +@pytest.fixture(scope="session") +def cache_dir(pytestconfig) -> Optional[Path]: + return pytestconfig.getoption("cache_path") + + +@pytest.fixture(scope="session") +def data(pytestconfig) -> Path: + return pytestconfig.getoption("data_path") + + +@pytest.fixture(scope="session") +def vocab(pytestconfig) -> Path: + return pytestconfig.getoption("vocab_path") + +@pytest.fixture(scope="session") +def root_dir(pytestconfig) -> TemporaryDirectory: + prefix = pytestconfig.getoption("tmp_dir") + prefix = None if prefix is None else str(prefix/"tmp") + return TemporaryDirectory(prefix=prefix) + + +# === +# = Paths +# === +@pytest.fixture(scope="session") +def root(root_dir, llama_version: int) -> Path: + return Path(f"{root_dir.name}-{llama_version}") + +@pytest.fixture(scope="session") +def llama_meta2mega(root: Path) -> Path: + return root/"llama-meta2mega" + +@pytest.fixture(scope="session") +def llama_hf2mega(root: Path) -> Path: + return root/"llama-hf2mega" + +@pytest.fixture(scope="session") +def vocab_hf2mega(llama_hf2mega: Path) -> Path: + return llama_hf2mega/"tokenizer.model" + +@pytest.fixture(scope="session") +def llama_sharded(root: Path) -> Path: + return root/"llama-sharded" + +@pytest.fixture(scope="session") +def llama_unsharded(root: Path) -> Path: + return root/"llama-unsharded" + +@pytest.fixture(scope="session") +def llama_mega2hf(root: Path) -> Path: + return root/"llama-mega2hf" + +@pytest.fixture(scope="session") +def llama_unsharded2hf(root: Path) -> Path: + return root/"llama-unsharded2hf" + + +# === +# = Utils +# === +def execute(cmd: list[str]) -> Iterator[str]: + with Popen(cmd, stdout=PIPE, text=True) as proc: + yield from map(lambda line: 
line.strip(), iter(proc.stdout.readline, "")) + assert proc.wait() == 0 + + +def verify_correctness(our_path: Path, cache_dir: Optional[Path], data: Path, + vocab: Path, llama_v: int = 2) -> list[float]: + llama_version = llama_v + model_name = "llama" if llama_version == 1 else "llama2" + distributed_args = ["--nproc_per_node=1", "--nnodes=1", + "--node_rank=0", "--master_addr=localhost", + "--master_port=8001"] + main_args = [f"--model_name={model_name}", f"--load={our_path}", + f"--data_path={data}", "--no_new_tokens", + "--tokenizer_type=SentencePieceTokenizer", + "--model_size=7", f"--vocab_file={vocab}"] + extra_args = ["--hidden_dropout=0.0", "--attention_dropout=0.0", + "--no_bias_dropout_fusion", "--no_bias_gelu_fusion"] + cmd = ["torchrun"] + distributed_args + ["verify_correctness.py"] \ + + main_args + extra_args + if cache_dir is not None: + cmd.append(f"--huggingface_cache={cache_dir}") + if llama_version == 1: + cmd.append("--layernorm_epsilon=1e-6") + + max_errors = [] + for line in execute(cmd): + if any(key in line for key in ["Iteration", "Max abs", "Abs loss"]): + print(line) + if rmatch := re.match(fr"^.*max=([0-9]+\.[0-9]+).*$", line): + max_errors.append(float(rmatch.group(1))) + assert sum(max_errors)/len(max_errors) <= 0.001, "Avg max error exceeds tolerance (0.001)" + return max_errors + + +def shard(load_dir: Path, save_dir: Path, llama_v: int = 2, tp: int = 1, pp: int = 1): + llama_version = llama_v + model_type = "llama" if llama_version == 1 else "llama2" + cmd = ["python", "tools/checkpoint_util.py", f"--load_dir={load_dir}", + f"--save_dir={save_dir}", f"--model_type={model_type}", "--true_vocab_size=32000", + f"--target_tensor_parallel_size={tp}", f"--target_pipeline_parallel_size={pp}"] + ignores = {"---", "...", "Setting"} + for line in execute(cmd): + if all(avoid not in line for avoid in ignores): + print(line) + + +def mega2hf(load_dir: Path, out_dir: Path, llama_v: int): + model_type = "llama" if llama_v == 1 else "llama2" + with Popen(["python", "weights_conversion/megatron_to_hf.py", f"--model={model_type}", + f"--input_dir={load_dir}", f"--output_dir={out_dir}"]) as proc: + assert proc.wait() == 0 + + +# === +# = Tests +# === +@pytest.mark.incremental +class TestLlamaWeights: + def test_path_exists(self, llama_meta: Path): + assert llama_meta.exists() and llama_meta.is_dir() + + def test_meta2mega(self, llama_meta2mega: Path, llama_meta: Path, + llama_version: int, + cache_dir: Optional[Path], data: Path, vocab: Path): + assert not llama_meta2mega.exists() + model_name = "llama" if llama_version == 1 else "llama2" + with Popen(["python", Path("weights_conversion")/"hf_to_megatron.py", + model_name, "--size=7", f"--out={llama_meta2mega}", + f"--cache-dir={llama_meta}"]) as proc: + assert proc.wait() == 0 + assert llama_meta2mega.exists() + if llama_version == 1: + llama_meta = cache_dir + verify_correctness(llama_meta2mega, llama_meta, data, vocab, llama_v=llama_version) + shutil.rmtree(llama_meta2mega) # all future tests will only use llama_hf2mega + + def test_hf2mega(self, llama_hf2mega: Path, cache_dir: Optional[Path], + data: Path, vocab_hf2mega: Path, llama_version: int): + assert not llama_hf2mega.exists() + model_name = "llama" if llama_version == 1 else "llama2" + cmd = ["python", Path("weights_conversion")/"hf_to_megatron.py", + model_name, "--size=7", f"--out={llama_hf2mega}"] + if cache_dir is not None: + cmd.append(f"--cache-dir={cache_dir}") + with Popen(cmd) as proc: + assert proc.wait() == 0 + assert llama_hf2mega.exists() + 
verify_correctness(llama_hf2mega, cache_dir, data, vocab_hf2mega, llama_v=llama_version) + + def test_metallama_verification(self, llama_hf2mega: Path, llama_meta: Path, + llama_version: int, data: Path, vocab: Path): + verify_correctness(llama_hf2mega, llama_meta, data, vocab, llama_v=llama_version) + + def test_shard_unshard(self, llama_hf2mega: Path, llama_sharded: Path, + llama_unsharded: Path, cache_dir: Optional[Path], + llama_version: int, data: Path, vocab_hf2mega: Path): + print("sharding to tp=2, pp=2") + shard(llama_hf2mega, llama_sharded, llama_v=llama_version, tp=2, pp=2) + assert llama_sharded.exists() + print("merging back to tp=1, pp=1") + shard(llama_sharded, llama_unsharded, llama_v=llama_version, tp=1, pp=1) + assert llama_unsharded.exists() + verify_correctness(llama_unsharded, cache_dir, data, vocab_hf2mega, llama_v=llama_version) + + def test_mega2hf(self, llama_hf2mega: Path, llama_mega2hf: Path, + cache_dir: Optional[Path], data: Path, vocab_hf2mega: Path, + llama_version: int ): + mega2hf(llama_hf2mega, llama_mega2hf, llama_version) + verify_correctness(llama_mega2hf, cache_dir, data, vocab_hf2mega, llama_v=llama_version) + + def test_unsharded2hf(self, llama_unsharded: Path, llama_unsharded2hf: Path, + cache_dir: Optional[Path], data: Path, vocab_hf2mega: Path, + llama_version: int): + mega2hf(llama_unsharded, llama_unsharded2hf, llama_version) + verify_correctness(llama_unsharded2hf, cache_dir, data, vocab_hf2mega, llama_v=llama_version) diff --git a/multilinguality_megatron/tests/test_parallel_state.py b/multilinguality_megatron/tests/test_parallel_state.py new file mode 100644 index 0000000000000000000000000000000000000000..349dbd655ce48fd43dfdc92dbdb3cc2802993c5e --- /dev/null +++ b/multilinguality_megatron/tests/test_parallel_state.py @@ -0,0 +1,106 @@ + +import megatron.core.parallel_state as ps +import pytest +from tests.test_utilities import Utils + +rank = Utils.rank +world_size = Utils.world_size + + +def test_initialize__and_destroy_model_parallel(): + with pytest.raises(AssertionError): + assert(ps.initialize_model_parallel()) + Utils.initialize_distributed() + with pytest.raises(RuntimeError): + assert(ps.initialize_model_parallel(tensor_model_parallel_size=2*world_size)) + with pytest.raises(RuntimeError): + assert(ps.initialize_model_parallel(pipeline_model_parallel_size=2*world_size)) + with pytest.raises(RuntimeError): + assert(ps.initialize_model_parallel(pipeline_model_parallel_size=world_size, tensor_model_parallel_size=world_size)) + with pytest.raises(RuntimeError): + assert(ps.initialize_model_parallel(virtual_pipeline_model_parallel_size=2)) + Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4) + + assert(ps.model_parallel_is_initialized()) + assert(ps.get_model_parallel_group() is not None) + assert(ps.get_tensor_model_parallel_group() is not None) + assert(ps.get_pipeline_model_parallel_group() is not None) + assert(ps.get_data_parallel_group() is not None) + Utils.destroy_model_parallel() + assert(ps._MODEL_PARALLEL_GROUP is None) + + +def test_pipeline_parallel_initializations(): + Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4) + assert(ps.get_pipeline_model_parallel_first_rank() == rank % 2 ) + assert(ps.get_data_parallel_src_rank() == rank) + assert(ps.get_pipeline_model_parallel_next_rank() == ((rank + 2) % world_size)) + assert(ps.get_pipeline_model_parallel_prev_rank() == ((rank - 2) % world_size)) + Utils.destroy_model_parallel() + + +def 
test_data_parallel_initializations(): + Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size) + assert(ps.get_data_parallel_src_rank() == rank) + assert(ps.get_data_parallel_world_size() == 1) + assert(ps.get_data_parallel_rank() == 0) + Utils.destroy_model_parallel() + + +def test_tensor_model_parellel_world_size(): + Utils.initialize_model_parallel(tensor_model_parallel_size=world_size) + assert(ps.get_tensor_model_parallel_world_size() == world_size) + ps.set_tensor_model_parallel_world_size(None) + assert(ps.get_tensor_model_parallel_world_size() == world_size) + Utils.destroy_model_parallel() + + +def test_pipeline_model_parallel_world_size(): + Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size) + assert(ps.get_pipeline_model_parallel_world_size() == world_size) + ps.set_pipeline_model_parallel_world_size(None) + assert(ps.get_pipeline_model_parallel_world_size() == world_size) + Utils.destroy_model_parallel() + + +def test_tensor_model_parallel_rank(): + Utils.initialize_model_parallel(tensor_model_parallel_size=world_size) + assert(ps.get_tensor_model_parallel_rank() == rank) + ps.set_tensor_model_parallel_rank(None) + assert(ps.get_tensor_model_parallel_rank() == rank) + Utils.destroy_model_parallel() + + +def test_pipeline_model_parallel_rank(): + Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size) + assert(ps.get_pipeline_model_parallel_rank() == rank) + ps.set_pipeline_model_parallel_rank(None) + assert(ps.get_pipeline_model_parallel_rank() == rank) + Utils.destroy_model_parallel() + + +def test_is_pipeline_first_stage(): + Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size) + assert(ps.is_pipeline_first_stage(ignore_virtual=True) == (rank == 0)) + assert(ps.is_pipeline_first_stage() == (rank == 0)) + Utils.destroy_model_parallel() + + +def test_is_pipeline_last_stage(): + Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size) + assert(ps.is_pipeline_last_stage(ignore_virtual=True) == (rank == world_size-1)) + assert(ps.is_pipeline_last_stage() == (rank == world_size-1)) + Utils.destroy_model_parallel() + + +def test_virtual_pipeline_model_parallel_rank(): + Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size) + ps.set_virtual_pipeline_model_parallel_rank(rank) + assert(ps.get_virtual_pipeline_model_parallel_rank() == rank) + Utils.destroy_model_parallel() + + +def test_get_tensor_model_parallel_src_rank(): + Utils.initialize_model_parallel(tensor_model_parallel_size=world_size) + assert(ps.get_tensor_model_parallel_src_rank() == ((rank // world_size) * world_size)) + Utils.destroy_model_parallel() \ No newline at end of file diff --git a/multilinguality_megatron/tests/test_utilities.py b/multilinguality_megatron/tests/test_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a7b22d42b8e661a249300c04c9c90777fbe48f --- /dev/null +++ b/multilinguality_megatron/tests/test_utilities.py @@ -0,0 +1,30 @@ +import os +import torch +import megatron.core.parallel_state as ps + + +class Utils: + world_size = torch.cuda.device_count() + rank = int(os.environ['LOCAL_RANK']) + + @staticmethod + def initialize_distributed(): + print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}') + torch.cuda.set_device(Utils.rank % torch.cuda.device_count()) + init_method = 'tcp://' + master_ip = os.getenv('MASTER_ADDR', 'localhost') + master_port = os.getenv('MASTER_PORT', '6000') + init_method += master_ip + ':' + 
master_port + torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method) + + @staticmethod + def destroy_model_parallel(): + ps.destroy_model_parallel() + torch.distributed.barrier() + + @staticmethod + def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None): + ps.destroy_model_parallel() + if not torch.distributed.is_initialized(): + Utils.initialize_distributed() + ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank) \ No newline at end of file diff --git a/multilinguality_megatron/tests/test_utils.py b/multilinguality_megatron/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c15f2ebd4fb86cebfb4fcdb3db6db77f8afaada2 --- /dev/null +++ b/multilinguality_megatron/tests/test_utils.py @@ -0,0 +1,43 @@ +import pytest +import torch +import numpy as np + +import megatron.core.utils as util + + +def test_divide_properly(): + assert util.divide(4, 2) == 2 + + +def test_divide_improperly(): + with pytest.raises(AssertionError): + util.divide(4, 5) + + +def test_global_memory_buffer(): + global_memory_buffer = util.GlobalMemoryBuffer() + obtained_tensor = global_memory_buffer.get_tensor((3, 2), torch.float32, "test_tensor") + expected_tensor = torch.empty((3, 2), dtype=torch.float32, device=torch.cuda.current_device()) + assert torch.equal(obtained_tensor, expected_tensor) + + +def test_make_viewless_tensor(): + inp = torch.rand((3, 4)) + assert(torch.equal(inp, util.make_viewless_tensor(inp, True, True))) + assert(torch.equal(inp, util.make_viewless_tensor(inp, True, False))) + + +def test_safely_set_viewless_tensor_data(): + tensor = torch.zeros((3, 4)) + new_data_tensor = torch.tensor(np.random.rand(3,4)) + util.safely_set_viewless_tensor_data(tensor, new_data_tensor) + assert(torch.equal(tensor, new_data_tensor)) + + +def test_assert_viewless_tensor(): + tensor = torch.rand((3, 4)) + assert(torch.equal(util.assert_viewless_tensor(tensor), tensor)) + input_tensor_list=[tensor, tensor, tensor] + output_tensor_list = util.assert_viewless_tensor(input_tensor_list) + for inp,out in zip(input_tensor_list, output_tensor_list): + assert(torch.equal(inp, out)) diff --git a/multilinguality_megatron/tests/test_wandb.py b/multilinguality_megatron/tests/test_wandb.py new file mode 100644 index 0000000000000000000000000000000000000000..6531103f07ea4768d58511de6e2f4f3fafb1ce3e --- /dev/null +++ b/multilinguality_megatron/tests/test_wandb.py @@ -0,0 +1,9 @@ +from megatron.arguments import parse_args +from megatron.global_vars import _set_tensorboard_writer,get_tensorboard_writer +if __name__ == "__main__": + args=parse_args() + args.wandb_logger="True" + args.wandb_project="test-logger" + _set_tensorboard_writer(args) + + writer=get_tensorboard_writer() diff --git a/multilinguality_megatron/tools/__pycache__/checkpoint_loader_megatron.cpython-39.pyc b/multilinguality_megatron/tools/__pycache__/checkpoint_loader_megatron.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fc5813a4f7013b6d6990a25940b3a2a49278af8 Binary files /dev/null and b/multilinguality_megatron/tools/__pycache__/checkpoint_loader_megatron.cpython-39.pyc differ diff --git a/multilinguality_megatron/tools/__pycache__/checkpoint_saver_megatron.cpython-39.pyc 
b/multilinguality_megatron/tools/__pycache__/checkpoint_saver_megatron.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be31451e352040bc08dc11245cfd4b4ed4f0f0b2 Binary files /dev/null and b/multilinguality_megatron/tools/__pycache__/checkpoint_saver_megatron.cpython-39.pyc differ diff --git a/multilinguality_megatron/tools/checkpoint_loader_megatron.py b/multilinguality_megatron/tools/checkpoint_loader_megatron.py new file mode 100644 index 0000000000000000000000000000000000000000..0cdddeeb0aecbd3073a83ef6148fafccdc4f422c --- /dev/null +++ b/multilinguality_megatron/tools/checkpoint_loader_megatron.py @@ -0,0 +1,391 @@ +import json +import os +import sys +import types + +import torch + + +def add_arguments(parser): + group = parser.add_argument_group(title="Megatron loader") + + group.add_argument( + "--true_vocab_size", + type=int, + default=None, + help="original size of vocab, if specified will trim padding from embedding table.", + ) + group.add_argument( + "--vocab_file", + type=str, + default=None, + help="Path to the vocab file. If specified will use this to get vocab size and " + "trim padding from the embedding table.", + ) + group.add_argument( + "--megatron_path", + type=str, + default=None, + help="Base directory of deepspeed repository", + ) + + +def _load_checkpoint(queue, args): + # Search in directory above this + sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) + ) + if args.megatron_path is not None: + sys.path.insert(0, args.megatron_path) + + try: + import megatron.arguments + from megatron import fused_kernels + from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint + from megatron.core import mpu + from megatron.global_vars import set_global_variables + from megatron.model import ModelType, module + from megatron.model.enums import PositionEmbeddingType + except ModuleNotFoundError: + print( + "Unable to import Megatron, please specify the path to Megatron using --megatron_path. Exiting." + ) + queue.put("exit") + exit(1) + + # We want all arguments to come from us + sys.argv = [ + "script.py", + "--no_masked_softmax_fusion", + "--no_bias_gelu_fusion", + "--no_bias_dropout_fusion", + "--use_cpu_initialization", + "--micro_batch_size", + "1", + "--no_load_optim", + "--no_load_rng", + "--no_save_optim", + "--no_save_rng", + "--no_initialization", + "--load", + args.load_dir, + ] + + if args.bf16: + sys.argv += ["--bf16"] + + margs = megatron.arguments.parse_args() + margs = load_args_from_checkpoint(margs) + + # Arguments do sanity checks on the world size, but we don't care, + # so trick it into thinking we are plenty of processes + margs.world_size = ( + margs.tensor_model_parallel_size * margs.pipeline_model_parallel_size + ) + + margs = megatron.arguments.validate_args(margs) + + def check_for_arg(arg_name): + if getattr(margs, arg_name, None) is None: + print(f"Checkpoint does not specify the argument {arg_name}. 
Exiting.") + print(f"Arguments: {margs}") + queue.put("exit") + exit(1) + + check_for_arg("tensor_model_parallel_size") + check_for_arg("pipeline_model_parallel_size") + check_for_arg("num_layers") + check_for_arg("hidden_size") + check_for_arg("seq_length") + check_for_arg("num_attention_heads") + check_for_arg("max_position_embeddings") + check_for_arg("tokenizer_type") + check_for_arg("iteration") + check_for_arg("params_dtype") + if args.model_type == "BERT": + check_for_arg("bert_binary_head") + + # Determine how to make our models + if args.model_type == "GPT": + from pretrain_gpt import model_provider + + margs.model_type = ModelType.encoder_or_decoder + elif args.model_type in {"falcon", "llama", "llama2", "llama3", "codellama", "mistral", "gemma"}: + from finetune import model_provider + + margs.model_name = args.model_type + margs.model_type = ModelType.encoder_or_decoder + if args.model_type=="gemma": + margs.kv_channels = args.kv_channels + elif args.model_type == "BERT": + from pretrain_bert import model_provider + + margs.model_type = ModelType.encoder_or_decoder + else: + raise Exception(f"unrecognized model type: {args.model_type}") + + # supress warning about torch.distributed not being initialized + module.MegatronModule.embedding_warning_printed = True + + consumed_train_samples = None + consumed_valid_samples = None + + def _get_models(count, dtype, pre_process, post_process): + nonlocal consumed_train_samples + nonlocal consumed_valid_samples + models = [] + for rank in range(count): + mpu.set_tensor_model_parallel_rank(rank) + model_ = [model_provider(pre_process, post_process).to(dtype)] + margs.consumed_train_samples = 0 + margs.consumed_valid_samples = 0 + load_checkpoint(model_, None, None) + assert len(model_) == 1 + model_ = model_[0] + if consumed_train_samples is not None: + assert margs.consumed_train_samples == consumed_train_samples + else: + consumed_train_samples = margs.consumed_train_samples + if consumed_valid_samples is not None: + assert margs.consumed_valid_samples == consumed_valid_samples + else: + consumed_valid_samples = margs.consumed_valid_samples + models.append(model_) + return models + + if margs.num_layers_per_virtual_pipeline_stage is not None: + print("Model with an interleaved pipeline schedule are not yet supported.") + queue.put("exit") + exit(1) + + set_global_variables(margs) + mpu._DATA_PARALLEL_GROUP = 0 + mpu.set_tensor_model_parallel_world_size(margs.tensor_model_parallel_size) + mpu.set_pipeline_model_parallel_world_size(margs.pipeline_model_parallel_size) + fused_kernels.load(margs) + + # Get true (non-padded) vocab size + if args.true_vocab_size is not None: + true_vocab_size = args.true_vocab_size + elif args.vocab_file is not None: + vocab = json.load(open(args.vocab_file)) + true_vocab_size = len(vocab) + if args.true_vocab_size is not None and true_vocab_size != args.true_vocab_size: + print( + "Both --true_vocab_size and --vocab_file specified and the vocab size does not match, aborting." 
+ ) + queue.put("exit") + exit(1) + else: + true_vocab_size = None + + # short aliases + tp_size = margs.tensor_model_parallel_size + pp_size = margs.pipeline_model_parallel_size + + # metadata + md = types.SimpleNamespace() + md.model_type = args.model_type + md.num_layers = margs.num_layers + md.hidden_size = margs.hidden_size + md.seq_length = margs.seq_length + md.num_attention_heads = margs.num_attention_heads + md.max_position_embeddings = margs.max_position_embeddings + md.tokenizer_type = margs.tokenizer_type + md.iteration = margs.iteration + if args.model_type == "BERT": + md.bert_binary_head = margs.bert_binary_head + md.previous_tensor_parallel_size = margs.tensor_model_parallel_size + md.previous_pipeline_parallel_size = margs.pipeline_model_parallel_size + md.true_vocab_size = true_vocab_size + md.make_vocab_size_divisible_by = margs.make_vocab_size_divisible_by + md.num_attention_heads_kv = margs.num_attention_heads_kv + md.parallel_attn = margs.parallel_attn + md.parallel_layernorm = margs.parallel_layernorm + md.use_flash_attn = margs.use_flash_attn + md.hidden_dropout = margs.hidden_dropout + md.lima_dropout = margs.lima_dropout + md.use_bias = margs.use_bias + md.use_rms_norm = margs.use_rms_norm + md.ffn_hidden_size = margs.ffn_hidden_size + md.glu_activation = margs.glu_activation + md.tie_embed_logits = margs.tie_embed_logits + md.params_dtype = margs.params_dtype + md.sliding_window_size = margs.sliding_window_size + md.kv_channels = margs.kv_channels + if margs.position_embedding_type == PositionEmbeddingType.absolute: + md.position_embedding_type = "absolute" + elif margs.position_embedding_type == PositionEmbeddingType.rotary: + md.position_embedding_type = "rotary" + else: + raise KeyError(f"Unknown position embedding {margs.position_embedding_type}") + + # Get first pipe stage + mpu.set_pipeline_model_parallel_rank(0) + post_process = pp_size == 1 + models = _get_models(tp_size, md.params_dtype, True, post_process) + models_init = models + + md.consumed_train_samples = consumed_train_samples + md.consumed_valid_samples = consumed_valid_samples + queue.put(md) + + def queue_put(name, msg): + print(f"sending {name}") + msg["name"] = name + queue.put(msg) + + message = { + "word embeddings": torch.cat( + [ + models[tp_rank].language_model.embedding.word_embeddings.weight.data + for tp_rank in range(tp_size) + ], + dim=0, + ) + } + if margs.position_embedding_type == PositionEmbeddingType.absolute: + message["position embeddings"] = models[ + 0 + ].language_model.embedding.position_embeddings.weight.data + + queue_put("embeddings", message) + + # Get last pipe stage if lm_head needs to be sent + if not margs.tie_embed_logits: + mpu.set_pipeline_model_parallel_rank(pp_size - 1) + pre_process = pp_size == 1 + if pre_process: + models = models_init + else: + models = _get_models(tp_size, md.params_dtype, pre_process, True) + models_final = models + + queue_put( + "lm_head", + { + "lm_head": torch.cat( + [ + models[tp_rank].language_model.lm_head.data + for tp_rank in range(tp_size) + ] + ) + }, + ) + + total_layer_num = 0 + for pp_rank in range(pp_size): + # For later pipeline parallel ranks, make the new models + mpu.set_pipeline_model_parallel_rank(pp_rank) + post_process = pp_rank == pp_size - 1 + if pp_rank == 0: + models = models_init + elif pp_rank == pp_size - 1 and not md.tie_embed_logits: + models = models_final + else: + models = _get_models(tp_size, md.params_dtype, False, post_process) + + for layer_num in 
range(len(models[0].language_model.encoder.layers)): + message = {} + + # Get non-parallel tensors from tp_rank 0 + layer = models[0].language_model.encoder.layers[layer_num] + message["input layernorm weight"] = layer.input_layernorm.weight.data + if margs.parallel_layernorm: + message["mlp layernorm weight"] = layer.mlp_layernorm.weight.data + if not margs.use_rms_norm: + message["input layernorm bias"] = layer.input_layernorm.bias.data + if margs.parallel_layernorm: + message["mlp layernorm bias"] = layer.mlp_layernorm.bias.data + if not margs.parallel_attn: + message[ + "post layernorm weight" + ] = layer.post_attention_layernorm.weight.data + if not margs.use_rms_norm: + message[ + "post layernorm bias" + ] = layer.post_attention_layernorm.bias.data + if margs.use_bias: + message["dense bias"] = layer.self_attention.dense.bias.data + message["mlp l1 bias"] = layer.mlp.dense_4h_to_h.bias.data + + # Grab all parallel tensors for this layer + qkv_weight = [] + qkv_bias = [] + dense_weight = [] + mlp_l0_weight = [] + mlp_l0_bias = [] + mlp_l1_weight = [] + for tp_rank, model in enumerate(models): + layer = model.language_model.encoder.layers[layer_num] + qkv_weight.append(layer.self_attention.query_key_value.weight.data) + if margs.use_bias: + qkv_bias.append(layer.self_attention.query_key_value.bias.data) + dense_weight.append(layer.self_attention.dense.weight.data) + mlp_l0_weight.append(layer.mlp.dense_h_to_4h.weight.data) + if margs.use_bias: + mlp_l0_bias.append(layer.mlp.dense_h_to_4h.bias.data) + mlp_l1_weight.append(layer.mlp.dense_4h_to_h.weight.data) + + # concat them + message["qkv weight"] = torch.cat(qkv_weight, dim=0) + if margs.use_bias: + message["qkv bias"] = torch.cat(qkv_bias, dim=0) + message["dense weight"] = torch.cat(dense_weight, dim=1) + if margs.glu_activation is None: + message["mlp l0 weight"] = torch.cat(mlp_l0_weight, dim=0) + else: + up_weights = [] + gate_weights = [] + for weight in mlp_l0_weight: + up, gate = torch.chunk(weight, 2, dim=0) + up_weights.append(up) + gate_weights.append(gate) + message["mlp l0 weight"] = torch.cat(up_weights + gate_weights, dim=0) + if margs.use_bias: + message["mlp l0 bias"] = torch.cat(mlp_l0_bias, dim=0) + message["mlp l1 weight"] = torch.cat(mlp_l1_weight, dim=1) + + queue_put(f"transformer layer {total_layer_num}", message) + + total_layer_num = total_layer_num + 1 + + # Send final layernorm from tp_rank 0 + message = {"weight": models[0].language_model.encoder.final_layernorm.weight.data} + if not margs.use_rms_norm: + message["bias"] = models[0].language_model.encoder.final_layernorm.bias.data + queue_put("final layernorm", message) + + # Send BERT lm head and binary head if it exists + if md.model_type == "BERT": + message = { + "weight": models[0].language_model.pooler.dense.weight.data, + "bias": models[0].language_model.pooler.dense.bias.data, + } + queue_put("pooler", message) + + message = { + "dense weight": models[0].lm_head.dense.weight.data, + "dense bias": models[0].lm_head.dense.bias.data, + "layernorm weight": models[0].lm_head.layernorm.weight.data, + "layernorm bias": models[0].lm_head.layernorm.bias.data, + } + queue_put("lm head", message) + + if args.model_type == "BERT" and md.bert_binary_head: + print("Sending BERT Binary head") + queue.put("binary head") + message = { + "weight": models[0].binary_head.weight.data, + "bias": models[0].binary_head.bias.data, + } + queue_put("binary head", message) + queue.put("done") + + +def load_checkpoint(queue, args): + try: + _load_checkpoint(queue, args) + 
except: + queue.put("exit") + raise diff --git a/multilinguality_megatron/tools/checkpoint_saver_megatron.py b/multilinguality_megatron/tools/checkpoint_saver_megatron.py new file mode 100644 index 0000000000000000000000000000000000000000..7597a15542854d6298119dd5fd1fe017305369df --- /dev/null +++ b/multilinguality_megatron/tools/checkpoint_saver_megatron.py @@ -0,0 +1,513 @@ +import os +import sys + +import torch + + +def add_arguments(parser): + group = parser.add_argument_group(title="Megatron saver") + group.add_argument( + "--megatron_path", + type=str, + default=None, + help="Base directory of Megatron repository", + ) + group.add_argument( + "--target_tensor_parallel_size", + type=int, + help="Target tensor model parallel size, defaults to the tensor parallel size " + "in the input checkpoint if provided by the loader, otherwise to 1", + ) + group.add_argument( + "--target_pipeline_parallel_size", + type=int, + help="Target tensor model parallel size, default to the pipeline parall size " + "in the input checkpoint if provided by the loader, otherwise to 1", + ) + + +def save_checkpoint(queue, args): + # Search in directory above this + sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) + ) + if args.megatron_path is not None: + sys.path.insert(0, args.megatron_path) + + try: + import megatron.arguments + from megatron import fused_kernels + from megatron.checkpointing import save_checkpoint + from megatron.core import mpu + from megatron.global_vars import get_args, set_global_variables + from megatron.model import ModelType + from megatron.model.enums import PositionEmbeddingType + from megatron.tokenizer.tokenizer import _vocab_size_with_padding + except ModuleNotFoundError: + print( + "Unable to import Megatron, please specify the path to Megatron using --megatron_path. Exiting." + ) + exit(1) + + def queue_get(name=None): + val = queue.get() + if val == "exit": + print("Loader exited, exiting saver") + exit(1) + if name is not None and args.checking and val["name"] != name: + val_name = val["name"] + print( + f'Unexpected message. Expecting "{name}" but got "{val_name}". Exiting saver.' + ) + exit(1) + if name is not None: + print(f"received {name}") + return val + + def check_message(msg): + if not args.checking: + return + msg_name = msg.pop("name") + if len(msg.keys()) > 0: + print(f"Unexpected values in {msg_name}:") + for key in msg.keys(): + print(f" {key}") + print( + f"Exiting. If you want to ignore this, use the argument --no_checking." + ) + exit(1) + + md = queue_get() + + if args.target_tensor_parallel_size is None: + if hasattr(md, "previous_tensor_parallel_size"): + args.target_tensor_parallel_size = md.previous_tensor_parallel_size + else: + print( + "loader did not provide a tensor parallel size and --target_tensor_parallel_size not provided on command line. " + "Default to 1." + ) + args.target_tensor_parallel_size = 1 + + if args.target_pipeline_parallel_size is None: + if hasattr(md, "previous_pipeline_parallel_size"): + args.target_pipeline_parallel_size = md.previous_pipeline_parallel_size + else: + print( + "loader did not provide a pipeline parallel size and --target_pipeline_parallel_size not provided on command line. " + "Default to 1." 
+ ) + args.target_pipeline_parallel_size = 1 + + # Arguments do sanity checks on the world size, but we don't care, + # so trick it into thinking we are plenty of processes + if ( + args.target_tensor_parallel_size is not None + and args.target_pipeline_parallel_size is not None + ): + os.environ[ + "WORLD_SIZE" + ] = f"{args.target_tensor_parallel_size * args.target_pipeline_parallel_size}" + + # We want all arguments to come from us + sys.argv = [ + "script.py", + "--num_layers", + str(md.num_layers), + "--hidden_size", + str(md.hidden_size), + "--seq_length", + str(md.seq_length), + "--num_attention_heads", + str(md.num_attention_heads), + "--max_position_embeddings", + str(md.max_position_embeddings), + "--tokenizer_type", + str(md.tokenizer_type), + "--tensor_model_parallel_size", + str(args.target_tensor_parallel_size), + "--pipeline_model_parallel_size", + str(args.target_pipeline_parallel_size), + "--no_masked_softmax_fusion", + "--no_bias_gelu_fusion", + "--no_bias_dropout_fusion", + "--use_cpu_initialization", + "--micro_batch_size", + "1", + "--no_load_optim", + "--no_load_rng", + "--no_save_optim", + "--no_save_rng", + "--no_initialization", + "--save_interval", + "1", + "--hidden_dropout", + str(md.hidden_dropout), + "--position_embedding_type", + str(md.position_embedding_type), + "--save", + args.save_dir, + "--ffn_hidden_size", + str(md.ffn_hidden_size), + "--kv_channels", + str(md.kv_channels) + ] + if md.num_attention_heads_kv is not None: + sys.argv += ["--num_attention_heads_kv", str(md.num_attention_heads_kv)] + if md.parallel_attn: + sys.argv += ["--parallel_attn"] + if md.parallel_layernorm: + sys.argv += ["--parallel_layernorm"] + if md.use_flash_attn: + sys.argv += ["--use_flash_attn"] + if md.glu_activation is not None: + sys.argv += ["--glu_activation", str(md.glu_activation)] + if md.use_rms_norm: + sys.argv += ["--use_rms_norm"] + if not md.tie_embed_logits: + sys.argv += ["--no_tie_embed_logits"] + if md.lima_dropout: + sys.argv += ["--lima_dropout"] + + if md.make_vocab_size_divisible_by is not None: + sys.argv.extend( + ["--make_vocab_size_divisible_by", str(md.make_vocab_size_divisible_by)] + ) + if md.params_dtype == torch.float16: + sys.argv.append("--fp16") + elif md.params_dtype == torch.bfloat16: + sys.argv.append("--bf16") + + margs = megatron.arguments.parse_args() + megatron.arguments.validate_args(margs) + set_global_variables(margs) + margs = get_args() + + if hasattr(md, "consumed_train_samples"): + margs.consumed_train_samples = md.consumed_train_samples + margs.consumed_valid_samples = md.consumed_valid_samples + print( + f"Setting consumed_train_samples to {margs.consumed_train_samples}" + f" and consumed_valid_samples to {margs.consumed_valid_samples}" + ) + else: + print("consumed_train_samples not provided.") + + # Determine how to make our models + if md.model_type == "GPT": + from pretrain_gpt import model_provider + + margs.model_type = ModelType.encoder_or_decoder + elif md.model_type == "BERT": + from pretrain_bert import model_provider + + margs.model_type = ModelType.encoder_or_decoder + elif md.model_type in {"falcon", "llama", "llama2", "llama3", "codellama", "mistral", "gemma"}: + from finetune import model_provider + + margs.model_name = args.model_type + margs.model_type = ModelType.encoder_or_decoder + else: + raise Exception(f"unrecognized model type: {args.model_type}") + + def _get_models(count, dtype, pre_process, post_process): + models = [ + model_provider(pre_process, post_process).to(dtype) for _ in range(count) + ] + 
return models + + # fake initializing distributed + mpu._DATA_PARALLEL_GROUP = 0 + mpu.set_tensor_model_parallel_world_size(args.target_tensor_parallel_size) + mpu.set_pipeline_model_parallel_world_size(args.target_pipeline_parallel_size) + mpu.set_tensor_model_parallel_rank(0) + mpu.set_pipeline_model_parallel_rank(0) + fused_kernels.load(margs) + + # Embeddings + embeddings_msg = queue_get("embeddings") + + if md.position_embedding_type == PositionEmbeddingType.absolute: + pos_embed = embeddings_msg.pop("position embeddings") + else: + pos_embed = None + orig_word_embed = embeddings_msg.pop("word embeddings") + check_message(embeddings_msg) + + # Get lm_head, if available + if not md.tie_embed_logits: + lm_head = queue_get("lm_head").pop("lm_head") + + # Deal with padding + if md.true_vocab_size is not None: + # figure out what our padded vocab size is + orig_vocab_size = orig_word_embed.shape[0] + margs.padded_vocab_size = _vocab_size_with_padding(md.true_vocab_size, margs) + + # Cut out extra padding we don't need + if orig_vocab_size > margs.padded_vocab_size: + full_word_embed = orig_word_embed[0 : margs.padded_vocab_size, :] + if not md.tie_embed_logits: + full_lm_head = lm_head[: margs.padded_vocab_size, :] + + # Expanding embedding to larger size by replicating final entry + elif orig_vocab_size < margs.padded_vocab_size: + padding_size = margs.padded_vocab_size - orig_vocab_size + + full_word_embed = torch.cat( + ( + orig_word_embed, + orig_word_embed[-1].unsqueeze(0).expand(padding_size, -1), + ) + ) + + if not md.tie_embed_logits: + full_lm_head = torch.cat( + [lm_head, lm_head[-1].unsqueeze(0).expand(padding_size, -1)] + ) + + # Same size! + else: + full_word_embed = orig_word_embed + if not md.tie_embed_logits: + full_lm_head = lm_head + else: + print( + "Original vocab size not specified, leaving embedding table as-is. " + "If you've changed the tensor parallel size this could cause problems." 
+ ) + margs.padded_vocab_size = orig_word_embed.shape[0] + full_word_embed = orig_word_embed + if not md.tie_embed_logits: + full_lm_head = lm_head + + # Split into new tensor model parallel sizes + out_word_embed = torch.chunk( + full_word_embed, args.target_tensor_parallel_size, dim=0 + ) + if not md.tie_embed_logits: + out_lm_head = torch.chunk(full_lm_head, args.target_tensor_parallel_size, dim=0) + + # Make models for first pipeline stage and fill in embeddings + mpu.set_pipeline_model_parallel_rank(0) + post_process = args.target_pipeline_parallel_size == 1 + models = _get_models( + args.target_tensor_parallel_size, md.params_dtype, True, post_process + ) + models_init = models + for tp_rank, model in enumerate(models): + print( + f"word embeddings shape {model.language_model.embedding.word_embeddings.weight.shape}" + ) + model.language_model.embedding.word_embeddings.weight.data.copy_( + out_word_embed[tp_rank] + ) + if pos_embed is not None: + model.language_model.embedding.position_embeddings.weight.data.copy_( + pos_embed + ) + + # Make models for last pipeline stage and fill in lm_head, if necessary + if not md.tie_embed_logits: + mpu.set_pipeline_model_parallel_rank(args.target_pipeline_parallel_size - 1) + pre_process = args.target_pipeline_parallel_size == 1 + if pre_process: + models = models_init + else: + models = _get_models( + args.target_tensor_parallel_size, md.params_dtype, pre_process, True + ) + models_final = models + for tp_rank, model in enumerate(models): + print(f"lm_head shape {model.language_model.lm_head.shape}") + model.language_model.lm_head.data.copy_(out_lm_head[tp_rank]) + + # Transformer layers + total_layer_num = 0 + for pp_rank in range(args.target_pipeline_parallel_size): + # For later pipeline parallel ranks, make the new models + mpu.set_pipeline_model_parallel_rank(pp_rank) + post_process = pp_rank == args.target_pipeline_parallel_size - 1 + if pp_rank == 0: + models = models_init + elif ( + pp_rank == args.target_pipeline_parallel_size - 1 + and not md.tie_embed_logits + ): + models = models_final + else: + models = _get_models( + args.target_tensor_parallel_size, md.params_dtype, False, post_process + ) + + for layer in range(len(models[0].language_model.encoder.layers)): + msg = queue_get(f"transformer layer {total_layer_num}") + + # duplicated tensors + input_layernorm_weight = msg.pop("input layernorm weight") + if md.parallel_layernorm: + mlp_layernorm_weight = msg.pop("mlp layernorm weight") + if not md.use_rms_norm: + input_layernorm_bias = msg.pop("input layernorm bias") + if md.parallel_layernorm: + mlp_layernorm_bias = msg.pop("mlp layernorm bias") + if not md.parallel_attn: + post_layernorm_weight = msg.pop("post layernorm weight") + if not md.use_rms_norm: + post_layernorm_bias = msg.pop("post layernorm bias") + if md.use_bias: + dense_bias = msg.pop("dense bias") + mlp_l1_bias = msg.pop("mlp l1 bias") + + # Split up the parallel tensors + qkv_weight = torch.chunk( + msg.pop("qkv weight"), args.target_tensor_parallel_size, dim=0 + ) + if md.use_bias: + qkv_bias = torch.chunk( + msg.pop("qkv bias"), args.target_tensor_parallel_size, dim=0 + ) + dense_weight = torch.chunk( + msg.pop("dense weight"), args.target_tensor_parallel_size, dim=1 + ) + if md.glu_activation is None: + mlp_l0_weight = torch.chunk( + msg.pop("mlp l0 weight"), args.target_tensor_parallel_size, dim=0 + ) + else: + up_weight, gate_weight = torch.chunk(msg.pop("mlp l0 weight"), 2, dim=0) + up_weights = torch.chunk( + up_weight, args.target_tensor_parallel_size, dim=0 
+ ) + gate_weights = torch.chunk( + gate_weight, args.target_tensor_parallel_size, dim=0 + ) + mlp_l0_weight = [ + torch.cat([up_weight, gate_weight], dim=0) + for up_weight, gate_weight in zip(up_weights, gate_weights) + ] + if md.use_bias: + mlp_l0_bias = torch.chunk( + msg.pop("mlp l0 bias"), args.target_tensor_parallel_size, dim=0 + ) + mlp_l1_weight = torch.chunk( + msg.pop("mlp l1 weight"), args.target_tensor_parallel_size, dim=1 + ) + + # Save them to the model + for tp_rank in range(args.target_tensor_parallel_size): + l = models[tp_rank].language_model.encoder.layers[layer] + l.input_layernorm.weight.data.copy_(input_layernorm_weight) + if md.parallel_layernorm: + l.mlp_layernorm.weight.data.copy_(mlp_layernorm_weight) + if not md.use_rms_norm: + l.input_layernorm.bias.data.copy_(input_layernorm_bias) + if md.parallel_layernorm: + l.mlp_layernorm.bias.data.copy_(mlp_layernorm_bias) + l.self_attention.query_key_value.weight.data.copy_(qkv_weight[tp_rank]) + l.self_attention.dense.weight.data.copy_(dense_weight[tp_rank]) + if md.use_bias: + l.self_attention.query_key_value.bias.data.copy_(qkv_bias[tp_rank]) + l.self_attention.dense.bias.data.copy_(dense_bias) + if not md.parallel_attn: + l.post_attention_layernorm.weight.data.copy_(post_layernorm_weight) + if not md.use_rms_norm: + l.post_attention_layernorm.bias.data.copy_(post_layernorm_bias) + l.mlp.dense_h_to_4h.weight.data.copy_(mlp_l0_weight[tp_rank]) + l.mlp.dense_4h_to_h.weight.data.copy_(mlp_l1_weight[tp_rank]) + if md.use_bias: + l.mlp.dense_h_to_4h.bias.data.copy_(mlp_l0_bias[tp_rank]) + l.mlp.dense_4h_to_h.bias.data.copy_(mlp_l1_bias) + total_layer_num = total_layer_num + 1 + check_message(msg) + + if post_process: + msg = queue_get("final layernorm") + final_layernorm_weight = msg.pop("weight") + if not md.use_rms_norm: + final_layernorm_bias = msg.pop("bias") + for tp_rank in range(args.target_tensor_parallel_size): + models[ + tp_rank + ].language_model.encoder.final_layernorm.weight.data.copy_( + final_layernorm_weight + ) + if not md.use_rms_norm: + models[ + tp_rank + ].language_model.encoder.final_layernorm.bias.data.copy_( + final_layernorm_bias + ) + if pp_rank != 0 and md.tie_embed_logits: + # Copy word embeddings to final pipeline rank + models[tp_rank].word_embeddings.weight.data.copy_( + out_word_embed[tp_rank] + ) + del final_layernorm_weight + if not md.use_rms_norm: + del final_layernorm_bias + check_message(msg) + + msg = queue_get() + if msg != "done" and msg["name"] == "pooler": + if not hasattr(models[0].language_model, "pooler"): + print("ERROR: got a pooler, but model does not have one") + exit(1) + print("received pooler") + pooler_weight = msg.pop("weight") + pooler_bias = msg.pop("bias") + for tp_rank in range(args.target_tensor_parallel_size): + models[tp_rank].language_model.pooler.dense.weight.data.copy_( + pooler_weight + ) + models[tp_rank].language_model.pooler.dense.bias.data.copy_( + pooler_bias + ) + del pooler_weight + del pooler_bias + check_message(msg) + msg = queue_get() + + if msg != "done" and msg["name"] == "lm head": + if not hasattr(models[0], "lm_head"): + print("ERROR: got an lm head, but model does not have one") + exit(1) + print("received lm head") + lm_head_dense_weight = msg.pop("dense weight") + lm_head_dense_bias = msg.pop("dense bias") + lm_head_layernorm_weight = msg.pop("layernorm weight") + lm_head_layernorm_bias = msg.pop("layernorm bias") + for tp_rank in range(args.target_tensor_parallel_size): + models[tp_rank].lm_head.dense.weight.data.copy_( + 
lm_head_dense_weight + ) + models[tp_rank].lm_head.dense.bias.data.copy_(lm_head_dense_bias) + models[tp_rank].lm_head.layernorm.weight.data.copy_( + lm_head_layernorm_weight + ) + models[tp_rank].lm_head.layernorm.bias.data.copy_( + lm_head_layernorm_bias + ) + check_message(msg) + msg = queue_get() + + if msg != "done" and msg["name"] == "binary head": + if not hasattr(models[0], "binary_head"): + print("ERROR: got a binary head, but model does not have one") + exit(1) + print("received binary head") + binary_head_weight = msg.pop("weight") + binary_head_bias = msg.pop("bias") + for tp_rank in range(args.target_tensor_parallel_size): + models[tp_rank].binary_head.weight.data.copy_(binary_head_weight) + models[tp_rank].binary_head.bias.data.copy_(binary_head_bias) + check_message(msg) + msg = queue_get() + + if msg != "done": + print("ERROR: got some more data but was expecting to be done") + + for tp_rank in range(args.target_tensor_parallel_size): + mpu.set_tensor_model_parallel_rank(tp_rank) + save_checkpoint(md.iteration, [models[tp_rank]], None, None) + print("Done!") diff --git a/multilinguality_megatron/tools/checkpoint_util.py b/multilinguality_megatron/tools/checkpoint_util.py new file mode 100644 index 0000000000000000000000000000000000000000..08aee49cb57bb8d21bc94b524d6c44a5692b87b2 --- /dev/null +++ b/multilinguality_megatron/tools/checkpoint_util.py @@ -0,0 +1,185 @@ +import importlib +import sys + +import torch.multiprocessing as mp + +# A loader is a python file with at least two functions +# - add_arguments - takes in a parser and adds any arguments needed +# - load_checkpoint - takes in the queue and parsed arguments + +# A saver is similar but has save_checkpoint instead of +# load_checkpoint + +# The loader and saver process are each given a queue, the loader +# should load the checkpoint and send the weights in messages in the +# following order, the saver should receive them in this order and +# save the checkpoints. A message consists of a python dictionary with +# a "name" for error checking and an entry for each tensor as +# indicated below. Note that the weight sent over the queue are the +# full model weights, nothing split. + +# If the loader ever sends "exit" to the queue, that means something +# went wrong and it is exiting. + +# - Metadata Namespace with the following attributes: +# model_type - GPT, BERT, T5, etc. (Part of protocol to allow this to be deduced later instead of given on command line) +# num_layers - Number of transformer layers +# hidden_size +# seq_length +# num_attention_heads +# max_position_embeddings +# tokenizer_type +# iteration +# params_dtype +# bert_binary_head - Used only if model_type is BERT +# previous_tensor_parallel_size - Optional +# previous_pipeline_parallel_size - Optional +# true_vocab_size +# make_vocab_size_divisble_by +# consumed_train_samples +# consumed_valid_samples +# messages +# { +# "name": "embeddings" +# "position embeddings" +# "word embeddings" +# } +# (for each transformer layer): +# { +# "name": "transformer layer N" +# "input layernorm weight" +# "input layernorm bias" +# "qkv weight" +# "qkv bias" +# "dense weight" +# "dense bias" +# "post layernorm weight" +# "post layernorm bias" +# "mlp l0 weight" +# "mlp l0 bias" +# "mlp l1 weight" +# "mlp l1 bias" +# } +# { +# "name": "final layer norm" +# "weight" +# "bias" +# } +# if present (i.e. 
for BERT): +# { +# "name": "pooler" +# "weight" +# "bias" +# } +# { +# "name": "lm head" +# "dense weight" +# "dense bias" +# "layernorm weight" +# "layernorm bias" +# } +# { +# "name": "binary head" +# "weight" +# "bias" +# } +# - "done" + + +def load_plugin(plugin_type, name): + module_name = f"checkpoint_{plugin_type}_{name}" + try: + plugin = importlib.import_module(module_name) + except ModuleNotFoundError: + module_name = name + try: + plugin = importlib.import_module(module_name) + except ModuleNotFoundError: + sys.exit(f"Unable to load {plugin_type} plugin {name}. Exiting.") + + if not hasattr(plugin, "add_arguments"): + sys.exit(f"{module_name} module is not a plugin. Exiting.") + + print(f"Loaded {module_name} as the {plugin_type}.") + return plugin + + +def main(): + import argparse + + parser = argparse.ArgumentParser( + description="Megatron Checkpoint Utility Arguments", + allow_abbrev=False, + conflict_handler="resolve", + ) + + parser.add_argument( + "--model_type", + type=str, + required=True, + choices=["GPT", "BERT", "falcon", "llama", "llama2", "llama3", "codellama", "mistral", "gemma"], + help="Type of the model", + ) + parser.add_argument( + "--loader", + type=str, + default="megatron", + help="Module name to load checkpoint, should be on python path", + ) + parser.add_argument( + "--saver", + type=str, + default="megatron", + help="Module name to save checkpoint, shdoul be on python path", + ) + parser.add_argument( + "--load_dir", + type=str, + required=True, + help="Directory to load model checkpoint from", + ) + parser.add_argument( + "--save_dir", + type=str, + required=True, + help="Directory to save model checkpoint to", + ) + parser.add_argument( + "--max_queue_size", + type=int, + default=50, + help="Maximum number of tensors in the queue", + ) + parser.add_argument( + "--no_checking", + action="store_false", + help="Do not perform checking on the name and ordering of weights", + dest="checking", + ) + parser.add_argument("--bf16", action="store_true", help="force bfloat16 weights") + parser.add_argument("--kv_channels", type=int, default=None) + + known_args, _ = parser.parse_known_args() + loader = load_plugin("loader", known_args.loader) + saver = load_plugin("saver", known_args.saver) + + loader.add_arguments(parser) + saver.add_arguments(parser) + + args = parser.parse_args() + + queue = mp.Queue(maxsize=args.max_queue_size) + + print("Starting saver...") + saver_proc = mp.Process(target=saver.save_checkpoint, args=(queue, args)) + saver_proc.start() + + print("Starting loader...") + loader.load_checkpoint(queue, args) + + print("Waiting for saver to complete...") + saver_proc.join() + + +if __name__ == "__main__": + main() diff --git a/multilinguality_megatron/tools/linter.py b/multilinguality_megatron/tools/linter.py new file mode 100644 index 0000000000000000000000000000000000000000..5b14007666600746341e2b962b7b09e69f3019f0 --- /dev/null +++ b/multilinguality_megatron/tools/linter.py @@ -0,0 +1,36 @@ +import os +import os.path as osp +import pathlib +import subprocess + + +def recursively_lint_files(): + """Recursively lint all python files in chosen subdirectories of megatron-lm""" + + try: + import autopep8 + except ModuleNotFoundError: + print("Please first install autopep8 via `pip install autopep8`") + return + + # get all python file paths from top level directory + file_dir = str(pathlib.Path(__file__).parent.absolute()) + working_dir = osp.join(file_dir, os.pardir) + all_py_paths = set(os.path.join(working_dir, fname) + for fname in 
os.listdir(working_dir) if ".py" in fname) + + # get all python file paths from chosen subdirectories + check_dirs = ['docker', 'megatron', 'openwebtext', 'scripts', 'tasks'] + for sub_dir in check_dirs: + for path, _, fnames in os.walk(osp.join(working_dir, sub_dir)): + all_py_paths.update(set(osp.join(path, fname) for fname in fnames if ".py" in fname)) + + print("Linting the following: ") + for py_path in all_py_paths: + print(py_path) + command = 'autopep8 --max-line-length 100 --aggressive --in-place {}'.format(py_path) + subprocess.check_call(command) + + +if __name__ == "__main__": + recursively_lint_files() diff --git a/multilinguality_megatron/tools/merge_datasets.py b/multilinguality_megatron/tools/merge_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..ade2afbcb9b5c94cb8ad328a2bb5bcde7635fba9 --- /dev/null +++ b/multilinguality_megatron/tools/merge_datasets.py @@ -0,0 +1,66 @@ +import os +import sys +import json +import argparse +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from megatron.data import indexed_dataset + + +def main(args): + + prefixes = set() + for basename in os.listdir(args.input): + prefix, ext = os.path.splitext(basename) + + if prefix in prefixes: + continue + + if not os.path.isfile(os.path.join(args.input, basename)): + continue + + ext_pair = '.bin' if ext == '.idx' else '.idx' + assert os.path.isfile(os.path.join(args.input, prefix) + ext_pair), \ + f'ERROR: {ext_pair} file not provided for {os.path.join(args.input, prefix)}' + + prefixes.add(prefix) + + builder = None + for prefix in sorted(prefixes): + if builder is None: + dataset = indexed_dataset.make_dataset(os.path.join(args.input, prefix), 'infer') + + if isinstance(dataset, indexed_dataset.MMapIndexedDataset): + builder = indexed_dataset.MMapIndexedDatasetBuilder(args.output_prefix + '.bin', dtype=dataset._index.dtype) + else: + builder = indexed_dataset.IndexedDatasetBuilder(args.output_prefix + '.bin') + + del dataset + + builder.merge_file_(os.path.join(args.input, prefix)) + + builder.finalize(args.output_prefix + '.idx') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + group = parser.add_argument_group(title='input data') + group.add_argument('--input', type=str, required=True, + help='Path to directory containing all document files to merge') + + group = parser.add_argument_group(title='output data') + group.add_argument('--output_prefix', type=str, required=True, + help='Path to binary output file without suffix') + + args = parser.parse_args() + + assert os.path.isdir(args.input), \ + f'ERROR: {args.input} is not a directory or does not exist' + + assert os.path.isdir(os.path.dirname(args.output_prefix)), \ + f'ERROR: {os.path.dirname(args.output_prefix)} is not a directory or does not exist' + + main(args) + diff --git a/multilinguality_megatron/tools/openwebtext/README.md b/multilinguality_megatron/tools/openwebtext/README.md new file mode 100644 index 0000000000000000000000000000000000000000..35d3f5886c59069e933557d6585f88ef3314203e --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/README.md @@ -0,0 +1,59 @@ +The following steps show how to prepare training dataset to train the mode. + +# Libraries to install + +``` + pip install ftfy langdetect numpy torch pandas nltk sentencepiece boto3 tqdm regex bs4 newspaper3k htmlmin tldextract + git clone https://github.com/mattilyra/LSH + cd LSH + python setup.py install +``` + +# Download the dataset + +1. 
Download the deduplicated URLs from [jcpeterson](https://mega.nz/#F!EZZD0YwJ!9_PlEQzdMVLaNdKv_ICNVQ!cc4RgQQZ) +2. Remove blacklisted URLs. +``` +python blacklist_urls.py +``` +3. Download the content from the clean urls with [openwebtext's utilities](https://github.com/eukaryote31/openwebtext/blob/master/download.py). + +4. Merge the contents into one loose json file with 1 json per newline of the format `{'text': text, 'url': unique_url}`. It is important for the url to be unique. + +# Prepare the data for GPT training: + +1. Perform ftfy, english detection and remove documents with less than 128 tokens. This step can be sharded and run on shards. +``` +python cleanup_dataset.py +``` +Additional cleanup (e.g. remove documents less than 512 characters or dataset specific cleaning like stories, realnews datasets) can be done using `cleanup_fix_dataset.py`. More details can be found by running `python cleanup_fix_dataset.py --help`. +2. Using LSH, find possible duplicates and store then in a file for later processing. The code supports saving and loading fingerprints for recurrent deduplications, and is also multithreaded for faster processing. More details are can be found by `python find_duplicate.py --help`. +``` +python find_duplicates.py --inputs --output +``` +3. Based on similarity measure defind inside function `is_similar` (default: 0.9), group urls that are similar. Basically, for each group, only one url we should keep and remove the rest. +``` +python group_duplicate_urls.py +``` +4. Remove similar documents that were detected in the last step. +``` +python remove_group_duplicates.py +``` + +5. Shuffle the dataset. +``` +shuf -o train_data.json +``` + +# Deduplicating ngrams + +To deduplicate the downstream tasks (e.g. lambada, squad) from the training dataset, we run the following command. + +``` +python filter_ngrams.py --tasks --dedup_dataset --output +``` +We use 13-grams by default for the deduplication. When we find a 13-gram match in a training document, we split the document into two pieces and remove the 13-gram along with 200 characters from the both side of the 13-gram. We also remove any splitted document with less than 200 characters or if a document got splitted more than 10 times. These parameters can be changed using corresponding arguments. + +Only for the lambada task, we need to provide the path, `--lambada_path `. + +Several other features (e.g. save and load dictionary) have been added, look at `python filter_ngrams.py --help` for details. diff --git a/multilinguality_megatron/tools/openwebtext/add_id.py b/multilinguality_megatron/tools/openwebtext/add_id.py new file mode 100644 index 0000000000000000000000000000000000000000..8547031d6e97386f88de910a2ed33f8cb17d4021 --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/add_id.py @@ -0,0 +1,54 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import argparse +import json +import os +import time + +""" +This code adds id to each json object in a json file. User can add prefix +to the ids. 
+""" + +if __name__ == '__main__': + + print('parsing the arguments ...') + + parser = argparse.ArgumentParser() + parser.add_argument('--input_file', type=str, default=None, help='Input'\ + ' json file where id needs to be added') + parser.add_argument('--output_file', type=str, default=None, help=\ + 'Output file name with id') + parser.add_argument('--id_prefix', type=str, default=None, help=\ + 'Id prefix') + parser.add_argument('--log_interval', type=int, default=100, + help='Log interval') + args = parser.parse_args() + + print('Adding ids to dataset ...') + + f_input = open(args.input_file, 'r', encoding='utf-8') + f_output = open(args.output_file, 'wb') + + unique_ids = 1 + start_time = time.time() + for row in f_input: + each_row = json.loads(row) + adlr_id_string = args.id_prefix + '-{:010d}'.format(int(unique_ids)) + each_row['adlr_id'] = adlr_id_string + myjson = json.dumps(each_row, ensure_ascii=False) + + f_output.write(myjson.encode('utf-8')) + f_output.write('\n'.encode('utf-8')) + + if unique_ids % args.log_interval == 0: + print(' processed {:9d} documents in {:.2f} seconds ...'.format( \ + unique_ids, time.time() - start_time), flush=True) + + unique_ids += 1 + + # Close the file. + f_input.close() + f_output.close() + + print('done :-)', flush=True) diff --git a/multilinguality_megatron/tools/openwebtext/blacklist_urls.py b/multilinguality_megatron/tools/openwebtext/blacklist_urls.py new file mode 100644 index 0000000000000000000000000000000000000000..bf68840b6d399ca6bb45960142d323900eb0076a --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/blacklist_urls.py @@ -0,0 +1,299 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +import glob +import re +import time +import tldextract +import sys + + +# List of the domains to blacklist. +domain_blacklist = set([ + '500px', + 'aapks', + 'akamaihd', + 'amazon', + 'apple', + 'artifactfire', + 'artstation', + 'awwni', + 'bandcamp', + 'battleforthenet', + 'coinscalendar', + 'dailymotion', + 'deviantart', + 'discord', + 'discordapp', + 'dlapkandroid', + 'dropbox', + 'e621', + 'ebay', + 'edealinfo', + 'erome', + 'eroshare', + 'explosm', + 'facebook', + 'fbcdn', + 'flickr', + 'furaffinity', + 'futhead', + 'gatopardo', + 'gfycat', + 'gifsound', + 'gifsoup', + 'giphy', + 'github', + 'google', + 'gunprime', + 'gyazo', + 'hotdealstar', + 'imagefap', + 'imageshack', + 'imgflip', + 'imgur', + 'instagram', + 'karmadecay', + 'kryptocal', + 'kym-cdn', + 'liveleak', + 'livememe', + 'lmgtfy', + 'magaimg', + 'memegenerator', + 'minorplanetcenter', + 'minus', + 'mobafire', + 'morejpeg', + 'nocookie', + 'pcpartpicker', + 'photobucket', + 'pinimg', + 'pinterest', + 'pixiv', + 'pornhub', + 'prntscr', + 'puu', + 'qkme', + 'quickmeme', + 'radd', + 'redd', + 'reddit', + 'reddit-stream', + 'redditlog', + 'redditmedia', + 'reddituploads', + 'redtube', + 'reupp', + 'reverb', + 'roanoke', + 'rollingstone', + 'sli', + 'soundcloud', + 'soundgasm', + 'spankbang', + 'spotify', + 'strawpoll', + 'streamable', + 'timeanddate', + 'tinypic', + 'touhouradio', + 'tumblr', + 'twimg', + 'twitch', + 'twitter', + 'vid', + 'vimeo', + 'vine', + 'vkaao', + 'vocaroo', + 'voyagefusion', + 'walmart', + 'wciu', + 'wikimedia', + 'wikipedia', + 'xhamster', + 'xkcd', + 'xvideos', + 'youtu', + 'youtube', + 'youtubedoubler', + 'ytimg', + 'zillexplorer', +]) + +def domain_is_in_blacklist(url): + domain = tldextract.extract(url).domain + return domain in domain_blacklist + + +# List of extentions to blacklist. 
+extentions_blacklist = ( + '.3gp', + '.7z' + '.ai', + '.aif', + '.apk', + '.app', + '.avi', + '.bin', + '.bmp', + '.bz2', + '.css', + '.csv', + '.dat', + '.deb', + '.dmg', + '.doc', + '.docx', + '.exe', + '.gif', + '.gifv', + '.gz', + '.iso', + '.jar', + '.jpeg', + '.jpg', + '.js', + '.log', + '.mid', + '.midi', + '.mkv', + '.mov', + '.mp3', + '.mp4', + '.mpeg', + '.mpg', + '.ogg', + '.ogv', + '.otf', + '.pdf', + '.pkg', + '.png', + '.pps', + '.ppt', + '.pptx', + '.psd', + '.py', + '.qt', + '.ram', + '.rar', + '.sql', + '.svg', + '.swf', + '.tar.gz', + '.tar', + '.tgz', + '.tiff', + '.ttf', + '.txt', + '.wav', + '.webm', + '.wma', + '.wmv', + '.xls', + '.xlsx', + '.xml', + '.xz', + '.zip', +) + +def extention_is_in_blacklist(url): + if url.split('?')[0].lower().endswith(extentions_blacklist): + return True + return False + + +# Malformed urls. +# This function is adapted from: +# https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not +url_regex = re.compile( + r'^(?:http)s?://' # http:// or https:// + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain... + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip + r'(?::\d+)?' # optional port + r'(?:/?|[/?]\S+)$', re.IGNORECASE) +def url_is_malformed(url): + return re.match(url_regex, url) is None + + +def print_progress(prefix, start_time, urls_counter, + domain_blacklist_counter, + extention_blacklist_counter, + short_url_counter, malformed_url_counter, + duplicate_url_counter): + string = prefix + ' | ' + string += 'time elapsed (s): {:.2f} | '.format(time.time() - start_time) + string += 'number of urls: {} | '.format(urls_counter) + string += 'domain blacklisted: {} | '.format(domain_blacklist_counter) + string += 'extention blacklisted: {} | '.format(extention_blacklist_counter) + string += 'short urls (<=8): {} | '.format(short_url_counter) + string += 'malformed urls: {} | '.format(malformed_url_counter) + string += 'duplicate urls: {}'.format(duplicate_url_counter) + print(string, flush=True) + + +if __name__ == '__main__': + + + print('remove blacklisted urls ..') + + # Path to the url files. + path = sys.argv[1] + # Output url file. + output = sys.argv[2] + + # Get the list of url files. 
+ files = glob.glob(path + '/*.txt') + print('> found {} files'.format(len(files))) + + urls = set() + urls_counter = 0 + domain_blacklist_counter = 0 + extention_blacklist_counter = 0 + short_url_counter = 0 + malformed_url_counter = 0 + duplicate_url_counter = 0 + start_time = time.time() + for filename in files: + with open(filename, 'r') as f: + for line in f: + url = line.strip() + urls_counter += 1 + if domain_is_in_blacklist(url): + print('[DOMAIN BLACKLIST]: {}'.format(url), flush=True) + domain_blacklist_counter += 1 + elif extention_is_in_blacklist(url): + print('[EXTENTION BLACKLIST]: {}'.format(url), flush=True) + extention_blacklist_counter += 1 + elif len(url) <= 8: + print('[SHORT URL]: {}'.format(url), flush=True) + short_url_counter += 1 + elif url_is_malformed(url): + print('[MALFORMED URL]: {}'.format(url), flush=True) + malformed_url_counter += 1 + elif url in urls: + print('[DUPLICATE URL]: {}'.format(url), flush=True) + duplicate_url_counter += 1 + else: + urls.add(url) + if urls_counter % 100000 == 0: + print_progress('PROGRESS', start_time, urls_counter, + domain_blacklist_counter, + extention_blacklist_counter, + short_url_counter, malformed_url_counter, + duplicate_url_counter) + + print_progress('FINAL', start_time, urls_counter, + domain_blacklist_counter, + extention_blacklist_counter, + short_url_counter, malformed_url_counter, + duplicate_url_counter) + + # Write the final set of urls. + print('> writing cleaned up url list to {}'.format(output)) + with open(output, 'w') as f: + for url in urls: + f.write(url + '\n') + + print('done :-)') diff --git a/multilinguality_megatron/tools/openwebtext/cleanup_dataset.py b/multilinguality_megatron/tools/openwebtext/cleanup_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3a2eba4e8463bedbbc09ecca902c984dd2fd5314 --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/cleanup_dataset.py @@ -0,0 +1,102 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +import ftfy +import json +from langdetect import detect +import numpy as np +import time +import os +import sys + +from tokenizer import Tokenizer + +MIN_DOCUMENT_LENGHT = 128 + + +def print_progress(prefix, start_time, num_docs, num_fixed_text, + num_non_english_docs, chars_non_english_docs, + num_small_docs, chars_small_docs): + + string = prefix + ' | ' + string += 'elapsed time: {:.2f} | '.format(time.time() - start_time) + string += 'documents: {} | '.format(num_docs) + string += 'fixed text: {} | '.format(num_fixed_text) + string += 'non-english: {} | '.format(num_non_english_docs) + string += 'non-english chars: {} | '.format(chars_non_english_docs) + string += 'small docs: {} | '.format(num_small_docs) + string += 'small docs chars: {}'.format(chars_small_docs) + print(string, flush=True) + + +def filter_corpus(filename, out_filename, print_interval=10000): + + print(' > filtering {}'.format(filename)) + + tokenizer = Tokenizer(cache_dir='./cache') + + num_docs = 0 + num_written_docs = 0 + num_small_docs = 0 + num_fixed_text = 0 + num_non_english_docs = 0 + chars_non_english_docs = 0 + chars_small_docs = 0 + start_time = time.time() + with open(out_filename, 'wb') as f: + with open(filename, 'r') as fin: + for line in fin: + try: + num_docs += 1 + myjson = json.loads(line) + # Fix text + text = ftfy.fix_text(myjson['text']) + if text != myjson['text']: + num_fixed_text += 1 + myjson['text'] = text + # Detect language. 
+ if detect(text) != 'en': + print('[non-english text]', myjson) + num_non_english_docs += 1 + chars_non_english_docs += len(text) + continue + # On average each token is 5 characters so 8 is an + # upper bound. + if len(text) < (8 * MIN_DOCUMENT_LENGHT): + tokens = tokenizer.tokenize_document(text) + if len(tokens) < MIN_DOCUMENT_LENGHT: + print('[small document, skipping]:', myjson) + num_small_docs += 1 + chars_small_docs += len(text) + continue + myjson = json.dumps(myjson, ensure_ascii=False) + f.write(myjson.encode('utf-8')) + f.write('\n'.encode('utf-8')) + num_written_docs += 1 + if num_docs % print_interval == 0: + print_progress('[PROGRESS]', start_time, num_docs, + num_fixed_text, num_non_english_docs, + chars_non_english_docs, + num_small_docs, chars_small_docs) + except Exception as e: + print(' skipping ', line, e) + + print_progress('[FINAL]', start_time, num_docs, + num_fixed_text, num_non_english_docs, + chars_non_english_docs, + num_small_docs, chars_small_docs) + + +if __name__ == '__main__': + + print('building gpt2 dataset ...') + + input_filename = sys.argv[1] + output_filename = sys.argv[2] + + print('will be reading {}'.format(input_filename)) + print('and will write the results to {}'.format(output_filename)) + + filter_corpus(input_filename, output_filename) + + diff --git a/multilinguality_megatron/tools/openwebtext/cleanup_fix_dataset.py b/multilinguality_megatron/tools/openwebtext/cleanup_fix_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..05c8926b8a5e1fc338f87eeb50e100bdf6f2392b --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/cleanup_fix_dataset.py @@ -0,0 +1,177 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +""" +Filter and clean documents: +Capable to clean docs with less than 512 characters, less than +256 characters and contains javascript, fix text and dataset specific +cleaning like stories and realnews datasets. +Program arguments have the details. +""" + +import argparse +from functools import partial +import glob +import ftfy +import json +from langdetect import detect +import multiprocessing +import os +from pathlib import Path +import re +import time + +def process_doc(json_line, args): + + # Read the line. 
+ document = json.loads(json_line) + text = document['text'] + + output = {'remove_512': False, 'remove_256_javascript': False, \ + 'remove_512_non_english': False, 'ftfy_fix_text': False, \ + 'general_cleaning': False} + + try: + # Reomove all docs with less than 512 characters + if "remove_512" in args.tasks: + if len(text) < 512: + output['remove_512'] = True + return output, text, document, True + + # Remove docs if less than 256 character length and contains Javascript + if "remove_256_javascript" in args.tasks: + if len(text) < 256 and 'javascript' in text.lower(): + output['remove_256_javascript'] = True + return output, text, document, True + + # Remove docs < 512 and nonenglish + if "remove_512_non_english" in args.tasks: + if len(text) < 512 and detect(text) != 'en': + output['remove_512_non_english'] = True + return output, text, document, True + + # Fix the text using ftfy, don't remove the text, hence return False + if "ftfy_fix_text" in args.tasks: + fixed_text = ftfy.fix_text(text) + output['ftfy_fix_text'] = True + return output, fixed_text, document, False + + # Cleaning extra spaces and newlines + if "general_cleaning" in args.tasks: + cleaned_text = re.sub(r" +|\b\n+ |\b\n+", " ", text) + #cleaned_text = re.sub(r"\n\n+", "\n\n", text) # used this for Gutenberg dataset + #cleaned_text = re.sub(r"\n", "\n\n", text) # Used this for realnews + + # stories datasets + #cleaned_text = re.sub(r" \'", "'", text) + #cleaned_text = re.sub(r" \!", "!", cleaned_text) + #cleaned_text = re.sub(r" \.", ".", cleaned_text) + #cleaned_text = re.sub(r" \?", "?", cleaned_text) + #cleaned_text = re.sub(r" - ", "-", cleaned_text) + ##cleaned_text = re.sub(r"\" ", "\"", cleaned_text) + #cleaned_text = re.sub(r" @ ", "@", cleaned_text) + + output['general_cleaning'] = True + return output, cleaned_text, document, False + + except Exception as e: + print('Error: *************************\n{}\ntext: {}'.format(e, \ + text), flush=True) + return output, text, document, True + + # don't remove + return output, text, document, False + + +def process_set(args, input_file, output_f_cleaned, output_f_filtered): + + print(' > working on {} ...'.format(input_file), flush=True) + + num_docs = num_remove_512 = num_remove_java = num_remove_512_non_english \ + = num_ftfy_fix_text = num_general_cleaning = 0 + + # Output file and counters. + output_cleaned = open(output_f_cleaned, 'wb') + output_filtered = open(output_f_filtered, 'wb') + + start_time = time.time() + + # Setup multi-processing. + num_workers = 40 + fin = open(input_file, 'r', encoding='utf-8') + pool = multiprocessing.Pool(num_workers) + process_doc_partial = partial(process_doc, args=args) + processed_docs = pool.imap(process_doc_partial, fin, 500) + + # Process documents. 
+ for output, text, document, to_filter in processed_docs: + num_docs += 1 + + num_remove_512 += 1 if output['remove_512'] else 0 + num_remove_java += 1 if output['remove_256_javascript'] else 0 + num_remove_512_non_english += 1 if output['remove_512_non_english'] \ + else 0 + num_ftfy_fix_text += 1 if output['ftfy_fix_text'] else 0 + num_general_cleaning += 1 if output['general_cleaning'] else 0 + + document['text'] = text + myjson = json.dumps(document, ensure_ascii=False) + + if to_filter: + output_filtered.write(myjson.encode('utf-8')) + output_filtered.write('\n'.encode('utf-8')) + else: + output_cleaned.write(myjson.encode('utf-8')) + output_cleaned.write('\n'.encode('utf-8')) + + if num_docs % args.log_interval == 0: + print(' processed {:9d} documents in {:.2f} seconds ...'.format( + num_docs, time.time() - start_time), flush=True) + + # Close the file. + output_cleaned.close() + output_filtered.close() + fin.close() + + # Print stats. + print(' >> total docs: {} remove_512 {} remove_256_javascript {} '\ + 'remove_512_non_english {} ftfy_fix_text {} general_cleaning {}'.\ + format(num_docs, num_remove_512, num_remove_java,\ + num_remove_512_non_english, num_ftfy_fix_text, \ + num_general_cleaning), flush=True) + + +if __name__ == '__main__': + print('parsing the arguments ...') + + parser = argparse.ArgumentParser() + parser.add_argument('--input_files', nargs = '*', required=True, default=\ + None, help = 'Input json files that needs to be'\ + ' cleaned') + parser.add_argument('--tasks', nargs = '*', required=True, default=None,\ + help='Tasks to perform on the input files, ' \ + 'such as remove_512, remove_256_javascript, ' \ + 'remove_512_non_english, ftfy_fix_text, and ' \ + 'general_cleaning. 256 or 512 means the number' \ + ' of characters.') + + parser.add_argument('--output_path', type=str, default=None, + help='Directory where the output should go') + parser.add_argument('--log_interval', type=int, default=100, + help='Log interval') + + args = parser.parse_args() + + print('cleanup dataset ...') + + for input_file in args.input_files: + input_filename, input_filename_ext = os.path.splitext(Path(input_file)\ + .name) + + output_f_cleaned = os.path.join(args.output_path, input_filename + \ + "_cleaned" + input_filename_ext) + output_f_filtered = os.path.join(args.output_path, input_filename + \ + "_filtered" + input_filename_ext) + + process_set(args, input_file, output_f_cleaned, output_f_filtered) + + print('done :-)', flush=True) diff --git a/multilinguality_megatron/tools/openwebtext/filter_ngrams.py b/multilinguality_megatron/tools/openwebtext/filter_ngrams.py new file mode 100644 index 0000000000000000000000000000000000000000..00400d12f4d917fa2e7a9701946010ccc74a488b --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/filter_ngrams.py @@ -0,0 +1,476 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +""" +Deduplicate downstream tasks from training dataset. 13-grams have been used. +All split documents with less than 200 characters got filtered. Any document +with more than 10 splits got filtered as well. 
+""" + +import argparse +from functools import partial +import json +import multiprocessing +import nltk +import pickle +import re +import string +import sys +import time + +def get_words(text): + # get all the lowercase words from text + words, positions = [], [] + for match in re.finditer(r'\w+', text.lower()): + words.append(match.group(0)) + positions.append(match.start()) + return words, positions + +# splits the text +def split_text(text, start_position, remove_char_each_side, seq): + # first part of the text + punctuations = ".!?" + pos = start_position - remove_char_each_side + text_first = "" + while pos > 0 and not text[pos] in punctuations: + pos -= 1 + if pos > 0: + text_first = text[0:pos+1] + + # add length of seq and remove_char_each_side + pos = start_position + len(seq) + remove_char_each_side + + # last part of the text + text_second = "" + while pos < len(text) and not text[pos] in punctuations: + pos += 1 + if pos + 1 < len(text): + text_second = text[pos+1:len(text)] + + return text_first, text_second + +def check_and_clean_text(args, words, ngrams, text, start_position, \ + text_buf_ngram_free, text_buf, local_ngram): + + seq = " ".join(words) + if seq in ngrams: + print(" [matched]: {}".format(seq), flush=True) + + if args.get_ngram_freq_only: + # increase freq of this seq and then only consider the later part + # of the text for further processing + if seq in local_ngram: + local_ngram[seq] += 1 + else: + local_ngram[seq] = 1 + #print(" [increased]: {} {}".format(seq, ngrams[seq]), flush=True) + if (start_position + len(seq) + 1) < len(text): + text_buf.append(text[start_position + len(seq) + 1:len(text)]) + return False + + # split the text + text_first, text_second = split_text(text, start_position, \ + args.remove_char_each_side, seq) + + # first part of ngrams free + if len(text_first) > args.filter_text_char_len: + text_buf_ngram_free.append(text_first) + + # add second part for further processing + if len(text_second) > args.filter_text_char_len: + text_buf.append(text_second) + + return False # not ngram free + + # ngram free + return True + + +def free_ngram(line, args, key, ngrams, ngrams_freq_sorted): + # remove all the ngrams + + try: + myjson = json.loads(line) + text_buf = [myjson[key]] + except Exception as e: + print("Error: {}".format(e), flush=True) + text_buf = [] + + text_buf_ngram_free = [] + local_ngram = {} + while len(text_buf) > 0: + + # get the first one from the buffer + text = text_buf.pop(0) + words, positions = get_words(text) + + ngram_free = True + # find each max n-grams and check dictionary + for i in range(len(words) - args.max_ngram_size + 1): + check_ngram_free = check_and_clean_text(args, words[i:\ + i+args.max_ngram_size], ngrams, text, positions[i], \ + text_buf_ngram_free, text_buf, local_ngram) + + # the seq is ngram free? 
if yes, break + if not check_ngram_free: + ngram_free = False + break + + # if max ngrams doesn't match, check if any other lower n-grams + # within max ngram macthes + for ngram_len, _ in ngrams_freq_sorted: + check_ngram_free = check_and_clean_text(args, words[i:\ + i+ngram_len], ngrams, text, positions[i], \ + text_buf_ngram_free, text_buf, local_ngram) + + # same check as above + if not check_ngram_free: + ngram_free = False + break + + # check break from lower than max ngram loop above + if not ngram_free: + break + + # for the last max n-gram, check all the lower ngrams in it + if ngram_free and len(words) - args.max_ngram_size > 0: + # get the last words of the lax max ngram + last_seq_words = words[(len(words)-args.max_ngram_size):len(words)] + last_seq_start_position = len(words) - args.max_ngram_size + + # check all n-grams lower than the max + for pos, (ngram_len, _) in enumerate(ngrams_freq_sorted): + + # ignore the max ngram as has been considered already + if ngram_len == args.max_ngram_size: + continue + + # find each ngram of ngram_len in max n-grams and check + for i in range(len(last_seq_words) - ngram_len + 1): + check_ngram_free = check_and_clean_text(args, \ + last_seq_words[i:i+ngram_len], ngrams, text,\ + positions[last_seq_start_position+i], \ + text_buf_ngram_free, text_buf, local_ngram) + + if not check_ngram_free: + ngram_free = False + break + + if not ngram_free: + break + + # texts are ngram free + if ngram_free and not args.get_ngram_freq_only: + text_buf_ngram_free.append(text) + + # check if the text has only been trimmed + trimmed = 0 + if not args.get_ngram_freq_only and len(text_buf_ngram_free) == 1 and \ + len(text_buf_ngram_free[0]) < len(myjson[key]): + trimmed = 1 + + return text_buf_ngram_free, trimmed, myjson, local_ngram + +# insert word sequence into dictionary +def insert_dict(words, ngrams, pos): + seq = " ".join(words) + if seq not in ngrams: + ngrams[seq] = 0 + #ngrams[seq] = pos + +# insert each ngram from text into the ngrams dictionary +def compute_ngrams_insert_dict(args, text, ngrams): + words, positions = get_words(text) + if len(words) < args.min_ngram_size: + return + + if len(words) < args.max_ngram_size: + insert_dict(words, ngrams, positions[0]) + + for i in range(len(words) - args.max_ngram_size+1): + insert_dict(words[i:i+args.max_ngram_size], ngrams, positions[i]) + + +# Build ngrams for the lambada dataset +def process_task_lambda(args, task_file, ngrams): + print(' reading from {} and computing ngrams'.format(task_file)) + with open(task_file, 'r') as f: + for line in f: + try: + myjson = json.loads(line) + text = myjson['text'] + compute_ngrams_insert_dict(args, text, ngrams) + except Exception as e: + print('Error:', e) + print(" Entities in ngrams {}".format(len(ngrams)), flush=True) + + +# Build ngrams for the dataset of the given task +def process_task(args, task_name, ngrams): + + print(' reading from {} and computing ngrams'.format('import datasets')) + print(" Current entities in ngrams {}".format(len(ngrams)), flush=True) + # using validation/test data from datasets + from datasets import load_dataset + + entities_in_ngrams = len(ngrams) + + # load the dataset + if task_name == 'squad': + dataset = load_dataset('squad_v2', split='validation') + elif task_name == 'natural_questions': + dataset = load_dataset('natural_questions', split='validation') + elif task_name == 'triviaqa': + dataset = load_dataset('trivia_qa', 'unfiltered', split='test') + elif task_name == 'webqa': + dataset = load_dataset('web_questions', 
split='test') + elif task_name == 'race': + dataset = load_dataset('race', 'all', split='test') + elif task_name == 'drop': + dataset = load_dataset('drop', split='validation') + elif task_name == 'coqa': + dataset = load_dataset('coqa', split='validation') + elif task_name == 'piqa': + dataset = load_dataset('piqa', split='test') + else: + print("Invalid task name: {}".format(task_name), flush=True) + return + + # read the dataset and add to ngrams + for line in dataset: + try: + if task_name in ['squad', 'triviaqa', 'webqa', 'race', 'drop']: + text = line['question'] + compute_ngrams_insert_dict(args, text, ngrams) + elif task_name == 'natural_questions': + text = line['question']['text'] + compute_ngrams_insert_dict(args, text, ngrams) + elif task_name == 'coqa': + all_questions = line['questions'] + for question in all_questions: + compute_ngrams_insert_dict(args, question, ngrams) + elif task_name == 'piqa': + text = line['goal'] + compute_ngrams_insert_dict(args, text, ngrams) + except Exception as e: + print('Error:', e) + + print(" After task {} entities in ngrams {}, added {}".format(task_name, \ + len(ngrams), len(ngrams) - entities_in_ngrams), flush=True) + +def compute_tasks_ngrams(args, ngrams): + start_time = time.time() + for _, task_name in enumerate(args.tasks): + print('Task: {}'.format(task_name), flush=True) + if task_name == 'lambada': + assert args.lambada_path is not None + process_task_lambda(args, args.lambada_path, ngrams) + else: + process_task(args, task_name, ngrams) + print(" Taken time to compute ngrams {:.2f}".format(time.time() - \ + start_time), flush=True) + +def compute_ngram_freq_sorted(args, ngrams): + ngrams_freq = {} + for ngram_key in ngrams.keys(): + length = len(ngram_key.split()) + ngrams_freq[length] = ngrams_freq[length] + 1 if length in \ + ngrams_freq else 1 + + ngrams_freq_sorted = sorted(ngrams_freq.items(), key=lambda item: item[0]) + print(" Ngram frequencies: {}".format(ngrams_freq_sorted), flush=True) + print(" Entities in ngrams {} min_ngram_size {} max_ngram_size {}".format(\ + len(ngrams), ngrams_freq_sorted[0][0], ngrams_freq_sorted[len(\ + ngrams_freq_sorted) -1 ][0]), flush=True) + return ngrams_freq_sorted + +def get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \ + dedup_file, dedup_key, ngrams_freq_sorted): + + start_time = time.time() + # get the ngrams frequency + args.get_ngram_freq_only = True + + # Open the large file to process in parallel + num_workers = args.num_threads + pool = multiprocessing.Pool(num_workers) + fin = open(dedup_file, 'r', encoding='utf-8') + free_ngram_abt_partial=partial(free_ngram, args=args, key=dedup_key, \ + ngrams=ngrams, ngrams_freq_sorted=ngrams_freq_sorted) + free_ngrams_abt = pool.imap(free_ngram_abt_partial, fin, 500) + + counter = 0 + for _, _, _, local_ngram in free_ngrams_abt: + counter += 1 + if counter % 1000 == 0: + print(' [compute_stat]> processed {} documents in {:.2f} seconds ...'. 
+ format(counter, time.time() - start_time), flush=True) + for local_key in local_ngram: + if local_key in ngrams: + ngrams[local_key] += 1 + local_ngram = {} + + print(' Time taken to compute statistics {:.2f} seconds'.format(time.time() - \ + start_time), flush=True) + pool.close() + pool.join() + + start_time = time.time() + counter_threshold = 0 + # Get ngram below theadhold + for local_key, local_val in ngrams.items(): + if ngrams[local_key] < args.key_threshold: + print(" [threshold] {} {}".format(local_key, local_val), flush=True) + counter_threshold += 1 + ngrams_below_threshold[local_key] = 1 + + print(' Ngrams below threshold {}'.format(counter_threshold), flush=True) + fin.close() + +def clean_ngrams_below_threshold(args, ngrams_below_threshold, dedup_file, \ + dedup_key): + + start_time = time.time() + # Now actually filter the dataset + args.get_ngram_freq_only = False + #id_prefix = '-'.join(args.tasks[::2]) + id_prefix = '-'.join(args.tasks[::1]) + + # get the range of the size of the ngrams + ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams_below_threshold) + + # Open the large file to process in parallel + counter = splitted = ignored = split_mt_thld = trimmed_count = 0 + num_workers = args.num_threads + pool = multiprocessing.Pool(num_workers) + fin = open(dedup_file, 'r', encoding='utf-8') + free_ngram_clean_partial=partial(free_ngram, args=args, key=dedup_key, \ + ngrams=ngrams_below_threshold, ngrams_freq_sorted=ngrams_freq_sorted) + free_ngrams_clean = pool.imap(free_ngram_clean_partial, fin, 500) + + out_f = open(args.output, 'wb') + + for text_buf_ngram_free, trimmed, myjson, _ in free_ngrams_clean: + counter += 1 + try: + + trimmed_count += trimmed + + if len(text_buf_ngram_free) > 1: + splitted += 1 + if len(text_buf_ngram_free) == 0: + ignored += 1 + # more than 10 splits ignored + if len(text_buf_ngram_free) > args.splits_count: + text_buf_ngram_free = [] + split_mt_thld += 1 + + if args.output is not None: + if "split_id" in myjson: + use_prefix = myjson["split_id"] + "-" + else: + use_prefix = "" + + for i in range(len(text_buf_ngram_free)): + split_id_string = id_prefix + '-{:010d}'.format(int(\ + counter)) + '-{:04d}'.format(int(i)) + myjson[dedup_key] = text_buf_ngram_free[i] + myjson["split_id"] = use_prefix + split_id_string + outjson = json.dumps(myjson, ensure_ascii=False) + + out_f.write(outjson.encode('utf-8')) + out_f.write('\n'.encode('utf-8')) + + if counter % 1000 == 0: + print(' [final]> processed {} documents in {:.2f} seconds ...'. + format(counter, time.time() - start_time), flush=True) + except Exception as e: + print('Error:', e) + + print(' [final]> processed {} documents in {:.2f} seconds ...'. 
+ format(counter, time.time() - start_time), flush=True) + + print(' Total docs {} splitted {} ignored {} splits > theshold {} trimmed'\ + ' {}'.format(counter, splitted, ignored, split_mt_thld, trimmed_count)\ + , flush=True) + + pool.close() + pool.join() + + out_f.close() + fin.close() + + +if __name__ == '__main__': + # we use 13-grams, any text less than 200 characters got removed + # any text splitted more than 10 got removed as well + + print('parsing the arguments ...') + + parser = argparse.ArgumentParser() + parser.add_argument('--tasks', nargs='*', required=True, default=None, \ + help='Tasks to use for deduplication: currently ' + ' suuport [lambada, squad, natural_questions,' + ' triviaqa, webqa, race, drop, coqa, and piqa]') + parser.add_argument('--lambada_path', type=str, default=None, + help='Only Lambada task needs the path') + parser.add_argument('--dedup_dataset', nargs = '*', default=None, + help='Dataset to deduplicate with the key to use' + ' e.g. cc.json text') + parser.add_argument('--output', type=str, default=None, + help='Output file name to save dedup dataset') + parser.add_argument('--num_threads', type=int, default=40, + help='Number of threads to use') + + # Default dedup values + parser.add_argument('--max_ngram_size', type=int, default=13, + help='Maximum size of ngram to use.') + parser.add_argument('--min_ngram_size', type=int, default=8, + help='Minimum size of ngram to use.') + parser.add_argument('--filter_text_char_len', type=int, default=200, + help='Remove any text below this length.') + parser.add_argument('--key_threshold', type=int, default=10, + help='Number of keys to consider as threshold') + parser.add_argument('--save_dictionary', type=str, default=None, + help='Save the dictionary') + parser.add_argument('--load_dictionary', type=str, default=None, + help='Load the dictionary') + parser.add_argument('--splits_count', type=int, default=10, + help='Remove any documents more than this many splits') + parser.add_argument('--remove_char_each_side', type=int, default=200, + help='Maximum size of ngram to use.') + args = parser.parse_args() + + assert len(args.dedup_dataset) == 2 + dedup_file = args.dedup_dataset[0] + dedup_key = args.dedup_dataset[1] + + # Setup multi-processing + num_workers = args.num_threads + if args.load_dictionary is None: + # Build ngrams + ngrams = {} + compute_tasks_ngrams(args, ngrams) + + # get the range of the size of the ngrams + ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams) + + # get ngram freq from large file in parallel + # get ngrams below threshold + ngrams_below_threshold = {} + get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \ + dedup_file, dedup_key, ngrams_freq_sorted) + + # save the dictionary if needed + if args.save_dictionary is not None: + with open(args.save_dictionary, 'wb') as save_dict_handle: + pickle.dump(ngrams_below_threshold, save_dict_handle) + else: + with open(args.load_dictionary, 'rb') as load_dict_handle: + ngrams_below_threshold = pickle.load(load_dict_handle) + + # filter the large file + if args.output is not None: + clean_ngrams_below_threshold(args, ngrams_below_threshold, \ + dedup_file, dedup_key) + + print('done :-)') diff --git a/multilinguality_megatron/tools/openwebtext/find_duplicates.py b/multilinguality_megatron/tools/openwebtext/find_duplicates.py new file mode 100644 index 0000000000000000000000000000000000000000..84507c49ffa610e7ebab00ddbcf64095ad34c2b2 --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/find_duplicates.py @@ 
-0,0 +1,292 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import argparse +from functools import partial +import itertools +import json +from lsh import cache, minhash +import multiprocessing +import numpy as np +import time +import pickle +import sys +import os + +# This function is adapted from: +# https://github.com/mattilyra/LSH/blob/master/examples/Introduction.ipynb +def shingles(text, char_ngram=5): + return set(text[head:head + char_ngram] + for head in range(0, len(text) - char_ngram)) + + +# This function is adapted from: +# https://github.com/mattilyra/LSH/blob/master/examples/Introduction.ipynb +def jaccard(set_a, set_b, args): + if len(set_a) < 1 or len(set_b) < 1: + return 0.0 + + intersection = set_a & set_b + union = set_a | set_b + + if args.jaccard == 'min': + return len(intersection) / min(len(set_a), len(set_b)) + elif args.jaccard == 'max': + return len(intersection) / max(len(set_a), len(set_b)) + else: + return len(intersection) / len(union) + +def compute_fingerprint(line, key): + try: + myjson = json.loads(line) + url = myjson[key] + text = myjson['text'] + fingerprint = hasher.fingerprint(text) + except Exception as e: + print('Error:', e) + return None, None, None, False + + return url, text, fingerprint, True + +def url_pairs_to_remove(args, bucket_urls, url_doc): + remove_urls_list = [] + deduped_local, counter_local = 0, 0 + iteration = 0 + while len(bucket_urls) > 1: + if args.heuristic_iter != -1 and \ + iteration == args.heuristic_iter: + break + + items = list(bucket_urls) + remove_urls = [] + main_url = items[np.random.randint(0, len(items))] + main_dhingles = shingles(url_doc[main_url]) + + for i in range(0, len(items)): + counter_local += 1 + other_url = items[i] + if other_url == main_url: + continue + other_shingles = shingles(url_doc[other_url]) + try: + jaccard_sim = jaccard(main_dhingles, other_shingles, args) + except Exception as e: + print('Error:', e) + jaccard_sim = 0.0 + if jaccard_sim > 0.5: + remove_urls.append({other_url: jaccard_sim}) + deduped_local += 1 + bucket_urls.remove(other_url) + + bucket_urls.remove(main_url) + if len(remove_urls) > 0: + remove_urls_list.append({main_url: remove_urls}) + iteration += 1 + return remove_urls_list, deduped_local, counter_local + +def write_remove_urls_list(remove_urls_list, f_out): + if len(remove_urls_list) > 0: + for each_url_remove in remove_urls_list: + myjson = json.dumps(each_url_remove, ensure_ascii=False) + f_out.write(myjson.encode('utf-8')) + f_out.write('\n'.encode('utf-8')) + +def compute_jaccard(each_bin, num_bins, start_time_local): + + remove_urls_list = [] + deduped_local, counter_local, bucket_local = 0, 0, 0 + + for bucket_id in each_bin: + bucket_local += 1 + if os.getpid() % num_bins == 0 and bucket_local % 100000 == 0: + print("Counter {}, progress {:.2f} time {:.2f}".\ + format(bucket_local, float(bucket_local)/float(len(each_bin)),\ + time.time() - start_time_local), flush=True) + + if len(each_bin[bucket_id]) <= 1: + continue + + bucket_urls = each_bin[bucket_id].copy() + remove_urls_list_sub, deduped_local_sub, counter_local_sub = \ + url_pairs_to_remove(args, bucket_urls, url_doc) + + deduped_local += deduped_local_sub + counter_local += counter_local_sub + if len(remove_urls_list_sub) > 0: + remove_urls_list.extend(remove_urls_list_sub) + + return remove_urls_list, deduped_local, counter_local + +def find_pair_urls_parallel(args, lshcache, url_doc): + start_time = time.time() + f_out = open(args.output, 'wb') + deduped, counter = 0, 0 + + # compute 
jaccards of buckets in bin in parallel (parallelism + # limited to # of bins) + num_bins = len(lshcache.bins) + pool = multiprocessing.Pool(num_bins) + compute_jaccard_partial = partial(compute_jaccard, num_bins=num_bins, \ + start_time_local=start_time) + # don't need to pass args and url_doc as they are already shared + compute_jaccard_iter = pool.imap(compute_jaccard_partial, lshcache.bins) + + print("multiprocessing init took {:.2f}".format(time.time() - start_time),\ + flush=True) + for remove_urls_list, deduped_local, counter_local in compute_jaccard_iter: + deduped += deduped_local + counter += counter_local + write_remove_urls_list(remove_urls_list, f_out) + print(' [write]> processed {} documents in {:.2f} ' + 'seoncds and deduped {} documents ...'.format(counter, time.time()\ + - start_time, deduped), flush=True) + + pool.close() + pool.join() + f_out.close() + + print(' Taken time for jaccard similariries {:.2f} seconds'.format(\ + time.time() - start_time), flush=True) + +def find_pair_urls_sequential(args, lshcache, url_doc): + start_time = time.time() + f_out = open(args.output, 'wb') + deduped, counter = 0, 0 + for b in lshcache.bins: + for bucket_id in b: + if len(b[bucket_id]) <= 1: + continue + + bucket_urls = b[bucket_id].copy() + remove_urls_list_sub, deduped_local_sub, counter_local_sub = \ + url_pairs_to_remove(args, bucket_urls, url_doc) + + deduped += deduped_local_sub + counter += counter_local_sub + write_remove_urls_list(remove_urls_list_sub, f_out) + if counter % 10000 == 0: + print(' [write]> processed {} documents in {:.2f} ' + 'seoncds and deduped {} documents ...'. + format(counter, time.time() - start_time, + deduped), flush=True) + f_out.close() + print(' [write]> processed {} documents in {:.2f} ' + 'seoncds and deduped {} documents ...'. + format(counter, time.time() - start_time, + deduped), flush=True) + + +if __name__ == '__main__': + print('parsing the arguments ...') + + parser = argparse.ArgumentParser() + parser.add_argument('--seed', type=int, default=1234, + help='Random seed used for python, numpy') + parser.add_argument('--inputs', nargs = '*', default=None, help = \ + 'Pairwise list of the input files and keys, ' + 'e.g. --inputs cc.json cc_id news.json news_id') + parser.add_argument('--load_fingerprints', nargs = '*', default=None, + help='Load fingerprints from a list of pickle files,' + ' e.g. cc.pkl news.pkl') + parser.add_argument('--save_fingerprints', type=str, default=None, + help='Save the fingerprints of the inputs.') + parser.add_argument('--output', type=str, default=None, + help='Output file name that consists of all ids' + ' with matching similarities') + parser.add_argument('--jaccard', type=str, default='union', + choices=['union', 'min', 'max'], help='Jaccard'\ + ' similarity computation') + parser.add_argument('--heuristic_iter', type=int, default=1, + help='Number of iterations to run the heuristics' + ': use -1 for exact') + parser.add_argument('--num_bands', type=int, default=10, + help='Number of bands to use in cache') + parser.add_argument('--num_seeds', type=int, default=100, + help='Number of seeds to use for minhash. 
Note that' + ' this value should be divisible by num_bands') + parser.add_argument('--jaccard_parallel', action='store_true', + help='Use this to process large number of documents.') + args = parser.parse_args() + + print('finding possible duplicate content ...') + + # set seed and get an array of seeds of 100 integers + np.random.seed(args.seed) + seeds = np.random.randint(0, 1e6, size=args.num_seeds) + + # initialize minhash and lsh cache + hasher = minhash.MinHasher(seeds=seeds, char_ngram=5, hashbytes=4) + lshcache = cache.Cache(num_bands=args.num_bands, hasher=hasher) + + url_doc = {} + + # load fingerprints from pickle file if needed + if args.load_fingerprints is not None: + for count_fp, fp_file_name in enumerate(args.load_fingerprints): + print("Loading fingerprints from pickle file {}".format( + fp_file_name), flush=True) + fp = open(fp_file_name, "rb") + if count_fp == 0: + # assign directory for the first pkl + lshcache = pickle.load(fp) + url_doc = pickle.load(fp) + else: + # append these to lshcache and url_doc + local_lshcache = pickle.load(fp) + local_url_doc = pickle.load(fp) + for url in local_lshcache.fingerprints.keys(): + url_doc[url] = local_url_doc[url] + lshcache.add_fingerprint(local_lshcache.fingerprints[url], url) + fp.close() + + counter = 0 + start_time = time.time() + + # compute finger prints of the inputs if any + # input file and the key to use as id + if args.inputs is not None: + print("Computing fingerprints", flush=True) + assert len(args.inputs) % 2 == 0 + for input_file, key in zip(args.inputs[::2], args.inputs[1::2]): + print(' document processing {} with key {}'.format(input_file, key), + flush=True) + + # compute fingerprints in parallel + num_workers = 40 + pool = multiprocessing.Pool(num_workers) + fin = open(input_file, 'r', encoding='utf-8') + compute_fingerprint_partial = partial(compute_fingerprint, key=key) + compute_fingerprint_iter = pool.imap(compute_fingerprint_partial, + fin, 512) + # traverse all the texts and add fingerprints + for url, text, fingerprint, flag in compute_fingerprint_iter: + counter += 1 + if flag: + url_doc[url] = text + lshcache.add_fingerprint(fingerprint, url) + if counter % 10000 == 0: + print(' [read]> processed {} documents in {:.2f} ' + 'seconds ...'.format(counter, time.time() - \ + start_time), flush=True) + + fin.close() + pool.close() + pool.join() + + # Save the fingerprints if needed + if args.save_fingerprints is not None: + print("Saving fingerprints to pickle file {}".format( + args.save_fingerprints), flush=True) + with open(args.save_fingerprints, 'wb') as f_save: + pickle.dump(lshcache, f_save) + pickle.dump(url_doc, f_save) + + # compute jaccard index of the input texts and write to file if needed + if args.output is not None: + print("Compute jaccard similarity", flush=True) + if args.jaccard_parallel: + find_pair_urls_parallel(args, lshcache, url_doc) + else: + find_pair_urls_sequential(args, lshcache, url_doc) + + print('done :-)') + diff --git a/multilinguality_megatron/tools/openwebtext/group_duplicate_url.py b/multilinguality_megatron/tools/openwebtext/group_duplicate_url.py new file mode 100644 index 0000000000000000000000000000000000000000..16a0354fde130d67de0fbb51e90658b1f16fc1c4 --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/group_duplicate_url.py @@ -0,0 +1,77 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
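+#
+# Usage sketch (argument order taken from the sys.argv handling below; file
+# names and the threshold value are illustrative):
+#
+#   python group_duplicate_url.py similar_urls.json url_groups.json 0.9
+#
+# The optional third argument is the minimum Jaccard similarity for two urls to
+# be grouped together and defaults to 0.7 when omitted.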
+ +import json +import time +import sys + + +if __name__ == '__main__': + + + print('grouping duplicate urls ...') + + input = sys.argv[1] + output = sys.argv[2] + if len(sys.argv) > 3: + jaccard_similarity_threshold = float(sys.argv[3]) + else: + jaccard_similarity_threshold = 0.7 + + url_to_index = {} + index_to_urls = [] + counter = 0 + start_time = time.time() + with open(input, 'r') as f: + for line in f: + counter += 1 + myjson = json.loads(line) + urls = [] + for main_url in myjson.keys(): + urls.append(main_url) + for value in myjson[main_url]: + for other_url, js in value.items(): + if js >= jaccard_similarity_threshold: + urls.append(other_url) + current_index = -1 + other_indices = set() + for url in urls: + if url in url_to_index: + if current_index == -1: + current_index = url_to_index[url] + elif current_index != url_to_index[url]: + other_indices.add(url_to_index[url]) + if current_index == -1: + current_index = len(index_to_urls) + index_to_urls.append(set()) + for url in urls: + url_to_index[url] = current_index + index_to_urls[current_index].add(url) + for index in other_indices: + for url in index_to_urls[index]: + index_to_urls[current_index].add(url) + url_to_index[url] = current_index + index_to_urls[index] = None + + if counter % 100000 == 0: + print(' > processed {} lines in {} seconds ...'.format( + counter, time.time() - start_time)) + + + total_remove = 0 + total_remain = 0 + for urls in index_to_urls: + if urls is not None: + if len(urls) > 1: + total_remove += (len(urls) - 1) + total_remain += 1 + print('out of {} urls, only {} are unique and {} should be removed'.format( + total_remove+total_remain, total_remain, total_remove)) + + with open(output, 'wb') as f: + for i, urls in enumerate(index_to_urls): + if urls is not None: + if len(urls) > 1: + myjson = json.dumps({str(i): list(urls)}, + ensure_ascii=False) + f.write(myjson.encode('utf-8')) + f.write('\n'.encode('utf-8')) diff --git a/multilinguality_megatron/tools/openwebtext/merge_jsons.py b/multilinguality_megatron/tools/openwebtext/merge_jsons.py new file mode 100644 index 0000000000000000000000000000000000000000..fb11fe45ba5d20b0bf05d9aeaad1758db0a33b3e --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/merge_jsons.py @@ -0,0 +1,42 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
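+#
+# Usage sketch (paths are illustrative):
+#
+#   python merge_jsons.py --json_path /data/shards --output_file merged_output.json
+#
+# Every *.json file directly under --json_path is appended line by line to the
+# output file; each line is run through json.loads on the way, so a malformed
+# line aborts the merge.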
+ + +import glob +import sys +import json +import argparse + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument("--json_path", type=str, default=".", + help="path where all the json files are located") + + parser.add_argument("--output_file", type=str, default="merged_output.json", + help="filename where the merged json should go") + + args = parser.parse_args() + + json_path = args.json_path + out_file = args.output_file + + json_files = glob.glob(json_path + '/*.json') + + counter = 0 + + with open(out_file, 'w') as outfile: + for fname in json_files: + counter += 1 + + if counter % 1024 == 0: + print("Merging at ", counter, flush=True) + + with open(fname, 'r') as infile: + for row in infile: + each_row = json.loads(row) + outfile.write(row) + + + print("Merged file", out_file, flush=True) + + diff --git a/multilinguality_megatron/tools/openwebtext/remove_group_duplicates.py b/multilinguality_megatron/tools/openwebtext/remove_group_duplicates.py new file mode 100644 index 0000000000000000000000000000000000000000..44b62d62c19f35ef555507f7a07fc2bb73c8ca51 --- /dev/null +++ b/multilinguality_megatron/tools/openwebtext/remove_group_duplicates.py @@ -0,0 +1,56 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +import json +import time +import sys + + +if __name__ == '__main__': + + url_filename = sys.argv[1] + data_filename = sys.argv[2] + output_filename = sys.argv[3] + + urls = set() + with open(url_filename, 'r') as f: + for line in f: + myjson = json.loads(line) + for key in myjson: + this_urls = myjson[key] + for i in range(1, len(this_urls)): + urls.add(this_urls[i]) + print('will be removing {} urls'.format(len(urls)), flush=True) + + written_docs = 0 + removed_docs = 0 + removed_chars = 0 + start_time = time.time() + with open(output_filename, 'wb') as fout: + with open(data_filename, 'r') as fin: + for line in fin: + try: + myjson = json.loads(line) + url = myjson['url'] + if url in urls: + print('removing', myjson) + removed_docs += 1 + removed_chars += len(myjson['text']) + continue + myjson = json.dumps(myjson, ensure_ascii=False) + fout.write(myjson.encode('utf-8')) + fout.write('\n'.encode('utf-8')) + written_docs += 1 + if written_docs % 10000 == 0: + print(' [PROCESSED] time (s): {:.2f} | written: {} ' + '| removed: {} (char: {})'.format( + time.time() - start_time, + written_docs, removed_docs, removed_chars)) + except Exception as e: + print('[SKIPPING]', line, e) + + print(' [PROCESSED] time (s): {:.2f} | written: {} ' + '| removed: {} (char: {})'.format( + time.time() - start_time, + written_docs, removed_docs, removed_chars)) + print('done :-)') diff --git a/multilinguality_megatron/tools/preprocess_data.py b/multilinguality_megatron/tools/preprocess_data.py new file mode 100644 index 0000000000000000000000000000000000000000..b88c6e75e76d33076b13b551bde7321f6ff0d6fa --- /dev/null +++ b/multilinguality_megatron/tools/preprocess_data.py @@ -0,0 +1,267 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
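+#
+# Usage sketch (paths and tokenizer settings are illustrative, not prescriptive):
+#
+#   python tools/preprocess_data.py \
+#       --input corpus.jsonl \
+#       --output_prefix corpus \
+#       --tokenizer_type SentencePieceTokenizer \
+#       --vocab_file tokenizer.model \
+#       --workers 8 \
+#       --chunk_size 32 \
+#       --append_eod
+#
+# With the default --json_keys text this writes corpus_text_document.bin and
+# corpus_text_document.idx, the indexed dataset pair consumed at training time.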
+ +"""Processing data for pretraining.""" + +import argparse +import json +import multiprocessing +import os +import sys + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) +) +import time + +import torch + +try: + import nltk + + nltk_available = True +except ImportError: + nltk_available = False + +from megatron.data import indexed_dataset +from megatron.tokenizer import build_tokenizer + + +# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer +class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars): + _period_context_fmt = r""" + \S* # some word material + %(SentEndChars)s # a potential sentence ending + \s* # <-- THIS is what I changed + (?=(?P + %(NonWord)s # either other punctuation + | + (?P\S+) # <-- Normally you would have \s+ here + ))""" + + +class IdentitySplitter(object): + def tokenize(self, *text): + return text + + +class Encoder(object): + def __init__(self, args): + self.args = args + + def initializer(self): + # Use Encoder class as a container for global data + Encoder.tokenizer = build_tokenizer(self.args) + if self.args.split_sentences: + if not nltk_available: + print("NLTK is not available to split sentences.") + exit() + library = "tokenizers/punkt/{}.pickle".format(self.args.lang) + print("loading: " + library) + splitter = nltk.load(library) + if self.args.keep_newlines: + # this prevents punkt from eating newlines after sentences + Encoder.splitter = nltk.tokenize.punkt.PunktSentenceTokenizer( + train_text=splitter._params, lang_vars=CustomLanguageVars() + ) + else: + Encoder.splitter = splitter + + else: + Encoder.splitter = IdentitySplitter() + + def encode(self, json_line): + data = json.loads(json_line) + ids = {} + for key in self.args.json_keys: + text = data[key] + doc_ids = [] + for sentence in Encoder.splitter.tokenize(text): + sentence_ids = Encoder.tokenizer.tokenize(sentence) + if len(sentence_ids) > 0: + doc_ids.append(sentence_ids) + if len(doc_ids) > 0 and self.args.append_eod: + doc_ids[-1].append(Encoder.tokenizer.eos) + ids[key] = doc_ids + return ids, len(json_line) + + +def get_args(): + parser = argparse.ArgumentParser() + group = parser.add_argument_group(title="input data") + group.add_argument("--input", type=str, required=True, help="Path to input JSON") + group.add_argument( + "--json_keys", + nargs="+", + default=["text"], + help="space separate listed of keys to extract from json", + ) + group.add_argument( + "--split_sentences", action="store_true", help="Split documents into sentences." 
+ ) + group.add_argument( + "--keep_newlines", + action="store_true", + help="Keep newlines between sentences when splitting.", + ) + + group = parser.add_argument_group(title="tokenizer") + group.add_argument( + "--tokenizer_type", + type=str, + required=True, + choices=[ + "BertWordPieceLowerCase", + "BertWordPieceCase", + "GPT2BPETokenizer", + "SentencePieceTokenizer", + "PretrainedFromHF", + "FalconTokenizer", + ], + help="What type of tokenizer to use.", + ) + group.add_argument( + "--vocab_file", type=str, default=None, help="Path to the vocab file" + ) + group.add_argument( + "--merge_file", + type=str, + default=None, + help="Path to the BPE merge file (if necessary).", + ) + group.add_argument( + "--append_eod", + action="store_true", + help="Append an token to the end of a document.", + ) + group.add_argument( + "--lang", + type=str, + default="english", + help="Language to use for NLTK-powered sentence splitting.", + ) + group = parser.add_argument_group(title="output data") + group.add_argument( + "--output_prefix", + type=str, + required=True, + help="Path to binary output file without suffix", + ) + group.add_argument( + "--dataset_impl", type=str, default="mmap", choices=["lazy", "cached", "mmap"] + ) + group = parser.add_argument_group(title="runtime") + group.add_argument( + "--workers", + type=int, + required=True, + help="Number of worker processes to launch", + ) + group.add_argument( + "--chunk_size", + type=int, + required=True, + help="Chunk size assigned to each worker process", + ) + group.add_argument( + "--log_interval", + type=int, + default=100, + help="Interval between progress updates", + ) + group.add_argument("--vocab_extra_ids", type=int, default=0) + group.add_argument( + "--vocab_extra_ids_list", + type=str, + default=None, + help="comma separated list of special vocab ids to add to the tokenizer", + ) + group.add_argument( + "--no_new_tokens", + action="store_false", + dest="new_tokens", + help=( + "Whether to add special tokens (e.g. CLS, MASK, etc) " + "in the sentenciepiece tokenizer or not" + ), + ) + args = parser.parse_args() + args.keep_empty = False + + if args.tokenizer_type.lower().startswith("bert"): + if not args.split_sentences: + print( + "Bert tokenizer detected, are you sure you don't want to split sentences?" 
+ ) + + # some default/dummy values for the tokenizer + args.rank = 0 + args.make_vocab_size_divisible_by = 128 + args.tensor_model_parallel_size = 1 + + return args + + +def main(): + args = get_args() + startup_start = time.time() + + print("Opening", args.input) + fin = open(args.input, "r", encoding="utf-8") + + if nltk_available and args.split_sentences: + nltk.download("punkt", quiet=True) + + encoder = Encoder(args) + tokenizer = build_tokenizer(args) + pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer) + encoded_docs = pool.imap(encoder.encode, fin, args.chunk_size) + # encoded_docs = map(encoder.encode, fin) + + level = "document" + if args.split_sentences: + level = "sentence" + + print(f"Vocab size: {tokenizer.vocab_size}") + print(f"Output prefix: {args.output_prefix}") + output_bin_files = {} + output_idx_files = {} + builders = {} + for key in args.json_keys: + output_bin_files[key] = "{}_{}_{}.bin".format(args.output_prefix, key, level) + output_idx_files[key] = "{}_{}_{}.idx".format(args.output_prefix, key, level) + builders[key] = indexed_dataset.make_builder( + output_bin_files[key], + impl=args.dataset_impl, + vocab_size=tokenizer.vocab_size, + ) + + startup_end = time.time() + proc_start = time.time() + total_bytes_processed = 0 + print("Time to startup:", startup_end - startup_start) + + for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1): + total_bytes_processed += bytes_processed + for key, sentences in doc.items(): + if len(sentences) == 0: + continue + for sentence in sentences: + builders[key].add_item(torch.IntTensor(sentence)) + builders[key].end_document() + if i % args.log_interval == 0: + current = time.time() + elapsed = current - proc_start + mbs = total_bytes_processed / elapsed / 1024 / 1024 + print( + f"Processed {i} documents", + f"({i/elapsed} docs/s, {mbs} MB/s).", + file=sys.stderr, + ) + print("Done! Now finalizing.") + + for key in args.json_keys: + builders[key].finalize(output_idx_files[key]) + + +if __name__ == "__main__": + main() diff --git a/multilinguality_megatron/tools/preprocess_instruct_data.py b/multilinguality_megatron/tools/preprocess_instruct_data.py new file mode 100644 index 0000000000000000000000000000000000000000..50e868f12a70ee5e20a16b720c6241fdeee547ab --- /dev/null +++ b/multilinguality_megatron/tools/preprocess_instruct_data.py @@ -0,0 +1,196 @@ +# Instruction code heavily inspired by Andreas Köpf +# source: https://github.com/andreaskoepf/epfl-megatron/tree/local_changes/ +"""Processing data for instruction tuning. 
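+
+Each json record is rendered into <|im_start|>{role} ... <|im_end|> blocks (see
+format_message below) before tokenization, and a parallel "role" dataset is
+written next to the token ids (see the Role enum and the second DatasetWriter).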
+Example: +python instruct/preprocess_instruct_data.py --input=/pure-mlo-scratch/alhernan/data/medmc/medmc-v1.jsonl \ + --output_prefix=/pure-mlo-scratch/alhernan/data/medmc/medmc-v1 \ + --tokenizer_type=SentencePieceTokenizer \ + --vocab_file=/pure-mlo-scratch/llama/tokenizer.model \ + --chunk_size=32 --workers=32 \ + --vocab_extra_ids_list "[bib_ref],[/bib_ref],[fig_ref],[/fig_ref],[bib],[/bib],[fig],[/fig],[table],[/table],[formula],[/formula],<|im_start|>,<|im_end|>" \ + --question_key=input \ + --answer_key=output \ + --system_key=instruction +""" + +import sys +import json +import time +import itertools +from pathlib import Path +from typing import Optional +from multiprocessing import Pool +from argparse import ArgumentParser, Namespace + +import torch + +sys.path.append(str(Path(__file__).parent.parent.absolute())) +from megatron.tokenizer import build_tokenizer +from megatron.tokenizer.tokenizer import AbstractTokenizer +from megatron.data.indexed_dataset import make_builder +from megatron.data.instruction_dataset import Role + + +class Encoder(object): + tokenizer: Optional[AbstractTokenizer] = None + + def __init__(self, args: Namespace): + self.args = args + + def initializer(self): + Encoder.tokenizer = build_tokenizer(self.args) + + def encode(self, line: str) -> tuple[int, list[int], list[int]]: + # get data + assert Encoder.tokenizer is not None + data = json.loads(line) + question = data[self.args.question_key] + answer = data[self.args.answer_key] + system = None if self.args.system_key is None else data[self.args.system_key] + + # now format messages + if system is not None: + system = format_message(system, "system") + question = format_message(question, "question") + answer = format_message(answer, "answer") + + # tokenize and get roles + tokens = [] + roles = [] + if system is not None: + system = Encoder.tokenizer.tokenize(system) + tokens += system + roles += [Role.system.value]*len(system) + question = Encoder.tokenizer.tokenize(question) + tokens += question + roles += [Role.prompter.value]*len(question) + answer = Encoder.tokenizer.tokenize(answer) + tokens += answer + roles += [Role.assistant.value]*len(answer) + return len(line), tokens, roles + + @property + def special_tokens(self) -> dict: + return self.tokenizer._special_tokens + + +class DatasetWriter: + def __init__(self, prefix: str, vocab_size: int, dataset_impl: str = "mmap", + feature: str = "text"): + self.vocab_size = vocab_size + self.dataset_impl = dataset_impl + self.bin_fname = f"{prefix}-{feature}.bin" + self.idx_fname = f"{prefix}-{feature}.idx" + self.builder = None + + def add_item(self, tokens: list[int]): + self.builder.add_item(torch.IntTensor(tokens)) + + def __enter__(self): + self.builder = make_builder(self.bin_fname, impl=self.dataset_impl, + vocab_size=self.vocab_size) + return self + + def __exit__(self, *_): + self.builder.finalize(self.idx_fname) + self.builder = None + + +def format_message(message: str, role: str) -> str: + return f"<|im_start|>{role}\n{message}<|im_end|>\n" + + +def get_args(): + parser = ArgumentParser() + group = parser.add_argument_group(title='input data') + group.add_argument('--input', type=str, nargs="+", + help='Path(s) to input JSON file(s)') + group.add_argument('--system_key', + help='key to extract system info from json (optional)') + group.add_argument('--question_key', default='input', + help='key to extract questions from json') + group.add_argument('--answer_key', default='output', + help='key to extract answers from json') + + group = 
parser.add_argument_group(title='tokenizer') + group.add_argument('--tokenizer_type', type=str, required=True, + choices=['BertWordPieceLowerCase','BertWordPieceCase', + 'GPT2BPETokenizer', 'SentencePieceTokenizer', 'FalconTokenizer'], + help='What type of tokenizer to use.') + group.add_argument('--vocab_file', type=str, default=None, + help='Path to the vocab file') + group.add_argument('--merge_file', type=str, default=None, + help='Path to the BPE merge file (if necessary).') + group.add_argument('--lang', type=str, default='english', + help='Language to use for NLTK-powered sentence splitting.') + + group = parser.add_argument_group(title='output data') + group.add_argument('--output_prefix', type=Path, required=True, + help='Path to binary output file without suffix') + group.add_argument('--dataset_impl', type=str, default='mmap', + choices=['lazy', 'cached', 'mmap']) + + group = parser.add_argument_group(title='runtime') + group.add_argument('--workers', type=int, required=True, + help='Number of worker processes to launch') + group.add_argument('--chunk_size', type=int, required=True, + help='Chunk size assigned to each worker process') + group.add_argument('--log_interval', type=int, default=100, + help='Interval between progress updates') + group.add_argument('--vocab_extra_ids', type=int, default=0) + group.add_argument('--vocab_extra_ids_list', type=str, default=None, + help='comma separated list of special vocab ids to add to the tokenizer') + group.add_argument("--no_new_tokens", action="store_false", dest="new_tokens", + help=("Whether to add special tokens (e.g. CLS, MASK, etc) " + "in the sentencepiece tokenizer or not")) + args = parser.parse_args() + args.keep_empty = False + + if args.tokenizer_type.lower().startswith('bert'): + if not args.split_sentences: + print("Bert tokenizer detected, are you sure you don't want to split sentences?") + + # some default/dummy values for the tokenizer + args.rank = 0 + args.make_vocab_size_divisible_by = 128 + args.tensor_model_parallel_size = 1 + return args + + +def main(): + args = get_args() + startup_start = time.time() + + encoder = Encoder(args) + vocab_size = build_tokenizer(args).vocab_size + fs = map(open, args.input) + with Pool(args.workers, initializer=encoder.initializer) as pool, \ + DatasetWriter(args.output_prefix, vocab_size, args.dataset_impl, + "text") as token_writer, \ + DatasetWriter(args.output_prefix, 16, args.dataset_impl, + "role") as role_writer: + + f = itertools.chain(*fs) + docs = pool.imap(encoder.encode, f, args.chunk_size) + startup_end = time.time() + proc_start = time.time() + total_bytes_processed = 0 + print("Time to startup:", startup_end - startup_start) + + for i, (size, tokens, roles) in enumerate(docs, start=1): + total_bytes_processed += size + token_writer.add_item(tokens) + role_writer.add_item(roles) + + if i % args.log_interval == 0: + elapsed = time.time() - proc_start + mbs = total_bytes_processed/1024/1024/elapsed + print(f"Processed {i} documents ({i/elapsed} docs/s, {mbs} MB/s).") + print("Done! 
Now finalizing.") + + for f in fs: + f.close() + + +if __name__ == '__main__': + main() diff --git a/multilinguality_megatron/tools/push_to_hub.py b/multilinguality_megatron/tools/push_to_hub.py new file mode 100644 index 0000000000000000000000000000000000000000..5aa9df9ae2d78d88c02c20bd9a107a6c39a3e3ba --- /dev/null +++ b/multilinguality_megatron/tools/push_to_hub.py @@ -0,0 +1,161 @@ +import argparse +import sys + +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Push checkpoints in HF transformers format to the Huggingface Hub.", + epilog="Example usage: python push_to_hub.py /path/to/checkpoint --hf_repo_name your_org/model_name --dtype bf16 --auth_token hf_ba..." + ) + parser.add_argument( + "model_name", + help="Path to checkpoint or model name", + type=str, + ) + parser.add_argument( + "--dtype", + help="auto (default), bf16, fp16 or fp32", + type=str, + default="auto", + ) + parser.add_argument( + "--hf_repo_name", + help="HuggingFace repository name", + type=str, + ) + parser.add_argument( + "--auth_token", + help="User access token (HuggingFace) used for model upload", + type=str, + ) + parser.add_argument( + "--output_folder", + help="Output folder path (e.g. for dtype conversion)", + type=str, + ) + parser.add_argument( + "--max_shard_size", + help="Maximum size for a checkpoint before being sharded (default: 10GB)", + type=str, + default="10GB", + ) + parser.add_argument( + "--unsafe", + help="Disable safetensor serialization", + action="store_true", + default=False, + ) + parser.add_argument( + "--rope_scaling_type", + help="Overwrite rope scaling type (linear, dynamic)", + type=str, + default="linear", + ) + parser.add_argument( + "--rope_scaling_factor", + help="Overwrite rope scaling factor (float >1.0)", + type=float, + ) + parser.add_argument( + "--trust_remote_code", + help="Allow custom model code", + action="store_true", + default=False, + ) + return parser.parse_args() + + +def main(): + args = parse_args() + print(args) + + safe_serialization = not args.unsafe + + if args.dtype in ("float16", "fp16"): + torch_dtype = torch.float16 + elif args.dtype in ("float32", "fp32"): + torch_dtype = torch.float32 + elif args.dtype in ("bfloat16", "bf16"): + torch_dtype = torch.bfloat16 + elif args.dtype == "auto": + torch_dtype = None + else: + print(f"Unsupported dtpye: {args.dtype}") + sys.exit(1) + + if not args.hf_repo_name and not args.output_folder: + print( + "Please specify either `--hf_repo_name` to push to HF or `--output_folder` " + "to export the model to a local folder." 
+ ) + sys.exit(1) + + print(f"Loading tokenizer '{args.model_name}' ...") + print() + + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + print(f"Tokenizer: {type(tokenizer).__name__} (vocab_size: {len(tokenizer):,})") + + print("Special tokens:") + for token in tokenizer.all_special_tokens: + id = tokenizer.convert_tokens_to_ids(token) + print(f"{token}: {id}") + print() + + print(f"Loading model '{args.model_name}' ({args.dtype}) ...") + model = AutoModelForCausalLM.from_pretrained( + args.model_name, + torch_dtype=torch_dtype, + trust_remote_code=args.trust_remote_code, + ) + print(f"Model: {type(model).__name__} (num_parameters={model.num_parameters():,})") + + print("Model architecture:") + print(model) + print() + + if args.rope_scaling_type is not None and args.rope_scaling_factor is not None: + assert args.rope_scaling_type in ("linear", "dynamic") + assert args.rope_scaling_factor >= 1.0 + rope_scaling = { + "type": args.rope_scaling_type, + "factor": args.rope_scaling_factor, + } + print( + f"Setting new rope_scaling config: {rope_scaling} (old: {model.config.rope_scaling})" + ) + model.config.rope_scaling = rope_scaling + + print("Model configuration:") + print(model.config) + print() + + if args.output_folder: + print(f"Saving model to: {args.output_folder}") + model.save_pretrained( + args.output_folder, + max_shard_size=args.max_shard_size, + safe_serialization=safe_serialization, + ) + + print(f"Saving tokenizer to: {args.output_folder}") + tokenizer.save_pretrained(args.output_folder) + + if args.hf_repo_name: + print(f"Uploading model to HF repository ('{args.hf_repo_name}') ...") + model.push_to_hub( + args.hf_repo_name, + use_auth_token=args.auth_token, + max_shard_size=args.max_shard_size, + safe_serialization=safe_serialization, + ) + + print(f"Uploading tokenizer to HF repository ('{args.hf_repo_name}') ...") + tokenizer.push_to_hub(args.hf_repo_name, use_auth_token=args.auth_token) + + +if __name__ == "__main__": + main() diff --git a/multilinguality_megatron/tools/run_text_generation_server.py b/multilinguality_megatron/tools/run_text_generation_server.py new file mode 100644 index 0000000000000000000000000000000000000000..03830f84a8ba480db4e910fcbb4783f87f2016be --- /dev/null +++ b/multilinguality_megatron/tools/run_text_generation_server.py @@ -0,0 +1,84 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
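+#
+# Usage sketch (checkpoint, tokenizer and sampling settings are illustrative):
+#
+#   torchrun --nproc_per_node 1 tools/run_text_generation_server.py \
+#       --load /path/to/megatron/checkpoint \
+#       --tokenizer_type SentencePieceTokenizer \
+#       --vocab_file tokenizer.model \
+#       --temperature 1.0 --top_p 0.9 --out_seq_length 512
+#
+# The first pipeline stage with tensor-parallel rank 0 runs the HTTP server on
+# 0.0.0.0; all other ranks wait in the broadcast loop at the bottom of the file.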
+ +"""Sample Generate GPT""" +import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) +import torch + +import megatron.training +from megatron import get_args +from megatron import print_rank_0 +from megatron.core import mpu +from megatron.checkpointing import load_checkpoint +import megatron.initialize + +from megatron.model import GPTModel +from megatron.text_generation_server import MegatronServer +from megatron.text_generation import generate_and_post_process +from megatron.text_generation import beam_search_and_post_process +from megatron.model import ModelType + + +def model_provider(pre_process=True, post_process=True): + """Build the model.""" + + print_rank_0('building GPT model ...') + model = GPTModel(num_tokentypes=0, + parallel_output=False, + pre_process=pre_process, + post_process=post_process) + return model + + +def add_text_generate_args(parser): + group = parser.add_argument_group(title='text generation') + + group.add_argument("--temperature", type=float, default=1.0, + help='Sampling temperature.') + group.add_argument("--top_p", type=float, default=0.0, + help='Top p sampling.') + group.add_argument("--top_k", type=int, default=0, + help='Top k sampling.') + group.add_argument("--out_seq_length", type=int, default=1024, + help='Size of the output generated text.') + return parser + + +if __name__ == "__main__": + megatron.initialize.initialize_megatron(extra_args_provider=add_text_generate_args, + args_defaults={'tokenizer_type': 'GPT2BPETokenizer', + 'no_load_rng': True, + 'no_load_optim': True}) + args = get_args() + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for text generation.") + exit() + # Set up model and load checkpoint + + model_type = ModelType.encoder_or_decoder + model = megatron.training.get_model(model_provider, model_type, wrap_with_ddp=False, args=args) + + if args.load is not None: + _ = load_checkpoint(model, None, None) + + assert len(model) == 1, "Above condition should have caught this" + model = model[0] + if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0: + server = MegatronServer(model) + server.run("0.0.0.0") + + while True: + choice = torch.cuda.LongTensor(1) + torch.distributed.broadcast(choice, 0) + if choice[0].item() == 0: + try: + generate_and_post_process(model) + except ValueError as ve: + pass + elif choice[0].item() == 1: + try: + beam_search_and_post_process(model) + except ValueError as ve: + pass diff --git a/multilinguality_megatron/tools/text_generation_cli.py b/multilinguality_megatron/tools/text_generation_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..92917fbdd8c3b6498751c651b9dcec9f9e97157d --- /dev/null +++ b/multilinguality_megatron/tools/text_generation_cli.py @@ -0,0 +1,23 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+import json +import sys +import urllib2 + + +class PutRequest(urllib2.Request): + '''class to handling putting with urllib2''' + + def get_method(self, *args, **kwargs): + return 'PUT' + +if __name__ == "__main__": + url = sys.argv[1] + while True: + sentence = raw_input("Enter prompt: ") + tokens_to_generate = int(input("Enter number of tokens to generate: ")) + data = json.dumps({"prompts": [sentence], "tokens_to_generate":tokens_to_generate}) + req = PutRequest(url, data, {'Content-Type': 'application/json'}) + response = urllib2.urlopen(req) + resp_sentences = json.load(response) + print("Megatron Response: ") + print(resp_sentences["text"][0]) diff --git a/multilinguality_megatron/verify_correctness.py b/multilinguality_megatron/verify_correctness.py new file mode 100644 index 0000000000000000000000000000000000000000..769a6bf763e1dab855be92e8365985369de89b1f --- /dev/null +++ b/multilinguality_megatron/verify_correctness.py @@ -0,0 +1,264 @@ +import json +import os +import warnings +from pathlib import Path +from typing import Optional + +#import llama +import torch +from fairscale.nn.model_parallel.initialize import initialize_model_parallel +from torch import nn +from transformers import ( + AutoModelForCausalLM, + LlamaForCausalLM, + LlamaTokenizer, + MistralForCausalLM, + GemmaTokenizer, + GemmaForCausalLM +) + +from finetune import data_provider, extra_args, get_batch, loss_func, model_provider +from megatron import get_args, update_num_microbatches +from megatron.arguments import parse_args +from megatron.initialize import initialize_megatron, set_jit_fusion_options +from megatron.training import ( + _setup_model_and_optimizer, + build_train_valid_test_data_iterators, +) + + +class Llama2Wrapper(nn.Module): + def __init__(self, cache_dir): + super().__init__() + initialize_model_parallel(1) + cache_dir = Path(cache_dir) + checkpoints = sorted(cache_dir.glob("*.pth")) + assert ( + len(checkpoints) == 1 + ), "Currently, only llama2 unsharded models implemented" + with open(cache_dir / "params.json", "r") as f: + params = json.loads(f.read()) + params["vocab_size"] = 32000 + + self.model = llama.Transformer( + llama.ModelArgs(max_seq_len=4096, max_batch_size=1, **params) + ) + self.model.load_state_dict(torch.load(checkpoints[0]), strict=False) + + def forward(self, input_ids, position_ids=None, attention_mask=None, labels=None): + if labels is not None: + warnings.warn("Llama2 does not compute loss") + logits = self.model(input_ids, 0) + loss = torch.tensor(0.0).to(logits.device, logits.dtype) + return {"logits": logits, "loss": loss} + + +def is_meta_llama2_path(path: Optional[Path]) -> bool: + return path is not None and len(list(path.glob("*.pth"))) > 0 + + +def hf_provider( + name: str, cache_dir: Optional[Path], device: str, size: int = 7, bf16: bool = False +): + print("Getting huggingface model...") + extra_kwargs = {} + if bf16: + extra_kwargs = {"torch_dtype": torch.bfloat16} + if name == "falcon": + model = AutoModelForCausalLM.from_pretrained( + f"tiiuae/falcon-{size}b", + cache_dir=cache_dir, + trust_remote_code=True, + **extra_kwargs, + ) + elif name == "llama": + try: + model = LlamaForCausalLM.from_pretrained(cache_dir, **extra_kwargs) + except OSError: + print( + f"Cache dir {cache_dir} does not look like a huggingface " + "checkpoint, assuming cache_dir instead" + ) + model = LlamaForCausalLM.from_pretrained( + f"decapoda-research/llama-{size}b-hf", + cache_dir=cache_dir, + **extra_kwargs, + ) + elif name == "llama2" and is_meta_llama2_path(cache_dir): + print( + 
f"baseline path {cache_dir} does not look like a huggingface, " + "assuming it's raw llama2 weights instead" + ) + model = Llama2Wrapper(cache_dir) + elif name == "llama2": + model = LlamaForCausalLM.from_pretrained(cache_dir, **extra_kwargs) + elif name == "mistral": + assert size == 7, "Mistral only supports 7B model" + try: + model = MistralForCausalLM.from_pretrained(cache_dir, **extra_kwargs) + except OSError: + print( + f"Cache dir {cache_dir} does not look like a huggingface " + "checkpoint, assuming cache_dir instead" + ) + model = MistralForCausalLM.from_pretrained( + f"mistralai/Mistral-{size}B-v0.1", cache_dir=cache_dir, **extra_kwargs + ) + elif name == "gemma": + model = GemmaForCausalLM.from_pretrained(cache_dir, **extra_kwargs) + else: + raise KeyError(f"Model {name} not implemented") + return model.eval().requires_grad_(False).to(device) + + +def hf_our_provider(name: str, data_dir: Path, device: str, size: int = 7): + if name in {"llama", "llama2"}: + model = LlamaForCausalLM.from_pretrained(data_dir) + else: + raise NotImplementedError("Testing custom checkpoints supported for llama") + return model.eval().requires_grad_(False).to(device) + + +def hf_forward(model, batch): + device = next(param.device for param in model.parameters()) + batch = [tensor.to(device) for tensor in batch] + tokens, labels, loss_mask, attention_mask, position_ids = batch + output = model(input_ids=tokens, position_ids=position_ids, labels=tokens) + return output["logits"], output["loss"] + + +def mega_provider(name: str): + print("Getting megatron model...") + model, _, _ = _setup_model_and_optimizer(model_provider, name, args=get_args()) + assert ( + len(model) == 1 + ), "correctness verification only supported with unsharded models" + model = model[0].eval().requires_grad_(False) + return model + + +def mega_forward(model, batch): + tokens, labels, loss_mask, attention_mask, position_ids = batch + assert torch.all(loss_mask) + # we need to do two forward passes to get both the logits and the loss + _, logits = out = model(tokens, position_ids, attention_mask, labels=labels) + loss, _ = loss_func(model.training, batch, out) + return logits, loss + + +def verify_step(our_forward, our_model, base_forward, base_model, batch): + our_logits, our_loss = our_forward(our_model, batch) + base_logits, base_loss = base_forward(base_model, batch) + assert ( + our_logits.size() == base_logits.size() + ), f"ours={our_logits.size()}, true={base_logits.size()}" + our_logits = our_logits.cpu() + base_logits = base_logits.cpu() + abs_error = torch.abs(our_logits - base_logits) + print( + "Max absoulute error in the logits:", + f"max={torch.max(abs_error):.6f}, avg={torch.mean(abs_error):.6f}", + ) + assert our_loss.size() == base_loss.size() + our_loss = our_loss.cpu() + base_loss = base_loss.cpu() + loss_error = torch.abs(our_loss - base_loss) + print( + f"Abs loss error: {loss_error:.6f} " + f"Our loss: {our_loss:.3f}, theirs: {base_loss:.3f}" + ) + + +def is_megatron_path(path): + path = Path(path) if isinstance(path, str) else path + return (path / "latest_checkpointed_iteration.txt").exists() + + +def main(): + # Misc initializations + print("Starting megatron vs huggingface verification") + args = get_args() + set_jit_fusion_options(args) + + # Determine if the provided weight is a megatron checkpoint or huggingface checkpoint + print("Loading our model!") + if is_megatron_path(args.load): + our_model = mega_provider(args.model_name) + our_forward = mega_forward + else: + print( + "NOTE: The given path 
does not look like a megatron checkpoint, " + f"assuming it's a huggingface checkpoint instead (path={args.load})" + ) + our_model = hf_our_provider( + args.model_name, args.load, "cuda:0" + ) + our_forward = hf_forward + args.iteration = 0 + + # Load baseline model + print("Loading baseline model!") + base_model = hf_provider( + args.model_name, args.cache_dir, args.baseline_device, size=args.model_size + ) + base_forward = hf_forward + + # Load dataset iterator + print("Loading dataset!") + data_iterator, _, _ = build_train_valid_test_data_iterators(data_provider, args) + + # Now we can start the verifications + for iteration in range(0, 10): + print(f"Iteration {iteration}...") + update_num_microbatches(args.consumed_train_samples) + args.curr_iteration = iteration + verify_step( + our_forward, our_model, base_forward, base_model, get_batch(data_iterator) + ) + + +def extra_extra_args(parser): + parser = extra_args(parser) + group = parser.add_argument_group(title="huggingface") + group.add_argument( + "--huggingface_cache", + type=Path, + default=None, + dest="cache_dir", + help=( + "If falcon, optional: path to huggingface cache. " + "If llama2, optional: either the huggingface cache path, or " + "the raw weight directory given by meta. " + "If llama, optional: either the path to converted huggingface weights " + "(use convert_llama_weights_to_hf.py) or the huggingface cache dir." + ), + ) + group.add_argument( + "--huggingface_device", + default="cuda:1", + dest="baseline_device", + help="Device to use for the baseline model", + ) + group.add_argument("--model_size", type=int, default=7) + return parser + + +if __name__ == "__main__": + defaults = { + "micro_batch_size": 1, + "use_checkpoint_args": True, + "train_iters": 10, + "lr": 1.0, + } + # if not is_megatron_path(parse_args(extra_extra_args).load): + # defaults.update( + # { + # "encoder_num_layers": 1, + # "hidden_size": 1, + # "num_attention_heads": 1, + # "seq_length": 2048, + # "max_position_embeddings": 2048, + # } + # ) + initialize_megatron(extra_extra_args, args_defaults=defaults) + main() diff --git a/multilinguality_megatron/weights_conversion/hf_to_megatron.py b/multilinguality_megatron/weights_conversion/hf_to_megatron.py new file mode 100644 index 0000000000000000000000000000000000000000..83a762d70397677b8f3592ba3900eb8a3b785df4 --- /dev/null +++ b/multilinguality_megatron/weights_conversion/hf_to_megatron.py @@ -0,0 +1,734 @@ +""" +Convert weights from models in other formats (primairly huggingface) to megatron checkpoints. + +This script supports converting Falcon, LLaMa and LLaMa 2 weights to megatron checkpoints. +Depending on the model to convert, the inputs might differ. +- Falcon: + Weights are automatically retrieved from the official implementation hosted in huggingface. + Thus, the `--cache-dir` argument is optional, if specified it should point to + the huggingface cache directory where the huggingface Falcon weights will be stored. + You will need to specify the `--size` argument to determine which version to download + (i.e. Falcon 7B or 40B). +- LLaMa, LLaMa 2 and CodeLlama: + Converting llama weights can be done either fetching the weights hosted + in huggingface (recommended as it is the easier method) or directly from the + weights provided by Meta. + - From Meta weights (only available for LLaMa and LLaMa 2): + You will need to specify the `--cache-dir` to the directory where the + llama weights are stored. + This will by default have the form `xB` (e.g. 
7B or 70B) for llama v1, + or `llama-2-xb` (e.g. llama-2-7b) for llama v2. + - From huggingface weights: + If `--cache-dir` is not specified or the directory specified does not + contain the format expected from Meta weights, the converter will automatically + retrieve the weights from huggingface, in which case the `--cache-dir` will + have the same semantics as with Falcon. + + Note that to download llama v2 weights from huggingface, you will need to + login using `huggingface-cli login` with a huggingface account which has been + granted access to the `meta-llama/Llama-2-7b-hf` model. + + +In all cases, the megatron checkpoint will be stored in the `--out` argument. +If a huggingface is specified, the intermediate weights (i.e. the huggingface weights) +stored therein will not be removed when the conversion succeeds. +""" + +import re +import shutil +import sys +import warnings +from argparse import ArgumentParser, Namespace +from pathlib import Path +from typing import Optional + +import torch +from tqdm.auto import trange +from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer, GemmaTokenizer, GemmaForCausalLM +from utils.merge_llama import merge_llama +from utils.permute_qkv import permute_qkv + +llama_s2layer = {1:22, 7: 32, 8: 32, 13: 40, 30: 60, 34: 48, 65: 80, 70: 80} +llama_s2heads = {1:32, 7: 32, 8: 32, 13: 40, 30: 52, 34: 64, 65: 64, 70: 64} +llama_s2dense = { + 1: 5632, + 7: 11008, + 8: 14336, + 13: 13824, + 30: 17920, + 34: 22016, + 65: 22016, + 70: 28672, +} # should be (2/3)*4*d, but it isn't exaclty that +llama_s2hidden = {1: 2048, 7: 4096, 8:4096, 13: 5120, 30: 6656, 34: 8192, 65: 8192, 70: 8192} + +gemma_s2layer = {2:18, 7: 28} +gemma_s2heads = {2:8, 7: 16} +gemma_s2dense = {2: 16384, 7: 24576} +gemma_s2hidden = {2: 2048, 7: 3072} +gemma_headsize = {2: 256, 7: 256} + +def falcon_to_megatron(weights: dict, size: int) -> dict: + def permute(qkv_w): + return permute_qkv(qkv_w, dim, n_heads, n_heads_kv) + + embedding = {} + transformer = {} + if size == 7: + n_layer = 32 + dim = 4544 + n_heads = 71 + n_heads_kv = 1 + else: + n_layer = 60 + dim = 8192 + n_heads = 128 + n_heads_kv = 8 + + # weights independent of layers (i.e. 
token embeddings and layernorms + assert torch.allclose( + weights["lm_head.weight"], weights["transformer.word_embeddings.weight"] + ) + embedding["word_embeddings.weight"] = weights["transformer.word_embeddings.weight"] + transformer["final_layernorm.weight"] = weights["transformer.ln_f.weight"] + transformer["final_layernorm.bias"] = weights["transformer.ln_f.bias"] + + # copy weights for each transformer layer + for layer in trange(n_layer, desc="Converting weights"): + prefix1 = f"layers.{layer}" + prefix2 = f"transformer.h.{layer}" + # mlp + transformer[f"{prefix1}.mlp.dense_h_to_4h.weight"] = weights[ + f"{prefix2}.mlp.dense_h_to_4h.weight" + ] + transformer[f"{prefix1}.mlp.dense_4h_to_h.weight"] = weights[ + f"{prefix2}.mlp.dense_4h_to_h.weight" + ] + # qkv weights + transformer[f"{prefix1}.attention.query_key_value.weight"] = permute( + weights[f"{prefix2}.self_attention.query_key_value.weight"] + ) + # dense + transformer[f"{prefix1}.attention.dense.weight"] = weights[ + f"{prefix2}.self_attention.dense.weight" + ] + # falcon7 and falcon40 differ in the input layernorms + if size == 7: + transformer[f"{prefix1}.input_layernorm.weight"] = weights[ + f"{prefix2}.input_layernorm.weight" + ] + transformer[f"{prefix1}.input_layernorm.bias"] = weights[ + f"{prefix2}.input_layernorm.bias" + ] + else: + transformer[f"{prefix1}.input_layernorm.weight"] = weights[ + f"{prefix2}.ln_attn.weight" + ] + transformer[f"{prefix1}.mlp_layernorm.weight"] = weights[ + f"{prefix2}.ln_mlp.weight" + ] + transformer[f"{prefix1}.input_layernorm.bias"] = weights[ + f"{prefix2}.ln_attn.bias" + ] + transformer[f"{prefix1}.mlp_layernorm.bias"] = weights[ + f"{prefix2}.ln_mlp.bias" + ] + return {"embedding": embedding, "transformer": transformer} + + +def llama_to_megatron( + weights: dict, size: int, source: str = "meta", version: int = 1 +) -> dict: + def permute(qkv_w): + if source == "hf": + return permute_qkv(qkv_w, hidden, n_heads, n_kv_heads) + return qkv_w + + def rearrange_qkv(wq, wk, wv): + wq = torch.split(wq, n_hidden_per_head, dim=0) + wk = torch.split(wk, n_hidden_per_head, dim=0) + wv = torch.split(wv, n_hidden_per_head, dim=0) + assert len(wq) == n_heads + assert len(wk) == n_kv_heads + assert len(wv) == n_kv_heads + n_qs_per_kv = n_heads // n_kv_heads + w_qkv = [] + for i in range(n_kv_heads): + w_qkv += [wq[i * n_qs_per_kv + j] for j in range(n_qs_per_kv)] + w_qkv += [wk[i], wv[i]] + return permute(torch.concat(w_qkv)) + + # config + n_layer = llama_s2layer[size] + hidden = llama_s2hidden[size] + n_heads = llama_s2heads[size] + n_hidden_per_head = hidden // n_heads + if version==1 or size==13: + n_kv_heads=n_heads + elif size==1: + n_kv_heads=4 + else: + n_kv_heads=8 + + # weights independent of layers + embedding = {"word_embeddings.weight": weights["tok_embeddings.weight"]} + transformer = {"final_layernorm.weight": weights["norm.weight"]} + lm_head = weights["output.weight"] + + # get all the other weights + for layer in trange(n_layer, desc="Converting weights"): + prefix = f"layers.{layer}" + # identical weights + transformer[f"{prefix}.attention.dense.weight"] = weights[ + f"{prefix}.attention.wo.weight" + ] + transformer[f"{prefix}.post_attention_layernorm.weight"] = weights[ + f"{prefix}.ffn_norm.weight" + ] + transformer[f"{prefix}.input_layernorm.weight"] = weights[ + f"{prefix}.attention_norm.weight" + ] + transformer[f"{prefix}.mlp.dense_4h_to_h.weight"] = weights[ + f"{prefix}.feed_forward.w2.weight" + ] + # concatenate up, gate mlp weights + 
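+        # (megatron keeps the gated-MLP input projection as one dense_h_to_4h
+        #  matrix, with the up projection (w3) stacked above the gate (w1);
+        #  convert_ffn in megatron_to_hf.py splits it back in the same order)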
transformer[f"{prefix}.mlp.dense_h_to_4h.weight"] = torch.concat( + [ + weights[f"{prefix}.feed_forward.w3.weight"], + weights[f"{prefix}.feed_forward.w1.weight"], + ] + ) + # finally, qkv requires serious manipulation to get right + transformer[f"{prefix}.attention.query_key_value.weight"] = rearrange_qkv( + weights[f"{prefix}.attention.wq.weight"], + weights[f"{prefix}.attention.wk.weight"], + weights[f"{prefix}.attention.wv.weight"], + ) + + # release references to original weights (free mem) + del weights[f"{prefix}.feed_forward.w3.weight"] + del weights[f"{prefix}.feed_forward.w1.weight"] + del weights[f"{prefix}.attention.wq.weight"] + del weights[f"{prefix}.attention.wk.weight"] + del weights[f"{prefix}.attention.wv.weight"] + + return {"embedding": embedding, "transformer": transformer, "lm_head": lm_head} + +def gemma_to_megatron( + weights: dict, size: int, source: str = "meta", version: int = 1 +) -> dict: + def permute(qkv_w): + return permute_qkv(qkv_w, hidden, n_heads, n_kv_heads, n_hidden_per_head) + + def rearrange_qkv(wq, wk, wv): + wq = torch.split(wq, n_hidden_per_head, dim=0) + wk = torch.split(wk, n_hidden_per_head, dim=0) + wv = torch.split(wv, n_hidden_per_head, dim=0) + assert len(wq) == n_heads + assert len(wk) == n_kv_heads + assert len(wv) == n_kv_heads + n_qs_per_kv = n_heads // n_kv_heads + w_qkv = [] + for i in range(n_kv_heads): + w_qkv += [wq[i * n_qs_per_kv + j] for j in range(n_qs_per_kv)] + w_qkv += [wk[i], wv[i]] + return permute(torch.concat(w_qkv)) + + # config + n_layer = gemma_s2layer[size] + hidden = gemma_s2hidden[size] + n_heads = gemma_s2heads[size] + n_hidden_per_head = gemma_headsize[size] #hidden // n_heads + + if size==2: + n_kv_heads=1 + else: + n_kv_heads=16 + + # weights independent of layers + embedding = {"word_embeddings.weight": weights["model.embed_tokens.weight"]} + transformer = {"final_layernorm.weight": weights["model.norm.weight"]} + lm_head = weights["lm_head.weight"] + + # get all the other weights + for layer in trange(n_layer, desc="Converting weights"): + prefix = f"layers.{layer}" + prefix_old = f"model.layers.{layer}" + # identical weights + transformer[f"{prefix}.attention.dense.weight"] = weights[ + f"{prefix_old}.self_attn.o_proj.weight" + ] + transformer[f"{prefix}.post_attention_layernorm.weight"] = weights[ + f"{prefix_old}.post_attention_layernorm.weight" + ] + transformer[f"{prefix}.input_layernorm.weight"] = weights[ + f"{prefix_old}.input_layernorm.weight" + ] + transformer[f"{prefix}.mlp.dense_4h_to_h.weight"] = weights[ + f"{prefix_old}.mlp.down_proj.weight" + ] + # concatenate up, gate mlp weights + transformer[f"{prefix}.mlp.dense_h_to_4h.weight"] = torch.concat( + [ + weights[f"{prefix_old}.mlp.up_proj.weight"], + weights[f"{prefix_old}.mlp.gate_proj.weight"], + ] + ) + # finally, qkv requires serious manipulation to get right + transformer[f"{prefix}.self_attention.query_key_value.weight"] = rearrange_qkv( + weights[f"{prefix_old}.self_attn.q_proj.weight"], + weights[f"{prefix_old}.self_attn.k_proj.weight"], + weights[f"{prefix_old}.self_attn.v_proj.weight"], + ) + + # release references to original weights (free mem) + del weights[f"{prefix_old}.mlp.up_proj.weight"] + del weights[f"{prefix_old}.mlp.down_proj.weight"] + del weights[f"{prefix_old}.self_attn.q_proj.weight"] + del weights[f"{prefix_old}.self_attn.k_proj.weight"] + del weights[f"{prefix_old}.self_attn.v_proj.weight"] + + return {"embedding": embedding, "transformer": transformer, "lm_head": lm_head} + +def mistral_to_megatron(weights: dict, 
size: int) -> dict: + assert size == 7 + + def permute(qkv_w): + # if source == "hf": + # by default, we pull mistrals weights from huggingface + return permute_qkv(qkv_w, hidden, n_heads, n_kv_heads) + # return qkv_w + + def rearrange_qkv(wq, wk, wv): + wq = torch.split(wq, n_hidden_per_head, dim=0) + wk = torch.split(wk, n_hidden_per_head, dim=0) + wv = torch.split(wv, n_hidden_per_head, dim=0) + assert len(wq) == n_heads + assert len(wk) == n_kv_heads + assert len(wv) == n_kv_heads + n_qs_per_kv = n_heads // n_kv_heads + w_qkv = [] + for i in range(n_kv_heads): + w_qkv += [wq[i * n_qs_per_kv + j] for j in range(n_qs_per_kv)] + w_qkv += [wk[i], wv[i]] + return permute(torch.concat(w_qkv)) + + # config + if size == 7: + n_layer = 32 + hidden = 4096 + n_heads = 32 + n_kv_heads = 8 + n_hidden_per_head = hidden // n_heads + + # weights independent of layers + embedding = {"word_embeddings.weight": weights["model.embed_tokens.weight"]} + transformer = {"final_layernorm.weight": weights["model.norm.weight"]} + lm_head = weights["lm_head.weight"] + + # get all the other weights + for layer in trange(n_layer, desc="Converting weights"): + prefix = f"layers.{layer}" + hf_prefix = f"model.{prefix}" + # identical weights + transformer[f"{prefix}.attention.dense.weight"] = weights[ + f"{hf_prefix}.self_attn.o_proj.weight" + ] + transformer[f"{prefix}.post_attention_layernorm.weight"] = weights[ + f"{hf_prefix}.post_attention_layernorm.weight" + ] + transformer[f"{prefix}.input_layernorm.weight"] = weights[ + f"{hf_prefix}.input_layernorm.weight" + ] + transformer[f"{prefix}.mlp.dense_4h_to_h.weight"] = weights[ + f"{hf_prefix}.mlp.down_proj.weight" + ] + # concatenate up, gate mlp weights + transformer[f"{prefix}.mlp.dense_h_to_4h.weight"] = torch.concat( + [ + weights[f"{hf_prefix}.mlp.up_proj.weight"], # w3 + weights[f"{hf_prefix}.mlp.gate_proj.weight"], # w1 + ] + ) + # finally, qkv requires serious manipulation to get right (probably same as llama-2) + transformer[f"{prefix}.attention.query_key_value.weight"] = rearrange_qkv( + weights[f"{hf_prefix}.self_attn.q_proj.weight"], + weights[f"{hf_prefix}.self_attn.k_proj.weight"], + weights[f"{hf_prefix}.self_attn.v_proj.weight"], + ) + + # release references to original weights (free mem) + del weights[f"{hf_prefix}.mlp.up_proj.weight"] + del weights[f"{hf_prefix}.mlp.gate_proj.weight"] + del weights[f"{hf_prefix}.self_attn.q_proj.weight"] + del weights[f"{hf_prefix}.self_attn.k_proj.weight"] + del weights[f"{hf_prefix}.self_attn.v_proj.weight"] + + return {"embedding": embedding, "transformer": transformer, "lm_head": lm_head} + + +def main( + model_name: str = "falcon", + size: int = 7, + out: Optional[Path] = None, + cache_dir: Optional[Path] = None, + model_path: Optional[str] = None, +): + if out is None: + out = Path(f"falcon{size}b_megatron.pt").absolute() + + # get weights from or specified directory + if model_name == "falcon": + print("Fetching weights from huggingface") + if model_path is None: + model_path = (f"tiiuae/falcon-{size}b",) + model = AutoModelForCausalLM.from_pretrained( + model_path, trust_remote_code=True, cache_dir=cache_dir + ) + hf_weights = model.state_dict() + elif model_name == "mistral": + print("Fetching weights from huggingface") + if model_path is None: + model_path = "mistralai/Mistral-7B-v0.1" + model = AutoModelForCausalLM.from_pretrained( + model_path, trust_remote_code=True, cache_dir=cache_dir + ) + hf_weights = model.state_dict() + elif model_name == 'gemma': + print("Fetching weights from huggingface") + 
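+        # note: for gemma the checkpoint is always pulled from the official
+        # google/gemma-{2,7}b repos selected by --size below, so a user-supplied
+        # --model-path is not used in this branch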
if size == 2: + model_path = "google/gemma-2b" + elif size == 7: + model_path = "google/gemma-7b" + model = GemmaForCausalLM.from_pretrained( + model_path, trust_remote_code=True, cache_dir=cache_dir + ) + hf_weights = model.state_dict() + else: + print("Getting llama...") + version = 2 if "2" in model_name else 1 + hf_weights, llama_source = merge_llama( + size, version, root_dir=cache_dir, model_path=model_path + ) + + # convert state dict to be megatron-compatible + if model_name == "falcon": + megatron_weights = falcon_to_megatron(hf_weights, size) + elif model_name == "mistral": + megatron_weights = mistral_to_megatron(hf_weights, size) + elif model_name == "gemma": + megatron_weights = gemma_to_megatron(hf_weights, size) + else: + megatron_weights = llama_to_megatron( + hf_weights, size, llama_source, version=1 if model_name == "llama" else 2 + ) + + # save converted weights in specified out + (out / "release" / "mp_rank_00").mkdir(parents=True) + + if model_name in {"llama", "llama2", "llama3"} and llama_source == "hf": + tokenizer = None + if model_path is not None: + try: + if model_name == "llama2": + tokenizer = LlamaTokenizer.from_pretrained( + model_path, cache_dir=cache_dir) + else: + tokenizer = AutoTokenizer.from_pretrained( + model_path, cache_dir=cache_dir) + except OSError: + warnings.warn( + f"Model path {model_path} does not have a " + "tokenizer, using default tokenizer instead" + ) + if tokenizer is None: + if model_name == "llama2": + name = "meta-llama/Llama-2-7b-hf" + elif model_name == "llama3": + name = "meta-llama/Llama-3-8b" + else: + name = "decapoda-research/llama-7b-hf" + tokenizer = LlamaTokenizer.from_pretrained(name, cache_dir=cache_dir) + + if model_name!="llama3": + token_path = out / "tokenizer.model" + vocab_file = tokenizer.vocab_file + shutil.copy(vocab_file, token_path) + print("Saved tokenizer.model in", token_path) + elif model_name == "mistral": + tokenizer = None + if model_path is not None: + try: + tokenizer = LlamaTokenizer.from_pretrained( + model_path, cache_dir=cache_dir + ) + except OSError: + warnings.warn( + f"Model path {model_path} does not have a " + "tokenizer, using default tokenizer instead" + ) + if tokenizer is None: + tokenizer = LlamaTokenizer.from_pretrained( + "mistralai/Mistral-7B-v0.1", cache_dir=cache_dir + ) + token_path = out / "tokenizer.model" + vocab_file = tokenizer.vocab_file + shutil.copy(vocab_file, token_path) + print("Saved tokenizer.model in", token_path) + elif model_name == "gemma": + tokenizer = None + if model_path is not None: + try: + tokenizer = GemmaTokenizer.from_pretrained( + model_path, cache_dir=cache_dir + ) + except OSError: + warnings.warn( + f"Model path {model_path} does not have a " + "tokenizer, using default tokenizer instead" + ) + if tokenizer is None: + tokenizer = GemmaTokenizer.from_pretrained( + "google/gemma-2b", cache_dir=cache_dir + ) + token_path = out / "tokenizer.model" + vocab_file = tokenizer.vocab_file + shutil.copy(vocab_file, token_path) + print("Saved tokenizer.model in", token_path) + + # set args + dtype = megatron_weights["embedding"]["word_embeddings.weight"].dtype + if model_name == "falcon": + if size == 7: + args = { + "num_layers": 32, + "hidden_size": 4544, + "num_attention_heads": 71, + "num_attention_heads_kv": 1, + } + else: + args = { + "num_layers": 60, + "hidden_size": 8192, + "num_attention_heads": 128, + "num_attention_heads_kv": 8, + "parallel_layernorm": True, + } + args.update( + { + "tokenizer_type": "FalconTokenizer", + "use_flash_attn": True, + 
"hidden_dropout": 0.0, + "parallel_attn": True, + "max_position_embeddings": 2048, + "seq_length": 2048, + } + ) + elif model_name == "mistral": + assert size == 7 + # mistral-7b mostly uses the same args as llama-7b + # https://huggingface.co/mistralai/Mistral-7B-v0.1/blob/main/config.json + args = { + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "num_attention_heads_kv": 8, # except this - GroupedAttention + "ffn_hidden_size": 14336, # except this + "parallel_attn": False, + "make_vocab_size_divisible_by": 128, + "glu_activation": "swiglu", # == silu + "padded_vocab_size": 32000, + "use_rms_norm": True, + "tie_embed_logits": False, + "tokenizer_type": "SentencePieceTokenizer", + "max_position_embeddings": 32768, + "seq_length": 32768, + "layernorm_epsilon": 1e-5, + "rope_theta": 10000.0, + "sliding_window_size": 4096, + } + elif model_name == "gemma": + args = { + "num_layers": gemma_s2layer[size], + "hidden_size": gemma_s2hidden[size], + "num_attention_heads": gemma_s2heads[size], + "ffn_hidden_size": gemma_s2dense[size], + "head_dim": gemma_headsize[size], + "parallel_attn": False, + "make_vocab_size_divisible_by": 128, + "glu_activation": "geglu", + "padded_vocab_size": 256000, + "use_rms_norm": True, + "tie_embed_logits": True, + "tokenizer_type": "SentencePieceTokenizer", + "max_position_embeddings": 8192, + "seq_length": 8192, + "layernorm_epsilon": 1e-6, + "rope_theta": 10000.0, + "sliding_window_size": 4096, + } + if size==2: + args.update( + { + "num_attention_heads_kv": 1, + } + ) + elif size==7: + args.update( + { + "num_attention_heads_kv": 16, + } + ) + + else: # llama1, llama2, codellama + make_vocab_size_divisible_by = 64 + padded_vocab_size = 37005 + # if len(tokenizer) % make_vocab_size_divisible_by == 0: + # padded_vocab_size = len(tokenizer) + # else: + # padded_vocab_size = len(tokenizer) + (make_vocab_size_divisible_by - (len(tokenizer) % make_vocab_size_divisible_by)) + args = { + "num_layers": llama_s2layer[size], + "hidden_size": llama_s2hidden[size], + "num_attention_heads": llama_s2heads[size], + "ffn_hidden_size": llama_s2dense[size], + "parallel_attn": False, + "make_vocab_size_divisible_by": make_vocab_size_divisible_by, # changed to accommodate for extend32 + "glu_activation": "swiglu", + "padded_vocab_size": padded_vocab_size, + "use_rms_norm": True, + "tie_embed_logits": False, + "tokenizer_type": "SentencePieceTokenizer", + } + if model_name == "llama": + args.update( + { + "max_position_embeddings": 2048, + "seq_length": 2048, + "layernorm_epsilon": 1e-6, + } + ) + elif model_name == "llama2": + if size==1: + args.update( + { + "num_attention_heads_kv": 4, + "max_position_embeddings": 2048, + "seq_length": 2048, + "layernorm_epsilon": 1e-5, + } + ) + + args.update( + { + "max_position_embeddings": 4096, + "seq_length": 4096, + "layernorm_epsilon": 1e-5, + } + ) + if size >= 34: + args.update({"num_attention_heads_kv": 8}) + elif model_name == "llama3": + args.update( + { + "num_attention_heads_kv": 8, + "max_position_embeddings": 8192, + "seq_length": 8192, + "layernorm_epsilon": 1e-6, + "rope_theta": 500000, + "padded_vocab_size": 128256 + } + ) + elif model_name == "codellama": + args.update( + { + "max_position_embeddings": 16384, + "seq_length": 16384, + "layernorm_epsilon": 1e-5, + "rope_theta": 1e6, + } + ) + if size >= 34: + args.update({"num_attention_heads_kv": 8}) + if size < 34 and not re.match(r"CodeLlama-\d+b-Python", cache_dir): + args.update({"padded_vocab_size": 32016}) + else: + sys.exit( + f"Model name has to 
be llama, llama2 or codellama, not {model_name}." + ) + + args.update( + { + "tensor_model_parallel_size": 1, + "pipeline_model_parallel_size": 1, + "iteration": "release", + "bias_gelu_fusion": False, + "bias_droput_fusion": False, + "position_embedding_type": "rotary", + } + ) + + with open(out / "latest_checkpointed_iteration.txt", "w+") as f: + f.write("release") + final_dict = { + "iteration": "release", + "model": {"language_model": megatron_weights}, + "checkpoint_version": 3.0, + "args": Namespace(**args), + } + torch.save(final_dict, out / "release" / "mp_rank_00" / "model_optim_rng.pt") + print("Saved weights in", out) + + print("Done") + + +if __name__ == "__main__": + parser = ArgumentParser( + description="Convert Huggingface llama or falcon weights to " + "megatron-compatible weights" + ) + parser.add_argument( + "model", choices={"falcon", "llama", "llama2", "llama3", "codellama", "mistral", "gemma"} + ) + parser.add_argument( + "--size", + default=7, + choices={1, 2, 7, 8, 13, 30, 34, 40, 65, 70}, + type=int, + help="The size of the model", + ) + parser.add_argument( + "--out", + type=Path, + help="Directory to store the megatron weights (as checkpoint)", + ) + parser.add_argument( + "--model-path", + help="Sets model_name_or_path when fetching weights from huggingface", + ) + parser.add_argument( + "--cache-dir", + type=Path, + help=( + "Directory to use as cache for the huggingface " + "weights, or in case of the llama model, the path " + "of the weights privided Meta" + ), + ) + args = parser.parse_args() + + # small arg verification + if args.model == "falcon": + assert args.size in {7, 40} + elif args.model == "llama": + assert args.size in {7, 13, 30, 65} + elif args.model == "codellama": + assert args.size in {7, 13, 34} + elif args.model == "mistral": + assert args.size in {7} + elif args.model == "gemma": + assert args.size in {2, 7} + else: + assert args.size in {1, 7, 8, 13, 70} + + main(args.model, args.size, args.out, args.cache_dir, args.model_path) diff --git a/multilinguality_megatron/weights_conversion/megatron_to_hf.py b/multilinguality_megatron/weights_conversion/megatron_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..d47632d496748ea0f148036e3197084ea7ea6b37 --- /dev/null +++ b/multilinguality_megatron/weights_conversion/megatron_to_hf.py @@ -0,0 +1,896 @@ +""" +Convert megatron checkpoints to huggingface weights. + +This script will also convert the tokenizer configured. +Set the `--input_dir` to the megatron checkpoint root (i.e. where the +`latest_checkpointed_iteration.txt` file is located) and `--output_dir` to +the directory where the huggingface weights should be stored. +""" + +# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
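+# Example invocation (directory names below are placeholders, not part of this repo):
+#   python weights_conversion/megatron_to_hf.py \
+#       --input_dir=/checkpoints/llama2-7b-megatron \
+#       --output_dir=/checkpoints/llama2-7b-hf \
+#       --model=llama2 \
+#       --vocab_file=/checkpoints/llama2-7b-megatron/tokenizer.model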
+import gc +import json +import os +import sys +import warnings +from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +sys.path.append(str(Path(__file__).parent.parent.absolute())) # megatron is importable + +import torch +from tqdm.auto import trange +from transformers import ( + AutoTokenizer, + FalconConfig, + FalconForCausalLM, + LlamaConfig, + LlamaForCausalLM, + LlamaTokenizerFast, + MistralConfig, + MistralForCausalLM, + GemmaConfig, + GemmaForCausalLM, + GemmaTokenizerFast, +) +from utils.permute_qkv import permute_qkv + +from megatron.tokenizer import build_tokenizer + + +def write_json(text, path): + with open(path, "w") as f: + json.dump(text, f) + + +def convert_wqkv( + llama_mega, layer_idx=0, n_heads=32, n_heads_kv=8, head_size: int = None +): + qkv_w = llama_mega["transformer"][ + f"layers.{layer_idx}.attention.query_key_value.weight" + ] + n_hidden = qkv_w.size(1) + if head_size is None: + hidden_dim = n_hidden // n_heads + else: + hidden_dim = head_size + qkv_w = permute_qkv( + qkv_w, n_hidden, n_heads, n_heads_kv, n_hidden_per_head=head_size, revert=True + ) + + n_qs_per_kv = n_heads // n_heads_kv + n_groups = qkv_w.size(0) // hidden_dim // (n_qs_per_kv + 2) + qkv_w = list(torch.split(qkv_w, hidden_dim, dim=0)) + + wq, wk, wv = [], [], [] + for group in range(n_groups): + for qs in range(n_qs_per_kv): + wq.append(qkv_w[0]) + del qkv_w[0] + wk.append(qkv_w[0]) + del qkv_w[0] + wv.append(qkv_w[0]) + del qkv_w[0] + assert len(qkv_w) == 0 + + wq = torch.concat(wq, dim=0) + wk = torch.concat(wk, dim=0) + wv = torch.concat(wv, dim=0) + return wq, wk, wv + + +def convert_ffn(llama_mega, layer_idx=0, n_dense=11008): + mega_ffn = llama_mega["transformer"][f"layers.{layer_idx}.mlp.dense_h_to_4h.weight"] + ffn_w3, ffn_w1 = mega_ffn.split(n_dense, dim=0) + return ffn_w1, ffn_w3 + + +def write_llama_model( + model_path, + input_base_path, + num_output_shards: int = 2, + norm_eps: float = 1e-05, + rope_theta: float = 1e4, +): + # Preliminaries + print(f"Fetching all parameters from the checkpoint at {input_base_path}.") + os.makedirs(model_path, exist_ok=True) + with open(os.path.join(input_base_path, "latest_checkpointed_iteration.txt")) as f: + iteration = f.read() + if iteration != "release": + iteration = f"iter_{int(iteration):07d}" + print(f"Fetching iteration {iteration}") + + # Load weights + base_path = Path(input_base_path) / iteration + assert ( + len(list(base_path.glob("mp_rank_*"))) == 1 + ), "Unshard your model with checkpoint_util.py first!" 
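+    # model_optim_rng.pt is a dict with "args" (the megatron Namespace used at
+    # save time) and "model"["language_model"] holding the embedding,
+    # transformer (or "encoder") and lm_head state dicts converted below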
+ loaded = torch.load( + base_path / "mp_rank_00" / "model_optim_rng.pt", map_location="cpu" + ) + args = loaded["args"] + + loaded = loaded["model"]["language_model"] + if "transformer" not in loaded: # normalize key names + loaded["transformer"] = loaded.pop("encoder") + for key in list(loaded["transformer"].keys()): + loaded["transformer"][key.replace("self_attention", "attention")] = loaded[ + "transformer" + ].pop(key) + loaded["embedding"]["word_embeddings.weight"] = loaded["embedding"].pop( + "word_embeddings" + )["weight"] + args.num_layers = args.encoder_num_layers + + # Load arguments + n_layers = args.num_layers + n_heads = args.num_attention_heads + n_heads_kv = getattr(args, "num_attention_heads_kv", n_heads) + n_dense = args.ffn_hidden_size + n_hidden = args.hidden_size + hidden_per_head = n_hidden // n_heads + intermediate_size = args.ffn_hidden_size + inv_freq = 1.0 / ( + rope_theta ** (torch.arange(0, hidden_per_head, 2).float() / hidden_per_head) + ) + + print("Llama-Megatron Loaded!") + param_count = 0 + index_dict = {"weight_map": {}} + + # Start conversion + with TemporaryDirectory() as tmp_model_path: + print(f"Weighted Converting for {n_layers} layers...") + for layer_i in range(n_layers): + filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" + wq_proj, wk_proj, wv_proj = convert_wqkv( + llama_mega=loaded, + layer_idx=layer_i, + n_heads=n_heads, + n_heads_kv=n_heads_kv, + ) + ffn_w1, ffn_w3 = convert_ffn( + llama_mega=loaded, layer_idx=layer_i, n_dense=n_dense + ) + state_dict = { + f"model.layers.{layer_i}.self_attn.q_proj.weight": wq_proj, + f"model.layers.{layer_i}.self_attn.k_proj.weight": wk_proj, + f"model.layers.{layer_i}.self_attn.v_proj.weight": wv_proj, + f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[ + "transformer" + ][f"layers.{layer_i}.attention.dense.weight"], + f"model.layers.{layer_i}.mlp.gate_proj.weight": ffn_w1, + f"model.layers.{layer_i}.mlp.down_proj.weight": loaded["transformer"][ + f"layers.{layer_i}.mlp.dense_4h_to_h.weight" + ], + f"model.layers.{layer_i}.mlp.up_proj.weight": ffn_w3, + f"model.layers.{layer_i}.input_layernorm.weight": loaded["transformer"][ + f"layers.{layer_i}.input_layernorm.weight" + ], + f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[ + "transformer" + ][f"layers.{layer_i}.post_attention_layernorm.weight"], + f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq": inv_freq, + } + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + print(f"Sharded file saved to {filename}") + + filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" + state_dict = { + "model.norm.weight": loaded["transformer"]["final_layernorm.weight"], + "lm_head.weight": loaded["lm_head"], + "model.embed_tokens.weight": loaded["embedding"]["word_embeddings.weight"], + } + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch_dtype = state_dict["lm_head.weight"].dtype + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + print(f"Sharded file saved to {filename}") + + # Write configs and save + index_dict["metadata"] = {"total_size": param_count * 2} + write_json( + index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json") + ) + config = LlamaConfig( + vocab_size=args.padded_vocab_size, + hidden_size=n_hidden, + intermediate_size=intermediate_size, + num_attention_heads=n_heads, + 
num_hidden_layers=n_layers, + rms_norm_eps=norm_eps, + num_key_value_heads=n_heads_kv, + max_position_embeddings=args.seq_length, + ) + config.save_pretrained(tmp_model_path) + + # Make space so we can load the model properly now. + del state_dict + del loaded + gc.collect() + + print("Loading the checkpoint in a Llama model...") + model = LlamaForCausalLM.from_pretrained( + tmp_model_path, torch_dtype=torch_dtype + ) + # Avoid saving this as part of the config. + del model.config._name_or_path + + print("Saving in the Transformers format.") + max_num_params_per_shard = param_count * 2 // max(1, (num_output_shards - 1)) + model.save_pretrained( + model_path, max_shard_size=max_num_params_per_shard, safe_serialization=False + ) + + +def write_gemma_model( + model_path, + input_base_path, + num_output_shards: int = 2, + norm_eps: float = 1e-06, + rope_theta: float = 1e4, +): + # Preliminaries + print(f"Fetching all parameters from the checkpoint at {input_base_path}.") + os.makedirs(model_path, exist_ok=True) + with open(os.path.join(input_base_path, "latest_checkpointed_iteration.txt")) as f: + iteration = f.read() + if iteration != "release": + iteration = f"iter_{int(iteration):07d}" + print(f"Fetching iteration {iteration}") + + # Load weights + base_path = Path(input_base_path) / iteration + assert ( + len(list(base_path.glob("mp_rank_*"))) == 1 + ), "Unshard your model with checkpoint_util.py first!" + loaded = torch.load( + base_path / "mp_rank_00" / "model_optim_rng.pt", map_location="cpu" + ) + args = loaded["args"] + + loaded = loaded["model"]["language_model"] + if "transformer" not in loaded: # normalize key names + loaded["transformer"] = loaded.pop("encoder") + for key in list(loaded["transformer"].keys()): + loaded["transformer"][key.replace("self_attention", "attention")] = loaded[ + "transformer" + ].pop(key) + loaded["embedding"]["word_embeddings.weight"] = loaded["embedding"].pop( + "word_embeddings" + )["weight"] + args.num_layers = args.encoder_num_layers + + # Load arguments + n_layers = args.num_layers + n_heads = args.num_attention_heads + n_heads_kv = getattr(args, "num_attention_heads_kv", n_heads) + n_dense = args.ffn_hidden_size + n_hidden = args.hidden_size + hidden_per_head = n_hidden // n_heads + intermediate_size = args.ffn_hidden_size + inv_freq = 1.0 / ( + rope_theta ** (torch.arange(0, hidden_per_head, 2).float() / hidden_per_head) + ) + + print("Gemma-Megatron Loaded!") + param_count = 0 + index_dict = {"weight_map": {}} + + # Start conversion + gemma_headsize = 256 + with TemporaryDirectory() as tmp_model_path: + print(f"Weighted Converting for {n_layers} layers...") + for layer_i in range(n_layers): + filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" + wq_proj, wk_proj, wv_proj = convert_wqkv( + llama_mega=loaded, + layer_idx=layer_i, + n_heads=n_heads, + n_heads_kv=n_heads_kv, + head_size=gemma_headsize, + ) + ffn_w1, ffn_w3 = convert_ffn( + llama_mega=loaded, layer_idx=layer_i, n_dense=n_dense + ) + state_dict = { + f"model.layers.{layer_i}.self_attn.q_proj.weight": wq_proj, + f"model.layers.{layer_i}.self_attn.k_proj.weight": wk_proj, + f"model.layers.{layer_i}.self_attn.v_proj.weight": wv_proj, + f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[ + "transformer" + ][f"layers.{layer_i}.attention.dense.weight"], + f"model.layers.{layer_i}.mlp.gate_proj.weight": ffn_w1, + f"model.layers.{layer_i}.mlp.down_proj.weight": loaded["transformer"][ + f"layers.{layer_i}.mlp.dense_4h_to_h.weight" + ], + 
f"model.layers.{layer_i}.mlp.up_proj.weight": ffn_w3, + f"model.layers.{layer_i}.input_layernorm.weight": loaded["transformer"][ + f"layers.{layer_i}.input_layernorm.weight" + ], + f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[ + "transformer" + ][f"layers.{layer_i}.post_attention_layernorm.weight"], + f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq": inv_freq, + } + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + print(f"Sharded file saved to {filename}") + + filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" + state_dict = { + "model.norm.weight": loaded["transformer"]["final_layernorm.weight"], + # "lm_head.weight": loaded["lm_head"], + "model.embed_tokens.weight": loaded["embedding"]["word_embeddings.weight"], + } + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch_dtype = state_dict["model.norm.weight"].dtype + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + print(f"Sharded file saved to {filename}") + + # Write configs and save + index_dict["metadata"] = {"total_size": param_count * 2} + write_json( + index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json") + ) + config = GemmaConfig( + vocab_size=args.padded_vocab_size, + hidden_size=n_hidden, + intermediate_size=intermediate_size, + num_attention_heads=n_heads, + num_hidden_layers=n_layers, + rms_norm_eps=norm_eps, + num_key_value_heads=n_heads_kv, + max_position_embeddings=args.seq_length, + ) + config.save_pretrained(tmp_model_path) + + # Make space so we can load the model properly now. + del state_dict + del loaded + gc.collect() + + print("Loading the checkpoint in a Gemma model...") + model = GemmaForCausalLM.from_pretrained( + tmp_model_path, torch_dtype=torch_dtype + ) + # Avoid saving this as part of the config. + del model.config._name_or_path + print(model) + + print("Saving in the Transformers format.") + max_num_params_per_shard = param_count * 2 // max(1, (num_output_shards - 1)) + model.save_pretrained( + model_path, max_shard_size=max_num_params_per_shard, safe_serialization=False + ) + + +def write_mistral_model( + model_path, + input_base_path, + num_output_shards: int = 2, + norm_eps: float = 1e-5, + rope_theta: float = 10000.0, + vocab_size: int = None, +): + # Preliminaries + print(f"Fetching all parameters from the checkpoint at {input_base_path}.") + os.makedirs(model_path, exist_ok=True) + with open(os.path.join(input_base_path, "latest_checkpointed_iteration.txt")) as f: + iteration = f.read() + if iteration != "release": + iteration = f"iter_{int(iteration):07d}" + print(f"Fetching iteration {iteration}") + + # Load weights + base_path = Path(input_base_path) / iteration + assert ( + len(list(base_path.glob("mp_rank_*"))) == 1 + ), "Unshard your model with checkpoint_util.py first!" 
+ loaded = torch.load( + base_path / "mp_rank_00" / "model_optim_rng.pt", map_location="cpu" + ) + args = loaded["args"] + + loaded = loaded["model"]["language_model"] + if "transformer" not in loaded: # normalize key names + loaded["transformer"] = loaded.pop("encoder") + for key in list(loaded["transformer"].keys()): + loaded["transformer"][key.replace("self_attention", "attention")] = loaded[ + "transformer" + ].pop(key) + loaded["embedding"]["word_embeddings.weight"] = loaded["embedding"].pop( + "word_embeddings" + )["weight"] + args.num_layers = args.encoder_num_layers + + # Load arguments + n_layers = args.num_layers + n_heads = args.num_attention_heads + n_heads_kv = getattr(args, "num_attention_heads_kv", n_heads) + n_dense = args.ffn_hidden_size + n_hidden = args.hidden_size + hidden_per_head = n_hidden // n_heads + intermediate_size = args.ffn_hidden_size + inv_freq = 1.0 / ( + rope_theta ** (torch.arange(0, hidden_per_head, 2).float() / hidden_per_head) + ) + + print("Mistral-Megatron Loaded!") + param_count = 0 + index_dict = {"weight_map": {}} + + # Start conversion + with TemporaryDirectory() as tmp_model_path: + print(f"Weighted Converting for {n_layers} layers...") + for layer_i in range(n_layers): + filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" + wq_proj, wk_proj, wv_proj = convert_wqkv( + llama_mega=loaded, + layer_idx=layer_i, + n_heads=n_heads, + n_heads_kv=n_heads_kv, + ) + ffn_w1, ffn_w3 = convert_ffn( + llama_mega=loaded, layer_idx=layer_i, n_dense=n_dense + ) + state_dict = { + f"model.layers.{layer_i}.self_attn.q_proj.weight": wq_proj, + f"model.layers.{layer_i}.self_attn.k_proj.weight": wk_proj, + f"model.layers.{layer_i}.self_attn.v_proj.weight": wv_proj, + f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[ + "transformer" + ][f"layers.{layer_i}.attention.dense.weight"], + f"model.layers.{layer_i}.mlp.gate_proj.weight": ffn_w1, + f"model.layers.{layer_i}.mlp.down_proj.weight": loaded["transformer"][ + f"layers.{layer_i}.mlp.dense_4h_to_h.weight" + ], + f"model.layers.{layer_i}.mlp.up_proj.weight": ffn_w3, + f"model.layers.{layer_i}.input_layernorm.weight": loaded["transformer"][ + f"layers.{layer_i}.input_layernorm.weight" + ], + f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[ + "transformer" + ][f"layers.{layer_i}.post_attention_layernorm.weight"], + f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq": inv_freq, + } + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + print(f"Sharded file saved to {filename}") + + filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" + state_dict = { + "model.norm.weight": loaded["transformer"]["final_layernorm.weight"], + "lm_head.weight": loaded["lm_head"], + "model.embed_tokens.weight": loaded["embedding"]["word_embeddings.weight"], + } + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch_dtype = state_dict["lm_head.weight"].dtype + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + print(f"Sharded file saved to {filename}") + + # Write configs and save + index_dict["metadata"] = {"total_size": param_count * 2} + write_json( + index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json") + ) + + # load mistral config from huggingface + config = MistralConfig.from_pretrained("mistralai/Mistral-7B-v0.1") + # assert configuration matches + assert config.hidden_size == 
n_hidden + assert config.intermediate_size == intermediate_size + assert config.num_attention_heads == n_heads + assert config.num_hidden_layers == n_layers + assert config.rms_norm_eps == norm_eps + assert config.num_key_value_heads == n_heads_kv + # Set vocab size + config.vocab_size = args.padded_vocab_size + config.save_pretrained(tmp_model_path) + + # Make space so we can load the model properly now. + del state_dict + del loaded + gc.collect() + + if vocab_size is None: + vocab_size = args.padded_vocab_size + else: + print( + f"Using vocab size {vocab_size} from tokenizer and not {args.padded_vocab_size} from args." + ) + # update config + config.vocab_size = vocab_size + + print("Loading the checkpoint in a Llama model...") + model = MistralForCausalLM.from_pretrained( + tmp_model_path, torch_dtype=torch_dtype + ) + model.config.vocab_size = vocab_size + # resizes the embedding layer to the correct size + model.resize_token_embeddings(vocab_size) + # Avoid saving this as part of the config. + del model.config._name_or_path + + print("Saving in the Transformers format.") + max_num_params_per_shard = param_count * 2 // max(1, (num_output_shards - 1)) + model.save_pretrained(model_path, max_shard_size=max_num_params_per_shard) + + +def write_falcon_model( + model_path: str, + input_base_path: str, + num_output_shards: int = 2, + safe_serialization: bool = True, +): + # Preliminaries + print(f"Fetching all parameters from the checkpoint at {input_base_path}.") + input_base_path = Path(input_base_path) + iteration = (input_base_path / "latest_checkpointed_iteration.txt").read_text() + if iteration != "release": + iteration = f"iter_{int(iteration):07d}" + print(f"Fetching iteration {iteration}") + + # Load weights + loaded = torch.load( + input_base_path / iteration / "mp_rank_00" / "model_optim_rng.pt", + map_location="cpu", + ) + args = loaded["args"] + loaded = loaded["model"]["language_model"] + + if "transformer" not in loaded: # normalize key names + loaded["transformer"] = loaded.pop("encoder") + loaded["embedding"]["word_embeddings.weight"] = loaded["embedding"].pop( + "word_embeddings" + )["weight"] + args.num_layers = args.encoder_num_layers + + # Make sure the self_attention layer is called "attention" in the megatron state dict + for key in list(loaded["transformer"].keys()): + loaded["transformer"][key.replace("self_attention", "attention")] = loaded[ + "transformer" + ].pop(key) + + embedding = loaded["embedding"] + transformer = loaded["transformer"] + + # Load arguments + n_layers = args.num_layers + dim = args.hidden_size + n_heads = args.num_attention_heads + n_heads_kv = args.num_attention_heads_kv + + def permute(qkv_w): + return permute_qkv(qkv_w, dim, n_heads, n_heads_kv, revert=True) + + weights = {} + + # weights independent of layers (i.e. 
token embeddings and layernorms + weights["transformer.word_embeddings.weight"] = embedding["word_embeddings.weight"] + weights["lm_head.weight"] = weights["transformer.word_embeddings.weight"] + weights["transformer.ln_f.weight"] = transformer["final_layernorm.weight"] + weights["transformer.ln_f.bias"] = transformer["final_layernorm.bias"] + + # copy weights for each transformer layer + for layer in trange(n_layers, desc="Converting weights"): + prefix1 = f"layers.{layer}" + prefix2 = f"transformer.h.{layer}" + # mlp + weights[f"{prefix2}.mlp.dense_h_to_4h.weight"] = transformer[ + f"{prefix1}.mlp.dense_h_to_4h.weight" + ] + weights[f"{prefix2}.mlp.dense_4h_to_h.weight"] = transformer[ + f"{prefix1}.mlp.dense_4h_to_h.weight" + ] + + # qkv weights + weights[f"{prefix2}.self_attention.query_key_value.weight"] = permute( + transformer[f"{prefix1}.attention.query_key_value.weight"] + ) + + # dense + weights[f"{prefix2}.self_attention.dense.weight"] = transformer[ + f"{prefix1}.attention.dense.weight" + ] + + # falcon7 and falcon40 differ in the input layernorms + if n_layers <= 32: # 7B model + weights[f"{prefix2}.input_layernorm.weight"] = transformer[ + f"{prefix1}.input_layernorm.weight" + ] + weights[f"{prefix2}.input_layernorm.bias"] = transformer[ + f"{prefix1}.input_layernorm.bias" + ] + else: + weights[f"{prefix2}.ln_attn.weight"] = transformer[ + f"{prefix1}.input_layernorm.weight" + ] + weights[f"{prefix2}.ln_mlp.weight"] = transformer[ + f"{prefix1}.mlp_layernorm.weight" + ] + weights[f"{prefix2}.ln_attn.bias"] = transformer[ + f"{prefix1}.input_layernorm.bias" + ] + weights[f"{prefix2}.ln_mlp.bias"] = transformer[ + f"{prefix1}.mlp_layernorm.bias" + ] + + print("Falcon-Megatron Loaded!") + + vocab_size = 65024 # default size for falcon + if "padded_vocab_size" in args: + vocab_size = args.padded_vocab_size + + # creating HF falcon model + config = FalconConfig( + vocab_size=vocab_size, + hidden_size=args.hidden_size, + num_hidden_layers=args.num_layers, + num_attention_heads=args.num_attention_heads, + num_kv_heads=( + None if args.num_attention_heads_kv == 1 else args.num_attention_heads_kv + ), + new_decoder_architecture=args.num_layers >= 60, + ) + + print("Creating FalconForCausalLM") + model = FalconForCausalLM(config=config) + torch_dtype = weights["lm_head.weight"].dtype + print(f"dtype: {torch_dtype}") + print("Loading state dict...") + model.to(torch_dtype) # convert model to soucre dtype + model.load_state_dict(weights) + print("Done!") + + param_count = 0 + for v in weights.values(): + param_count += v.numel() + print(f"param_count: {param_count:,}") + + # write model + print(f"Saving in the Transformers format to: {model_path} ({torch_dtype})") + bits_per_param = torch.finfo(torch_dtype).bits + max_shard_size = param_count * bits_per_param // num_output_shards // 8 + print(f"max_shard_size: {max_shard_size:,} bytes") + model.save_pretrained( + model_path, + max_shard_size=max_shard_size, + safe_serialization=safe_serialization, + ) + + +def write_tokenizer(args: Namespace): + if args.model in {"llama", "llama2", "codellama", "mistral", "gemma"}: + # mistral also use LlamaTokenizerFast + args.tokenizer_type = "SentencePieceTokenizer" + if args.vocab_file: + # prevent "single file or url is deprecated and won't be possible anymore in v5" warning, + # use parent directory instead + p = Path(args.vocab_file) + if p.suffix == ".model": + p = p.parent + if args.model == "gemma": + hf_tokenizer = GemmaTokenizerFast.from_pretrained(p) + else: + hf_tokenizer = 
LlamaTokenizerFast.from_pretrained(p) + args.vocab_file = hf_tokenizer.vocab_file + else: + if args.model == "codellama": + hf_repo_name = "TheBloke/CodeLlama-13B-fp16" + elif args.model == "mistral": + hf_repo_name = "mistralai/Mistral-7B-v0.1" + else: + hf_repo_name = "meta-llama/Llama-2-7b-hf" + try: # try loading from huggingface + hf_tokenizer = LlamaTokenizerFast.from_pretrained( + hf_repo_name, cache_dir=args.cache_dir + ) + print("LlamaTokenizerFast loaded from huggingface") + print( + "vocab_file not set, assuming same tokenizer.model used " + "by llama LlamaTokenizerFast" + ) + args.vocab_file = hf_tokenizer.vocab_file + except OSError: + print( + f"ERROR: Could not load tokenizer from HF repo '{hf_repo_name}'. " + "Tokenizer processing failed." + ) + return + elif args.model == "llama3": + args.tokenizer_type = "PretrainedFromHF" + hf_tokenizer = AutoTokenizer.from_pretrained( + "meta-llama/Meta-Llama-3-8B", cache_dir=args.cache_dir + ) + else: + hf_tokenizer = AutoTokenizer.from_pretrained( + "tiiuae/falcon-40b", cache_dir=args.cache_dir + ) + args.tokenizer_type = "FalconTokenizer" + + # add default args for megatron tokenizer + args.rank = 0 + args.vocab_extra_ids = 0 + args.new_tokens = True + args.make_vocab_size_divisible_by = 1 + args.tensor_model_parallel_size = 1 + mt_tokenizer = build_tokenizer(args) + + if args.tokenizer_type == "SentencePieceTokenizer": + pass + # if mt_tokenizer.cls is not None: + # hf_tokenizer.add_tokens("", special_tokens=True) + # hf_tokenizer.cls_token_id = mt_tokenizer.cls + # if mt_tokenizer.sep is not None: + # hf_tokenizer.add_tokens("", special_tokens=True) + # hf_tokenizer.sep_token_id = mt_tokenizer.sep + # if mt_tokenizer.eod is not None: + # hf_tokenizer.add_tokens("", special_tokens=True) + # if mt_tokenizer.mask is not None: + # hf_tokenizer.add_tokens("", special_tokens=True) + # hf_tokenizer.mask_token_id = mt_tokenizer.mask + # if mt_tokenizer.pad is not None: + # hf_tokenizer.add_tokens("", special_tokens=True) + # hf_tokenizer.pad_token_id = mt_tokenizer.pad + + # additional_special_tokens = hf_tokenizer.additional_special_tokens + # special_tokens = {"additional_special_tokens": additional_special_tokens} + # if args.vocab_extra_ids_list: + # additional_special_tokens.extend(args.vocab_extra_ids_list.split(",")) + + # hf_tokenizer.add_special_tokens( + # special_tokens_dict=special_tokens, replace_additional_special_tokens=True + # ) + + # additional_special_tokens_ids = [ + # mt_tokenizer.vocab.get(t) for t in additional_special_tokens + # ] + # hf_tokenizer.additional_special_tokens_ids = additional_special_tokens_ids + + # hf_vocab = hf_tokenizer.get_vocab() + # tokens_to_check = [ + # v + # for k, v in hf_tokenizer.special_tokens_map.items() + # if k != "additional_special_tokens" + # ] + additional_special_tokens + # for t in tokens_to_check: + # a = mt_tokenizer.vocab.get(t) + # b = hf_vocab.get(t) + # assert ( + # a == b + # ), f"Mismatch between megatron and huggingface tokenizer vocabularies {t}, {a}, {b}" + elif ( + args.tokenizer_type == "FalconTokenizer" + or args.tokenizer_type == "PretrainedFromHF" + ): + hf_tokenizer = mt_tokenizer.tokenizer + else: + raise RuntimeError(f"Unsupported tokenizer type: {args.tokenizer_type}") + + # handle special token overrides + # for override in args.override_special_tokens: + # try: + # key, value = override.split("=") + # assert key in {"bos", "cls", "eos", "mask", "pad", "sep", "unk"} + # value = mt_tokenizer.vocab[value] + # setattr(hf_tokenizer, f"{key}_token_id", value) + # 
except ValueError: + # warnings.warn(f"Illegal override string {override}") + # except AssertionError: + # warnings.warn(f"Cannot override key {key}") + # except KeyError: + # warnings.warn(f"Token {value} not found in megatron tokenizer") + + print("Final HF Tokenizer configuration:") + print(hf_tokenizer) + hf_tokenizer.save_pretrained(args.output_dir) + + +def main(): + # make sure megatron is importable + + parser = ArgumentParser() + parser.add_argument( + "--input_dir", help="Location of Megatron weights", required=True + ) + parser.add_argument("--num_output_shards", type=int, default=1) + parser.add_argument( + "--model", + choices={ + "falcon", + "llama", + "llama2", + "llama3", + "codellama", + "mistral", + "gemma", + }, + default="llama2", + ) + parser.add_argument( + "--output_dir", help="Location to write HF model and tokenizer", required=True + ) + parser.add_argument("--cache_dir", help="Huggingface cache_dir (optional)") + parser.add_argument("--vocab_file", type=str, help="Path to the vocab file") + parser.add_argument( + "--vocab_extra_ids_list", + help="comma separated list of special vocab ids to add to the tokenizer", + ) + parser.add_argument( + "--override_special_tokens", + nargs="*", + default=[], + help=( + "One or more arguments to override special tokens. " + "Syntax set as `key=value`, e.g. `eos=<|im_end|>`. " + "Overrides available only bos, cls, eos, mask, pad, sep, unk." + ), + ) + + args = parser.parse_args() + if args.model in {"llama", "llama2", "llama3", "codellama"}: + eps = 1e-6 if args.model == "llama" else 1e-5 + rope_theta = 1e6 if args.model == "codellama" else 1e4 + write_llama_model( + model_path=args.output_dir, + input_base_path=args.input_dir, + num_output_shards=args.num_output_shards, + norm_eps=eps, + rope_theta=rope_theta, + ) + elif args.model == "mistral": + write_mistral_model( + model_path=args.output_dir, + input_base_path=args.input_dir, + num_output_shards=args.num_output_shards, + # vocab_size=args.vocab_size, + ) + elif args.model == "falcon": + write_falcon_model( + model_path=args.output_dir, + input_base_path=args.input_dir, + num_output_shards=args.num_output_shards, + safe_serialization=True, + ) + elif args.model == "gemma": + write_gemma_model( + model_path=args.output_dir, + input_base_path=args.input_dir, + num_output_shards=args.num_output_shards, + ) + write_tokenizer(args) + + +if __name__ == "__main__": + main() diff --git a/multilinguality_megatron/weights_conversion/utils/__init__.py b/multilinguality_megatron/weights_conversion/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..905d9e216bf58b18645ef32db5baa741da901d53 --- /dev/null +++ b/multilinguality_megatron/weights_conversion/utils/__init__.py @@ -0,0 +1,2 @@ +from . import permute_qkv +from . 
import merge_llama diff --git a/multilinguality_megatron/weights_conversion/utils/__pycache__/__init__.cpython-39.pyc b/multilinguality_megatron/weights_conversion/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ce6b41eae23ab38f83b75aa111c0c0b83a6b05 Binary files /dev/null and b/multilinguality_megatron/weights_conversion/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/multilinguality_megatron/weights_conversion/utils/__pycache__/merge_llama.cpython-39.pyc b/multilinguality_megatron/weights_conversion/utils/__pycache__/merge_llama.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e135c61df03d1bac34f633cae2d743affb5077a3 Binary files /dev/null and b/multilinguality_megatron/weights_conversion/utils/__pycache__/merge_llama.cpython-39.pyc differ diff --git a/multilinguality_megatron/weights_conversion/utils/__pycache__/permute_qkv.cpython-39.pyc b/multilinguality_megatron/weights_conversion/utils/__pycache__/permute_qkv.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee784e49fd209d1b3caff8f3be3bc4c2e443b4d7 Binary files /dev/null and b/multilinguality_megatron/weights_conversion/utils/__pycache__/permute_qkv.cpython-39.pyc differ diff --git a/multilinguality_megatron/weights_conversion/utils/merge_llama.py b/multilinguality_megatron/weights_conversion/utils/merge_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..391f05da15def94c33987b92c1b59317d39a9bb0 --- /dev/null +++ b/multilinguality_megatron/weights_conversion/utils/merge_llama.py @@ -0,0 +1,123 @@ +import os +import re +from pathlib import Path +from typing import Optional +from collections import OrderedDict + +import torch +from tqdm.auto import tqdm +from transformers import LlamaForCausalLM + + +scale2emb = { + '7B': 4096, + '13B': 5120, + '30B': 6656, + '34B': 8192, + '65B': 8192, + '70B': 8192, +} + + +key_to_dim = { + "w1": 0, + "w2": -1, + "w3": 0, + "wo": -1, + "wq": 0, + "wk": 0, + "wv": 0, + "output": 0, + "tok_embeddings": -1, + "ffn_norm": None, + "attention_norm": None, + "norm": None, + "rope": None, +} + + +def init_merged_ckpt(pth_00, num_pth=8, emb_dim=8192): + merged_ckpt = OrderedDict() + for parameter_name, parameter in pth_00.items(): + short_name = parameter_name.split(".")[-2] + if key_to_dim[short_name] is None: + merged_ckpt[parameter_name] = parameter + del parameter + elif key_to_dim[short_name] == 0: + size = parameter.shape[0] + merged_param_shape = [ parameter.shape[0] * num_pth, parameter.shape[1] ] + merged_ckpt[parameter_name] = torch.zeros(merged_param_shape) + merged_ckpt[parameter_name][0 : size, :] = parameter + del parameter + elif key_to_dim[short_name] == -1: + size = parameter.shape[-1] + merged_param_shape = [ parameter.shape[0], parameter.shape[1] * num_pth] + merged_ckpt[parameter_name] = torch.zeros(merged_param_shape) + merged_ckpt[parameter_name][:, 0 : size] = parameter + del parameter + return merged_ckpt + + +def merge_meta_llama(size: int, root_dir: Path): + paths = sorted(path for path in root_dir.iterdir() + if re.match(r"^consolidated\.[0-9]+\.pth$", path.name)) + if len(paths) == 1: # no sharded checkpoints, return everything + return torch.load(paths[0], map_location=torch.device("cpu")) + + num_pth = len(paths) + for i, ckpt_path in enumerate(tqdm(paths, desc="Merging llama")): + llama_config = torch.load(ckpt_path, map_location=torch.device('cpu')) + if i == 0: + merged_ckpt = 
+                                           emb_dim=scale2emb[f"{size}B"])
+        else:
+            for parameter_name, parameter in llama_config.items():
+                short_name = parameter_name.split(".")[-2]
+                if key_to_dim[short_name] == 0:
+                    size = parameter.shape[0]
+                    merged_param_shape = [ parameter.shape[0] * num_pth, parameter.shape[1] ]
+                    merged_ckpt[parameter_name][size * i : size * (i + 1), :] = parameter
+                    del parameter
+                if key_to_dim[short_name] == -1:
+                    size = parameter.shape[-1]
+                    merged_param_shape = [ parameter.shape[0], parameter.shape[1] * num_pth]
+                    merged_ckpt[parameter_name][:, size * i : size * (i + 1)] = parameter
+                    del parameter
+        del llama_config
+    return merged_ckpt
+
+
+def merge_hf_llama(size: int, version: int, cache_dir: Optional[Path] = None,
+                   model_path: Optional[str] = None):
+    if model_path is None and version == 1:
+        model_path = f"decapoda-research/llama-{size}b-hf"
+    elif model_path is None and version == 2:
+        model_path = f"meta-llama/Llama-2-{size}b-hf"
+    weights = LlamaForCausalLM.from_pretrained(model_path, cache_dir=cache_dir).state_dict()
+    weights["tok_embeddings.weight"] = weights.pop("model.embed_tokens.weight")
+    weights["norm.weight"] = weights.pop("model.norm.weight")
+    weights["output.weight"] = weights.pop("lm_head.weight")
+    for key in list(weights.keys()):
+        if rmatch := re.match(r"^model\.(layers\.[0-9]+\.)(.+)(\.weight)$", key):
+            new_key = {
+                "self_attn.q_proj": "attention.wq",
+                "self_attn.k_proj": "attention.wk",
+                "self_attn.v_proj": "attention.wv",
+                "self_attn.o_proj": "attention.wo",
+                "mlp.gate_proj": "feed_forward.w1",
+                "mlp.down_proj": "feed_forward.w2",
+                "mlp.up_proj": "feed_forward.w3",
+                "input_layernorm": "attention_norm",
+                "post_attention_layernorm": "ffn_norm"
+            }[rmatch.group(2)]
+            weights[rmatch.group(1) + new_key + rmatch.group(3)] = weights.pop(key)
+    return weights
+
+
+def merge_llama(size: int, version: int, root_dir: Optional[Path] = None,
+                model_path: Optional[str] = None):
+    if root_dir is not None and (root_dir/"consolidated.00.pth").exists():
+        return merge_meta_llama(size, root_dir), "meta"
+    print(f"Weights at {root_dir} do not look like a meta checkpoint, assuming "
+          "huggingface cache_dir instead")
+    return merge_hf_llama(size, version, root_dir, model_path), "hf"
diff --git a/multilinguality_megatron/weights_conversion/utils/permute_qkv.py b/multilinguality_megatron/weights_conversion/utils/permute_qkv.py
new file mode 100644
index 0000000000000000000000000000000000000000..91a673d26b6a57ef051b847836ba4239169d8732
--- /dev/null
+++ b/multilinguality_megatron/weights_conversion/utils/permute_qkv.py
@@ -0,0 +1,93 @@
+import re
+import sys
+import os
+import shutil
+from pathlib import Path
+from argparse import ArgumentParser
+
+import torch
+from tqdm.auto import tqdm
+
+
+def permute_qkv(
+    qkv_w: torch.Tensor,
+    dim: int,
+    n_heads: int,
+    n_heads_kv: int,
+    n_hidden_per_head=None,
+    revert: bool = False,
+) -> torch.Tensor:
+
+    def permute(x):
+        if revert:
+            return x.view(head_dim // 2, 2, dim).transpose(0, 1).reshape(head_dim, dim)
+        return x.view(2, head_dim // 2, dim).transpose(0, 1).reshape(head_dim, dim)
+
+    if n_hidden_per_head is None:
+        head_dim = dim // n_heads
+    else:
+        head_dim = n_hidden_per_head
+    n_qs_per_kv = n_heads // n_heads_kv
+    n_groups = qkv_w.size(0) // head_dim // (n_qs_per_kv + 2)
+    groups = torch.chunk(qkv_w, n_groups, dim=0)
+    new = []
+    for group in groups:
+        *qs, k, v = torch.split(group, head_dim, dim=0)
+        assert len(qs) == n_qs_per_kv, f"{len(qs)}, {n_qs_per_kv}"
+        new += list(map(permute, qs)) + [permute(k), v]
+
+    return torch.cat(new, dim=0)
+
+
+def update_checkpoint(input_dir: Path, output_dir: Path, overwrite_ok: bool = False):
+    # make sure megatron is importable
+    sys.path.append(
+        os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
+    )
+
+    # prepare output dir
+    if output_dir.exists():
+        if not overwrite_ok:
+            raise FileExistsError(f"Output directory {output_dir} already exists")
+        print(f"Removing {output_dir}")
+        shutil.rmtree(output_dir)
+    output_dir.mkdir(exist_ok=True)
+
+    # determine release
+    with open(input_dir / "latest_checkpointed_iteration.txt") as f:
+        it = f.read()
+    print("Updating weights of iteration", it)
+    with open(output_dir / "latest_checkpointed_iteration.txt", "w+") as f:
+        f.write(it)
+    if it != "release":
+        it = f"iter_{int(it):07d}"
+    (output_dir / it).mkdir()
+
+    # convert weights
+    for fname in tqdm(list((input_dir / it).iterdir())):
+        checkpoint = torch.load(fname / "model_optim_rng.pt", map_location="cpu")
+        args = checkpoint["args"]
+        args = (args.hidden_size, args.num_attention_heads, args.num_attention_heads_kv)
+        if "transformer" in checkpoint["model"]["language_model"]:
+            key = "transformer"
+            attn_key = "attention"
+        else:
+            key = "encoder"
+            attn_key = "self_attention"
+        states = checkpoint["model"]["language_model"][key]
+        for name, weight in states.items():
+            if re.match(
+                rf"^layers\.[0-9]+\.{attn_key}\.query_key_value\.weight$", name
+            ):
+                states[name] = permute_qkv(weight, *args)
+        (output_dir / it / fname.stem).mkdir()
+        torch.save(checkpoint, output_dir / it / fname.stem / "model_optim_rng.pt")
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--input-dir", type=Path)
+    parser.add_argument("--output-dir", type=Path)
+    parser.add_argument("--overwrite-ok", action="store_true")
+    args = parser.parse_args()
+    update_checkpoint(args.input_dir, args.output_dir, args.overwrite_ok)