diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..0b13a06db6f8438513366c05e4943ebf63abdf13 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +mamba/assets/ssd_algorithm.png filter=lfs diff=lfs merge=lfs -text diff --git a/mamba/.github/workflows/publish.yaml b/mamba/.github/workflows/publish.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb3e60c6b5b050fcae0b982d8dbed760e2d61ac9 --- /dev/null +++ b/mamba/.github/workflows/publish.yaml @@ -0,0 +1,209 @@ +# This workflow will: +# - Create a new Github release +# - Build wheels for supported architectures +# - Deploy the wheels to the Github release +# - Release the static code to PyPi +# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +name: Build wheels and deploy + +on: + create: + tags: + - v* + +jobs: + + setup_release: + name: Create Release + runs-on: ubuntu-latest + steps: + - name: Get the tag version + id: extract_branch + run: echo ::set-output name=branch::${GITHUB_REF#refs/tags/} + shell: bash + + - name: Create Release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.extract_branch.outputs.branch }} + release_name: ${{ steps.extract_branch.outputs.branch }} + + build_wheels: + name: Build Wheel + needs: setup_release + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + # Using ubuntu-20.04 instead of 22.04 for more compatibility (glibc). Ideally we'd use the + # manylinux docker image, but I haven't figured out how to install CUDA on manylinux. + os: [ubuntu-20.04] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + torch-version: ['2.0.1', '2.1.2', '2.2.2', '2.3.1', '2.4.0'] + cuda-version: ['11.8.0', '12.2.2'] + # We need separate wheels that either uses C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI) or not. + # Pytorch wheels currently don't use it, but nvcr images have Pytorch compiled with C++11 ABI. + # Without this we get import error (undefined symbol: _ZN3c105ErrorC2ENS_14SourceLocationESs) + # when building without C++11 ABI and using it on nvcr images. + cxx11_abi: ['FALSE', 'TRUE'] + exclude: + # Pytorch < 2.2 does not support Python 3.12 + - torch-version: '2.0.1' + python-version: '3.12' + - torch-version: '2.1.2' + python-version: '3.12' + # Pytorch <= 2.0 only supports CUDA <= 11.8 + - torch-version: '2.0.1' + cuda-version: '12.2.2' + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Set CUDA and PyTorch versions + run: | + echo "MATRIX_CUDA_VERSION=$(echo ${{ matrix.cuda-version }} | awk -F \. {'print $1 $2'})" >> $GITHUB_ENV + echo "MATRIX_TORCH_VERSION=$(echo ${{ matrix.torch-version }} | awk -F \. {'print $1 "." 
$2'})" >> $GITHUB_ENV + + - name: Free up disk space + if: ${{ runner.os == 'Linux' }} + # https://github.com/easimon/maximize-build-space/blob/master/action.yml + # https://github.com/easimon/maximize-build-space/tree/test-report + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf /opt/ghc + sudo rm -rf /opt/hostedtoolcache/CodeQL + + - name: Set up swap space + if: runner.os == 'Linux' + uses: pierotofy/set-swap-space@v1.0 + with: + swap-size-gb: 10 + + - name: Install CUDA ${{ matrix.cuda-version }} + if: ${{ matrix.cuda-version != 'cpu' }} + uses: Jimver/cuda-toolkit@v0.2.14 + id: cuda-toolkit + with: + cuda: ${{ matrix.cuda-version }} + linux-local-args: '["--toolkit"]' + # default method is "local", and we're hitting some error with caching for CUDA 11.8 and 12.1 + # method: ${{ (matrix.cuda-version == '11.8.0' || matrix.cuda-version == '12.1.0') && 'network' || 'local' }} + method: 'network' + # We need the cuda libraries (e.g. cuSparse, cuSolver) for compiling PyTorch extensions, + # not just nvcc + # sub-packages: '["nvcc"]' + + - name: Install PyTorch ${{ matrix.torch-version }}+cu${{ matrix.cuda-version }} + run: | + pip install --upgrade pip + # If we don't install before installing Pytorch, we get error for torch 2.0.1 + # ERROR: Could not find a version that satisfies the requirement setuptools>=40.8.0 (from versions: none) + pip install lit + # For some reason torch 2.2.0 on python 3.12 errors saying no setuptools + pip install setuptools + # We want to figure out the CUDA version to download pytorch + # e.g. we can have system CUDA version being 11.7 but if torch==1.12 then we need to download the wheel from cu116 + # This code is ugly, maybe there's a better way to do this. + export TORCH_CUDA_VERSION=$(python -c "from os import environ as env; \ + minv = {'2.0': 117, '2.1': 118, '2.2': 118, '2.3': 118, '2.4': 118}[env['MATRIX_TORCH_VERSION']]; \ + maxv = {'2.0': 118, '2.1': 121, '2.2': 121, '2.3': 121, '2.4': 124}[env['MATRIX_TORCH_VERSION']]; \ + print(max(min(int(env['MATRIX_CUDA_VERSION']), maxv), minv))" \ + ) + if [[ ${{ matrix.torch-version }} == *"dev"* ]]; then + pip install --no-cache-dir --pre torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/nightly/cu${TORCH_CUDA_VERSION} + else + pip install --no-cache-dir torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/cu${TORCH_CUDA_VERSION} + fi + nvcc --version + python --version + python -c "import torch; print('PyTorch:', torch.__version__)" + python -c "import torch; print('CUDA:', torch.version.cuda)" + python -c "from torch.utils import cpp_extension; print (cpp_extension.CUDA_HOME)" + shell: + bash + + - name: Build wheel + run: | + # We want setuptools >= 49.6.0 otherwise we can't compile the extension if system CUDA version is 11.7 and pytorch cuda version is 11.6 + # https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/torch/utils/cpp_extension.py#L810 + # However this still fails so I'm using a newer version of setuptools + pip install setuptools==68.0.0 + pip install ninja packaging wheel + export PATH=/usr/local/nvidia/bin:/usr/local/nvidia/lib64:$PATH + export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/cuda/lib64:$LD_LIBRARY_PATH + # Limit MAX_JOBS otherwise the github runner goes OOM + MAX_JOBS=2 MAMBA_FORCE_BUILD="TRUE" MAMBA_FORCE_CXX11_ABI=${{ matrix.cxx11_abi}} python setup.py bdist_wheel --dist-dir=dist + tmpname=cu${MATRIX_CUDA_VERSION}torch${MATRIX_TORCH_VERSION}cxx11abi${{ matrix.cxx11_abi }} + 
wheel_name=$(ls dist/*whl | xargs -n 1 basename | sed "s/-/+$tmpname-/2") + ls dist/*whl |xargs -I {} mv {} dist/${wheel_name} + echo "wheel_name=${wheel_name}" >> $GITHUB_ENV + + - name: Log Built Wheels + run: | + ls dist + + - name: Get the tag version + id: extract_branch + run: echo ::set-output name=branch::${GITHUB_REF#refs/tags/} + + - name: Get Release with tag + id: get_current_release + uses: joutvhu/get-release@v1 + with: + tag_name: ${{ steps.extract_branch.outputs.branch }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload Release Asset + id: upload_release_asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.get_current_release.outputs.upload_url }} + asset_path: ./dist/${{env.wheel_name}} + asset_name: ${{env.wheel_name}} + asset_content_type: application/* + + publish_package: + name: Publish package + needs: [build_wheels] + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install dependencies + run: | + pip install ninja packaging setuptools wheel twine + # We don't want to download anything CUDA-related here + pip install torch --index-url https://download.pytorch.org/whl/cpu + + - name: Build core package + env: + MAMBA_SKIP_CUDA_BUILD: "TRUE" + run: | + python setup.py sdist --dist-dir=dist + + - name: Deploy + env: + TWINE_USERNAME: "__token__" + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} + run: | + python -m twine upload dist/* diff --git a/mamba/.gitignore b/mamba/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..dbde1b1172f14e2afc545c2c0e7d38c4d6074402 --- /dev/null +++ b/mamba/.gitignore @@ -0,0 +1,6 @@ +*__pycache__/ +*.egg-info/ +build/ +**.so +*.hip +*_hip.* \ No newline at end of file diff --git a/mamba/.gitmodules b/mamba/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..a7445800fb64f3ae664c0b994a54235105986d2e --- /dev/null +++ b/mamba/.gitmodules @@ -0,0 +1,3 @@ +[submodule "3rdparty/lm-evaluation-harness"] + path = 3rdparty/lm-evaluation-harness + url = https://github.com/EleutherAI/lm-evaluation-harness/ diff --git a/mamba/AUTHORS b/mamba/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..38557a872f8d603ed963a05c211de7032de5926b --- /dev/null +++ b/mamba/AUTHORS @@ -0,0 +1,2 @@ +Tri Dao, tri@tridao.me +Albert Gu, agu@andrew.cmu.edu diff --git a/mamba/LICENSE b/mamba/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f4abe24eb520fbb077753ae4f34bfaa43cb3b83f --- /dev/null +++ b/mamba/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Tri Dao, Albert Gu + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/mamba/README.md b/mamba/README.md new file mode 100644 index 0000000000000000000000000000000000000000..990275407942ae5753fcb2132a785fea304e1eb3 --- /dev/null +++ b/mamba/README.md @@ -0,0 +1,243 @@ +# Mamba + +![Mamba](assets/selection.png "Selective State Space") +> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\ +> Albert Gu*, Tri Dao*\ +> Paper: https://arxiv.org/abs/2312.00752 + +![Mamba-2](assets/ssd_algorithm.png "State Space Dual Model") +> **Transformers are SSMs: Generalized Models and Efficient Algorithms**\ +> **Through Structured State Space Duality**\ +> Tri Dao*, Albert Gu*\ +> Paper: https://arxiv.org/abs/2405.21060 + +## About + +Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers. +It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4), +with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention). + +## Installation + +- [Option] `pip install causal-conv1d>=1.4.0`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block. +- `pip install mamba-ssm`: the core Mamba package. +- `pip install mamba-ssm[causal-conv1d]`: To install core Mamba package and causal-conv1d. +- `pip install mamba-ssm[dev]`: To install core Mamba package and dev depdencies. + +It can also be built from source with `pip install .` from this repository. + +If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`. + +Other requirements: +- Linux +- NVIDIA GPU +- PyTorch 1.12+ +- CUDA 11.6+ + +For AMD cards, see additional prerequisites below. + +## Usage + +We expose several levels of interface with the Mamba model. + +### Selective SSM + +Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2). + +Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py). + +### Mamba Block + +The main module of this repository is the Mamba architecture block wrapping the selective SSM. + +Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py). + +Usage: +``` python +import torch +from mamba_ssm import Mamba + +batch, length, dim = 2, 64, 16 +x = torch.randn(batch, length, dim).to("cuda") +model = Mamba( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=16, # SSM state expansion factor + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +### Mamba-2 + +The Mamba-2 block is implemented at [modules/mamba2.py](mamba_ssm/modules/mamba2.py). 
+ +A simpler version is at [modules/mamba2_simple.py](mamba_ssm/modules/mamba2_simple.py) + +The usage is similar to Mamba(-1): +``` python +from mamba_ssm import Mamba2 +model = Mamba2( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=64, # SSM state expansion factor, typically 64 or 128 + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +#### SSD + +A minimal version of the inner SSD module (Listing 1 from the Mamba-2 paper) with conversion between "discrete" and "continuous" SSM versions +is at [modules/ssd_minimal.py](mamba_ssm/modules/ssd_minimal.py). + +### Mamba Language Model + +Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head. + +Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py). + +This is an example of how to integrate Mamba into an end-to-end neural network. +This example is used in the generation scripts below. + + +## Pretrained Models + +Pretrained models are uploaded to +[Hugging Face](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`, +`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`, `mamba2-130m`, `mamba2-370m`, +`mamba2-780m`, `mamba2-1.3b`, `mamba2-2.7b`, `transformerpp-2.7b`, `mamba2attn-2.7b`, trained on 300B tokens on the Pile, as well as `mamba-2.8b-slimpj` +(trained on 600B tokens on the SlimPajama dataset). + + +The models will be autodownloaded by the generation script below. + +These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models: + +| Parameters | Layers | Model dim. | +|------------|--------|------------| +| 130M | 24 | 768 | +| 370M | 48 | 1024 | +| 790M | 48 | 1536 | +| 1.4B | 48 | 2048 | +| 2.8B | 64 | 2560 | + +(The layer count of Mamba doubles that of a Transformer with similar size, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.) + +Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.). +Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models. + + +## Evaluations + +To run zero-shot evaluations of models (corresponding to Table 3 of the paper), +we use the +[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) +library. + +1. Install `lm-evaluation-harness` by `pip install lm-eval==0.4.2`. +2. 
Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo): +``` sh +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +``` + +To reproduce the results on the `mamba-2.8b-slimpj` model reported in the blogposts: +``` sh +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks boolq,piqa,hellaswag,winogrande,arc_easy,arc_challenge,openbookqa,race,truthfulqa_mc2 --device cuda --batch_size 256 +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks mmlu --num_fewshot 5 --device cuda --batch_size 256 +``` + +To run evaluations on Mamba-2 models, simply replace the model names: +``` sh +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/transformerpp-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2attn-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +``` + +Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process. + +## Inference + +The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py) +1. autoloads a model from the Hugging Face Hub, +2. generates completions of a user-specified prompt, +3. benchmarks the inference speed of this generation. + +Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature. + +### Examples + +To test generation latency (e.g. batch size = 1) with different sampling strategies: + +``` sh +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --minp 0.05 --topk 0 --temperature 0.7 --repetition-penalty 1.2 +``` + +To test generation throughput with random prompts (e.g. 
large batch size): +``` sh +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 64 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 64 +``` + +With Mamba-2, you just need to change the model name: +``` sh +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba2-2.7b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2 +``` + + +## Troubleshooting + +### Precision +Our models were trained using PyTorch [AMP](https://pytorch.org/docs/stable/amp.html) for mixed precision. AMP keeps model parameters in float32 and casts to half precision when necessary. +On the other hand, other frameworks like DeepSpeed store parameters in float16 and upcasts when necessary (e.g. for optimizer accumulation). + +We've observed that higher precision for the main model parameters may be necessary, because SSMs are sensitive to their recurrent dynamics. If you are experiencing instabilities, +as a first step please try a framework storing parameters in fp32 (such as AMP). + +### Initialization +Some parts of the model have initializations inherited from prior work on S4 models. +For [example](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L102), the $\Delta$ parameter has a targeted range by initializing the bias of its linear projection. +However, some frameworks may have post-initialization hooks (e.g. setting all bias terms in `nn.Linear` modules to zero). +If this is the case, you may have to add custom logic (e.g. this [line](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L104) turns off re-initializing in our trainer, but would be a no-op in any other framework) +that is specific to the training framework. + +## Additional Prerequisites for AMD cards + +### Patching ROCm + +If you are on ROCm 6.0, run the following steps to avoid errors during compilation. This is not required for ROCm 6.1 onwards. + +1. Locate your ROCm installation directory. This is typically found at `/opt/rocm/`, but may vary depending on your installation. + +2. Apply the Patch. Run with `sudo` in case you encounter permission issues. 
+ ```bash + patch /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h < rocm_patch/rocm6_0.patch + ``` + + +## Citation + +If you use this codebase, or otherwise find our work valuable, please cite Mamba: +``` +@article{mamba, + title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces}, + author={Gu, Albert and Dao, Tri}, + journal={arXiv preprint arXiv:2312.00752}, + year={2023} +} + +@inproceedings{mamba2, + title={Transformers are {SSM}s: Generalized Models and Efficient Algorithms Through Structured State Space Duality}, + author={Dao, Tri and Gu, Albert}, + booktitle={International Conference on Machine Learning (ICML)}, + year={2024} +} + +``` diff --git a/mamba/assets/selection.png b/mamba/assets/selection.png new file mode 100644 index 0000000000000000000000000000000000000000..69b109a8eed4e3c7516b23e2b39d37e842a4464b Binary files /dev/null and b/mamba/assets/selection.png differ diff --git a/mamba/assets/ssd_algorithm.png b/mamba/assets/ssd_algorithm.png new file mode 100644 index 0000000000000000000000000000000000000000..5943388415a425fb94196ade9fed449eb3d6421c --- /dev/null +++ b/mamba/assets/ssd_algorithm.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91ab82b330761250c3241e4f16fed54a35081115c26777a4cc087c2f6e47f466 +size 1118415 diff --git a/mamba/benchmarks/benchmark_generation_mamba_simple.py b/mamba/benchmarks/benchmark_generation_mamba_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..f3513b24a3c571cb2e9ecdb2018f0e1abdda24a5 --- /dev/null +++ b/mamba/benchmarks/benchmark_generation_mamba_simple.py @@ -0,0 +1,92 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. + +import argparse +import time +import json + +import torch +import torch.nn.functional as F + +from einops import rearrange + +from transformers import AutoTokenizer, AutoModelForCausalLM + +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + + +parser = argparse.ArgumentParser(description="Generation benchmarking") +parser.add_argument("--model-name", type=str, default="state-spaces/mamba-130m") +parser.add_argument("--prompt", type=str, default=None) +parser.add_argument("--promptlen", type=int, default=100) +parser.add_argument("--genlen", type=int, default=100) +parser.add_argument("--temperature", type=float, default=1.0) +parser.add_argument("--topk", type=int, default=1) +parser.add_argument("--topp", type=float, default=1.0) +parser.add_argument("--minp", type=float, default=0.0) +parser.add_argument("--repetition-penalty", type=float, default=1.0) +parser.add_argument("--batch", type=int, default=1) +args = parser.parse_args() + +repeats = 3 +device = "cuda" +dtype = torch.float16 + +print(f"Loading model {args.model_name}") +is_mamba = args.model_name.startswith("state-spaces/mamba") or args.model_name.startswith("state-spaces/transformerpp") +if is_mamba: + tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") + model = MambaLMHeadModel.from_pretrained(args.model_name, device=device, dtype=dtype) +else: + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + model = AutoModelForCausalLM.from_pretrained(args.model_name, device_map={"": device}, torch_dtype=dtype) +model.eval() +print(f"Number of parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}") + +torch.random.manual_seed(0) +if args.prompt is None: + input_ids = torch.randint(1, 1000, (args.batch, args.promptlen), dtype=torch.long, device="cuda") + attn_mask = torch.ones_like(input_ids, dtype=torch.long, device="cuda") +else: + 
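+    # Tokenize the user-supplied prompt and move the token ids and attention mask onto the GPU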
tokens = tokenizer(args.prompt, return_tensors="pt") + input_ids = tokens.input_ids.to(device=device) + attn_mask = tokens.attention_mask.to(device=device) +max_length = input_ids.shape[1] + args.genlen + +if is_mamba: + fn = lambda: model.generate( + input_ids=input_ids, + max_length=max_length, + cg=True, + return_dict_in_generate=True, + output_scores=True, + enable_timing=False, + temperature=args.temperature, + top_k=args.topk, + top_p=args.topp, + min_p=args.minp, + repetition_penalty=args.repetition_penalty, + ) +else: + fn = lambda: model.generate( + input_ids=input_ids, + attention_mask=attn_mask, + max_length=max_length, + return_dict_in_generate=True, + pad_token_id=tokenizer.eos_token_id, + do_sample=True, + temperature=args.temperature, + top_k=args.topk, + top_p=args.topp, + repetition_penalty=args.repetition_penalty, + ) +out = fn() +if args.prompt is not None: + print(tokenizer.batch_decode(out.sequences.tolist())) + +torch.cuda.synchronize() +start = time.time() +for _ in range(repeats): + fn() +torch.cuda.synchronize() +print(f"Prompt length: {len(input_ids[0])}, generation length: {len(out.sequences[0]) - len(input_ids[0])}") +print(f"{args.model_name} prompt processing + decoding time: {(time.time() - start) / repeats * 1000:.0f}ms") diff --git a/mamba/build/lib/mamba_ssm/__init__.py b/mamba/build/lib/mamba_ssm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..673ee32ab820d86ee5a2993d9000af506a8b0fd6 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/__init__.py @@ -0,0 +1,6 @@ +__version__ = "2.2.2" + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.modules.mamba2 import Mamba2 +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel diff --git a/mamba/build/lib/mamba_ssm/distributed/__init__.py b/mamba/build/lib/mamba_ssm/distributed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/build/lib/mamba_ssm/distributed/distributed_utils.py b/mamba/build/lib/mamba_ssm/distributed/distributed_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..74c55279645cd0fd687584bc1b7374c8c3c73e56 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/distributed/distributed_utils.py @@ -0,0 +1,144 @@ +from typing import Optional + +import torch +from torch import Tensor +from torch.distributed import ProcessGroup + +# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for +# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent +# version of PyTorch. The following 4 lines are for backward compatibility with +# older PyTorch. 
+if "all_gather_into_tensor" not in dir(torch.distributed): + torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base +if "reduce_scatter_tensor" not in dir(torch.distributed): + torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base + + +# Raw operation, does not support autograd, but does support async +def all_gather_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + world_size = torch.distributed.get_world_size(process_group) + output = torch.empty( + world_size * input_.shape[0], *input_.shape[1:], dtype=input_.dtype, device=input_.device + ) + handle = torch.distributed.all_gather_into_tensor( + output, input_.contiguous(), group=process_group, async_op=async_op + ) + return output, handle + + +# Raw operation, does not support autograd, but does support async +def reduce_scatter_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + world_size = torch.distributed.get_world_size(process_group) + assert input_.shape[0] % world_size == 0 + output = torch.empty( + input_.shape[0] // world_size, *input_.shape[1:], dtype=input_.dtype, device=input_.device + ) + handle = torch.distributed.reduce_scatter_tensor( + output, input_.contiguous(), group=process_group, async_op=async_op + ) + return output, handle + + +# Raw operation, does not support autograd, but does support async +def all_reduce_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + input_ = input_.contiguous() + handle = torch.distributed.all_reduce(input_, group=process_group, async_op=async_op) + return input_, handle + + +class AllGatherFunc(torch.autograd.Function): + """Gather the input from sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = all_gather_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + grad_input, _ = reduce_scatter_raw(grad_output, ctx.process_group) + return grad_input, None + + +# Supports autograd, but does not support async +all_gather = AllGatherFunc.apply + + +class ReduceScatterFunc(torch.autograd.Function): + """Reduce scatter the input from the sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = reduce_scatter_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + grad_input, _ = all_gather_raw(grad_output, ctx.process_group) + return grad_input, None + + +# Supports autograd, but does not support async +reduce_scatter = ReduceScatterFunc.apply + + +class AllReduceFunc(torch.autograd.Function): + """Gather the input from sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = all_reduce_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + return grad_output, None + + +# Supports autograd, but does not support async +all_reduce = AllReduceFunc.apply + + +def sync_shared_params(model: torch.nn.Module, process_group: ProcessGroup): + # We want to iterate over parameters with _shared_params=True in the same order, + # as different ranks might have different number of parameters (e.g., only rank 0 has bias). 
+ pamams_shared = { + name: p for name, p in model.named_parameters() if getattr(p, "_shared_params", False) + } + for _, p in sorted(pamams_shared.items()): + with torch.no_grad(): + # Broadcast needs src to be global rank, not group rank + torch.distributed.broadcast( + p, src=torch.distributed.get_global_rank(process_group, 0), group=process_group + ) + + +# Ref: https://github.com/NVIDIA/Megatron-LM/blob/52e636888cccc41e931251c417a7181fc36de926/megatron/optimizer/optimizer.py#L256 +def allreduce_sequence_parallel_grad(model: torch.nn.Module, process_group: ProcessGroup): + # We want to iterate over parameters with _sequence_parallel=True in the same order, + # as different ranks might have different number of parameters (e.g., only rank 0 has bias). + params_seqparallel = { + name: p for name, p in model.named_parameters() if getattr(p, "_sequence_parallel", False) + } + grads = [p.grad for _, p in sorted(params_seqparallel.items())] + if grads: + with torch.no_grad(): + coalesced = torch._utils._flatten_dense_tensors(grads) + torch.distributed.all_reduce(coalesced, group=process_group) + for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)): + buf.copy_(synced) + + +def get_dim_for_local_rank(dim: int, world_size: int, local_rank: int, multiple_of: int = 1) -> int: + """Get the dim for the local rank derived from splitting dim on world_size processes. + + The split may not be even across the world_size processes. + """ + multiple = dim // multiple_of + div = multiple // world_size + mod = multiple % world_size + local_multiple = div + int(local_rank < mod) + return local_multiple * multiple_of diff --git a/mamba/build/lib/mamba_ssm/distributed/tensor_parallel.py b/mamba/build/lib/mamba_ssm/distributed/tensor_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..3660abfc6bb0f0f11eb0b776be443197ef20b510 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/distributed/tensor_parallel.py @@ -0,0 +1,296 @@ +# Copyright (c) 2024, Tri Dao. +# The TensorParallel linear modules are inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/layers.py +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.cuda.amp import custom_bwd, custom_fwd +from torch.distributed import ProcessGroup + +from einops import rearrange + +from mamba_ssm.distributed.distributed_utils import ( + all_gather_raw, + all_reduce, + all_reduce_raw, + reduce_scatter, + reduce_scatter_raw, +) + + +class ParallelLinearFunc(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward(ctx, x, weight, bias, process_group=None, sequence_parallel=True): + """ + If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel + with sequence parallelism: we do an all_gather_raw of x before doing the matmul. 
+ """ + ctx.compute_weight_gradient = weight.requires_grad + ctx.process_group = process_group + ctx.sequence_parallel = sequence_parallel + + if torch.is_autocast_enabled(): + x = x.to(dtype=torch.get_autocast_gpu_dtype()) + x = x.contiguous() + if process_group is not None and sequence_parallel: + # We want to kick off the all_gather early, before weight dtype conversion + total_x, handle_x = all_gather_raw(x, process_group, async_op=True) + else: + total_x = x + + if torch.is_autocast_enabled(): + weight = weight.to(dtype=torch.get_autocast_gpu_dtype()) + bias = bias.to(dtype=torch.get_autocast_gpu_dtype()) if bias is not None else None + weight = weight.contiguous() + if process_group is not None and sequence_parallel: + handle_x.wait() + batch_shape, n = total_x.shape[:-1], total_x.shape[-1] + batch_dim = batch_shape.numel() + # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174 + output = F.linear(total_x, weight, bias) + if ctx.compute_weight_gradient: + ctx.save_for_backward(x, weight) + else: + ctx.save_for_backward(weight) + return output + + @staticmethod + @custom_bwd + def backward(ctx, grad_output): + grad_output = grad_output.contiguous() + process_group = ctx.process_group + sequence_parallel = ctx.sequence_parallel + if ctx.compute_weight_gradient: + x, weight = ctx.saved_tensors + if process_group is not None and sequence_parallel: + total_x, handle_x = all_gather_raw(x, process_group, async_op=True) + else: + total_x = x + else: + (weight,) = ctx.saved_tensors + total_x = None + batch_shape = grad_output.shape[:-1] + batch_dim = batch_shape.numel() + grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1]) + if ctx.needs_input_grad[0]: + grad_input = F.linear(grad_output, weight.t()) + grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1]) + if process_group is not None: + reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw + grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True) + else: + grad_input = None + if ctx.needs_input_grad[1]: + assert ctx.compute_weight_gradient + if process_group is not None and sequence_parallel: + handle_x.wait() + grad_weight = torch.einsum( + "bo,bi->oi", grad_output, total_x.reshape(batch_dim, total_x.shape[-1]) + ) + else: + grad_weight = None + grad_bias = grad_output.sum(dim=0) if ctx.needs_input_grad[2] else None + if process_group is not None and ctx.needs_input_grad[0]: + handle_grad_input.wait() + return grad_input, grad_weight, grad_bias, None, None + + +def parallel_linear_func( + x: Tensor, + weight: Tensor, + bias: Optional[Tensor] = None, + process_group: Optional[ProcessGroup] = None, + sequence_parallel: bool = True, +): + return ParallelLinearFunc.apply(x, weight, bias, process_group, sequence_parallel) + + +class ColumnParallelLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + process_group: ProcessGroup, + bias: bool = True, + sequence_parallel=True, + multiple_of=1, + device=None, + dtype=None, + ) -> None: + world_size = torch.distributed.get_world_size(process_group) + if out_features % multiple_of: + raise ValueError(f"out_features ({out_features}) must be a multiple of {multiple_of}") + multiple = out_features // multiple_of + # We want to split @multiple across world_size, but it could be an uneven split + div = multiple // world_size + mod = multiple % world_size + # The first @mod ranks get @div + 1 copies, the rest get @div copies + 
local_multiple = div + int(torch.distributed.get_rank(process_group) < mod) + super().__init__( + in_features, local_multiple * multiple_of, bias=bias, device=device, dtype=dtype + ) + self.process_group = process_group + self.sequence_parallel = sequence_parallel + + def forward(self, x): + # If self.sequence_parallel is True, we're doing Tensor Parallel with sequence parallelism: + # we do an all_gather of x before doing the matmul. + # If not, then the input is already gathered. + return parallel_linear_func( + x, + self.weight, + self.bias, + process_group=self.process_group, + sequence_parallel=self.sequence_parallel, + ) + + +class RowParallelLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + process_group: ProcessGroup, + bias: bool = True, + sequence_parallel=True, + multiple_of=1, + device=None, + dtype=None, + ) -> None: + world_size = torch.distributed.get_world_size(process_group) + rank = torch.distributed.get_rank(process_group) + if in_features % multiple_of: + raise ValueError(f"in_features ({in_features}) must be a multiple of {multiple_of}") + multiple = in_features // multiple_of + # We want to split @multiple across world_size, but it could be an uneven split + div = multiple // world_size + mod = multiple % world_size + # The first @mod ranks get @div + 1 copies, the rest get @div copies + local_multiple = div + int(torch.distributed.get_rank(process_group) < mod) + # Only rank 0 will have bias + super().__init__( + local_multiple * multiple_of, + out_features, + bias=bias and rank == 0, + device=device, + dtype=dtype, + ) + self.process_group = process_group + self.sequence_parallel = sequence_parallel + + def forward(self, x): + """ + We're doing Tensor Parallel with sequence parallelism: we do the matmul and then + a reduce_scatter of the result. + """ + out = parallel_linear_func(x, self.weight, self.bias) + reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce + return reduce_fn(out, self.process_group) + + +class VocabParallelEmbedding(nn.Embedding): + def __init__(self, num_embeddings, *args, process_group=None, padding_idx=None, **kwargs): + self.process_group = process_group + if process_group is not None: + world_size = torch.distributed.get_world_size(process_group) + if num_embeddings % world_size != 0: + raise ValueError( + f"num_embeddings ({num_embeddings}) must be divisible by " + f"world_size ({world_size})" + ) + if world_size > 1 and padding_idx is not None: + raise RuntimeError("ParallelEmbedding does not support padding_idx") + else: + world_size = 1 + super().__init__(num_embeddings // world_size, *args, padding_idx=padding_idx, **kwargs) + + def forward(self, input: Tensor) -> Tensor: + if self.process_group is None: + return super().forward(input) + else: + rank = torch.distributed.get_rank(self.process_group) + vocab_size = self.num_embeddings + vocab_start_index, vocab_end_index = rank * vocab_size, (rank + 1) * vocab_size + # Create a mask of valid vocab ids (1 means it needs to be masked). 
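+            # Ids owned by other ranks are masked out: they are clamped into the local range for the
+            # lookup and their embedding rows are zeroed afterwards, so only this rank's shard contributes.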
+ input_ids_mask = (input < vocab_start_index) | (input >= vocab_end_index) + input = input - vocab_start_index + input[input_ids_mask] = 0 + embeddings = super().forward(input) + embeddings[input_ids_mask] = 0.0 + return embeddings + + +class ColumnParallelEmbedding(nn.Embedding): + def __init__(self, num_embeddings, embedding_dim, *args, process_group=None, **kwargs): + self.process_group = process_group + if process_group is not None: + world_size = torch.distributed.get_world_size(process_group) + if embedding_dim % world_size != 0: + raise ValueError( + f"embedding_dim ({embedding_dim}) must be divisible by " + f"world_size ({world_size})" + ) + else: + world_size = 1 + super().__init__(num_embeddings, embedding_dim // world_size, *args, **kwargs) + + +class ParallelEmbeddings(nn.Module): + def __init__( + self, + embed_dim, + vocab_size, + max_position_embeddings, + process_group, + padding_idx=None, + sequence_parallel=True, + device=None, + dtype=None, + ): + """ + If max_position_embeddings <= 0, there's no position embeddings + """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.process_group = process_group + self.sequence_parallel = sequence_parallel + self.word_embeddings = VocabParallelEmbedding( + vocab_size, + embed_dim, + padding_idx=padding_idx, + process_group=process_group, + **factory_kwargs, + ) + self.max_position_embeddings = max_position_embeddings + if self.max_position_embeddings > 0: + self.position_embeddings = ColumnParallelEmbedding( + max_position_embeddings, embed_dim, process_group=process_group, **factory_kwargs + ) + + def forward(self, input_ids, position_ids=None, combine_batch_seqlen_dim=False): + """ + input_ids: (batch, seqlen) + position_ids: (batch, seqlen) + """ + batch_size, seqlen = input_ids.shape + world_size = torch.distributed.get_world_size(self.process_group) + embeddings = self.word_embeddings(input_ids) + if self.max_position_embeddings > 0: + if position_ids is None: + position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device) + position_embeddings = self.position_embeddings(position_ids) + if world_size <= 1: + embeddings = embeddings + position_embeddings + else: + partition_dim = self.position_embeddings.embedding_dim + rank = torch.distributed.get_rank(self.process_group) + embeddings[ + ..., rank * partition_dim : (rank + 1) * partition_dim + ] += position_embeddings + if combine_batch_seqlen_dim: + embeddings = rearrange(embeddings, "b s d -> (b s) d") + reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce + return embeddings if world_size <= 1 else reduce_fn(embeddings, self.process_group) diff --git a/mamba/build/lib/mamba_ssm/models/__init__.py b/mamba/build/lib/mamba_ssm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/build/lib/mamba_ssm/models/config_mamba.py b/mamba/build/lib/mamba_ssm/models/config_mamba.py new file mode 100644 index 0000000000000000000000000000000000000000..646c9e1e8ac94b2e82974cc0d5dab83fcfea900c --- /dev/null +++ b/mamba/build/lib/mamba_ssm/models/config_mamba.py @@ -0,0 +1,18 @@ +from dataclasses import dataclass, field + + +@dataclass +class MambaConfig: + + d_model: int = 2560 + d_intermediate: int = 0 + n_layer: int = 64 + vocab_size: int = 50277 + ssm_cfg: dict = field(default_factory=dict) + attn_layer_idx: list = field(default_factory=list) + attn_cfg: dict = field(default_factory=dict) + rms_norm: bool = True + residual_in_fp32: 
bool = True + fused_add_norm: bool = True + pad_vocab_size_multiple: int = 8 + tie_embeddings: bool = True diff --git a/mamba/build/lib/mamba_ssm/models/mixer_seq_simple.py b/mamba/build/lib/mamba_ssm/models/mixer_seq_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..fae2257a924f30378c30cdd8adfd370ed15b2c4c --- /dev/null +++ b/mamba/build/lib/mamba_ssm/models/mixer_seq_simple.py @@ -0,0 +1,309 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. + +import math +from functools import partial +import json +import os +import copy + +from collections import namedtuple + +import torch +import torch.nn as nn + +from mamba_ssm.models.config_mamba import MambaConfig +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.modules.mamba2 import Mamba2 +from mamba_ssm.modules.mha import MHA +from mamba_ssm.modules.mlp import GatedMLP +from mamba_ssm.modules.block import Block +from mamba_ssm.utils.generation import GenerationMixin +from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf + +try: + from mamba_ssm.ops.triton.layer_norm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +def create_block( + d_model, + d_intermediate, + ssm_cfg=None, + attn_layer_idx=None, + attn_cfg=None, + norm_epsilon=1e-5, + rms_norm=False, + residual_in_fp32=False, + fused_add_norm=False, + layer_idx=None, + device=None, + dtype=None, +): + if ssm_cfg is None: + ssm_cfg = {} + if attn_layer_idx is None: + attn_layer_idx = [] + if attn_cfg is None: + attn_cfg = {} + factory_kwargs = {"device": device, "dtype": dtype} + if layer_idx not in attn_layer_idx: + # Create a copy of the config to modify + ssm_cfg = copy.deepcopy(ssm_cfg) if ssm_cfg is not None else {} + ssm_layer = ssm_cfg.pop("layer", "Mamba1") + if ssm_layer not in ["Mamba1", "Mamba2"]: + raise ValueError(f"Invalid ssm_layer: {ssm_layer}, only support Mamba1 and Mamba2") + mixer_cls = partial( + Mamba2 if ssm_layer == "Mamba2" else Mamba, + layer_idx=layer_idx, + **ssm_cfg, + **factory_kwargs + ) + else: + mixer_cls = partial(MHA, layer_idx=layer_idx, **attn_cfg, **factory_kwargs) + norm_cls = partial( + nn.LayerNorm if not rms_norm else RMSNorm, eps=norm_epsilon, **factory_kwargs + ) + if d_intermediate == 0: + mlp_cls = nn.Identity + else: + mlp_cls = partial( + GatedMLP, hidden_features=d_intermediate, out_features=d_model, **factory_kwargs + ) + block = Block( + d_model, + mixer_cls, + mlp_cls, + norm_cls=norm_cls, + fused_add_norm=fused_add_norm, + residual_in_fp32=residual_in_fp32, + ) + block.layer_idx = layer_idx + return block + + +# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454 +def _init_weights( + module, + n_layer, + initializer_range=0.02, # Now only used for embedding layer. + rescale_prenorm_residual=True, + n_residuals_per_layer=1, # Change to 2 if we have MLP +): + if isinstance(module, nn.Linear): + if module.bias is not None: + if not getattr(module.bias, "_no_reinit", False): + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Embedding): + nn.init.normal_(module.weight, std=initializer_range) + + if rescale_prenorm_residual: + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. 
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if name in ["out_proj.weight", "fc2.weight"]: + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) + # We need to reinit p since this code could be called multiple times + # Having just p *= scale would repeatedly scale it down + nn.init.kaiming_uniform_(p, a=math.sqrt(5)) + with torch.no_grad(): + p /= math.sqrt(n_residuals_per_layer * n_layer) + + +class MixerModel(nn.Module): + def __init__( + self, + d_model: int, + n_layer: int, + d_intermediate: int, + vocab_size: int, + ssm_cfg=None, + attn_layer_idx=None, + attn_cfg=None, + norm_epsilon: float = 1e-5, + rms_norm: bool = False, + initializer_cfg=None, + fused_add_norm=False, + residual_in_fp32=False, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + + self.embedding = nn.Embedding(vocab_size, d_model, **factory_kwargs) + + # We change the order of residual and layer norm: + # Instead of LN -> Attn / MLP -> Add, we do: + # Add -> LN -> Attn / MLP / Mixer, returning both the residual branch (output of Add) and + # the main branch (output of MLP / Mixer). The model definition is unchanged. + # This is for performance reason: we can fuse add + layer_norm. + self.fused_add_norm = fused_add_norm + if self.fused_add_norm: + if layer_norm_fn is None or rms_norm_fn is None: + raise ImportError("Failed to import Triton LayerNorm / RMSNorm kernels") + + self.layers = nn.ModuleList( + [ + create_block( + d_model, + d_intermediate=d_intermediate, + ssm_cfg=ssm_cfg, + attn_layer_idx=attn_layer_idx, + attn_cfg=attn_cfg, + norm_epsilon=norm_epsilon, + rms_norm=rms_norm, + residual_in_fp32=residual_in_fp32, + fused_add_norm=fused_add_norm, + layer_idx=i, + **factory_kwargs, + ) + for i in range(n_layer) + ] + ) + + self.norm_f = (nn.LayerNorm if not rms_norm else RMSNorm)( + d_model, eps=norm_epsilon, **factory_kwargs + ) + + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + n_residuals_per_layer=1 if d_intermediate == 0 else 2, # 2 if we have MLP + ) + ) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return { + i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + for i, layer in enumerate(self.layers) + } + + def forward(self, input_ids, inference_params=None, **mixer_kwargs): + hidden_states = self.embedding(input_ids) + residual = None + for layer in self.layers: + hidden_states, residual = layer( + hidden_states, residual, inference_params=inference_params, **mixer_kwargs + ) + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm_f(residual.to(dtype=self.norm_f.weight.dtype)) + else: + # Set prenorm=False here since we don't need the residual + hidden_states = layer_norm_fn( + hidden_states, + self.norm_f.weight, + self.norm_f.bias, + eps=self.norm_f.eps, + residual=residual, + prenorm=False, + residual_in_fp32=self.residual_in_fp32, + is_rms_norm=isinstance(self.norm_f, RMSNorm) + ) + return hidden_states + + +class MambaLMHeadModel(nn.Module, GenerationMixin): + + def __init__( + self, + config: 
MambaConfig, + initializer_cfg=None, + device=None, + dtype=None, + ) -> None: + self.config = config + d_model = config.d_model + n_layer = config.n_layer + d_intermediate = config.d_intermediate + vocab_size = config.vocab_size + ssm_cfg = config.ssm_cfg + attn_layer_idx = config.attn_layer_idx + attn_cfg = config.attn_cfg + rms_norm = config.rms_norm + residual_in_fp32 = config.residual_in_fp32 + fused_add_norm = config.fused_add_norm + pad_vocab_size_multiple = config.pad_vocab_size_multiple + factory_kwargs = {"device": device, "dtype": dtype} + + super().__init__() + if vocab_size % pad_vocab_size_multiple != 0: + vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple) + self.backbone = MixerModel( + d_model=d_model, + n_layer=n_layer, + d_intermediate=d_intermediate, + vocab_size=vocab_size, + ssm_cfg=ssm_cfg, + attn_layer_idx=attn_layer_idx, + attn_cfg=attn_cfg, + rms_norm=rms_norm, + initializer_cfg=initializer_cfg, + fused_add_norm=fused_add_norm, + residual_in_fp32=residual_in_fp32, + **factory_kwargs, + ) + self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs) + + # Initialize weights and apply final processing + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + self.tie_weights() + + def tie_weights(self): + if self.config.tie_embeddings: + self.lm_head.weight = self.backbone.embedding.weight + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.backbone.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + + def forward(self, input_ids, position_ids=None, inference_params=None, num_last_tokens=0, **mixer_kwargs): + """ + "position_ids" is just to be compatible with Transformer generation. We don't use it. + num_last_tokens: if > 0, only return the logits for the last n tokens + """ + hidden_states = self.backbone(input_ids, inference_params=inference_params, **mixer_kwargs) + if num_last_tokens > 0: + hidden_states = hidden_states[:, -num_last_tokens:] + lm_logits = self.lm_head(hidden_states) + CausalLMOutput = namedtuple("CausalLMOutput", ["logits"]) + return CausalLMOutput(logits=lm_logits) + + @classmethod + def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs): + config_data = load_config_hf(pretrained_model_name) + config = MambaConfig(**config_data) + model = cls(config, device=device, dtype=dtype, **kwargs) + model.load_state_dict(load_state_dict_hf(pretrained_model_name, device=device, dtype=dtype)) + return model + + def save_pretrained(self, save_directory): + """ + Minimal implementation of save_pretrained for MambaLMHeadModel. + Save the model and its configuration file to a directory. 
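+        Writes two files into save_directory (see the body below):
+          - pytorch_model.bin: the model state_dict saved with torch.save
+          - config.json: the MambaConfig fields serialized with json.dump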
+ """ + # Ensure save_directory exists + os.makedirs(save_directory, exist_ok=True) + + # Save the model's state_dict + model_path = os.path.join(save_directory, 'pytorch_model.bin') + torch.save(self.state_dict(), model_path) + + # Save the configuration of the model + config_path = os.path.join(save_directory, 'config.json') + with open(config_path, 'w') as f: + json.dump(self.config.__dict__, f, indent=4) diff --git a/mamba/build/lib/mamba_ssm/modules/__init__.py b/mamba/build/lib/mamba_ssm/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/build/lib/mamba_ssm/modules/block.py b/mamba/build/lib/mamba_ssm/modules/block.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd968a0bf20668bb312f9b7981529cf5c915471 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/modules/block.py @@ -0,0 +1,91 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. +from typing import Optional + +import torch +from torch import nn, Tensor + +from mamba_ssm.ops.triton.layer_norm import RMSNorm, layer_norm_fn + + +class Block(nn.Module): + def __init__( + self, dim, mixer_cls, mlp_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False + ): + """ + Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection" + + This Block has a slightly different structure compared to a regular + prenorm Transformer block. + The standard block is: LN -> MHA/MLP -> Add. + [Ref: https://arxiv.org/abs/2002.04745] + Here we have: Add -> LN -> Mixer, returning both + the hidden_states (output of the mixer) and the residual. + This is purely for performance reasons, as we can fuse add and LayerNorm. + The residual needs to be provided (except for the very first block). + """ + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + self.fused_add_norm = fused_add_norm + self.norm = norm_cls(dim) + self.mixer = mixer_cls(dim) + if mlp_cls is not nn.Identity: + self.norm2 = norm_cls(dim) + self.mlp = mlp_cls(dim) + else: + self.mlp = None + if self.fused_add_norm: + assert RMSNorm is not None, "RMSNorm import fails" + assert isinstance( + self.norm, (nn.LayerNorm, RMSNorm) + ), "Only LayerNorm and RMSNorm are supported for fused_add_norm" + + def forward( + self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None, **mixer_kwargs + ): + r"""Pass the input through the encoder layer. + + Args: + hidden_states: the sequence to the encoder layer (required). 
+ residual: hidden_states = Mixer(LN(residual)) + """ + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + else: + hidden_states, residual = layer_norm_fn( + hidden_states, + self.norm.weight, + self.norm.bias, + residual=residual, + prenorm=True, + residual_in_fp32=self.residual_in_fp32, + eps=self.norm.eps, + is_rms_norm=isinstance(self.norm, RMSNorm) + ) + hidden_states = self.mixer(hidden_states, inference_params=inference_params, **mixer_kwargs) + + if self.mlp is not None: + if not self.fused_add_norm: + residual = hidden_states + residual + hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + else: + hidden_states, residual = layer_norm_fn( + hidden_states, + self.norm2.weight, + self.norm2.bias, + residual=residual, + prenorm=True, + residual_in_fp32=self.residual_in_fp32, + eps=self.norm2.eps, + is_rms_norm=isinstance(self.norm2, RMSNorm) + ) + hidden_states = self.mlp(hidden_states) + + return hidden_states, residual + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) diff --git a/mamba/build/lib/mamba_ssm/modules/mamba2.py b/mamba/build/lib/mamba_ssm/modules/mamba2.py new file mode 100644 index 0000000000000000000000000000000000000000..1859ab0de591a2b3c79e26d3f31222bd295e876e --- /dev/null +++ b/mamba/build/lib/mamba_ssm/modules/mamba2.py @@ -0,0 +1,383 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None, None + +try: + from causal_conv1d.causal_conv1d_varlen import causal_conv1d_varlen_states +except ImportError: + causal_conv1d_varlen_states = None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +from mamba_ssm.ops.triton.layernorm_gated import RMSNorm as RMSNormGated + +from mamba_ssm.distributed.tensor_parallel import ColumnParallelLinear, RowParallelLinear +from mamba_ssm.distributed.distributed_utils import all_reduce, reduce_scatter + +from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined +from mamba_ssm.ops.triton.ssd_combined import mamba_split_conv1d_scan_combined + +from huggingface_hub import PyTorchModelHubMixin + + +class Mamba2(nn.Module, PyTorchModelHubMixin): + def __init__( + self, + d_model, + d_state=128, + d_conv=4, + conv_init=None, + expand=2, + headdim=64, + d_ssm=None, # If not None, we only apply SSM on this many dimensions, the rest uses gated MLP + ngroups=1, + A_init_range=(1, 16), + D_has_hdim=False, + rmsnorm=True, + norm_before_gate=False, + dt_min=0.001, + dt_max=0.1, + dt_init_floor=1e-4, + dt_limit=(0.0, float("inf")), + bias=False, + conv_bias=True, + # Fused kernel and sharding options + chunk_size=256, + use_mem_eff_path=True, + layer_idx=None, # Absorb kwarg for general module + process_group=None, + sequence_parallel=True, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + 
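+        # Tensor-parallel bookkeeping: when a process_group is given, d_inner, d_ssm and ngroups are
+        # each divided by the world size below, so every rank holds one shard of the input/output
+        # projections (ColumnParallelLinear / RowParallelLinear).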
self.d_state = d_state + self.d_conv = d_conv + self.conv_init = conv_init + self.expand = expand + self.process_group = process_group + self.sequence_parallel = sequence_parallel + self.world_size = 1 if process_group is None else process_group.size() + self.local_rank = 0 if process_group is None else process_group.rank() + self.d_inner = (self.expand * self.d_model) // self.world_size + assert self.d_inner * self.world_size == self.expand * self.d_model + self.headdim = headdim + self.d_ssm = self.d_inner if d_ssm is None else d_ssm // self.world_size + assert ngroups % self.world_size == 0 + self.ngroups = ngroups // self.world_size + assert self.d_ssm % self.headdim == 0 + self.nheads = self.d_ssm // self.headdim + self.D_has_hdim = D_has_hdim + self.rmsnorm = rmsnorm + self.norm_before_gate = norm_before_gate + self.dt_limit = dt_limit + self.activation = "silu" + self.chunk_size = chunk_size + self.use_mem_eff_path = use_mem_eff_path + self.layer_idx = layer_idx + + # Order: [z, x, B, C, dt] + d_in_proj = 2 * self.d_inner + 2 * self.ngroups * self.d_state + self.nheads + if self.process_group is None: + self.in_proj = nn.Linear(self.d_model, d_in_proj, bias=bias, **factory_kwargs) + else: + self.in_proj = ColumnParallelLinear(self.d_model, d_in_proj * self.world_size, bias=bias, + process_group=self.process_group, sequence_parallel=self.sequence_parallel, + **factory_kwargs) + + conv_dim = self.d_ssm + 2 * self.ngroups * self.d_state + self.conv1d = nn.Conv1d( + in_channels=conv_dim, + out_channels=conv_dim, + bias=conv_bias, + kernel_size=d_conv, + groups=conv_dim, + padding=d_conv - 1, + **factory_kwargs, + ) + if self.conv_init is not None: + nn.init.uniform_(self.conv1d.weight, -self.conv_init, self.conv_init) + + self.act = nn.SiLU() + + # Initialize log dt bias + dt = torch.exp( + torch.rand(self.nheads, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ) + dt = torch.clamp(dt, min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + self.dt_bias = nn.Parameter(inv_dt) + # Just to be explicit. Without this we already don't put wd on dt_bias because of the check + # name.endswith("bias") in param_grouping.py + self.dt_bias._no_weight_decay = True + + assert A_init_range[0] > 0 and A_init_range[1] >= A_init_range[0] + A = torch.empty(self.nheads, dtype=torch.float32, device=device).uniform_(*A_init_range) + A_log = torch.log(A).to(dtype=dtype) + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_ssm if self.D_has_hdim else self.nheads, device=device)) + self.D._no_weight_decay = True + + if self.rmsnorm: + assert RMSNormGated is not None + self.norm = RMSNormGated(self.d_ssm, eps=1e-5, norm_before_gate=self.norm_before_gate, + group_size=self.d_ssm // ngroups, **factory_kwargs) + + if self.process_group is None: + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) + else: + self.out_proj = RowParallelLinear(self.d_inner * self.world_size, self.d_model, bias=bias, + process_group=self.process_group, sequence_parallel=self.sequence_parallel, + **factory_kwargs) + + def forward(self, u, seqlen=None, seq_idx=None, cu_seqlens=None, inference_params=None): + """ + u: (batch, seqlen, hidden_dim) if seqlen=None. + If seqlen is not None, u is (batch * seqlen, hidden_dim). 
This is so that when we + split u during sequence parallel, we split the batch * seqlen dimension + (in case batch is small). + Returns: same shape as u + """ + seqlen_og = seqlen + if seqlen is None: + batch, seqlen, dim = u.shape + else: + batch_seqlen, dim = u.shape + batch = batch_seqlen // seqlen + + conv_state, ssm_state = None, None + if inference_params is not None: + inference_batch = cu_seqlens.shape[0] - 1 if cu_seqlens is not None else batch + conv_state, ssm_state = self._get_states_from_cache(inference_params, inference_batch) + if inference_params.seqlen_offset > 0: + # The states are updated inplace + out, _, _ = self.step(u, conv_state, ssm_state) + return out + + zxbcdt = self.in_proj(u) # (B, L, d_in_proj) or (B * L, d_in_proj) + if seqlen_og is not None: + zxbcdt = rearrange(zxbcdt, "(b l) d -> b l d", l=seqlen) + # If the model is loaded in fp16, without the .float() here, A might be -inf + A = -torch.exp(self.A_log.float()) # (nheads) or (d_inner, d_state) + dt_limit_kwargs = {} if self.dt_limit == (0.0, float("inf")) else dict(dt_limit=self.dt_limit) + if self.use_mem_eff_path and inference_params is None: + out = mamba_split_conv1d_scan_combined( + zxbcdt, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.dt_bias, + A, + D=rearrange(self.D, "(h p) -> h p", p=self.headdim) if self.D_has_hdim else self.D, + chunk_size=self.chunk_size, + seq_idx=seq_idx, + activation=self.activation, + rmsnorm_weight=self.norm.weight if self.rmsnorm else None, + rmsnorm_eps=self.norm.eps if self.rmsnorm else 1e-6, + outproj_weight=self.out_proj.weight, + outproj_bias=self.out_proj.bias, + headdim=None if self.D_has_hdim else self.headdim, + ngroups=self.ngroups, + norm_before_gate=self.norm_before_gate, + **dt_limit_kwargs, + ) + if seqlen_og is not None: + out = rearrange(out, "b l d -> (b l) d") + if self.process_group is not None: + reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce + out = reduce_fn(out, self.process_group) + else: + d_mlp = (zxbcdt.shape[-1] - 2 * self.d_ssm - 2 * self.ngroups * self.d_state - self.nheads) // 2 + z0, x0, z, xBC, dt = torch.split( + zxbcdt, + [d_mlp, d_mlp, self.d_ssm, self.d_ssm + 2 * self.ngroups * self.d_state, self.nheads], + dim=-1 + ) + if conv_state is not None: + if cu_seqlens is None: + # If we just take xBC[:, :, -self.d_conv :], it will error if seqlen < self.d_conv + # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. 
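+                    # Illustrative shape check: xBC_t is (B, conv_dim, seqlen) and conv_state is
+                    # (B, conv_dim, d_conv). With d_conv=4 and seqlen=2, F.pad(xBC_t, (2, 0)) left-pads
+                    # the time axis to width 4; with seqlen=6 the negative pad (-2, 0) keeps only the
+                    # last 4 steps, matching the conv state width.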
+                    xBC_t = rearrange(xBC, "b l d -> b d l")
+                    conv_state.copy_(F.pad(xBC_t, (self.d_conv - xBC_t.shape[-1], 0)))  # Update state (B D W)
+                else:
+                    assert causal_conv1d_varlen_states is not None, "varlen inference requires causal_conv1d package"
+                    assert batch == 1, "varlen inference only supports batch dimension 1"
+                    conv_varlen_states = causal_conv1d_varlen_states(
+                        xBC.squeeze(0), cu_seqlens, state_len=conv_state.shape[-1]
+                    )
+                    conv_state.copy_(conv_varlen_states)
+            assert self.activation in ["silu", "swish"]
+            if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
+                assert seq_idx is None, "varlen conv1d requires the causal_conv1d package"
+                xBC = self.act(
+                    self.conv1d(xBC.transpose(1, 2)).transpose(1, 2)[:, :-(self.d_conv - 1)]
+                )  # (B, L, self.d_ssm + 2 * ngroups * d_state)
+            else:
+                xBC = causal_conv1d_fn(
+                    xBC.transpose(1, 2),
+                    rearrange(self.conv1d.weight, "d 1 w -> d w"),
+                    bias=self.conv1d.bias,
+                    activation=self.activation,
+                    seq_idx=seq_idx,
+                ).transpose(1, 2)
+            x, B, C = torch.split(xBC, [self.d_ssm, self.ngroups * self.d_state, self.ngroups * self.d_state], dim=-1)
+            y = mamba_chunk_scan_combined(
+                rearrange(x, "b l (h p) -> b l h p", p=self.headdim),
+                dt,
+                A,
+                rearrange(B, "b l (g n) -> b l g n", g=self.ngroups),
+                rearrange(C, "b l (g n) -> b l g n", g=self.ngroups),
+                chunk_size=self.chunk_size,
+                D=rearrange(self.D, "(h p) -> h p", p=self.headdim) if self.D_has_hdim else self.D,
+                z=rearrange(z, "b l (h p) -> b l h p", p=self.headdim) if not self.rmsnorm else None,
+                dt_bias=self.dt_bias,
+                dt_softplus=True,
+                seq_idx=seq_idx,
+                cu_seqlens=cu_seqlens,
+                **dt_limit_kwargs,
+                return_final_states=ssm_state is not None,
+                return_varlen_states=cu_seqlens is not None and inference_params is not None,
+            )
+            if ssm_state is not None:
+                y, last_state, *rest = y
+                if cu_seqlens is None:
+                    ssm_state.copy_(last_state)
+                else:
+                    varlen_states = rest[0]
+                    ssm_state.copy_(varlen_states)
+            y = rearrange(y, "b l h p -> b l (h p)")
+            if self.rmsnorm:
+                y = self.norm(y, z)
+            if d_mlp > 0:
+                y = torch.cat([F.silu(z0) * x0, y], dim=-1)
+            if seqlen_og is not None:
+                y = rearrange(y, "b l d -> (b l) d")
+            out = self.out_proj(y)
+        return out
+
+    def step(self, hidden_states, conv_state, ssm_state):
+        dtype = hidden_states.dtype
+        assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now"
+        zxbcdt = self.in_proj(hidden_states.squeeze(1))  # (B 2D)
+        d_mlp = (zxbcdt.shape[-1] - 2 * self.d_ssm - 2 * self.ngroups * self.d_state - self.nheads) // 2
+        z0, x0, z, xBC, dt = torch.split(
+            zxbcdt,
+            [d_mlp, d_mlp, self.d_ssm, self.d_ssm + 2 * self.ngroups * self.d_state, self.nheads],
+            dim=-1
+        )
+
+        # Conv step
+        if causal_conv1d_update is None:
+            conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1))  # Update state (B D W)
+            conv_state[:, :, -1] = xBC
+            xBC = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1)  # (B D)
+            if self.conv1d.bias is not None:
+                xBC = xBC + self.conv1d.bias
+            xBC = self.act(xBC).to(dtype=dtype)
+        else:
+            xBC = causal_conv1d_update(
+                xBC,
+                conv_state,
+                rearrange(self.conv1d.weight, "d 1 w -> d w"),
+                self.conv1d.bias,
+                self.activation,
+            )
+
+        x, B, C = torch.split(xBC, [self.d_ssm, self.ngroups * self.d_state, self.ngroups * self.d_state], dim=-1)
+        A = -torch.exp(self.A_log.float())  # (nheads,)
+
+        # SSM step
+        if selective_state_update is None:
+            assert self.ngroups == 1, "Only support ngroups=1 for this inference code path"
+            # Discretize A and B
+            dt = F.softplus(dt +
self.dt_bias.to(dtype=dt.dtype)) # (batch, nheads) + dA = torch.exp(dt * A) # (batch, nheads) + x = rearrange(x, "b (h p) -> b h p", p=self.headdim) + dBx = torch.einsum("bh,bn,bhp->bhpn", dt, B, x) + ssm_state.copy_(ssm_state * rearrange(dA, "b h -> b h 1 1") + dBx) + y = torch.einsum("bhpn,bn->bhp", ssm_state.to(dtype), C) + y = y + rearrange(self.D.to(dtype), "h -> h 1") * x + y = rearrange(y, "b h p -> b (h p)") + if not self.rmsnorm: + y = y * self.act(z) # (B D) + else: + A = repeat(A, "h -> h p n", p=self.headdim, n=self.d_state).to(dtype=torch.float32) + dt = repeat(dt, "b h -> b h p", p=self.headdim) + dt_bias = repeat(self.dt_bias, "h -> h p", p=self.headdim) + D = repeat(self.D, "h -> h p", p=self.headdim) + B = rearrange(B, "b (g n) -> b g n", g=self.ngroups) + C = rearrange(C, "b (g n) -> b g n", g=self.ngroups) + x_reshaped = rearrange(x, "b (h p) -> b h p", p=self.headdim) + if not self.rmsnorm: + z = rearrange(z, "b (h p) -> b h p", p=self.headdim) + y = selective_state_update( + ssm_state, x_reshaped, dt, A, B, C, D, z=z if not self.rmsnorm else None, + dt_bias=dt_bias, dt_softplus=True + ) + y = rearrange(y, "b h p -> b (h p)") + if self.rmsnorm: + y = self.norm(y, z) + if d_mlp > 0: + y = torch.cat([F.silu(z0) * x0, y], dim=-1) + out = self.out_proj(y) + return out.unsqueeze(1), conv_state, ssm_state + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + device = self.out_proj.weight.device + conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype + conv_state = torch.zeros( + batch_size, self.d_conv, self.conv1d.weight.shape[0], device=device, dtype=conv_dtype + ).transpose(1, 2) + ssm_dtype = self.in_proj.weight.dtype if dtype is None else dtype + ssm_state = torch.zeros( + batch_size, self.nheads, self.headdim, self.d_state, device=device, dtype=ssm_dtype + ) + return conv_state, ssm_state + + def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): + assert self.layer_idx is not None + if self.layer_idx not in inference_params.key_value_memory_dict: + batch_shape = (batch_size,) + conv_state = torch.zeros( + batch_size, + self.d_conv, + self.conv1d.weight.shape[0], + device=self.conv1d.weight.device, + dtype=self.conv1d.weight.dtype, + ).transpose(1, 2) + ssm_state = torch.zeros( + batch_size, + self.nheads, + self.headdim, + self.d_state, + device=self.in_proj.weight.device, + dtype=self.in_proj.weight.dtype, + ) + inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) + else: + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + # TODO: What if batch size changes between generation, and we reuse the same states? + if initialize_states: + conv_state.zero_() + ssm_state.zero_() + return conv_state, ssm_state diff --git a/mamba/build/lib/mamba_ssm/modules/mamba2_simple.py b/mamba/build/lib/mamba_ssm/modules/mamba2_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..77a6af28e9f4630c482aa2c108c74f5d1dad1040 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/modules/mamba2_simple.py @@ -0,0 +1,200 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
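+# Note: Mamba2Simple below is a reduced variant of the Mamba2 module in mamba2.py: it keeps the fused
+# mamba_split_conv1d_scan_combined path and the chunked-scan fallback, but has no tensor-parallel
+# sharding and no step()/inference-cache support.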
+ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn +except ImportError: + causal_conv1d_fn = None + +try: + from mamba_ssm.ops.triton.layernorm_gated import RMSNorm as RMSNormGated, LayerNorm +except ImportError: + RMSNormGated, LayerNorm = None, None + +from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined +from mamba_ssm.ops.triton.ssd_combined import mamba_split_conv1d_scan_combined + + +class Mamba2Simple(nn.Module): + def __init__( + self, + d_model, + d_state=64, + d_conv=4, + conv_init=None, + expand=2, + headdim=128, + ngroups=1, + A_init_range=(1, 16), + dt_min=0.001, + dt_max=0.1, + dt_init_floor=1e-4, + dt_limit=(0.0, float("inf")), + learnable_init_states=False, + activation="swish", + bias=False, + conv_bias=True, + # Fused kernel and sharding options + chunk_size=256, + use_mem_eff_path=True, + layer_idx=None, # Absorb kwarg for general module + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.conv_init = conv_init + self.expand = expand + self.d_inner = self.expand * self.d_model + self.headdim = headdim + self.ngroups = ngroups + assert self.d_inner % self.headdim == 0 + self.nheads = self.d_inner // self.headdim + self.dt_limit = dt_limit + self.learnable_init_states = learnable_init_states + self.activation = activation + self.chunk_size = chunk_size + self.use_mem_eff_path = use_mem_eff_path + self.layer_idx = layer_idx + + # Order: [z, x, B, C, dt] + d_in_proj = 2 * self.d_inner + 2 * self.ngroups * self.d_state + self.nheads + self.in_proj = nn.Linear(self.d_model, d_in_proj, bias=bias, **factory_kwargs) + + conv_dim = self.d_inner + 2 * self.ngroups * self.d_state + self.conv1d = nn.Conv1d( + in_channels=conv_dim, + out_channels=conv_dim, + bias=conv_bias, + kernel_size=d_conv, + groups=conv_dim, + padding=d_conv - 1, + **factory_kwargs, + ) + if self.conv_init is not None: + nn.init.uniform_(self.conv1d.weight, -self.conv_init, self.conv_init) + # self.conv1d.weight._no_weight_decay = True + + if self.learnable_init_states: + self.init_states = nn.Parameter(torch.zeros(self.nheads, self.headdim, self.d_state, **factory_kwargs)) + self.init_states._no_weight_decay = True + + self.act = nn.SiLU() + + # Initialize log dt bias + dt = torch.exp( + torch.rand(self.nheads, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ) + dt = torch.clamp(dt, min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + self.dt_bias = nn.Parameter(inv_dt) + # Just to be explicit. 
Without this we already don't put wd on dt_bias because of the check + # name.endswith("bias") in param_grouping.py + self.dt_bias._no_weight_decay = True + + # A parameter + assert A_init_range[0] > 0 and A_init_range[1] >= A_init_range[0] + A = torch.empty(self.nheads, dtype=torch.float32, device=device).uniform_(*A_init_range) + A_log = torch.log(A).to(dtype=dtype) + self.A_log = nn.Parameter(A_log) + # self.register_buffer("A_log", torch.zeros(self.nheads, dtype=torch.float32, device=device), persistent=True) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.nheads, device=device)) + self.D._no_weight_decay = True + + # Extra normalization layer right before output projection + assert RMSNormGated is not None + self.norm = RMSNormGated(self.d_inner, eps=1e-5, norm_before_gate=False, **factory_kwargs) + + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) + + def forward(self, u, seq_idx=None): + """ + u: (B, L, D) + Returns: same shape as u + """ + batch, seqlen, dim = u.shape + + zxbcdt = self.in_proj(u) # (B, L, d_in_proj) + A = -torch.exp(self.A_log) # (nheads) or (d_inner, d_state) + initial_states=repeat(self.init_states, "... -> b ...", b=batch) if self.learnable_init_states else None + dt_limit_kwargs = {} if self.dt_limit == (0.0, float("inf")) else dict(dt_limit=self.dt_limit) + + if self.use_mem_eff_path: + # Fully fused path + out = mamba_split_conv1d_scan_combined( + zxbcdt, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.dt_bias, + A, + D=self.D, + chunk_size=self.chunk_size, + seq_idx=seq_idx, + activation=self.activation, + rmsnorm_weight=self.norm.weight, + rmsnorm_eps=self.norm.eps, + outproj_weight=self.out_proj.weight, + outproj_bias=self.out_proj.bias, + headdim=self.headdim, + ngroups=self.ngroups, + norm_before_gate=False, + initial_states=initial_states, + **dt_limit_kwargs, + ) + else: + z, xBC, dt = torch.split( + zxbcdt, [self.d_inner, self.d_inner + 2 * self.ngroups * self.d_state, self.nheads], dim=-1 + ) + dt = F.softplus(dt + self.dt_bias) # (B, L, nheads) + assert self.activation in ["silu", "swish"] + + # 1D Convolution + if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]: + xBC = self.act( + self.conv1d(xBC.transpose(1, 2)).transpose(1, 2) + ) # (B, L, self.d_inner + 2 * ngroups * d_state) + xBC = xBC[:, :seqlen, :] + else: + xBC = causal_conv1d_fn( + x=xBC.transpose(1, 2), + weight=rearrange(self.conv1d.weight, "d 1 w -> d w"), + bias=self.conv1d.bias, + activation=self.activation, + ).transpose(1, 2) + + # Split into 3 main branches: X, B, C + # These correspond to V, K, Q respectively in the SSM/attention duality + x, B, C = torch.split(xBC, [self.d_inner, self.ngroups * self.d_state, self.ngroups * self.d_state], dim=-1) + y = mamba_chunk_scan_combined( + rearrange(x, "b l (h p) -> b l h p", p=self.headdim), + dt, + A, + rearrange(B, "b l (g n) -> b l g n", g=self.ngroups), + rearrange(C, "b l (g n) -> b l g n", g=self.ngroups), + chunk_size=self.chunk_size, + D=self.D, + z=None, + seq_idx=seq_idx, + initial_states=initial_states, + **dt_limit_kwargs, + ) + y = rearrange(y, "b l h p -> b l (h p)") + + # Multiply "gate" branch and apply extra normalization layer + y = self.norm(y, z) + out = self.out_proj(y) + return out diff --git a/mamba/build/lib/mamba_ssm/modules/mamba_simple.py b/mamba/build/lib/mamba_ssm/modules/mamba_simple.py new file mode 100644 index 
0000000000000000000000000000000000000000..4c8a388217c4cd4d21d5ea4704a5d571be294781 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/modules/mamba_simple.py @@ -0,0 +1,294 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. + +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from einops import rearrange, repeat + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None, None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +try: + from mamba_ssm.ops.triton.layer_norm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +class Mamba(nn.Module): + def __init__( + self, + d_model, + d_state=16, + d_conv=4, + expand=2, + dt_rank="auto", + dt_min=0.001, + dt_max=0.1, + dt_init="random", + dt_scale=1.0, + dt_init_floor=1e-4, + conv_bias=True, + bias=False, + use_fast_path=True, # Fused kernel options + layer_idx=None, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.expand = expand + self.d_inner = int(self.expand * self.d_model) + self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank + self.use_fast_path = use_fast_path + self.layer_idx = layer_idx + + self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs) + + self.conv1d = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.activation = "silu" + self.act = nn.SiLU() + + self.x_proj = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + # Initialize special dt projection to preserve variance at initialization + dt_init_std = self.dt_rank**-0.5 * dt_scale + if dt_init == "constant": + nn.init.constant_(self.dt_proj.weight, dt_init_std) + elif dt_init == "random": + nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std) + else: + raise NotImplementedError + + # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max + dt = torch.exp( + torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ).clamp(min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + with torch.no_grad(): + self.dt_proj.bias.copy_(inv_dt) + # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit + self.dt_proj.bias._no_reinit = True + + # S4D real initialization + A = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_log = torch.log(A) # Keep A_log in fp32 + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D._no_weight_decay = True + + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, 
**factory_kwargs) + + def forward(self, hidden_states, inference_params=None): + """ + hidden_states: (B, L, D) + Returns: same shape as hidden_states + """ + batch, seqlen, dim = hidden_states.shape + + conv_state, ssm_state = None, None + if inference_params is not None: + conv_state, ssm_state = self._get_states_from_cache(inference_params, batch) + if inference_params.seqlen_offset > 0: + # The states are updated inplace + out, _, _ = self.step(hidden_states, conv_state, ssm_state) + return out + + # We do matmul and transpose BLH -> HBL at the same time + xz = rearrange( + self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"), + "d (b l) -> b d l", + l=seqlen, + ) + if self.in_proj.bias is not None: + xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1") + + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + # In the backward pass we write dx and dz next to each other to avoid torch.cat + if self.use_fast_path and causal_conv1d_fn is not None and inference_params is None: # Doesn't support outputting the states + out = mamba_inner_fn( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + self.out_proj.weight, + self.out_proj.bias, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + else: + x, z = xz.chunk(2, dim=1) + # Compute short convolution + if conv_state is not None: + # If we just take x[:, :, -self.d_conv :], it will error if seqlen < self.d_conv + # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. + conv_state.copy_(F.pad(x, (self.d_conv - x.shape[-1], 0))) # Update state (B D W) + if causal_conv1d_fn is None: + x = self.act(self.conv1d(x)[..., :seqlen]) + else: + assert self.activation in ["silu", "swish"] + x = causal_conv1d_fn( + x=x, + weight=rearrange(self.conv1d.weight, "d 1 w -> d w"), + bias=self.conv1d.bias, + activation=self.activation, + ) + + # We're careful here about the layout, to avoid extra transposes. + # We want dt to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
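+            # Shape walkthrough: x is (B, d_inner, L); x_dbl is (B*L, dt_rank + 2*d_state) and is split
+            # into dt (B*L, dt_rank) plus B and C (B*L, d_state each); dt is projected to d_inner and laid
+            # out as (B, d_inner, L) before the call to selective_scan_fn (dt_proj.bias is passed
+            # separately as delta_bias).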
+ x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) + dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = self.dt_proj.weight @ dt.t() + dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) + B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + assert self.activation in ["silu", "swish"] + y = selective_scan_fn( + x, + dt, + A, + B, + C, + self.D.float(), + z=z, + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + return_last_state=ssm_state is not None, + ) + if ssm_state is not None: + y, last_state = y + ssm_state.copy_(last_state) + y = rearrange(y, "b d l -> b l d") + out = self.out_proj(y) + return out + + def step(self, hidden_states, conv_state, ssm_state): + dtype = hidden_states.dtype + assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now" + xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D) + x, z = xz.chunk(2, dim=-1) # (B D) + + # Conv step + if causal_conv1d_update is None: + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) + if self.conv1d.bias is not None: + x = x + self.conv1d.bias + x = self.act(x).to(dtype=dtype) + else: + x = causal_conv1d_update( + x, + conv_state, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + x_db = self.x_proj(x) # (B dt_rank+2*d_state) + dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + # Don't add dt_bias here + dt = F.linear(dt, self.dt_proj.weight) # (B d_inner) + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + + # SSM step + if selective_state_update is None: + # Discretize A and B + dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype)) + dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A)) + dB = torch.einsum("bd,bn->bdn", dt, B) + ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB) + y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C) + y = y + self.D.to(dtype) * x + y = y * self.act(z) # (B D) + else: + y = selective_state_update( + ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True + ) + + out = self.out_proj(y) + return out.unsqueeze(1), conv_state, ssm_state + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + device = self.out_proj.weight.device + conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype + conv_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype + ) + ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype + # ssm_dtype = torch.float32 + ssm_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype + ) + return conv_state, ssm_state + + def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): + assert self.layer_idx is not None + if self.layer_idx not in inference_params.key_value_memory_dict: + batch_shape = (batch_size,) + conv_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_conv, + device=self.conv1d.weight.device, + dtype=self.conv1d.weight.dtype, + ) + ssm_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_state, + device=self.dt_proj.weight.device, + dtype=self.dt_proj.weight.dtype, + # 
dtype=torch.float32, + ) + inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) + else: + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + # TODO: What if batch size changes between generation, and we reuse the same states? + if initialize_states: + conv_state.zero_() + ssm_state.zero_() + return conv_state, ssm_state diff --git a/mamba/build/lib/mamba_ssm/modules/mha.py b/mamba/build/lib/mamba_ssm/modules/mha.py new file mode 100644 index 0000000000000000000000000000000000000000..978f3ea4d8f1c962303b5cb6d8388c2289c4f7db --- /dev/null +++ b/mamba/build/lib/mamba_ssm/modules/mha.py @@ -0,0 +1,294 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange + +try: + from flash_attn import flash_attn_with_kvcache +except ImportError: + flash_attn_with_kvcache = None + +try: + from flash_attn.layers.rotary import RotaryEmbedding +except ImportError: + RotaryEmbedding = None + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None, None + + +def _update_kv_cache(kv, inference_params, layer_idx): + """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)""" + # Pre-allocate memory for key-values for inference. + num_heads, head_dim = kv.shape[-2:] + assert layer_idx in inference_params.key_value_memory_dict + kv_cache, _ = inference_params.key_value_memory_dict[layer_idx] + # Adjust key and value for inference + batch_start = inference_params.batch_size_offset + batch_end = batch_start + kv.shape[0] + sequence_start = inference_params.seqlen_offset + sequence_end = sequence_start + kv.shape[1] + assert batch_end <= kv_cache.shape[0] + assert sequence_end <= kv_cache.shape[1] + assert kv_cache is not None + kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv + return kv_cache[batch_start:batch_end, :sequence_end, ...] + + +class MHA(nn.Module): + """Multi-head self-attention and cross-attention""" + + def __init__( + self, + embed_dim, + num_heads, + num_heads_kv=None, + head_dim=None, # If None, use embed_dim // num_heads + mlp_dim=0, + qkv_proj_bias=True, + out_proj_bias=True, + softmax_scale=None, + causal=False, + layer_idx=None, + d_conv=0, + rotary_emb_dim=0, + rotary_emb_base=10000.0, + rotary_emb_interleaved=False, + device=None, + dtype=None, + ) -> None: + """ + num_heads_kv: can be used to toggle MQA / GQA. If None, use num_heads. + return_residual: whether to return the input x along with the output. This is for + performance reason: for post-norm architecture, returning the input allows us + to fuse the backward of nn.Linear with the residual connection. 
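+        d_conv: if > 0, a depthwise causal Conv1d with this kernel size is applied to the stacked
+            qkv projection before attention (see the forward pass below).
+        rotary_emb_dim: if > 0, rotary position embeddings are applied to q and k; this relies on
+            flash_attn's RotaryEmbedding (asserted at construction).
+        mlp_dim: if > 0, a parallel gated-MLP branch is computed from the same input projection and
+            concatenated to the attention output before out_proj (rounded up to a multiple of 256).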
+ """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.embed_dim = embed_dim + self.layer_idx = layer_idx + self.d_conv = d_conv + self.rotary_emb_dim = rotary_emb_dim + self.softmax_scale = softmax_scale + self.causal = causal + + self.num_heads = num_heads + self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads + assert ( + self.num_heads % self.num_heads_kv == 0 + ), "num_heads must be divisible by num_heads_kv" + if head_dim is None: + assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads" + self.head_dim = head_dim if head_dim is not None else self.embed_dim // num_heads + self.mlp_dim = math.ceil(mlp_dim / 256) * 256 + qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv) + out_dim = self.head_dim * self.num_heads + + if self.rotary_emb_dim > 0: + assert RotaryEmbedding is not None, "rotary requires flash_attn to be installed" + self.rotary_emb = RotaryEmbedding( + self.rotary_emb_dim, + base=rotary_emb_base, + interleaved=rotary_emb_interleaved, + device=device, + ) + + self.in_proj = nn.Linear(embed_dim, qkv_dim + self.mlp_dim, bias=qkv_proj_bias, **factory_kwargs) + if self.d_conv > 0: + self.conv1d = nn.Conv1d( + qkv_dim, qkv_dim, kernel_size=self.d_conv, padding=self.d_conv - 1, groups=qkv_dim, + **factory_kwargs + ) + self.out_proj = nn.Linear(out_dim + self.mlp_dim // 2, embed_dim, bias=out_proj_bias, **factory_kwargs) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None): + dtype = self.out_proj.weight.dtype if dtype is None else dtype + device = self.out_proj.weight.device + if self.d_conv > 0: + conv_state = torch.zeros( + batch_size, self.conv1d.weight.shape[0], self.d_conv, device=device, dtype=dtype + ) + else: + conv_state = None + kv_cache = torch.empty( + batch_size, max_seqlen, 2, self.num_heads_kv, self.head_dim, dtype=dtype, device=device, + ) + return kv_cache, conv_state + + def _update_kv_cache(self, kv, inference_params): + """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)""" + assert self.layer_idx is not None, "Generation requires layer_idx in the constructor" + return _update_kv_cache(kv, inference_params, self.layer_idx) + + def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params): + """ + Fast path that combine 3 steps: apply rotary to Q and K, update kv cache, and apply attention. 
+ q: (batch_size, seqlen_q, nheads, head_dim) + kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim) + """ + assert inference_params is not None and inference_params.seqlen_offset > 0 + if self.rotary_emb_dim > 0: + self.rotary_emb._update_cos_sin_cache( + inference_params.max_seqlen, device=q.device, dtype=q.dtype + ) + rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached + else: + rotary_cos, rotary_sin = None, None + batch = q.shape[0] + kv_cache, _ = inference_params.key_value_memory_dict[self.layer_idx] + kv_cache = kv_cache[:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + assert flash_attn_with_kvcache is not None, "flash_attn must be installed" + context = flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + rotary_cos=rotary_cos, + rotary_sin=rotary_sin, + cache_seqlens=cache_seqlens, + softmax_scale=self.softmax_scale, + causal=self.causal, + rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False, + ) + return context + + def _update_kvcache_attention(self, q, kv, inference_params): + """Write kv to inference_params, then do attention""" + if ( + inference_params.seqlen_offset == 0 + or flash_attn_with_kvcache is None + ): + # TODO: this only uses seqlen_offset and not lengths_per_sample. + kv = self._update_kv_cache(kv, inference_params) + k, v = kv.unbind(dim=-3) + k = torch.repeat_interleave(k, dim=2, repeats=self.num_heads // self.num_heads_kv) + v = torch.repeat_interleave(v, dim=2, repeats=self.num_heads // self.num_heads_kv) + return F.scaled_dot_product_attention( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=self.causal, scale=self.softmax_scale + ).transpose(1, 2) + else: + batch = q.shape[0] + kv_cache, _ = inference_params.key_value_memory_dict[self.layer_idx] + kv_cache = kv_cache[:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + return flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + cache_seqlens=cache_seqlens, + softmax_scale=self.softmax_scale, + causal=self.causal, + ) + + def forward(self, x, inference_params=None): + """ + Arguments: + x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if + cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total + is the is the sum of the sequence lengths in the batch. + inference_params: for generation. 
Adapted from Megatron-LM (and Apex) + https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470 + """ + if inference_params is not None and self.layer_idx not in inference_params.key_value_memory_dict: + inference_params.key_value_memory_dict[self.layer_idx] = self.allocate_inference_cache( + x.shape[0], inference_params.max_seqlen, dtype=x.dtype + ) + seqlen_offset = ( + 0 + if inference_params is None + else ( + inference_params.lengths_per_sample + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + ) + rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None + qkv = self.in_proj(x) + if self.mlp_dim > 0: + qkv, x_mlp = qkv.split([qkv.shape[-1] - self.mlp_dim, self.mlp_dim], dim=-1) + x_mlp_up, x_mlp_gate = x_mlp.chunk(2, dim=-1) + x_mlp = x_mlp_up * F.silu(x_mlp_gate) + if self.d_conv > 0: + # The inference code for conv1d is pretty messy, should clean it up + if (inference_params is None or inference_params.seqlen_offset == 0): + if causal_conv1d_fn is None: + qkv = rearrange( + self.conv1d(rearrange(qkv, "b s d -> b d s"))[..., :-(self.d_conv - 1)], "b d s -> b s d" + ).contiguous() + else: + qkv = causal_conv1d_fn( + qkv.transpose(1, 2), + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias + ).transpose(1, 2) + if inference_params is not None: + _, conv_state = inference_params.key_value_memory_dict[self.layer_idx] + # If we just take qkv[:, :, -self.d_conv :], it will error if seqlen < self.d_conv + # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. + qkv_t = rearrange(qkv, "b l d -> b d l") + conv_state.copy_(F.pad(qkv_t, (self.d_conv - qkv_t.shape[-1], 0))) # Update state (B D W) + else: + _, conv_state = inference_params.key_value_memory_dict[self.layer_idx] + assert qkv.shape[1] == 1, "Only support decoding with 1 token at a time for now" + qkv = qkv.squeeze(1) + # Conv step + if causal_conv1d_update is None: + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = qkv + qkv = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) + if self.conv1d.bias is not None: + qkv = qkv + self.conv1d.bias + else: + qkv = causal_conv1d_update( + qkv, + conv_state, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias + ) + qkv = qkv.unsqueeze(1) + q, kv = qkv.split([self.num_heads * self.head_dim, self.num_heads_kv * 2 * self.head_dim], dim=-1) + q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim) + kv = rearrange(kv, "... (two hkv d) -> ... 
two hkv d", two=2, d=self.head_dim) + if ( + inference_params is None + or inference_params.seqlen_offset == 0 + or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0) + ): + if self.rotary_emb_dim > 0: + q, kv = self.rotary_emb( + q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen + ) + if inference_params is None: + k, v = kv.unbind(dim=-3) + k = torch.repeat_interleave(k, dim=2, repeats=self.num_heads // self.num_heads_kv) + v = torch.repeat_interleave(v, dim=2, repeats=self.num_heads // self.num_heads_kv) + context = F.scaled_dot_product_attention( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=self.causal, scale=self.softmax_scale + ).transpose(1, 2) + else: + context = self._update_kvcache_attention(q, kv, inference_params) + else: + context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params) + context = rearrange(context, "... h d -> ... (h d)") + if self.mlp_dim > 0: + context = torch.cat([context, x_mlp], dim=-1) + out = self.out_proj(context) + return out diff --git a/mamba/build/lib/mamba_ssm/modules/mlp.py b/mamba/build/lib/mamba_ssm/modules/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..33bab5c7cc21b96d5f5ccfe233e339cad12cfe2c --- /dev/null +++ b/mamba/build/lib/mamba_ssm/modules/mlp.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. +from torch import nn +from torch.nn import functional as F + + +class GatedMLP(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + activation=F.silu, + bias=False, + multiple_of=128, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + out_features = out_features if out_features is not None else in_features + hidden_features = ( + hidden_features if hidden_features is not None else int(8 * in_features / 3) + ) + hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of + self.fc1 = nn.Linear(in_features, 2 * hidden_features, bias=bias, **factory_kwargs) + self.activation = activation + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, **factory_kwargs) + + def forward(self, x): + y = self.fc1(x) + y, gate = y.chunk(2, dim=-1) + y = y * self.activation(gate) + y = self.fc2(y) + return y diff --git a/mamba/build/lib/mamba_ssm/modules/ssd_minimal.py b/mamba/build/lib/mamba_ssm/modules/ssd_minimal.py new file mode 100644 index 0000000000000000000000000000000000000000..9632ebd4350fa18ddc977c2bdedb0bab1fd82646 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/modules/ssd_minimal.py @@ -0,0 +1,103 @@ +# Copyright (c) 2024, Albert Gu and Tri Dao. +"""Minimal implementation of SSD. + +This is the same as Listing 1 from the paper. +""" + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined + + +def segsum_unstable(x): + """Naive segment sum calculation.""" + T = x.size(-1) + x_cumsum = torch.cumsum(x, dim=-1) + x_segsum = x_cumsum[..., :, None] - x_cumsum[..., None, :] + mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=0) + x_segsum = x_segsum.masked_fill(~mask, -torch.inf) + return x_segsum + +def segsum(x): + """More stable segment sum calculation.""" + T = x.size(-1) + x = repeat(x, "... d -> ... 
d e", e=T) + mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=-1) + x = x.masked_fill(~mask, 0) + x_segsum = torch.cumsum(x, dim=-2) + mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=0) + x_segsum = x_segsum.masked_fill(~mask, -torch.inf) + return x_segsum + +def ssd_minimal_discrete(X, A, B, C, block_len, initial_states=None): + """ + Arguments: + X: (batch, length, n_heads, d_head) + A: (batch, length, n_heads) + B: (batch, length, n_heads, d_state) + C: (batch, length, n_heads, d_state) + Return: + Y: (batch, length, n_heads, d_head) + """ + assert X.dtype == A.dtype == B.dtype == C.dtype + assert X.shape[1] % block_len == 0 + + # Rearrange into blocks/chunks + X, A, B, C = [rearrange(x, "b (c l) ... -> b c l ...", l=block_len) for x in (X, A, B, C)] + + A = rearrange(A, "b c l h -> b h c l") + A_cumsum = torch.cumsum(A, dim=-1) + + # 1. Compute the output for each intra-chunk (diagonal blocks) + L = torch.exp(segsum(A)) + Y_diag = torch.einsum("bclhn,bcshn,bhcls,bcshp->bclhp", C, B, L, X) + + # 2. Compute the state for each intra-chunk + # (right term of low-rank factorization of off-diagonal blocks; B terms) + decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum)) + states = torch.einsum("bclhn,bhcl,bclhp->bchpn", B, decay_states, X) + + # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries + # (middle term of factorization of off-diag blocks; A terms) + if initial_states is None: + initial_states = torch.zeros_like(states[:, :1]) + states = torch.cat([initial_states, states], dim=1) + decay_chunk = torch.exp(segsum(F.pad(A_cumsum[:, :, :, -1], (1, 0)))) + new_states = torch.einsum("bhzc,bchpn->bzhpn", decay_chunk, states) + states, final_state = new_states[:, :-1], new_states[:, -1] + + # 4. 
Compute state -> output conversion per chunk + # (left term of low-rank factorization of off-diagonal blocks; C terms) + state_decay_out = torch.exp(A_cumsum) + Y_off = torch.einsum('bclhn,bchpn,bhcl->bclhp', C, states, state_decay_out) + + # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks) + Y = rearrange(Y_diag+Y_off, "b c l h p -> b (c l) h p") + return Y, final_state + + +# Simple test +def test_correctness(): + torch.manual_seed(42) + + ## Dimensions + # Denoted (B, T, Q, D, P) in the paper + batch, seqlen, chunk_size, dim, headdim = 1, 2048, 64, 2048, 64 + nheads = dim // headdim # (H) in the paper + ngroups = 1 # (G) in the paper + dstate = 64 # (N) in the paper + dtype = torch.float32 + device = "cuda" + + x = torch.randn(batch, seqlen, nheads, headdim, dtype=dtype, device=device) + dt = F.softplus(torch.randn(batch, seqlen, nheads, dtype=torch.float32, device=device) - 4).requires_grad_() + A = (-torch.exp(torch.rand(nheads, dtype=torch.float32, device=device))).requires_grad_() + B = torch.randn(batch, seqlen, ngroups, dstate, dtype=dtype, device=device) + C = torch.randn(batch, seqlen, ngroups, dstate, dtype=dtype, device=device) + D = torch.randn(nheads, dtype=dtype, device=device) + + # Comparing fused version and minimal version + y = mamba_chunk_scan_combined(x, dt, A, B, C, chunk_size, D=None) + y_min, _ = ssd_minimal_discrete(x*dt.unsqueeze(-1), A*dt, B, C, chunk_size) diff --git a/mamba/build/lib/mamba_ssm/ops/__init__.py b/mamba/build/lib/mamba_ssm/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/build/lib/mamba_ssm/ops/selective_scan_interface.py b/mamba/build/lib/mamba_ssm/ops/selective_scan_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..c3596bfeb0e3718d9e4bb9426828b149aa7dbaa3 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/selective_scan_interface.py @@ -0,0 +1,357 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
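+# Reference semantics (see selective_scan_ref below): for each batch entry and channel d,
+#     h_t = exp(delta_t * A) * h_{t-1} + delta_t * B_t * u_t
+#     y_t = C_t . h_t   (+ D * u_t when D is given; multiplied by silu(z_t) when z is given)
+# selective_scan_fn dispatches to the fused selective_scan_cuda kernel for this same recurrence.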
+ +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn + import causal_conv1d_cuda +except ImportError: + causal_conv1d_fn = None + causal_conv1d_cuda = None + +import selective_scan_cuda + + +class SelectiveScanFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + if u.stride(-1) != 1: + u = u.contiguous() + if delta.stride(-1) != 1: + delta = delta.contiguous() + if D is not None: + D = D.contiguous() + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if z is not None and z.stride(-1) != 1: + z = z.contiguous() + if B.dim() == 3: + B = rearrange(B, "b dstate l -> b 1 dstate l") + ctx.squeeze_B = True + if C.dim() == 3: + C = rearrange(C, "b dstate l -> b 1 dstate l") + ctx.squeeze_C = True + out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus) + ctx.delta_softplus = delta_softplus + ctx.has_z = z is not None + last_state = x[:, :, -1, 1::2] # (batch, dim, dstate) + if not ctx.has_z: + ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x) + return out if not return_last_state else (out, last_state) + else: + ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out) + out_z = rest[0] + return out_z if not return_last_state else (out_z, last_state) + + @staticmethod + def backward(ctx, dout, *args): + if not ctx.has_z: + u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors + z = None + out = None + else: + u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors + if dout.stride(-1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + # Here we just pass in None and dz will be allocated in the C++ code. + du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd( + u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus, + False # option to recompute out_z, not used here + ) + dz = rest[0] if ctx.has_z else None + dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB + dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC + return (du, ddelta, dA, dB, dC, + dD if D is not None else None, + dz, + ddelta_bias if delta_bias is not None else None, + None, + None) + + +def selective_scan_fn(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """if return_last_state is True, returns (out, last_state) + last_state has shape (batch, dim, dstate). Note that the gradient of the last state is + not considered in the backward pass. 
+ """ + return SelectiveScanFn.apply(u, delta, A, B, C, D, z, delta_bias, delta_softplus, return_last_state) + + +def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """ + u: r(B D L) + delta: r(B D L) + A: c(D N) or r(D N) + B: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + C: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + D: r(D) + z: r(B D L) + delta_bias: r(D), fp32 + + out: r(B D L) + last_state (optional): r(B D dstate) or c(B D dstate) + """ + dtype_in = u.dtype + u = u.float() + delta = delta.float() + if delta_bias is not None: + delta = delta + delta_bias[..., None].float() + if delta_softplus: + delta = F.softplus(delta) + batch, dim, dstate = u.shape[0], A.shape[0], A.shape[1] + is_variable_B = B.dim() >= 3 + is_variable_C = C.dim() >= 3 + if A.is_complex(): + if is_variable_B: + B = torch.view_as_complex(rearrange(B.float(), "... (L two) -> ... L two", two=2)) + if is_variable_C: + C = torch.view_as_complex(rearrange(C.float(), "... (L two) -> ... L two", two=2)) + else: + B = B.float() + C = C.float() + x = A.new_zeros((batch, dim, dstate)) + ys = [] + deltaA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + if not is_variable_B: + deltaB_u = torch.einsum('bdl,dn,bdl->bdln', delta, B, u) + else: + if B.dim() == 3: + deltaB_u = torch.einsum('bdl,bnl,bdl->bdln', delta, B, u) + else: + B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1]) + deltaB_u = torch.einsum('bdl,bdnl,bdl->bdln', delta, B, u) + if is_variable_C and C.dim() == 4: + C = repeat(C, "B G N L -> B (G H) N L", H=dim // C.shape[1]) + last_state = None + for i in range(u.shape[2]): + x = deltaA[:, :, i] * x + deltaB_u[:, :, i] + if not is_variable_C: + y = torch.einsum('bdn,dn->bd', x, C) + else: + if C.dim() == 3: + y = torch.einsum('bdn,bn->bd', x, C[:, :, i]) + else: + y = torch.einsum('bdn,bdn->bd', x, C[:, :, :, i]) + if i == u.shape[2] - 1: + last_state = x + if y.is_complex(): + y = y.real * 2 + ys.append(y) + y = torch.stack(ys, dim=2) # (batch dim L) + out = y if D is None else y + u * rearrange(D, "d -> d 1") + if z is not None: + out = out * F.silu(z) + out = out.to(dtype=dtype_in) + return out if not return_last_state else (out, last_state) + + +class MambaInnerFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert causal_conv1d_cuda is not None, "causal_conv1d_cuda is not available. Please install causal-conv1d." 
+ assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) + if out_proj_bias is not None else None) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd( + x, conv1d_weight, conv1d_bias, None, None, None, True + ) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out, scan_intermediates, out_z = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + ctx.delta_softplus = delta_softplus + ctx.out_proj_bias_is_None = out_proj_bias is None + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, out_proj_weight, conv1d_out, delta, + A, B, C, D, delta_bias, scan_intermediates, out) + return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + assert causal_conv1d_cuda is not None, "causal_conv1d_cuda is not available. Please install causal-conv1d." 
+ (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, + conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd( + x, conv1d_weight, conv1d_bias, None, None, None, True + ) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + dout = rearrange(dout, "b l e -> e (b l)") + dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L) + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)")) + dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). 
+ dx, dconv1d_weight, dconv1d_bias, *_ = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, None, None, None, dx, False, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dout_proj_weight, dout_proj_bias, + dA, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +def mamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return MambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + + +def mamba_inner_ref( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + assert causal_conv1d_fn is not None, "causal_conv1d_fn is not available. Please install causal-conv1d." + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, activation="silu") + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = delta_proj_weight @ x_dbl[:, :delta_rank].t() + delta = rearrange(delta, "d (b l) -> b d l", l=L) + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl d) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + if C is None: # variable B + C = x_dbl[:, -d_state:] # (bl d) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True) + return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias) diff --git a/mamba/build/lib/mamba_ssm/ops/triton/__init__.py b/mamba/build/lib/mamba_ssm/ops/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/build/lib/mamba_ssm/ops/triton/k_activations.py b/mamba/build/lib/mamba_ssm/ops/triton/k_activations.py new file mode 100644 index 0000000000000000000000000000000000000000..79fa2cc672dd5ad839498e9150658ed7abce8736 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/k_activations.py @@ -0,0 +1,169 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
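+#
+# Summary of the math (matches _swiglu_fwd_kernel / _swiglu_bwd_kernel below): with the input
+# split as xy = [x | y] along the last dimension,
+#     forward:  out = silu(x) * y = x * sigmoid(x) * y
+#     backward: dx = sigmoid(x) * (1 + x * (1 - sigmoid(x))) * y * dout,  dy = x * sigmoid(x) * dout.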
+ +import torch + +import triton +import triton.language as tl + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_N': 32}), + triton.Config({'BLOCK_N': 64}), + triton.Config({'BLOCK_N': 128}), + triton.Config({'BLOCK_N': 256}), + triton.Config({'BLOCK_N': 512}), + triton.Config({'BLOCK_N': 1024}), + ], + key=['ncols'], +) +@triton.jit +def _swiglu_fwd_kernel( + X, + Y, + OUT, + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_out_row, + ncols, + BLOCK_N: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. + row = tl.program_id(0) + start_col = tl.program_id(1) * BLOCK_N + X += row * stride_x_row + Y += row * stride_y_row + OUT += row * stride_out_row + cols = start_col + tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < ncols, other=0.).to(tl.float32) + y = tl.load(Y + cols, mask=cols < ncols, other=0.).to(tl.float32) + out = x * tl.sigmoid(x) * y + tl.store(OUT + cols, out, mask=cols < ncols) + + +def _swiglu_fwd(xy, out=None): + if xy.stride(-1) != 1: + xy = xy.contiguous() + batch_shape = xy.shape[:-1] + xy = xy.reshape(-1, xy.shape[-1]) + x, y = xy.chunk(2, dim=-1) + if out is None: + out = torch.empty_like(x) + else: + out = out.reshape(-1, out.shape[-1]) + assert out.shape == x.shape + assert out.stride(-1) == 1 + M, N = x.shape + grid = lambda META: (M, triton.cdiv(N, META['BLOCK_N'])) + with torch.cuda.device(x.device.index): + _swiglu_fwd_kernel[grid](x, y, out, x.stride(0), y.stride(0), out.stride(0), N) + return out.reshape(*batch_shape, out.shape[-1]) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_N': 32}), + triton.Config({'BLOCK_N': 64}), + triton.Config({'BLOCK_N': 128}), + triton.Config({'BLOCK_N': 256}), + triton.Config({'BLOCK_N': 512}), + triton.Config({'BLOCK_N': 1024}), + ], + key=['ncols'], +) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["OUT"] is not None}) +@triton.jit +def _swiglu_bwd_kernel( + X, + Y, + DOUT, + OUT, + DX, + DY, + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_dout_row, + stride_out_row, + stride_dx_row, + stride_dy_row, + ncols, + BLOCK_N: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. 
+ row = tl.program_id(0) + start_col = tl.program_id(1) * BLOCK_N + X += row * stride_x_row + Y += row * stride_y_row + DOUT += row * stride_dout_row + if RECOMPUTE_OUTPUT: + OUT += row * stride_out_row + DX += row * stride_dx_row + DY += row * stride_dy_row + cols = start_col + tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < ncols, other=0.).to(tl.float32) + y = tl.load(Y + cols, mask=cols < ncols, other=0.).to(tl.float32) + dout = tl.load(DOUT + cols, mask=cols < ncols, other=0.).to(tl.float32) + x_sigmoid = tl.sigmoid(x) + dx = x_sigmoid * (1 + x * (1 - x_sigmoid)) * y * dout + dy = x * x_sigmoid * dout + tl.store(DX + cols, dx, mask=cols < ncols) + tl.store(DY + cols, dy, mask=cols < ncols) + if RECOMPUTE_OUTPUT: + out = x * x_sigmoid * y + tl.store(OUT + cols, out, mask=cols < ncols) + + +def _swiglu_bwd(xy, dout, dxy=None, recompute_output=False, out=None): + if xy.stride(-1) != 1: + xy = xy.contiguous() + if dout.stride(-1) != 1: + dout = dout.contiguous() + batch_shape = xy.shape[:-1] + xy = xy.reshape(-1, xy.shape[-1]) + x, y = xy.chunk(2, dim=-1) + dout = dout.reshape(-1, dout.shape[-1]) + assert dout.shape == x.shape + if dxy is None: + dxy = torch.empty_like(xy) + else: + dxy = dxy.reshape(-1, dxy.shape[-1]) + assert dxy.shape == xy.shape + dx, dy = dxy.chunk(2, dim=-1) + assert dx.stride(-1) == 1 + assert dy.stride(-1) == 1 + if recompute_output: + if out is None: + out = torch.empty_like(x) + else: + out = out.reshape(-1, out.shape[-1]) + assert out.shape == x.shape + assert out.stride(-1) == 1 + M, N = x.shape + grid = lambda META: (M, triton.cdiv(N, META['BLOCK_N'])) + with torch.cuda.device(x.device.index): + _swiglu_bwd_kernel[grid](x, y, dout, out if recompute_output else None, dx, dy, + x.stride(0), y.stride(0), dout.stride(0), + out.stride(0) if recompute_output else 0, + dx.stride(0), dy.stride(0), + N) + if not recompute_output: + return dxy.reshape(*batch_shape, dxy.shape[-1]) + else: + return dxy.reshape(*batch_shape, dxy.shape[-1]), out.reshape(*batch_shape, out.shape[-1]) + + +class SwiGLU(torch.autograd.Function): + + @staticmethod + def forward(ctx, xy): + ctx.save_for_backward(xy) + return _swiglu_fwd(xy) + + @staticmethod + def backward(ctx, dout): + xy, = ctx.saved_tensors + return _swiglu_bwd(xy, dout) + + +swiglu = SwiGLU.apply diff --git a/mamba/build/lib/mamba_ssm/ops/triton/layer_norm.py b/mamba/build/lib/mamba_ssm/ops/triton/layer_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..2f304d4344574d6cf1e4bbdc7f87158bfa2cce11 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/layer_norm.py @@ -0,0 +1,1113 @@ +# Copyright (c) 2024, Tri Dao. +# Implement dropout + residual + layer_norm / rms_norm. + +# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html +# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate. +# This is faster for dimensions up to 8k, but after that it's much slower due to register spilling. +# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine. 
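+#
+# Rough summary of the fused op (see layer_norm_ref / rms_norm_ref below for the exact reference):
+#     x = dropout(x * rowscale) [+ dropout(x1)] [+ residual]
+#     out = layer_norm(x) or rms_norm(x), optionally through a second (weight1, bias1) head,
+# and the pre-norm sum x is also returned when prenorm=True.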
+ +import math +import warnings + +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_fwd, custom_bwd + +import triton +import triton.language as tl + + +def layer_norm_ref( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + dropout_mask=None, + dropout_mask1=None, + upcast=False, +): + dtype = x.dtype + if upcast: + x = x.float() + weight = weight.float() + bias = bias.float() if bias is not None else None + residual = residual.float() if residual is not None else residual + x1 = x1.float() if x1 is not None else None + weight1 = weight1.float() if weight1 is not None else None + bias1 = bias1.float() if bias1 is not None else None + if x1 is not None: + assert rowscale is None, "rowscale is not supported with parallel LayerNorm" + if rowscale is not None: + x = x * rowscale[..., None] + if dropout_p > 0.0: + if dropout_mask is not None: + x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p) + else: + x = F.dropout(x, p=dropout_p) + if x1 is not None: + if dropout_mask1 is not None: + x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p) + else: + x1 = F.dropout(x1, p=dropout_p) + if x1 is not None: + x = x + x1 + if residual is not None: + x = (x + residual).to(x.dtype) + out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to( + dtype + ) + if weight1 is None: + return out if not prenorm else (out, x) + else: + out1 = F.layer_norm( + x.to(weight1.dtype), x.shape[-1:], weight=weight1, bias=bias1, eps=eps + ).to(dtype) + return (out, out1) if not prenorm else (out, out1, x) + + +def rms_norm_ref( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + dropout_mask=None, + dropout_mask1=None, + upcast=False, +): + dtype = x.dtype + if upcast: + x = x.float() + weight = weight.float() + bias = bias.float() if bias is not None else None + residual = residual.float() if residual is not None else residual + x1 = x1.float() if x1 is not None else None + weight1 = weight1.float() if weight1 is not None else None + bias1 = bias1.float() if bias1 is not None else None + if x1 is not None: + assert rowscale is None, "rowscale is not supported with parallel LayerNorm" + if rowscale is not None: + x = x * rowscale[..., None] + if dropout_p > 0.0: + if dropout_mask is not None: + x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p) + else: + x = F.dropout(x, p=dropout_p) + if x1 is not None: + if dropout_mask1 is not None: + x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p) + else: + x1 = F.dropout(x1, p=dropout_p) + if x1 is not None: + x = x + x1 + if residual is not None: + x = (x + residual).to(x.dtype) + rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps) + out = ((x * rstd * weight) + bias if bias is not None else (x * rstd * weight)).to(dtype) + if weight1 is None: + return out if not prenorm else (out, x) + else: + out1 = ((x * rstd * weight1) + bias1 if bias1 is not None else (x * rstd * weight1)).to( + dtype + ) + return (out, out1) if not prenorm else (out, out1, x) + +def config_prune(configs): + + if torch.version.hip: + try: + # set warp size based on gcn architecure + gcn_arch_name = torch.cuda.get_device_properties(0).gcnArchName + if "gfx10" in gcn_arch_name or "gfx11" in gcn_arch_name: + # radeon + warp_size = 32 + else: + # instinct + warp_size = 64 + except AttributeError as e: + # fall back 
to crude method to set warp size + device_name = torch.cuda.get_device_properties(0).name + if 'instinct' in device_name.lower(): + warp_size = 64 + else: + warp_size = 32 + warnings.warn(f"{e}, warp size set to {warp_size} based on device name: {device_name}", UserWarning) + + else: + # cuda + warp_size = 32 + + max_block_sz = 1024 + max_num_warps = max_block_sz // warp_size + pruned_configs = [config for config in configs if config.num_warps <= max_num_warps] + return pruned_configs + +configs_autotune = [ + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ] + +pruned_configs_autotune = config_prune(configs_autotune) + +@triton.autotune( + configs = pruned_configs_autotune, + key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None}) +@triton.heuristics({"HAS_X1": lambda args: args["X1"] is not None}) +@triton.heuristics({"HAS_W1": lambda args: args["W1"] is not None}) +@triton.heuristics({"HAS_B1": lambda args: args["B1"] is not None}) +@triton.jit +def _layer_norm_fwd_1pass_kernel( + X, # pointer to the input + Y, # pointer to the output + W, # pointer to the weights + B, # pointer to the biases + RESIDUAL, # pointer to the residual + X1, + W1, + B1, + Y1, + RESIDUAL_OUT, # pointer to the residual + ROWSCALE, + SEEDS, # Dropout seeds for each row + DROPOUT_MASK, + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_res_row, + stride_res_out_row, + stride_x1_row, + stride_y1_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + dropout_p, # Dropout probability + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_RESIDUAL: tl.constexpr, + STORE_RESIDUAL_OUT: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_DROPOUT: tl.constexpr, + STORE_DROPOUT_MASK: tl.constexpr, + HAS_ROWSCALE: tl.constexpr, + HAS_X1: tl.constexpr, + HAS_W1: tl.constexpr, + HAS_B1: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. 
+ row = tl.program_id(0) + X += row * stride_x_row + Y += row * stride_y_row + if HAS_RESIDUAL: + RESIDUAL += row * stride_res_row + if STORE_RESIDUAL_OUT: + RESIDUAL_OUT += row * stride_res_out_row + if HAS_X1: + X1 += row * stride_x1_row + if HAS_W1: + Y1 += row * stride_y1_row + # Compute mean and variance + cols = tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) + if HAS_ROWSCALE: + rowscale = tl.load(ROWSCALE + row).to(tl.float32) + x *= rowscale + if HAS_DROPOUT: + # Compute dropout mask + # 7 rounds is good enough, and reduces register pressure + keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + x = tl.where(keep_mask, x / (1.0 - dropout_p), 0.0) + if STORE_DROPOUT_MASK: + tl.store(DROPOUT_MASK + row * N + cols, keep_mask, mask=cols < N) + if HAS_X1: + x1 = tl.load(X1 + cols, mask=cols < N, other=0.0).to(tl.float32) + if HAS_ROWSCALE: + rowscale = tl.load(ROWSCALE + M + row).to(tl.float32) + x1 *= rowscale + if HAS_DROPOUT: + # Compute dropout mask + # 7 rounds is good enough, and reduces register pressure + keep_mask = ( + tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + ) + x1 = tl.where(keep_mask, x1 / (1.0 - dropout_p), 0.0) + if STORE_DROPOUT_MASK: + tl.store(DROPOUT_MASK + (M + row) * N + cols, keep_mask, mask=cols < N) + x += x1 + if HAS_RESIDUAL: + residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32) + x += residual + if STORE_RESIDUAL_OUT: + tl.store(RESIDUAL_OUT + cols, x, mask=cols < N) + if not IS_RMS_NORM: + mean = tl.sum(x, axis=0) / N + tl.store(Mean + row, mean) + xbar = tl.where(cols < N, x - mean, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + else: + xbar = tl.where(cols < N, x, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + tl.store(Rstd + row, rstd) + # Normalize and apply linear transformation + mask = cols < N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if HAS_BIAS: + b = tl.load(B + cols, mask=mask).to(tl.float32) + x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + y = x_hat * w + b if HAS_BIAS else x_hat * w + # Write output + tl.store(Y + cols, y, mask=mask) + if HAS_W1: + w1 = tl.load(W1 + cols, mask=mask).to(tl.float32) + if HAS_B1: + b1 = tl.load(B1 + cols, mask=mask).to(tl.float32) + y1 = x_hat * w1 + b1 if HAS_B1 else x_hat * w1 + tl.store(Y1 + cols, y1, mask=mask) + + +def _layer_norm_fwd( + x, + weight, + bias, + eps, + residual=None, + x1=None, + weight1=None, + bias1=None, + dropout_p=0.0, + rowscale=None, + out_dtype=None, + residual_dtype=None, + is_rms_norm=False, + return_dropout_mask=False, +): + if residual is not None: + residual_dtype = residual.dtype + M, N = x.shape + assert x.stride(-1) == 1 + if residual is not None: + assert residual.stride(-1) == 1 + assert residual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + if x1 is not None: + assert x1.shape == x.shape + assert rowscale is None + assert x1.stride(-1) == 1 + if weight1 is not None: + assert weight1.shape == (N,) + assert weight1.stride(-1) == 1 + if bias1 is not None: + assert bias1.shape == (N,) + assert bias1.stride(-1) == 1 + if rowscale is not None: + assert rowscale.is_contiguous() + assert rowscale.shape == (M,) + # allocate output + y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype) + assert y.stride(-1) == 1 + if weight1 is not None: + y1 = 
torch.empty_like(y) + assert y1.stride(-1) == 1 + else: + y1 = None + if ( + residual is not None + or (residual_dtype is not None and residual_dtype != x.dtype) + or dropout_p > 0.0 + or rowscale is not None + or x1 is not None + ): + residual_out = torch.empty( + M, N, device=x.device, dtype=residual_dtype if residual_dtype is not None else x.dtype + ) + assert residual_out.stride(-1) == 1 + else: + residual_out = None + mean = torch.empty((M,), dtype=torch.float32, device=x.device) if not is_rms_norm else None + rstd = torch.empty((M,), dtype=torch.float32, device=x.device) + if dropout_p > 0.0: + seeds = torch.randint( + 2**32, (M if x1 is None else 2 * M,), device=x.device, dtype=torch.int64 + ) + else: + seeds = None + if return_dropout_mask and dropout_p > 0.0: + dropout_mask = torch.empty(M if x1 is None else 2 * M, N, device=x.device, dtype=torch.bool) + else: + dropout_mask = None + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + with torch.cuda.device(x.device.index): + _layer_norm_fwd_1pass_kernel[(M,)]( + x, + y, + weight, + bias, + residual, + x1, + weight1, + bias1, + y1, + residual_out, + rowscale, + seeds, + dropout_mask, + mean, + rstd, + x.stride(0), + y.stride(0), + residual.stride(0) if residual is not None else 0, + residual_out.stride(0) if residual_out is not None else 0, + x1.stride(0) if x1 is not None else 0, + y1.stride(0) if y1 is not None else 0, + M, + N, + eps, + dropout_p, + is_rms_norm, + BLOCK_N, + residual is not None, + residual_out is not None, + bias is not None, + dropout_p > 0.0, + dropout_mask is not None, + rowscale is not None, + ) + # residual_out is None if residual is None and residual_dtype == input_dtype and dropout_p == 0.0 + if dropout_mask is not None and x1 is not None: + dropout_mask, dropout_mask1 = dropout_mask.tensor_split(2, dim=0) + else: + dropout_mask1 = None + return ( + y, + y1, + mean, + rstd, + residual_out if residual_out is not None else x, + seeds, + dropout_mask, + dropout_mask1, + ) + + +@triton.autotune( + configs=pruned_configs_autotune, + key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS", "HAS_DROPOUT"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None}) +# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None}) +@triton.heuristics({"HAS_ROWSCALE": lambda args: args["ROWSCALE"] is not None}) +@triton.heuristics({"HAS_DY1": lambda args: args["DY1"] is not None}) +@triton.heuristics({"HAS_DX1": lambda args: args["DX1"] is not None}) +@triton.heuristics({"HAS_B1": lambda args: args["DB1"] is not None}) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) +@triton.jit +def _layer_norm_bwd_kernel( + X, # pointer to the input + W, # pointer to the weights + B, # pointer to the biases + Y, # pointer to the output to be recomputed + DY, # pointer to the output gradient + DX, # pointer to the input gradient + DW, # pointer to the partial sum of weights gradient + DB, # pointer to the partial sum of biases gradient + DRESIDUAL, + W1, + DY1, + DX1, + DW1, + DB1, + DRESIDUAL_IN, + ROWSCALE, + SEEDS, + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, 
+ stride_dy_row, + stride_dx_row, + stride_dres_row, + stride_dy1_row, + stride_dx1_row, + stride_dres_in_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + dropout_p, + rows_per_program, + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_DRESIDUAL: tl.constexpr, + STORE_DRESIDUAL: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_DROPOUT: tl.constexpr, + HAS_ROWSCALE: tl.constexpr, + HAS_DY1: tl.constexpr, + HAS_DX1: tl.constexpr, + HAS_B1: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, +): + # Map the program id to the elements of X, DX, and DY it should compute. + row_block_id = tl.program_id(0) + row_start = row_block_id * rows_per_program + # Do not early exit if row_start >= M, because we need to write DW and DB + cols = tl.arange(0, BLOCK_N) + mask = cols < N + X += row_start * stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += row_start * stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += row_start * stride_dres_in_row + DY += row_start * stride_dy_row + DX += row_start * stride_dx_row + if HAS_DY1: + DY1 += row_start * stride_dy1_row + if HAS_DX1: + DX1 += row_start * stride_dx1_row + if RECOMPUTE_OUTPUT: + Y += row_start * stride_y_row + w = tl.load(W + cols, mask=mask).to(tl.float32) + if RECOMPUTE_OUTPUT and HAS_BIAS: + b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32) + if HAS_DY1: + w1 = tl.load(W1 + cols, mask=mask).to(tl.float32) + dw = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_BIAS: + db = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_DY1: + dw1 = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_B1: + db1 = tl.zeros((BLOCK_N,), dtype=tl.float32) + row_end = min((row_block_id + 1) * rows_per_program, M) + for row in range(row_start, row_end): + # Load data to SRAM + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + if HAS_DY1: + dy1 = tl.load(DY1 + cols, mask=mask, other=0).to(tl.float32) + if not IS_RMS_NORM: + mean = tl.load(Mean + row) + rstd = tl.load(Rstd + row) + # Compute dx + xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + xhat = tl.where(mask, xhat, 0.0) + if RECOMPUTE_OUTPUT: + y = xhat * w + b if HAS_BIAS else xhat * w + tl.store(Y + cols, y, mask=mask) + wdy = w * dy + dw += dy * xhat + if HAS_BIAS: + db += dy + if HAS_DY1: + wdy += w1 * dy1 + dw1 += dy1 * xhat + if HAS_B1: + db1 += dy1 + if not IS_RMS_NORM: + c1 = tl.sum(xhat * wdy, axis=0) / N + c2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * c1 + c2)) * rstd + else: + c1 = tl.sum(xhat * wdy, axis=0) / N + dx = (wdy - xhat * c1) * rstd + if HAS_DRESIDUAL: + dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32) + dx += dres + # Write dx + if STORE_DRESIDUAL: + tl.store(DRESIDUAL_IN + cols, dx, mask=mask) + if HAS_DX1: + if HAS_DROPOUT: + keep_mask = ( + tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + ) + dx1 = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0) + else: + dx1 = dx + tl.store(DX1 + cols, dx1, mask=mask) + if HAS_DROPOUT: + keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + dx = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0) + if HAS_ROWSCALE: + rowscale = tl.load(ROWSCALE + row).to(tl.float32) + dx *= rowscale + tl.store(DX + cols, dx, mask=mask) + + X += stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += stride_dres_in_row + if RECOMPUTE_OUTPUT: + Y += stride_y_row + DY += stride_dy_row + DX += 
stride_dx_row + if HAS_DY1: + DY1 += stride_dy1_row + if HAS_DX1: + DX1 += stride_dx1_row + tl.store(DW + row_block_id * N + cols, dw, mask=mask) + if HAS_BIAS: + tl.store(DB + row_block_id * N + cols, db, mask=mask) + if HAS_DY1: + tl.store(DW1 + row_block_id * N + cols, dw1, mask=mask) + if HAS_B1: + tl.store(DB1 + row_block_id * N + cols, db1, mask=mask) + + +def _layer_norm_bwd( + dy, + x, + weight, + bias, + eps, + mean, + rstd, + dresidual=None, + dy1=None, + weight1=None, + bias1=None, + seeds=None, + dropout_p=0.0, + rowscale=None, + has_residual=False, + has_x1=False, + is_rms_norm=False, + x_dtype=None, + recompute_output=False, +): + M, N = x.shape + assert x.stride(-1) == 1 + assert dy.stride(-1) == 1 + assert dy.shape == (M, N) + if dresidual is not None: + assert dresidual.stride(-1) == 1 + assert dresidual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + if dy1 is not None: + assert weight1 is not None + assert dy1.shape == dy.shape + assert dy1.stride(-1) == 1 + if weight1 is not None: + assert weight1.shape == (N,) + assert weight1.stride(-1) == 1 + if bias1 is not None: + assert bias1.shape == (N,) + assert bias1.stride(-1) == 1 + if seeds is not None: + assert seeds.is_contiguous() + assert seeds.shape == (M if not has_x1 else M * 2,) + if rowscale is not None: + assert rowscale.is_contiguous() + assert rowscale.shape == (M,) + # allocate output + dx = ( + torch.empty_like(x) + if x_dtype is None + else torch.empty(M, N, dtype=x_dtype, device=x.device) + ) + dresidual_in = ( + torch.empty_like(x) + if has_residual + and (dx.dtype != x.dtype or dropout_p > 0.0 or rowscale is not None or has_x1) + else None + ) + dx1 = torch.empty_like(dx) if (has_x1 and dropout_p > 0.0) else None + y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None + if recompute_output: + assert weight1 is None, "recompute_output is not supported with parallel LayerNorm" + + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device) + _db = ( + torch.empty((sm_count, N), dtype=torch.float32, device=bias.device) + if bias is not None + else None + ) + _dw1 = torch.empty_like(_dw) if weight1 is not None else None + _db1 = torch.empty_like(_db) if bias1 is not None else None + rows_per_program = math.ceil(M / sm_count) + grid = (sm_count,) + with torch.cuda.device(x.device.index): + _layer_norm_bwd_kernel[grid]( + x, + weight, + bias, + y, + dy, + dx, + _dw, + _db, + dresidual, + weight1, + dy1, + dx1, + _dw1, + _db1, + dresidual_in, + rowscale, + seeds, + mean, + rstd, + x.stride(0), + 0 if not recompute_output else y.stride(0), + dy.stride(0), + dx.stride(0), + dresidual.stride(0) if dresidual is not None else 0, + dy1.stride(0) if dy1 is not None else 0, + dx1.stride(0) if dx1 is not None else 0, + dresidual_in.stride(0) if dresidual_in is not None else 0, + M, + N, + eps, + dropout_p, + rows_per_program, + is_rms_norm, + BLOCK_N, + dresidual is not None, + dresidual_in is not None, + bias is not None, + dropout_p > 0.0, + ) + dw = _dw.sum(0).to(weight.dtype) + db = _db.sum(0).to(bias.dtype) if bias is not None else 
None + dw1 = _dw1.sum(0).to(weight1.dtype) if weight1 is not None else None + db1 = _db1.sum(0).to(bias1.dtype) if bias1 is not None else None + # Don't need to compute dresidual_in separately in this case + if has_residual and dx.dtype == x.dtype and dropout_p == 0.0 and rowscale is None: + dresidual_in = dx + if has_x1 and dropout_p == 0.0: + dx1 = dx + return ( + (dx, dw, db, dresidual_in, dx1, dw1, db1) + if not recompute_output + else (dx, dw, db, dresidual_in, dx1, dw1, db1, y) + ) + + +class LayerNormFn(torch.autograd.Function): + @staticmethod + def forward( + ctx, + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + return_dropout_mask=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + if x1 is not None: + assert x1.shape == x_shape_og + assert rowscale is None, "rowscale is not supported with parallel LayerNorm" + x1 = x1.reshape(-1, x1.shape[-1]) + if x1.stride(-1) != 1: + x1 = x1.contiguous() + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + if weight1 is not None: + weight1 = weight1.contiguous() + if bias1 is not None: + bias1 = bias1.contiguous() + if rowscale is not None: + rowscale = rowscale.reshape(-1).contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd( + x, + weight, + bias, + eps, + residual, + x1, + weight1, + bias1, + dropout_p=dropout_p, + rowscale=rowscale, + residual_dtype=residual_dtype, + is_rms_norm=is_rms_norm, + return_dropout_mask=return_dropout_mask, + ) + ctx.save_for_backward( + residual_out, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd + ) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.dropout_p = dropout_p + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.has_x1 = x1 is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + y = y.reshape(x_shape_og) + y1 = y1.reshape(x_shape_og) if y1 is not None else None + residual_out = residual_out.reshape(x_shape_og) if residual_out is not None else None + dropout_mask = dropout_mask.reshape(x_shape_og) if dropout_mask is not None else None + dropout_mask1 = dropout_mask1.reshape(x_shape_og) if dropout_mask1 is not None else None + if not return_dropout_mask: + if weight1 is None: + return y if not prenorm else (y, residual_out) + else: + return (y, y1) if not prenorm else (y, y1, residual_out) + else: + if weight1 is None: + return ( + (y, dropout_mask, dropout_mask1) + if not prenorm + else (y, residual_out, dropout_mask, dropout_mask1) + ) + else: + return ( + (y, y1, dropout_mask, dropout_mask1) + if not prenorm + else (y, y1, residual_out, dropout_mask, dropout_mask1) + ) + + @staticmethod + def backward(ctx, dy, *args): + x, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd = ctx.saved_tensors + dy = dy.reshape(-1, dy.shape[-1]) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if weight1 is not None: + dy1, args = args[0], args[1:] + dy1 = dy1.reshape(-1, dy1.shape[-1]) + if dy1.stride(-1) != 1: + dy1 
= dy1.contiguous() + assert dy1.shape == x.shape + else: + dy1 = None + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dw, db, dresidual_in, dx1, dw1, db1 = _layer_norm_bwd( + dy, + x, + weight, + bias, + ctx.eps, + mean, + rstd, + dresidual, + dy1, + weight1, + bias1, + seeds, + ctx.dropout_p, + rowscale, + ctx.has_residual, + ctx.has_x1, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + ) + return ( + dx.reshape(ctx.x_shape_og), + dw, + db, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + dx1.reshape(ctx.x_shape_og) if dx1 is not None else None, + dw1, + db1, + None, + None, + None, + None, + None, + None, + None, + ) + + +def layer_norm_fn( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + return_dropout_mask=False, +): + return LayerNormFn.apply( + x, + weight, + bias, + residual, + x1, + weight1, + bias1, + eps, + dropout_p, + rowscale, + prenorm, + residual_in_fp32, + is_rms_norm, + return_dropout_mask, + ) + + +def rms_norm_fn( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + residual_in_fp32=False, + return_dropout_mask=False, +): + return LayerNormFn.apply( + x, + weight, + bias, + residual, + x1, + weight1, + bias1, + eps, + dropout_p, + rowscale, + prenorm, + residual_in_fp32, + True, + return_dropout_mask, + ) + + +class RMSNorm(torch.nn.Module): + + def __init__(self, hidden_size, eps=1e-5, dropout_p=0.0, device=None, dtype=None): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + if dropout_p > 0.0: + self.drop = torch.nn.Dropout(dropout_p) + else: + self.drop = None + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.register_parameter("bias", None) + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + + def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False): + return rms_norm_fn( + x, + self.weight, + self.bias, + residual=residual, + eps=self.eps, + dropout_p=self.drop.p if self.drop is not None and self.training else 0.0, + prenorm=prenorm, + residual_in_fp32=residual_in_fp32, + ) + + +class LayerNormLinearFn(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward( + ctx, + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + norm_weight = norm_weight.contiguous() + if norm_bias is not None: + norm_bias = norm_bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, _, mean, rstd, residual_out, *rest = _layer_norm_fwd( + x, + norm_weight, + norm_bias, + eps, + residual, + out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(), + 
residual_dtype=residual_dtype, + is_rms_norm=is_rms_norm, + ) + y = y.reshape(x_shape_og) + dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype + linear_weight = linear_weight.to(dtype) + linear_bias = linear_bias.to(dtype) if linear_bias is not None else None + out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias) + # We don't store y, will be recomputed in the backward pass to save memory + ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + ctx.linear_bias_is_none = linear_bias is None + return out if not prenorm else (out, residual_out.reshape(x_shape_og)) + + @staticmethod + @custom_bwd + def backward(ctx, dout, *args): + x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors + dout = dout.reshape(-1, dout.shape[-1]) + dy = F.linear(dout, linear_weight.t()) + dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dnorm_weight, dnorm_bias, dresidual_in, _, _, _, y = _layer_norm_bwd( + dy, + x, + norm_weight, + norm_bias, + ctx.eps, + mean, + rstd, + dresidual=dresidual, + has_residual=ctx.has_residual, + is_rms_norm=ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + recompute_output=True, + ) + dlinear_weight = torch.einsum("bo,bi->oi", dout, y) + return ( + dx.reshape(ctx.x_shape_og), + dnorm_weight, + dnorm_bias, + dlinear_weight, + dlinear_bias, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_linear_fn( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormLinearFn.apply( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual, + eps, + prenorm, + residual_in_fp32, + is_rms_norm, + ) diff --git a/mamba/build/lib/mamba_ssm/ops/triton/layernorm_gated.py b/mamba/build/lib/mamba_ssm/ops/triton/layernorm_gated.py new file mode 100644 index 0000000000000000000000000000000000000000..de4b2f4815f6fa9d80291491e3826251f50ff5ad --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/layernorm_gated.py @@ -0,0 +1,437 @@ +# Copyright (c) 2024, Tri Dao. +# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html +# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate. +# This backward pass is faster for dimensions up to 8k, but after that it's much slower due to register spilling. +# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine. 
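+#
+# Rough summary of the gated norm (see rms_norm_ref below and the LayerNormFn docstring):
+#     out = norm(x) * silu(z)   if norm_before_gate
+#     out = norm(x * silu(z))   otherwise,
+# optionally computed per group of size group_size along the last dimension.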
+ +import math + +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange + + +def rms_norm_ref(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, upcast=True): + dtype = x.dtype + N = x.shape[-1] + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + z = z.float() if z is not None else z + if z is not None and not norm_before_gate: + x = x * F.silu(z) + if group_size is None: + rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps) + out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight) + else: + x_group = rearrange(x, "... (g d) -> ... g d", d=group_size) + rstd = 1 / torch.sqrt((x_group.square()).mean(dim=-1, keepdim=True) + eps) + out = rearrange(x_group * rstd, "... g d -> ... (g d)") * weight + if bias is not None: + out = out + bias + if z is not None and norm_before_gate: + out *= F.silu(z) + return out.to(dtype) + + +@triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["Z"] is not None}) +@triton.jit +def _layer_norm_fwd_1pass_kernel( + X, # pointer to the input + Y, # pointer to the output + W, # pointer to the weights + B, # pointer to the biases + Z, # pointer to the other branch + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_z_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + BLOCK_N: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_Z: tl.constexpr, + NORM_BEFORE_GATE: tl.constexpr, + IS_RMS_NORM: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. + row = tl.program_id(0) + group = tl.program_id(1) + X += row * stride_x_row + group * N + Y += row * stride_y_row + group * N + if HAS_Z: + Z += row * stride_z_row + group * N + if not IS_RMS_NORM: + Mean += group * M + Rstd += group * M + W += group * N + if HAS_BIAS: + B += group * N + # Compute mean and variance + cols = tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32) + if HAS_Z and not NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=cols < N).to(tl.float32) + x *= z * tl.sigmoid(z) + if not IS_RMS_NORM: + mean = tl.sum(x, axis=0) / N + tl.store(Mean + row, mean) + xbar = tl.where(cols < N, x - mean, 0.) + var = tl.sum(xbar * xbar, axis=0) / N + else: + xbar = tl.where(cols < N, x, 0.) 
+ var = tl.sum(xbar * xbar, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + tl.store(Rstd + row, rstd) + # Normalize and apply linear transformation + mask = cols < N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if HAS_BIAS: + b = tl.load(B + cols, mask=mask).to(tl.float32) + x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + y = x_hat * w + b if HAS_BIAS else x_hat * w + if HAS_Z and NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=mask).to(tl.float32) + y *= z * tl.sigmoid(z) + # Write output + tl.store(Y + cols, y, mask=mask) + + +def _layer_norm_fwd(x, weight, bias, eps, z=None, out=None, group_size=None, norm_before_gate=True, is_rms_norm=False): + M, N = x.shape + if group_size is None: + group_size = N + assert N % group_size == 0 + ngroups = N // group_size + assert x.stride(-1) == 1 + if z is not None: + assert z.stride(-1) == 1 + assert z.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + if out is not None: + assert out.shape == x.shape + else: + out = torch.empty_like(x) + assert out.stride(-1) == 1 + mean = torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device) if not is_rms_norm else None + rstd = torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device) + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(group_size)) + if group_size > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + # heuristics for number of warps + num_warps = min(max(BLOCK_N // 256, 1), 8) + grid = (M, ngroups) + with torch.cuda.device(x.device.index): + _layer_norm_fwd_1pass_kernel[grid](x, out, weight, bias, z, mean, rstd, + x.stride(0), out.stride(0), z.stride(0) if z is not None else 0, + M, group_size, eps, + BLOCK_N=BLOCK_N, + NORM_BEFORE_GATE=norm_before_gate, + IS_RMS_NORM=is_rms_norm, + num_warps=num_warps) + return out, mean, rstd + + + +@triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["Z"] is not None}) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) +@triton.jit +def _layer_norm_bwd_kernel( + X, # pointer to the input + W, # pointer to the weights + B, # pointer to the biases + Z, # pointer to the other branch + Y, # pointer to the output to be recomputed + DY, # pointer to the output gradient + DX, # pointer to the input gradient + DW, # pointer to the partial sum of weights gradient + DB, # pointer to the partial sum of biases gradient + DZ, # pointer to the other branch + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_z_row, + stride_y_row, + stride_dy_row, + stride_dx_row, + stride_dz_row, + stride_dw_row, + stride_db_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + rows_per_program, + NORM_BEFORE_GATE: tl.constexpr, + IS_RMS_NORM: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_Z: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, + BLOCK_N: tl.constexpr, +): + # Map the program id to the elements of X, DX, and DY it should compute. 
+ row_block_id = tl.program_id(0) + group = tl.program_id(1) + row_start = row_block_id * rows_per_program + cols = tl.arange(0, BLOCK_N) + mask = cols < N + X += row_start * stride_x_row + group * N + if HAS_Z: + Z += row_start * stride_z_row + group * N + DZ += row_start * stride_dz_row + group * N + DY += row_start * stride_dy_row + group * N + DX += row_start * stride_dx_row + group * N + if RECOMPUTE_OUTPUT: + Y += row_start * stride_y_row + group * N + if not IS_RMS_NORM: + Mean += group * M + Rstd += group * M + W += group * N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if (RECOMPUTE_OUTPUT or HAS_Z) and HAS_BIAS: + B += group * N + b = tl.load(B + cols, mask=mask, other=0.).to(tl.float32) + dw = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_BIAS: + db = tl.zeros((BLOCK_N,), dtype=tl.float32) + row_end = min((row_block_id + 1) * rows_per_program, M) + for row in range(row_start, row_end): + # Load data to SRAM + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + if not IS_RMS_NORM: + mean = tl.load(Mean + row) + if HAS_Z and not NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=mask, other=0.).to(tl.float32) + x_og = x + x = x_og * z * tl.sigmoid(z) + rstd = tl.load(Rstd + row) + # Compute dx + xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + xhat = tl.where(mask, xhat, 0.) + if HAS_Z and NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=mask, other=0.).to(tl.float32) + z_sigmoid = tl.sigmoid(z) + y = xhat * w + b if HAS_BIAS else xhat * w + if RECOMPUTE_OUTPUT: + tl.store(Y + cols, y * z * z_sigmoid, mask=mask) + dz = dy * y * z_sigmoid * (1 + z * (1 - z_sigmoid)) + tl.store(DZ + cols, dz, mask=mask) + dy *= z * z_sigmoid + else: + if RECOMPUTE_OUTPUT: + y = xhat * w + b if HAS_BIAS else xhat * w + tl.store(Y + cols, y, mask=mask) + wdy = w * dy + c1 = tl.sum(xhat * wdy, axis=0) / N + if not IS_RMS_NORM: + c2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * c1 + c2)) * rstd + else: + dx = (wdy - xhat * c1) * rstd + dw += dy * xhat + if HAS_BIAS: + db += dy + if HAS_Z and not NORM_BEFORE_GATE: + z_sigmoid = tl.sigmoid(z) + dz = dx * x_og * z_sigmoid * (1 + z * (1 - z_sigmoid)) + tl.store(DZ + cols, dz, mask=mask) + dx *= z * z_sigmoid + # Write dx + tl.store(DX + cols, dx, mask=mask) + + X += stride_x_row + if HAS_Z: + Z += stride_z_row + DZ += stride_dz_row + if RECOMPUTE_OUTPUT: + Y += stride_y_row + DY += stride_dy_row + DX += stride_dx_row + tl.store(DW + row_block_id * stride_dw_row + group * N + cols, dw, mask=mask) + if HAS_BIAS: + tl.store(DB + row_block_id * stride_db_row + group * N + cols, db, mask=mask) + + +def _layer_norm_bwd(dy, x, weight, bias, eps, mean, rstd, z=None, group_size=None, + norm_before_gate=True, is_rms_norm=False, recompute_output=False, dz=None, out=None): + M, N = x.shape + if group_size is None: + group_size = N + assert N % group_size == 0 + ngroups = N // group_size + assert x.stride(-1) == 1 + assert dy.stride(-1) == 1 + assert dy.shape == (M, N) + if z is not None: + assert z.stride(-1) == 1 + assert z.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + dx = torch.empty_like(x) + if dz is not None: + assert z is not None + assert dz.shape == z.shape + assert dz.stride(-1) == 1 + else: + dz = torch.empty_like(z) if z is not None else None + if recompute_output: + if out is None: + out = torch.empty_like(x) + assert out.shape == x.shape + 
+ # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(group_size)) + if group_size > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + # heuristics for number of warps + num_warps = min(max(BLOCK_N // 256, 1), 8) + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + # If group size is small (e.g., 64), we're only using 1 warp. So having just 108 programs + # would limit the occupancy. + nrow_groups = math.ceil(sm_count * math.ceil(4 / num_warps) / ngroups) + _dw = torch.empty((nrow_groups, N), dtype=torch.float32, device=weight.device) + _db = torch.empty((nrow_groups, N), dtype=torch.float32, device=bias.device) if bias is not None else None + rows_per_program = math.ceil(M / nrow_groups) + grid = (nrow_groups, ngroups) + with torch.cuda.device(x.device.index): + _layer_norm_bwd_kernel[grid](x, weight, bias, z, out if recompute_output else None, + dy, dx, _dw, _db, dz, mean, rstd, + x.stride(0), + z.stride(0) if z is not None else 0, + 0 if not recompute_output else out.stride(0), + dy.stride(0), dx.stride(0), + dz.stride(0) if dz is not None else 0, + _dw.stride(0), + _db.stride(0) if _db is not None else 0, + M, group_size, eps, + rows_per_program, + BLOCK_N=BLOCK_N, + NORM_BEFORE_GATE=norm_before_gate, + IS_RMS_NORM=is_rms_norm, + num_warps=num_warps) + dw = _dw.sum(0).to(weight.dtype) + db = _db.sum(0).to(bias.dtype) if bias is not None else None + return (dx, dw, db, dz) if not recompute_output else (dx, dw, db, dz, out) + + +class LayerNormFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, + is_rms_norm=False): + """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z)) + """ + + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if z is not None: + assert z.shape == x_shape_og + z = z.reshape(-1, z.shape[-1]) + if z.stride(-1) != 1: + z = z.contiguous() + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + y, mean, rstd = _layer_norm_fwd(x, weight, bias, eps, z=z, group_size=group_size, norm_before_gate=norm_before_gate, is_rms_norm=is_rms_norm) + ctx.save_for_backward(x, weight, bias, mean, rstd, z) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.group_size = group_size + ctx.norm_before_gate = norm_before_gate + ctx.is_rms_norm = is_rms_norm + return y.reshape(x_shape_og) + + @staticmethod + def backward(ctx, dy): + x, weight, bias, mean, rstd, z = ctx.saved_tensors + dy = dy.reshape(-1, dy.shape[-1]) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + dx, dw, db, dz = _layer_norm_bwd(dy, x, weight, bias, ctx.eps, mean, rstd, z, ctx.group_size, + ctx.norm_before_gate, ctx.is_rms_norm) + return dx.reshape(ctx.x_shape_og), dw, db, dz.reshape(ctx.x_shape_og) if dz is not None else None, None, None, None, None + + +def layernorm_fn(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, is_rms_norm=False): + return LayerNormFn.apply(x, weight, bias, z, eps, group_size, norm_before_gate, is_rms_norm) + + +def rmsnorm_fn(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True): + return LayerNormFn.apply(x, weight, bias, z, eps, group_size, norm_before_gate, True) + + +class LayerNorm(torch.nn.Module): + + def __init__(self, 
hidden_size, eps=1e-5, group_size=None, norm_before_gate=True, device=None, dtype=None): + """If group_size is not None, we do GroupNorm with each group having group_size elements. + group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group). + """ + + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.bias = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.group_size = group_size + self.norm_before_gate = norm_before_gate + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + torch.nn.init.zeros_(self.bias) + + def forward(self, x, z=None): + """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z)) + """ + return layernorm_fn(x, self.weight, self.bias, z=z, group_size=self.group_size, eps=self.eps, + norm_before_gate=self.norm_before_gate) + + +class RMSNorm(torch.nn.Module): + + def __init__(self, hidden_size, eps=1e-5, group_size=None, norm_before_gate=True, device=None, dtype=None): + """If group_size is not None, we do GroupNorm with each group having group_size elements. + group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group). + """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.register_parameter("bias", None) + self.group_size = group_size + self.norm_before_gate = norm_before_gate + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + + def forward(self, x, z=None): + """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z)) + """ + return rmsnorm_fn(x, self.weight, self.bias, z=z, eps=self.eps, group_size=self.group_size, + norm_before_gate=self.norm_before_gate) diff --git a/mamba/build/lib/mamba_ssm/ops/triton/selective_state_update.py b/mamba/build/lib/mamba_ssm/ops/triton/selective_state_update.py new file mode 100644 index 0000000000000000000000000000000000000000..bc78de90ec60304e38c3c86b44c49bf086650084 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/selective_state_update.py @@ -0,0 +1,265 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
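+# This module implements a fused single-step update of the selective SSM state: given the running
+# `state` and the current x, dt, A, B, C (plus optional D, z and dt_bias), the Triton kernel below
+# updates `state` in place and returns that step's output. A pure PyTorch reference,
+# selective_state_update_ref, follows the fused version. A rough sketch of the call (shapes as in
+# the docstrings below; the tensor names here are illustrative placeholders, not defined in this file):
+#   y = selective_state_update(state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True)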
+ +"""We want triton==2.1.0 or triton==2.2.0 or triton==2.3.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.softplus import softplus + + +@triton.heuristics({"HAS_DT_BIAS": lambda args: args["dt_bias_ptr"] is not None}) +@triton.heuristics({"HAS_D": lambda args: args["D_ptr"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["z_ptr"] is not None}) +@triton.heuristics({"BLOCK_SIZE_DSTATE": lambda args: triton.next_power_of_2(args["dstate"])}) +@triton.jit +def _selective_scan_update_kernel( + # Pointers to matrices + state_ptr, x_ptr, dt_ptr, dt_bias_ptr, A_ptr, B_ptr, C_ptr, D_ptr, z_ptr, out_ptr, + # Matrix dimensions + batch, nheads, dim, dstate, nheads_ngroups_ratio, + # Strides + stride_state_batch, stride_state_head, stride_state_dim, stride_state_dstate, + stride_x_batch, stride_x_head, stride_x_dim, + stride_dt_batch, stride_dt_head, stride_dt_dim, + stride_dt_bias_head, stride_dt_bias_dim, + stride_A_head, stride_A_dim, stride_A_dstate, + stride_B_batch, stride_B_group, stride_B_dstate, + stride_C_batch, stride_C_group, stride_C_dstate, + stride_D_head, stride_D_dim, + stride_z_batch, stride_z_head, stride_z_dim, + stride_out_batch, stride_out_head, stride_out_dim, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + TIE_HDIM: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + HAS_D: tl.constexpr, + HAS_Z: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_m = tl.program_id(axis=0) + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + state_ptr += pid_b * stride_state_batch + pid_h * stride_state_head + x_ptr += pid_b * stride_x_batch + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_h * stride_dt_head + if HAS_DT_BIAS: + dt_bias_ptr += pid_h * stride_dt_bias_head + A_ptr += pid_h * stride_A_head + B_ptr += pid_b * stride_B_batch + (pid_h // nheads_ngroups_ratio) * stride_B_group + C_ptr += pid_b * stride_C_batch + (pid_h // nheads_ngroups_ratio) * stride_C_group + if HAS_Z: + z_ptr += pid_b * stride_z_batch + pid_h * stride_z_head + out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_DSTATE) + state_ptrs = state_ptr + (offs_m[:, None] * stride_state_dim + offs_n[None, :] * stride_state_dstate) + x_ptrs = x_ptr + offs_m * stride_x_dim + dt_ptrs = dt_ptr + offs_m * stride_dt_dim + if HAS_DT_BIAS: + dt_bias_ptrs = dt_bias_ptr + offs_m * stride_dt_bias_dim + if HAS_D: + D_ptr += pid_h * stride_D_head + A_ptrs = A_ptr + (offs_m[:, None] * stride_A_dim + offs_n[None, :] * stride_A_dstate) + B_ptrs = B_ptr + offs_n * stride_B_dstate + C_ptrs = C_ptr + offs_n * stride_C_dstate + if HAS_D: + D_ptrs = D_ptr + offs_m * stride_D_dim + if HAS_Z: + z_ptrs = z_ptr + offs_m * stride_z_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + + state = tl.load(state_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0) + x = tl.load(x_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if not TIE_HDIM: + dt = tl.load(dt_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt += tl.load(dt_bias_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if DT_SOFTPLUS: + dt = softplus(dt) + A = tl.load(A_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + dA = tl.exp(A * dt[:, None]) + else: + dt = 
tl.load(dt_ptr).to(tl.float32) + if HAS_DT_BIAS: + dt += tl.load(dt_bias_ptr).to(tl.float32) + if DT_SOFTPLUS: + dt = softplus(dt) + A = tl.load(A_ptr).to(tl.float32) + dA = tl.exp(A * dt) # scalar, not a matrix + + B = tl.load(B_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + C = tl.load(C_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + if HAS_D: + D = tl.load(D_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_Z: + z = tl.load(z_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + + if not TIE_HDIM: + dB = B[None, :] * dt[:, None] + else: + dB = B * dt # vector of size (dstate,) + state = state * dA + dB * x[:, None] + tl.store(state_ptrs, state, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate)) + out = tl.sum(state * C[None, :], axis=1) + if HAS_D: + out += x * D + if HAS_Z: + out *= z * tl.sigmoid(z) + tl.store(out_ptrs, out, mask=offs_m < dim) + + +def selective_state_update(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) or (batch, nheads, dim, dstate) + x: (batch, dim) or (batch, nheads, dim) + dt: (batch, dim) or (batch, nheads, dim) + A: (dim, dstate) or (nheads, dim, dstate) + B: (batch, dstate) or (batch, ngroups, dstate) + C: (batch, dstate) or (batch, ngroups, dstate) + D: (dim,) or (nheads, dim) + z: (batch, dim) or (batch, nheads, dim) + dt_bias: (dim,) or (nheads, dim) + Return: + out: (batch, dim) or (batch, nheads, dim) + """ + has_heads = state.dim() > 3 + if state.dim() == 3: + state = state.unsqueeze(1) + if x.dim() == 2: + x = x.unsqueeze(1) + if dt.dim() == 2: + dt = dt.unsqueeze(1) + if A.dim() == 2: + A = A.unsqueeze(0) + if B.dim() == 2: + B = B.unsqueeze(1) + if C.dim() == 2: + C = C.unsqueeze(1) + if D is not None and D.dim() == 1: + D = D.unsqueeze(0) + if z is not None and z.dim() == 2: + z = z.unsqueeze(1) + if dt_bias is not None and dt_bias.dim() == 1: + dt_bias = dt_bias.unsqueeze(0) + batch, nheads, dim, dstate = state.shape + assert x.shape == (batch, nheads, dim) + assert dt.shape == x.shape + assert A.shape == (nheads, dim, dstate) + ngroups = B.shape[1] + assert nheads % ngroups == 0, "nheads must be divisible by ngroups" + assert B.shape == (batch, ngroups, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (nheads, dim) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (nheads, dim) + out = torch.empty_like(x) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE_M']), batch, nheads) + z_strides = ((z.stride(0), z.stride(1), z.stride(2)) if z is not None else (0, 0, 0)) + # We don't want autotune since it will overwrite the state + # We instead tune by hand. 
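+ # Hand-tuned launch heuristic: BLOCK_SIZE_M shrinks as dstate grows (and num_warps doubles once
+ # dstate > 128), so each program's (BLOCK_SIZE_M, dstate) state tile stays roughly the same size.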
+ BLOCK_SIZE_M, num_warps = ((32, 4) if dstate <= 16
+ else ((16, 4) if dstate <= 32 else
+ ((8, 4) if dstate <= 64 else
+ ((4, 4) if dstate <= 128 else
+ ((4, 8))))))
+ tie_hdim = A.stride(-1) == 0 and A.stride(-2) == 0 and dt.stride(-1) == 0 and (dt_bias is None or dt_bias.stride(-1) == 0)
+ # Optional tensors (dt_bias, D) may be None; pass zero strides in that case. The kernel never
+ # reads them, since HAS_DT_BIAS / HAS_D are then False.
+ with torch.cuda.device(x.device.index):
+ _selective_scan_update_kernel[grid](
+ state, x, dt, dt_bias, A, B, C, D, z, out,
+ batch, nheads, dim, dstate, nheads // ngroups,
+ state.stride(0), state.stride(1), state.stride(2), state.stride(3),
+ x.stride(0), x.stride(1), x.stride(2),
+ dt.stride(0), dt.stride(1), dt.stride(2),
+ *((dt_bias.stride(0), dt_bias.stride(1)) if dt_bias is not None else (0, 0)),
+ A.stride(0), A.stride(1), A.stride(2),
+ B.stride(0), B.stride(1), B.stride(2),
+ C.stride(0), C.stride(1), C.stride(2),
+ *((D.stride(0), D.stride(1)) if D is not None else (0, 0)),
+ z_strides[0], z_strides[1], z_strides[2],
+ out.stride(0), out.stride(1), out.stride(2),
+ dt_softplus,
+ tie_hdim,
+ BLOCK_SIZE_M,
+ num_warps=num_warps,
+ )
+ if not has_heads:
+ out = out.squeeze(1)
+ return out
+
+
+def selective_state_update_ref(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False):
+ """
+ Argument:
+ state: (batch, dim, dstate) or (batch, nheads, dim, dstate)
+ x: (batch, dim) or (batch, nheads, dim)
+ dt: (batch, dim) or (batch, nheads, dim)
+ A: (dim, dstate) or (nheads, dim, dstate)
+ B: (batch, dstate) or (batch, ngroups, dstate)
+ C: (batch, dstate) or (batch, ngroups, dstate)
+ D: (dim,) or (nheads, dim)
+ z: (batch, dim) or (batch, nheads, dim)
+ dt_bias: (dim,) or (nheads, dim)
+ Return:
+ out: (batch, dim) or (batch, nheads, dim)
+ """
+ has_heads = state.dim() > 3
+ if state.dim() == 3:
+ state = state.unsqueeze(1)
+ if x.dim() == 2:
+ x = x.unsqueeze(1)
+ if dt.dim() == 2:
+ dt = dt.unsqueeze(1)
+ if A.dim() == 2:
+ A = A.unsqueeze(0)
+ if B.dim() == 2:
+ B = B.unsqueeze(1)
+ if C.dim() == 2:
+ C = C.unsqueeze(1)
+ if D is not None and D.dim() == 1:
+ D = D.unsqueeze(0)
+ if z is not None and z.dim() == 2:
+ z = z.unsqueeze(1)
+ if dt_bias is not None and dt_bias.dim() == 1:
+ dt_bias = dt_bias.unsqueeze(0)
+ batch, nheads, dim, dstate = state.shape
+ assert x.shape == (batch, nheads, dim)
+ assert dt.shape == x.shape
+ assert A.shape == (nheads, dim, dstate)
+ ngroups = B.shape[1]
+ assert nheads % ngroups == 0, "nheads must be divisible by ngroups"
+ assert B.shape == (batch, ngroups, dstate)
+ assert C.shape == B.shape
+ if D is not None:
+ assert D.shape == (nheads, dim)
+ if z is not None:
+ assert z.shape == x.shape
+ if dt_bias is not None:
+ assert dt_bias.shape == (nheads, dim)
+ dt = dt + dt_bias if dt_bias is not None else dt
+ dt = F.softplus(dt) if dt_softplus else dt
+ dA = torch.exp(rearrange(dt, "b h d -> b h d 1") * A) # (batch, nheads, dim, dstate)
+ B = repeat(B, "b g n -> b (g h) n", h=nheads // ngroups) # (batch, nheads, dstate)
+ C = repeat(C, "b g n -> b (g h) n", h=nheads // ngroups) # (batch, nheads, dstate)
+ dB = rearrange(dt, "b h d -> b h d 1") * rearrange(B, "b h n -> b h 1 n") # (batch, nheads, dim, dstate)
+ state.copy_(state * dA + dB * rearrange(x, "b h d -> b h d 1")) # (batch, nheads, dim, dstate)
+ out = torch.einsum("bhdn,bhn->bhd", state.to(C.dtype), C)
+ if D is not None:
+ out += (x * D).to(out.dtype)
+ out = (out if z is None else out * F.silu(z)).to(x.dtype)
+ if not has_heads:
+ out = out.squeeze(1)
+ return out
diff --git a/mamba/build/lib/mamba_ssm/ops/triton/softplus.py b/mamba/build/lib/mamba_ssm/ops/triton/softplus.py new file mode 100644 index 
0000000000000000000000000000000000000000..de68b46189178903432715557cdf525f7b644fc0 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/softplus.py @@ -0,0 +1,17 @@ +import triton +import triton.language as tl +from packaging import version + +TRITON3 = version.parse(triton.__version__) >= version.parse("3.0.0") + + +if TRITON3: + @triton.jit + def softplus(dt): + dt = tl.where(dt <= 20.0, tl.math.log(tl.math.exp(dt) + 1), dt) + return dt +else: + @triton.jit + def softplus(dt): + dt = tl.where(dt <= 20.0, tl.math.log1p(tl.exp(dt)), dt) + return dt \ No newline at end of file diff --git a/mamba/build/lib/mamba_ssm/ops/triton/ssd_bmm.py b/mamba/build/lib/mamba_ssm/ops/triton/ssd_bmm.py new file mode 100644 index 0000000000000000000000000000000000000000..48fd4f063e7796ceea772c21956b7bbdcbf1d196 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/ssd_bmm.py @@ -0,0 +1,262 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['chunk_size', 'K', 'IS_CAUSAL'], +) +@triton.jit +def _bmm_chunk_fwd_kernel( + # Pointers to matrices + a_ptr, b_ptr, out_ptr, seq_idx_ptr, + # Matrix dimensions + seqlen, chunk_size, K, ngroups, + stride_a_batch, stride_a_seqlen, stride_a_head, stride_ak, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_bk, + stride_out_batch, stride_out_chunk, stride_out_head, stride_outm, stride_outn, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + IS_CAUSAL: tl.constexpr, + dot_dtype: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_ch = tl.program_id(axis=2) + pid_c = pid_ch // ngroups + pid_h = pid_ch - pid_c * ngroups + num_pid_n = tl.cdiv(chunk_size, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + if IS_CAUSAL: + if pid_n * BLOCK_SIZE_N >= (pid_m + 1) * BLOCK_SIZE_M: + return + a_ptr += pid_b * stride_a_batch + pid_c * chunk_size * stride_a_seqlen + pid_h * stride_a_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + pid_h * stride_b_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen 
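+ # Each program computes one (BLOCK_SIZE_M, BLOCK_SIZE_N) tile of the per-chunk product
+ # out[m, n] = sum_k a[m, k] * b[n, k], i.e. a @ b^T restricted to a single chunk, accumulating
+ # over K in BLOCK_SIZE_K slices with both operands cast to dot_dtype.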
+ + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = a_ptr + (offs_m[:, None] * stride_a_seqlen + offs_k[None, :] * stride_ak) + b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_n[None, :] * stride_b_seqlen) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0).to(dot_dtype) + b = tl.load(b_ptrs, mask=(offs_k[:, None] < K - k * BLOCK_SIZE_K) & (offs_n[None, :] < chunk_size_limit), other=0.0).to(dot_dtype) + acc += tl.dot(a, b) + a_ptrs += BLOCK_SIZE_K * stride_ak + b_ptrs += BLOCK_SIZE_K * stride_bk + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + if HAS_SEQ_IDX: + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_n = tl.load(seq_idx_ptr + offs_n * stride_seq_idx_seqlen, mask=offs_n < chunk_size_limit, other=-2) + acc = tl.where(seq_idx_m[:, None] == seq_idx_n[None, :], acc, 0.0) + out = acc.to(out_ptr.dtype.element_ty) + + out_ptr += pid_b * stride_out_batch + pid_c * stride_out_chunk + pid_h * stride_out_head + out_ptrs = out_ptr + (stride_outm * offs_m[:, None] + offs_n[None, :] * stride_outn) + tl.store(out_ptrs, out, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_CS': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_CS': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_CS': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=2), + ], + key=['chunk_size', 'K'], +) +@triton.jit +def _bmm_chunk_bwd_kernel( + # Pointers to matrices + a_ptr, dout_ptr, db_ptr, res_ptr, + # Matrix dimensions + seqlen, chunk_size, K, ngroups, + stride_a_batch, stride_a_seqlen, stride_a_head, stride_ak, + stride_dout_batch, stride_dout_chunk, stride_dout_head, stride_dout_csize_m, stride_dout_csize_n, + stride_db_batch, stride_db_seqlen, stride_db_head, stride_db_k, + stride_res_batch, stride_res_seqlen, stride_res_head, stride_res_k, + # Meta-parameters + dot_dtype: tl.constexpr, + HAS_RESIDUAL: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_CS: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_ch = tl.program_id(axis=2) + pid_c = pid_ch // ngroups + pid_h = pid_ch - pid_c * ngroups + num_pid_n = tl.cdiv(K, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // 
num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + + a_ptr += pid_b * stride_a_batch + pid_c * chunk_size * stride_a_seqlen + pid_h * stride_a_head + dout_ptr += pid_b * stride_dout_batch + pid_c * stride_dout_chunk + pid_h * stride_dout_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_cs = tl.arange(0, BLOCK_SIZE_CS) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_csize_n + offs_cs[None, :] * stride_dout_csize_m) + a_ptrs = a_ptr + (offs_cs[:, None] * stride_a_seqlen + offs_n[None, :] * stride_ak) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for cs in range(0, tl.cdiv(chunk_size_limit, BLOCK_SIZE_CS)): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_cs[None, :] < chunk_size_limit - cs * BLOCK_SIZE_CS), other=0.0).to(dot_dtype) + a = tl.load(a_ptrs, mask=(offs_cs[:, None] < chunk_size_limit - cs * BLOCK_SIZE_CS) & (offs_n[None, :] < K), other=0.0).to(dot_dtype) + acc += tl.dot(dout, a) + dout_ptrs += BLOCK_SIZE_CS * stride_dout_csize_m + a_ptrs += BLOCK_SIZE_CS * stride_a_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + if HAS_RESIDUAL: + res_ptr += pid_b * stride_res_batch + pid_c * chunk_size * stride_res_seqlen + pid_h * stride_res_head + res_ptrs = res_ptr + (offs_m[:, None] * stride_res_seqlen + offs_n[None, :] * stride_res_k) + res = tl.load(res_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < K)).to(tl.float32) + acc += res + db = acc.to(db_ptr.dtype.element_ty) + + db_ptr += pid_b * stride_db_batch + pid_c * chunk_size * stride_db_seqlen + pid_h * stride_db_head + db_ptrs = db_ptr + (offs_m[:, None] * stride_db_seqlen + offs_n[None, :] * stride_db_k) + tl.store(db_ptrs, db, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < K)) + + +def _bmm_chunk_fwd(a, b, chunk_size, seq_idx=None, causal=False, output_dtype=None): + """ + Argument: + a: (batch, seqlen, k) or (batch, seqlen, ngroups, k) + b: (batch, seqlen, k) or (batch, seqlen, ngroups, k) + seq_idx: (batch, seqlen) or None. out[i, j] for seq_idx[i] != seq_idx[j] will be zeroed out. + causal: if True, then out[i, j] for i > j will be arbitrary, only out[i, j] for i <= j are + guaranteed to be correct. + Return: + out: (batch, nchunks, chunk_size, chunk_size) or (batch, nchunks, ngroups, chunk_size, chunk_size) + """ + # Check constraints. + has_groups = a.dim() == 4 + if not has_groups: + batch, seqlen, k = a.shape + else: + batch, seqlen, ngroups, k = a.shape + assert b.shape == a.shape + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if a.stride(-1) != 1 and a.stride(1) != 1: + a = a.contiguous() + if b.stride(-1) != 1 and b.stride(1) != 1: + b = b.contiguous() + nchunks = math.ceil(seqlen / chunk_size) + # Allocates output. 
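+ # The last chunk may be shorter than chunk_size; the kernel masks its loads with
+ # chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size), so the output can still be
+ # allocated as full chunk_size x chunk_size tiles per chunk.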
+ out_dtype = a.dtype if output_dtype is None else output_dtype
+ out = torch.empty((batch, nchunks, chunk_size, chunk_size) if not has_groups else (batch, nchunks, ngroups, chunk_size, chunk_size),
+ device=a.device, dtype=out_dtype)
+ dot_dtype = (tl.bfloat16 if a.dtype == torch.bfloat16 or b.dtype == torch.bfloat16 else
+ (tl.float16 if a.dtype == torch.float16 or b.dtype == torch.float16 else tl.float32))
+ grid = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(chunk_size, META['BLOCK_SIZE_N']),
+ batch, nchunks if not has_groups else nchunks * ngroups)
+ with torch.cuda.device(a.device.index):
+ _bmm_chunk_fwd_kernel[grid](
+ a, b, out, seq_idx,
+ seqlen, chunk_size, k, ngroups if has_groups else 1,
+ a.stride(0), a.stride(1), 0 if not has_groups else a.stride(2), a.stride(-1),
+ b.stride(0), b.stride(1), 0 if not has_groups else b.stride(2), b.stride(-1),
+ out.stride(0), out.stride(1), 0 if not has_groups else out.stride(2), out.stride(-2), out.stride(-1),
+ *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)),
+ causal,
+ dot_dtype,
+ HAS_SEQ_IDX=seq_idx is not None,
+ )
+ return out
+
+
+def _bmm_chunk_bwd(a, dout, residual=None, out=None):
+ """
+ Argument:
+ a: (batch, seqlen, k) or (batch, seqlen, ngroups, k)
+ dout: (batch, nchunks, chunk_size, chunk_size) or (batch, nchunks, ngroups, chunk_size, chunk_size)
+ residual: (batch, seqlen, k) or (batch, seqlen, ngroups, k)
+ Return:
+ out: (batch, seqlen, k) or (batch, seqlen, ngroups, k)
+
+ If there was seq_idx in the fwd pass, then dout[i, j] for seq_idx[i] != seq_idx[j] should already be
+ zeroed out before calling this function.
+ """
+ # Check constraints.
+ has_groups = a.dim() == 4
+ if not has_groups:
+ batch, seqlen, k = a.shape
+ else:
+ batch, seqlen, ngroups, k = a.shape
+ nchunks, chunk_size = dout.shape[1], dout.shape[-1]
+ if a.stride(-1) != 1 and a.stride(-2) != 1:
+ a = a.contiguous()
+ if dout.stride(-1) != 1 and dout.stride(-2) != 1:
+ dout = dout.contiguous()
+ if residual is not None:
+ assert residual.shape == ((batch, seqlen, k) if not has_groups else (batch, seqlen, ngroups, k))
+ if residual.stride(-1) != 1 and residual.stride(1) != 1:
+ residual = residual.contiguous()
+ # Allocates output.
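+ # A preallocated `out` is reused when given (see the shape/stride asserts below); otherwise a
+ # tensor shaped like `a` is allocated. The optional `residual` is added to the accumulator inside
+ # the kernel (HAS_RESIDUAL) rather than in a separate elementwise pass.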
+ if out is not None: + assert out.shape == a.shape + assert out.stride(-1) == 1 or out.stride(1) == 1 + else: + out = torch.empty_like(a) + dot_dtype = (tl.bfloat16 if a.dtype == torch.bfloat16 or dout.dtype == torch.bfloat16 else + (tl.float16 if a.dtype == torch.float16 or dout.dtype == torch.float16 else tl.float32)) + grid = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(k, META['BLOCK_SIZE_N']), batch, + nchunks if not has_groups else nchunks * ngroups) + residual_strides = ((residual.stride(0), residual.stride(1), 0 if not has_groups else residual.stride(2), + residual.stride(-1)) + if residual is not None else (0, 0, 0, 0)) + with torch.cuda.device(a.device.index): + _bmm_chunk_bwd_kernel[grid]( + a, dout, out, residual, + seqlen, chunk_size, k, ngroups if has_groups else 1, + a.stride(0), a.stride(1), 0 if not has_groups else a.stride(2), a.stride(-1), + dout.stride(0), dout.stride(1), 0 if not has_groups else dout.stride(2), dout.stride(-2), dout.stride(-1), + out.stride(0), out.stride(1), 0 if not has_groups else out.stride(2), out.stride(-1), + residual_strides[0], residual_strides[1], residual_strides[2], residual_strides[3], + dot_dtype, + HAS_RESIDUAL=residual is not None, + ) + return out diff --git a/mamba/build/lib/mamba_ssm/ops/triton/ssd_chunk_scan.py b/mamba/build/lib/mamba_ssm/ops/triton/ssd_chunk_scan.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa3a934615b85c09b4eb90f04f0f25caaea5980 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/ssd_chunk_scan.py @@ -0,0 +1,1829 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +from packaging import version + +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.ssd_bmm import _bmm_chunk_fwd, _bmm_chunk_bwd + +TRITON_22 = version.parse(triton.__version__) >= version.parse('2.2.0') + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['chunk_size', 'hdim', 'dstate', 'IS_CAUSAL'], +) +@triton.jit +def _chunk_scan_fwd_kernel( + # Pointers to matrices + cb_ptr, x_ptr, z_ptr, out_ptr, out_x_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, C_ptr, 
prev_states_ptr, D_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_z_batch, stride_z_seqlen, stride_z_head, stride_z_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_states_batch, stride_states_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_D_head, + # Meta-parameters + IS_CAUSAL: tl.constexpr, + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_Z: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, + IS_TRITON_22: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + (pid_h // nheads_ngroups_ratio) * stride_C_head + prev_states_ptr += pid_b * stride_states_batch + pid_c * stride_states_chunk + pid_h * stride_states_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Without the if (pid_c > -1), with Triton 2.1.0, I get + # Assertion `!(srcMmaLayout && dstMmaLayout) && "Unexpected mma -> mm a layout conversion"' failed. 
+ # With Triton 2.2.0, this works + if IS_TRITON_22 or pid_c > -1: + # Faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + offs_k_dstate = tl.arange(0, BLOCK_SIZE_DSTATE if BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_k_dstate[None, :] * stride_C_dstate) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_states_hdim + offs_k_dstate[:, None] * stride_states_dstate) + if not HAS_SEQ_IDX: + scale_m = tl.exp(dA_cs_m) + else: + scale_m = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + if BLOCK_SIZE_DSTATE <= 128: + C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k_dstate[None, :] < dstate), other=0.0) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k_dstate[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + prev_states = prev_states.to(C_ptr.dtype.element_ty) + acc = tl.dot(C, prev_states) * scale_m[:, None] + else: + for k in range(0, dstate, BLOCK_SIZE_K): + C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k_dstate[None, :] < dstate - k), other=0.0) + # C = (C * scale_m[:, None]).to(C_ptr.dtype.element_ty) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k_dstate[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + prev_states = prev_states.to(C_ptr.dtype.element_ty) + acc += tl.dot(C, prev_states) + C_ptrs += BLOCK_SIZE_K + prev_states_ptrs += BLOCK_SIZE_K + acc *= scale_m[:, None] + + offs_k = tl.arange(0, BLOCK_SIZE_K) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_k[None, :] * stride_cb_csize_k) + x_ptrs = x_ptr + (offs_k[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_k * stride_dt_csize + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + K_MAX = chunk_size_limit if not IS_CAUSAL else min((pid_m + 1) * BLOCK_SIZE_M, chunk_size_limit) + for k in range(0, K_MAX, BLOCK_SIZE_K): + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_k[None, :] < chunk_size - k), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size - k, other=0.0).to(tl.float32) + # If there's seq_idx, we already set cb[i, j] = 0 for seq_idx[i] != seq_idx[j]. + # So we don't need masking wrt seq_idx here. 
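+ # Apply the within-chunk decay and step size: cb[m, k] is scaled by exp(dA_cumsum[m] - dA_cumsum[k])
+ # and by dt[k], and (if IS_CAUSAL) masked so that only positions k <= m contribute.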
+ cb *= tl.exp((dA_cs_m[:, None] - dA_cs_k[None, :])) + dt_k = tl.load(dt_ptrs, mask=offs_k < chunk_size - k, other=0.0).to(tl.float32) + cb *= dt_k + if IS_CAUSAL: + mask = offs_m[:, None] >= k + offs_k[None, :] + cb = tl.where(mask, cb, 0.0) + cb = cb.to(x_ptr.dtype.element_ty) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < hdim), other=0.0) + acc += tl.dot(cb, x) + cb_ptrs += BLOCK_SIZE_K * stride_cb_csize_k + x_ptrs += BLOCK_SIZE_K * stride_x_seqlen + dt_ptrs += BLOCK_SIZE_K * stride_dt_csize + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + offs_out_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_out_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + if HAS_D: + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + x_residual = tl.load(x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim), + mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + acc += x_residual * D + + if HAS_Z: + out_x_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + out_x_ptrs = out_x_ptr + (stride_out_seqlen * offs_out_m[:, None] + offs_out_n[None, :]) + tl.store(out_x_ptrs, acc, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim)) + + z_ptr += pid_b * stride_z_batch + pid_c * chunk_size * stride_z_seqlen + pid_h * stride_z_head + z_ptrs = z_ptr + (stride_z_seqlen * offs_out_m[:, None] + stride_z_hdim * offs_out_n[None, :]) + z = tl.load(z_ptrs, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim), other=0.0).to(tl.float32) + acc *= z * tl.sigmoid(z) + + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + out_ptrs = out_ptr + (stride_out_seqlen * offs_out_m[:, None] + offs_out_n[None, :] * stride_out_hdim) + tl.store(out_ptrs, acc, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim)) + + +@triton.autotune( + configs=[ + # triton.Config({'BLOCK_SIZE_N': 256}, num_stages=4, num_warps=4), + # triton.Config({'BLOCK_SIZE_N': 128}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_N': 64}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_N': 64}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=8), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_scan_fwd_kernel_wip( + # Pointers to matrices + cb_ptr, x_ptr, z_ptr, out_ptr, out_x_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, C_ptr, B_ptr, prev_states_ptr, D_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_z_batch, stride_z_seqlen, stride_z_head, stride_z_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_B_batch, stride_B_seqlen, stride_B_head, stride_B_dstate, + 
stride_states_batch, stride_states_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_D_head, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_Z: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_n = tl.program_id(axis=0) + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + (pid_h // nheads_ngroups_ratio) * stride_C_head + B_ptr += pid_b * stride_B_batch + pid_c * chunk_size * stride_B_seqlen + (pid_h // nheads_ngroups_ratio) * stride_B_head + prev_states_ptr += pid_b * stride_states_batch + pid_c * stride_states_chunk + pid_h * stride_states_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + + offs_m = tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k_dstate = tl.arange(0, BLOCK_SIZE_DSTATE) + + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_k_dstate[None, :] * stride_C_dstate) + B_ptrs = B_ptr + (offs_m[None, :] * stride_B_seqlen + offs_k_dstate[:, None] * stride_B_dstate) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_states_hdim + offs_k_dstate[:, None] * stride_states_dstate) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_m[None, :] * stride_cb_csize_k) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + out_ptrs = out_ptr + (offs_m[:, None] * stride_out_seqlen + offs_n[None, :] * stride_out_hdim) + + prev_states = tl.load(prev_states_ptrs, mask=(offs_k_dstate[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + # if pid_c == 0: + # if pid_b == 0: + # if pid_h == 0: + # tl.device_print("", prev_states) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + # dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # scale_m = tl.exp(dA_cs_m) + # C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k_dstate[None, :] < dstate), other=0.0) + # acc = tl.dot(C, prev_states.to(C_ptr.dtype.element_ty)) * scale_m[:, None] + # cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_m[None, :] < chunk_size), other=0.0).to(tl.float32) + # cb *= tl.exp((dA_cs_m[:, None] - dA_cs_m[None, :])) + # dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # cb *= dt_m + # mask = offs_m[:, None] >= offs_m[None, :] + # cb = tl.where(mask, cb, 0.0) + # cb = cb.to(x_ptr.dtype.element_ty) + # x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0) + # acc += tl.dot(cb, x) + # if HAS_D: + # if D_HAS_HDIM: + # D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, 
mask=offs_n < hdim, other=0.0).to(tl.float32) + # else: + # D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + # acc += x.to(tl.float32) * D + # tl.store(out_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + for start_m in range(0, chunk_size_limit, BLOCK_SIZE_M): + start_m = tl.multiple_of(start_m, BLOCK_SIZE_M) + dA_cs_m = tl.load(dA_cumsum_ptr + (start_m + offs_m) * stride_dA_cs_csize, mask=offs_m < chunk_size - start_m, other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr + start_m - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + (start_m + offs_m) * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit - start_m, other=-1) + if not HAS_SEQ_IDX: + scale_m = tl.exp(dA_cs_m) + else: + scale_m = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit - start_m) & (offs_k_dstate[None, :] < dstate), other=0.0) + acc = tl.dot(C, prev_states.to(C_ptr.dtype.element_ty)) * scale_m[:, None] + # cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size - start_m) & (offs_m[None, :] < chunk_size - start_m), other=0.0).to(tl.float32) + # cb *= tl.exp((dA_cs_m[:, None] - dA_cs_m[None, :])) + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size - start_m, other=0.0).to(tl.float32) + # cb *= dt_m + # mask = offs_m[:, None] >= offs_m[None, :] + # cb = tl.where(mask, cb, 0.0) + # cb = cb.to(x_ptr.dtype.element_ty) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit - start_m) & (offs_n[None, :] < hdim), other=0.0) + # acc += tl.dot(cb, x) + + if HAS_D: + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + acc += x.to(tl.float32) * D + + # if HAS_Z: + # out_x_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + # out_x_ptrs = out_x_ptr + (stride_out_seqlen * offs_out_m[:, None] + offs_out_n[None, :]) + # tl.store(out_x_ptrs, acc, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim)) + + # z_ptr += pid_b * stride_z_batch + pid_c * chunk_size * stride_z_seqlen + pid_h * stride_z_head + # z_ptrs = z_ptr + (stride_z_seqlen * offs_out_m[:, None] + stride_z_hdim * offs_out_n[None, :]) + # z = tl.load(z_ptrs, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim), other=0.0).to(tl.float32) + # acc *= z * tl.sigmoid(z) + + tl.store(out_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit - start_m) & (offs_n[None, :] < hdim)) + + # TODO: this is not correct, and quite a bit slower + if start_m + BLOCK_SIZE_M < chunk_size_limit: + # B = tl.load(B_ptrs, mask=(offs_m[None, :] < chunk_size_limit - start_m) & (offs_k_dstate[:, None] < dstate), other=0.0).to(tl.float32) + B = tl.load(B_ptrs, mask=(offs_m[None, :] < chunk_size_limit - start_m) & (offs_k_dstate[:, None] < dstate), other=0.0) + dA_cs_last = tl.load(dA_cumsum_ptr + (start_m + BLOCK_SIZE_M) * stride_dA_cs_csize).to(tl.float32) + # TODO: seq_idx + scale = tl.exp((dA_cs_last - dA_cs_m)) * dt_m + # B *= scale + B = B.to(x_ptr.dtype.element_ty) + tmp = tl.dot(B, x) + prev_states += tmp.to(prev_states.dtype) + + C_ptrs += BLOCK_SIZE_M * stride_C_seqlen + B_ptrs += BLOCK_SIZE_M * stride_B_seqlen + cb_ptrs += BLOCK_SIZE_M * stride_cb_csize_m + BLOCK_SIZE_M * stride_cb_csize_k + x_ptrs += BLOCK_SIZE_M * stride_x_seqlen + dt_ptrs += BLOCK_SIZE_M * stride_dt_csize + 
out_ptrs += BLOCK_SIZE_M * stride_out_seqlen + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32}), + triton.Config({'BLOCK_SIZE_M': 64}), + triton.Config({'BLOCK_SIZE_M': 128}), + triton.Config({'BLOCK_SIZE_M': 256}), + ], + key=["chunk_size", "hdim"], +) +@triton.jit +def _chunk_scan_bwd_dz_kernel( + # Pointers to matrices + dout_ptr, out_ptr, z_ptr, x_ptr, D_ptr, outz_ptr, dz_ptr, dout_x_ptr, dD_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_z_batch, stride_z_seqlen, stride_z_head, stride_z_hdim, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_D_head, + stride_outz_batch, stride_outz_seqlen, stride_outz_head, stride_outz_hdim, + stride_dz_batch, stride_dz_seqlen, stride_dz_head, stride_dz_hdim, + stride_doutx_batch, stride_doutx_seqlen, stride_doutx_head, stride_doutx_hdim, + stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_csize, stride_dD_hdim, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_DDACS: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dout_x_ptr += pid_b * stride_doutx_batch + pid_c * chunk_size * stride_doutx_seqlen + pid_h * stride_doutx_head + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + z_ptr += pid_b * stride_z_batch + pid_c * chunk_size * stride_z_seqlen + pid_h * stride_z_head + dz_ptr += pid_b * stride_dz_batch + pid_c * chunk_size * stride_dz_seqlen + pid_h * stride_dz_head + if RECOMPUTE_OUTPUT: + outz_ptr += pid_b * stride_outz_batch + pid_c * chunk_size * stride_outz_seqlen + pid_h * stride_outz_head + if HAS_DDACS: + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + if HAS_D: + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_N) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dout_x_ptrs = dout_x_ptr + (offs_m[:, None] * stride_doutx_seqlen + offs_n[None, :] * stride_doutx_hdim) + out_ptrs = out_ptr + (offs_m[:, None] * stride_out_seqlen + offs_n[None, :] * stride_out_hdim) + z_ptrs = z_ptr + (offs_m[:, None] * stride_z_seqlen + offs_n[None, :] * stride_z_hdim) + dz_ptrs = dz_ptr + (offs_m[:, None] * stride_dz_seqlen + offs_n[None, :] * stride_dz_hdim) + if RECOMPUTE_OUTPUT: + outz_ptrs = outz_ptr + (offs_m[:, None] * stride_outz_seqlen + offs_n[None, :] * stride_outz_hdim) + if HAS_D: + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + if D_HAS_HDIM: + dD_ptrs = dD_ptr + offs_n * stride_dD_hdim + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] 
< chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + out = tl.load(out_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + z = tl.load(z_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + z_sigmoid = tl.sigmoid(z) + if RECOMPUTE_OUTPUT: + outz = out * z * z_sigmoid + tl.store(outz_ptrs, outz, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + dz = dout * out * z_sigmoid * (1 + z * (1 - z_sigmoid)) + tl.store(dz_ptrs, dz, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + dout *= z * z_sigmoid + tl.store(dout_x_ptrs, dout, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + if HAS_D: + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + dD = tl.sum(dout * x, axis=0) + tl.store(dD_ptrs, dD, mask=offs_n < hdim) + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + dD = tl.sum(dout * x) + tl.store(dD_ptr, dD) + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + out -= x * D + if HAS_DDACS: + ddA_cs = tl.sum(dout * out, axis=1) + tl.store(ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['hdim', 'dstate', 'chunk_size'], +) +@triton.jit +def _chunk_scan_bwd_dstates_kernel( + # Pointers to matrices + dout_ptr, c_ptr, dprev_states_ptr, dA_cumsum_ptr, seq_idx_ptr, + # Matrix dimensions + hdim, dstate, chunk_size, + batch, seqlen, nchunks, nheads_ngroups_ratio, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_c_batch, stride_c_seqlen, stride_c_head, stride_c_dstate, + stride_dprev_states_batch, stride_dprev_states_chunk, stride_dprev_states_head, stride_dprev_states_hdim, stride_dprev_states_dstate, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + c_ptr += pid_b * stride_c_batch + pid_c * chunk_size * stride_c_seqlen + (pid_h // 
nheads_ngroups_ratio) * stride_c_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_hdim + offs_k[None, :] * stride_dout_seqlen) + c_ptrs = c_ptr + (offs_n[None, :] * stride_c_dstate + offs_k[:, None] * stride_c_seqlen) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs = seq_idx_ptr + offs_k * stride_seq_idx_seqlen + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + for k in range(0, chunk_size_limit, BLOCK_SIZE_K): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < hdim) & (offs_k[None, :] < chunk_size_limit - k), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size - k, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale_k = tl.exp(dA_cs_k) + else: + seq_idx_k = tl.load(seq_idx_ptrs, mask=offs_k < chunk_size_limit - k, other=-1) + scale_k = tl.where(seq_idx_k == seq_idx_prev, tl.exp(dA_cs_k), 0.0) + dout = (dout * scale_k).to(dout_ptr.dtype.element_ty) + c = tl.load(c_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < dstate), other=0.0) + acc += tl.dot(dout, c) + dout_ptrs += BLOCK_SIZE_K * stride_dout_seqlen + c_ptrs += BLOCK_SIZE_K * stride_c_seqlen + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs += BLOCK_SIZE_K * stride_seq_idx_seqlen + out = acc.to(dprev_states_ptr.dtype.element_ty) + + dprev_states_ptr += pid_b * stride_dprev_states_batch + pid_c * stride_dprev_states_chunk + pid_h * stride_dprev_states_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dprev_states_ptrs = dprev_states_ptr + (offs_m[:, None] * stride_dprev_states_hdim + offs_n[None, :] * stride_dprev_states_dstate) + tl.store(dprev_states_ptrs, out, mask=(offs_m[:, None] < hdim) & (offs_n[None, :] < dstate)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, 
pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'dstate', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_dc_kernel( + # Pointers to matrices + dout_ptr, prev_states_ptr, C_ptr, dA_cumsum_ptr, seq_idx_ptr, + dc_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, dstate, hdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_prev_states_batch, stride_prev_states_chunk, stride_prev_states_head, stride_prev_states_hdim, stride_prev_states_dstate, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_dc_batch, stride_dc_seqlen, stride_dc_split, stride_dc_group, stride_dc_dstate, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_DDA_CS: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_sg = tl.program_id(axis=2) + pid_s = pid_sg // ngroups + pid_g = pid_sg - pid_s * ngroups + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dout_head + dc_ptr += pid_b * stride_dc_batch + pid_c * chunk_size * stride_dc_seqlen + pid_g * stride_dc_group + pid_s * stride_dc_split + prev_states_ptr += pid_b * stride_prev_states_batch + pid_c * stride_prev_states_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_prev_states_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dA_cs_head + if HAS_DDA_CS: + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + pid_g * stride_C_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_ddA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_prev_states_dstate + offs_k[:, None] * stride_prev_states_hdim) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + if HAS_DDA_CS: + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_n[None, :] * stride_C_dstate) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_DDA_CS: + c = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + 
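# Each program accumulates the dC contribution of up to nheads_per_program heads of this
+ # group into acc; the per-split partial results are reduced on the host via dC.sum(2).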
nheads_iter = min(nheads_per_program, nheads // ngroups - pid_s * nheads_per_program) + for h in range(nheads_iter): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0) + prev_states = prev_states.to(dout_ptrs.dtype.element_ty) + dc = tl.dot(dout, prev_states) + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_m) + else: + scale = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + dc *= scale[:, None] + if HAS_DDA_CS: + ddA_cs = tl.sum(dc * c, axis=1) + tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + acc += dc + dout_ptrs += stride_dout_head + prev_states_ptrs += stride_prev_states_head + dA_cumsum_ptrs += stride_dA_cs_head + if HAS_DDA_CS: + ddA_cumsum_ptrs += stride_ddA_cs_head + # if HAS_SEQ_IDX: + # seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + # seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + # acc = tl.where(seq_idx_m[:, None] == seq_idx_prev, acc, 0.0) + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dc_ptrs = dc_ptr + (offs_m[:, None] * stride_dc_seqlen + offs_n[None, :] * stride_dc_dstate) + tl.store(dc_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + ], + key=['chunk_size', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_dx_kernel( + # Pointers to matrices + x_ptr, cb_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, D_ptr, + dx_ptr, ddt_ptr, # dD_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, 
stride_dA_cs_csize, + stride_D_head, + stride_dx_batch, stride_dx_seqlen, stride_dx_head, stride_dx_hdim, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + # stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_hdim, stride_dD_csize, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + # if HAS_D: + # dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_k[None, :] * stride_cb_csize_k) + dout_ptrs = dout_ptr + (offs_k[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + # Idk why limiting K_MAX gives wrong results, is it a Triton bug? + # K_MAX = min((pid_m + 1) * BLOCK_SIZE_M, chunk_size_limit) + K_MAX = chunk_size_limit + for k in range(0, K_MAX, BLOCK_SIZE_K): + # For some reason setting mask to (offs_m[:, None] < chunk_size_limit) is much slower + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_k[None, :] < K_MAX - k), other=0.0) + dout = tl.load(dout_ptrs, mask=(offs_k[:, None] < K_MAX - k) & (offs_n[None, :] < hdim), other=0.0) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < K_MAX - k, other=0.0).to(tl.float32) + cb *= tl.exp(dA_cs_k[None, :] - dA_cs_m[:, None]) + # If we don't have the (k + offs_k[None, :] < K_MAX) mask, for indices outside this range, + # we might have dA_cs_m = 0.0 and dA_cs_k very negative, and tl.exp will return inf. + # Multiplying with cb, which is 0.0 outside the range, will make the result NaN. + # This will cause NaN in acc, and hence NaN in dx and ddt. 
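+ # (Past K_MAX the masked load gives dA_cs_k = 0.0 while dA_cs_m inside the chunk can be
+ # very negative, so the exp factor overflows to inf and inf * 0.0 = NaN.)
+ # The mask below therefore does two things: (k + offs_k >= offs_m) keeps only dout
+ # positions at or after the x position within the chunk (causality of the forward scan),
+ # and (k + offs_k < K_MAX) re-zeroes those out-of-range columns so no NaN reaches tl.dot.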
+ mask = (k + offs_k[None, :] >= offs_m[:, None]) & (k + offs_k[None, :] < K_MAX) + cb = tl.where(mask, cb, 0.0) + cb = cb.to(dout_ptr.dtype.element_ty) + acc += tl.dot(cb, dout) + cb_ptrs += BLOCK_SIZE_K * stride_cb_csize_k + dout_ptrs += BLOCK_SIZE_K * stride_dout_seqlen + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + dx = acc * dt_m[:, None] + dx_ptr += pid_b * stride_dx_batch + pid_c * chunk_size * stride_dx_seqlen + pid_h * stride_dx_head + dx_ptrs = dx_ptr + (offs_m[:, None] * stride_dx_seqlen + offs_n[None, :] * stride_dx_hdim) + if HAS_D: + dout_res_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dout_res = tl.load(dout_res_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + dx += dout_res * D + tl.store(dx_ptrs, dx, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + ddt = tl.sum(acc * x, axis=1) + ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + + # if HAS_D: + # dout_new_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_csize + offs_n[None, :] * stride_dout_hdim) + # dout = tl.load(dout_new_ptrs, mask=(offs_m[:, None] < M) & (offs_n[None, :] < N), other=0.0).to(tl.float32) + # dD = tl.sum(x * dout, axis=0) + # tl.store(dD_ptr + offs_n * stride_dD_hdim, dD, mask=offs_n < N) + + +# Disabling HAS_DDA_CS for now since it's much slower +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 16}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 64}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 128}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 16}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 64}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 128}, num_stages=4, num_warps=8), + ], + key=['chunk_size', 'hdim'], +) +# @triton.heuristics({"BLOCK_SIZE_N": lambda args: max(triton.next_power_of_2(args["chunk_size"]), 16)}) +# @triton.heuristics({"BLOCK_SIZE_N": lambda args: 32}) +@triton.jit +def _chunk_scan_bwd_dcb_kernel( + # Pointers to matrices + x_ptr, dout_ptr, cb_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + dcb_ptr, ddA_cumsum_ptr, + # 
Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_n, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_dcb_batch, stride_dcb_chunk, stride_dcb_split, stride_dcb_group, stride_dcb_csize_m, stride_dcb_csize_n, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize_m, stride_ddA_cs_csize_n, + # Meta-parameters + HAS_DDA_CS: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_sg = tl.program_id(axis=2) + pid_s = pid_sg // ngroups + pid_g = pid_sg - pid_s * ngroups + num_pid_n = tl.cdiv(chunk_size, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_x_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dA_cs_head + if HAS_DDA_CS: + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + pid_g * stride_cb_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_ddA_cs_head + pid_m * stride_ddA_cs_csize_m + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_n * stride_dt_csize + if HAS_DDA_CS: + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_n * stride_ddA_cs_csize_n + + if pid_n * BLOCK_SIZE_N >= (pid_m + 1) * BLOCK_SIZE_M: + dcb_ptr += pid_b * stride_dcb_batch + pid_c * stride_dcb_chunk + pid_g * stride_dcb_group + pid_s * stride_dcb_split + dcb_ptrs = dcb_ptr + (offs_m[:, None] * stride_dcb_csize_m + offs_n[None, :] * stride_dcb_csize_n) + tl.store(dcb_ptrs, tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=dcb_ptr.dtype.element_ty), mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size)) + return + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + chunk_size_limit_n = min(chunk_size_limit, (pid_m + 1) * BLOCK_SIZE_M) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_DDA_CS: + cb = tl.load(cb_ptrs, 
mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size), other=0.0).to(tl.float32) + nheads_iter = min(nheads_per_program, nheads // ngroups - pid_s * nheads_per_program) + for h in range(nheads_iter): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n), other=0.0) + dcb = tl.dot(dout, x) + dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size, other=0.0).to(tl.float32) + dcb *= dt_n + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + dA_cs_n = tl.load(dA_cumsum_ptr + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size_limit, other=0.0).to(tl.float32) + dcb *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + if HAS_DDA_CS: + tl.static_assert(not HAS_SEQ_IDX, "HAS_SEQ_IDX not supported with HAS_DDA_CS yet") + ddA_cs = dcb * cb + mask = offs_m[:, None] >= offs_n[None, :] + 1 + ddA_cs = tl.where(mask, ddA_cs, 0.0) + ddA_cs = tl.cumsum(ddA_cs, axis=1) + ddA_cs = tl.where(mask, ddA_cs, 0.0) + ddA_cs = tl.sum(ddA_cs, axis=0) + tl.store(ddA_cumsum_ptrs + stride_ddA_cs_csize_n, ddA_cs, mask=offs_n < chunk_size - 1) + tl.store(ddA_cumsum_ptr, 0.0) + acc += dcb + dout_ptrs += stride_dout_head + x_ptrs += stride_x_head + dt_ptrs += stride_dt_head + dA_cumsum_ptr += stride_dA_cs_head + if HAS_DDA_CS: + ddA_cumsum_ptr += stride_ddA_cs_head + ddA_cumsum_ptrs += stride_ddA_cs_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + if HAS_SEQ_IDX: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_n = tl.load(seq_idx_ptr + offs_n * stride_seq_idx_seqlen, mask=offs_n < chunk_size_limit, other=-2) + acc = tl.where(seq_idx_m[:, None] == seq_idx_n[None, :], acc, 0.0) + mask = offs_m[:, None] >= offs_n[None, :] + acc = tl.where(mask, acc, 0.0) + dcb_ptr += pid_b * stride_dcb_batch + pid_c * stride_dcb_chunk + pid_g * stride_dcb_group + pid_s * stride_dcb_split + dcb_ptrs = dcb_ptr + (offs_m[:, None] * stride_dcb_csize_m + offs_n[None, :] * stride_dcb_csize_n) + tl.store(dcb_ptrs, acc, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size)) + + +# Not numerically stable and should not be used. Leaving here for reference. 
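+# (That kernel recovers ddA_cumsum directly from dout, out and the x * D skip term;
+# the numerically stable alternatives further down (_chunk_scan_bwd_ddAcs_stable*_kernel)
+# rebuild it from x, dout, dt, dA_cumsum and cb with a running cumulative sum instead.)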
+@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32}), + triton.Config({'BLOCK_SIZE_M': 64}), + triton.Config({'BLOCK_SIZE_M': 128}), + triton.Config({'BLOCK_SIZE_M': 256}), + ], + key=["chunk_size", "hdim"], +) +@triton.jit +def _chunk_scan_bwd_ddAcs_unstable_kernel( + # Pointers to matrices + dout_ptr, out_ptr, dt_ptr, ddt_ptr, x_ptr, D_ptr, + ddA_cumsum_ptr, dD_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_D_head, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_csize, stride_dD_hdim, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + SUBTRACT_DDTDT: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + if HAS_D: + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_N) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + out_ptrs = out_ptr + (offs_m[:, None] * stride_out_seqlen + offs_n[None, :] * stride_out_hdim) + if HAS_D: + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + if D_HAS_HDIM: + dD_ptrs = dD_ptr + offs_n * stride_dD_hdim + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + out = tl.load(out_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if HAS_D: + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + dD = tl.sum(dout * x, axis=0) + tl.store(dD_ptrs, dD, mask=offs_n < hdim) + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + dD = tl.sum(dout * x) + tl.store(dD_ptr, dD) + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + out -= x * D + ddA_cs = tl.sum(dout * out, axis=1) + if SUBTRACT_DDTDT: + dt = tl.load(dt_ptr + offs_m * stride_dt_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + ddt = tl.load(ddt_ptr + offs_m * stride_ddt_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + ddA_cs -= 
dt * ddt + tl.store(ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size) + + +@triton.autotune( + configs=[ + # triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 16}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 16}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 128}, num_stages=4, num_warps=8), + ], + key=['chunk_size', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_ddAcs_stable_kernel_old( + # Pointers to matrices + x_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, cb_ptr, + ddAcs_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_n, + stride_ddAcs_batch, stride_ddAcs_chunk, stride_ddAcs_head, stride_ddAcs_csize_m, stride_ddAcs_csize_n, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(chunk_size, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_n * stride_dt_csize + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + + chunk_size_limit = min(chunk_size, 
seqlen - pid_c * chunk_size) + chunk_size_limit_n = min(chunk_size_limit, (pid_m + 1) * BLOCK_SIZE_M) + # Doing a matmul loop with cumsum later on will cause Triton to crash + # Instead we do just one big matmul + # acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + # for k in range(0, hdim, BLOCK_SIZE_K): + # dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim - k), other=0.0) + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim - k) & (offs_n[None, :] < chunk_size_limit), other=0.0) + # acc += tl.dot(dout, x) + # dout_ptrs += BLOCK_SIZE_K * stride_dout_hdim + # x_ptrs += BLOCK_SIZE_K * stride_x_hdim + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n), other=0.0) + acc = tl.dot(dout, x) + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size), other=0.0).to(tl.float32) + acc *= cb + dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size, other=0.0).to(tl.float32) + acc *= dt_n + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dA_cs_n = tl.load(dA_cumsum_ptr + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size, other=0.0).to(tl.float32) + acc *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + mask = offs_m[:, None] >= offs_n[None, :] + 1 + acc = tl.where(mask, acc, 0.0) + acc = tl.cumsum(acc, axis=1) + acc = tl.where(mask, acc, 0.0) + ddA_cs = tl.sum(acc, axis=0) + ddAcs_ptr += pid_b * stride_ddAcs_batch + pid_c * stride_ddAcs_chunk + pid_h * stride_ddAcs_head + pid_m * stride_ddAcs_csize_m + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + ddAcs_ptrs = ddAcs_ptr + offs_n * stride_ddAcs_csize_n + tl.store(ddAcs_ptrs + stride_ddAcs_csize_n, ddA_cs, mask=offs_n < chunk_size - 1) + tl.store(ddAcs_ptr, 0.0) + + # offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, 64) + # offs_k = tl.arange(0, BLOCK_SIZE_K) + # dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + # x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + # dt_ptrs = dt_ptr + offs_n * stride_dt_csize + # cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + + # chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + # chunk_size_limit_n = min(chunk_size_limit, (pid_m + 1) * BLOCK_SIZE_M) + # rowsum = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32) + # dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + # dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # ddAcs_ptr += pid_b * stride_ddAcs_batch + pid_c * stride_ddAcs_chunk + pid_h * stride_ddAcs_head + pid_m * stride_ddAcs_csize_m + # ddAcs_ptrs = ddAcs_ptr + offs_n * stride_ddAcs_csize_n + # for n in range(0, chunk_size_limit_n, 64): + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n - n), other=0.0) + # acc = tl.dot(dout, x) + # cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size - n), other=0.0).to(tl.float32) + # acc *= cb + # dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size - n, other=0.0).to(tl.float32) + # acc *= dt_n + # dA_cs_n = tl.load(dA_cumsum_ptr + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size - n, other=0.0).to(tl.float32) 
+ # acc *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + # mask = offs_m[:, None] >= offs_n[None, :] + 1 + n + # acc = tl.where(mask, acc, 0.0) + # acc = tl.cumsum(acc, axis=1) + # acc = tl.where(mask, acc, 0.0) + # ddA_cs = tl.sum(acc, axis=0) + # tl.store(ddAcs_ptrs, ddA_cs, mask=offs_n < chunk_size - 1 - n) + # # tl.store(ddAcs_ptr, 0.0) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + ], + key=['chunk_size', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_ddAcs_stable_kernel( + # Pointers to matrices + x_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, cb_ptr, + ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_n, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize_m, stride_ddA_cs_csize_n, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + pid_m * stride_ddA_cs_csize_m + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_n * stride_dt_csize + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + ddAcs_ptrs = ddA_cumsum_ptr + offs_n * stride_ddA_cs_csize_n + tl.store(ddA_cumsum_ptr, 0.0) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + rowsum = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32) + dout = 
tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # Actually hi is (pid_m + 1) * BLOCK_SIZE_M - 1 but subtracting 1 makes it slower + lo, hi = 0, (pid_m + 1) * BLOCK_SIZE_M + # lo, hi = 0, chunk_size + for start_n in range(lo, hi, BLOCK_SIZE_N): + start_n = tl.multiple_of(start_n, BLOCK_SIZE_N) + # Doing a matmul loop with cumsum later on will cause Triton to crash + # Instead we do just one big matmul + # acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + # for k in range(0, hdim, BLOCK_SIZE_K): + # dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim - k), other=0.0) + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim - k) & (offs_n[None, :] < chunk_size_limit), other=0.0) + # acc += tl.dot(dout, x) + # dout_ptrs += BLOCK_SIZE_K * stride_dout_hdim + # x_ptrs += BLOCK_SIZE_K * stride_x_hdim + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n), other=0.0) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit - start_n), other=0.0) + acc = tl.dot(dout, x) + dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size - start_n, other=0.0).to(tl.float32) + acc *= dt_n + # If there's seq_idx, we already zero'ed out cb[i, j] for seq_idx[i] != seq_idx[j] + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size - start_n), other=0.0).to(tl.float32) + acc *= cb + dA_cs_n = tl.load(dA_cumsum_ptr + start_n + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size - start_n, other=0.0).to(tl.float32) + acc *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + mask = offs_m[:, None] >= start_n + offs_n[None, :] + 1 + acc = tl.where(mask, acc, 0.0) + rowsum_new = rowsum + tl.sum(acc, axis=1) + acc = rowsum[:, None] + tl.cumsum(acc, axis=1) + rowsum = rowsum_new + acc = tl.where(mask, acc, 0.0) + ddA_cs = tl.sum(acc, axis=0) + tl.store(ddAcs_ptrs + stride_ddA_cs_csize_n, ddA_cs, mask=offs_n < chunk_size - start_n - 1) + x_ptrs += BLOCK_SIZE_N * stride_x_seqlen + dt_ptrs += BLOCK_SIZE_N * stride_dt_csize + cb_ptrs += BLOCK_SIZE_N * stride_cb_csize_n + ddAcs_ptrs += BLOCK_SIZE_N * stride_ddA_cs_csize_n + + # Need to zero out the rest, since we'll be summing the rows together + for start_n in range(hi, chunk_size, BLOCK_SIZE_N): + tl.store(ddAcs_ptrs + stride_ddA_cs_csize_n, tl.zeros((BLOCK_SIZE_N,), dtype=tl.float32), mask=offs_n < chunk_size - start_n - 1) + ddAcs_ptrs += BLOCK_SIZE_N * stride_ddA_cs_csize_n + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'dstate', 'hdim'], +) +@triton.jit +def 
_chunk_scan_bwd_ddAcs_prev_kernel( + # Pointers to matrices + dout_ptr, prev_states_ptr, C_ptr, dA_cumsum_ptr, seq_idx_ptr, + ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, dstate, hdim, + batch, seqlen, nchunks, nheads_ngroups_ratio, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_prev_states_batch, stride_prev_states_chunk, stride_prev_states_head, stride_prev_states_hdim, stride_prev_states_dstate, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + prev_states_ptr += pid_b * stride_prev_states_batch + pid_c * stride_prev_states_chunk + pid_h * stride_prev_states_head + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + (pid_h // nheads_ngroups_ratio) * stride_C_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_prev_states_dstate + offs_k[:, None] * stride_prev_states_hdim) + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_n[None, :] * stride_C_dstate) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0) + prev_states = prev_states.to(dout_ptrs.dtype.element_ty) + acc = tl.dot(dout, prev_states) + c = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + ddA_cs = tl.sum(acc * c, axis=1) + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_m) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + scale = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + ddA_cs *= scale + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + + +def 
_chunk_scan_fwd(cb, x, dt, dA_cumsum, C, states, D=None, z=None, seq_idx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = C.shape + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert states.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + # Allocates output. + out = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + if z is not None: + out_x = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + assert out_x.stride() == out.stride() + else: + out_x = None + grid = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + z_strides = ((z.stride(0), z.stride(1), z.stride(2), z.stride(3)) + if z is not None else (0, 0, 0, 0)) + _chunk_scan_fwd_kernel[grid]( + cb, x, z, out, out_x, dt, dA_cumsum, seq_idx, C, states, D, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + z_strides[0], z_strides[1], z_strides[2], z_strides[3], + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), states.stride(4), + D.stride(0) if D is not None else 0, + True, + D is not None, + D.dim() == 2 if D is not None else True, + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + HAS_Z=z is not None, + HAS_SEQ_IDX=seq_idx is not None, + IS_TRITON_22=TRITON_22, + ) + return out, out_x + + +def _chunk_scan_fwd_wip(cb, x, dt, dA_cumsum, C, B, states, D=None, z=None, seq_idx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = C.shape + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + assert B.shape == C.shape + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert states.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + # Allocates output. 
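+ # (out_x, when allocated, must share out's memory layout: the kernel launch below passes
+ # only out's strides and uses them for both output buffers, hence the stride assert.)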
+ out = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + if z is not None: + out_x = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + assert out_x.stride() == out.stride() + else: + out_x = None + grid = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_N']), batch * nchunks, nheads) + z_strides = ((z.stride(0), z.stride(1), z.stride(2), z.stride(3)) + if z is not None else (0, 0, 0, 0)) + _chunk_scan_fwd_kernel_wip[grid]( + cb, x, z, out, out_x, dt, dA_cumsum, seq_idx, C, B, states, D, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + z_strides[0], z_strides[1], z_strides[2], z_strides[3], + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), states.stride(4), + D.stride(0) if D is not None else 0, + D is not None, + D.dim() == 2 if D is not None else True, + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + BLOCK_SIZE_M=128, + HAS_Z=z is not None, + HAS_SEQ_IDX=seq_idx is not None, + ) + return out, out_x + + +def _chunk_scan_bwd_dz(x, z, out, dout, chunk_size, has_ddAcs=True, D=None, dz=None, recompute_output=False): + batch, seqlen, nheads, headdim = x.shape + assert z.shape == x.shape + assert out.shape == x.shape + assert dout.shape == out.shape + nchunks = math.ceil(seqlen / chunk_size) + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert D.stride(-1) == 1 + if has_ddAcs: + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=x.device, dtype=torch.float32) + if D is not None: + BLOCK_SIZE_min = 32 + dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_min), batch, nchunks, nheads, + headdim if D.dim() == 2 else 1, device=D.device, dtype=torch.float32) + else: + dD = None + if dz is not None: + assert dz.shape == z.shape + else: + dz = torch.empty_like(z) + if recompute_output: + outz = torch.empty_like(x) + dout_x = torch.empty_like(dout) + dD_strides = ((dD.stride(0), dD.stride(1), dD.stride(2), dD.stride(3), dD.stride(4)) + if D is not None else (0, 0, 0, 0, 0)) + grid_dz = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_dz_kernel[grid_dz]( + dout, out, z, x, D, outz if recompute_output else None, + dz, dout_x, dD, ddA_cumsum if has_ddAcs else None, + chunk_size, headdim, + batch, seqlen, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + z.stride(0), z.stride(1), z.stride(2), z.stride(3), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + D.stride(0) if D is not None else 0, + *((outz.stride(0), outz.stride(1), outz.stride(2), outz.stride(3)) if recompute_output else (0, 0, 0, 0)), + dz.stride(0), dz.stride(1), dz.stride(2), dz.stride(3), + dout_x.stride(0), dout_x.stride(1), dout_x.stride(2), dout_x.stride(3), + dD_strides[1], dD_strides[2], dD_strides[3], dD_strides[0], dD_strides[4], + *((ddA_cumsum.stride(0), 
ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3)) + if has_ddAcs else (0, 0, 0, 0)), + D is not None, + D.dim() == 2 if D is not None else True, + has_ddAcs, + BLOCK_SIZE_N=max(triton.next_power_of_2(headdim), 16), + RECOMPUTE_OUTPUT=recompute_output, + ) + if D is not None: + BLOCK_SIZE_actual = _chunk_scan_bwd_dz_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + if D.dim() == 1: + dD = rearrange(dD, "h 1 -> h") + return_vals = (dz, dout_x, dD, ddA_cumsum) if has_ddAcs else (dz, dout_x, dD) + return return_vals if not recompute_output else (*return_vals, outz) + + +def _chunk_scan_bwd_dstates(C, dA_cumsum, dout, seq_idx=None, dtype=None): + batch, seqlen, nheads, headdim = dout.shape + _, _, nchunks, chunk_size = dA_cumsum.shape + _, _, ngroups, dstate = C.shape + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + dtype = C.dtype if dtype is None else dtype + dprev_states = torch.empty(batch, nchunks, nheads, headdim, dstate, device=C.device, dtype=dtype) + grid_dstates = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(C.device.index): + _chunk_scan_bwd_dstates_kernel[grid_dstates]( + dout, C, dprev_states, dA_cumsum, seq_idx, + headdim, dstate, chunk_size, + batch, seqlen, nchunks, nheads // ngroups, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + dprev_states.stride(0), dprev_states.stride(1), dprev_states.stride(2), dprev_states.stride(3), dprev_states.stride(4), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + HAS_SEQ_IDX=seq_idx is not None, + ) + return dprev_states + + +def _chunk_scan_bwd_dC(prev_states, dA_cumsum, dout, seq_idx=None, C=None, ngroups=1): + batch, nchunks, nheads, headdim, dstate = prev_states.shape + _, seqlen, _, _ = dout.shape + _, _, _, chunk_size = dA_cumsum.shape + assert prev_states.shape == (batch, nchunks, nheads, headdim, dstate) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == (batch, seqlen, nheads, headdim) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if C is not None: + assert C.shape == (batch, seqlen, ngroups, dstate) + C_strides = (C.stride(0), C.stride(1), C.stride(2), C.stride(3)) + ddA_cumsum_prev = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + ddA_cumsum_prev_strides = (ddA_cumsum_prev.stride(0), ddA_cumsum_prev.stride(2), ddA_cumsum_prev.stride(1), ddA_cumsum_prev.stride(3)) + else: + C_strides = (0, 0, 0, 0) + ddA_cumsum_prev = None + ddA_cumsum_prev_strides = (0, 0, 0, 0) + nheads_ngroups_ratio = nheads // ngroups + sm_count = torch.cuda.get_device_properties(dout.device).multi_processor_count + nheads_per_program = max(min(math.ceil(batch * nchunks * nheads / sm_count), nheads_ngroups_ratio), 1) + nsplits = triton.cdiv(nheads_ngroups_ratio, nheads_per_program) + dC = torch.empty(batch, seqlen, nsplits, ngroups, dstate, device=dout.device, dtype=torch.float32) + grid_dc = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) 
* triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nsplits * ngroups) + with torch.cuda.device(dout.device.index): + _chunk_scan_bwd_dc_kernel[grid_dc]( + dout, prev_states, C, dA_cumsum, seq_idx, dC, ddA_cumsum_prev, + chunk_size, dstate, headdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + prev_states.stride(0), prev_states.stride(1), prev_states.stride(2), prev_states.stride(3), prev_states.stride(4), + *C_strides, + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dC.stride(0), dC.stride(1), dC.stride(2), dC.stride(3), dC.stride(4), + *ddA_cumsum_prev_strides, + HAS_DDA_CS=ddA_cumsum_prev is not None, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + dC = dC.sum(2) + return dC if C is None else (dC, ddA_cumsum_prev) + + +def _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=None, CB=None, ngroups=1): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dout.shape == x.shape + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if CB is not None: + assert CB.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + CB_strides = (CB.stride(0), CB.stride(1), CB.stride(2), CB.stride(3), CB.stride(4)) + BLOCK_SIZE_M_min = 16 + ddA_cumsum = torch.empty(batch, nheads, nchunks, triton.cdiv(chunk_size, BLOCK_SIZE_M_min), + chunk_size, device=x.device, dtype=torch.float32) + ddA_cumsum_strides = (ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), ddA_cumsum.stride(4)) + else: + CB_strides = (0, 0, 0, 0, 0) + ddA_cumsum = None + ddA_cumsum_strides = (0, 0, 0, 0, 0) + nheads_ngroups_ratio = nheads // ngroups + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + nheads_per_program = max(min(math.ceil(batch * nchunks * nheads / sm_count), nheads_ngroups_ratio), 1) + nsplits = triton.cdiv(nheads_ngroups_ratio, nheads_per_program) + dcb = torch.empty(batch, nchunks, nsplits, ngroups, chunk_size, chunk_size, device=x.device, dtype=torch.float32) + grid_dcb = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(chunk_size, META['BLOCK_SIZE_N']), + batch * nchunks, nsplits * ngroups) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_dcb_kernel[grid_dcb]( + x, dout, CB, dt, dA_cumsum, seq_idx, dcb, ddA_cumsum, + chunk_size, headdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + *CB_strides, + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dcb.stride(0), dcb.stride(1), dcb.stride(2), dcb.stride(3), dcb.stride(4), dcb.stride(5), + *ddA_cumsum_strides, + HAS_DDA_CS=ddA_cumsum is not None, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + dcb = dcb.sum(2) + if ddA_cumsum is not None: + BLOCK_SIZE_M_actual = _chunk_scan_bwd_dcb_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_M_actual - 1) // BLOCK_SIZE_M_actual + ddA_cumsum = 
ddA_cumsum[:, :, :, :n_valid_blocks].sum(dim=3) + return dcb if CB is None else (dcb, ddA_cumsum) + + +def _chunk_scan_bwd_dx(cb, x, dt, dA_cumsum, dout, D=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + ngroups = cb.shape[2] + assert nheads % ngroups == 0 + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dout.shape == x.shape + # if D is not None: + # BLOCK_SIZE_M_min = 32 + # dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_M_min), batch, nchunks, nheads, headdim, device=D.device, dtype=torch.float32) + # else: + # dD = None + dx = torch.empty_like(x) + ddt = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + grid_dx = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_dx_kernel[grid_dx]( + x, cb, dout, dt, dA_cumsum, D, dx, ddt, # dD, + chunk_size, headdim, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(-1), cb.stride(-2), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + D.stride(0) if D is not None else 0, + dx.stride(0), dx.stride(1), dx.stride(2), dx.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + # dD.stride(1) if dD is not None else 0, dD.stride(2) if dD is not None else 0, dD.stride(3) if dD is not None else 0, dD.stride(4) if dD is not None else 0, dD.stride(0) if dD is not None else 0, + D is not None, + D.dim() == 2 if D is not None else True, + ) + # if D is not None: + # BLOCK_SIZE_actual = _chunk_scan_bwd_dx_kernel.best_config.kwargs["BLOCK_SIZE_M"] + # n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + # dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + return dx, ddt.to(dtype=dt.dtype) + + +def _chunk_scan_bwd_ddAcs_unstable(x, dt, out, dout, ddt, D=None, subtract_ddtdt=True): + """Not numerically stable and should not be used. Leaving here for reference. 
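+ It computes ddA_cumsum directly from dout and out (removing the x * D skip contribution
+ when D is given, and optionally subtracting dt * ddt), whereas the
+ _chunk_scan_bwd_ddAcs_stable variants recompute it from x, dout, dt, dA_cumsum and cb
+ via a running cumulative sum.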
+ """ + + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert ddt.shape == dt.shape + assert out.shape == x.shape + assert dout.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + ddA_cumsum = torch.empty_like(dt) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + if D is not None: # Triton gives wrong results if we write to the same location + BLOCK_SIZE_min = 32 + dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_min), batch, nchunks, nheads, + headdim if D.dim() == 2 else 1, device=D.device, dtype=torch.float32) + else: + dD = None + dD_strides = ((dD.stride(0), dD.stride(1), dD.stride(2), dD.stride(3), dD.stride(4)) + if D is not None else (0, 0, 0, 0, 0)) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_ddAcs_unstable_kernel[grid_ddtcs]( + dout, out, dt, ddt, x, D, ddA_cumsum, dD, + chunk_size, headdim, + batch, seqlen, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + D.stride(0) if D is not None else 0, + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), + dD_strides[1], dD_strides[2], dD_strides[3], dD_strides[0], dD_strides[4], + D is not None, + D.dim() == 2 if D is not None else True, + subtract_ddtdt, + BLOCK_SIZE_N=max(triton.next_power_of_2(headdim), 16), + ) + if D is not None: + BLOCK_SIZE_actual = _chunk_scan_bwd_ddAcs_unstable_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + if D.dim() == 1: + dD = rearrange(dD, "h 1 -> h") + return ddA_cumsum, dD + + +def _chunk_scan_bwd_ddAcs_stable_old(x, dt, dA_cumsum, dout, cb): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == x.shape + assert dA_cumsum.shape == dt.shape + ngroups = cb.shape[2] + assert nheads % ngroups == 0 + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + BLOCK_SIZE_M_min = 16 + ddA_cumsum = torch.empty(batch, nheads, nchunks, triton.cdiv(chunk_size, BLOCK_SIZE_M_min), + chunk_size, device=x.device, dtype=torch.float32) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_ddAcs_stable_kernel_old[grid_ddtcs]( + x, dout, dt, dA_cumsum, cb, ddA_cumsum, + chunk_size, headdim, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), ddA_cumsum.stride(4), + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + BLOCK_SIZE_N=max(triton.next_power_of_2(chunk_size), 16), + ) + BLOCK_SIZE_M_actual = 
_chunk_scan_bwd_ddAcs_stable_kernel_old.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_M_actual - 1) // BLOCK_SIZE_M_actual + ddA_cumsum = ddA_cumsum[:, :, :, :n_valid_blocks].sum(dim=3) + return ddA_cumsum + + +def _chunk_scan_bwd_ddAcs_stable(x, dt, dA_cumsum, dout, cb): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == x.shape + assert dA_cumsum.shape == dt.shape + ngroups = cb.shape[2] + assert nheads % ngroups == 0 + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + BLOCK_SIZE_M_min = 32 + ddA_cumsum = torch.empty(batch, nheads, nchunks, triton.cdiv(chunk_size, BLOCK_SIZE_M_min), + chunk_size, device=x.device, dtype=torch.float32) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_ddAcs_stable_kernel[grid_ddtcs]( + x, dout, dt, dA_cumsum, cb, ddA_cumsum, + chunk_size, headdim, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), ddA_cumsum.stride(4), + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + BLOCK_SIZE_M_actual = _chunk_scan_bwd_ddAcs_stable_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_M_actual - 1) // BLOCK_SIZE_M_actual + ddA_cumsum = ddA_cumsum[:, :, :, :n_valid_blocks].sum(dim=3) + return ddA_cumsum + + +def _chunk_scan_bwd_ddAcs_prev(prev_states, C, dout, dA_cumsum, seq_idx=None): + batch, nchunks, nheads, headdim, dstate = prev_states.shape + _, seqlen, _, _ = dout.shape + _, _, _, chunk_size = dA_cumsum.shape + assert prev_states.shape == (batch, nchunks, nheads, headdim, dstate) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == (batch, seqlen, nheads, headdim) + ngroups = C.shape[2] + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + ddA_cumsum_prev = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + grid_ddAcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(dout.device.index): + _chunk_scan_bwd_ddAcs_prev_kernel[grid_ddAcs]( + dout, prev_states, C, dA_cumsum, seq_idx, ddA_cumsum_prev, + chunk_size, dstate, headdim, + batch, seqlen, nchunks, nheads // ngroups, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + prev_states.stride(0), prev_states.stride(1), prev_states.stride(2), prev_states.stride(3), prev_states.stride(4), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + ddA_cumsum_prev.stride(0), ddA_cumsum_prev.stride(2), ddA_cumsum_prev.stride(1), ddA_cumsum_prev.stride(3), + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) 
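+    # Reference sketch (cf. chunk_scan_ref below; not executed here): the inter-chunk term of the forward
+    # pass is out_prev = einsum("bclhn,bchpn->bclhp", C_chunked, prev_states) * exp(dA_cumsum), where
+    # C_chunked is C reshaped to (batch, nchunks, chunk_size, nheads, dstate), so its gradient wrt
+    # dA_cumsum is ddA_cumsum_prev = einsum("bclhp,bclhp->bhcl", out_prev, dout). The kernel above
+    # computes this in fused form, masked via seq_idx when sequences are packed.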
+ return ddA_cumsum_prev + + +class ChunkScanFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, B, C, x, dt, dA_cumsum, prev_states, D=None, z=None): + # Check constraints. + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + assert B.shape == (batch, seqlen, ngroups, dstate) + _, _, nchunks, chunk_size = dt.shape + assert seqlen == nchunks * chunk_size + assert C.shape == B.shape + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert prev_states.shape == (batch, nchunks, nheads, headdim, dstate) + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if x.stride(-1) != 1 and x.stride(1) != 1: # Either M or K dimension should be contiguous + x = x.contiguous() + if z is not None and z.stride(-1) != 1 and z.stride(1) != 1: # Either M or K dimension should be contiguous + z = z.contiguous() + if D is not None and D.stride(-1) != 1: + D = D.contiguous() + CB = _bmm_chunk_fwd(C, B, chunk_size) + out, out_x = _chunk_scan_fwd(CB, x, dt, dA_cumsum, C, prev_states, D=D, z=z) + ctx.save_for_backward(out if z is None else out_x, B, C, CB, x, dt, dA_cumsum, prev_states, D, z) + return out + + @staticmethod + def backward(ctx, dout): + if dout.stride(-1) != 1: + dout = dout.contiguous() + out, B, C, CB, x, dt, dA_cumsum, prev_states, D, z = ctx.saved_tensors + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert dout.shape == (batch, seqlen, nheads, headdim) + if z is not None: + dz, dout, dD, ddA_cumsum = _chunk_scan_bwd_dz(x, z, out, dout, chunk_size=chunk_size, D=D) + else: + dz = None + dprev_states = _chunk_scan_bwd_dstates(C, dA_cumsum, dout, dtype=prev_states.dtype) + dC = _chunk_scan_bwd_dC(prev_states, dA_cumsum, dout, ngroups=ngroups) + dC = dC.to(C.dtype) + dCB = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, ngroups=ngroups) + dCB = dCB.to(CB.dtype) + dB = _bmm_chunk_bwd(C, dCB) + dC = _bmm_chunk_bwd(B, rearrange(dCB, "... l s -> ... s l"), residual=dC) + dx, ddt = _chunk_scan_bwd_dx(CB, x, dt, dA_cumsum, dout, D=D) + # Formula for ddA_cumsum, assuming out is the output of the forward pass before adding x * D. + # ddA_cumsum = torch.einsum("bclhp,bclhp->bhcl", out.float(), dout.float()) - ddt * dt + if z is not None: + ddA_cumsum -= ddt * dt + else: # If z is not None, we already calculated ddA_cumsum and dD when computing dz + ddA_cumsum, dD = _chunk_scan_bwd_ddAcs_unstable(x, dt, out, dout, ddt, D=D) + ddA_cumsum = ddA_cumsum.to(dA_cumsum.dtype) + return dB, dC, dx, ddt, ddA_cumsum, dprev_states, dD, dz + + +def chunk_scan(B, C, x, dt, dA_cumsum, prev_states, D=None, z=None): + """ + prev_states contains the initial_states at index 0, and the state for the next-to-last chunk at index -1. 
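+    Per chunk, the output is the causal intra-chunk term (C B^T decayed by exp(dA_cumsum[l] - dA_cumsum[s])
+    and weighted by dt) applied to x, plus the inter-chunk term C @ prev_states decayed by exp(dA_cumsum),
+    with the optional D skip connection and z (SiLU) gating applied on top; see chunk_scan_ref below for the
+    einsum reference.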
+ Argument: + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + prev_states: (batch, nchunks, nheads, headdim, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + return ChunkScanFn.apply(B, C, x, dt, dA_cumsum, prev_states, D, z) + + +def chunk_scan_ref(B, C, x, dt, dA_cumsum, prev_states, D=None, z=None): + """ + Argument: + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + prev_states: (batch, nchunks, nheads, headdim, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + assert B.shape == (batch, seqlen, ngroups, dstate) + _, _, nchunks, chunk_size = dt.shape + assert seqlen == nchunks * chunk_size + assert C.shape == B.shape + B = repeat(B, "b l g d -> b l (g h) d", h=nheads // ngroups) + C = repeat(C, "b l g d -> b l (g h) d", h=nheads // ngroups) + CB = torch.einsum("bclhn,bcshn->bchls", rearrange(C, "b (c l) h n -> b c l h n", c=nchunks), + rearrange(B, "b (c s) h n -> b c s h n", c=nchunks)) + # (batch, nheads, nchunks, chunksize, chunksize) + dt_segment_sum = dA_cumsum[:, :, :, :, None] - dA_cumsum[:, :, :, None, :] + decay = torch.exp(dt_segment_sum) + scores_decay = CB * rearrange(decay, "b h c l s -> b c h l s") + causal_mask = torch.tril(torch.ones(chunk_size, chunk_size, device=x.device, dtype=bool), diagonal=0) + scores_decay = scores_decay.masked_fill(~causal_mask, 0) + out = torch.einsum('bchls,bhcs,bcshp->bclhp', scores_decay.to(x.dtype), dt.to(x.dtype), + rearrange(x, "b (c s) h p -> b c s h p", c=nchunks)) + state_decay_out = torch.exp(rearrange(dA_cumsum, "b h c l -> b c l h 1")) + out_prev = torch.einsum('bclhn,bchpn->bclhp', rearrange(C, "b (c l) h n -> b c l h n", c=nchunks), + prev_states.to(C.dtype)) * state_decay_out + out = out + out_prev + out = rearrange(out, "b c l h p -> b (c l) h p") + if D is not None: + if D.dim() == 1: + D = rearrange(D, "h -> h 1") + out = out + x * D + return out if z is None else out * F.silu(z) diff --git a/mamba/build/lib/mamba_ssm/ops/triton/ssd_chunk_state.py b/mamba/build/lib/mamba_ssm/ops/triton/ssd_chunk_state.py new file mode 100644 index 0000000000000000000000000000000000000000..c4971c5f5a6fdc0fca92e115ae2c0b9319b8107a --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/ssd_chunk_state.py @@ -0,0 +1,988 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
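+# Triton kernels for the chunked SSD state computation: the per-chunk cumulative sum of dt * A
+# (_chunk_cumsum_*), the per-chunk state update
+#     states[p, n] = sum_l exp(dA_cumsum[-1] - dA_cumsum[l]) * dt[l] * x[l, p] * B[l, n]
+# (_chunk_state_*), their backward passes, a variable-length variant, and PyTorch reference
+# implementations. A minimal usage sketch (shapes as in the chunk_state docstring below):
+#     dA_cumsum, dt_chunked = _chunk_cumsum_fwd(dt, A, chunk_size)  # each (batch, nheads, nchunks, chunk_size)
+#     states = chunk_state(B, x, dt_chunked, dA_cumsum)             # (batch, nchunks, nheads, headdim, dstate)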
+ +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.softplus import softplus + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_H': 1}), + triton.Config({'BLOCK_SIZE_H': 2}), + triton.Config({'BLOCK_SIZE_H': 4}), + triton.Config({'BLOCK_SIZE_H': 8}), + triton.Config({'BLOCK_SIZE_H': 16}), + triton.Config({'BLOCK_SIZE_H': 32}), + triton.Config({'BLOCK_SIZE_H': 64}), + ], + key=['chunk_size', 'nheads'], +) +@triton.jit +def _chunk_cumsum_fwd_kernel( + # Pointers to matrices + dt_ptr, A_ptr, dt_bias_ptr, dt_out_ptr, dA_cumsum_ptr, + # Matrix dimension + batch, seqlen, nheads, chunk_size, + dt_min, dt_max, + # Strides + stride_dt_batch, stride_dt_seqlen, stride_dt_head, + stride_A_head, + stride_dt_bias_head, + stride_dt_out_batch, stride_dt_out_chunk, stride_dt_out_head, stride_dt_out_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + BLOCK_SIZE_H: tl.constexpr, BLOCK_SIZE_CHUNK: tl.constexpr, +): + pid_b = tl.program_id(axis=0) + pid_c = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + dt_ptr += pid_b * stride_dt_batch + pid_c * chunk_size * stride_dt_seqlen + dt_out_ptr += pid_b * stride_dt_out_batch + pid_c * stride_dt_out_chunk + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + + offs_h = pid_h * BLOCK_SIZE_H + tl.arange(0, BLOCK_SIZE_H) + offs_c = tl.arange(0, BLOCK_SIZE_CHUNK) + dt_ptrs = dt_ptr + (offs_h[:, None] * stride_dt_head + offs_c[None, :] * stride_dt_seqlen) + A_ptrs = A_ptr + offs_h * stride_A_head + dt_out_ptrs = dt_out_ptr + (offs_h[:, None] * stride_dt_out_head + offs_c[None, :] * stride_dt_out_csize) + dA_cs_ptrs = dA_cumsum_ptr + (offs_h[:, None] * stride_dA_cs_head + offs_c[None, :] * stride_dA_cs_csize) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + dt = tl.load(dt_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt_bias = tl.load(dt_bias_ptr + offs_h * stride_dt_bias_head, mask=offs_h < nheads, other=0.0).to(tl.float32) + dt += dt_bias[:, None] + if DT_SOFTPLUS: + dt = softplus(dt) + # As of Triton 2.2.0, tl.clamp is not available yet + # dt = tl.clamp(dt, dt_min, dt_max) + dt = tl.minimum(tl.maximum(dt, dt_min), dt_max) + dt = tl.where((offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), dt, 0.0) + tl.store(dt_out_ptrs, dt, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size)) + A = tl.load(A_ptrs, mask=offs_h < nheads, other=0.0).to(tl.float32) + dA = dt * A[:, None] + dA_cs = tl.cumsum(dA, axis=1) + tl.store(dA_cs_ptrs, dA_cs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_H': 1}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 2}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 4}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 8}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 16}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + 
triton.Config({'BLOCK_SIZE_H': 32}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 64}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + ], + key=['chunk_size', 'nheads'], +) +@triton.jit +def _chunk_cumsum_bwd_kernel( + # Pointers to matrices + ddA_ptr, ddt_out_ptr, dt_ptr, A_ptr, dt_bias_ptr, + ddt_ptr, dA_ptr, ddt_bias_ptr, + # Matrix dimensions + batch, seqlen, nheads, chunk_size, + dt_min, dt_max, + # Strides + stride_ddA_batch, stride_ddA_chunk, stride_ddA_head, stride_ddA_csize, + stride_ddt_out_batch, stride_ddt_out_chunk, stride_ddt_out_head, stride_ddt_out_csize, + stride_dt_batch, stride_dt_seqlen, stride_dt_head, + stride_A_head, + stride_dt_bias_head, + stride_ddt_batch, stride_ddt_seqlen, stride_ddt_head, + stride_dA_head, + stride_ddt_bias_head, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + BLOCK_SIZE_H: tl.constexpr, BLOCK_SIZE_CHUNK: tl.constexpr, +): + pid_b = tl.program_id(axis=0) + pid_c = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + ddt_out_ptr += pid_b * stride_ddt_out_batch + pid_c * stride_ddt_out_chunk + ddA_ptr += pid_b * stride_ddA_batch + pid_c * stride_ddA_chunk + dt_ptr += pid_b * stride_dt_batch + pid_c * chunk_size * stride_dt_seqlen + ddt_ptr += pid_b * stride_ddt_batch + pid_c * chunk_size * stride_ddt_seqlen + + offs_h = pid_h * BLOCK_SIZE_H + tl.arange(0, BLOCK_SIZE_H) + offs_c = tl.arange(0, BLOCK_SIZE_CHUNK) + ddt_out_ptrs = ddt_out_ptr + (offs_h[:, None] * stride_ddt_out_head + offs_c[None, :] * stride_ddt_out_csize) + ddA_ptrs = ddA_ptr + (offs_h[:, None] * stride_ddA_head + offs_c[None, :] * stride_ddA_csize) + dt_ptrs = dt_ptr + (offs_h[:, None] * stride_dt_head + offs_c[None, :] * stride_dt_seqlen) + ddt_ptrs = ddt_ptr + (offs_h[:, None] * stride_ddt_head + offs_c[None, :] * stride_ddt_seqlen) + A_ptrs = A_ptr + offs_h * stride_A_head + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + ddA = tl.load(ddA_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + ddt_out = tl.load(ddt_out_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + A = tl.load(A_ptrs, mask=offs_h < nheads, other=0.0).to(tl.float32) + ddt = ddA * A[:, None] + ddt_out + dt = tl.load(dt_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt_bias = tl.load(dt_bias_ptr + offs_h * stride_dt_bias_head, mask=offs_h < nheads, other=0.0).to(tl.float32) + dt += dt_bias[:, None] + if DT_SOFTPLUS: + dt_presoftplus = dt + dt = softplus(dt) + clamp_mask = (dt < dt_min) | (dt > dt_max) + # As of Triton 2.2.0, tl.clamp is not available yet + # dt = tl.clamp(dt, dt_min, dt_max) + dt = tl.minimum(tl.maximum(dt, dt_min), dt_max) + dt = tl.where((offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), dt, 0.0) + ddt = tl.where((offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), ddt, 0.0) + ddt = tl.where(clamp_mask, 0.0, ddt) + if DT_SOFTPLUS: + ddt = tl.where(dt_presoftplus <= 20.0, ddt * tl.sigmoid(dt_presoftplus), ddt) + tl.store(ddt_ptrs, ddt, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit)) + dA = tl.sum(ddA * dt, axis=1) + tl.atomic_add(dA_ptr + offs_h * stride_dA_head, dA, mask=offs_h < nheads) + if HAS_DT_BIAS: + ddt_bias = tl.sum(ddt, axis=1) + tl.atomic_add(ddt_bias_ptr + offs_h * stride_ddt_bias_head, ddt_bias, mask=offs_h < nheads) + + +@triton.autotune( + 
configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['hdim', 'dstate', 'chunk_size'], +) +@triton.jit +def _chunk_state_fwd_kernel( + # Pointers to matrices + x_ptr, b_ptr, states_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + # Matrix dimensions + hdim, dstate, chunk_size, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_states_batch, stride_states_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_hdim + offs_k[None, :] * stride_x_seqlen) + b_ptrs = b_ptr + (offs_n[None, :] * stride_b_dstate + offs_k[:, None] * stride_b_seqlen) + dt_ptrs = dt_ptr + offs_k * stride_dt_csize + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs = seq_idx_ptr + offs_k * stride_seq_idx_seqlen + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + if HAS_SEQ_IDX: + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, chunk_size_limit, BLOCK_SIZE_K): + x = tl.load(x_ptrs, 
mask=(offs_m[:, None] < hdim) & (offs_k[None, :] < chunk_size_limit - k), other=0.0) + b = tl.load(b_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_k = tl.load(seq_idx_ptrs, mask=offs_k < chunk_size_limit - k, other=-1) + dt_k = tl.load(dt_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp((dA_cs_last - dA_cs_k)) * dt_k + else: + scale = tl.where(seq_idx_k == seq_idx_last, tl.exp((dA_cs_last - dA_cs_k)) * dt_k, 0.0) + b *= scale[:, None] + b = b.to(x_ptr.dtype.element_ty) + acc += tl.dot(x, b) + x_ptrs += BLOCK_SIZE_K * stride_x_seqlen + b_ptrs += BLOCK_SIZE_K * stride_b_seqlen + dt_ptrs += BLOCK_SIZE_K * stride_dt_csize + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs += BLOCK_SIZE_K * stride_seq_idx_seqlen + states = acc.to(states_ptr.dtype.element_ty) + + states_ptr += pid_b * stride_states_batch + pid_c * stride_states_chunk + pid_h * stride_states_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + states_ptrs = states_ptr + (offs_m[:, None] * stride_states_hdim + offs_n[None, :] * stride_states_dstate) + c_mask = (offs_m[:, None] < hdim) & (offs_n[None, :] < dstate) + tl.store(states_ptrs, states, mask=c_mask) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_state_bwd_dx_kernel( + # Pointers to matrices + x_ptr, b_ptr, dstates_ptr, dt_ptr, dA_cumsum_ptr, + dx_ptr, ddt_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dstates_batch, stride_dstates_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_dt_batch, 
stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_dx_batch, stride_dx_seqlen, stride_dx_head, stride_dx_hdim, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + pid_h * stride_states_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + # Faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + offs_k = tl.arange(0, BLOCK_SIZE_DSTATE if BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_k[None, :] * stride_b_dstate) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_states_hdim + offs_k[:, None] * stride_states_dstate) + if BLOCK_SIZE_DSTATE <= 128: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc = tl.dot(b, dstates) + else: + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, dstate, BLOCK_SIZE_K): + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate - k), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc += tl.dot(b, dstates) + b_ptrs += BLOCK_SIZE_K * stride_b_dstate + dstates_ptrs += BLOCK_SIZE_K * stride_states_dstate + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + acc *= tl.exp(dA_cs_last - dA_cs_m)[:, None] + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & 
(offs_n[None, :] < hdim), other=0.0).to(tl.float32) + ddt = tl.sum(acc * x, axis=1) + ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + ddA_cs = -(ddt * dt_m) + ddA_cs_last = -tl.sum(ddA_cs) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + tl.atomic_add(ddA_cumsum_ptr + (chunk_size - 1) * stride_ddA_cs_csize, ddA_cs_last) + + dx = (acc * dt_m[:, None]).to(dx_ptr.dtype.element_ty) + dx_ptr += pid_b * stride_dx_batch + pid_c * chunk_size * stride_dx_seqlen + pid_h * stride_dx_head + dx_ptrs = dx_ptr + (offs_m[:, None] * stride_dx_seqlen + offs_n[None, :] * stride_dx_hdim) + tl.store(dx_ptrs, dx, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'dstate', 'hdim'], +) +@triton.jit +def _chunk_state_bwd_db_kernel( + # Pointers to matrices + x_ptr, dstates_ptr, b_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + db_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, dstate, hdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dstates_batch, stride_dstates_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_db_batch, stride_db_seqlen, stride_db_split, stride_db_group, stride_db_dstate, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_DDA_CS: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_sg = tl.program_id(axis=2) + pid_s = pid_sg // ngroups + pid_g = pid_sg - pid_s * ngroups + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_x_head + db_ptr += pid_b * stride_db_batch + pid_c * chunk_size * stride_db_seqlen + pid_g * 
stride_db_group + pid_s * stride_db_split + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_states_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dA_cs_head + if HAS_DDA_CS: + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + pid_g * stride_b_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_ddA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_k[None, :] * stride_x_hdim) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_states_dstate + offs_k[:, None] * stride_states_hdim) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + if HAS_DDA_CS: + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_n[None, :] * stride_b_dstate) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_DDA_CS: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + nheads_iter = min(nheads_per_program, nheads // ngroups - pid_s * nheads_per_program) + for h in range(nheads_iter): + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0) + dstates = dstates.to(x_ptrs.dtype.element_ty) + db = tl.dot(x, dstates) + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_last - dA_cs_m) + else: + scale = tl.where(seq_idx_m == seq_idx_last, tl.exp(dA_cs_last - dA_cs_m), 0.0) + db *= (scale * dt_m)[:, None] + if HAS_DDA_CS: + # This is the gradient wrt (dA_cs_last - dA_cs_m), i.e. 
the exclusive reverse cumsum + ddA_cs = tl.sum(db * b, axis=1) + tl.atomic_add(ddA_cumsum_ptrs + stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size - 1) + acc += db + x_ptrs += stride_x_head + dstates_ptrs += stride_states_head + dt_ptrs += stride_dt_head + dA_cumsum_ptr += stride_dA_cs_head + dA_cumsum_ptrs += stride_dA_cs_head + if HAS_DDA_CS: + ddA_cumsum_ptrs += stride_ddA_cs_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + # if HAS_SEQ_IDX: + # seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + # seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + # acc = tl.where(seq_idx_m[:, None] == seq_idx_last, acc, 0.0) + db_ptrs = db_ptr + (offs_m[:, None] * stride_db_seqlen + offs_n[None, :] * stride_db_dstate) + tl.store(db_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate)) + + +@triton.autotune( + configs=[ + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_state_bwd_ddAcs_stable_kernel( + # Pointers to 
matrices + x_ptr, b_ptr, dstates_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dstates_batch, stride_dstates_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + pid_h * stride_states_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + # Faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + offs_k = tl.arange(0, BLOCK_SIZE_DSTATE if BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_k[None, :] * stride_b_dstate) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_states_hdim + offs_k[:, None] * stride_states_dstate) + if BLOCK_SIZE_DSTATE <= 128: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc = tl.dot(b, dstates) + else: + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, dstate, BLOCK_SIZE_K): + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate - k), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc += tl.dot(b, dstates) + b_ptrs += BLOCK_SIZE_K * stride_b_dstate + dstates_ptrs += BLOCK_SIZE_K * stride_states_dstate + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size 
- 1) * stride_dA_cs_csize).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_last - dA_cs_m) + else: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + scale = tl.where(seq_idx_m == seq_idx_last, tl.exp(dA_cs_last - dA_cs_m), 0.0) + acc *= scale[:, None] + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + ddt = tl.sum(acc * x, axis=1) + # ddA_cs = -(ddt * dt_m) + # Triton 2.2.0 errors if we have the cumsum here, so we just write it out + # then call torch.cumsum outside this kernel. + # ddA_cs = tl.cumsum(ddt * dt_m) + ddA_cs = ddt * dt_m + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + # tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + tl.atomic_add(ddA_cumsum_ptrs + stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size - 1) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['hdim', 'dstate', 'chunk_size'], +) +@triton.jit +def _chunk_state_varlen_kernel( + # Pointers to matrices + x_ptr, b_ptr, dt_ptr, dA_cumsum_ptr, chunk_states_ptr, cu_seqlens_ptr, states_ptr, + # Matrix dimensions + hdim, dstate, chunk_size, + seqlen, nheads_ngroups_ratio, + # Strides + stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_chunk_states_chunk, stride_chunk_states_head, stride_chunk_states_hdim, stride_chunk_states_dstate, + stride_states_batch, stride_states_head, stride_states_hdim, stride_states_dstate, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + end_idx = tl.load(cu_seqlens_ptr + pid_b + 1) + pid_c = (end_idx - 1) // chunk_size + b_ptr += pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + x_ptr += pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_c * stride_dt_chunk + pid_h * stride_dt_head + 
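+    # pid_c is the chunk holding the last token (end_idx - 1) of this sequence; dA_cs_last below is the
+    # dA cumsum at that final token, used to form the decay exp(dA_cs_last - dA_cs_k) for each position k.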
dA_cumsum_ptr += pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + chunk_states_ptr += pid_c * stride_chunk_states_chunk + pid_h * stride_chunk_states_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_hdim + offs_k[None, :] * stride_x_seqlen) + b_ptrs = b_ptr + (offs_n[None, :] * stride_b_dstate + offs_k[:, None] * stride_b_seqlen) + dt_ptrs = dt_ptr + offs_k * stride_dt_csize + dA_cs_last = tl.load(dA_cumsum_ptr + (end_idx - pid_c * chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + + chunk_size_limit = end_idx - pid_c * chunk_size + start_idx = tl.load(cu_seqlens_ptr + pid_b) + start_idx_cur = tl.maximum(start_idx - pid_c * chunk_size, 0) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, chunk_size_limit, BLOCK_SIZE_K): + x = tl.load(x_ptrs, mask=(offs_m[:, None] < hdim) & (offs_k[None, :] < chunk_size_limit - k) & (offs_k[None, :] >= start_idx_cur - k), other=0.0) + b = tl.load(b_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < dstate) & (offs_k[:, None] >= start_idx_cur - k), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + dt_k = tl.load(dt_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + scale = tl.where((offs_k >= start_idx_cur - k) & (offs_k < chunk_size_limit - k), + tl.exp((dA_cs_last - dA_cs_k)) * dt_k, 0.0) + b *= scale[:, None] + b = b.to(x_ptr.dtype.element_ty) + acc += tl.dot(x, b) + x_ptrs += BLOCK_SIZE_K * stride_x_seqlen + b_ptrs += BLOCK_SIZE_K * stride_b_seqlen + dt_ptrs += BLOCK_SIZE_K * stride_dt_csize + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + # If the sequence starts after the last chunk idx, we don't need to add the contribution from the last chunk + if start_idx < pid_c * chunk_size: + chunk_states_ptrs = chunk_states_ptr + (offs_m[:, None] * stride_chunk_states_hdim + offs_n[None, :] * stride_chunk_states_dstate) + chunk_states = tl.load(chunk_states_ptrs, mask=(offs_m[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + # scale = tl.where(start_idx < pid_c * chunk_size, tl.exp(dA_cs_last), 0.0) + scale = tl.exp(dA_cs_last) + acc += chunk_states * scale + + states = acc.to(states_ptr.dtype.element_ty) + + states_ptr += pid_b * stride_states_batch + pid_h * stride_states_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + states_ptrs = states_ptr + (offs_m[:, None] * stride_states_hdim + offs_n[None, :] * stride_states_dstate) + c_mask = (offs_m[:, None] < hdim) & (offs_n[None, :] < dstate) + tl.store(states_ptrs, states, mask=c_mask) + + +def _chunk_cumsum_fwd(dt, A, chunk_size, dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf"))): + batch, seqlen, nheads = dt.shape + assert A.shape == (nheads,) + if dt_bias is not None: + assert dt_bias.shape == (nheads,) + nchunks = math.ceil(seqlen / chunk_size) + dt_out = torch.empty(batch, nheads, nchunks, chunk_size, device=dt.device, dtype=torch.float32) + dA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=dt.device, dtype=torch.float32) + grid_chunk_cs = lambda META: (batch, nchunks, triton.cdiv(nheads, META['BLOCK_SIZE_H'])) + with torch.cuda.device(dt.device.index): + 
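+        # dt_out and dA_cumsum are (batch, nheads, nchunks, chunk_size); the kernel expects strides in
+        # (batch, chunk, head, csize) order, hence the stride(0), stride(2), stride(1), stride(3)
+        # pattern used here and in the other wrappers below.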
_chunk_cumsum_fwd_kernel[grid_chunk_cs]( + dt, A, dt_bias, dt_out, dA_cumsum, + batch, seqlen, nheads, chunk_size, + dt_limit[0], dt_limit[1], + dt.stride(0), dt.stride(1), dt.stride(2), + A.stride(0), + dt_bias.stride(0) if dt_bias is not None else 0, + dt_out.stride(0), dt_out.stride(2), dt_out.stride(1), dt_out.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + dt_softplus, + HAS_DT_BIAS=dt_bias is not None, + BLOCK_SIZE_CHUNK=triton.next_power_of_2(chunk_size), + ) + return dA_cumsum, dt_out + + +def _chunk_cumsum_bwd(ddA, ddt_out, dt, A, dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf")), ddt=None): + batch, seqlen, nheads = dt.shape + _, _, nchunks, chunk_size = ddA.shape + assert ddA.shape == (batch, nheads, nchunks, chunk_size) + assert ddt_out.shape == (batch, nheads, nchunks, chunk_size) + assert A.shape == (nheads,) + if dt_bias is not None: + assert dt_bias.shape == (nheads,) + ddt_bias = torch.empty_like(dt_bias, dtype=torch.float32) + else: + ddt_bias = None + if ddt is not None: + assert ddt.shape == dt.shape + else: + ddt = torch.empty_like(dt) + dA = torch.empty_like(A, dtype=torch.float32) + grid_chunk_cs = lambda META: (batch, nchunks, triton.cdiv(nheads, META['BLOCK_SIZE_H'])) + with torch.cuda.device(dt.device.index): + _chunk_cumsum_bwd_kernel[grid_chunk_cs]( + ddA, ddt_out, dt, A, dt_bias, ddt, dA, ddt_bias, + batch, seqlen, nheads, chunk_size, + dt_limit[0], dt_limit[1], + ddA.stride(0), ddA.stride(2), ddA.stride(1), ddA.stride(3), + ddt_out.stride(0), ddt_out.stride(2), ddt_out.stride(1), ddt_out.stride(3), + dt.stride(0), dt.stride(1), dt.stride(2), + A.stride(0), + dt_bias.stride(0) if dt_bias is not None else 0, + ddt.stride(0), ddt.stride(1), ddt.stride(2), + dA.stride(0), + ddt_bias.stride(0) if ddt_bias is not None else 0, + dt_softplus, + HAS_DT_BIAS=dt_bias is not None, + BLOCK_SIZE_CHUNK=triton.next_power_of_2(chunk_size), + ) + return ddt, dA, ddt_bias + + +def _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=None, states=None, states_in_fp32=True): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if states is not None: + assert states.shape == (batch, nchunks, nheads, headdim, dstate) + else: + states_dtype = torch.float32 if states_in_fp32 else B.dtype + states = torch.empty((batch, nchunks, nheads, headdim, dstate), device=x.device, dtype=states_dtype) + grid = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_fwd_kernel[grid]( + x, B, states, dt, dA_cumsum, seq_idx, + headdim, dstate, chunk_size, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(-1), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), states.stride(4), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + HAS_SEQ_IDX=seq_idx is not None, + ) + return states + + +def _chunk_state_bwd_dx(B, x, dt, dA_cumsum, 
dstates, dx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if dx is not None: + assert dx.shape == x.shape + else: + dx = torch.empty_like(x) + ddt = torch.empty(batch, nheads, nchunks, chunk_size, device=dt.device, dtype=torch.float32) + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=dA_cumsum.device, dtype=torch.float32) + grid_dx = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_bwd_dx_kernel[grid_dx]( + x, B, dstates, dt, dA_cumsum, dx, ddt, ddA_cumsum, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(-1), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), dstates.stride(4), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + dx.stride(0), dx.stride(1), dx.stride(2), dx.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + ) + return dx, ddt.to(dt.dtype), ddA_cumsum.to(dA_cumsum.dtype) + + +def _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=None, B=None, ngroups=1): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + dstate = dstates.shape[-1] + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if B is not None: + assert B.shape == (batch, seqlen, ngroups, dstate) + B_strides = (B.stride(0), B.stride(1), B.stride(2), B.stride(3)) + # Use torch.empty since the Triton kernel will call init_to_zero + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=x.device, dtype=torch.float32) + ddA_cumsum_strides = (ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3)) + else: + B_strides = (0, 0, 0, 0) + ddA_cumsum = None + ddA_cumsum_strides = (0, 0, 0, 0) + nheads_ngroups_ratio = nheads // ngroups + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + nheads_per_program = max(min(math.ceil(batch * nchunks * nheads / sm_count), nheads_ngroups_ratio), 1) + nsplits = triton.cdiv(nheads_ngroups_ratio, nheads_per_program) + dB = torch.empty(batch, seqlen, nsplits, ngroups, dstate, device=x.device, dtype=torch.float32) + grid_db = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nsplits * ngroups) + with torch.cuda.device(x.device.index): + _chunk_state_bwd_db_kernel[grid_db]( + x, dstates, B, dt, dA_cumsum, seq_idx, dB, ddA_cumsum, + chunk_size, dstate, headdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), 
dstates.stride(4), + *B_strides, + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dB.stride(0), dB.stride(1), dB.stride(2), dB.stride(3), dB.stride(4), + *ddA_cumsum_strides, + HAS_DDA_CS=ddA_cumsum is not None, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + dB = dB.sum(2) + if ddA_cumsum is not None: + # The first element of ddA_cumsum is always zero, since that dA_cumsum does not contribute + # to the state of the chunk. + # torch.cumsum(ddA_cumsum[..., 1:], dim=-1, out=ddA_cumsum[..., 1:]) + # But it's easier to just do the cumsum for all elements, the result will be the same. + torch.cumsum(ddA_cumsum, dim=-1, out=ddA_cumsum) + return dB if B is None else (dB, ddA_cumsum) + + +def _chunk_state_bwd_ddAcs_stable(B, x, dt, dA_cumsum, dstates, seq_idx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + # Use torch.empty since the Triton kernel will call init_to_zero + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=x.device, dtype=torch.float32) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_bwd_ddAcs_stable_kernel[grid_ddtcs]( + x, B, dstates, dt, dA_cumsum, seq_idx, ddA_cumsum, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(-1), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), dstates.stride(4), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_M=max(triton.next_power_of_2(chunk_size), 16), + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + ) + torch.cumsum(ddA_cumsum[..., 1:], dim=-1, out=ddA_cumsum[..., 1:]) + return ddA_cumsum + + +def chunk_state_varlen(B, x, dt, dA_cumsum, cu_seqlens, chunk_states): + total_seqlen, nheads, headdim = x.shape + _, nchunks, chunk_size = dt.shape + _, ngroups, dstate = B.shape + batch = cu_seqlens.shape[0] - 1 + cu_seqlens = cu_seqlens.contiguous() + assert nheads % ngroups == 0 + assert B.shape == (total_seqlen, ngroups, dstate) + assert dt.shape == (nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert chunk_states.shape == (nchunks, nheads, headdim, dstate) + states = torch.empty(batch, nheads, headdim, dstate, dtype=chunk_states.dtype, device=chunk_states.device) + grid = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_varlen_kernel[grid]( + x, B, dt, 
dA_cumsum, chunk_states, cu_seqlens, states, + headdim, dstate, chunk_size, + total_seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), + B.stride(0), B.stride(1), B.stride(2), + dt.stride(1), dt.stride(0), dt.stride(2), + dA_cumsum.stride(1), dA_cumsum.stride(0), dA_cumsum.stride(2), + chunk_states.stride(0), chunk_states.stride(1), chunk_states.stride(2), chunk_states.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), + ) + return states + + +class ChunkStateFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, B, x, dt, dA_cumsum, states_in_fp32=True): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert seqlen <= nchunks * chunk_size + _, _, ngroups, dstate = B.shape + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + if B.stride(-1) != 1: + B = B.contiguous() + if x.stride(-1) != 1 and x.stride(1) != 1: # Either M or K dimension should be contiguous + x = x.contiguous() + states = _chunk_state_fwd(B, x, dt, dA_cumsum, states_in_fp32=states_in_fp32) + ctx.save_for_backward(B, x, dt, dA_cumsum) + return states + + @staticmethod + def backward(ctx, dstates): + B, x, dt, dA_cumsum = ctx.saved_tensors + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if dstates.stride(-1) != 1: + dstates = dstates.contiguous() + dx, ddt, ddA_cumsum = _chunk_state_bwd_dx(B, x, dt, dA_cumsum, dstates) + dB = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, ngroups=ngroups) + dB = dB.to(B.dtype) + return dB, dx, ddt, ddA_cumsum, None + + +def chunk_state(B, x, dt, dA_cumsum, states_in_fp32=True): + """ + Argument: + B: (batch, seqlen, ngroups, headdim) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + Return: + states: (batch, nchunks, nheads, headdim, dstate) + """ + return ChunkStateFn.apply(B, x, dt, dA_cumsum, states_in_fp32) + + +def chunk_state_ref(B, x, dt, dA_cumsum): + """ + Argument: + B: (batch, seqlen, ngroups, headdim) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + Return: + states: (batch, nchunks, nheads, headdim, dstate) + """ + # Check constraints. + batch, seqlen, nheads, headdim = x.shape + dstate = B.shape[-1] + _, _, nchunks, chunk_size = dt.shape + assert seqlen <= nchunks * chunk_size + assert x.shape == (batch, seqlen, nheads, headdim) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + ngroups = B.shape[2] + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + B = repeat(B, "b l g d -> b l (g h) d", h=nheads // ngroups) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + if seqlen < nchunks * chunk_size: + x = F.pad(x, (0, 0, 0, 0, 0, nchunks * chunk_size - seqlen)) + B = F.pad(B, (0, 0, 0, 0, 0, nchunks * chunk_size - seqlen)) + x = rearrange(x, "b (c l) h p -> b c l h p", l=chunk_size) + B = rearrange(B, "b (c l) ... 
-> b c l ...", l=chunk_size) + decay_states = torch.exp((dA_cumsum[:, :, :, -1:] - dA_cumsum)) + return torch.einsum("bclhn,bhcl,bhcl,bclhp->bchpn", B.to(x.dtype), decay_states.to(x.dtype), dt.to(x.dtype), x) diff --git a/mamba/build/lib/mamba_ssm/ops/triton/ssd_combined.py b/mamba/build/lib/mamba_ssm/ops/triton/ssd_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..77d207152cdd2d9f18f5d6be6824cc0ef4637fdd --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/ssd_combined.py @@ -0,0 +1,981 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +"""We want triton==2.1.0 or 2.2.0 for this +""" + +from typing import Optional + +import math +from packaging import version + +import torch +import torch.nn.functional as F +from torch import Tensor +from torch.cuda.amp import custom_bwd, custom_fwd + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn + import causal_conv1d_cuda +except ImportError: + causal_conv1d_fn, causal_conv1d_cuda = None, None + +from mamba_ssm.ops.triton.ssd_bmm import _bmm_chunk_fwd, _bmm_chunk_bwd +from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_cumsum_fwd, _chunk_cumsum_bwd +from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_state_fwd, _chunk_state_bwd_db +from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_state_bwd_ddAcs_stable +from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state, chunk_state_ref +from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state_varlen +from mamba_ssm.ops.triton.ssd_state_passing import _state_passing_fwd, _state_passing_bwd +from mamba_ssm.ops.triton.ssd_state_passing import state_passing, state_passing_ref +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_fwd, _chunk_scan_bwd_dz, _chunk_scan_bwd_dstates +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_dC, _chunk_scan_bwd_dcb +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_ddAcs_stable +from mamba_ssm.ops.triton.ssd_chunk_scan import chunk_scan, chunk_scan_ref +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_ddAcs_prev +from mamba_ssm.ops.triton.layernorm_gated import rmsnorm_fn, _layer_norm_fwd, _layer_norm_bwd +from mamba_ssm.ops.triton.k_activations import _swiglu_fwd, _swiglu_bwd + +TRITON_22 = version.parse(triton.__version__) >= version.parse('2.2.0') + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + 
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_scan_chunk_state_bwd_dx_kernel( + # Pointers to matrices + x_ptr, cb_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, D_ptr, + b_ptr, dstates_ptr, + dx_ptr, ddt_ptr, dD_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_D_head, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dstates_batch, stride_dstates_chunk, stride_dstates_head, stride_dstates_hdim, stride_dstates_dstate, + stride_dx_batch, stride_dx_seqlen, stride_dx_head, stride_dx_hdim, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_csize, stride_dD_hdim, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, + IS_TRITON_22: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + pid_h * stride_dstates_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_last - dA_cs_m) + else: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, 
mask=offs_m < chunk_size_limit, other=-1) + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + scale = tl.where(seq_idx_m == seq_idx_last, tl.exp(dA_cs_last - dA_cs_m), 0.0) + # Might be faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + # However, we're getting error with the Triton compiler 2.1.0 for that code path: + # Unexpected mma -> mma layout conversion + # Triton 2.2.0 fixes this + offs_dstate = tl.arange(0, BLOCK_SIZE_DSTATE if IS_TRITON_22 and BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_dstate[None, :] * stride_b_dstate) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_dstates_hdim + offs_dstate[:, None] * stride_dstates_dstate) + if IS_TRITON_22 and BLOCK_SIZE_DSTATE <= 128: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_dstate[None, :] < dstate), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_dstate[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc = tl.dot(b, dstates) * scale[:, None] + else: + for k in range(0, dstate, BLOCK_SIZE_K): + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_dstate[None, :] < dstate - k), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_dstate[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc += tl.dot(b, dstates) + b_ptrs += BLOCK_SIZE_K * stride_b_dstate + dstates_ptrs += BLOCK_SIZE_K * stride_dstates_dstate + acc *= scale[:, None] + + # x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + # x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + # dt_ptrs = dt_ptr + offs_m * stride_dt_csize + # dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + # ddt = tl.sum(acc * x, axis=1) * dt_m + # ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + # tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + + offs_k = tl.arange(0, BLOCK_SIZE_K) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_k[None, :] * stride_cb_csize_k) + dout_ptrs = dout_ptr + (offs_k[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + K_MAX = chunk_size_limit + K_MIN = pid_m * BLOCK_SIZE_M + cb_ptrs += K_MIN * stride_cb_csize_k + dout_ptrs += K_MIN * stride_dout_seqlen + dA_cumsum_ptrs += K_MIN * stride_dA_cs_csize + for k in range(K_MIN, K_MAX, BLOCK_SIZE_K): + k = tl.multiple_of(k, BLOCK_SIZE_K) + # For some reason setting mask to (offs_m[:, None] < chunk_size_limit) is much slower + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_k[None, :] < K_MAX - k), other=0.0) + dout = tl.load(dout_ptrs, mask=(offs_k[:, None] < K_MAX - k) & (offs_n[None, :] < hdim), other=0.0) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < K_MAX - k, other=0.0).to(tl.float32) + cb *= tl.exp(dA_cs_k[None, :] - dA_cs_m[:, None]) + # If we don't have the (k + offs_k[None, :] < K_MAX) mask, for indices outside this range, + # we might have dA_cs_m = 0.0 and dA_cs_k very negative, and tl.exp will return inf. + # Multiplying with cb, which is 0.0 outside the range, will make the result NaN. + # This will cause NaN in acc, and hence NaN in dx and ddt. 
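+ # The first condition (k + offs_k >= offs_m) enforces causality within the chunk: x at position m
+ # only receives gradient from output positions k >= m, i.e. the transpose of the forward causal mask.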
+ mask = (k + offs_k[None, :] >= offs_m[:, None]) & (k + offs_k[None, :] < K_MAX) + cb = tl.where(mask, cb, 0.0) + cb = cb.to(dout_ptr.dtype.element_ty) + acc += tl.dot(cb, dout) + cb_ptrs += BLOCK_SIZE_K * stride_cb_csize_k + dout_ptrs += BLOCK_SIZE_K * stride_dout_seqlen + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + dx = acc * dt_m[:, None] + dx_ptr += pid_b * stride_dx_batch + pid_c * chunk_size * stride_dx_seqlen + pid_h * stride_dx_head + dx_ptrs = dx_ptr + (offs_m[:, None] * stride_dx_seqlen + offs_n[None, :] * stride_dx_hdim) + if HAS_D: + dout_res_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dout_res = tl.load(dout_res_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + dx += dout_res * D + tl.store(dx_ptrs, dx, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if HAS_D: + dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + if D_HAS_HDIM: + dD_ptrs = dD_ptr + offs_n * stride_dD_hdim + dD = tl.sum(dout_res * x, axis=0) + tl.store(dD_ptrs, dD, mask=offs_n < hdim) + else: + dD = tl.sum(dout_res * x) + tl.store(dD_ptr, dD) + ddt = tl.sum(acc * x, axis=1) + ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + + +def _chunk_scan_chunk_state_bwd_dx(x, dt, dA_cumsum, B, CB, dout, dstates, D=None, seq_idx=None, dx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert CB.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dout.shape == x.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert D.stride(-1) == 1 + BLOCK_SIZE_min = 32 + dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_min), batch, nchunks, nheads, + headdim if D.dim() == 2 else 1, device=D.device, dtype=torch.float32) + else: + dD = None + dD_strides = ((dD.stride(0), dD.stride(1), dD.stride(2), dD.stride(3), dD.stride(4)) + if D is not None else (0, 0, 0, 0, 0)) + if dx is None: + dx = torch.empty_like(x) + else: + assert dx.shape == x.shape + ddt = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + grid_dx = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_chunk_state_bwd_dx_kernel[grid_dx]( + x, CB, dout, dt, dA_cumsum, 
seq_idx, D, B, dstates, dx, ddt, dD, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + CB.stride(0), CB.stride(1), CB.stride(2), CB.stride(-1), CB.stride(-2), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + D.stride(0) if D is not None else 0, + B.stride(0), B.stride(1), B.stride(2), B.stride(3), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), dstates.stride(4), + dx.stride(0), dx.stride(1), dx.stride(2), dx.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + dD_strides[1], dD_strides[2], dD_strides[3], dD_strides[0], dD_strides[4], + D is not None, + D.dim() == 2 if D is not None else True, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + IS_TRITON_22=TRITON_22 + ) + if D is not None: + BLOCK_SIZE_actual = _chunk_scan_chunk_state_bwd_dx_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + if D.dim() == 1: + dD = rearrange(dD, "h 1 -> h") + return dx, ddt.to(dtype=dt.dtype), dD + + +def _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf"))): + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert x.shape == (batch, seqlen, nheads, headdim) + assert dt.shape == (batch, seqlen, nheads) + assert A.shape == (nheads,) + assert C.shape == B.shape + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if x.stride(-1) != 1 and x.stride(1) != 1: # Either M or K dimension should be contiguous + x = x.contiguous() + if z is not None and z.stride(-1) != 1 and z.stride(1) != 1: # Either M or K dimension should be contiguous + z = z.contiguous() + if D is not None and D.stride(-1) != 1: + D = D.contiguous() + if initial_states is not None: + assert initial_states.shape == (batch, nheads, headdim, dstate) + # # (batch, nchunks, chunk_size, chunk_size) or (batch, nchunks, nheads, chunk_size, chunk_size) + # dA_cumsum_tmp0, dt_tmp0 = _chunk_cumsum_fwd(dt[:, :147], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus) + # dA_cumsum_tmp1, dt_tmp1 = _chunk_cumsum_fwd(dt[:, 147:], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus) + # dA_cumsum_tmp2, dt_tmp2 = _chunk_cumsum_fwd(dt[:, 147:256], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus) + dA_cumsum, dt = _chunk_cumsum_fwd(dt, A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit) + states = _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=seq_idx, states_in_fp32=True) + # states_tmp0 = _chunk_state_fwd(B[:, :147], x[:, :147], dt_tmp0, dA_cumsum_tmp0, states_in_fp32=True) + # states_tmp1 = _chunk_state_fwd(B[:, 147:], x[:, 147:], dt_tmp1, dA_cumsum_tmp1, states_in_fp32=True) + # states_tmp2 = 
_chunk_state_fwd(B[:, 147:256], x[:, 147:256], dt_tmp2, dA_cumsum_tmp2, states_in_fp32=True) + states, final_states = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1], + initial_states=rearrange(initial_states, "... p n -> ... (p n)") if initial_states is not None else None, + seq_idx=seq_idx, chunk_size=chunk_size, out_dtype=C.dtype) + states, final_states = [rearrange(t, "... (p n) -> ... p n", n=dstate) for t in [states, final_states]] + # states_tmp0 = rearrange(_state_passing_fwd(rearrange(states_tmp0, "... p n -> ... (p n)"), dA_cumsum_tmp0[:, :, :, -1], chunk_size=chunk_size), "... (p n) -> ... p n", n=dstate) + # states_tmp1 = rearrange(_state_passing_fwd(rearrange(states_tmp1, "... p n -> ... (p n)"), dA_cumsum_tmp1[:, :, :, -1], chunk_size=chunk_size), "... (p n) -> ... p n", n=dstate) + CB = _bmm_chunk_fwd(C, B, chunk_size, seq_idx=seq_idx, output_dtype=torch.float32) + out, out_x = _chunk_scan_fwd(CB, x, dt, dA_cumsum, C, states, D=D, z=z, seq_idx=seq_idx) + if cu_seqlens is None: + return out, out_x, dt, dA_cumsum, states, final_states + else: + assert batch == 1, "passing cu_seqlens to get the varlen states is only supported if batch dimension is 1" + varlen_states = chunk_state_varlen(B.squeeze(0), x.squeeze(0), dt.squeeze(0), dA_cumsum.squeeze(0), + cu_seqlens, states.squeeze(0)) + return out, out_x, dt, dA_cumsum, states, final_states, varlen_states + + +def _mamba_chunk_scan_combined_bwd(dout, x, dt, A, B, C, out, chunk_size, D=None, z=None, + dt_bias=None, initial_states=None, dfinal_states=None, seq_idx=None, dt_softplus=False, + dt_limit=(0.0, float("inf")), + dx=None, ddt=None, dB=None, dC=None, dz=None, recompute_output=False): + if dout.stride(-1) != 1: + dout = dout.contiguous() + batch, seqlen, nheads, headdim = x.shape + nchunks = math.ceil(seqlen / chunk_size) + _, _, ngroups, dstate = B.shape + assert dout.shape == (batch, seqlen, nheads, headdim) + assert dt.shape == (batch, seqlen, nheads) + assert A.shape == (nheads,) + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert C.shape == B.shape + assert out.shape == x.shape + if initial_states is not None: + assert initial_states.shape == (batch, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if dx is not None: + assert dx.shape == x.shape + if dB is not None: + assert dB.shape == B.shape + dB_given = dB + else: + dB_given = torch.empty_like(B) + if dC is not None: + assert dC.shape == C.shape + dC_given = dC + else: + dC_given = torch.empty_like(C) + if dz is not None: + assert z is not None + assert dz.shape == z.shape + if ddt is not None: + assert ddt.shape == dt.shape + ddt_given = ddt + else: + ddt_given = torch.empty_like(dt) + # TD: For some reason Triton (2.1.0 and 2.2.0) errors with + # "[CUDA]: invalid device context" (e.g. during varlne test), and cloning makes it work. Idk why. + dt_in = dt.clone() + dA_cumsum, dt = _chunk_cumsum_fwd(dt_in, A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus, + dt_limit=dt_limit) + CB = _bmm_chunk_fwd(C, B, chunk_size, seq_idx=seq_idx, output_dtype=torch.float32) + states = _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=seq_idx, states_in_fp32=True) + states, _ = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1], + initial_states=rearrange(initial_states, "... p n -> ... (p n)") if initial_states is not None else None, + seq_idx=seq_idx, chunk_size=chunk_size) + states = rearrange(states, "... (p n) -> ... 
p n", n=dstate) + if z is not None: + dz, dout, dD, *rest = _chunk_scan_bwd_dz(x, z, out, dout, chunk_size=chunk_size, has_ddAcs=False, D=D, dz=dz, recompute_output=recompute_output) + outz = rest[0] if recompute_output else out + else: + dz = None + outz = out + dstates = _chunk_scan_bwd_dstates(C, dA_cumsum, dout, seq_idx=seq_idx, dtype=states.dtype) + # dstates has length nchunks, containing the gradient to initial states at index 0 and + # gradient to the states of chunk (nchunks - 2) at index (nchunks - 1) + # Do computation in fp32 but convert dstates and states to fp16/bf16 since dstates and states + # will be used in matmul in the next kernels. + dstates, ddA_chunk_cumsum, dinitial_states, states = _state_passing_bwd( + rearrange(states, "... p n -> ... (p n)"), + dA_cumsum[:, :, :, -1], + rearrange(dstates, "... p n -> ... (p n)"), + dfinal_states=rearrange(dfinal_states, "... p n -> ... (p n)") if dfinal_states is not None else None, + seq_idx=seq_idx, + has_initial_states=initial_states is not None, + dstates_dtype=x.dtype, + states_dtype=x.dtype, + chunk_size=chunk_size, + ) + # dstates has length nchunks, containing the gradient to states of chunk 0 at index 0 and + # gradient to the final states at index (nchunks - 1) + # states has length nchunks, containing the initial states at index 0 and the state for chunk (nchunks - 2) at index (nchunks - 1) + # The final states is not stored. + states = rearrange(states, "... (p n) -> ... p n", n=dstate) + dstates = rearrange(dstates, "... (p n) -> ... p n", n=dstate) + dinitial_states = rearrange(dinitial_states, "... (p n) -> ... p n", n=dstate) if dinitial_states is not None else None + dx, ddt, dD_from_x = _chunk_scan_chunk_state_bwd_dx(x, dt, dA_cumsum, B, CB, dout, dstates, D=D, seq_idx=seq_idx, dx=dx) + # dB = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=seq_idx, ngroups=ngroups) + dB, ddA_next = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=seq_idx, B=B, ngroups=ngroups) + # dC = _chunk_scan_bwd_dC(states[:, :-1].to(x.dtype), dA_cumsum, dout, seq_idx=seq_idx, ngroups=ngroups) + dC, ddA_cumsum_prev = _chunk_scan_bwd_dC(states.to(x.dtype), dA_cumsum, dout, seq_idx=seq_idx, C=C, ngroups=ngroups) + # Computing ddA with the dcb kernel is much slower, so we're not using it for now + dCB = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=seq_idx, ngroups=ngroups) + # dCB, ddA_tmp = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=seq_idx, CB=CB, ngroups=ngroups) + dCB = dCB.to(CB.dtype) + _bmm_chunk_bwd(C, dCB, residual=dB, out=dB_given) + _bmm_chunk_bwd(B, rearrange(dCB, "... l s -> ... s l"), residual=dC, out=dC_given) + # If we have z, then dout_x is recomputed in fp32 so dD = (dout_x * x).sum() is more accurate + # than dD_from_x = (dout_x * x).sum() where dout_x is in fp16/bf16 + if z is None: + dD = dD_from_x + # Formula for ddA_cumsum, assuming out is the output of the forward pass before adding x * D. + # ddA_cumsum = torch.einsum("bclhp,bclhp->bhcl", out.float(), dout.float()) - ddt * dt + # However, this is numerically unstable: when we do the reverse cumsum on ddA_cumsum, there might + # be a lot of underflow. 
+ + # This is already done as part of bwd_dC kernel + # ddA_cumsum_prev = _chunk_scan_bwd_ddAcs_prev(states[:, :-1], C, dout, dA_cumsum, seq_idx=seq_idx) + ddA_cumsum_prev[..., -1] += ddA_chunk_cumsum + ddA_prev = ddA_cumsum_prev.flip([-1]).cumsum(dim=-1).flip([-1]) + # This is already done as part of bwd_dB kernel + # ddA_next = _chunk_state_bwd_ddAcs_stable(B, x, dt, dA_cumsum, dstates, seq_idx=seq_idx) + # We don't need to pass in seq_idx because CB also zeros out entries where seq_idx[i] != seq_idx[j] + ddA = _chunk_scan_bwd_ddAcs_stable(x, dt, dA_cumsum, dout, CB) + ddA += ddA_next + ddA_prev + + ddt_given, dA, ddt_bias = _chunk_cumsum_bwd(ddA, ddt, dt_in, A, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit, ddt=ddt_given) + + # These 2 lines are just to test ddt and dA being computed by old code + # _, dA = selective_scan_bwd(dout, x, dt, A, B, C, D=D.float(), z=z) + # ddt_given.copy_(ddt) + + return_vals = (dx, ddt_given, dA, dB_given, dC_given, dD, dz, ddt_bias, dinitial_states) + return return_vals if not recompute_output else (*return_vals, outz) + + +def selective_scan_bwd(dout, x, dt, A, B, C, D=None, z=None): + """ + Argument: + dout: (batch, seqlen, nheads, headdim) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) or (batch, nheads, headdim, nchunks, chunk_size) + A: (nheads) or (dim, dstate) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + import selective_scan + + batch, seqlen, nheads, headdim = x.shape + chunk_size = dt.shape[-1] + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + x = rearrange(x, "b l h p -> b (h p) l") + squeeze_dt = dt.dim() == 4 + if dt.dim() == 4: + dt = repeat(dt, "b h c l -> b h p c l", p=headdim) + dt = rearrange(dt, "b h p c l -> b (h p) (c l)", p=headdim) + squeeze_A = A.dim() == 1 + if A.dim() == 1: + A = repeat(A, "h -> (h p) n", p=headdim, n=dstate).to(dtype=torch.float32) + else: + A = A.to(dtype=torch.float32) + B = rearrange(B, "b l g n -> b g n l") + C = rearrange(C, "b l g n -> b g n l") + if D is not None: + if D.dim() == 2: + D = rearrange(D, "h p -> (h p)") + else: + D = repeat(D, "h -> (h p)", p=headdim) + if z is not None: + z = rearrange(z, "b l h p -> b (h p) l") + + if x.stride(-1) != 1: + x = x.contiguous() + if dt.stride(-1) != 1: + dt = dt.contiguous() + if D is not None: + D = D.contiguous() + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if z is not None and z.stride(-1) != 1: + z = z.contiguous() + _, intermediate, *rest = selective_scan.fwd(x, dt.to(dtype=x.dtype), A, B, C, D, z, None, False) + if z is not None: + out = rest[0] + else: + out = None + + dout = rearrange(dout, "b l h p -> b (h p) l") + + if dout.stride(-1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan with the backward of chunk). + # Here we just pass in None and dz will be allocated in the C++ code. 
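+ # Only ddt and dA are kept from the reference backward and reshaped back to the per-head layout
+ # below; this helper is here to cross-check the Triton kernels against the original selective-scan
+ # CUDA implementation (see the commented-out test lines in _mamba_chunk_scan_combined_bwd).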
+ _, ddt, dA, *rest = selective_scan.bwd( + x, dt.to(dtype=x.dtype), A, B, C, D, z, None, dout, intermediate, out, None, False, + False # option to recompute out_z, not used here + ) + ddt = rearrange(ddt, "b (h p) (c l) -> b h p c l", p=headdim, l=chunk_size) + if squeeze_dt: + ddt = ddt.float().sum(dim=2) + if squeeze_A: + dA = rearrange(dA, "(h p) n -> h p n", p=headdim).sum(dim=(1, 2)) + return ddt, dA + + +class MambaChunkScanCombinedFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf")), return_final_states=False, return_varlen_states=False): + ctx.dt_dtype = dt.dtype + if not return_varlen_states: + cu_seqlens = None + else: + assert cu_seqlens is not None, "cu_seqlens must be provided if return_varlen_states is True" + out, out_x, dt_out, dA_cumsum, states, final_states, *rest = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, cu_seqlens=cu_seqlens, dt_softplus=dt_softplus, dt_limit=dt_limit) + ctx.save_for_backward(out if z is None else out_x, x, dt, dA_cumsum, A, B, C, D, z, dt_bias, initial_states, seq_idx) + ctx.dt_softplus = dt_softplus + ctx.chunk_size = chunk_size + ctx.dt_limit = dt_limit + ctx.return_final_states = return_final_states + ctx.return_varlen_states = return_varlen_states + if not return_varlen_states: + return out if not return_final_states else (out, final_states) + else: + varlen_states = rest[0] + return (out, varlen_states) if not return_final_states else (out, final_states, varlen_states) + + @staticmethod + def backward(ctx, dout, *args): + out, x, dt, dA_cumsum, A, B, C, D, z, dt_bias, initial_states, seq_idx = ctx.saved_tensors + assert not ctx.return_varlen_states, "return_varlen_states is not supported in backward" + dfinal_states = args[0] if ctx.return_final_states else None + dx, ddt, dA, dB, dC, dD, dz, ddt_bias, dinitial_states = _mamba_chunk_scan_combined_bwd(dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=ctx.dt_softplus, dt_limit=ctx.dt_limit) + return dx, ddt, dA, dB, dC, None, dD, dz, ddt_bias, dinitial_states, None, None, None, None, None, None + + +def mamba_chunk_scan_combined(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf")), return_final_states=False, return_varlen_states=False): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: (batch, seqlen, nheads) + A: (nheads) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + chunk_size: int + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) + initial_states: (batch, nheads, headdim, dstate) + seq_idx: (batch, seqlen) + cu_seqlens: (num_sequences + 1) or None, only used if return_varlen_states is True + dt_softplus: Whether to apply softplus to dt + Return: + out: (batch, seqlen, nheads, headdim) + """ + return MambaChunkScanCombinedFn.apply(x, dt, A, B, C, chunk_size, D, z, dt_bias, initial_states, seq_idx, cu_seqlens, dt_softplus, dt_limit, return_final_states, return_varlen_states) + + +def mamba_chunk_scan(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: 
(batch, seqlen, nheads) + A: (nheads) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) + Return: + out: (batch, seqlen, nheads, headdim) + """ + batch, seqlen, nheads, headdim = x.shape + dstate = B.shape[-1] + if seqlen % chunk_size != 0: + dt = F.pad(dt, (0, 0, 0, chunk_size - seqlen % chunk_size)) + dt = rearrange(dt, "b (c l) h -> b h c l", l=chunk_size) + dt = dt.float() # We want high precision for this before cumsum + if dt_bias is not None: + dt = dt + rearrange(dt_bias, "h -> h 1 1") + if dt_softplus: + dt = F.softplus(dt) + dA = dt * rearrange(A, "h -> h 1 1") + dA_cumsum = torch.cumsum(dA, dim=-1) + # 1. Compute the state for each chunk + states = chunk_state(B, x, dt, dA_cumsum, states_in_fp32=True) + # 2. Pass the state to all the chunks by weighted cumsum. + states = rearrange(state_passing(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1])[0], + "... (p n) -> ... p n", n=dstate) + # 3. Compute the output for each chunk + out = chunk_scan(B, C, x, dt, dA_cumsum, states, D=D, z=z) + return out + + + def ssd_chunk_scan_combined_ref(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: (batch, seqlen, nheads) + A: (nheads) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) + Return: + out: (batch, seqlen, nheads, headdim) + """ + batch, seqlen, nheads, headdim = x.shape + dstate = B.shape[-1] + if seqlen % chunk_size != 0: + dt = F.pad(dt, (0, 0, 0, chunk_size - seqlen % chunk_size)) + dt = rearrange(dt, "b (c l) h -> b h c l", l=chunk_size) + dt = dt.float() # We want high precision for this before cumsum + if dt_bias is not None: + dt = dt + rearrange(dt_bias, "h -> h 1 1") + if dt_softplus: + dt = F.softplus(dt) + dA = dt * rearrange(A, "h -> h 1 1") + dA_cumsum = torch.cumsum(dA, dim=-1) + # 1. Compute the state for each chunk + states = chunk_state_ref(B, x, dt, dA_cumsum) + states_dtype = states.dtype + if states.dtype not in [torch.float32, torch.float64]: + states = states.to(torch.float32) + # 2. Pass the state to all the chunks by weighted cumsum. + # state_passing_ref is much less numerically stable + states = rearrange(state_passing_ref(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1])[0], + "... (p n) -> ... p n", n=dstate) + states = states.to(states_dtype) + # 3.
Compute the output for each chunk + out = chunk_scan_ref(B, C, x, dt, dA_cumsum, states, D=D, z=z) + return out + + +def ssd_selective_scan(x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf"))): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: (batch, seqlen, nheads) or (batch, seqlen, nheads, headdim) + A: (nheads) or (dim, dstate) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) or (nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + from mamba_ssm.ops.selective_scan_interface import selective_scan_fn + + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + x = rearrange(x, "b l h p -> b (h p) l") + if dt.dim() == 3: + dt = repeat(dt, "b l h -> b l h p", p=headdim) + dt = rearrange(dt, "b l h p -> b (h p) l") + if A.dim() == 1: + A = repeat(A, "h -> (h p) n", p=headdim, n=dstate).to(dtype=torch.float32) + else: + A = A.to(dtype=torch.float32) + B = rearrange(B, "b l g n -> b g n l") + C = rearrange(C, "b l g n -> b g n l") + if D is not None: + if D.dim() == 2: + D = rearrange(D, "h p -> (h p)") + else: + D = repeat(D, "h -> (h p)", p=headdim) + if z is not None: + z = rearrange(z, "b l h p -> b (h p) l") + if dt_bias is not None: + if dt_bias.dim() == 1: + dt_bias = repeat(dt_bias, "h -> h p", p=headdim) + dt_bias = rearrange(dt_bias, "h p -> (h p)") + if dt_limit != (0.0, float("inf")): + if dt_bias is not None: + dt = dt + rearrange(dt_bias, "d -> d 1") + if dt_softplus: + dt = F.softplus(dt) + dt = dt.clamp(min=dt_limit[0], max=dt_limit[1]).to(x.dtype) + dt_bias = None + dt_softplus = None + out = selective_scan_fn(x, dt, A, B, C, D=D, z=z, delta_bias=dt_bias, delta_softplus=dt_softplus) + return rearrange(out, "b (h p) l -> b l h p", p=headdim) + + +def mamba_conv1d_scan_ref(xBC, conv1d_weight, conv1d_bias, dt, A, chunk_size, D=None, z=None, + dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf")), + activation="silu", headdim=None, ngroups=1): + """ + Argument: + xBC: (batch, seqlen, dim + 2 * ngroups * dstate) where dim == nheads * headdim + conv1d_weight: (dim + 2 * ngroups * dstate, width) + conv1d_bias: (dim + 2 * ngroups * dstate,) + dt: (batch, seqlen, nheads) or (batch, seqlen, nheads, headdim) + A: (nheads) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, dim) + dt_bias: (nheads) or (nheads, headdim) + headdim: if D is 1D and z is None, headdim must be passed in + Return: + out: (batch, seqlen, dim) + """ + batch, seqlen, nheads = dt.shape[:3] + assert nheads % ngroups == 0 + if z is not None: + dim = z.shape[-1] + assert dim % nheads == 0 + headdim = dim // nheads + else: + if D.dim() == 1: + assert headdim is not None + else: + headdim = D.shape[1] + dim = nheads * headdim + xBC = rearrange(causal_conv1d_fn(rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, activation=activation), + "b d s -> b s d") + dstate = (xBC.shape[-1] - dim) // ngroups // 2 + x, B, C = torch.split(xBC, [dim, ngroups * dstate, ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ngroups) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) if z is not None else None + out = ssd_selective_scan(x, dt.to(x.dtype), A, B, C, D=D.float(), z=z, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit) + return rearrange(out, "b s h p 
-> b s (h p)") + + +class MambaSplitConv1dScanCombinedFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states=None, seq_idx=None, dt_limit=(0.0, float("inf")), return_final_states=False, activation="silu", + rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, + ngroups=1, norm_before_gate=True): + assert activation in [None, "silu", "swish"] + if D.dim() == 1: + assert headdim is not None + nheads, = D.shape + else: + nheads, headdim = D.shape + batch, seqlen, _ = zxbcdt.shape + dim = nheads * headdim + assert nheads % ngroups == 0 + dstate = (conv1d_weight.shape[0] - dim) // ngroups // 2 + d_nonssm = (zxbcdt.shape[-1] - 2 * dim - 2 * ngroups * dstate - nheads) // 2 + assert d_nonssm >= 0 + assert zxbcdt.shape == (batch, seqlen, 2 * d_nonssm + 2 * dim + 2 * ngroups * dstate + nheads) + assert dt_bias.shape == (nheads,) + assert A.shape == (nheads,) + zx0, z, xBC, dt = torch.split(zxbcdt, [2 * d_nonssm, dim, dim + ngroups * dstate * 2, nheads], dim=-1) + seq_idx = seq_idx.contiguous() if seq_idx is not None else None + xBC_conv = rearrange( + causal_conv1d_cuda.causal_conv1d_fwd(rearrange(xBC, "b s d -> b d s"), + conv1d_weight, conv1d_bias, seq_idx, None, None, activation in ["silu", "swish"]), + "b d s -> b s d" + ) + x, B, C = torch.split(xBC_conv, [dim, ngroups * dstate, ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ngroups) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) if z is not None else None + if rmsnorm_weight is None: + out, out_x, dt_out, dA_cumsum, states, final_states = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size=chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=dt_limit) + out = rearrange(out, "b s h p -> b s (h p)") + rstd = None + if d_nonssm > 0: + out = torch.cat([_swiglu_fwd(zx0), out], dim=-1) + else: + out_x, _, dt_out, dA_cumsum, states, final_states = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size=chunk_size, D=D, z=None, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=dt_limit) + # reshape input data into 2D tensor + x_rms = rearrange(out_x, "b s h p -> (b s) (h p)") + z_rms = rearrange(z, "b s h p -> (b s) (h p)") + rmsnorm_weight = rmsnorm_weight.contiguous() + if d_nonssm == 0: + out = None + else: + out01 = torch.empty((batch, seqlen, d_nonssm + dim), dtype=x_rms.dtype, device=x_rms.device) + out = rearrange(out01[..., d_nonssm:], "b s d -> (b s) d") + _swiglu_fwd(zx0, out=out01[..., :d_nonssm]) + out, _, rstd = _layer_norm_fwd(x_rms, rmsnorm_weight, None, rmsnorm_eps, z_rms, out=out, + group_size=dim // ngroups, + norm_before_gate=norm_before_gate, is_rms_norm=True) + if d_nonssm == 0: + out = rearrange(out, "(b s) d -> b s d", b=batch) + else: + out = out01 + ctx.outproj_weight_dtype = outproj_weight.dtype if outproj_weight is not None else None + if outproj_weight is not None: + if torch.is_autocast_enabled(): + dtype = torch.get_autocast_gpu_dtype() + out, outproj_weight = out.to(dtype), outproj_weight.to(dtype) + outproj_bias = outproj_bias.to(dtype) if outproj_bias is not None else None + out = F.linear(out, outproj_weight, outproj_bias) + else: + assert outproj_bias is None + ctx.save_for_backward(zxbcdt, conv1d_weight, conv1d_bias, + out_x, A, D, 
dt_bias, initial_states, seq_idx, rmsnorm_weight, rstd, outproj_weight, outproj_bias) + ctx.dt_limit = dt_limit + ctx.return_final_states = return_final_states + ctx.activation = activation + ctx.rmsnorm_eps = rmsnorm_eps + ctx.norm_before_gate = norm_before_gate + ctx.chunk_size = chunk_size + ctx.headdim = headdim + ctx.ngroups = ngroups + return out if not return_final_states else (out, final_states) + + @staticmethod + @custom_bwd + def backward(ctx, dout, *args): + zxbcdt, conv1d_weight, conv1d_bias, out, A, D, dt_bias, initial_states, seq_idx, rmsnorm_weight, rstd, outproj_weight, outproj_bias = ctx.saved_tensors + dfinal_states = args[0] if ctx.return_final_states else None + headdim = ctx.headdim + nheads = D.shape[0] + dim = nheads * headdim + assert nheads % ctx.ngroups == 0 + dstate = (conv1d_weight.shape[0] - dim) // ctx.ngroups // 2 + d_nonssm = (zxbcdt.shape[-1] - 2 * dim - 2 * ctx.ngroups * dstate - nheads) // 2 + assert d_nonssm >= 0 + recompute_output = outproj_weight is not None + if recompute_output: + out_recompute = torch.empty(*out.shape[:2], d_nonssm + dim, device=out.device, dtype=out.dtype) + out0_recompute, out1_recompute = out_recompute.split([d_nonssm, dim], dim=-1) + zx0, z, xBC, dt = torch.split(zxbcdt, [2 * d_nonssm, dim, dim + 2 * ctx.ngroups * dstate, nheads], dim=-1) + # Recompute x, B, C + xBC_conv = rearrange( + causal_conv1d_cuda.causal_conv1d_fwd(rearrange(xBC, "b s d -> b d s"), + conv1d_weight, conv1d_bias, seq_idx, None, None, ctx.activation in ["silu", "swish"]), + "b d s -> b s d" + ) + x, B, C = torch.split(xBC_conv, [dim, ctx.ngroups * dstate, ctx.ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ctx.ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ctx.ngroups) + dzxbcdt = torch.empty_like(zxbcdt) + dzx0, dz, dxBC_given, ddt_given = torch.split(dzxbcdt, [2 * d_nonssm, dim, dim + 2 * ctx.ngroups * dstate, nheads], dim=-1) + dxBC = torch.empty_like(xBC) + dx, dB, dC = torch.split(dxBC, [dim, ctx.ngroups * dstate, ctx.ngroups * dstate], dim=-1) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) + dx = rearrange(dx, "b l (h p) -> b l h p", h=nheads) + dB = rearrange(dB, "b l (g n) -> b l g n", g=ctx.ngroups) + dC = rearrange(dC, "b l (g n) -> b l g n", g=ctx.ngroups) + if outproj_weight is not None: + dout_og = dout + dout = F.linear(dout, outproj_weight.t()) + if d_nonssm > 0: + dout0, dout = dout.split([d_nonssm, dim], dim=-1) + _swiglu_bwd(zx0, dout0, dxy=dzx0, recompute_output=True, out=out0_recompute) + dout = rearrange(dout, "b s (h p) -> b s h p", p=headdim) + if rmsnorm_weight is None: + dz = rearrange(dz, "b l (h p) -> b l h p", h=nheads) + dx, ddt, dA, dB, dC, dD, dz, ddt_bias, dinitial_states, *rest = _mamba_chunk_scan_combined_bwd( + dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=ctx.dt_limit, dx=dx, ddt=ddt_given, dB=dB, dC=dC, dz=dz, recompute_output=recompute_output + ) + out_for_linear = rearrange(rest[0], "b s h p -> b s (h p)") if recompute_output else None + drmsnorm_weight = None + else: + batch = dout.shape[0] + dy_rms = rearrange(dout, "b s h p -> (b s) (h p)") + dz = rearrange(dz, "b l d -> (b l) d") + x_rms = rearrange(out, "b s h p -> (b s) (h p)") + z_rms = rearrange(z, "b s h p -> (b s) (h p)") + out1_recompute = rearrange(out1_recompute, "b s d -> (b s) d") if recompute_output else None + dout, drmsnorm_weight, _, 
dz, *rest = _layer_norm_bwd(dy_rms, x_rms, rmsnorm_weight, None, ctx.rmsnorm_eps, None, rstd, z_rms, norm_before_gate=ctx.norm_before_gate, is_rms_norm=True, recompute_output=recompute_output, dz=dz, out=out1_recompute if recompute_output else None) + out_for_linear = out_recompute if recompute_output else None + dout = rearrange(dout, "(b s) (h p) -> b s h p", b=batch, p=headdim) + dx, ddt, dA, dB, dC, dD, _, ddt_bias, dinitial_states = _mamba_chunk_scan_combined_bwd( + dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=None, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=ctx.dt_limit, dx=dx, ddt=ddt_given, dB=dB, dC=dC + ) + + if outproj_weight is not None: + doutproj_weight = torch.einsum("bso,bsd->od", dout_og, out_for_linear) + doutproj_bias = dout_og.sum(dim=(0, 1)) if outproj_bias is not None else None + else: + doutproj_weight, doutproj_bias = None, None + dxBC_given = rearrange(dxBC_given, "b s d -> b d s") + dxBC_given, dweight, dbias, *_ = causal_conv1d_cuda.causal_conv1d_bwd( + rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, + rearrange(dxBC, "b s d -> b d s"), seq_idx, None, None, dxBC_given, False, ctx.activation in ["silu", "swish"] + ) + dxBC_given = rearrange(dxBC_given, "b d s -> b s d") + return dzxbcdt, dweight, dbias, ddt_bias, dA, dD, None, dinitial_states, None, None, None, None, drmsnorm_weight, None, doutproj_weight, doutproj_bias, None, None, None + + +def mamba_split_conv1d_scan_combined(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states=None, seq_idx=None, dt_limit=(0.0, float("inf")), return_final_states=False, activation="silu", rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, ngroups=1, norm_before_gate=True): + """ + Argument: + zxbcdt: (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) where dim == nheads * headdim + conv1d_weight: (dim + 2 * ngroups * dstate, width) + conv1d_bias: (dim + 2 * ngroups * dstate,) + dt_bias: (nheads,) + A: (nheads) + D: (nheads, headdim) or (nheads,) + initial_states: (batch, nheads, headdim, dstate) + seq_idx: (batch, seqlen), int32 + rmsnorm_weight: (dim,) + outproj_weight: (out_dim, dim) + outproj_bias: (out_dim,) + headdim: if D is 1D, headdim must be passed in + norm_before_gate: if True, we do RMSNorm(x) * F.silu(z). If False, we do RMSNorm(x * F.silu(z)) + Return: + out: (batch, seqlen, dim) + """ + return MambaSplitConv1dScanCombinedFn.apply(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states, seq_idx, dt_limit, return_final_states, activation, rmsnorm_weight, rmsnorm_eps, outproj_weight, outproj_bias, headdim, ngroups, norm_before_gate) + + +def mamba_split_conv1d_scan_ref(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, dt_limit=(0.0, float("inf")), activation="silu", rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, ngroups=1, norm_before_gate=True): + """ + Argument: + zxbcdt: (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) where dim == nheads * headdim + conv1d_weight: (dim + 2 * ngroups * dstate, width) + conv1d_bias: (dim + 2 * ngroups * dstate,) + dt_bias: (nheads,) + A: (nheads) + D: (nheads, headdim) or (nheads,) + rmsnorm_weight: (dim,) + outproj_weight: (out_dim, dim) + outproj_bias: (out_dim,) + headdim: if D is 1D, headdim must be passed in + norm_before_gate: if True, we do RMSNorm(x) * F.silu(z). 
If False, we do RMSNorm(x * F.silu(z)) + Return: + out: (batch, seqlen, dim) + """ + if D.dim() == 1: + assert headdim is not None + nheads, = D.shape + else: + nheads, headdim = D.shape + assert nheads % ngroups == 0 + batch, seqlen, _ = zxbcdt.shape + dim = nheads * headdim + dstate = (zxbcdt.shape[-1] - 2 * dim - nheads) // ngroups // 2 + assert zxbcdt.shape == (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) + assert dt_bias.shape == (nheads,) + assert A.shape == (nheads,) + if rmsnorm_weight is not None: + assert rmsnorm_weight.shape == (dim,) + z, xBC, dt = torch.split(zxbcdt, [dim, dim + 2 * ngroups * dstate, nheads], dim=-1) + xBC = rearrange(causal_conv1d_fn(rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, activation=activation), + "b d s -> b s d") + x, B, C = torch.split(xBC, [dim, ngroups * dstate, ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ngroups) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) + out = ssd_selective_scan(x, dt.to(x.dtype), A, B, C, D=D.float(), + z=z if rmsnorm_weight is None else None, dt_bias=dt_bias, dt_softplus=True, dt_limit=dt_limit) + out = rearrange(out, "b s h p -> b s (h p)") + if rmsnorm_weight is not None: + out = rmsnorm_fn(out, rmsnorm_weight, None, z=rearrange(z, "b l h p -> b l (h p)"), eps=rmsnorm_eps, + norm_before_gate=norm_before_gate) + if outproj_weight is not None: + out = F.linear(out, outproj_weight, outproj_bias) + return out + diff --git a/mamba/build/lib/mamba_ssm/ops/triton/ssd_state_passing.py b/mamba/build/lib/mamba_ssm/ops/triton/ssd_state_passing.py new file mode 100644 index 0000000000000000000000000000000000000000..63863b8236e1c091741c9faeb6f4a41376fc5b42 --- /dev/null +++ b/mamba/build/lib/mamba_ssm/ops/triton/ssd_state_passing.py @@ -0,0 +1,348 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
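+ # A minimal sketch (for orientation only) of the recurrence the forward kernel in this file
+ # implements, per (batch, head, state-dim) slice and assuming no initial_states and no seq_idx:
+ #   out[0] = 0
+ #   out[c] = exp(dA_chunk_cumsum[c - 1]) * out[c - 1] + states[c - 1]    for 1 <= c < nchunks
+ #   final_states = exp(dA_chunk_cumsum[nchunks - 1]) * out[nchunks - 1] + states[nchunks - 1]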
+ +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE': 64}), + triton.Config({'BLOCK_SIZE': 128}), + triton.Config({'BLOCK_SIZE': 256}), + triton.Config({'BLOCK_SIZE': 512}), + triton.Config({'BLOCK_SIZE': 1024}), + triton.Config({'BLOCK_SIZE': 2048}), + ], + key=['dim'], +) +@triton.jit +def _state_passing_fwd_kernel( + # Pointers to matrices + states_ptr, out_ptr, final_states_ptr, dA_cs_ptr, initstates_ptr, seq_idx_ptr, + # Matrix dimensions + dim, nchunks, seqlen, chunk_size, + # Strides + stride_states_batch, stride_states_chunk, stride_states_head, stride_states_dim, + stride_out_batch, stride_out_chunk, stride_out_head, stride_out_dim, + stride_final_states_batch, stride_final_states_head, stride_final_states_dim, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, + stride_initstates_batch, stride_initstates_head, stride_initstates_dim, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + HAS_INITSTATES: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + states_ptr += pid_b * stride_states_batch + pid_h * stride_states_head + dA_cs_ptr += pid_b * stride_dA_cs_batch + pid_h * stride_dA_cs_head + out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + final_states_ptr += pid_b * stride_final_states_batch + pid_h * stride_final_states_head + if HAS_INITSTATES: + initstates_ptr += pid_b * stride_initstates_batch + pid_h * stride_initstates_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + + offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + states_ptrs = states_ptr + offs_m * stride_states_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + final_states_ptrs = final_states_ptr + offs_m * stride_final_states_dim + + if not HAS_INITSTATES: + states = tl.zeros((BLOCK_SIZE, ), dtype=tl.float32) + else: + initstates_ptrs = initstates_ptr + offs_m * stride_initstates_dim + states = tl.load(initstates_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + tl.store(out_ptrs, states, mask=offs_m < dim) + out_ptrs += stride_out_chunk + seq_idx = 0 + for c in range(nchunks): + new_states = tl.load(states_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dA_cs = tl.load(dA_cs_ptr).to(tl.float32) + scale = tl.exp(dA_cs) + if HAS_SEQ_IDX: + seq_idx_new = tl.load(seq_idx_ptr + (min((c + 1) * chunk_size, seqlen) - 1) * stride_seq_idx_seqlen) + scale = tl.where(seq_idx_new == seq_idx, scale, 0.0) + seq_idx = seq_idx_new + states = scale * states + new_states + if c < nchunks - 1: + tl.store(out_ptrs, states, mask=offs_m < dim) + else: + tl.store(final_states_ptrs, states, mask=offs_m < dim) + states_ptrs += stride_states_chunk + dA_cs_ptr += stride_dA_cs_chunk + out_ptrs += stride_out_chunk + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE': 64}), + triton.Config({'BLOCK_SIZE': 128}), + triton.Config({'BLOCK_SIZE': 256}), + triton.Config({'BLOCK_SIZE': 512}), + triton.Config({'BLOCK_SIZE': 1024}), + triton.Config({'BLOCK_SIZE': 2048}), + ], + key=['dim'], +) +@triton.jit +def _state_passing_bwd_kernel( + # Pointers to matrices + dout_ptr, out_ptr, dA_cs_ptr, dfinal_states_ptr, seq_idx_ptr, + dstates_ptr, ddA_cs_ptr, dinitstates_ptr, states_converted_ptr, + # Matrix dimensions + dim, nchunks, seqlen, 
chunk_size, + # Strides + stride_dout_batch, stride_dout_chunk, stride_dout_head, stride_dout_dim, + stride_out_batch, stride_out_chunk, stride_out_head, stride_out_dim, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, + stride_dfinal_states_batch, stride_dfinal_states_head, stride_dfinal_states_dim, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_dstates_batch, stride_dstates_chunk, stride_dstates_head, stride_dstates_dim, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, + stride_dinitstates_batch, stride_dinitstates_head, stride_dinitstates_dim, + # Meta-parameters + CONVERT_STATES: tl.constexpr, + HAS_DFINAL_STATES: tl.constexpr, + HAS_DINITSTATES: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + dstates_ptr += pid_b * stride_dstates_batch + pid_h * stride_dstates_head + (nchunks - 1) * stride_dstates_chunk + dA_cs_ptr += pid_b * stride_dA_cs_batch + pid_h * stride_dA_cs_head + (nchunks - 1) * stride_dA_cs_chunk + ddA_cs_ptr += pid_b * stride_ddA_cs_batch + pid_h * stride_ddA_cs_head + (nchunks - 1) * stride_ddA_cs_chunk + pid_m + out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + (nchunks - 1) * stride_out_chunk + dout_ptr += pid_b * stride_dout_batch + pid_h * stride_dout_head + (nchunks - 1) * stride_dout_chunk + if CONVERT_STATES: + states_converted_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + (nchunks - 1) * stride_out_chunk + if HAS_DFINAL_STATES: + dfinal_states_ptr += pid_b * stride_dfinal_states_batch + pid_h * stride_dfinal_states_head + if HAS_DINITSTATES: + dinitstates_ptr += pid_b * stride_dinitstates_batch + pid_h * stride_dinitstates_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + + offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + dstates_ptrs = dstates_ptr + offs_m * stride_dstates_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + dout_ptrs = dout_ptr + offs_m * stride_dout_dim + if CONVERT_STATES: + states_converted_ptrs = states_converted_ptr + offs_m * stride_out_dim + + if HAS_DFINAL_STATES: + dstates = tl.load(dfinal_states_ptr + offs_m * stride_dfinal_states_dim, mask=offs_m < dim, other=0.0).to(tl.float32) + else: + dstates = tl.zeros((BLOCK_SIZE, ), dtype=tl.float32) + tl.store(dstates_ptrs, dstates, mask=offs_m < dim) + if HAS_SEQ_IDX: + seq_idx = tl.load(seq_idx_ptr + (seqlen - 1) * stride_seq_idx_seqlen) + dstates_ptrs -= stride_dstates_chunk + for c in range(nchunks - 1): + dA_cs = tl.load(dA_cs_ptr).to(tl.float32) + scale = tl.exp(dA_cs) + if HAS_SEQ_IDX: + seq_idx_new = tl.load(seq_idx_ptr + (((nchunks - c - 1) * chunk_size - 1) * stride_seq_idx_seqlen)) + scale = tl.where(seq_idx_new == seq_idx, scale, 0.0) + seq_idx = seq_idx_new + out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if CONVERT_STATES: + tl.store(states_converted_ptrs, out, mask=offs_m < dim) + ddA = tl.sum(out * dstates) * scale + tl.store(ddA_cs_ptr, ddA) + dout = tl.load(dout_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dstates = scale * dstates + dout + tl.store(dstates_ptrs, dstates, mask=offs_m < dim) + dout_ptrs -= stride_dout_chunk + dstates_ptrs -= stride_dstates_chunk + dA_cs_ptr -= stride_dA_cs_chunk + ddA_cs_ptr -= stride_ddA_cs_chunk + out_ptrs -= stride_out_chunk + if CONVERT_STATES: + states_converted_ptrs -= stride_out_chunk + if CONVERT_STATES: + out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + 
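        # After the reverse loop the chunk pointers sit at chunk 0, so this stores the
        # converted copy of the first chunk's states as well.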
tl.store(states_converted_ptrs, out, mask=offs_m < dim) + if not HAS_DINITSTATES: + tl.store(ddA_cs_ptr, 0.0) + else: + dA_cs = tl.load(dA_cs_ptr).to(tl.float32) + scale = tl.exp(dA_cs) + if HAS_SEQ_IDX: + scale = tl.where(seq_idx == 0, scale, 0.0) + out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + ddA = tl.sum(out * dstates) * scale + tl.store(ddA_cs_ptr, ddA) + dout = tl.load(dout_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dstates = scale * dstates + dout + tl.store(dinitstates_ptr + offs_m * stride_dinitstates_dim, dstates, mask=offs_m < dim) + + +def _state_passing_fwd(states, dA_chunk_cumsum, initial_states=None, seq_idx=None, chunk_size=None, + out_dtype=None): + batch, nchunks, nheads, dim = states.shape + assert dA_chunk_cumsum.shape == (batch, nheads, nchunks) + if initial_states is not None: + assert initial_states.shape == (batch, nheads, dim) + if seq_idx is not None: + assert chunk_size is not None + seqlen = seq_idx.shape[-1] + assert seq_idx.shape == (batch, seqlen) + out_dtype = states.dtype if out_dtype is None else out_dtype + out = torch.empty((batch, nchunks, nheads, dim), device=states.device, dtype=out_dtype) + final_states = torch.empty((batch, nheads, dim), device=states.device, dtype=torch.float32) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE']), batch, nheads) + with torch.cuda.device(states.device.index): + _state_passing_fwd_kernel[grid]( + states, out, final_states, dA_chunk_cumsum, initial_states, seq_idx, + dim, nchunks, seqlen if seq_idx is not None else 0, chunk_size if seq_idx is not None else 0, + states.stride(0), states.stride(1), states.stride(2), states.stride(3), + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + final_states.stride(0), final_states.stride(1), final_states.stride(2), + dA_chunk_cumsum.stride(0), dA_chunk_cumsum.stride(2), dA_chunk_cumsum.stride(1), + *((initial_states.stride(0), initial_states.stride(1), initial_states.stride(2)) + if initial_states is not None else (0, 0, 0)), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + HAS_INITSTATES=initial_states is not None, + HAS_SEQ_IDX=seq_idx is not None, + ) + return out, final_states + + +def _state_passing_bwd( + states, dA_chunk_cumsum, dout, dfinal_states=None, seq_idx=None, has_initial_states=None, + dstates_dtype=None, states_dtype=None, chunk_size=None +): + """ + states contains the initial_states at index 0. The final states are not included in states. 
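    Shapes (also checked by the asserts below): states and dout are (batch, nchunks, nheads, dim),
    dA_chunk_cumsum is (batch, nheads, nchunks), and dfinal_states, if given, is (batch, nheads, dim).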
+ """ + batch, nchunks, nheads, dim = states.shape + assert dA_chunk_cumsum.shape == (batch, nheads, nchunks) + assert dout.shape == (batch, nchunks, nheads, dim) + if seq_idx is not None: + assert chunk_size is not None + seqlen = seq_idx.shape[-1] + assert seq_idx.shape == (batch, seqlen) + dstates = torch.empty_like(dout, dtype=dstates_dtype if dstates_dtype is not None else dout.dtype) + if states_dtype is not None and states_dtype != states.dtype: + states_converted = torch.empty_like(states, dtype=dstates_dtype if dstates_dtype is not None else dout.dtype) + assert states_converted.stride() == states.stride() + else: + states_converted = None + if has_initial_states: + dinitstates = torch.empty_like(dstates[:, 0]) + else: + dinitstates = None + if dfinal_states is not None: + assert dfinal_states.shape == (batch, nheads, dim) + BLOCK_SIZE_min = 64 + n_blocks = (dim + BLOCK_SIZE_min - 1) // BLOCK_SIZE_min + ddA_chunk_cumsum = torch.empty(batch, nheads, nchunks, n_blocks, + dtype=torch.float32, device=dA_chunk_cumsum.device) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE']), batch, nheads) + with torch.cuda.device(dout.device.index): + _state_passing_bwd_kernel[grid]( + dout, states, dA_chunk_cumsum, dfinal_states, seq_idx, + dstates, ddA_chunk_cumsum, dinitstates, states_converted, + dim, nchunks, seqlen if seq_idx is not None else 0, chunk_size if seq_idx is not None else 0, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), + dA_chunk_cumsum.stride(0), dA_chunk_cumsum.stride(2), dA_chunk_cumsum.stride(1), + *((dfinal_states.stride(0), dfinal_states.stride(1), dfinal_states.stride(2)) + if dfinal_states is not None else (0, 0, 0)), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), + ddA_chunk_cumsum.stride(0), ddA_chunk_cumsum.stride(2), ddA_chunk_cumsum.stride(1), + *((dinitstates.stride(0), dinitstates.stride(1), dinitstates.stride(2)) + if dinitstates is not None else (0, 0, 0)), + CONVERT_STATES=states_converted is not None, + HAS_DFINAL_STATES=dfinal_states is not None, + HAS_DINITSTATES=dinitstates is not None, + HAS_SEQ_IDX=seq_idx is not None, + ) + BLOCK_SIZE_actual = _state_passing_bwd_kernel.best_config.kwargs["BLOCK_SIZE"] + n_valid_blocks = (dim + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + ddA_chunk_cumsum = ddA_chunk_cumsum[..., :n_valid_blocks].sum(dim=-1).to(dtype=dA_chunk_cumsum.dtype) + if states_dtype is not None and states_dtype == states.dtype: + states_converted = states + return (dstates, ddA_chunk_cumsum, dinitstates) if states_dtype is None else (dstates, ddA_chunk_cumsum, dinitstates, states_converted) + + +class StatePassingFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, states, dA_chunk_cumsum, initial_states=None): + batch, nchunks, nheads, dim = states.shape + assert dA_chunk_cumsum.shape == (batch, nheads, nchunks) + if states.stride(-1) != 1: + states = states.contiguous() + out, final_states = _state_passing_fwd(states, dA_chunk_cumsum, initial_states) + ctx.save_for_backward(out, dA_chunk_cumsum) + ctx.has_initial_states = initial_states is not None + return out, final_states + + @staticmethod + def backward(ctx, dout, dfinal_states): + out, dA_chunk_cumsum = ctx.saved_tensors + batch, nchunks, nheads, dim = out.shape + assert dout.shape == (batch, nchunks, nheads, dim) + assert dA_chunk_cumsum.shape == (batch, nheads, 
nchunks) + assert dfinal_states.shape == (batch, nheads, dim) + if dout.stride(-1) != 1: + dout = dout.contiguous() + dstates, ddA_chunk_cumsum, dinitstates = _state_passing_bwd( + out, dA_chunk_cumsum, dout, dfinal_states=dfinal_states , has_initial_states=ctx.has_initial_states + ) + return dstates, ddA_chunk_cumsum, dinitstates + + +def state_passing(states, dA_chunk_cumsum, initial_states=None): + """ + Argument: + states: (batch, nchunks, nheads, dim) + dA_chunk_cumsum: (batch, nheads, nchunks) + initial_states: (batch, nheads, dim) + Return: + out: (batch, nchunks, nheads, dim) + final_states: (batch, nheads, dim) + """ + return StatePassingFn.apply(states, dA_chunk_cumsum, initial_states) + + +def state_passing_ref(states, dA_chunk_cumsum, initial_states=None): + """ + Argument: + states: (batch, nchunks, nheads, dim) + dA_chunk_cumsum: (batch, nheads, nchunks) + initial_states: (batch, nheads, dim) + Return: + out: (batch, nchunks, nheads, dim) + final_states: (batch, nheads, dim) + """ + if initial_states is None: + initial_states = torch.zeros_like(states[:, 0]) + states = torch.cat([rearrange(initial_states, "b h d -> b 1 h d"), states], dim=1) + dA_chunk_cumsum = F.pad(dA_chunk_cumsum, (1, 0)) + dA_chunk_cumsum = torch.cumsum(dA_chunk_cumsum, dim=-1) + nchunks = dA_chunk_cumsum.shape[-1] + # (batch, nheads, nchunks, nchunks) + dt_chunk_segment_sum = dA_chunk_cumsum[:, :, :, None] - dA_chunk_cumsum[:, :, None, :] + # (batch, nheads, nchunks, nchunks) + decay_chunk = torch.exp(dt_chunk_segment_sum) + causal_mask = torch.tril(torch.ones(nchunks, nchunks, device=states.device, dtype=bool), diagonal=0) + decay_chunk = decay_chunk.masked_fill(~causal_mask, 0) + out = torch.einsum("bhzc,bchd->bzhd", decay_chunk.to(dtype=states.dtype), states) + return out[:, :-1], out[:, -1] diff --git a/mamba/build/lib/mamba_ssm/utils/__init__.py b/mamba/build/lib/mamba_ssm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/build/lib/mamba_ssm/utils/generation.py b/mamba/build/lib/mamba_ssm/utils/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..74abead94003bdbdeee954ca3e4f4f38ea7ac9fc --- /dev/null +++ b/mamba/build/lib/mamba_ssm/utils/generation.py @@ -0,0 +1,387 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. 
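# A loop-form sketch of the chunk-to-chunk recurrence implemented by _state_passing_fwd /
# state_passing_ref in ssd_state_passing.py above (illustrative only; equivalent under the
# shapes documented there):
#
#     import torch
#
#     def state_passing_loop(states, dA_chunk_cumsum, initial_states=None):
#         # states: (b, c, h, d), dA_chunk_cumsum: (b, h, c), initial_states: (b, h, d)
#         b, c, h, d = states.shape
#         state = torch.zeros(b, h, d, dtype=states.dtype, device=states.device) \
#             if initial_states is None else initial_states
#         outs = []
#         for i in range(c):
#             outs.append(state)                                        # state entering chunk i
#             decay = torch.exp(dA_chunk_cumsum[:, :, i])[..., None]    # (b, h, 1)
#             state = decay * state + states[:, i]                      # state leaving chunk i
#         return torch.stack(outs, dim=1), state    # out: (b, c, h, d), final_states: (b, h, d)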
+import gc +import time +from collections import namedtuple +from dataclasses import dataclass, field +from functools import partial +from typing import Callable, Optional, Sequence, Union + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat +from torch import Tensor +from torch.profiler import ProfilerActivity, profile, record_function +from transformers.generation import GreedySearchDecoderOnlyOutput, SampleDecoderOnlyOutput, TextStreamer + + +@dataclass +class InferenceParams: + """Inference parameters that are passed to the main model in order + to efficienly calculate and store the context during inference.""" + + max_seqlen: int + max_batch_size: int + seqlen_offset: int = 0 + batch_size_offset: int = 0 + key_value_memory_dict: dict = field(default_factory=dict) + lengths_per_sample: Optional[Tensor] = None + + def reset(self, max_seqlen, max_batch_size): + self.max_seqlen = max_seqlen + self.max_batch_size = max_batch_size + self.seqlen_offset = 0 + if self.lengths_per_sample is not None: + self.lengths_per_sample.zero_() + + +def modify_logits_for_min_p_filtering(logits, min_p): + """Set the logits for none min_p values to -inf. Done in-place.""" + if min_p <= 0.0 or min_p >= 1.0: + return + indices_to_remove = logits < min_p + logits.masked_fill_(indices_to_remove, float("-Inf")) +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L231 +def modify_logits_for_top_k_filtering(logits, top_k): + """Set the logits for none top-k values to -inf. Done in-place.""" + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits.masked_fill_(indices_to_remove, float("-Inf")) + + +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L170 +def modify_logits_for_top_p_filtering(logits, top_p): + """Set the logits for none top-p values to -inf. Done in-place.""" + if top_p <= 0.0 or top_p >= 1.0: + return + # First sort and calculate cumulative sum of probabilities. + sorted_logits, sorted_indices = torch.sort(logits, descending=False) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs <= (1 - top_p) + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter( + 1, sorted_indices, sorted_indices_to_remove + ) + logits.masked_fill_(indices_to_remove, float("-inf")) + + +def modify_logit_for_repetition_penalty(logits, prev_output_tokens, repetition_penalty=1.0): + """Apply repetition penalty. 
See https://arxiv.org/abs/1909.05858 + logits: (batch_size, vocab_size) + prev_output_tokens: (batch_size, seq_len) + """ + if repetition_penalty == 1.0: + return logits + score = torch.gather(logits, 1, prev_output_tokens) + # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability + score = torch.where(score < 0, score * repetition_penalty, score / repetition_penalty) + logits.scatter_(1, prev_output_tokens, score) + return logits + + +def sample(logits, top_k=1, top_p=0.0, min_p=0.0, temperature=1.0): + """Sample from top-k logits. + Arguments: + logits: Tensor of shape (batch_size, vocab_size) + """ + if top_k == 1: # Short-circuit for greedy decoding + return logits.argmax(dim=-1) + else: + if top_p > 0.0: + assert top_p <= 1.0, "top-p should be in (0, 1]." + if top_k > 0: + top_k = min(top_k, logits.size(-1)) # Safety check + logits_top, indices = torch.topk(logits, top_k, dim=-1) + if temperature != 1.0: + logits_top /= temperature + modify_logits_for_top_p_filtering(logits_top, top_p) + return indices[ + torch.arange(indices.shape[0], device=indices.device), + torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1), + ] + else: + if min_p > 0.0: + logits_top = logits.clone() + max_prob = logits_top[..., 0].item() + min_prob = max_prob * min_p + modify_logits_for_min_p_filtering(logits_top, min_prob) + if temperature != 1.0: + logits_top /= temperature + return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1) + # Clone so that when we modify for top_p we don't change the original logits + logits_top = logits / temperature if temperature != 1.0 else logits.clone() + modify_logits_for_top_p_filtering(logits_top, top_p) + return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze( + dim=-1 + ) + + +@torch.inference_mode() +def decode( + input_ids, + model, + max_length, + top_k=1, + top_p=0.0, + min_p=0.0, + temperature=1.0, + repetition_penalty=1.0, + eos_token_id=None, + teacher_outputs=None, + vocab_size=None, + cg=False, + enable_timing=False, + streamer: Optional[TextStreamer] = None +): + """Decoding, either greedy or with top-k or top-p sampling. + If top-k = 0, don't limit the number of candidates (pure sampling). + Top-k and top-p can be used together. If top_k > 0 and top_p > 0, then top-k is applied first, + then top-p. + We assume that all sequences in the same batch have the same length. + + Arguments: + input_ids: (batch, seq_len) + max_length: int + teacher_outputs (optional): (batch, seq_len). If provided, instead of sampling from the + logits, the next token is taken from the teacher_outputs. Useful for testing. 
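        eos_token_id (optional): int. Decoding stops early once every sequence in the batch
            has produced this token.
        cg: bool. If True, capture the single-token decoding step in a CUDA graph and replay
            it (see update_graph_cache / capture_graph below).
        streamer (optional): a transformers TextStreamer; prompt and generated tokens are
            pushed to it as they are produced.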
+ Returns: GreedySearchDecoderOnlyOutput or SampleDecoderOnlyOutput, with the following fields: + sequences: (batch, max_length) + scores: tuples of (batch, vocab_size) + """ + if streamer is not None: + streamer.put(input_ids.cpu()) + + batch_size, seqlen_og = input_ids.shape + teacher_output_len = teacher_outputs.shape[1] if teacher_outputs is not None else 0 + if cg: + if not hasattr(model, "_decoding_cache"): + model._decoding_cache = None + model._decoding_cache = update_graph_cache( + model, + model._decoding_cache, + batch_size, + seqlen_og, + max_length, + ) + inference_params = model._decoding_cache.inference_params + inference_params.reset(max_length, batch_size) + else: + inference_params = InferenceParams(max_seqlen=max_length, max_batch_size=batch_size) + + def get_logits(input_ids, inference_params): + decoding = inference_params.seqlen_offset > 0 + if decoding: + position_ids = torch.full( + (batch_size, 1), + inference_params.seqlen_offset, + dtype=torch.long, + device=input_ids.device, + ) + else: + position_ids = None + if not cg or not decoding: + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=1, + ).logits.squeeze(dim=1) + else: + logits = model._decoding_cache.run( + input_ids, position_ids, inference_params.seqlen_offset + ).squeeze(dim=1) + return logits[..., :vocab_size] if vocab_size is not None else logits + + def sample_tokens(logits, inference_params): + if teacher_outputs is None or teacher_output_len <= inference_params.seqlen_offset: + token = sample(logits, top_k=top_k, top_p=top_p, min_p=min_p, temperature=temperature) + else: + token = teacher_outputs[:, inference_params.seqlen_offset] + # return rearrange(token, "b -> b 1") + return token.unsqueeze(1) + + def should_stop(current_token, inference_params): + if inference_params.seqlen_offset == 0: + return False + if eos_token_id is not None and (current_token == eos_token_id).all(): + return True + if inference_params.seqlen_offset >= max_length - 1: + return True + return False + + start = torch.cuda.Event(enable_timing=enable_timing) + end = torch.cuda.Event(enable_timing=enable_timing) + + if enable_timing: + start.record() + scores, sequences = [], [input_ids] + sequences_cat = input_ids + while not should_stop(sequences[-1], inference_params): + scores.append(get_logits(sequences[-1], inference_params)) + inference_params.seqlen_offset += sequences[-1].shape[1] + if repetition_penalty == 1.0: + sampled_tokens = sample_tokens(scores[-1], inference_params) + else: + logits = modify_logit_for_repetition_penalty( + scores[-1].clone(), sequences_cat, repetition_penalty + ) + sampled_tokens = sample_tokens(logits, inference_params) + sequences_cat = torch.cat([sequences_cat, sampled_tokens], dim=1) + sequences.append(sampled_tokens) + if streamer is not None: + streamer.put(sampled_tokens.cpu()) + if streamer is not None: + streamer.end() + if enable_timing: + end.record() + torch.cuda.synchronize() + print(f"Prompt processing + decoding time: {(start.elapsed_time(end)):.0f}ms") + output_cls = GreedySearchDecoderOnlyOutput if top_k == 1 else SampleDecoderOnlyOutput + return output_cls(sequences=torch.cat(sequences, dim=1), scores=tuple(scores)) + + +class GenerationMixin: + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + raise NotImplementedError + + def generate( + self, + input_ids, + max_length, + top_k=1, + top_p=0.0, + min_p=0.0, + temperature=1.0, + return_dict_in_generate=False, + 
output_scores=False, + **kwargs, + ): + output = decode( + input_ids, self, max_length, top_k=top_k, top_p=top_p, min_p = min_p, temperature=temperature, **kwargs + ) + if not output_scores: + output.scores = None + return output if return_dict_in_generate else output.sequences + + +@dataclass +class DecodingCGCache: + max_batch_size: int = 0 + max_seqlen: int = 0 + device = None + dtype = None + callables: dict = field(default_factory=dict) + mempool = None + inference_params: Optional[InferenceParams] = None + run: Optional[Callable] = None + + +@torch.inference_mode() +def update_graph_cache( + model, + cache, + batch_size, + seqlen_og, + max_seqlen, + decoding_seqlens=(1,), + dtype=None, + n_warmups=2, +): + if cache is None: + cache = DecodingCGCache() + param_example = next(iter(model.parameters())) + device = param_example.device + if dtype is None: + dtype = param_example.dtype + if ( + (device, dtype) != (cache.device, cache.dtype) + or batch_size > cache.max_batch_size + or max_seqlen > cache.max_seqlen + ): # Invalidate the cache + cache.callables = {} + cache.mempool = None + cache.inference_params = None + gc.collect() + cache.device, cache.dtype = device, dtype + cache.max_batch_size, cache.max_seqlen = batch_size, max_seqlen + assert hasattr(model, "allocate_inference_cache"), "CUDA graph decoding requires that the model has a method allocate_inference_cache" + inf_cache = model.allocate_inference_cache(batch_size, max_seqlen, dtype) + lengths_per_sample = torch.full((batch_size,), seqlen_og, dtype=torch.int32, device=device) + cache.inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + seqlen_offset=seqlen_og, + key_value_memory_dict=inf_cache, + lengths_per_sample=lengths_per_sample, + ) + cache.mempool = torch.cuda.graphs.graph_pool_handle() + for decoding_seqlen in decoding_seqlens: + if (batch_size, decoding_seqlen) not in cache.callables: + cache.callables[batch_size, decoding_seqlen] = capture_graph( + model, + cache.inference_params, + batch_size, + max_seqlen, + decoding_seqlen=decoding_seqlen, + mempool=cache.mempool, + n_warmups=n_warmups, + ) + + def dispatch(input_ids, position_ids, seqlen): + batch_size, decoding_seqlen = input_ids.shape[:2] + return cache.callables[batch_size, decoding_seqlen](input_ids, position_ids, seqlen) + + cache.run = dispatch + cache.inference_params.seqlen_offset = 0 # Reset so it's not confusing + return cache + + +def capture_graph( + model, inference_params, batch_size, max_seqlen, decoding_seqlen=1, mempool=None, n_warmups=2 +): + device = next(iter(model.parameters())).device + input_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + position_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + seqlen_offset_og = inference_params.seqlen_offset + inference_params.seqlen_offset = max_seqlen - decoding_seqlen + inference_params.lengths_per_sample[:] = inference_params.seqlen_offset + + # Warmup before capture + s = torch.cuda.Stream() + s.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(s): + for _ in range(n_warmups): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + s.synchronize() + # This might be needed for correctness if we run with NCCL_GRAPH_MIXING_SUPPORT=0, + # which requires that graph launch and non-captured launch to not overlap (I think, + # that's how I interpret the documentation). 
I'm not sure if this is required. + if torch.distributed.is_initialized(): + torch.distributed.barrier() + torch.cuda.current_stream().wait_stream(s) + # Captures the graph + # To allow capture, automatically sets a side stream as the current stream in the context + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, pool=mempool): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + + def run(new_input_ids, new_position_ids, seqlen): + inference_params.lengths_per_sample[:] = seqlen + input_ids.copy_(new_input_ids) + position_ids.copy_(new_position_ids) + graph.replay() + return logits.clone() + + inference_params.seqlen_offset = seqlen_offset_og + return run diff --git a/mamba/build/lib/mamba_ssm/utils/hf.py b/mamba/build/lib/mamba_ssm/utils/hf.py new file mode 100644 index 0000000000000000000000000000000000000000..0d7555acddbd260636d1d14d5bd6324f6af0056a --- /dev/null +++ b/mamba/build/lib/mamba_ssm/utils/hf.py @@ -0,0 +1,23 @@ +import json + +import torch + +from transformers.utils import WEIGHTS_NAME, CONFIG_NAME +from transformers.utils.hub import cached_file + + +def load_config_hf(model_name): + resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False) + return json.load(open(resolved_archive_file)) + + +def load_state_dict_hf(model_name, device=None, dtype=None): + # If not fp32, then we don't want to load directly to the GPU + mapped_device = "cpu" if dtype not in [torch.float32, None] else device + resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False) + return torch.load(resolved_archive_file, map_location=mapped_device) + # Convert dtype before moving to GPU to save memory + if dtype is not None: + state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()} + state_dict = {k: v.to(device=device) for k, v in state_dict.items()} + return state_dict diff --git a/mamba/csrc/selective_scan/reverse_scan.cuh b/mamba/csrc/selective_scan/reverse_scan.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d19397879bda6a197ad2d62e6db8120d89a30fd5 --- /dev/null +++ b/mamba/csrc/selective_scan/reverse_scan.cuh @@ -0,0 +1,415 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +#ifndef USE_ROCM + #include + + #include + #include + #include + // #include +#else + #include + namespace cub = hipcub; +#endif +#include "uninitialized_copy.cuh" + +/** + * Perform a reverse sequential reduction over \p LENGTH elements of the \p input array. The aggregate is returned. + */ +template < + int LENGTH, + typename T, + typename ReductionOp> +__device__ __forceinline__ T ThreadReverseReduce(const T (&input)[LENGTH], ReductionOp reduction_op) { + static_assert(LENGTH > 0); + T retval = input[LENGTH - 1]; + #pragma unroll + for (int i = LENGTH - 2; i >= 0; --i) { retval = reduction_op(retval, input[i]); } + return retval; +} + +/** + * Perform a sequential inclusive postfix reverse scan over the statically-sized \p input array, seeded with the specified \p postfix. The aggregate is returned. 
+ */ +template < + int LENGTH, + typename T, + typename ScanOp> +__device__ __forceinline__ T ThreadReverseScanInclusive( + const T (&input)[LENGTH], + T (&output)[LENGTH], + ScanOp scan_op, + const T postfix) +{ + T inclusive = postfix; + #pragma unroll + for (int i = LENGTH - 1; i >= 0; --i) { + inclusive = scan_op(inclusive, input[i]); + output[i] = inclusive; + } + return inclusive; +} + +/** + * Perform a sequential exclusive postfix reverse scan over the statically-sized \p input array, seeded with the specified \p postfix. The aggregate is returned. + */ +template < + int LENGTH, + typename T, + typename ScanOp> +__device__ __forceinline__ T ThreadReverseScanExclusive( + const T (&input)[LENGTH], + T (&output)[LENGTH], + ScanOp scan_op, + const T postfix) +{ + // Careful, output maybe be aliased to input + T exclusive = postfix; + T inclusive; + #pragma unroll + for (int i = LENGTH - 1; i >= 0; --i) { + inclusive = scan_op(exclusive, input[i]); + output[i] = exclusive; + exclusive = inclusive; + } + return inclusive; +} + + +/** + * \brief WarpReverseScan provides SHFL-based variants of parallel postfix scan of items partitioned across a CUDA thread warp. + * + * LOGICAL_WARP_THREADS must be a power-of-two + */ +template < + typename T, ///< Data type being scanned + int LOGICAL_WARP_THREADS ///< Number of threads per logical warp + > +struct WarpReverseScan { + //--------------------------------------------------------------------- + // Constants and type definitions + //--------------------------------------------------------------------- + + /// Whether the logical warp size and the PTX warp size coincide + + // In hipcub, warp_threads is defined as HIPCUB_WARP_THREADS ::rocprim::warp_size() + // While in cub, it's defined as a macro that takes a redundant unused argument. + #ifndef USE_ROCM + #define WARP_THREADS CUB_WARP_THREADS(0) + #else + #define WARP_THREADS HIPCUB_WARP_THREADS + #endif + static constexpr bool IS_ARCH_WARP = (LOGICAL_WARP_THREADS == WARP_THREADS); + /// The number of warp scan steps + static constexpr int STEPS = cub::Log2::VALUE; + static_assert(LOGICAL_WARP_THREADS == 1 << STEPS); + + + //--------------------------------------------------------------------- + // Thread fields + //--------------------------------------------------------------------- + + /// Lane index in logical warp + unsigned int lane_id; + + /// Logical warp index in 32-thread physical warp + unsigned int warp_id; + + /// 32-thread physical warp member mask of logical warp + unsigned int member_mask; + + //--------------------------------------------------------------------- + // Construction + //--------------------------------------------------------------------- + + /// Constructor + explicit __device__ __forceinline__ + WarpReverseScan() + : lane_id(cub::LaneId()) + , warp_id(IS_ARCH_WARP ? 0 : (lane_id / LOGICAL_WARP_THREADS)) + , member_mask(cub::WarpMask(warp_id)) + { + if (!IS_ARCH_WARP) { + lane_id = lane_id % LOGICAL_WARP_THREADS; + } + } + + + /// Broadcast + __device__ __forceinline__ T Broadcast( + T input, ///< [in] The value to broadcast + int src_lane) ///< [in] Which warp lane is to do the broadcasting + { + return cub::ShuffleIndex(input, src_lane, member_mask); + } + + + /// Inclusive scan + template + __device__ __forceinline__ void InclusiveReverseScan( + T input, ///< [in] Calling thread's input item. + T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. 
+ ScanOpT scan_op) ///< [in] Binary scan operator + { + inclusive_output = input; + #pragma unroll + for (int STEP = 0; STEP < STEPS; STEP++) { + int offset = 1 << STEP; + T temp = cub::ShuffleDown( + inclusive_output, offset, LOGICAL_WARP_THREADS - 1, member_mask + ); + // Perform scan op if from a valid peer + inclusive_output = static_cast(lane_id) >= LOGICAL_WARP_THREADS - offset + ? inclusive_output : scan_op(temp, inclusive_output); + } + } + + /// Exclusive scan + // Get exclusive from inclusive + template + __device__ __forceinline__ void ExclusiveReverseScan( + T input, ///< [in] Calling thread's input item. + T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. + ScanOpT scan_op, ///< [in] Binary scan operator + T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. + { + T inclusive_output; + InclusiveReverseScan(input, inclusive_output, scan_op); + warp_aggregate = cub::ShuffleIndex(inclusive_output, 0, member_mask); + // initial value unknown + exclusive_output = cub::ShuffleDown( + inclusive_output, 1, LOGICAL_WARP_THREADS - 1, member_mask + ); + } + + /** + * \brief Computes both inclusive and exclusive reverse scans using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p exclusive_output computed for the last warp-lane is undefined. + */ + template + __device__ __forceinline__ void ReverseScan( + T input, ///< [in] Calling thread's input item. + T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. + T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. + ScanOpT scan_op) ///< [in] Binary scan operator + { + InclusiveReverseScan(input, inclusive_output, scan_op); + // initial value unknown + exclusive_output = cub::ShuffleDown( + inclusive_output, 1, LOGICAL_WARP_THREADS - 1, member_mask + ); + } + +}; + +/** + * \brief BlockReverseScan provides variants of raking-based parallel postfix scan across a CUDA thread block. 
+ */ +template < + typename T, ///< Data type being scanned + int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension + bool MEMOIZE=false ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure + > +struct BlockReverseScan { + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// Constants + /// The thread block size in threads + static constexpr int BLOCK_THREADS = BLOCK_DIM_X; + + /// Layout type for padded thread block raking grid + using BlockRakingLayout = cub::BlockRakingLayout; + // The number of reduction elements is not a multiple of the number of raking threads for now + static_assert(BlockRakingLayout::UNGUARDED); + + /// Number of raking threads + static constexpr int RAKING_THREADS = BlockRakingLayout::RAKING_THREADS; + /// Number of raking elements per warp synchronous raking thread + static constexpr int SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH; + /// Cooperative work can be entirely warp synchronous + static constexpr bool WARP_SYNCHRONOUS = (int(BLOCK_THREADS) == int(RAKING_THREADS)); + + /// WarpReverseScan utility type + using WarpReverseScan = WarpReverseScan; + + /// Shared memory storage layout type + struct _TempStorage { + typename BlockRakingLayout::TempStorage raking_grid; ///< Padded thread block raking grid + }; + + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : cub::Uninitialized<_TempStorage> {}; + + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + // Thread fields + _TempStorage &temp_storage; + unsigned int linear_tid; + T cached_segment[SEGMENT_LENGTH]; + + + //--------------------------------------------------------------------- + // Utility methods + //--------------------------------------------------------------------- + + /// Performs upsweep raking reduction, returning the aggregate + template + __device__ __forceinline__ T Upsweep(ScanOp scan_op) { + T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + // Read data into registers + #pragma unroll + for (int i = 0; i < SEGMENT_LENGTH; ++i) { cached_segment[i] = smem_raking_ptr[i]; } + T raking_partial = cached_segment[SEGMENT_LENGTH - 1]; + #pragma unroll + for (int i = SEGMENT_LENGTH - 2; i >= 0; --i) { + raking_partial = scan_op(raking_partial, cached_segment[i]); + } + return raking_partial; + } + + + /// Performs exclusive downsweep raking scan + template + __device__ __forceinline__ void ExclusiveDownsweep( + ScanOp scan_op, + T raking_partial) + { + T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + // Read data back into registers + if (!MEMOIZE) { + #pragma unroll + for (int i = 0; i < SEGMENT_LENGTH; ++i) { cached_segment[i] = smem_raking_ptr[i]; } + } + ThreadReverseScanExclusive(cached_segment, cached_segment, scan_op, raking_partial); + // Write data back to smem + #pragma unroll + for (int i = 0; i < SEGMENT_LENGTH; ++i) { smem_raking_ptr[i] = cached_segment[i]; } + } + + + //--------------------------------------------------------------------- + // Constructors + //--------------------------------------------------------------------- + + /// Constructor + __device__ __forceinline__ BlockReverseScan( + TempStorage &temp_storage) 
+ : + temp_storage(temp_storage.Alias()), + linear_tid(cub::RowMajorTid(BLOCK_DIM_X, 1, 1)) + {} + + + /// Computes an exclusive thread block-wide postfix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_postfix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically postfixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. + template < + typename ScanOp, + typename BlockPostfixCallbackOp> + __device__ __forceinline__ void ExclusiveReverseScan( + T input, ///< [in] Calling thread's input item + T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) + ScanOp scan_op, ///< [in] Binary scan operator + BlockPostfixCallbackOp &block_postfix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a thread block-wide postfix to be applied to all inputs. + { + if (WARP_SYNCHRONOUS) { + // Short-circuit directly to warp-synchronous scan + T block_aggregate; + WarpReverseScan warp_scan; + warp_scan.ExclusiveReverseScan(input, exclusive_output, scan_op, block_aggregate); + // Obtain warp-wide postfix in lane0, then broadcast to other lanes + T block_postfix = block_postfix_callback_op(block_aggregate); + block_postfix = warp_scan.Broadcast(block_postfix, 0); + exclusive_output = linear_tid == BLOCK_THREADS - 1 ? block_postfix : scan_op(block_postfix, exclusive_output); + } else { + // Place thread partial into shared memory raking grid + T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy(placement_ptr, input); + cub::CTA_SYNC(); + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) { + WarpReverseScan warp_scan; + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + // Warp-synchronous scan + T exclusive_partial, block_aggregate; + warp_scan.ExclusiveReverseScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); + // Obtain block-wide postfix in lane0, then broadcast to other lanes + T block_postfix = block_postfix_callback_op(block_aggregate); + block_postfix = warp_scan.Broadcast(block_postfix, 0); + // Update postfix with warpscan exclusive partial + T downsweep_postfix = linear_tid == RAKING_THREADS - 1 + ? block_postfix : scan_op(block_postfix, exclusive_partial); + // Exclusive raking downsweep scan + ExclusiveDownsweep(scan_op, downsweep_postfix); + } + cub::CTA_SYNC(); + // Grab thread postfix from shared memory + exclusive_output = *placement_ptr; + + // // Compute warp scan in each warp. + // // The exclusive output from the last lane in each warp is invalid. + // T inclusive_output; + // WarpReverseScan warp_scan; + // warp_scan.ReverseScan(input, inclusive_output, exclusive_output, scan_op); + + // // Compute the warp-wide postfix and block-wide aggregate for each warp. Warp postfix for the last warp is invalid. 
+ // T block_aggregate; + // T warp_postfix = ComputeWarpPostfix(scan_op, inclusive_output, block_aggregate); + + // // Apply warp postfix to our lane's partial + // if (warp_id != 0) { + // exclusive_output = scan_op(warp_postfix, exclusive_output); + // if (lane_id == 0) { exclusive_output = warp_postfix; } + // } + + // // Use the first warp to determine the thread block postfix, returning the result in lane0 + // if (warp_id == 0) { + // T block_postfix = block_postfix_callback_op(block_aggregate); + // if (lane_id == 0) { + // // Share the postfix with all threads + // detail::uninitialized_copy(&temp_storage.block_postfix, + // block_postfix); + + // exclusive_output = block_postfix; // The block postfix is the exclusive output for tid0 + // } + // } + + // cub::CTA_SYNC(); + + // // Incorporate thread block postfix into outputs + // T block_postfix = temp_storage.block_postfix; + // if (linear_tid > 0) { exclusive_output = scan_op(block_postfix, exclusive_output); } + } + } + + + /** + * \brief Computes an inclusive block-wide postfix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. the call-back functor \p block_postfix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically postfixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. + */ + template < + int ITEMS_PER_THREAD, + typename ScanOp, + typename BlockPostfixCallbackOp> + __device__ __forceinline__ void InclusiveReverseScan( + T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items + T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) + ScanOp scan_op, ///< [in] Binary scan functor + BlockPostfixCallbackOp &block_postfix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide postfix to be applied to the logical input sequence. + { + // Reduce consecutive thread items in registers + T thread_postfix = ThreadReverseReduce(input, scan_op); + // Exclusive thread block-scan + ExclusiveReverseScan(thread_postfix, thread_postfix, scan_op, block_postfix_callback_op); + // Inclusive scan in registers with postfix as seed + ThreadReverseScanInclusive(input, output, scan_op, thread_postfix); + } + +}; \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan.cpp b/mamba/csrc/selective_scan/selective_scan.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cde867cd32d39b5a5c222dffc6b1e65bb191979c --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan.cpp @@ -0,0 +1,497 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include +#include + +#include "selective_scan.h" + +#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")") + +#define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) 
\ + if (ITYPE == at::ScalarType::Half) { \ + using input_t = at::Half; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::BFloat16) { \ + using input_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::Float) { \ + using input_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) \ + if (WTYPE == at::ScalarType::Half) { \ + using weight_t = at::Half; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::BFloat16) { \ + using weight_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_COMPLEX(WTYPE, NAME, ...) \ + if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::ComplexFloat) { \ + using weight_t = c10::complex; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +template +void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); + +template +void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); + +void set_ssm_params_fwd(SSMParamsBase ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t dstate, + const size_t n_groups, + const size_t n_chunks, + const bool is_variable_B, + const bool is_variable_C, + // device pointers + const at::Tensor u, + const at::Tensor delta, + const at::Tensor A, + const at::Tensor B, + const at::Tensor C, + const at::Tensor out, + const at::Tensor z, + const at::Tensor out_z, + void* D_ptr, + void* delta_bias_ptr, + void* x_ptr, + bool has_z, + bool delta_softplus) { + + // Reset the parameters + memset(¶ms, 0, sizeof(params)); + + params.batch = batch; + params.dim = dim; + params.seqlen = seqlen; + params.dstate = dstate; + params.n_groups = n_groups; + params.n_chunks = n_chunks; + params.dim_ngroups_ratio = dim / n_groups; + + params.delta_softplus = delta_softplus; + + params.is_variable_B = is_variable_B; + params.is_variable_C = is_variable_C; + + // Set the pointers and strides. + params.u_ptr = u.data_ptr(); + params.delta_ptr = delta.data_ptr(); + params.A_ptr = A.data_ptr(); + params.B_ptr = B.data_ptr(); + params.C_ptr = C.data_ptr(); + params.D_ptr = D_ptr; + params.delta_bias_ptr = delta_bias_ptr; + params.out_ptr = out.data_ptr(); + params.x_ptr = x_ptr; + params.z_ptr = has_z ? z.data_ptr() : nullptr; + params.out_z_ptr = has_z ? out_z.data_ptr() : nullptr; + // All stride are in elements, not bytes. + params.A_d_stride = A.stride(0); + params.A_dstate_stride = A.stride(1); + if (!is_variable_B) { + params.B_d_stride = B.stride(0); + } else { + params.B_batch_stride = B.stride(0); + params.B_group_stride = B.stride(1); + } + params.B_dstate_stride = !is_variable_B ? B.stride(1) : B.stride(2); + if (!is_variable_C) { + params.C_d_stride = C.stride(0); + } else { + params.C_batch_stride = C.stride(0); + params.C_group_stride = C.stride(1); + } + params.C_dstate_stride = !is_variable_C ? 
C.stride(1) : C.stride(2); + params.u_batch_stride = u.stride(0); + params.u_d_stride = u.stride(1); + params.delta_batch_stride = delta.stride(0); + params.delta_d_stride = delta.stride(1); + if (has_z) { + params.z_batch_stride = z.stride(0); + params.z_d_stride = z.stride(1); + params.out_z_batch_stride = out_z.stride(0); + params.out_z_d_stride = out_z.stride(1); + } + params.out_batch_stride = out.stride(0); + params.out_d_stride = out.stride(1); +} + +void set_ssm_params_bwd(SSMParamsBwd ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t dstate, + const size_t n_groups, + const size_t n_chunks, + const bool is_variable_B, + const bool is_variable_C, + // device pointers + const at::Tensor u, + const at::Tensor delta, + const at::Tensor A, + const at::Tensor B, + const at::Tensor C, + const at::Tensor z, + const at::Tensor out, + const at::Tensor out_z, + void* D_ptr, + void* delta_bias_ptr, + void* x_ptr, + const at::Tensor dout, + const at::Tensor du, + const at::Tensor ddelta, + const at::Tensor dA, + const at::Tensor dB, + const at::Tensor dC, + const at::Tensor dz, + void* dD_ptr, + void* ddelta_bias_ptr, + bool has_z, + bool delta_softplus, + bool recompute_out_z) { + // Pass in "dout" instead of "out", we're not gonna use "out" unless we have z + set_ssm_params_fwd(params, batch, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, has_z ? out : dout, + has_z ? z : dout, + // If not recompute_out_z, pass dout instead of out_z. + // This won't be used by the bwd kernel + recompute_out_z ? out_z : dout, + D_ptr, delta_bias_ptr, x_ptr, has_z, delta_softplus); + if (!recompute_out_z) { params.out_z_ptr = nullptr; } + + // Set the pointers and strides. + params.dout_ptr = dout.data_ptr(); + params.du_ptr = du.data_ptr(); + params.dA_ptr = dA.data_ptr(); + params.dB_ptr = dB.data_ptr(); + params.dC_ptr = dC.data_ptr(); + params.dD_ptr = dD_ptr; + params.ddelta_ptr = ddelta.data_ptr(); + params.ddelta_bias_ptr = ddelta_bias_ptr; + params.dz_ptr = has_z ? dz.data_ptr() : nullptr; + // All stride are in elements, not bytes. + params.dout_batch_stride = dout.stride(0); + params.dout_d_stride = dout.stride(1); + params.dA_d_stride = dA.stride(0); + params.dA_dstate_stride = dA.stride(1); + if (!is_variable_B) { + params.dB_d_stride = dB.stride(0); + } else { + params.dB_batch_stride = dB.stride(0); + params.dB_group_stride = dB.stride(1); + } + params.dB_dstate_stride = !is_variable_B ? dB.stride(1) : dB.stride(2); + if (!is_variable_C) { + params.dC_d_stride = dC.stride(0); + } else { + params.dC_batch_stride = dC.stride(0); + params.dC_group_stride = dC.stride(1); + } + params.dC_dstate_stride = !is_variable_C ? 
dC.stride(1) : dC.stride(2); + params.du_batch_stride = du.stride(0); + params.du_d_stride = du.stride(1); + params.ddelta_batch_stride = ddelta.stride(0); + params.ddelta_d_stride = ddelta.stride(1); + if (has_z) { + params.dz_batch_stride = dz.stride(0); + params.dz_d_stride = dz.stride(1); + } +} + +std::vector +selective_scan_fwd(const at::Tensor &u, const at::Tensor &delta, + const at::Tensor &A, const at::Tensor &B, const at::Tensor &C, + const c10::optional &D_, + const c10::optional &z_, + const c10::optional &delta_bias_, + bool delta_softplus) { + auto input_type = u.scalar_type(); + auto weight_type = A.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat); + + const bool is_variable_B = B.dim() >= 3; + const bool is_variable_C = C.dim() >= 3; + const bool is_complex = weight_type == at::ScalarType::ComplexFloat; + + TORCH_CHECK(delta.scalar_type() == input_type); + TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type)); + TORCH_CHECK(C.scalar_type() == (!is_variable_C ? weight_type : input_type)); + + TORCH_CHECK(u.is_cuda()); + TORCH_CHECK(delta.is_cuda()); + TORCH_CHECK(A.is_cuda()); + TORCH_CHECK(B.is_cuda()); + TORCH_CHECK(C.is_cuda()); + + TORCH_CHECK(u.stride(-1) == 1 || u.size(-1) == 1); + TORCH_CHECK(delta.stride(-1) == 1 || delta.size(-1) == 1); + + const auto sizes = u.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int dstate = A.size(1); + const int n_groups = is_variable_B ? B.size(1) : 1; + + TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256"); + + CHECK_SHAPE(u, batch_size, dim, seqlen); + CHECK_SHAPE(delta, batch_size, dim, seqlen); + CHECK_SHAPE(A, dim, dstate); + if (!is_variable_B) { + CHECK_SHAPE(B, dim, dstate); + } else { + CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2); + TORCH_CHECK(B.stride(-1) == 1 || B.size(-1) == 1); + } + if (!is_variable_C) { + CHECK_SHAPE(C, dim, dstate); + } else { + CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? 
seqlen: seqlen * 2); + TORCH_CHECK(C.stride(-1) == 1 || C.size(-1) == 1); + } + + if (D_.has_value()) { + auto D = D_.value(); + TORCH_CHECK(D.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(D.is_cuda()); + TORCH_CHECK(D.stride(-1) == 1 || D.size(-1) == 1); + CHECK_SHAPE(D, dim); + } + + if (delta_bias_.has_value()) { + auto delta_bias = delta_bias_.value(); + TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(delta_bias.is_cuda()); + TORCH_CHECK(delta_bias.stride(-1) == 1 || delta_bias.size(-1) == 1); + CHECK_SHAPE(delta_bias, dim); + } + + at::Tensor z, out_z; + const bool has_z = z_.has_value(); + if (has_z) { + z = z_.value(); + TORCH_CHECK(z.scalar_type() == input_type); + TORCH_CHECK(z.is_cuda()); + TORCH_CHECK(z.stride(-1) == 1 || z.size(-1) == 1); + CHECK_SHAPE(z, batch_size, dim, seqlen); + out_z = torch::empty_like(z); + } + + const int n_chunks = (seqlen + 2048 - 1) / 2048; + // const int n_chunks = (seqlen + 1024 - 1) / 1024; + // at::Tensor out = torch::empty_like(u); + // Right now u has BHL layout and delta has HBL layout, and we want out to have HBL layout + at::Tensor out = torch::empty_like(delta); + at::Tensor x; + x = torch::empty({batch_size, dim, n_chunks, dstate * 2}, u.options().dtype(weight_type)); + + SSMParamsBase params; + set_ssm_params_fwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, out, z, out_z, + D_.has_value() ? D_.value().data_ptr() : nullptr, + delta_bias_.has_value() ? delta_bias_.value().data_ptr() : nullptr, + x.data_ptr(), + has_z, + delta_softplus); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)u.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_fwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_fwd", [&] { + selective_scan_fwd_cuda(params, stream); + }); + }); + std::vector result = {out, x}; + if (has_z) { result.push_back(out_z); } + return result; +} + +std::vector +selective_scan_bwd(const at::Tensor &u, const at::Tensor &delta, + const at::Tensor &A, const at::Tensor &B, const at::Tensor &C, + const c10::optional &D_, + const c10::optional &z_, + const c10::optional &delta_bias_, + const at::Tensor &dout, + const c10::optional &x_, + const c10::optional &out_, + c10::optional &dz_, + bool delta_softplus, + bool recompute_out_z) { + auto input_type = u.scalar_type(); + auto weight_type = A.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat); + + const bool is_variable_B = B.dim() >= 3; + const bool is_variable_C = C.dim() >= 3; + const bool is_complex = weight_type == at::ScalarType::ComplexFloat; + + TORCH_CHECK(delta.scalar_type() == input_type); + TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type)); + TORCH_CHECK(C.scalar_type() == (!is_variable_C ? 
weight_type : input_type)); + TORCH_CHECK(dout.scalar_type() == input_type); + + TORCH_CHECK(u.is_cuda()); + TORCH_CHECK(delta.is_cuda()); + TORCH_CHECK(A.is_cuda()); + TORCH_CHECK(B.is_cuda()); + TORCH_CHECK(C.is_cuda()); + TORCH_CHECK(dout.is_cuda()); + + TORCH_CHECK(u.stride(-1) == 1 || u.size(-1) == 1); + TORCH_CHECK(delta.stride(-1) == 1 || delta.size(-1) == 1); + TORCH_CHECK(dout.stride(-1) == 1 || dout.size(-1) == 1); + + const auto sizes = u.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int dstate = A.size(1); + const int n_groups = is_variable_B ? B.size(1) : 1; + + TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256"); + + CHECK_SHAPE(u, batch_size, dim, seqlen); + CHECK_SHAPE(delta, batch_size, dim, seqlen); + CHECK_SHAPE(A, dim, dstate); + if (!is_variable_B) { + CHECK_SHAPE(B, dim, dstate); + } else { + CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2); + TORCH_CHECK(B.stride(-1) == 1 || B.size(-1) == 1); + } + if (!is_variable_C) { + CHECK_SHAPE(C, dim, dstate); + } else { + CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? seqlen: seqlen * 2); + TORCH_CHECK(C.stride(-1) == 1 || C.size(-1) == 1); + } + CHECK_SHAPE(dout, batch_size, dim, seqlen); + + if (D_.has_value()) { + auto D = D_.value(); + TORCH_CHECK(D.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(D.is_cuda()); + TORCH_CHECK(D.stride(-1) == 1 || D.size(-1) == 1); + CHECK_SHAPE(D, dim); + } + + if (delta_bias_.has_value()) { + auto delta_bias = delta_bias_.value(); + TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(delta_bias.is_cuda()); + TORCH_CHECK(delta_bias.stride(-1) == 1 || delta_bias.size(-1) == 1); + CHECK_SHAPE(delta_bias, dim); + } + + at::Tensor z, out, dz, out_z; + const bool has_z = z_.has_value(); + if (has_z) { + z = z_.value(); + TORCH_CHECK(z.scalar_type() == input_type); + TORCH_CHECK(z.is_cuda()); + TORCH_CHECK(z.stride(-1) == 1 || z.size(-1) == 1); + CHECK_SHAPE(z, batch_size, dim, seqlen); + + TORCH_CHECK(out_.has_value()); + out = out_.value(); + TORCH_CHECK(out.scalar_type() == input_type); + TORCH_CHECK(out.is_cuda()); + TORCH_CHECK(out.stride(-1) == 1 || out.size(-1) == 1); + CHECK_SHAPE(out, batch_size, dim, seqlen); + + if (dz_.has_value()) { + dz = dz_.value(); + TORCH_CHECK(dz.scalar_type() == input_type); + TORCH_CHECK(dz.is_cuda()); + TORCH_CHECK(dz.stride(-1) == 1 || dz.size(-1) == 1); + CHECK_SHAPE(dz, batch_size, dim, seqlen); + } else { + dz = torch::empty_like(z); + } + if (recompute_out_z) { + out_z = torch::empty_like(out); + } + } + + const int n_chunks = (seqlen + 2048 - 1) / 2048; + // const int n_chunks = (seqlen + 1024 - 1) / 1024; + if (n_chunks > 1) { TORCH_CHECK(x_.has_value()); } + if (x_.has_value()) { + auto x = x_.value(); + TORCH_CHECK(x.scalar_type() == weight_type); + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(x.is_contiguous()); + CHECK_SHAPE(x, batch_size, dim, n_chunks, 2 * dstate); + } + + at::Tensor du = torch::empty_like(u); + at::Tensor ddelta = torch::empty_like(delta); + at::Tensor dA = torch::zeros_like(A); + at::Tensor dB = !is_variable_B ? torch::zeros_like(B) : torch::zeros_like(B, B.options().dtype(torch::kFloat32)); + at::Tensor dC = !is_variable_C ? 
torch::zeros_like(C) : torch::zeros_like(C, C.options().dtype(torch::kFloat32)); + at::Tensor dD; + if (D_.has_value()) { dD = torch::zeros_like(D_.value()); } + at::Tensor ddelta_bias; + if (delta_bias_.has_value()) { ddelta_bias = torch::zeros_like(delta_bias_.value()); } + + SSMParamsBwd params; + set_ssm_params_bwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, z, out, out_z, + D_.has_value() ? D_.value().data_ptr() : nullptr, + delta_bias_.has_value() ? delta_bias_.value().data_ptr() : nullptr, + x_.has_value() ? x_.value().data_ptr() : nullptr, + dout, du, ddelta, dA, dB, dC, dz, + D_.has_value() ? dD.data_ptr() : nullptr, + delta_bias_.has_value() ? ddelta_bias.data_ptr() : nullptr, + has_z, delta_softplus, recompute_out_z); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)u.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_bwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_bwd", [&] { + selective_scan_bwd_cuda(params, stream); + }); + }); + std::vector result = {du, ddelta, dA, dB.to(B.dtype()), dC.to(C.dtype()), dD, ddelta_bias}; + if (has_z) { result.push_back(dz); } + if (recompute_out_z) { result.push_back(out_z); } + return result; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("fwd", &selective_scan_fwd, "Selective scan forward"); + m.def("bwd", &selective_scan_bwd, "Selective scan backward"); +} diff --git a/mamba/csrc/selective_scan/selective_scan.h b/mamba/csrc/selective_scan/selective_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..e2c7bcdbd5ddadc5975caa641ecb1dcd3b73dafd --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan.h @@ -0,0 +1,101 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct SSMScanParamsBase { + using index_t = uint32_t; + + int batch, seqlen, n_chunks; + index_t a_batch_stride; + index_t b_batch_stride; + index_t out_batch_stride; + + // Common data pointers. + void *__restrict__ a_ptr; + void *__restrict__ b_ptr; + void *__restrict__ out_ptr; + void *__restrict__ x_ptr; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct SSMParamsBase { + using index_t = uint32_t; + + int batch, dim, seqlen, dstate, n_groups, n_chunks; + int dim_ngroups_ratio; + bool is_variable_B; + bool is_variable_C; + + bool delta_softplus; + + index_t A_d_stride; + index_t A_dstate_stride; + index_t B_batch_stride; + index_t B_d_stride; + index_t B_dstate_stride; + index_t B_group_stride; + index_t C_batch_stride; + index_t C_d_stride; + index_t C_dstate_stride; + index_t C_group_stride; + index_t u_batch_stride; + index_t u_d_stride; + index_t delta_batch_stride; + index_t delta_d_stride; + index_t z_batch_stride; + index_t z_d_stride; + index_t out_batch_stride; + index_t out_d_stride; + index_t out_z_batch_stride; + index_t out_z_d_stride; + + // Common data pointers. 
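Aside: the stride fields listed above are what the kernels use to index into the (batch, dim, seqlen) buffers. A hedged sketch of that relationship in terms of PyTorch element strides (illustrative only, not part of the extension):

```python
import torch

# How the *_batch_stride / *_d_stride fields relate to a contiguous
# (batch, dim, seqlen) tensor such as u. Strides are counted in elements, not bytes.
u = torch.randn(2, 4, 8)
u_batch_stride, u_d_stride = u.stride(0), u.stride(1)

# A thread block assigned to (batch_id, dim_id) starts reading at this flat offset;
# the last dimension must have stride 1, which is what the TORCH_CHECKs above enforce.
batch_id, dim_id = 1, 3
offset = batch_id * u_batch_stride + dim_id * u_d_stride
row = u.reshape(-1)[offset : offset + u.shape[-1]]
assert torch.equal(row, u[batch_id, dim_id])
```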
+ void *__restrict__ A_ptr; + void *__restrict__ B_ptr; + void *__restrict__ C_ptr; + void *__restrict__ D_ptr; + void *__restrict__ u_ptr; + void *__restrict__ delta_ptr; + void *__restrict__ delta_bias_ptr; + void *__restrict__ out_ptr; + void *__restrict__ x_ptr; + void *__restrict__ z_ptr; + void *__restrict__ out_z_ptr; +}; + +struct SSMParamsBwd: public SSMParamsBase { + index_t dout_batch_stride; + index_t dout_d_stride; + index_t dA_d_stride; + index_t dA_dstate_stride; + index_t dB_batch_stride; + index_t dB_group_stride; + index_t dB_d_stride; + index_t dB_dstate_stride; + index_t dC_batch_stride; + index_t dC_group_stride; + index_t dC_d_stride; + index_t dC_dstate_stride; + index_t du_batch_stride; + index_t du_d_stride; + index_t dz_batch_stride; + index_t dz_d_stride; + index_t ddelta_batch_stride; + index_t ddelta_d_stride; + + // Common data pointers. + void *__restrict__ dout_ptr; + void *__restrict__ dA_ptr; + void *__restrict__ dB_ptr; + void *__restrict__ dC_ptr; + void *__restrict__ dD_ptr; + void *__restrict__ du_ptr; + void *__restrict__ dz_ptr; + void *__restrict__ ddelta_ptr; + void *__restrict__ ddelta_bias_ptr; +}; diff --git a/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu b/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu new file mode 100644 index 0000000000000000000000000000000000000000..c55f0e858af4ebd246a5d251308ab920b4e01a50 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu b/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu new file mode 100644 index 0000000000000000000000000000000000000000..72adaf5cb13c6429e2f345a0a823c6bc3722b95a --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu b/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu new file mode 100644 index 0000000000000000000000000000000000000000..df126d7c8d5f9f0862273d2fe21ea15b35757b64 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
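Aside: the .cu files that follow only instantiate selective_scan_bwd_cuda for each (input, weight) dtype pair so the kernels can compile in parallel. For orientation, here is a minimal, unfused PyTorch sketch of the recurrence the fwd/bwd bindings above compute; `selective_scan_ref` is a hypothetical helper for illustration, not the extension's API (real-weight case, n_groups = 1, ignoring the chunked `x` checkpoint buffer the CUDA code keeps):

```python
import torch
import torch.nn.functional as F

def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False):
    """u, delta: (batch, dim, seqlen); A: (dim, dstate); B, C: (batch, 1, dstate, seqlen)
    in the variable-B/C layout with n_groups = 1; D, delta_bias: (dim,)."""
    batch, dim, seqlen = u.shape
    if delta_bias is not None:
        delta = delta + delta_bias[None, :, None]
    if delta_softplus:
        delta = F.softplus(delta)
    x = u.new_zeros(batch, dim, A.shape[1])          # hidden state, zero-initialized
    ys = []
    for t in range(seqlen):
        # Discretize: x_t = exp(delta_t * A) * x_{t-1} + delta_t * B_t * u_t
        # (the kernels evaluate the exponential as exp2f after scaling Re(A) by log2 e).
        dA = torch.exp(delta[:, :, t, None] * A)
        dBu = delta[:, :, t, None] * B[:, 0, :, t][:, None, :] * u[:, :, t, None]
        x = dA * x + dBu
        ys.append((x * C[:, 0, :, t][:, None, :]).sum(-1))   # y_t = <C_t, x_t>
    out = torch.stack(ys, dim=-1)                            # (batch, dim, seqlen)
    if D is not None:
        out = out + D[None, :, None] * u                     # skip connection
    if z is not None:
        out = out * F.silu(z)                                # gated output (out_z)
    return out
```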
+ ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu b/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu new file mode 100644 index 0000000000000000000000000000000000000000..3ff271b50eaff208ae33c16c87ab7aaee76dfd76 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu b/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu new file mode 100644 index 0000000000000000000000000000000000000000..5554902342785b289b81c060a71a51734fc1e6bf --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu b/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu new file mode 100644 index 0000000000000000000000000000000000000000..a7ed642231da80c455c0499702cc8a1cb4536ec2 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh b/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c720ba28c0c89937128c3d3517e115a1f4f2fc43 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh @@ -0,0 +1,561 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
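Aside: selective_scan_bwd_kernel.cuh, opened here, produces du, ddelta, dA, dB, dC, dD and ddelta_bias in a single fused reverse pass over the chunks. A generic way to sanity-check any such backward is a double-precision gradcheck of the unfused reference sketched above (illustrative only, not the repository's test suite):

```python
import torch
# Assumes selective_scan_ref from the earlier sketch is in scope.

torch.manual_seed(0)
b, d, n, l = 1, 2, 4, 8
u     = torch.randn(b, d, l, dtype=torch.double, requires_grad=True)
delta = torch.rand(b, d, l, dtype=torch.double, requires_grad=True)
A     = (-torch.rand(d, n, dtype=torch.double)).requires_grad_()   # negative real part
B     = torch.randn(b, 1, n, l, dtype=torch.double, requires_grad=True)
C     = torch.randn(b, 1, n, l, dtype=torch.double, requires_grad=True)

# gradcheck validates the reference's autograd gradients numerically; a fused backward
# can then be compared against those gradients (with looser tolerances in fp16/bf16).
assert torch.autograd.gradcheck(selective_scan_ref, (u, delta, A, B, C))
```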
+ ******************************************************************************/ + +#pragma once + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK +#include // For atomicAdd on complex + +#ifndef USE_ROCM + #include + #include + #include + #include +#else + #include + namespace cub = hipcub; +#endif + +#include "selective_scan.h" +#include "selective_scan_common.h" +#include "reverse_scan.cuh" +#include "static_switch.h" + +template __device__ __forceinline__ scalar_t conj(scalar_t x); +template<> __device__ __forceinline__ float conj(float x) { return x; } +template<> __device__ __forceinline__ complex_t conj(complex_t x) { return std::conj(x); } + +template +struct Selective_Scan_bwd_kernel_traits { + static_assert(kNItems_ % 4 == 0); + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kNItems = kNItems_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : constexpr_min(8, kNItems); + static_assert(kNItems % kNElts == 0); + static constexpr int kNLoads = kNItems / kNElts; + static constexpr bool kIsComplex = std::is_same_v; + static constexpr bool kIsEvenLen = kIsEvenLen_; + static constexpr bool kIsVariableB = kIsVariableB_; + static constexpr bool kIsVariableC = kIsVariableC_; + static constexpr bool kDeltaSoftplus = kDeltaSoftplus_; + static constexpr bool kHasZ = kHasZ_; + // Setting MinBlocksPerMP to be 3 (instead of 2) for 128 threads with float improves occupancy. + // For complex this would lead to massive register spilling, so we keep it at 2. + static constexpr int kMinBlocks = kNThreads == 128 && !kIsComplex ? 3 : 2; + using vec_t = typename BytesToType::Type; + using scan_t = std::conditional_t; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockLoadWeightT = cub::BlockLoad; + using BlockLoadWeightVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + // using BlockScanT = cub::BlockScan; + using BlockScanT = cub::BlockScan; + // using BlockScanT = cub::BlockScan; + using BlockReverseScanT = BlockReverseScan; + using BlockReduceT = cub::BlockReduce; + using BlockReduceFloatT = cub::BlockReduce; + using BlockReduceComplexT = cub::BlockReduce; + using BlockExchangeT = cub::BlockExchange; + + static constexpr int kSmemIOSize = custom_max({sizeof(typename BlockLoadT::TempStorage), + sizeof(typename BlockLoadVecT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightVecT::TempStorage), + sizeof(typename BlockStoreT::TempStorage), + sizeof(typename BlockStoreVecT::TempStorage)}); + static constexpr int kSmemExchangeSize = (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockExchangeT::TempStorage); + static constexpr int kSmemReduceSize = sizeof(typename BlockReduceT::TempStorage); + static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize + kSmemReduceSize + sizeof(typename BlockScanT::TempStorage) + sizeof(typename BlockReverseScanT::TempStorage); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads, Ktraits::kMinBlocks) +void selective_scan_bwd_kernel(SSMParamsBwd params) { + constexpr bool kIsComplex = Ktraits::kIsComplex; + constexpr bool kIsVariableB = Ktraits::kIsVariableB; + constexpr bool kIsVariableC = 
Ktraits::kIsVariableC; + constexpr bool kDeltaSoftplus = Ktraits::kDeltaSoftplus; + constexpr bool kHasZ = Ktraits::kHasZ; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNItems = Ktraits::kNItems; + using input_t = typename Ktraits::input_t; + using weight_t = typename Ktraits::weight_t; + using scan_t = typename Ktraits::scan_t; + + // Shared memory. + extern __shared__ char smem_[]; + // cast to lvalue reference of expected type + // char *smem_loadstorescan = smem_ + 2 * MAX_DSTATE * sizeof(weight_t); + // auto& smem_load = reinterpret_cast(smem_ + 2 * MAX_DSTATE * sizeof(weight_t)); + // auto& smem_load = reinterpret_cast(smem_loadstorescan); + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_weight = reinterpret_cast(smem_); + auto& smem_load_weight1 = *reinterpret_cast(smem_ + sizeof(typename Ktraits::BlockLoadWeightT::TempStorage)); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_exchange = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + auto& smem_exchange1 = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize + sizeof(typename Ktraits::BlockExchangeT::TempStorage)); + auto& smem_reduce = *reinterpret_cast(reinterpret_cast(&smem_exchange) + Ktraits::kSmemExchangeSize); + auto& smem_reduce_float = *reinterpret_cast(&smem_reduce); + auto& smem_reduce_complex = *reinterpret_cast(&smem_reduce); + auto& smem_scan = *reinterpret_cast(reinterpret_cast(&smem_reduce) + Ktraits::kSmemReduceSize); + auto& smem_reverse_scan = *reinterpret_cast(reinterpret_cast(&smem_scan) + sizeof(typename Ktraits::BlockScanT::TempStorage)); + weight_t *smem_delta_a = reinterpret_cast(smem_ + Ktraits::kSmemSize); + scan_t *smem_running_postfix = reinterpret_cast(smem_delta_a + 2 * MAX_DSTATE + kNThreads); + weight_t *smem_da = reinterpret_cast(smem_running_postfix + MAX_DSTATE); + weight_t *smem_dbc = reinterpret_cast(smem_da + MAX_DSTATE); + + const int batch_id = blockIdx.x; + const int dim_id = blockIdx.y; + const int group_id = dim_id / (params.dim_ngroups_ratio); + input_t *u = reinterpret_cast(params.u_ptr) + batch_id * params.u_batch_stride + + dim_id * params.u_d_stride; + input_t *delta = reinterpret_cast(params.delta_ptr) + batch_id * params.delta_batch_stride + + dim_id * params.delta_d_stride; + input_t *dout = reinterpret_cast(params.dout_ptr) + batch_id * params.dout_batch_stride + + dim_id * params.dout_d_stride; + weight_t *A = reinterpret_cast(params.A_ptr) + dim_id * params.A_d_stride; + weight_t *B = reinterpret_cast(params.B_ptr) + dim_id * params.B_d_stride; + input_t *Bvar = reinterpret_cast(params.B_ptr) + batch_id * params.B_batch_stride + group_id * params.B_group_stride; + weight_t *C = reinterpret_cast(params.C_ptr) + dim_id * params.C_d_stride; + input_t *Cvar = reinterpret_cast(params.C_ptr) + batch_id * params.C_batch_stride + group_id * params.C_group_stride; + weight_t *dA = reinterpret_cast(params.dA_ptr) + dim_id * params.dA_d_stride; + weight_t *dB = reinterpret_cast(params.dB_ptr) + + (!kIsVariableB ? dim_id * params.dB_d_stride : batch_id * (!kIsComplex ? params.dB_batch_stride : params.dB_batch_stride / 2) + group_id * params.dB_group_stride); + weight_t *dC = reinterpret_cast(params.dC_ptr) + + (!kIsVariableC ? dim_id * params.dC_d_stride : batch_id * (!kIsComplex ? params.dC_batch_stride : params.dC_batch_stride / 2) + group_id * params.dC_group_stride); + float *dD = params.dD_ptr == nullptr ? nullptr : reinterpret_cast(params.dD_ptr) + dim_id; + float D_val = params.D_ptr == nullptr ? 
0 : reinterpret_cast(params.D_ptr)[dim_id]; + float *ddelta_bias = params.ddelta_bias_ptr == nullptr ? nullptr : reinterpret_cast(params.ddelta_bias_ptr) + dim_id; + float delta_bias = params.delta_bias_ptr == nullptr ? 0 : reinterpret_cast(params.delta_bias_ptr)[dim_id]; + scan_t *x = params.x_ptr == nullptr + ? nullptr + : reinterpret_cast(params.x_ptr) + (batch_id * params.dim + dim_id) * (params.n_chunks) * params.dstate; + float dD_val = 0; + float ddelta_bias_val = 0; + + constexpr int kChunkSize = kNThreads * kNItems; + u += (params.n_chunks - 1) * kChunkSize; + delta += (params.n_chunks - 1) * kChunkSize; + dout += (params.n_chunks - 1) * kChunkSize; + Bvar += (params.n_chunks - 1) * kChunkSize * (!kIsComplex ? 1 : 2); + Cvar += (params.n_chunks - 1) * kChunkSize * (!kIsComplex ? 1 : 2); + for (int chunk = params.n_chunks - 1; chunk >= 0; --chunk) { + input_t u_vals[kNItems]; + input_t delta_vals_load[kNItems]; + input_t dout_vals_load[kNItems]; + __syncthreads(); + load_input(u, u_vals, smem_load, params.seqlen - chunk * kChunkSize); + u -= kChunkSize; + __syncthreads(); + load_input(delta, delta_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + // Will reload delta at the same location if kDeltaSoftplus + if constexpr (!kDeltaSoftplus) { delta -= kChunkSize; } + __syncthreads(); + load_input(dout, dout_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + dout -= kChunkSize; + + float dout_vals[kNItems], delta_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dout_vals[i] = float(dout_vals_load[i]); + delta_vals[i] = float(delta_vals_load[i]) + delta_bias; + if constexpr (kDeltaSoftplus) { + delta_vals[i] = delta_vals[i] <= 20.f ? log1pf(expf(delta_vals[i])) : delta_vals[i]; + } + } + + if constexpr (kHasZ) { + input_t *z = reinterpret_cast(params.z_ptr) + batch_id * params.z_batch_stride + + dim_id * params.z_d_stride + chunk * kChunkSize; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + dim_id * params.out_d_stride + chunk * kChunkSize; + input_t *dz = reinterpret_cast(params.dz_ptr) + batch_id * params.dz_batch_stride + + dim_id * params.dz_d_stride + chunk * kChunkSize; + input_t z_vals[kNItems], out_vals[kNItems]; + __syncthreads(); + load_input(z, z_vals, smem_load, params.seqlen - chunk * kChunkSize); + __syncthreads(); + load_input(out, out_vals, smem_load, params.seqlen - chunk * kChunkSize); + float dz_vals[kNItems], z_silu_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float z_val = z_vals[i]; + float z_sigmoid_val = 1.0f / (1.0f + expf(-z_val)); + z_silu_vals[i] = z_val * z_sigmoid_val; + dz_vals[i] = dout_vals[i] * float(out_vals[i]) * z_sigmoid_val + * (1.0f + z_val * (1.0f - z_sigmoid_val)); + dout_vals[i] *= z_silu_vals[i]; + } + __syncthreads(); + store_output(dz, dz_vals, smem_store, params.seqlen - chunk * kChunkSize); + if (params.out_z_ptr != nullptr) { // Recompute and store out_z + float out_z_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { out_z_vals[i] = float(out_vals[i]) * z_silu_vals[i]; } + // if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0) { + // printf("out_val=%f, z_silu_val = %f, out_z_val = %f\n", float(out_vals[0]), z_silu_vals[0], out_z_vals[0]); + // } + input_t *out_z = reinterpret_cast(params.out_z_ptr) + batch_id * params.out_z_batch_stride + + dim_id * params.out_z_d_stride + chunk * kChunkSize; + __syncthreads(); + store_output(out_z, out_z_vals, smem_store, params.seqlen - chunk * kChunkSize); + } + 
} + + float du_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { du_vals[i] = D_val * dout_vals[i]; } + #pragma unroll + for (int i = 0; i < kNItems; ++i) { dD_val += dout_vals[i] * float(u_vals[i]); } + + float ddelta_vals[kNItems] = {0}; + __syncthreads(); + for (int state_idx = 0; state_idx < params.dstate; ++state_idx) { + const weight_t A_val = A[state_idx * params.A_dstate_stride]; + // Multiply the real part of A with LOG2E so we can use exp2f instead of expf. + weight_t A_scaled; + constexpr float kLog2e = M_LOG2E; + if constexpr (!kIsComplex) { + A_scaled = A_val * kLog2e; + } else { + A_scaled = complex_t(A_val.real_ * kLog2e, A_val.imag_); + } + weight_t B_val, C_val; + weight_t B_vals[kNItems], C_vals[kNItems]; + if constexpr (!kIsVariableB) { + B_val = B[state_idx * params.B_dstate_stride]; + } else { + load_weight(Bvar + state_idx * params.B_dstate_stride, B_vals, + smem_load_weight, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + } + if constexpr (!kIsVariableC) { + C_val = C[state_idx * params.C_dstate_stride]; + } else { + auto &smem_load_weight_C = !kIsVariableB ? smem_load_weight : smem_load_weight1; + load_weight(Cvar + state_idx * params.C_dstate_stride, C_vals, + smem_load_weight_C, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + } + // const weight_t A_val = smem_a[state_idx]; + scan_t thread_data[kNItems], thread_reverse_data[kNItems]; + if constexpr (!kIsComplex) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const float delta_a_exp = exp2f(delta_vals[i] * A_scaled); + thread_data[i] = make_float2(delta_a_exp, !kIsVariableB ? delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i]); + if (i == 0) { + smem_delta_a[threadIdx.x == 0 ? state_idx + (chunk % 2) * MAX_DSTATE : threadIdx.x + 2 * MAX_DSTATE] = delta_a_exp; + } else { + thread_reverse_data[i - 1].x = delta_a_exp; + } + thread_reverse_data[i].y = dout_vals[i] * + (!kIsVariableC + ? (!kIsVariableB ? B_val * C_val : C_val) + : (!kIsVariableB ? B_val * C_vals[i] : C_vals[i])); + } + __syncthreads(); + thread_reverse_data[kNItems - 1].x = threadIdx.x == kNThreads - 1 + ? (chunk == params.n_chunks - 1 ? 1.f : smem_delta_a[state_idx + ((chunk + 1) % 2) * MAX_DSTATE]) + : smem_delta_a[threadIdx.x + 1 + 2 * MAX_DSTATE]; + // Initialize running total + scan_t running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? x[(chunk - 1) * params.dstate + state_idx] : make_float2(1.f, 0.f); + SSMScanPrefixCallbackOp prefix_op(running_prefix); + typename Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + scan_t running_postfix = chunk < params.n_chunks - 1 && threadIdx.x % 32 == 0 ? smem_running_postfix[state_idx] : make_float2(1.f, 0.f); + SSMScanPrefixCallbackOp postfix_op(running_postfix); + typename Ktraits::BlockReverseScanT(smem_reverse_scan).InclusiveReverseScan( + thread_reverse_data, thread_reverse_data, SSMScanOp(), postfix_op + ); + if (threadIdx.x == 0) { smem_running_postfix[state_idx] = postfix_op.running_prefix; } + weight_t dA_val = 0, dBC_val = 0; + weight_t dB_vals[kNItems], dC_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const float dx = thread_reverse_data[i].y; + const float ddelta_u = !kIsVariableB ? dx : dx * B_vals[i]; + du_vals[i] += ddelta_u * delta_vals[i]; + const float a = thread_data[i].y - (!kIsVariableB ? 
delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i]); + ddelta_vals[i] += ddelta_u * float(u_vals[i]) + dx * A_val * a; + dA_val += dx * delta_vals[i] * a; + if constexpr (!kIsVariableB || !kIsVariableC) { + if constexpr (!kIsVariableB) { // dBC_val is dB_val + dBC_val += dout_vals[i] * (!kIsVariableC ? thread_data[i].y : thread_data[i].y * C_vals[i]); + } else { // dBC_val is dC_val + dBC_val += dout_vals[i] * thread_data[i].y; + } + } + if constexpr (kIsVariableB) { dB_vals[i] = dx * delta_vals[i] * float(u_vals[i]); } + if constexpr (kIsVariableC) { + dC_vals[i] = dout_vals[i] * (!kIsVariableB ? thread_data[i].y * B_val : thread_data[i].y); + } + } + // Block-exchange to make the atomicAdd's coalesced, otherwise they're much slower + if constexpr (kIsVariableB || kIsVariableC) { + if constexpr (kIsVariableB) { + typename Ktraits::BlockExchangeT(smem_exchange).BlockedToStriped(dB_vals, dB_vals); + } + if constexpr (kIsVariableC) { + auto &smem_exchange_C = !kIsVariableB ? smem_exchange : smem_exchange1; + typename Ktraits::BlockExchangeT(smem_exchange_C).BlockedToStriped(dC_vals, dC_vals); + } + const int seqlen_remaining = params.seqlen - chunk * kChunkSize - threadIdx.x; + weight_t *dB_cur = dB + state_idx * params.dB_dstate_stride + chunk * kChunkSize + threadIdx.x; + weight_t *dC_cur = dC + state_idx * params.dC_dstate_stride + chunk * kChunkSize + threadIdx.x; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + if (i * kNThreads < seqlen_remaining) { + if constexpr (kIsVariableB) { gpuAtomicAdd(dB_cur + i * kNThreads, dB_vals[i]); } + if constexpr (kIsVariableC) { gpuAtomicAdd(dC_cur + i * kNThreads, dC_vals[i]); } + } + } + } + if constexpr (!kIsVariableB || !kIsVariableC) { + float2 dA_dBC_val = make_float2(dA_val, dBC_val); + dA_dBC_val = typename Ktraits::BlockReduceT(smem_reduce).Sum(dA_dBC_val); + dA_val = dA_dBC_val.x; + if (threadIdx.x == 0) { + smem_dbc[state_idx] = chunk == params.n_chunks - 1 ? dA_dBC_val.y : dA_dBC_val.y + smem_dbc[state_idx]; + } + } else { + dA_val = typename Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dA_val); + } + if (threadIdx.x == 0) { + smem_da[state_idx] = chunk == params.n_chunks - 1 ? dA_val : dA_val + smem_da[state_idx]; + } + } else { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + // Pytorch's implementation of complex exp (which calls thrust) is very slow + complex_t delta_a_exp = cexp2f(delta_vals[i] * A_scaled); + weight_t B_delta_u_val = !kIsVariableB ? delta_vals[i] * float(u_vals[i]) : B_vals[i] * delta_vals[i] * float(u_vals[i]); + thread_data[i] = make_float4(delta_a_exp.real_, delta_a_exp.imag_, B_delta_u_val.real_, B_delta_u_val.imag_); + if (i == 0) { + smem_delta_a[threadIdx.x == 0 ? state_idx + (chunk % 2) * MAX_DSTATE : threadIdx.x + 2 * MAX_DSTATE] = delta_a_exp; + } else { + thread_reverse_data[i - 1].x = delta_a_exp.real_; + thread_reverse_data[i - 1].y = -delta_a_exp.imag_; + } + complex_t dout_BC = 2 * dout_vals[i] + * conj(!kIsVariableC + ? (!kIsVariableB ? B_val * C_val : C_val) + : (!kIsVariableB ? B_val * C_vals[i] : C_vals[i])); + thread_reverse_data[i].z = dout_BC.real_; + thread_reverse_data[i].w = dout_BC.imag_; + } + __syncthreads(); + complex_t delta_a_exp = threadIdx.x == kNThreads - 1 + ? (chunk == params.n_chunks - 1 ? 
1.f : smem_delta_a[state_idx + ((chunk + 1) % 2) * MAX_DSTATE]) + : smem_delta_a[threadIdx.x + 1 + 2 * MAX_DSTATE]; + thread_reverse_data[kNItems - 1].x = delta_a_exp.real_; + thread_reverse_data[kNItems - 1].y = -delta_a_exp.imag_; + // Initialize running total + scan_t running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? x[(chunk - 1) * params.dstate + state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + SSMScanPrefixCallbackOp prefix_op(running_prefix); + typename Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + scan_t running_postfix = chunk < params.n_chunks - 1 && threadIdx.x % 32 == 0 ? smem_running_postfix[state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + SSMScanPrefixCallbackOp postfix_op(running_postfix); + typename Ktraits::BlockReverseScanT(smem_reverse_scan).InclusiveReverseScan( + thread_reverse_data, thread_reverse_data, SSMScanOp(), postfix_op + ); + if (threadIdx.x == 0) { smem_running_postfix[state_idx] = postfix_op.running_prefix; } + weight_t dA_val = 0, dBC_val = 0; + weight_t dB_vals[kNItems], dC_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + complex_t x = complex_t(thread_data[i].z, thread_data[i].w); + complex_t dx = complex_t(thread_reverse_data[i].z, thread_reverse_data[i].w); + float ddelta_u = !kIsVariableB ? dx.real_ : (dx * conj(B_vals[i])).real_; + if constexpr (!kIsVariableB || !kIsVariableC) { + if constexpr (!kIsVariableB) { // dBC_val is dB_val + dBC_val += (2 * dout_vals[i]) * conj(!kIsVariableC ? x : x * C_vals[i]); + } else { // dBC_val is dC_val + dBC_val += (2 * dout_vals[i]) * conj(x); + } + } + const complex_t a_conj = conj(x - (!kIsVariableB ? delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i])); + du_vals[i] += ddelta_u * delta_vals[i]; + ddelta_vals[i] += ddelta_u * float(u_vals[i]) + (dx * conj(A_val) * a_conj).real_; + dA_val += delta_vals[i] * dx * a_conj; + if constexpr (kIsVariableB) { dB_vals[i] = dx * delta_vals[i] * float(u_vals[i]); } + if constexpr (kIsVariableC) { + dC_vals[i] = (2 * dout_vals[i]) * conj(!kIsVariableB ? x * B_val : x); + } + } + // Block-exchange to make the atomicAdd's coalesced, otherwise they're much slower + if constexpr (kIsVariableB || kIsVariableC) { + float dB_vals_f[kNItems * 2], dC_vals_f[kNItems * 2]; + if constexpr (kIsVariableB) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dB_vals_f[i * 2] = dB_vals[i].real_; + dB_vals_f[i * 2 + 1] = dB_vals[i].imag_; + } + typename Ktraits::BlockExchangeT(smem_exchange).BlockedToStriped(dB_vals_f, dB_vals_f); + } + if constexpr (kIsVariableC) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dC_vals_f[i * 2] = dC_vals[i].real_; + dC_vals_f[i * 2 + 1] = dC_vals[i].imag_; + } + auto &smem_exchange_C = !kIsVariableB ? 
smem_exchange : smem_exchange1; + typename Ktraits::BlockExchangeT(smem_exchange_C).BlockedToStriped(dC_vals_f, dC_vals_f); + } + const int seqlen_remaining = (params.seqlen - chunk * kChunkSize) * 2 - threadIdx.x; + float *dB_cur = reinterpret_cast(dB) + state_idx * params.dB_dstate_stride + chunk * kChunkSize * 2 + threadIdx.x; + float *dC_cur = reinterpret_cast(dC) + state_idx * params.dC_dstate_stride + chunk * kChunkSize * 2 + threadIdx.x; + #pragma unroll + for (int i = 0; i < kNItems * 2; ++i) { + if (i * kNThreads < seqlen_remaining) { + if constexpr (kIsVariableB) { gpuAtomicAdd(dB_cur + i * kNThreads, dB_vals_f[i]); } + if constexpr (kIsVariableC) { gpuAtomicAdd(dC_cur + i * kNThreads, dC_vals_f[i]); } + } + } + } + if constexpr (!kIsVariableB || !kIsVariableC) { + float4 dA_dBC_val = make_float4(dA_val.real_, dA_val.imag_, dBC_val.real_, dBC_val.imag_); + dA_dBC_val = typename Ktraits::BlockReduceT(smem_reduce).Sum(dA_dBC_val); + dA_val = complex_t(dA_dBC_val.x, dA_dBC_val.y); + dBC_val = complex_t(dA_dBC_val.z, dA_dBC_val.w); + if (threadIdx.x == 0) { + smem_dbc[state_idx] = chunk == params.n_chunks - 1 ? dBC_val : dBC_val + smem_dbc[state_idx]; + } + } else { + dA_val = typename Ktraits::BlockReduceComplexT(smem_reduce_complex).Sum(dA_val); + } + if (threadIdx.x == 0) { + smem_da[state_idx] = chunk == params.n_chunks - 1 ? dA_val : dA_val + smem_da[state_idx]; + } + } + } + + if constexpr (kDeltaSoftplus) { + __syncthreads(); + input_t delta_vals_load[kNItems]; + load_input(delta, delta_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + delta -= kChunkSize; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float delta_val = float(delta_vals_load[i]) + delta_bias; + float delta_val_neg_exp = expf(-delta_val); + ddelta_vals[i] = delta_val <= 20.f + ? ddelta_vals[i] / (1.f + delta_val_neg_exp) + : ddelta_vals[i]; + } + } + for (int i = 0; i < kNItems; ++i) { ddelta_bias_val += ddelta_vals[i]; } + + input_t *du = reinterpret_cast(params.du_ptr) + batch_id * params.du_batch_stride + + dim_id * params.du_d_stride + chunk * kChunkSize; + input_t *ddelta = reinterpret_cast(params.ddelta_ptr) + batch_id * params.ddelta_batch_stride + + dim_id * params.ddelta_d_stride + chunk * kChunkSize; + __syncthreads(); + store_output(du, du_vals, smem_store, params.seqlen - chunk * kChunkSize); + __syncthreads(); + store_output(ddelta, ddelta_vals, smem_store, params.seqlen - chunk * kChunkSize); + + Bvar -= kChunkSize * (!kIsComplex ? 1 : 2); + Cvar -= kChunkSize * (!kIsComplex ? 1 : 2); + } + if (params.dD_ptr != nullptr) { + dD_val = typename Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dD_val); + if (threadIdx.x == 0) { gpuAtomicAdd(dD, dD_val); } + } + if (params.ddelta_bias_ptr != nullptr) { + __syncthreads(); + ddelta_bias_val = typename Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(ddelta_bias_val); + if (threadIdx.x == 0) { gpuAtomicAdd(ddelta_bias, ddelta_bias_val); } + } + for (int state_idx = threadIdx.x; state_idx < params.dstate; state_idx += blockDim.x) { + gpuAtomicAdd(&(dA[state_idx * params.dA_dstate_stride]), smem_da[state_idx]); + weight_t dBC_val; + if (!kIsVariableB || !kIsVariableC) { dBC_val = smem_dbc[state_idx]; } + if constexpr (!kIsVariableB) { + gpuAtomicAdd(&(dB[state_idx * params.dB_dstate_stride]), + !kIsVariableC ? dBC_val * conj(C[state_idx * params.C_dstate_stride]) : dBC_val); + } + if constexpr (!kIsVariableC) { + gpuAtomicAdd(&(dC[state_idx * params.dC_dstate_stride]), + !kIsVariableB ? 
dBC_val * conj(B[state_idx * params.B_dstate_stride]) : dBC_val); + } + } +} + +template +void selective_scan_bwd_launch(SSMParamsBwd ¶ms, cudaStream_t stream) { + BOOL_SWITCH(params.seqlen % (kNThreads * kNItems) == 0, kIsEvenLen, [&] { + BOOL_SWITCH(params.is_variable_B, kIsVariableB, [&] { + BOOL_SWITCH(params.is_variable_C, kIsVariableC, [&] { + BOOL_SWITCH(params.delta_softplus, kDeltaSoftplus, [&] { + BOOL_SWITCH(params.z_ptr != nullptr , kHasZ, [&] { + using Ktraits = Selective_Scan_bwd_kernel_traits; + // using Ktraits = Selective_Scan_bwd_kernel_traits; + // TODO: check this + constexpr int kSmemSize = Ktraits::kSmemSize + MAX_DSTATE * sizeof(typename Ktraits::scan_t) + (kNThreads + 4 * MAX_DSTATE) * sizeof(typename Ktraits::weight_t); + + dim3 grid(params.batch, params.dim); + + auto kernel = &selective_scan_bwd_kernel; + + if (kSmemSize >= 48 * 1024) { + + #ifndef USE_ROCM + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + #else + C10_CUDA_CHECK(cudaFuncSetAttribute( + (void *) kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + std::cerr << "Warning (selective_scan_bwd_kernel): attempting to set maxDynamicSharedMemorySize on an AMD GPU which is currently a non-op (in ROCm versions <= 6.1). This might lead to undefined behavior. \n" << std::endl; + #endif + + } + + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + }); + }); + }); +} + +template +void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream) { + + #ifndef USE_ROCM + if (params.seqlen <= 128) { + selective_scan_bwd_launch<32, 4, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 256) { + selective_scan_bwd_launch<32, 8, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 512) { + selective_scan_bwd_launch<32, 16, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 1024) { + selective_scan_bwd_launch<64, 16, input_t, weight_t>(params, stream); + } else { + selective_scan_bwd_launch<128, 16, input_t, weight_t>(params, stream); + } + #else + if (params.seqlen <= 256) { + selective_scan_bwd_launch<64, 4, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 512) { + selective_scan_bwd_launch<64, 8, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 1024) { + selective_scan_bwd_launch<64, 16, input_t, weight_t>(params, stream); + } else { + selective_scan_bwd_launch<128, 16, input_t, weight_t>(params, stream); + } + #endif +} \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_common.h b/mamba/csrc/selective_scan/selective_scan_common.h new file mode 100644 index 0000000000000000000000000000000000000000..91328e913ae816c1dd718fce6adcdfcf5cff8437 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_common.h @@ -0,0 +1,255 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
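Aside: the dispatch just above picks (kNThreads, kNItems) from seqlen so that a single chunk of kNThreads * kNItems elements covers short sequences in one pass, falling back to 128 x 16 = 2048 elements per chunk for long sequences. A small sketch mirroring that table (values copied from the dispatch above; illustrative only):

```python
import math

def bwd_launch_config(seqlen, rocm=False):
    """Mirror of the seqlen -> (kNThreads, kNItems) table in selective_scan_bwd_cuda."""
    table = ([(256, 64, 4), (512, 64, 8), (1024, 64, 16)] if rocm
             else [(128, 32, 4), (256, 32, 8), (512, 32, 16), (1024, 64, 16)])
    for max_len, threads, items in table:
        if seqlen <= max_len:
            return threads, items
    return 128, 16                 # long sequences: 128 threads x 16 items = 2048 per chunk

# Sequences longer than one chunk are processed in ceil(seqlen / 2048) chunks,
# matching the n_chunks computed by the host-side bindings.
threads, items = bwd_launch_config(4096)
assert (threads * items, math.ceil(4096 / 2048)) == (2048, 2)
```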
+ ******************************************************************************/ + +#pragma once + +#ifndef USE_ROCM + #include +#else + #include +#endif +#include +#include // For scalar_value_type + + +#ifndef USE_ROCM + + constexpr size_t custom_max(std::initializer_list ilist) + { + return std::max(ilist); + } + + template + constexpr T constexpr_min(T a, T b) { + return std::min(a, b); + } + +#else + constexpr size_t custom_max(std::initializer_list ilist) + { + return *std::max_element(ilist.begin(), ilist.end()); + } + + template + constexpr T constexpr_min(T a, T b) { + return a < b ? a : b; + } +#endif + + +#define MAX_DSTATE 256 + +using complex_t = c10::complex; + +inline __device__ float2 operator+(const float2 & a, const float2 & b){ + return {a.x + b.x, a.y + b.y}; +} + +inline __device__ float3 operator+(const float3 &a, const float3 &b) { + return {a.x + b.x, a.y + b.y, a.z + b.z}; +} + +inline __device__ float4 operator+(const float4 & a, const float4 & b){ + return {a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w}; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template struct BytesToType {}; + +template<> struct BytesToType<16> { + using Type = uint4; + static_assert(sizeof(Type) == 16); +}; + +template<> struct BytesToType<8> { + using Type = uint64_t; + static_assert(sizeof(Type) == 8); +}; + +template<> struct BytesToType<4> { + using Type = uint32_t; + static_assert(sizeof(Type) == 4); +}; + +template<> struct BytesToType<2> { + using Type = uint16_t; + static_assert(sizeof(Type) == 2); +}; + +template<> struct BytesToType<1> { + using Type = uint8_t; + static_assert(sizeof(Type) == 1); +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct Converter{ + static inline __device__ void to_float(const scalar_t (&src)[N], float (&dst)[N]) { + #pragma unroll + for (int i = 0; i < N; ++i) { dst[i] = src[i]; } + } +}; + +template +struct Converter{ + static inline __device__ void to_float(const at::Half (&src)[N], float (&dst)[N]) { + static_assert(N % 2 == 0); + auto &src2 = reinterpret_cast(src); + auto &dst2 = reinterpret_cast(dst); + #pragma unroll + for (int i = 0; i < N / 2; ++i) { dst2[i] = __half22float2(src2[i]); } + } +}; + +#if __CUDA_ARCH__ >= 800 +template +struct Converter{ + static inline __device__ void to_float(const at::BFloat16 (&src)[N], float (&dst)[N]) { + static_assert(N % 2 == 0); + auto &src2 = reinterpret_cast(src); + auto &dst2 = reinterpret_cast(dst); + #pragma unroll + for (int i = 0; i < N / 2; ++i) { dst2[i] = __bfloat1622float2(src2[i]); } + } +}; +#endif + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +// From https://stackoverflow.com/questions/9860711/cucomplex-h-and-exp +// and https://forums.developer.nvidia.com/t/complex-number-exponential-function/24696 +__device__ __forceinline__ complex_t cexp2f(complex_t z) { + float t = exp2f(z.real_); + float c, s; + sincosf(z.imag_, &s, &c); + return complex_t(c * t, s * t); +} + +__device__ __forceinline__ complex_t cexpf(complex_t z) { + float t = expf(z.real_); + float c, s; + sincosf(z.imag_, &s, &c); + return complex_t(c * t, s * t); +} + +template struct SSMScanOp; + +template<> +struct SSMScanOp { + __device__ __forceinline__ float2 operator()(const float2 &ab0, const float2 &ab1) const { + return make_float2(ab1.x * ab0.x, ab1.x * ab0.y + ab1.y); + } +}; + +template<> +struct SSMScanOp { + __device__ 
__forceinline__ float4 operator()(const float4 &ab0, const float4 &ab1) const { + complex_t a0 = complex_t(ab0.x, ab0.y); + complex_t b0 = complex_t(ab0.z, ab0.w); + complex_t a1 = complex_t(ab1.x, ab1.y); + complex_t b1 = complex_t(ab1.z, ab1.w); + complex_t out_a = a1 * a0; + complex_t out_b = a1 * b0 + b1; + return make_float4(out_a.real_, out_a.imag_, out_b.real_, out_b.imag_); + } +}; + +// A stateful callback functor that maintains a running prefix to be applied +// during consecutive scan operations. +template struct SSMScanPrefixCallbackOp { + using scan_t = std::conditional_t, float2, float4>; + scan_t running_prefix; + // Constructor + __device__ SSMScanPrefixCallbackOp(scan_t running_prefix_) : running_prefix(running_prefix_) {} + // Callback operator to be entered by the first warp of threads in the block. + // Thread-0 is responsible for returning a value for seeding the block-wide scan. + __device__ scan_t operator()(scan_t block_aggregate) { + scan_t old_prefix = running_prefix; + running_prefix = SSMScanOp()(running_prefix, block_aggregate); + return old_prefix; + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +inline __device__ void load_input(typename Ktraits::input_t *u, + typename Ktraits::input_t (&u_vals)[Ktraits::kNItems], + typename Ktraits::BlockLoadT::TempStorage &smem_load, + int seqlen) { + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_load_vec = reinterpret_cast(smem_load); + using vec_t = typename Ktraits::vec_t; + typename Ktraits::BlockLoadVecT(smem_load_vec).Load( + reinterpret_cast(u), + reinterpret_cast(u_vals) + #ifdef USE_ROCM + , Ktraits::kNThreads * Ktraits::kNLoads + #endif + + ); + } else { + typename Ktraits::BlockLoadT(smem_load).Load(u, u_vals, seqlen, 0.f); + } +} + +template +inline __device__ void load_weight(typename Ktraits::input_t *Bvar, + typename Ktraits::weight_t (&B_vals)[Ktraits::kNItems], + typename Ktraits::BlockLoadWeightT::TempStorage &smem_load_weight, + int seqlen) { + constexpr int kNItems = Ktraits::kNItems; + if constexpr (!Ktraits::kIsComplex) { + typename Ktraits::input_t B_vals_load[kNItems]; + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_load_weight_vec = reinterpret_cast(smem_load_weight); + using vec_t = typename Ktraits::vec_t; + typename Ktraits::BlockLoadWeightVecT(smem_load_weight_vec).Load( + reinterpret_cast(Bvar), + reinterpret_cast(B_vals_load) + ); + } else { + typename Ktraits::BlockLoadWeightT(smem_load_weight).Load(Bvar, B_vals_load, seqlen, 0.f); + } + // #pragma unroll + // for (int i = 0; i < kNItems; ++i) { B_vals[i] = B_vals_load[i]; } + Converter::to_float(B_vals_load, B_vals); + } else { + typename Ktraits::input_t B_vals_load[kNItems * 2]; + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_load_weight_vec = reinterpret_cast(smem_load_weight); + using vec_t = typename Ktraits::vec_t; + typename Ktraits::BlockLoadWeightVecT(smem_load_weight_vec).Load( + reinterpret_cast(Bvar), + reinterpret_cast(B_vals_load) + ); + } else { + typename Ktraits::BlockLoadWeightT(smem_load_weight).Load(Bvar, B_vals_load, seqlen, 0.f); + } + #pragma unroll + for (int i = 0; i < kNItems; ++i) { B_vals[i] = complex_t(B_vals_load[i * 2], B_vals_load[i * 2 + 1]); } + } +} + +template +inline __device__ void store_output(typename Ktraits::input_t *out, + const float (&out_vals)[Ktraits::kNItems], + typename Ktraits::BlockStoreT::TempStorage &smem_store, + int seqlen) { + typename Ktraits::input_t write_vals[Ktraits::kNItems]; + #pragma unroll + 
for (int i = 0; i < Ktraits::kNItems; ++i) { write_vals[i] = out_vals[i]; } + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_store_vec = reinterpret_cast(smem_store); + using vec_t = typename Ktraits::vec_t; + typename Ktraits::BlockStoreVecT(smem_store_vec).Store( + reinterpret_cast(out), + reinterpret_cast(write_vals) + ); + } else { + typename Ktraits::BlockStoreT(smem_store).Store(out, write_vals, seqlen); + } +} diff --git a/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu b/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu new file mode 100644 index 0000000000000000000000000000000000000000..2b8615b1d522c119125d4cb6ff3dce42f2bd4659 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu @@ -0,0 +1,10 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_fwd_kernel.cuh" + +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu b/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu new file mode 100644 index 0000000000000000000000000000000000000000..015e2a0eff633daf2693e43a2648008652a38c7c --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu @@ -0,0 +1,10 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_fwd_kernel.cuh" + +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu b/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu new file mode 100644 index 0000000000000000000000000000000000000000..c142fe0208ea784679122ba04997d3432b05efcc --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu @@ -0,0 +1,10 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_fwd_kernel.cuh" + +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/mamba/csrc/selective_scan/selective_scan_fwd_kernel.cuh b/mamba/csrc/selective_scan/selective_scan_fwd_kernel.cuh new file mode 100644 index 0000000000000000000000000000000000000000..80e9e37e3f8d8b28f2dfc6a51c75fa10e54add86 --- /dev/null +++ b/mamba/csrc/selective_scan/selective_scan_fwd_kernel.cuh @@ -0,0 +1,376 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
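Aside: the forward kernel header opened here feeds per-timestep pairs (exp(delta_t * A), delta_t * B_t * u_t) into cub::BlockScan using the SSMScanOp defined earlier; that operator is associative, which is what lets a block-parallel scan reproduce the sequential recurrence. A quick check of the real-valued case (illustrative only):

```python
from functools import reduce
import torch

def ssm_combine(left, right):
    # SSMScanOp (real case): combining (a0, b0) with (a1, b1) gives (a1 * a0, a1 * b0 + b1).
    a0, b0 = left
    a1, b1 = right
    return (a1 * a0, a1 * b0 + b1)

# Folding the operator left to right reproduces x_t = a_t * x_{t-1} + b_t with x_{-1} = 0,
# where a_t = exp(delta_t * A) and b_t = delta_t * B_t * u_t in the kernel.
a, b = torch.rand(5), torch.randn(5)
x = torch.zeros(())
for t in range(5):
    x = a[t] * x + b[t]
_, x_fold = reduce(ssm_combine, zip(a, b))
assert torch.allclose(x, x_fold)
```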
+ ******************************************************************************/ + +#pragma once + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#ifndef USE_ROCM + #include + #include + #include +#else + #include + namespace cub = hipcub; +#endif + +#include "selective_scan.h" +#include "selective_scan_common.h" +#include "static_switch.h" + +template +struct Selective_Scan_fwd_kernel_traits { + static_assert(kNItems_ % 4 == 0); + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + // Setting MinBlocksPerMP to be 3 (instead of 2) for 128 threads improves occupancy. + static constexpr int kMinBlocks = kNThreads < 128 ? 5 : 3; + static constexpr int kNItems = kNItems_; + static constexpr int kNRows = kNRows_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : constexpr_min(8, kNItems); + static_assert(kNItems % kNElts == 0); + static constexpr int kNLoads = kNItems / kNElts; + static constexpr bool kIsComplex = std::is_same_v; + static constexpr bool kIsEvenLen = kIsEvenLen_; + static constexpr bool kIsVariableB = kIsVariableB_; + static constexpr bool kIsVariableC = kIsVariableC_; + static constexpr bool kHasZ = kHasZ_; + + static constexpr bool kDirectIO = kIsEvenLen && kNLoads == 1; + + using vec_t = typename BytesToType::Type; + using scan_t = std::conditional_t; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockLoadWeightT = cub::BlockLoad; + using BlockLoadWeightVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + // using BlockScanT = cub::BlockScan; + // using BlockScanT = cub::BlockScan; + using BlockScanT = cub::BlockScan; + static constexpr int kSmemIOSize = custom_max({sizeof(typename BlockLoadT::TempStorage), + sizeof(typename BlockLoadVecT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightVecT::TempStorage), + sizeof(typename BlockStoreT::TempStorage), + sizeof(typename BlockStoreVecT::TempStorage)}); + static constexpr int kSmemSize = kSmemIOSize + sizeof(typename BlockScanT::TempStorage); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads, Ktraits::kMinBlocks) +void selective_scan_fwd_kernel(SSMParamsBase params) { + constexpr bool kIsComplex = Ktraits::kIsComplex; + constexpr bool kIsVariableB = Ktraits::kIsVariableB; + constexpr bool kIsVariableC = Ktraits::kIsVariableC; + constexpr bool kHasZ = Ktraits::kHasZ; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNItems = Ktraits::kNItems; + constexpr int kNRows = Ktraits::kNRows; + constexpr bool kDirectIO = Ktraits::kDirectIO; + using input_t = typename Ktraits::input_t; + using weight_t = typename Ktraits::weight_t; + using scan_t = typename Ktraits::scan_t; + + // Shared memory. 
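Aside: in the forward kernel, each thread block owns one (batch, dim) pair (kNRows is fixed to 1 by the launcher further below) and walks the sequence in chunks of kNThreads * kNItems elements, checkpointing its running prefix into x once per chunk. A rough sketch of that decomposition, assuming the 128 x 16 configuration used for long sequences (illustrative only):

```python
import math

def fwd_work_decomposition(batch, dim, seqlen, n_threads, n_items, n_rows=1):
    """Grid is (batch, dim / kNRows); each block loops over seqlen in chunks of
    kNThreads * kNItems elements, writing one running-prefix entry per chunk to x."""
    chunk = n_threads * n_items
    n_chunks = math.ceil(seqlen / chunk)
    return (batch, dim // n_rows), n_chunks

# e.g. seqlen = 4096 is dispatched to 128 threads x 16 items (2048 elements per chunk):
grid, n_chunks = fwd_work_decomposition(batch=2, dim=768, seqlen=4096,
                                        n_threads=128, n_items=16)
assert grid == (2, 768) and n_chunks == 2
```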
+ extern __shared__ char smem_[]; + // cast to lvalue reference of expected type + // char *smem_loadstorescan = smem_ + 2 * MAX_DSTATE * sizeof(weight_t); + // auto& smem_load = reinterpret_cast(smem_ + 2 * MAX_DSTATE * sizeof(weight_t)); + // auto& smem_load = reinterpret_cast(smem_loadstorescan); + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_weight = reinterpret_cast(smem_); + auto& smem_load_weight1 = *reinterpret_cast(smem_ + sizeof(typename Ktraits::BlockLoadWeightT::TempStorage)); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_scan = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + // weight_t *smem_a = reinterpret_cast(smem_ + smem_loadstorescan_size); + // weight_t *smem_bc = reinterpret_cast(smem_a + MAX_DSTATE); + scan_t *smem_running_prefix = reinterpret_cast(smem_ + Ktraits::kSmemSize); + + const int batch_id = blockIdx.x; + const int dim_id = blockIdx.y; + const int group_id = dim_id / (params.dim_ngroups_ratio); + input_t *u = reinterpret_cast(params.u_ptr) + batch_id * params.u_batch_stride + + dim_id * kNRows * params.u_d_stride; + input_t *delta = reinterpret_cast(params.delta_ptr) + batch_id * params.delta_batch_stride + + dim_id * kNRows * params.delta_d_stride; + weight_t *A = reinterpret_cast(params.A_ptr) + dim_id * kNRows * params.A_d_stride; + weight_t *B = reinterpret_cast(params.B_ptr) + dim_id * kNRows * params.B_d_stride; + input_t *Bvar = reinterpret_cast(params.B_ptr) + batch_id * params.B_batch_stride + group_id * params.B_group_stride; + weight_t *C = reinterpret_cast(params.C_ptr) + dim_id * kNRows * params.C_d_stride; + input_t *Cvar = reinterpret_cast(params.C_ptr) + batch_id * params.C_batch_stride + group_id * params.C_group_stride; + scan_t *x = reinterpret_cast(params.x_ptr) + (batch_id * params.dim + dim_id * kNRows) * params.n_chunks * params.dstate; + + float D_val[kNRows] = {0}; + if (params.D_ptr != nullptr) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + D_val[r] = reinterpret_cast(params.D_ptr)[dim_id * kNRows + r]; + } + } + float delta_bias[kNRows] = {0}; + if (params.delta_bias_ptr != nullptr) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + delta_bias[r] = reinterpret_cast(params.delta_bias_ptr)[dim_id * kNRows + r]; + } + } + + // for (int state_idx = threadIdx.x; state_idx < params.dstate; state_idx += blockDim.x) { + // smem_a[state_idx] = A[state_idx * params.A_dstate_stride]; + // smem_bc[state_idx] = B[state_idx * params.B_dstate_stride] * C[state_idx * params.C_dstate_stride]; + // } + + constexpr int kChunkSize = kNThreads * kNItems; + for (int chunk = 0; chunk < params.n_chunks; ++chunk) { + input_t u_vals[kNRows][kNItems], delta_vals_load[kNRows][kNItems]; + __syncthreads(); + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + if constexpr (!kDirectIO) { + if (r > 0) { __syncthreads(); } + } + load_input(u + r * params.u_d_stride, u_vals[r], smem_load, params.seqlen - chunk * kChunkSize); + if constexpr (!kDirectIO) { __syncthreads(); } + load_input(delta + r * params.delta_d_stride, delta_vals_load[r], smem_load, params.seqlen - chunk * kChunkSize); + } + u += kChunkSize; + delta += kChunkSize; + + float delta_vals[kNRows][kNItems], delta_u_vals[kNRows][kNItems], out_vals[kNRows][kNItems]; + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float u_val = float(u_vals[r][i]); + delta_vals[r][i] = float(delta_vals_load[r][i]) + delta_bias[r]; + if (params.delta_softplus) { + delta_vals[r][i] = delta_vals[r][i] <= 20.f ? 
log1pf(expf(delta_vals[r][i])) : delta_vals[r][i]; + } + delta_u_vals[r][i] = delta_vals[r][i] * u_val; + out_vals[r][i] = D_val[r] * u_val; + } + } + + __syncthreads(); + for (int state_idx = 0; state_idx < params.dstate; ++state_idx) { + weight_t A_val[kNRows]; + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + A_val[r] = A[state_idx * params.A_dstate_stride + r * params.A_d_stride]; + // Multiply the real part of A with LOG2E so we can use exp2f instead of expf. + constexpr float kLog2e = M_LOG2E; + if constexpr (!kIsComplex) { + A_val[r] *= kLog2e; + } else { + A_val[r].real_ *= kLog2e; + } + } + // This variable holds B * C if both B and C are constant across seqlen. If only B varies + // across seqlen, this holds C. If only C varies across seqlen, this holds B. + // If both B and C vary, this is unused. + weight_t BC_val[kNRows]; + weight_t B_vals[kNItems], C_vals[kNItems]; + if constexpr (kIsVariableB) { + load_weight(Bvar + state_idx * params.B_dstate_stride, B_vals, + smem_load_weight, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + if constexpr (!kIsVariableC) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + BC_val[r] = C[state_idx * params.C_dstate_stride + r * params.C_d_stride]; + } + } + } + if constexpr (kIsVariableC) { + auto &smem_load_weight_C = !kIsVariableB ? smem_load_weight : smem_load_weight1; + load_weight(Cvar + state_idx * params.C_dstate_stride, C_vals, + smem_load_weight_C, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + if constexpr (!kIsVariableB) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + BC_val[r] = B[state_idx * params.B_dstate_stride + r * params.B_d_stride]; + } + } + } + if constexpr (!kIsVariableB && !kIsVariableC) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + BC_val[r] = B[state_idx * params.B_dstate_stride + r * params.B_d_stride] * C[state_idx * params.C_dstate_stride + r * params.C_d_stride]; + } + } + + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + if (r > 0) { __syncthreads(); } // Scan could be using the same smem + scan_t thread_data[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + if constexpr (!kIsComplex) { + thread_data[i] = make_float2(exp2f(delta_vals[r][i] * A_val[r]), + !kIsVariableB ? delta_u_vals[r][i] : B_vals[i] * delta_u_vals[r][i]); + if constexpr (!Ktraits::kIsEvenLen) { // So that the last state is correct + if (threadIdx.x * kNItems + i >= params.seqlen - chunk * kChunkSize) { + thread_data[i] = make_float2(1.f, 0.f); + } + } + } else { + // Pytorch's implementation of complex exp (which calls thrust) is very slow + complex_t delta_a_exp = cexp2f(delta_vals[r][i] * A_val[r]); + weight_t B_delta_u_val = !kIsVariableB ? delta_u_vals[r][i] : B_vals[i] * delta_u_vals[r][i]; + thread_data[i] = make_float4(delta_a_exp.real_, delta_a_exp.imag_, B_delta_u_val.real_, B_delta_u_val.imag_); + if constexpr (!Ktraits::kIsEvenLen) { // So that the last state is correct + if (threadIdx.x * kNItems + i >= params.seqlen - chunk * kChunkSize) { + thread_data[i] = make_float4(1.f, 0.f, 0.f, 0.f); + } + } + } + } + // Initialize running total + scan_t running_prefix; + if constexpr (!kIsComplex) { + // If we use WARP_SCAN then all lane 0 of all warps (not just thread 0) needs to read + running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? smem_running_prefix[state_idx + r * MAX_DSTATE] : make_float2(1.f, 0.f); + // running_prefix = chunk > 0 && threadIdx.x == 0 ? 
smem_running_prefix[state_idx] : make_float2(1.f, 0.f); + } else { + running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? smem_running_prefix[state_idx + r * MAX_DSTATE] : make_float4(1.f, 0.f, 0.f, 0.f); + // running_prefix = chunk > 0 && threadIdx.x == 0 ? smem_running_prefix[state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + } + SSMScanPrefixCallbackOp prefix_op(running_prefix); + typename Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + // There's a syncthreads in the scan op, so we don't need to sync here. + // Unless there's only 1 warp, but then it's the same thread (0) reading and writing. + if (threadIdx.x == 0) { + smem_running_prefix[state_idx] = prefix_op.running_prefix; + x[(r * params.n_chunks + chunk) * params.dstate + state_idx] = prefix_op.running_prefix; + } + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const weight_t C_val = !kIsVariableC + ? BC_val[r] + : (!kIsVariableB ? BC_val[r] * C_vals[i] : C_vals[i]); + if constexpr (!kIsComplex) { + out_vals[r][i] += thread_data[i].y * C_val; + } else { + out_vals[r][i] += (complex_t(thread_data[i].z, thread_data[i].w) * C_val).real_ * 2; + } + } + } + } + + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + dim_id * kNRows * params.out_d_stride + chunk * kChunkSize; + __syncthreads(); + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + if constexpr (!kDirectIO) { + if (r > 0) { __syncthreads(); } + } + store_output(out + r * params.out_d_stride, out_vals[r], smem_store, params.seqlen - chunk * kChunkSize); + } + + if constexpr (kHasZ) { + input_t *z = reinterpret_cast(params.z_ptr) + batch_id * params.z_batch_stride + + dim_id * kNRows * params.z_d_stride + chunk * kChunkSize; + input_t *out_z = reinterpret_cast(params.out_z_ptr) + batch_id * params.out_z_batch_stride + + dim_id * kNRows * params.out_z_d_stride + chunk * kChunkSize; + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + input_t z_vals[kNItems]; + __syncthreads(); + load_input(z + r * params.z_d_stride, z_vals, smem_load, params.seqlen - chunk * kChunkSize); + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float z_val = z_vals[i]; + out_vals[r][i] *= z_val / (1 + expf(-z_val)); + } + __syncthreads(); + store_output(out_z + r * params.out_z_d_stride, out_vals[r], smem_store, params.seqlen - chunk * kChunkSize); + } + } + + Bvar += kChunkSize * (!kIsComplex ? 1 : 2); + Cvar += kChunkSize * (!kIsComplex ? 1 : 2); + } +} + +template +void selective_scan_fwd_launch(SSMParamsBase ¶ms, cudaStream_t stream) { + // Only kNRows == 1 is tested for now, which ofc doesn't differ from previously when we had each block + // processing 1 row. + constexpr int kNRows = 1; + BOOL_SWITCH(params.seqlen % (kNThreads * kNItems) == 0, kIsEvenLen, [&] { + BOOL_SWITCH(params.is_variable_B, kIsVariableB, [&] { + BOOL_SWITCH(params.is_variable_C, kIsVariableC, [&] { + BOOL_SWITCH(params.z_ptr != nullptr , kHasZ, [&] { + using Ktraits = Selective_Scan_fwd_kernel_traits; + + constexpr int kSmemSize = Ktraits::kSmemSize + kNRows * MAX_DSTATE * sizeof(typename Ktraits::scan_t); + dim3 grid(params.batch, params.dim / kNRows); + + // Had to change this substantially since potentially the hip + // interface for setting kernel launch attributes is slightly different from + // cuda's. In particualar, it seems to expect a plain const void * pointer. 
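Aside: the kHasZ epilogue above gates the scan output with silu(z), i.e. out_z = out * z * sigmoid(z); the backward kernel earlier applies the matching closed-form derivative when it builds dz. A quick autograd check of that identity (illustrative only):

```python
import torch

z    = torch.randn(8, dtype=torch.double, requires_grad=True)
out  = torch.randn(8, dtype=torch.double)    # scan output before gating
dout = torch.randn(8, dtype=torch.double)    # upstream gradient

# Forward gating: out_z = out * z * sigmoid(z)  (i.e. out * silu(z))
(out * z * torch.sigmoid(z)).backward(dout)

# Closed form used when the backward kernel computes dz:
# dz = dout * out * sigmoid(z) * (1 + z * (1 - sigmoid(z)))
sig = torch.sigmoid(z).detach()
manual_dz = dout * out * sig * (1 + z.detach() * (1 - sig))
assert torch.allclose(z.grad, manual_dz)
```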
+ + auto kernel = &selective_scan_fwd_kernel; + + + if (kSmemSize >= 48 * 1024) { + #ifndef USE_ROCM + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + #else + C10_CUDA_CHECK(cudaFuncSetAttribute( + (void *) kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + std::cerr << "Warning (selective_scan_fwd_kernel): attempting to set maxDynamicSharedMemorySize on an AMD GPU which is currently a non-op (in ROCm versions <= 6.1). This might lead to undefined behavior. \n" << std::endl; + #endif + } + + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + }); + }); +} + +template +void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream) { + + #ifndef USE_ROCM + if (params.seqlen <= 128) { + selective_scan_fwd_launch<32, 4, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 256) { + selective_scan_fwd_launch<32, 8, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 512) { + selective_scan_fwd_launch<32, 16, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 1024) { + selective_scan_fwd_launch<64, 16, input_t, weight_t>(params, stream); + } else { + selective_scan_fwd_launch<128, 16, input_t, weight_t>(params, stream); + } + #else + if (params.seqlen <= 256) { + selective_scan_fwd_launch<64, 4, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 512) { + selective_scan_fwd_launch<64, 8, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 1024) { + selective_scan_fwd_launch<64, 16, input_t, weight_t>(params, stream); + } else { + selective_scan_fwd_launch<128, 16, input_t, weight_t>(params, stream); + } + #endif +} diff --git a/mamba/csrc/selective_scan/static_switch.h b/mamba/csrc/selective_scan/static_switch.h new file mode 100644 index 0000000000000000000000000000000000000000..7920ac045d0a2a1f4c4159ee3eebe51fe1e2c203 --- /dev/null +++ b/mamba/csrc/selective_scan/static_switch.h @@ -0,0 +1,25 @@ +// Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h +// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h + +#pragma once + +/// @param COND - a boolean expression to switch by +/// @param CONST_NAME - a name given for the constexpr bool variable. +/// @param ... - code to execute for true and false +/// +/// Usage: +/// ``` +/// BOOL_SWITCH(flag, BoolConst, [&] { +/// some_function(...); +/// }); +/// ``` +#define BOOL_SWITCH(COND, CONST_NAME, ...) \ + [&] { \ + if (COND) { \ + constexpr bool CONST_NAME = true; \ + return __VA_ARGS__(); \ + } else { \ + constexpr bool CONST_NAME = false; \ + return __VA_ARGS__(); \ + } \ + }() diff --git a/mamba/csrc/selective_scan/uninitialized_copy.cuh b/mamba/csrc/selective_scan/uninitialized_copy.cuh new file mode 100644 index 0000000000000000000000000000000000000000..cdaf115e34a303bdda35b03a189e50cdbde8150e --- /dev/null +++ b/mamba/csrc/selective_scan/uninitialized_copy.cuh @@ -0,0 +1,77 @@ +/****************************************************************************** + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#ifndef USE_ROCM + #include + + #include +#else + #include + // Map ::cuda::std to the standard std namespace + namespace cuda { + namespace std = ::std; + } +#endif + + +namespace detail +{ + +#if defined(_NVHPC_CUDA) +template +__host__ __device__ void uninitialized_copy(T *ptr, U &&val) +{ + // NVBug 3384810 + new (ptr) T(::cuda::std::forward(val)); +} +#else +template ::value, + int + >::type = 0> +__host__ __device__ void uninitialized_copy(T *ptr, U &&val) +{ + *ptr = ::cuda::std::forward(val); +} + +template ::value, + int + >::type = 0> +__host__ __device__ void uninitialized_copy(T *ptr, U &&val) +{ + new (ptr) T(::cuda::std::forward(val)); +} +#endif + +} // namespace detail diff --git a/mamba/dist/mamba_ssm-2.2.2-py3.9.egg b/mamba/dist/mamba_ssm-2.2.2-py3.9.egg new file mode 100644 index 0000000000000000000000000000000000000000..75ff2e069c3a7e0de93443c8867728ee5e9d7da4 Binary files /dev/null and b/mamba/dist/mamba_ssm-2.2.2-py3.9.egg differ diff --git a/mamba/evals/lm_harness_eval.py b/mamba/evals/lm_harness_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..d5895af73f112af617b9eb3d753e1e200935c87d --- /dev/null +++ b/mamba/evals/lm_harness_eval.py @@ -0,0 +1,39 @@ +import torch + +import transformers +from transformers import AutoTokenizer + +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + +from lm_eval.api.model import LM +from lm_eval.models.huggingface import HFLM +from lm_eval.api.registry import register_model +from lm_eval.__main__ import cli_evaluate + + +@register_model("mamba") +class MambaEvalWrapper(HFLM): + + AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM + + def __init__(self, pretrained="state-spaces/mamba-2.8b", max_length=2048, batch_size=None, device="cuda", + dtype=torch.float16): + LM.__init__(self) + self._model = MambaLMHeadModel.from_pretrained(pretrained, device=device, dtype=dtype) + self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + self.vocab_size = self.tokenizer.vocab_size + self._batch_size = int(batch_size) if batch_size is not None else 64 + self._max_length = max_length + self._device = torch.device(device) + + @property + def 
batch_size(self): + return self._batch_size + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + raise NotImplementedError() + + +if __name__ == "__main__": + cli_evaluate() diff --git a/mamba/mamba_ssm.egg-info/PKG-INFO b/mamba/mamba_ssm.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..771a96a423d6d997e36337792a31250611178bf9 --- /dev/null +++ b/mamba/mamba_ssm.egg-info/PKG-INFO @@ -0,0 +1,256 @@ +Metadata-Version: 2.1 +Name: mamba-ssm +Version: 2.2.2 +Summary: UNKNOWN +Home-page: UNKNOWN +License: UNKNOWN +Platform: UNKNOWN +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS + +# Mamba + +![Mamba](assets/selection.png "Selective State Space") +> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\ +> Albert Gu*, Tri Dao*\ +> Paper: https://arxiv.org/abs/2312.00752 + +![Mamba-2](assets/ssd_algorithm.png "State Space Dual Model") +> **Transformers are SSMs: Generalized Models and Efficient Algorithms**\ +> **Through Structured State Space Duality**\ +> Tri Dao*, Albert Gu*\ +> Paper: https://arxiv.org/abs/2405.21060 + +## About + +Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers. +It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4), +with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention). + +## Installation + +- [Option] `pip install causal-conv1d>=1.4.0`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block. +- `pip install mamba-ssm`: the core Mamba package. +- `pip install mamba-ssm[causal-conv1d]`: To install core Mamba package and causal-conv1d. +- `pip install mamba-ssm[dev]`: To install core Mamba package and dev depdencies. + +It can also be built from source with `pip install .` from this repository. + +If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`. + +Other requirements: +- Linux +- NVIDIA GPU +- PyTorch 1.12+ +- CUDA 11.6+ + +For AMD cards, see additional prerequisites below. + +## Usage + +We expose several levels of interface with the Mamba model. + +### Selective SSM + +Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2). + +Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py). + +### Mamba Block + +The main module of this repository is the Mamba architecture block wrapping the selective SSM. + +Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py). + +Usage: +``` python +import torch +from mamba_ssm import Mamba + +batch, length, dim = 2, 64, 16 +x = torch.randn(batch, length, dim).to("cuda") +model = Mamba( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=16, # SSM state expansion factor + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +### Mamba-2 + +The Mamba-2 block is implemented at [modules/mamba2.py](mamba_ssm/modules/mamba2.py). 
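+
+Internally, `Mamba2` fuses its projections: the input projection emits `[z, x, B, C, dt]` with total width `2 * d_inner + 2 * ngroups * d_state + nheads`, where `d_inner = expand * d_model` and `nheads = d_inner / headdim` (defaults: `headdim=64`, `ngroups=1`); with the default `d_ssm=None`, `d_inner` must be divisible by `headdim`. A quick sanity check of these sizes (the concrete numbers below are illustrative only, not a recommended configuration):
+``` python
+# Illustrative size bookkeeping for a Mamba-2 block (defaults: headdim=64, ngroups=1, d_ssm=None).
+d_model, expand, headdim, d_state, ngroups = 256, 2, 64, 64, 1
+d_inner = expand * d_model                                # 512
+assert d_inner % headdim == 0                             # enforced in Mamba2.__init__
+nheads = d_inner // headdim                               # 8
+d_in_proj = 2 * d_inner + 2 * ngroups * d_state + nheads  # width of in_proj output [z, x, B, C, dt] = 1160
+conv_dim = d_inner + 2 * ngroups * d_state                # width of the depthwise conv over [x, B, C] = 640
+print(nheads, d_in_proj, conv_dim)
+```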
+ +A simpler version is at [modules/mamba2_simple.py](mamba_ssm/modules/mamba2_simple.py) + +The usage is similar to Mamba(-1): +``` python +from mamba_ssm import Mamba2 +model = Mamba2( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=64, # SSM state expansion factor, typically 64 or 128 + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +#### SSD + +A minimal version of the inner SSD module (Listing 1 from the Mamba-2 paper) with conversion between "discrete" and "continuous" SSM versions +is at [modules/ssd_minimal.py](mamba_ssm/modules/ssd_minimal.py). + +### Mamba Language Model + +Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head. + +Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py). + +This is an example of how to integrate Mamba into an end-to-end neural network. +This example is used in the generation scripts below. + + +## Pretrained Models + +Pretrained models are uploaded to +[Hugging Face](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`, +`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`, `mamba2-130m`, `mamba2-370m`, +`mamba2-780m`, `mamba2-1.3b`, `mamba2-2.7b`, `transformerpp-2.7b`, `mamba2attn-2.7b`, trained on 300B tokens on the Pile, as well as `mamba-2.8b-slimpj` +(trained on 600B tokens on the SlimPajama dataset). + + +The models will be autodownloaded by the generation script below. + +These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models: + +| Parameters | Layers | Model dim. | +|------------|--------|------------| +| 130M | 24 | 768 | +| 370M | 48 | 1024 | +| 790M | 48 | 1536 | +| 1.4B | 48 | 2048 | +| 2.8B | 64 | 2560 | + +(The layer count of Mamba doubles that of a Transformer with similar size, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.) + +Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.). +Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models. + + +## Evaluations + +To run zero-shot evaluations of models (corresponding to Table 3 of the paper), +we use the +[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) +library. + +1. Install `lm-evaluation-harness` by `pip install lm-eval==0.4.2`. +2. 
Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo): +``` sh +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +``` + +To reproduce the results on the `mamba-2.8b-slimpj` model reported in the blogposts: +``` sh +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks boolq,piqa,hellaswag,winogrande,arc_easy,arc_challenge,openbookqa,race,truthfulqa_mc2 --device cuda --batch_size 256 +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks mmlu --num_fewshot 5 --device cuda --batch_size 256 +``` + +To run evaluations on Mamba-2 models, simply replace the model names: +``` sh +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/transformerpp-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2attn-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256 +``` + +Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process. + +## Inference + +The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py) +1. autoloads a model from the Hugging Face Hub, +2. generates completions of a user-specified prompt, +3. benchmarks the inference speed of this generation. + +Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature. + +### Examples + +To test generation latency (e.g. batch size = 1) with different sampling strategies: + +``` sh +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --minp 0.05 --topk 0 --temperature 0.7 --repetition-penalty 1.2 +``` + +To test generation throughput with random prompts (e.g. 
large batch size): +``` sh +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 64 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 64 +``` + +With Mamba-2, you just need to change the model name: +``` sh +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba2-2.7b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2 +``` + + +## Troubleshooting + +### Precision +Our models were trained using PyTorch [AMP](https://pytorch.org/docs/stable/amp.html) for mixed precision. AMP keeps model parameters in float32 and casts to half precision when necessary. +On the other hand, other frameworks like DeepSpeed store parameters in float16 and upcasts when necessary (e.g. for optimizer accumulation). + +We've observed that higher precision for the main model parameters may be necessary, because SSMs are sensitive to their recurrent dynamics. If you are experiencing instabilities, +as a first step please try a framework storing parameters in fp32 (such as AMP). + +### Initialization +Some parts of the model have initializations inherited from prior work on S4 models. +For [example](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L102), the $\Delta$ parameter has a targeted range by initializing the bias of its linear projection. +However, some frameworks may have post-initialization hooks (e.g. setting all bias terms in `nn.Linear` modules to zero). +If this is the case, you may have to add custom logic (e.g. this [line](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L104) turns off re-initializing in our trainer, but would be a no-op in any other framework) +that is specific to the training framework. + +## Additional Prerequisites for AMD cards + +### Patching ROCm + +If you are on ROCm 6.0, run the following steps to avoid errors during compilation. This is not required for ROCm 6.1 onwards. + +1. Locate your ROCm installation directory. This is typically found at `/opt/rocm/`, but may vary depending on your installation. + +2. Apply the Patch. Run with `sudo` in case you encounter permission issues. 
+ ```bash + patch /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h < rocm_patch/rocm6_0.patch + ``` + + +## Citation + +If you use this codebase, or otherwise find our work valuable, please cite Mamba: +``` +@article{mamba, + title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces}, + author={Gu, Albert and Dao, Tri}, + journal={arXiv preprint arXiv:2312.00752}, + year={2023} +} + +@inproceedings{mamba2, + title={Transformers are {SSM}s: Generalized Models and Efficient Algorithms Through Structured State Space Duality}, + author={Dao, Tri and Gu, Albert}, + booktitle={International Conference on Machine Learning (ICML)}, + year={2024} +} + +``` + + diff --git a/mamba/mamba_ssm.egg-info/SOURCES.txt b/mamba/mamba_ssm.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..3c104b6305e26b3323e6d89569be6ff67b256423 --- /dev/null +++ b/mamba/mamba_ssm.egg-info/SOURCES.txt @@ -0,0 +1,40 @@ +AUTHORS +LICENSE +README.md +pyproject.toml +setup.py +mamba_ssm/__init__.py +mamba_ssm.egg-info/PKG-INFO +mamba_ssm.egg-info/SOURCES.txt +mamba_ssm.egg-info/dependency_links.txt +mamba_ssm.egg-info/top_level.txt +mamba_ssm/distributed/__init__.py +mamba_ssm/distributed/distributed_utils.py +mamba_ssm/distributed/tensor_parallel.py +mamba_ssm/models/__init__.py +mamba_ssm/models/config_mamba.py +mamba_ssm/models/mixer_seq_simple.py +mamba_ssm/modules/__init__.py +mamba_ssm/modules/block.py +mamba_ssm/modules/mamba2.py +mamba_ssm/modules/mamba2_simple.py +mamba_ssm/modules/mamba_simple.py +mamba_ssm/modules/mha.py +mamba_ssm/modules/mlp.py +mamba_ssm/modules/ssd_minimal.py +mamba_ssm/ops/__init__.py +mamba_ssm/ops/selective_scan_interface.py +mamba_ssm/ops/triton/__init__.py +mamba_ssm/ops/triton/k_activations.py +mamba_ssm/ops/triton/layer_norm.py +mamba_ssm/ops/triton/layernorm_gated.py +mamba_ssm/ops/triton/selective_state_update.py +mamba_ssm/ops/triton/softplus.py +mamba_ssm/ops/triton/ssd_bmm.py +mamba_ssm/ops/triton/ssd_chunk_scan.py +mamba_ssm/ops/triton/ssd_chunk_state.py +mamba_ssm/ops/triton/ssd_combined.py +mamba_ssm/ops/triton/ssd_state_passing.py +mamba_ssm/utils/__init__.py +mamba_ssm/utils/generation.py +mamba_ssm/utils/hf.py \ No newline at end of file diff --git a/mamba/mamba_ssm.egg-info/dependency_links.txt b/mamba/mamba_ssm.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/mamba/mamba_ssm.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/mamba/mamba_ssm.egg-info/top_level.txt b/mamba/mamba_ssm.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..c5abb3e905a6c1f015761af1bdce86119b2517eb --- /dev/null +++ b/mamba/mamba_ssm.egg-info/top_level.txt @@ -0,0 +1 @@ +mamba_ssm diff --git a/mamba/mamba_ssm/__init__.py b/mamba/mamba_ssm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..673ee32ab820d86ee5a2993d9000af506a8b0fd6 --- /dev/null +++ b/mamba/mamba_ssm/__init__.py @@ -0,0 +1,6 @@ +__version__ = "2.2.2" + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.modules.mamba2 import Mamba2 +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel diff --git a/mamba/mamba_ssm/distributed/__init__.py b/mamba/mamba_ssm/distributed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/mamba/mamba_ssm/distributed/distributed_utils.py b/mamba/mamba_ssm/distributed/distributed_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..74c55279645cd0fd687584bc1b7374c8c3c73e56 --- /dev/null +++ b/mamba/mamba_ssm/distributed/distributed_utils.py @@ -0,0 +1,144 @@ +from typing import Optional + +import torch +from torch import Tensor +from torch.distributed import ProcessGroup + +# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for +# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent +# version of PyTorch. The following 4 lines are for backward compatibility with +# older PyTorch. +if "all_gather_into_tensor" not in dir(torch.distributed): + torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base +if "reduce_scatter_tensor" not in dir(torch.distributed): + torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base + + +# Raw operation, does not support autograd, but does support async +def all_gather_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + world_size = torch.distributed.get_world_size(process_group) + output = torch.empty( + world_size * input_.shape[0], *input_.shape[1:], dtype=input_.dtype, device=input_.device + ) + handle = torch.distributed.all_gather_into_tensor( + output, input_.contiguous(), group=process_group, async_op=async_op + ) + return output, handle + + +# Raw operation, does not support autograd, but does support async +def reduce_scatter_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + world_size = torch.distributed.get_world_size(process_group) + assert input_.shape[0] % world_size == 0 + output = torch.empty( + input_.shape[0] // world_size, *input_.shape[1:], dtype=input_.dtype, device=input_.device + ) + handle = torch.distributed.reduce_scatter_tensor( + output, input_.contiguous(), group=process_group, async_op=async_op + ) + return output, handle + + +# Raw operation, does not support autograd, but does support async +def all_reduce_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + input_ = input_.contiguous() + handle = torch.distributed.all_reduce(input_, group=process_group, async_op=async_op) + return input_, handle + + +class AllGatherFunc(torch.autograd.Function): + """Gather the input from sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = all_gather_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + grad_input, _ = reduce_scatter_raw(grad_output, ctx.process_group) + return grad_input, None + + +# Supports autograd, but does not support async +all_gather = AllGatherFunc.apply + + +class ReduceScatterFunc(torch.autograd.Function): + """Reduce scatter the input from the sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = reduce_scatter_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + grad_input, _ = all_gather_raw(grad_output, ctx.process_group) + return grad_input, None + + +# Supports autograd, but does not support async +reduce_scatter = ReduceScatterFunc.apply + + +class AllReduceFunc(torch.autograd.Function): + """Gather the input from sequence parallel region 
and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = all_reduce_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + return grad_output, None + + +# Supports autograd, but does not support async +all_reduce = AllReduceFunc.apply + + +def sync_shared_params(model: torch.nn.Module, process_group: ProcessGroup): + # We want to iterate over parameters with _shared_params=True in the same order, + # as different ranks might have different number of parameters (e.g., only rank 0 has bias). + pamams_shared = { + name: p for name, p in model.named_parameters() if getattr(p, "_shared_params", False) + } + for _, p in sorted(pamams_shared.items()): + with torch.no_grad(): + # Broadcast needs src to be global rank, not group rank + torch.distributed.broadcast( + p, src=torch.distributed.get_global_rank(process_group, 0), group=process_group + ) + + +# Ref: https://github.com/NVIDIA/Megatron-LM/blob/52e636888cccc41e931251c417a7181fc36de926/megatron/optimizer/optimizer.py#L256 +def allreduce_sequence_parallel_grad(model: torch.nn.Module, process_group: ProcessGroup): + # We want to iterate over parameters with _sequence_parallel=True in the same order, + # as different ranks might have different number of parameters (e.g., only rank 0 has bias). + params_seqparallel = { + name: p for name, p in model.named_parameters() if getattr(p, "_sequence_parallel", False) + } + grads = [p.grad for _, p in sorted(params_seqparallel.items())] + if grads: + with torch.no_grad(): + coalesced = torch._utils._flatten_dense_tensors(grads) + torch.distributed.all_reduce(coalesced, group=process_group) + for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)): + buf.copy_(synced) + + +def get_dim_for_local_rank(dim: int, world_size: int, local_rank: int, multiple_of: int = 1) -> int: + """Get the dim for the local rank derived from splitting dim on world_size processes. + + The split may not be even across the world_size processes. + """ + multiple = dim // multiple_of + div = multiple // world_size + mod = multiple % world_size + local_multiple = div + int(local_rank < mod) + return local_multiple * multiple_of diff --git a/mamba/mamba_ssm/distributed/tensor_parallel.py b/mamba/mamba_ssm/distributed/tensor_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..3660abfc6bb0f0f11eb0b776be443197ef20b510 --- /dev/null +++ b/mamba/mamba_ssm/distributed/tensor_parallel.py @@ -0,0 +1,296 @@ +# Copyright (c) 2024, Tri Dao. +# The TensorParallel linear modules are inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/layers.py +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.cuda.amp import custom_bwd, custom_fwd +from torch.distributed import ProcessGroup + +from einops import rearrange + +from mamba_ssm.distributed.distributed_utils import ( + all_gather_raw, + all_reduce, + all_reduce_raw, + reduce_scatter, + reduce_scatter_raw, +) + + +class ParallelLinearFunc(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward(ctx, x, weight, bias, process_group=None, sequence_parallel=True): + """ + If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel + with sequence parallelism: we do an all_gather_raw of x before doing the matmul. 
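+        If sequence_parallel=False, x is assumed to be already gathered across the group;
+        in that case the backward pass all-reduces the gradient w.r.t. x instead of
+        reduce-scattering it.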
+ """ + ctx.compute_weight_gradient = weight.requires_grad + ctx.process_group = process_group + ctx.sequence_parallel = sequence_parallel + + if torch.is_autocast_enabled(): + x = x.to(dtype=torch.get_autocast_gpu_dtype()) + x = x.contiguous() + if process_group is not None and sequence_parallel: + # We want to kick off the all_gather early, before weight dtype conversion + total_x, handle_x = all_gather_raw(x, process_group, async_op=True) + else: + total_x = x + + if torch.is_autocast_enabled(): + weight = weight.to(dtype=torch.get_autocast_gpu_dtype()) + bias = bias.to(dtype=torch.get_autocast_gpu_dtype()) if bias is not None else None + weight = weight.contiguous() + if process_group is not None and sequence_parallel: + handle_x.wait() + batch_shape, n = total_x.shape[:-1], total_x.shape[-1] + batch_dim = batch_shape.numel() + # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174 + output = F.linear(total_x, weight, bias) + if ctx.compute_weight_gradient: + ctx.save_for_backward(x, weight) + else: + ctx.save_for_backward(weight) + return output + + @staticmethod + @custom_bwd + def backward(ctx, grad_output): + grad_output = grad_output.contiguous() + process_group = ctx.process_group + sequence_parallel = ctx.sequence_parallel + if ctx.compute_weight_gradient: + x, weight = ctx.saved_tensors + if process_group is not None and sequence_parallel: + total_x, handle_x = all_gather_raw(x, process_group, async_op=True) + else: + total_x = x + else: + (weight,) = ctx.saved_tensors + total_x = None + batch_shape = grad_output.shape[:-1] + batch_dim = batch_shape.numel() + grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1]) + if ctx.needs_input_grad[0]: + grad_input = F.linear(grad_output, weight.t()) + grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1]) + if process_group is not None: + reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw + grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True) + else: + grad_input = None + if ctx.needs_input_grad[1]: + assert ctx.compute_weight_gradient + if process_group is not None and sequence_parallel: + handle_x.wait() + grad_weight = torch.einsum( + "bo,bi->oi", grad_output, total_x.reshape(batch_dim, total_x.shape[-1]) + ) + else: + grad_weight = None + grad_bias = grad_output.sum(dim=0) if ctx.needs_input_grad[2] else None + if process_group is not None and ctx.needs_input_grad[0]: + handle_grad_input.wait() + return grad_input, grad_weight, grad_bias, None, None + + +def parallel_linear_func( + x: Tensor, + weight: Tensor, + bias: Optional[Tensor] = None, + process_group: Optional[ProcessGroup] = None, + sequence_parallel: bool = True, +): + return ParallelLinearFunc.apply(x, weight, bias, process_group, sequence_parallel) + + +class ColumnParallelLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + process_group: ProcessGroup, + bias: bool = True, + sequence_parallel=True, + multiple_of=1, + device=None, + dtype=None, + ) -> None: + world_size = torch.distributed.get_world_size(process_group) + if out_features % multiple_of: + raise ValueError(f"out_features ({out_features}) must be a multiple of {multiple_of}") + multiple = out_features // multiple_of + # We want to split @multiple across world_size, but it could be an uneven split + div = multiple // world_size + mod = multiple % world_size + # The first @mod ranks get @div + 1 copies, the rest get @div copies + 
local_multiple = div + int(torch.distributed.get_rank(process_group) < mod) + super().__init__( + in_features, local_multiple * multiple_of, bias=bias, device=device, dtype=dtype + ) + self.process_group = process_group + self.sequence_parallel = sequence_parallel + + def forward(self, x): + # If self.sequence_parallel is True, we're doing Tensor Parallel with sequence parallelism: + # we do an all_gather of x before doing the matmul. + # If not, then the input is already gathered. + return parallel_linear_func( + x, + self.weight, + self.bias, + process_group=self.process_group, + sequence_parallel=self.sequence_parallel, + ) + + +class RowParallelLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + process_group: ProcessGroup, + bias: bool = True, + sequence_parallel=True, + multiple_of=1, + device=None, + dtype=None, + ) -> None: + world_size = torch.distributed.get_world_size(process_group) + rank = torch.distributed.get_rank(process_group) + if in_features % multiple_of: + raise ValueError(f"in_features ({in_features}) must be a multiple of {multiple_of}") + multiple = in_features // multiple_of + # We want to split @multiple across world_size, but it could be an uneven split + div = multiple // world_size + mod = multiple % world_size + # The first @mod ranks get @div + 1 copies, the rest get @div copies + local_multiple = div + int(torch.distributed.get_rank(process_group) < mod) + # Only rank 0 will have bias + super().__init__( + local_multiple * multiple_of, + out_features, + bias=bias and rank == 0, + device=device, + dtype=dtype, + ) + self.process_group = process_group + self.sequence_parallel = sequence_parallel + + def forward(self, x): + """ + We're doing Tensor Parallel with sequence parallelism: we do the matmul and then + a reduce_scatter of the result. + """ + out = parallel_linear_func(x, self.weight, self.bias) + reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce + return reduce_fn(out, self.process_group) + + +class VocabParallelEmbedding(nn.Embedding): + def __init__(self, num_embeddings, *args, process_group=None, padding_idx=None, **kwargs): + self.process_group = process_group + if process_group is not None: + world_size = torch.distributed.get_world_size(process_group) + if num_embeddings % world_size != 0: + raise ValueError( + f"num_embeddings ({num_embeddings}) must be divisible by " + f"world_size ({world_size})" + ) + if world_size > 1 and padding_idx is not None: + raise RuntimeError("ParallelEmbedding does not support padding_idx") + else: + world_size = 1 + super().__init__(num_embeddings // world_size, *args, padding_idx=padding_idx, **kwargs) + + def forward(self, input: Tensor) -> Tensor: + if self.process_group is None: + return super().forward(input) + else: + rank = torch.distributed.get_rank(self.process_group) + vocab_size = self.num_embeddings + vocab_start_index, vocab_end_index = rank * vocab_size, (rank + 1) * vocab_size + # Create a mask of valid vocab ids (1 means it needs to be masked). 
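+            # e.g. world_size=2 with a 25140-entry shard per rank: rank 1 owns ids [25140, 50280).
+            # Out-of-range ids are zeroed here so that, once the caller reduces the embeddings
+            # across the group, each token keeps only the contribution of its owning rank.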
+ input_ids_mask = (input < vocab_start_index) | (input >= vocab_end_index) + input = input - vocab_start_index + input[input_ids_mask] = 0 + embeddings = super().forward(input) + embeddings[input_ids_mask] = 0.0 + return embeddings + + +class ColumnParallelEmbedding(nn.Embedding): + def __init__(self, num_embeddings, embedding_dim, *args, process_group=None, **kwargs): + self.process_group = process_group + if process_group is not None: + world_size = torch.distributed.get_world_size(process_group) + if embedding_dim % world_size != 0: + raise ValueError( + f"embedding_dim ({embedding_dim}) must be divisible by " + f"world_size ({world_size})" + ) + else: + world_size = 1 + super().__init__(num_embeddings, embedding_dim // world_size, *args, **kwargs) + + +class ParallelEmbeddings(nn.Module): + def __init__( + self, + embed_dim, + vocab_size, + max_position_embeddings, + process_group, + padding_idx=None, + sequence_parallel=True, + device=None, + dtype=None, + ): + """ + If max_position_embeddings <= 0, there's no position embeddings + """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.process_group = process_group + self.sequence_parallel = sequence_parallel + self.word_embeddings = VocabParallelEmbedding( + vocab_size, + embed_dim, + padding_idx=padding_idx, + process_group=process_group, + **factory_kwargs, + ) + self.max_position_embeddings = max_position_embeddings + if self.max_position_embeddings > 0: + self.position_embeddings = ColumnParallelEmbedding( + max_position_embeddings, embed_dim, process_group=process_group, **factory_kwargs + ) + + def forward(self, input_ids, position_ids=None, combine_batch_seqlen_dim=False): + """ + input_ids: (batch, seqlen) + position_ids: (batch, seqlen) + """ + batch_size, seqlen = input_ids.shape + world_size = torch.distributed.get_world_size(self.process_group) + embeddings = self.word_embeddings(input_ids) + if self.max_position_embeddings > 0: + if position_ids is None: + position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device) + position_embeddings = self.position_embeddings(position_ids) + if world_size <= 1: + embeddings = embeddings + position_embeddings + else: + partition_dim = self.position_embeddings.embedding_dim + rank = torch.distributed.get_rank(self.process_group) + embeddings[ + ..., rank * partition_dim : (rank + 1) * partition_dim + ] += position_embeddings + if combine_batch_seqlen_dim: + embeddings = rearrange(embeddings, "b s d -> (b s) d") + reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce + return embeddings if world_size <= 1 else reduce_fn(embeddings, self.process_group) diff --git a/mamba/mamba_ssm/models/__init__.py b/mamba/mamba_ssm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/mamba_ssm/models/config_mamba.py b/mamba/mamba_ssm/models/config_mamba.py new file mode 100644 index 0000000000000000000000000000000000000000..646c9e1e8ac94b2e82974cc0d5dab83fcfea900c --- /dev/null +++ b/mamba/mamba_ssm/models/config_mamba.py @@ -0,0 +1,18 @@ +from dataclasses import dataclass, field + + +@dataclass +class MambaConfig: + + d_model: int = 2560 + d_intermediate: int = 0 + n_layer: int = 64 + vocab_size: int = 50277 + ssm_cfg: dict = field(default_factory=dict) + attn_layer_idx: list = field(default_factory=list) + attn_cfg: dict = field(default_factory=dict) + rms_norm: bool = True + residual_in_fp32: bool = True + fused_add_norm: bool = True + 
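+    # MambaLMHeadModel rounds vocab_size up to the next multiple of this value
+    # (e.g. 50277 -> 50280 with the default of 8).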
pad_vocab_size_multiple: int = 8 + tie_embeddings: bool = True diff --git a/mamba/mamba_ssm/models/mixer_seq_simple.py b/mamba/mamba_ssm/models/mixer_seq_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..fae2257a924f30378c30cdd8adfd370ed15b2c4c --- /dev/null +++ b/mamba/mamba_ssm/models/mixer_seq_simple.py @@ -0,0 +1,309 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. + +import math +from functools import partial +import json +import os +import copy + +from collections import namedtuple + +import torch +import torch.nn as nn + +from mamba_ssm.models.config_mamba import MambaConfig +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.modules.mamba2 import Mamba2 +from mamba_ssm.modules.mha import MHA +from mamba_ssm.modules.mlp import GatedMLP +from mamba_ssm.modules.block import Block +from mamba_ssm.utils.generation import GenerationMixin +from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf + +try: + from mamba_ssm.ops.triton.layer_norm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +def create_block( + d_model, + d_intermediate, + ssm_cfg=None, + attn_layer_idx=None, + attn_cfg=None, + norm_epsilon=1e-5, + rms_norm=False, + residual_in_fp32=False, + fused_add_norm=False, + layer_idx=None, + device=None, + dtype=None, +): + if ssm_cfg is None: + ssm_cfg = {} + if attn_layer_idx is None: + attn_layer_idx = [] + if attn_cfg is None: + attn_cfg = {} + factory_kwargs = {"device": device, "dtype": dtype} + if layer_idx not in attn_layer_idx: + # Create a copy of the config to modify + ssm_cfg = copy.deepcopy(ssm_cfg) if ssm_cfg is not None else {} + ssm_layer = ssm_cfg.pop("layer", "Mamba1") + if ssm_layer not in ["Mamba1", "Mamba2"]: + raise ValueError(f"Invalid ssm_layer: {ssm_layer}, only support Mamba1 and Mamba2") + mixer_cls = partial( + Mamba2 if ssm_layer == "Mamba2" else Mamba, + layer_idx=layer_idx, + **ssm_cfg, + **factory_kwargs + ) + else: + mixer_cls = partial(MHA, layer_idx=layer_idx, **attn_cfg, **factory_kwargs) + norm_cls = partial( + nn.LayerNorm if not rms_norm else RMSNorm, eps=norm_epsilon, **factory_kwargs + ) + if d_intermediate == 0: + mlp_cls = nn.Identity + else: + mlp_cls = partial( + GatedMLP, hidden_features=d_intermediate, out_features=d_model, **factory_kwargs + ) + block = Block( + d_model, + mixer_cls, + mlp_cls, + norm_cls=norm_cls, + fused_add_norm=fused_add_norm, + residual_in_fp32=residual_in_fp32, + ) + block.layer_idx = layer_idx + return block + + +# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454 +def _init_weights( + module, + n_layer, + initializer_range=0.02, # Now only used for embedding layer. + rescale_prenorm_residual=True, + n_residuals_per_layer=1, # Change to 2 if we have MLP +): + if isinstance(module, nn.Linear): + if module.bias is not None: + if not getattr(module.bias, "_no_reinit", False): + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Embedding): + nn.init.normal_(module.weight, std=initializer_range) + + if rescale_prenorm_residual: + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. 
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if name in ["out_proj.weight", "fc2.weight"]: + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) + # We need to reinit p since this code could be called multiple times + # Having just p *= scale would repeatedly scale it down + nn.init.kaiming_uniform_(p, a=math.sqrt(5)) + with torch.no_grad(): + p /= math.sqrt(n_residuals_per_layer * n_layer) + + +class MixerModel(nn.Module): + def __init__( + self, + d_model: int, + n_layer: int, + d_intermediate: int, + vocab_size: int, + ssm_cfg=None, + attn_layer_idx=None, + attn_cfg=None, + norm_epsilon: float = 1e-5, + rms_norm: bool = False, + initializer_cfg=None, + fused_add_norm=False, + residual_in_fp32=False, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + + self.embedding = nn.Embedding(vocab_size, d_model, **factory_kwargs) + + # We change the order of residual and layer norm: + # Instead of LN -> Attn / MLP -> Add, we do: + # Add -> LN -> Attn / MLP / Mixer, returning both the residual branch (output of Add) and + # the main branch (output of MLP / Mixer). The model definition is unchanged. + # This is for performance reason: we can fuse add + layer_norm. + self.fused_add_norm = fused_add_norm + if self.fused_add_norm: + if layer_norm_fn is None or rms_norm_fn is None: + raise ImportError("Failed to import Triton LayerNorm / RMSNorm kernels") + + self.layers = nn.ModuleList( + [ + create_block( + d_model, + d_intermediate=d_intermediate, + ssm_cfg=ssm_cfg, + attn_layer_idx=attn_layer_idx, + attn_cfg=attn_cfg, + norm_epsilon=norm_epsilon, + rms_norm=rms_norm, + residual_in_fp32=residual_in_fp32, + fused_add_norm=fused_add_norm, + layer_idx=i, + **factory_kwargs, + ) + for i in range(n_layer) + ] + ) + + self.norm_f = (nn.LayerNorm if not rms_norm else RMSNorm)( + d_model, eps=norm_epsilon, **factory_kwargs + ) + + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + n_residuals_per_layer=1 if d_intermediate == 0 else 2, # 2 if we have MLP + ) + ) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return { + i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + for i, layer in enumerate(self.layers) + } + + def forward(self, input_ids, inference_params=None, **mixer_kwargs): + hidden_states = self.embedding(input_ids) + residual = None + for layer in self.layers: + hidden_states, residual = layer( + hidden_states, residual, inference_params=inference_params, **mixer_kwargs + ) + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm_f(residual.to(dtype=self.norm_f.weight.dtype)) + else: + # Set prenorm=False here since we don't need the residual + hidden_states = layer_norm_fn( + hidden_states, + self.norm_f.weight, + self.norm_f.bias, + eps=self.norm_f.eps, + residual=residual, + prenorm=False, + residual_in_fp32=self.residual_in_fp32, + is_rms_norm=isinstance(self.norm_f, RMSNorm) + ) + return hidden_states + + +class MambaLMHeadModel(nn.Module, GenerationMixin): + + def __init__( + self, + config: 
MambaConfig, + initializer_cfg=None, + device=None, + dtype=None, + ) -> None: + self.config = config + d_model = config.d_model + n_layer = config.n_layer + d_intermediate = config.d_intermediate + vocab_size = config.vocab_size + ssm_cfg = config.ssm_cfg + attn_layer_idx = config.attn_layer_idx + attn_cfg = config.attn_cfg + rms_norm = config.rms_norm + residual_in_fp32 = config.residual_in_fp32 + fused_add_norm = config.fused_add_norm + pad_vocab_size_multiple = config.pad_vocab_size_multiple + factory_kwargs = {"device": device, "dtype": dtype} + + super().__init__() + if vocab_size % pad_vocab_size_multiple != 0: + vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple) + self.backbone = MixerModel( + d_model=d_model, + n_layer=n_layer, + d_intermediate=d_intermediate, + vocab_size=vocab_size, + ssm_cfg=ssm_cfg, + attn_layer_idx=attn_layer_idx, + attn_cfg=attn_cfg, + rms_norm=rms_norm, + initializer_cfg=initializer_cfg, + fused_add_norm=fused_add_norm, + residual_in_fp32=residual_in_fp32, + **factory_kwargs, + ) + self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs) + + # Initialize weights and apply final processing + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + self.tie_weights() + + def tie_weights(self): + if self.config.tie_embeddings: + self.lm_head.weight = self.backbone.embedding.weight + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.backbone.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + + def forward(self, input_ids, position_ids=None, inference_params=None, num_last_tokens=0, **mixer_kwargs): + """ + "position_ids" is just to be compatible with Transformer generation. We don't use it. + num_last_tokens: if > 0, only return the logits for the last n tokens + """ + hidden_states = self.backbone(input_ids, inference_params=inference_params, **mixer_kwargs) + if num_last_tokens > 0: + hidden_states = hidden_states[:, -num_last_tokens:] + lm_logits = self.lm_head(hidden_states) + CausalLMOutput = namedtuple("CausalLMOutput", ["logits"]) + return CausalLMOutput(logits=lm_logits) + + @classmethod + def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs): + config_data = load_config_hf(pretrained_model_name) + config = MambaConfig(**config_data) + model = cls(config, device=device, dtype=dtype, **kwargs) + model.load_state_dict(load_state_dict_hf(pretrained_model_name, device=device, dtype=dtype)) + return model + + def save_pretrained(self, save_directory): + """ + Minimal implementation of save_pretrained for MambaLMHeadModel. + Save the model and its configuration file to a directory. 
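+        Writes 'pytorch_model.bin' (the state_dict) and 'config.json' (the MambaConfig
+        fields) to save_directory.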
+ """ + # Ensure save_directory exists + os.makedirs(save_directory, exist_ok=True) + + # Save the model's state_dict + model_path = os.path.join(save_directory, 'pytorch_model.bin') + torch.save(self.state_dict(), model_path) + + # Save the configuration of the model + config_path = os.path.join(save_directory, 'config.json') + with open(config_path, 'w') as f: + json.dump(self.config.__dict__, f, indent=4) diff --git a/mamba/mamba_ssm/modules/__init__.py b/mamba/mamba_ssm/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/mamba_ssm/modules/block.py b/mamba/mamba_ssm/modules/block.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd968a0bf20668bb312f9b7981529cf5c915471 --- /dev/null +++ b/mamba/mamba_ssm/modules/block.py @@ -0,0 +1,91 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. +from typing import Optional + +import torch +from torch import nn, Tensor + +from mamba_ssm.ops.triton.layer_norm import RMSNorm, layer_norm_fn + + +class Block(nn.Module): + def __init__( + self, dim, mixer_cls, mlp_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False + ): + """ + Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection" + + This Block has a slightly different structure compared to a regular + prenorm Transformer block. + The standard block is: LN -> MHA/MLP -> Add. + [Ref: https://arxiv.org/abs/2002.04745] + Here we have: Add -> LN -> Mixer, returning both + the hidden_states (output of the mixer) and the residual. + This is purely for performance reasons, as we can fuse add and LayerNorm. + The residual needs to be provided (except for the very first block). + """ + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + self.fused_add_norm = fused_add_norm + self.norm = norm_cls(dim) + self.mixer = mixer_cls(dim) + if mlp_cls is not nn.Identity: + self.norm2 = norm_cls(dim) + self.mlp = mlp_cls(dim) + else: + self.mlp = None + if self.fused_add_norm: + assert RMSNorm is not None, "RMSNorm import fails" + assert isinstance( + self.norm, (nn.LayerNorm, RMSNorm) + ), "Only LayerNorm and RMSNorm are supported for fused_add_norm" + + def forward( + self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None, **mixer_kwargs + ): + r"""Pass the input through the encoder layer. + + Args: + hidden_states: the sequence to the encoder layer (required). 
+ residual: hidden_states = Mixer(LN(residual)) + """ + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + else: + hidden_states, residual = layer_norm_fn( + hidden_states, + self.norm.weight, + self.norm.bias, + residual=residual, + prenorm=True, + residual_in_fp32=self.residual_in_fp32, + eps=self.norm.eps, + is_rms_norm=isinstance(self.norm, RMSNorm) + ) + hidden_states = self.mixer(hidden_states, inference_params=inference_params, **mixer_kwargs) + + if self.mlp is not None: + if not self.fused_add_norm: + residual = hidden_states + residual + hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + else: + hidden_states, residual = layer_norm_fn( + hidden_states, + self.norm2.weight, + self.norm2.bias, + residual=residual, + prenorm=True, + residual_in_fp32=self.residual_in_fp32, + eps=self.norm2.eps, + is_rms_norm=isinstance(self.norm2, RMSNorm) + ) + hidden_states = self.mlp(hidden_states) + + return hidden_states, residual + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) diff --git a/mamba/mamba_ssm/modules/mamba2.py b/mamba/mamba_ssm/modules/mamba2.py new file mode 100644 index 0000000000000000000000000000000000000000..1859ab0de591a2b3c79e26d3f31222bd295e876e --- /dev/null +++ b/mamba/mamba_ssm/modules/mamba2.py @@ -0,0 +1,383 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None, None + +try: + from causal_conv1d.causal_conv1d_varlen import causal_conv1d_varlen_states +except ImportError: + causal_conv1d_varlen_states = None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +from mamba_ssm.ops.triton.layernorm_gated import RMSNorm as RMSNormGated + +from mamba_ssm.distributed.tensor_parallel import ColumnParallelLinear, RowParallelLinear +from mamba_ssm.distributed.distributed_utils import all_reduce, reduce_scatter + +from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined +from mamba_ssm.ops.triton.ssd_combined import mamba_split_conv1d_scan_combined + +from huggingface_hub import PyTorchModelHubMixin + + +class Mamba2(nn.Module, PyTorchModelHubMixin): + def __init__( + self, + d_model, + d_state=128, + d_conv=4, + conv_init=None, + expand=2, + headdim=64, + d_ssm=None, # If not None, we only apply SSM on this many dimensions, the rest uses gated MLP + ngroups=1, + A_init_range=(1, 16), + D_has_hdim=False, + rmsnorm=True, + norm_before_gate=False, + dt_min=0.001, + dt_max=0.1, + dt_init_floor=1e-4, + dt_limit=(0.0, float("inf")), + bias=False, + conv_bias=True, + # Fused kernel and sharding options + chunk_size=256, + use_mem_eff_path=True, + layer_idx=None, # Absorb kwarg for general module + process_group=None, + sequence_parallel=True, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + 
self.d_conv = d_conv + self.conv_init = conv_init + self.expand = expand + self.process_group = process_group + self.sequence_parallel = sequence_parallel + self.world_size = 1 if process_group is None else process_group.size() + self.local_rank = 0 if process_group is None else process_group.rank() + self.d_inner = (self.expand * self.d_model) // self.world_size + assert self.d_inner * self.world_size == self.expand * self.d_model + self.headdim = headdim + self.d_ssm = self.d_inner if d_ssm is None else d_ssm // self.world_size + assert ngroups % self.world_size == 0 + self.ngroups = ngroups // self.world_size + assert self.d_ssm % self.headdim == 0 + self.nheads = self.d_ssm // self.headdim + self.D_has_hdim = D_has_hdim + self.rmsnorm = rmsnorm + self.norm_before_gate = norm_before_gate + self.dt_limit = dt_limit + self.activation = "silu" + self.chunk_size = chunk_size + self.use_mem_eff_path = use_mem_eff_path + self.layer_idx = layer_idx + + # Order: [z, x, B, C, dt] + d_in_proj = 2 * self.d_inner + 2 * self.ngroups * self.d_state + self.nheads + if self.process_group is None: + self.in_proj = nn.Linear(self.d_model, d_in_proj, bias=bias, **factory_kwargs) + else: + self.in_proj = ColumnParallelLinear(self.d_model, d_in_proj * self.world_size, bias=bias, + process_group=self.process_group, sequence_parallel=self.sequence_parallel, + **factory_kwargs) + + conv_dim = self.d_ssm + 2 * self.ngroups * self.d_state + self.conv1d = nn.Conv1d( + in_channels=conv_dim, + out_channels=conv_dim, + bias=conv_bias, + kernel_size=d_conv, + groups=conv_dim, + padding=d_conv - 1, + **factory_kwargs, + ) + if self.conv_init is not None: + nn.init.uniform_(self.conv1d.weight, -self.conv_init, self.conv_init) + + self.act = nn.SiLU() + + # Initialize log dt bias + dt = torch.exp( + torch.rand(self.nheads, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ) + dt = torch.clamp(dt, min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + self.dt_bias = nn.Parameter(inv_dt) + # Just to be explicit. Without this we already don't put wd on dt_bias because of the check + # name.endswith("bias") in param_grouping.py + self.dt_bias._no_weight_decay = True + + assert A_init_range[0] > 0 and A_init_range[1] >= A_init_range[0] + A = torch.empty(self.nheads, dtype=torch.float32, device=device).uniform_(*A_init_range) + A_log = torch.log(A).to(dtype=dtype) + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_ssm if self.D_has_hdim else self.nheads, device=device)) + self.D._no_weight_decay = True + + if self.rmsnorm: + assert RMSNormGated is not None + self.norm = RMSNormGated(self.d_ssm, eps=1e-5, norm_before_gate=self.norm_before_gate, + group_size=self.d_ssm // ngroups, **factory_kwargs) + + if self.process_group is None: + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) + else: + self.out_proj = RowParallelLinear(self.d_inner * self.world_size, self.d_model, bias=bias, + process_group=self.process_group, sequence_parallel=self.sequence_parallel, + **factory_kwargs) + + def forward(self, u, seqlen=None, seq_idx=None, cu_seqlens=None, inference_params=None): + """ + u: (batch, seqlen, hidden_dim) if seqlen=None. + If seqlen is not None, u is (batch * seqlen, hidden_dim). 
This is so that when we + split u during sequence parallel, we split the batch * seqlen dimension + (in case batch is small). + Returns: same shape as u + """ + seqlen_og = seqlen + if seqlen is None: + batch, seqlen, dim = u.shape + else: + batch_seqlen, dim = u.shape + batch = batch_seqlen // seqlen + + conv_state, ssm_state = None, None + if inference_params is not None: + inference_batch = cu_seqlens.shape[0] - 1 if cu_seqlens is not None else batch + conv_state, ssm_state = self._get_states_from_cache(inference_params, inference_batch) + if inference_params.seqlen_offset > 0: + # The states are updated inplace + out, _, _ = self.step(u, conv_state, ssm_state) + return out + + zxbcdt = self.in_proj(u) # (B, L, d_in_proj) or (B * L, d_in_proj) + if seqlen_og is not None: + zxbcdt = rearrange(zxbcdt, "(b l) d -> b l d", l=seqlen) + # If the model is loaded in fp16, without the .float() here, A might be -inf + A = -torch.exp(self.A_log.float()) # (nheads) or (d_inner, d_state) + dt_limit_kwargs = {} if self.dt_limit == (0.0, float("inf")) else dict(dt_limit=self.dt_limit) + if self.use_mem_eff_path and inference_params is None: + out = mamba_split_conv1d_scan_combined( + zxbcdt, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.dt_bias, + A, + D=rearrange(self.D, "(h p) -> h p", p=self.headdim) if self.D_has_hdim else self.D, + chunk_size=self.chunk_size, + seq_idx=seq_idx, + activation=self.activation, + rmsnorm_weight=self.norm.weight if self.rmsnorm else None, + rmsnorm_eps=self.norm.eps if self.rmsnorm else 1e-6, + outproj_weight=self.out_proj.weight, + outproj_bias=self.out_proj.bias, + headdim=None if self.D_has_hdim else self.headdim, + ngroups=self.ngroups, + norm_before_gate=self.norm_before_gate, + **dt_limit_kwargs, + ) + if seqlen_og is not None: + out = rearrange(out, "b l d -> (b l) d") + if self.process_group is not None: + reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce + out = reduce_fn(out, self.process_group) + else: + d_mlp = (zxbcdt.shape[-1] - 2 * self.d_ssm - 2 * self.ngroups * self.d_state - self.nheads) // 2 + z0, x0, z, xBC, dt = torch.split( + zxbcdt, + [d_mlp, d_mlp, self.d_ssm, self.d_ssm + 2 * self.ngroups * self.d_state, self.nheads], + dim=-1 + ) + if conv_state is not None: + if cu_seqlens is None: + # If we just take xBC[:, :, -self.d_conv :], it will error if seqlen < self.d_conv + # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. 
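# Note (descriptive): conv_state caches the last d_conv time steps of the depthwise-conv
# input, shaped (batch, conv_dim, d_conv) as allocated in allocate_inference_cache, so that
# step() can continue the causal convolution one token at a time during decoding.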
+ xBC_t = rearrange(xBC, "b l d -> b d l")
+ conv_state.copy_(F.pad(xBC_t, (self.d_conv - xBC_t.shape[-1], 0))) # Update state (B D W)
+ else:
+ assert causal_conv1d_varlen_states is not None, "varlen inference requires causal_conv1d package"
+ assert batch == 1, "varlen inference only supports batch dimension 1"
+ conv_varlen_states = causal_conv1d_varlen_states(
+ xBC.squeeze(0), cu_seqlens, state_len=conv_state.shape[-1]
+ )
+ conv_state.copy_(conv_varlen_states)
+ assert self.activation in ["silu", "swish"]
+ if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
+ assert seq_idx is None, "varlen conv1d requires the causal_conv1d package"
+ xBC = self.act(
+ self.conv1d(xBC.transpose(1, 2)).transpose(1, 2)[:, :seqlen]
+ ) # (B, L, self.d_ssm + 2 * ngroups * d_state)
+ else:
+ xBC = causal_conv1d_fn(
+ xBC.transpose(1, 2),
+ rearrange(self.conv1d.weight, "d 1 w -> d w"),
+ bias=self.conv1d.bias,
+ activation=self.activation,
+ seq_idx=seq_idx,
+ ).transpose(1, 2)
+ x, B, C = torch.split(xBC, [self.d_ssm, self.ngroups * self.d_state, self.ngroups * self.d_state], dim=-1)
+ y = mamba_chunk_scan_combined(
+ rearrange(x, "b l (h p) -> b l h p", p=self.headdim),
+ dt,
+ A,
+ rearrange(B, "b l (g n) -> b l g n", g=self.ngroups),
+ rearrange(C, "b l (g n) -> b l g n", g=self.ngroups),
+ chunk_size=self.chunk_size,
+ D=rearrange(self.D, "(h p) -> h p", p=self.headdim) if self.D_has_hdim else self.D,
+ z=rearrange(z, "b l (h p) -> b l h p", p=self.headdim) if not self.rmsnorm else None,
+ dt_bias=self.dt_bias,
+ dt_softplus=True,
+ seq_idx=seq_idx,
+ cu_seqlens=cu_seqlens,
+ **dt_limit_kwargs,
+ return_final_states=ssm_state is not None,
+ return_varlen_states=cu_seqlens is not None and inference_params is not None,
+ )
+ if ssm_state is not None:
+ y, last_state, *rest = y
+ if cu_seqlens is None:
+ ssm_state.copy_(last_state)
+ else:
+ varlen_states = rest[0]
+ ssm_state.copy_(varlen_states)
+ y = rearrange(y, "b l h p -> b l (h p)")
+ if self.rmsnorm:
+ y = self.norm(y, z)
+ if d_mlp > 0:
+ y = torch.cat([F.silu(z0) * x0, y], dim=-1)
+ if seqlen_og is not None:
+ y = rearrange(y, "b l d -> (b l) d")
+ out = self.out_proj(y)
+ return out
+
+ def step(self, hidden_states, conv_state, ssm_state):
+ dtype = hidden_states.dtype
+ assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now"
+ zxbcdt = self.in_proj(hidden_states.squeeze(1)) # (B 2D)
+ d_mlp = (zxbcdt.shape[-1] - 2 * self.d_ssm - 2 * self.ngroups * self.d_state - self.nheads) // 2
+ z0, x0, z, xBC, dt = torch.split(
+ zxbcdt,
+ [d_mlp, d_mlp, self.d_ssm, self.d_ssm + 2 * self.ngroups * self.d_state, self.nheads],
+ dim=-1
+ )
+
+ # Conv step
+ if causal_conv1d_update is None:
+ conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W)
+ conv_state[:, :, -1] = xBC
+ xBC = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D)
+ if self.conv1d.bias is not None:
+ xBC = xBC + self.conv1d.bias
+ xBC = self.act(xBC).to(dtype=dtype)
+ else:
+ xBC = causal_conv1d_update(
+ xBC,
+ conv_state,
+ rearrange(self.conv1d.weight, "d 1 w -> d w"),
+ self.conv1d.bias,
+ self.activation,
+ )
+
+ x, B, C = torch.split(xBC, [self.d_ssm, self.ngroups * self.d_state, self.ngroups * self.d_state], dim=-1)
+ A = -torch.exp(self.A_log.float()) # (nheads,)
+
+ # SSM step
+ if selective_state_update is None:
+ assert self.ngroups == 1, "Only support ngroups=1 for this inference code path"
+ # Discretize A and B
+ dt = F.softplus(dt +
self.dt_bias.to(dtype=dt.dtype)) # (batch, nheads) + dA = torch.exp(dt * A) # (batch, nheads) + x = rearrange(x, "b (h p) -> b h p", p=self.headdim) + dBx = torch.einsum("bh,bn,bhp->bhpn", dt, B, x) + ssm_state.copy_(ssm_state * rearrange(dA, "b h -> b h 1 1") + dBx) + y = torch.einsum("bhpn,bn->bhp", ssm_state.to(dtype), C) + y = y + rearrange(self.D.to(dtype), "h -> h 1") * x + y = rearrange(y, "b h p -> b (h p)") + if not self.rmsnorm: + y = y * self.act(z) # (B D) + else: + A = repeat(A, "h -> h p n", p=self.headdim, n=self.d_state).to(dtype=torch.float32) + dt = repeat(dt, "b h -> b h p", p=self.headdim) + dt_bias = repeat(self.dt_bias, "h -> h p", p=self.headdim) + D = repeat(self.D, "h -> h p", p=self.headdim) + B = rearrange(B, "b (g n) -> b g n", g=self.ngroups) + C = rearrange(C, "b (g n) -> b g n", g=self.ngroups) + x_reshaped = rearrange(x, "b (h p) -> b h p", p=self.headdim) + if not self.rmsnorm: + z = rearrange(z, "b (h p) -> b h p", p=self.headdim) + y = selective_state_update( + ssm_state, x_reshaped, dt, A, B, C, D, z=z if not self.rmsnorm else None, + dt_bias=dt_bias, dt_softplus=True + ) + y = rearrange(y, "b h p -> b (h p)") + if self.rmsnorm: + y = self.norm(y, z) + if d_mlp > 0: + y = torch.cat([F.silu(z0) * x0, y], dim=-1) + out = self.out_proj(y) + return out.unsqueeze(1), conv_state, ssm_state + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + device = self.out_proj.weight.device + conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype + conv_state = torch.zeros( + batch_size, self.d_conv, self.conv1d.weight.shape[0], device=device, dtype=conv_dtype + ).transpose(1, 2) + ssm_dtype = self.in_proj.weight.dtype if dtype is None else dtype + ssm_state = torch.zeros( + batch_size, self.nheads, self.headdim, self.d_state, device=device, dtype=ssm_dtype + ) + return conv_state, ssm_state + + def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): + assert self.layer_idx is not None + if self.layer_idx not in inference_params.key_value_memory_dict: + batch_shape = (batch_size,) + conv_state = torch.zeros( + batch_size, + self.d_conv, + self.conv1d.weight.shape[0], + device=self.conv1d.weight.device, + dtype=self.conv1d.weight.dtype, + ).transpose(1, 2) + ssm_state = torch.zeros( + batch_size, + self.nheads, + self.headdim, + self.d_state, + device=self.in_proj.weight.device, + dtype=self.in_proj.weight.dtype, + ) + inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) + else: + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + # TODO: What if batch size changes between generation, and we reuse the same states? + if initialize_states: + conv_state.zero_() + ssm_state.zero_() + return conv_state, ssm_state diff --git a/mamba/mamba_ssm/modules/mamba2_simple.py b/mamba/mamba_ssm/modules/mamba2_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..77a6af28e9f4630c482aa2c108c74f5d1dad1040 --- /dev/null +++ b/mamba/mamba_ssm/modules/mamba2_simple.py @@ -0,0 +1,200 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
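# A minimal usage sketch for the Mamba2 module defined above (Mamba2Simple below exposes a
# similar forward(u) interface). This assumes mamba_ssm is installed together with its
# CUDA/Triton kernels and a GPU is available; the sizes are arbitrary illustrative values.
import torch
from mamba_ssm.modules.mamba2 import Mamba2

layer = Mamba2(d_model=256, d_state=128, headdim=64, expand=2, layer_idx=0).to("cuda")
x = torch.randn(2, 512, 256, device="cuda")   # (batch, seqlen, d_model)
y = layer(x)                                  # same shape as x: (2, 512, 256)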
+ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn +except ImportError: + causal_conv1d_fn = None + +try: + from mamba_ssm.ops.triton.layernorm_gated import RMSNorm as RMSNormGated, LayerNorm +except ImportError: + RMSNormGated, LayerNorm = None, None + +from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined +from mamba_ssm.ops.triton.ssd_combined import mamba_split_conv1d_scan_combined + + +class Mamba2Simple(nn.Module): + def __init__( + self, + d_model, + d_state=64, + d_conv=4, + conv_init=None, + expand=2, + headdim=128, + ngroups=1, + A_init_range=(1, 16), + dt_min=0.001, + dt_max=0.1, + dt_init_floor=1e-4, + dt_limit=(0.0, float("inf")), + learnable_init_states=False, + activation="swish", + bias=False, + conv_bias=True, + # Fused kernel and sharding options + chunk_size=256, + use_mem_eff_path=True, + layer_idx=None, # Absorb kwarg for general module + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.conv_init = conv_init + self.expand = expand + self.d_inner = self.expand * self.d_model + self.headdim = headdim + self.ngroups = ngroups + assert self.d_inner % self.headdim == 0 + self.nheads = self.d_inner // self.headdim + self.dt_limit = dt_limit + self.learnable_init_states = learnable_init_states + self.activation = activation + self.chunk_size = chunk_size + self.use_mem_eff_path = use_mem_eff_path + self.layer_idx = layer_idx + + # Order: [z, x, B, C, dt] + d_in_proj = 2 * self.d_inner + 2 * self.ngroups * self.d_state + self.nheads + self.in_proj = nn.Linear(self.d_model, d_in_proj, bias=bias, **factory_kwargs) + + conv_dim = self.d_inner + 2 * self.ngroups * self.d_state + self.conv1d = nn.Conv1d( + in_channels=conv_dim, + out_channels=conv_dim, + bias=conv_bias, + kernel_size=d_conv, + groups=conv_dim, + padding=d_conv - 1, + **factory_kwargs, + ) + if self.conv_init is not None: + nn.init.uniform_(self.conv1d.weight, -self.conv_init, self.conv_init) + # self.conv1d.weight._no_weight_decay = True + + if self.learnable_init_states: + self.init_states = nn.Parameter(torch.zeros(self.nheads, self.headdim, self.d_state, **factory_kwargs)) + self.init_states._no_weight_decay = True + + self.act = nn.SiLU() + + # Initialize log dt bias + dt = torch.exp( + torch.rand(self.nheads, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ) + dt = torch.clamp(dt, min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + self.dt_bias = nn.Parameter(inv_dt) + # Just to be explicit. 
Without this we already don't put wd on dt_bias because of the check + # name.endswith("bias") in param_grouping.py + self.dt_bias._no_weight_decay = True + + # A parameter + assert A_init_range[0] > 0 and A_init_range[1] >= A_init_range[0] + A = torch.empty(self.nheads, dtype=torch.float32, device=device).uniform_(*A_init_range) + A_log = torch.log(A).to(dtype=dtype) + self.A_log = nn.Parameter(A_log) + # self.register_buffer("A_log", torch.zeros(self.nheads, dtype=torch.float32, device=device), persistent=True) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.nheads, device=device)) + self.D._no_weight_decay = True + + # Extra normalization layer right before output projection + assert RMSNormGated is not None + self.norm = RMSNormGated(self.d_inner, eps=1e-5, norm_before_gate=False, **factory_kwargs) + + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) + + def forward(self, u, seq_idx=None): + """ + u: (B, L, D) + Returns: same shape as u + """ + batch, seqlen, dim = u.shape + + zxbcdt = self.in_proj(u) # (B, L, d_in_proj) + A = -torch.exp(self.A_log) # (nheads) or (d_inner, d_state) + initial_states=repeat(self.init_states, "... -> b ...", b=batch) if self.learnable_init_states else None + dt_limit_kwargs = {} if self.dt_limit == (0.0, float("inf")) else dict(dt_limit=self.dt_limit) + + if self.use_mem_eff_path: + # Fully fused path + out = mamba_split_conv1d_scan_combined( + zxbcdt, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.dt_bias, + A, + D=self.D, + chunk_size=self.chunk_size, + seq_idx=seq_idx, + activation=self.activation, + rmsnorm_weight=self.norm.weight, + rmsnorm_eps=self.norm.eps, + outproj_weight=self.out_proj.weight, + outproj_bias=self.out_proj.bias, + headdim=self.headdim, + ngroups=self.ngroups, + norm_before_gate=False, + initial_states=initial_states, + **dt_limit_kwargs, + ) + else: + z, xBC, dt = torch.split( + zxbcdt, [self.d_inner, self.d_inner + 2 * self.ngroups * self.d_state, self.nheads], dim=-1 + ) + dt = F.softplus(dt + self.dt_bias) # (B, L, nheads) + assert self.activation in ["silu", "swish"] + + # 1D Convolution + if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]: + xBC = self.act( + self.conv1d(xBC.transpose(1, 2)).transpose(1, 2) + ) # (B, L, self.d_inner + 2 * ngroups * d_state) + xBC = xBC[:, :seqlen, :] + else: + xBC = causal_conv1d_fn( + x=xBC.transpose(1, 2), + weight=rearrange(self.conv1d.weight, "d 1 w -> d w"), + bias=self.conv1d.bias, + activation=self.activation, + ).transpose(1, 2) + + # Split into 3 main branches: X, B, C + # These correspond to V, K, Q respectively in the SSM/attention duality + x, B, C = torch.split(xBC, [self.d_inner, self.ngroups * self.d_state, self.ngroups * self.d_state], dim=-1) + y = mamba_chunk_scan_combined( + rearrange(x, "b l (h p) -> b l h p", p=self.headdim), + dt, + A, + rearrange(B, "b l (g n) -> b l g n", g=self.ngroups), + rearrange(C, "b l (g n) -> b l g n", g=self.ngroups), + chunk_size=self.chunk_size, + D=self.D, + z=None, + seq_idx=seq_idx, + initial_states=initial_states, + **dt_limit_kwargs, + ) + y = rearrange(y, "b l h p -> b l (h p)") + + # Multiply "gate" branch and apply extra normalization layer + y = self.norm(y, z) + out = self.out_proj(y) + return out diff --git a/mamba/mamba_ssm/modules/mamba_simple.py b/mamba/mamba_ssm/modules/mamba_simple.py new file mode 100644 index 
0000000000000000000000000000000000000000..4c8a388217c4cd4d21d5ea4704a5d571be294781 --- /dev/null +++ b/mamba/mamba_ssm/modules/mamba_simple.py @@ -0,0 +1,294 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. + +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from einops import rearrange, repeat + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None, None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +try: + from mamba_ssm.ops.triton.layer_norm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +class Mamba(nn.Module): + def __init__( + self, + d_model, + d_state=16, + d_conv=4, + expand=2, + dt_rank="auto", + dt_min=0.001, + dt_max=0.1, + dt_init="random", + dt_scale=1.0, + dt_init_floor=1e-4, + conv_bias=True, + bias=False, + use_fast_path=True, # Fused kernel options + layer_idx=None, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.expand = expand + self.d_inner = int(self.expand * self.d_model) + self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank + self.use_fast_path = use_fast_path + self.layer_idx = layer_idx + + self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs) + + self.conv1d = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.activation = "silu" + self.act = nn.SiLU() + + self.x_proj = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + # Initialize special dt projection to preserve variance at initialization + dt_init_std = self.dt_rank**-0.5 * dt_scale + if dt_init == "constant": + nn.init.constant_(self.dt_proj.weight, dt_init_std) + elif dt_init == "random": + nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std) + else: + raise NotImplementedError + + # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max + dt = torch.exp( + torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ).clamp(min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + with torch.no_grad(): + self.dt_proj.bias.copy_(inv_dt) + # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit + self.dt_proj.bias._no_reinit = True + + # S4D real initialization + A = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_log = torch.log(A) # Keep A_log in fp32 + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D._no_weight_decay = True + + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, 
**factory_kwargs) + + def forward(self, hidden_states, inference_params=None): + """ + hidden_states: (B, L, D) + Returns: same shape as hidden_states + """ + batch, seqlen, dim = hidden_states.shape + + conv_state, ssm_state = None, None + if inference_params is not None: + conv_state, ssm_state = self._get_states_from_cache(inference_params, batch) + if inference_params.seqlen_offset > 0: + # The states are updated inplace + out, _, _ = self.step(hidden_states, conv_state, ssm_state) + return out + + # We do matmul and transpose BLH -> HBL at the same time + xz = rearrange( + self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"), + "d (b l) -> b d l", + l=seqlen, + ) + if self.in_proj.bias is not None: + xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1") + + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + # In the backward pass we write dx and dz next to each other to avoid torch.cat + if self.use_fast_path and causal_conv1d_fn is not None and inference_params is None: # Doesn't support outputting the states + out = mamba_inner_fn( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + self.out_proj.weight, + self.out_proj.bias, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + else: + x, z = xz.chunk(2, dim=1) + # Compute short convolution + if conv_state is not None: + # If we just take x[:, :, -self.d_conv :], it will error if seqlen < self.d_conv + # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. + conv_state.copy_(F.pad(x, (self.d_conv - x.shape[-1], 0))) # Update state (B D W) + if causal_conv1d_fn is None: + x = self.act(self.conv1d(x)[..., :seqlen]) + else: + assert self.activation in ["silu", "swish"] + x = causal_conv1d_fn( + x=x, + weight=rearrange(self.conv1d.weight, "d 1 w -> d w"), + bias=self.conv1d.bias, + activation=self.activation, + ) + + # We're careful here about the layout, to avoid extra transposes. + # We want dt to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
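# x_dbl below has shape (batch * seqlen, dt_rank + 2 * d_state); it is split into
# dt (batch * seqlen, dt_rank) plus B and C (batch * seqlen, d_state) each, and dt is
# then projected to d_inner and laid out as (batch, d_inner, seqlen) for the scan.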
+ x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) + dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = self.dt_proj.weight @ dt.t() + dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) + B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + assert self.activation in ["silu", "swish"] + y = selective_scan_fn( + x, + dt, + A, + B, + C, + self.D.float(), + z=z, + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + return_last_state=ssm_state is not None, + ) + if ssm_state is not None: + y, last_state = y + ssm_state.copy_(last_state) + y = rearrange(y, "b d l -> b l d") + out = self.out_proj(y) + return out + + def step(self, hidden_states, conv_state, ssm_state): + dtype = hidden_states.dtype + assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now" + xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D) + x, z = xz.chunk(2, dim=-1) # (B D) + + # Conv step + if causal_conv1d_update is None: + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) + if self.conv1d.bias is not None: + x = x + self.conv1d.bias + x = self.act(x).to(dtype=dtype) + else: + x = causal_conv1d_update( + x, + conv_state, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + x_db = self.x_proj(x) # (B dt_rank+2*d_state) + dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + # Don't add dt_bias here + dt = F.linear(dt, self.dt_proj.weight) # (B d_inner) + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + + # SSM step + if selective_state_update is None: + # Discretize A and B + dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype)) + dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A)) + dB = torch.einsum("bd,bn->bdn", dt, B) + ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB) + y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C) + y = y + self.D.to(dtype) * x + y = y * self.act(z) # (B D) + else: + y = selective_state_update( + ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True + ) + + out = self.out_proj(y) + return out.unsqueeze(1), conv_state, ssm_state + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + device = self.out_proj.weight.device + conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype + conv_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype + ) + ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype + # ssm_dtype = torch.float32 + ssm_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype + ) + return conv_state, ssm_state + + def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): + assert self.layer_idx is not None + if self.layer_idx not in inference_params.key_value_memory_dict: + batch_shape = (batch_size,) + conv_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_conv, + device=self.conv1d.weight.device, + dtype=self.conv1d.weight.dtype, + ) + ssm_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_state, + device=self.dt_proj.weight.device, + dtype=self.dt_proj.weight.dtype, + # 
dtype=torch.float32, + ) + inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) + else: + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + # TODO: What if batch size changes between generation, and we reuse the same states? + if initialize_states: + conv_state.zero_() + ssm_state.zero_() + return conv_state, ssm_state diff --git a/mamba/mamba_ssm/modules/mha.py b/mamba/mamba_ssm/modules/mha.py new file mode 100644 index 0000000000000000000000000000000000000000..978f3ea4d8f1c962303b5cb6d8388c2289c4f7db --- /dev/null +++ b/mamba/mamba_ssm/modules/mha.py @@ -0,0 +1,294 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange + +try: + from flash_attn import flash_attn_with_kvcache +except ImportError: + flash_attn_with_kvcache = None + +try: + from flash_attn.layers.rotary import RotaryEmbedding +except ImportError: + RotaryEmbedding = None + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None, None + + +def _update_kv_cache(kv, inference_params, layer_idx): + """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)""" + # Pre-allocate memory for key-values for inference. + num_heads, head_dim = kv.shape[-2:] + assert layer_idx in inference_params.key_value_memory_dict + kv_cache, _ = inference_params.key_value_memory_dict[layer_idx] + # Adjust key and value for inference + batch_start = inference_params.batch_size_offset + batch_end = batch_start + kv.shape[0] + sequence_start = inference_params.seqlen_offset + sequence_end = sequence_start + kv.shape[1] + assert batch_end <= kv_cache.shape[0] + assert sequence_end <= kv_cache.shape[1] + assert kv_cache is not None + kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv + return kv_cache[batch_start:batch_end, :sequence_end, ...] + + +class MHA(nn.Module): + """Multi-head self-attention and cross-attention""" + + def __init__( + self, + embed_dim, + num_heads, + num_heads_kv=None, + head_dim=None, # If None, use embed_dim // num_heads + mlp_dim=0, + qkv_proj_bias=True, + out_proj_bias=True, + softmax_scale=None, + causal=False, + layer_idx=None, + d_conv=0, + rotary_emb_dim=0, + rotary_emb_base=10000.0, + rotary_emb_interleaved=False, + device=None, + dtype=None, + ) -> None: + """ + num_heads_kv: can be used to toggle MQA / GQA. If None, use num_heads. + return_residual: whether to return the input x along with the output. This is for + performance reason: for post-norm architecture, returning the input allows us + to fuse the backward of nn.Linear with the residual connection. 
+ """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.embed_dim = embed_dim + self.layer_idx = layer_idx + self.d_conv = d_conv + self.rotary_emb_dim = rotary_emb_dim + self.softmax_scale = softmax_scale + self.causal = causal + + self.num_heads = num_heads + self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads + assert ( + self.num_heads % self.num_heads_kv == 0 + ), "num_heads must be divisible by num_heads_kv" + if head_dim is None: + assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads" + self.head_dim = head_dim if head_dim is not None else self.embed_dim // num_heads + self.mlp_dim = math.ceil(mlp_dim / 256) * 256 + qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv) + out_dim = self.head_dim * self.num_heads + + if self.rotary_emb_dim > 0: + assert RotaryEmbedding is not None, "rotary requires flash_attn to be installed" + self.rotary_emb = RotaryEmbedding( + self.rotary_emb_dim, + base=rotary_emb_base, + interleaved=rotary_emb_interleaved, + device=device, + ) + + self.in_proj = nn.Linear(embed_dim, qkv_dim + self.mlp_dim, bias=qkv_proj_bias, **factory_kwargs) + if self.d_conv > 0: + self.conv1d = nn.Conv1d( + qkv_dim, qkv_dim, kernel_size=self.d_conv, padding=self.d_conv - 1, groups=qkv_dim, + **factory_kwargs + ) + self.out_proj = nn.Linear(out_dim + self.mlp_dim // 2, embed_dim, bias=out_proj_bias, **factory_kwargs) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None): + dtype = self.out_proj.weight.dtype if dtype is None else dtype + device = self.out_proj.weight.device + if self.d_conv > 0: + conv_state = torch.zeros( + batch_size, self.conv1d.weight.shape[0], self.d_conv, device=device, dtype=dtype + ) + else: + conv_state = None + kv_cache = torch.empty( + batch_size, max_seqlen, 2, self.num_heads_kv, self.head_dim, dtype=dtype, device=device, + ) + return kv_cache, conv_state + + def _update_kv_cache(self, kv, inference_params): + """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)""" + assert self.layer_idx is not None, "Generation requires layer_idx in the constructor" + return _update_kv_cache(kv, inference_params, self.layer_idx) + + def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params): + """ + Fast path that combine 3 steps: apply rotary to Q and K, update kv cache, and apply attention. 
+ q: (batch_size, seqlen_q, nheads, head_dim) + kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim) + """ + assert inference_params is not None and inference_params.seqlen_offset > 0 + if self.rotary_emb_dim > 0: + self.rotary_emb._update_cos_sin_cache( + inference_params.max_seqlen, device=q.device, dtype=q.dtype + ) + rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached + else: + rotary_cos, rotary_sin = None, None + batch = q.shape[0] + kv_cache, _ = inference_params.key_value_memory_dict[self.layer_idx] + kv_cache = kv_cache[:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + assert flash_attn_with_kvcache is not None, "flash_attn must be installed" + context = flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + rotary_cos=rotary_cos, + rotary_sin=rotary_sin, + cache_seqlens=cache_seqlens, + softmax_scale=self.softmax_scale, + causal=self.causal, + rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False, + ) + return context + + def _update_kvcache_attention(self, q, kv, inference_params): + """Write kv to inference_params, then do attention""" + if ( + inference_params.seqlen_offset == 0 + or flash_attn_with_kvcache is None + ): + # TODO: this only uses seqlen_offset and not lengths_per_sample. + kv = self._update_kv_cache(kv, inference_params) + k, v = kv.unbind(dim=-3) + k = torch.repeat_interleave(k, dim=2, repeats=self.num_heads // self.num_heads_kv) + v = torch.repeat_interleave(v, dim=2, repeats=self.num_heads // self.num_heads_kv) + return F.scaled_dot_product_attention( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=self.causal, scale=self.softmax_scale + ).transpose(1, 2) + else: + batch = q.shape[0] + kv_cache, _ = inference_params.key_value_memory_dict[self.layer_idx] + kv_cache = kv_cache[:batch] + cache_seqlens = ( + inference_params.lengths_per_sample[:batch] + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + return flash_attn_with_kvcache( + q, + kv_cache[:, :, 0], + kv_cache[:, :, 1], + kv[:, :, 0], + kv[:, :, 1], + cache_seqlens=cache_seqlens, + softmax_scale=self.softmax_scale, + causal=self.causal, + ) + + def forward(self, x, inference_params=None): + """ + Arguments: + x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if + cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total + is the is the sum of the sequence lengths in the batch. + inference_params: for generation. 
Adapted from Megatron-LM (and Apex) + https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470 + """ + if inference_params is not None and self.layer_idx not in inference_params.key_value_memory_dict: + inference_params.key_value_memory_dict[self.layer_idx] = self.allocate_inference_cache( + x.shape[0], inference_params.max_seqlen, dtype=x.dtype + ) + seqlen_offset = ( + 0 + if inference_params is None + else ( + inference_params.lengths_per_sample + if inference_params.lengths_per_sample is not None + else inference_params.seqlen_offset + ) + ) + rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None + qkv = self.in_proj(x) + if self.mlp_dim > 0: + qkv, x_mlp = qkv.split([qkv.shape[-1] - self.mlp_dim, self.mlp_dim], dim=-1) + x_mlp_up, x_mlp_gate = x_mlp.chunk(2, dim=-1) + x_mlp = x_mlp_up * F.silu(x_mlp_gate) + if self.d_conv > 0: + # The inference code for conv1d is pretty messy, should clean it up + if (inference_params is None or inference_params.seqlen_offset == 0): + if causal_conv1d_fn is None: + qkv = rearrange( + self.conv1d(rearrange(qkv, "b s d -> b d s"))[..., :-(self.d_conv - 1)], "b d s -> b s d" + ).contiguous() + else: + qkv = causal_conv1d_fn( + qkv.transpose(1, 2), + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias + ).transpose(1, 2) + if inference_params is not None: + _, conv_state = inference_params.key_value_memory_dict[self.layer_idx] + # If we just take qkv[:, :, -self.d_conv :], it will error if seqlen < self.d_conv + # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. + qkv_t = rearrange(qkv, "b l d -> b d l") + conv_state.copy_(F.pad(qkv_t, (self.d_conv - qkv_t.shape[-1], 0))) # Update state (B D W) + else: + _, conv_state = inference_params.key_value_memory_dict[self.layer_idx] + assert qkv.shape[1] == 1, "Only support decoding with 1 token at a time for now" + qkv = qkv.squeeze(1) + # Conv step + if causal_conv1d_update is None: + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = qkv + qkv = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) + if self.conv1d.bias is not None: + qkv = qkv + self.conv1d.bias + else: + qkv = causal_conv1d_update( + qkv, + conv_state, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias + ) + qkv = qkv.unsqueeze(1) + q, kv = qkv.split([self.num_heads * self.head_dim, self.num_heads_kv * 2 * self.head_dim], dim=-1) + q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim) + kv = rearrange(kv, "... (two hkv d) -> ... 
two hkv d", two=2, d=self.head_dim) + if ( + inference_params is None + or inference_params.seqlen_offset == 0 + or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0) + ): + if self.rotary_emb_dim > 0: + q, kv = self.rotary_emb( + q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen + ) + if inference_params is None: + k, v = kv.unbind(dim=-3) + k = torch.repeat_interleave(k, dim=2, repeats=self.num_heads // self.num_heads_kv) + v = torch.repeat_interleave(v, dim=2, repeats=self.num_heads // self.num_heads_kv) + context = F.scaled_dot_product_attention( + q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=self.causal, scale=self.softmax_scale + ).transpose(1, 2) + else: + context = self._update_kvcache_attention(q, kv, inference_params) + else: + context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params) + context = rearrange(context, "... h d -> ... (h d)") + if self.mlp_dim > 0: + context = torch.cat([context, x_mlp], dim=-1) + out = self.out_proj(context) + return out diff --git a/mamba/mamba_ssm/modules/mlp.py b/mamba/mamba_ssm/modules/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..33bab5c7cc21b96d5f5ccfe233e339cad12cfe2c --- /dev/null +++ b/mamba/mamba_ssm/modules/mlp.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. +from torch import nn +from torch.nn import functional as F + + +class GatedMLP(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + activation=F.silu, + bias=False, + multiple_of=128, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + out_features = out_features if out_features is not None else in_features + hidden_features = ( + hidden_features if hidden_features is not None else int(8 * in_features / 3) + ) + hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of + self.fc1 = nn.Linear(in_features, 2 * hidden_features, bias=bias, **factory_kwargs) + self.activation = activation + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, **factory_kwargs) + + def forward(self, x): + y = self.fc1(x) + y, gate = y.chunk(2, dim=-1) + y = y * self.activation(gate) + y = self.fc2(y) + return y diff --git a/mamba/mamba_ssm/modules/ssd_minimal.py b/mamba/mamba_ssm/modules/ssd_minimal.py new file mode 100644 index 0000000000000000000000000000000000000000..9632ebd4350fa18ddc977c2bdedb0bab1fd82646 --- /dev/null +++ b/mamba/mamba_ssm/modules/ssd_minimal.py @@ -0,0 +1,103 @@ +# Copyright (c) 2024, Albert Gu and Tri Dao. +"""Minimal implementation of SSD. + +This is the same as Listing 1 from the paper. +""" + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined + + +def segsum_unstable(x): + """Naive segment sum calculation.""" + T = x.size(-1) + x_cumsum = torch.cumsum(x, dim=-1) + x_segsum = x_cumsum[..., :, None] - x_cumsum[..., None, :] + mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=0) + x_segsum = x_segsum.masked_fill(~mask, -torch.inf) + return x_segsum + +def segsum(x): + """More stable segment sum calculation.""" + T = x.size(-1) + x = repeat(x, "... d -> ... 
d e", e=T) + mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=-1) + x = x.masked_fill(~mask, 0) + x_segsum = torch.cumsum(x, dim=-2) + mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=0) + x_segsum = x_segsum.masked_fill(~mask, -torch.inf) + return x_segsum + +def ssd_minimal_discrete(X, A, B, C, block_len, initial_states=None): + """ + Arguments: + X: (batch, length, n_heads, d_head) + A: (batch, length, n_heads) + B: (batch, length, n_heads, d_state) + C: (batch, length, n_heads, d_state) + Return: + Y: (batch, length, n_heads, d_head) + """ + assert X.dtype == A.dtype == B.dtype == C.dtype + assert X.shape[1] % block_len == 0 + + # Rearrange into blocks/chunks + X, A, B, C = [rearrange(x, "b (c l) ... -> b c l ...", l=block_len) for x in (X, A, B, C)] + + A = rearrange(A, "b c l h -> b h c l") + A_cumsum = torch.cumsum(A, dim=-1) + + # 1. Compute the output for each intra-chunk (diagonal blocks) + L = torch.exp(segsum(A)) + Y_diag = torch.einsum("bclhn,bcshn,bhcls,bcshp->bclhp", C, B, L, X) + + # 2. Compute the state for each intra-chunk + # (right term of low-rank factorization of off-diagonal blocks; B terms) + decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum)) + states = torch.einsum("bclhn,bhcl,bclhp->bchpn", B, decay_states, X) + + # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries + # (middle term of factorization of off-diag blocks; A terms) + if initial_states is None: + initial_states = torch.zeros_like(states[:, :1]) + states = torch.cat([initial_states, states], dim=1) + decay_chunk = torch.exp(segsum(F.pad(A_cumsum[:, :, :, -1], (1, 0)))) + new_states = torch.einsum("bhzc,bchpn->bzhpn", decay_chunk, states) + states, final_state = new_states[:, :-1], new_states[:, -1] + + # 4. 
Compute state -> output conversion per chunk + # (left term of low-rank factorization of off-diagonal blocks; C terms) + state_decay_out = torch.exp(A_cumsum) + Y_off = torch.einsum('bclhn,bchpn,bhcl->bclhp', C, states, state_decay_out) + + # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks) + Y = rearrange(Y_diag+Y_off, "b c l h p -> b (c l) h p") + return Y, final_state + + +# Simple test +def test_correctness(): + torch.manual_seed(42) + + ## Dimensions + # Denoted (B, T, Q, D, P) in the paper + batch, seqlen, chunk_size, dim, headdim = 1, 2048, 64, 2048, 64 + nheads = dim // headdim # (H) in the paper + ngroups = 1 # (G) in the paper + dstate = 64 # (N) in the paper + dtype = torch.float32 + device = "cuda" + + x = torch.randn(batch, seqlen, nheads, headdim, dtype=dtype, device=device) + dt = F.softplus(torch.randn(batch, seqlen, nheads, dtype=torch.float32, device=device) - 4).requires_grad_() + A = (-torch.exp(torch.rand(nheads, dtype=torch.float32, device=device))).requires_grad_() + B = torch.randn(batch, seqlen, ngroups, dstate, dtype=dtype, device=device) + C = torch.randn(batch, seqlen, ngroups, dstate, dtype=dtype, device=device) + D = torch.randn(nheads, dtype=dtype, device=device) + + # Comparing fused version and minimal version + y = mamba_chunk_scan_combined(x, dt, A, B, C, chunk_size, D=None) + y_min, _ = ssd_minimal_discrete(x*dt.unsqueeze(-1), A*dt, B, C, chunk_size) diff --git a/mamba/mamba_ssm/ops/__init__.py b/mamba/mamba_ssm/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/mamba_ssm/ops/selective_scan_interface.py b/mamba/mamba_ssm/ops/selective_scan_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..c3596bfeb0e3718d9e4bb9426828b149aa7dbaa3 --- /dev/null +++ b/mamba/mamba_ssm/ops/selective_scan_interface.py @@ -0,0 +1,357 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
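# A small CPU sanity check, assuming the mamba_ssm package above is importable (the
# ssd_minimal module pulls in the Triton-based mamba_chunk_scan_combined at import time,
# but ssd_minimal_discrete itself is pure PyTorch). The helper ssd_sequential_reference
# is illustrative and not part of the package: it evaluates the same discrete recurrence,
#   h_t = exp(A_t) * h_{t-1} + x_t B_t^T,    y_t = h_t C_t,
# one step at a time, which the chunked SSD computation above should reproduce.
import torch
from mamba_ssm.modules.ssd_minimal import ssd_minimal_discrete


def ssd_sequential_reference(X, A, B, C):
    """Naive per-step SSM recurrence with zero initial state (shapes as in ssd_minimal_discrete)."""
    batch, length, nheads, headdim = X.shape
    dstate = B.shape[-1]
    h = X.new_zeros(batch, nheads, headdim, dstate)
    ys = []
    for t in range(length):
        decay = torch.exp(A[:, t])[:, :, None, None]                     # (b, h, 1, 1)
        h = decay * h + torch.einsum("bhn,bhp->bhpn", B[:, t], X[:, t])  # state update
        ys.append(torch.einsum("bhpn,bhn->bhp", h, C[:, t]))             # readout y_t = h_t C_t
    return torch.stack(ys, dim=1)                                        # (b, l, h, p)


if __name__ == "__main__":
    torch.manual_seed(0)
    b, l, h, p, n, chunk = 1, 64, 2, 8, 4, 16
    X = torch.randn(b, l, h, p, dtype=torch.float64)
    A = -torch.rand(b, l, h, dtype=torch.float64)   # negative log-decay, so exp(A) is in (0, 1)
    B = torch.randn(b, l, h, n, dtype=torch.float64)
    C = torch.randn(b, l, h, n, dtype=torch.float64)
    y_chunked, _ = ssd_minimal_discrete(X, A, B, C, block_len=chunk)
    y_naive = ssd_sequential_reference(X, A, B, C)
    print(torch.allclose(y_chunked, y_naive))       # expected: True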
+ +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn + import causal_conv1d_cuda +except ImportError: + causal_conv1d_fn = None + causal_conv1d_cuda = None + +import selective_scan_cuda + + +class SelectiveScanFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + if u.stride(-1) != 1: + u = u.contiguous() + if delta.stride(-1) != 1: + delta = delta.contiguous() + if D is not None: + D = D.contiguous() + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if z is not None and z.stride(-1) != 1: + z = z.contiguous() + if B.dim() == 3: + B = rearrange(B, "b dstate l -> b 1 dstate l") + ctx.squeeze_B = True + if C.dim() == 3: + C = rearrange(C, "b dstate l -> b 1 dstate l") + ctx.squeeze_C = True + out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus) + ctx.delta_softplus = delta_softplus + ctx.has_z = z is not None + last_state = x[:, :, -1, 1::2] # (batch, dim, dstate) + if not ctx.has_z: + ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x) + return out if not return_last_state else (out, last_state) + else: + ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out) + out_z = rest[0] + return out_z if not return_last_state else (out_z, last_state) + + @staticmethod + def backward(ctx, dout, *args): + if not ctx.has_z: + u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors + z = None + out = None + else: + u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors + if dout.stride(-1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + # Here we just pass in None and dz will be allocated in the C++ code. + du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd( + u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus, + False # option to recompute out_z, not used here + ) + dz = rest[0] if ctx.has_z else None + dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB + dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC + return (du, ddelta, dA, dB, dC, + dD if D is not None else None, + dz, + ddelta_bias if delta_bias is not None else None, + None, + None) + + +def selective_scan_fn(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """if return_last_state is True, returns (out, last_state) + last_state has shape (batch, dim, dstate). Note that the gradient of the last state is + not considered in the backward pass. 
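    Shapes follow selective_scan_ref below: u, delta (and z if given) are (batch, dim, seqlen),
    A is (dim, dstate), B and C are either (dim, dstate) or input-dependent
    (batch, [ngroups,] dstate, seqlen), and D / delta_bias are (dim,).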
+ """ + return SelectiveScanFn.apply(u, delta, A, B, C, D, z, delta_bias, delta_softplus, return_last_state) + + +def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """ + u: r(B D L) + delta: r(B D L) + A: c(D N) or r(D N) + B: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + C: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + D: r(D) + z: r(B D L) + delta_bias: r(D), fp32 + + out: r(B D L) + last_state (optional): r(B D dstate) or c(B D dstate) + """ + dtype_in = u.dtype + u = u.float() + delta = delta.float() + if delta_bias is not None: + delta = delta + delta_bias[..., None].float() + if delta_softplus: + delta = F.softplus(delta) + batch, dim, dstate = u.shape[0], A.shape[0], A.shape[1] + is_variable_B = B.dim() >= 3 + is_variable_C = C.dim() >= 3 + if A.is_complex(): + if is_variable_B: + B = torch.view_as_complex(rearrange(B.float(), "... (L two) -> ... L two", two=2)) + if is_variable_C: + C = torch.view_as_complex(rearrange(C.float(), "... (L two) -> ... L two", two=2)) + else: + B = B.float() + C = C.float() + x = A.new_zeros((batch, dim, dstate)) + ys = [] + deltaA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + if not is_variable_B: + deltaB_u = torch.einsum('bdl,dn,bdl->bdln', delta, B, u) + else: + if B.dim() == 3: + deltaB_u = torch.einsum('bdl,bnl,bdl->bdln', delta, B, u) + else: + B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1]) + deltaB_u = torch.einsum('bdl,bdnl,bdl->bdln', delta, B, u) + if is_variable_C and C.dim() == 4: + C = repeat(C, "B G N L -> B (G H) N L", H=dim // C.shape[1]) + last_state = None + for i in range(u.shape[2]): + x = deltaA[:, :, i] * x + deltaB_u[:, :, i] + if not is_variable_C: + y = torch.einsum('bdn,dn->bd', x, C) + else: + if C.dim() == 3: + y = torch.einsum('bdn,bn->bd', x, C[:, :, i]) + else: + y = torch.einsum('bdn,bdn->bd', x, C[:, :, :, i]) + if i == u.shape[2] - 1: + last_state = x + if y.is_complex(): + y = y.real * 2 + ys.append(y) + y = torch.stack(ys, dim=2) # (batch dim L) + out = y if D is None else y + u * rearrange(D, "d -> d 1") + if z is not None: + out = out * F.silu(z) + out = out.to(dtype=dtype_in) + return out if not return_last_state else (out, last_state) + + +class MambaInnerFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert causal_conv1d_cuda is not None, "causal_conv1d_cuda is not available. Please install causal-conv1d." 
+ assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) + if out_proj_bias is not None else None) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd( + x, conv1d_weight, conv1d_bias, None, None, None, True + ) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out, scan_intermediates, out_z = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + ctx.delta_softplus = delta_softplus + ctx.out_proj_bias_is_None = out_proj_bias is None + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, out_proj_weight, conv1d_out, delta, + A, B, C, D, delta_bias, scan_intermediates, out) + return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + assert causal_conv1d_cuda is not None, "causal_conv1d_cuda is not available. Please install causal-conv1d." 
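# Backward of the fused path (descriptive): with checkpoint_lvl == 1 the conv1d output and
# delta were not saved in forward, so they are recomputed below before calling the fused
# selective-scan backward, trading a little extra compute for lower activation memory.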
+ (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, + conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd( + x, conv1d_weight, conv1d_bias, None, None, None, True + ) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + dout = rearrange(dout, "b l e -> e (b l)") + dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L) + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)")) + dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). 
+ dx, dconv1d_weight, dconv1d_bias, *_ = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, None, None, None, dx, False, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dout_proj_weight, dout_proj_bias, + dA, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +def mamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return MambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + + +def mamba_inner_ref( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + assert causal_conv1d_fn is not None, "causal_conv1d_fn is not available. Please install causal-conv1d." + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, activation="silu") + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = delta_proj_weight @ x_dbl[:, :delta_rank].t() + delta = rearrange(delta, "d (b l) -> b d l", l=L) + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl d) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + if C is None: # variable B + C = x_dbl[:, -d_state:] # (bl d) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True) + return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias) diff --git a/mamba/mamba_ssm/ops/triton/__init__.py b/mamba/mamba_ssm/ops/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/mamba_ssm/ops/triton/k_activations.py b/mamba/mamba_ssm/ops/triton/k_activations.py new file mode 100644 index 0000000000000000000000000000000000000000..79fa2cc672dd5ad839498e9150658ed7abce8736 --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/k_activations.py @@ -0,0 +1,169 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
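# A minimal sketch of calling the pure-PyTorch reference selective_scan_ref defined above.
# Note the module imports selective_scan_cuda unconditionally, so the package must be built
# with its CUDA extension even though the reference itself runs on CPU; shapes follow its
# docstring and the sizes here are arbitrary.
import torch
from mamba_ssm.ops.selective_scan_interface import selective_scan_ref

batch, dim, dstate, seqlen = 2, 4, 8, 16
u = torch.randn(batch, dim, seqlen)
delta = torch.rand(batch, dim, seqlen)       # raw time steps (softplus applied inside)
A = -torch.rand(dim, dstate)                 # negative real A for a stable SSM
B = torch.randn(batch, dstate, seqlen)       # input-dependent ("variable") B
C = torch.randn(batch, dstate, seqlen)       # input-dependent ("variable") C
D = torch.randn(dim)
z = torch.randn(batch, dim, seqlen)

out, last_state = selective_scan_ref(
    u, delta, A, B, C, D=D, z=z, delta_softplus=True, return_last_state=True
)
print(out.shape, last_state.shape)           # torch.Size([2, 4, 16]) torch.Size([2, 4, 8])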
+ +import torch + +import triton +import triton.language as tl + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_N': 32}), + triton.Config({'BLOCK_N': 64}), + triton.Config({'BLOCK_N': 128}), + triton.Config({'BLOCK_N': 256}), + triton.Config({'BLOCK_N': 512}), + triton.Config({'BLOCK_N': 1024}), + ], + key=['ncols'], +) +@triton.jit +def _swiglu_fwd_kernel( + X, + Y, + OUT, + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_out_row, + ncols, + BLOCK_N: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. + row = tl.program_id(0) + start_col = tl.program_id(1) * BLOCK_N + X += row * stride_x_row + Y += row * stride_y_row + OUT += row * stride_out_row + cols = start_col + tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < ncols, other=0.).to(tl.float32) + y = tl.load(Y + cols, mask=cols < ncols, other=0.).to(tl.float32) + out = x * tl.sigmoid(x) * y + tl.store(OUT + cols, out, mask=cols < ncols) + + +def _swiglu_fwd(xy, out=None): + if xy.stride(-1) != 1: + xy = xy.contiguous() + batch_shape = xy.shape[:-1] + xy = xy.reshape(-1, xy.shape[-1]) + x, y = xy.chunk(2, dim=-1) + if out is None: + out = torch.empty_like(x) + else: + out = out.reshape(-1, out.shape[-1]) + assert out.shape == x.shape + assert out.stride(-1) == 1 + M, N = x.shape + grid = lambda META: (M, triton.cdiv(N, META['BLOCK_N'])) + with torch.cuda.device(x.device.index): + _swiglu_fwd_kernel[grid](x, y, out, x.stride(0), y.stride(0), out.stride(0), N) + return out.reshape(*batch_shape, out.shape[-1]) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_N': 32}), + triton.Config({'BLOCK_N': 64}), + triton.Config({'BLOCK_N': 128}), + triton.Config({'BLOCK_N': 256}), + triton.Config({'BLOCK_N': 512}), + triton.Config({'BLOCK_N': 1024}), + ], + key=['ncols'], +) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["OUT"] is not None}) +@triton.jit +def _swiglu_bwd_kernel( + X, + Y, + DOUT, + OUT, + DX, + DY, + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_dout_row, + stride_out_row, + stride_dx_row, + stride_dy_row, + ncols, + BLOCK_N: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. 
+ row = tl.program_id(0) + start_col = tl.program_id(1) * BLOCK_N + X += row * stride_x_row + Y += row * stride_y_row + DOUT += row * stride_dout_row + if RECOMPUTE_OUTPUT: + OUT += row * stride_out_row + DX += row * stride_dx_row + DY += row * stride_dy_row + cols = start_col + tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < ncols, other=0.).to(tl.float32) + y = tl.load(Y + cols, mask=cols < ncols, other=0.).to(tl.float32) + dout = tl.load(DOUT + cols, mask=cols < ncols, other=0.).to(tl.float32) + x_sigmoid = tl.sigmoid(x) + dx = x_sigmoid * (1 + x * (1 - x_sigmoid)) * y * dout + dy = x * x_sigmoid * dout + tl.store(DX + cols, dx, mask=cols < ncols) + tl.store(DY + cols, dy, mask=cols < ncols) + if RECOMPUTE_OUTPUT: + out = x * x_sigmoid * y + tl.store(OUT + cols, out, mask=cols < ncols) + + +def _swiglu_bwd(xy, dout, dxy=None, recompute_output=False, out=None): + if xy.stride(-1) != 1: + xy = xy.contiguous() + if dout.stride(-1) != 1: + dout = dout.contiguous() + batch_shape = xy.shape[:-1] + xy = xy.reshape(-1, xy.shape[-1]) + x, y = xy.chunk(2, dim=-1) + dout = dout.reshape(-1, dout.shape[-1]) + assert dout.shape == x.shape + if dxy is None: + dxy = torch.empty_like(xy) + else: + dxy = dxy.reshape(-1, dxy.shape[-1]) + assert dxy.shape == xy.shape + dx, dy = dxy.chunk(2, dim=-1) + assert dx.stride(-1) == 1 + assert dy.stride(-1) == 1 + if recompute_output: + if out is None: + out = torch.empty_like(x) + else: + out = out.reshape(-1, out.shape[-1]) + assert out.shape == x.shape + assert out.stride(-1) == 1 + M, N = x.shape + grid = lambda META: (M, triton.cdiv(N, META['BLOCK_N'])) + with torch.cuda.device(x.device.index): + _swiglu_bwd_kernel[grid](x, y, dout, out if recompute_output else None, dx, dy, + x.stride(0), y.stride(0), dout.stride(0), + out.stride(0) if recompute_output else 0, + dx.stride(0), dy.stride(0), + N) + if not recompute_output: + return dxy.reshape(*batch_shape, dxy.shape[-1]) + else: + return dxy.reshape(*batch_shape, dxy.shape[-1]), out.reshape(*batch_shape, out.shape[-1]) + + +class SwiGLU(torch.autograd.Function): + + @staticmethod + def forward(ctx, xy): + ctx.save_for_backward(xy) + return _swiglu_fwd(xy) + + @staticmethod + def backward(ctx, dout): + xy, = ctx.saved_tensors + return _swiglu_bwd(xy, dout) + + +swiglu = SwiGLU.apply diff --git a/mamba/mamba_ssm/ops/triton/layer_norm.py b/mamba/mamba_ssm/ops/triton/layer_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..2f304d4344574d6cf1e4bbdc7f87158bfa2cce11 --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/layer_norm.py @@ -0,0 +1,1113 @@ +# Copyright (c) 2024, Tri Dao. +# Implement dropout + residual + layer_norm / rms_norm. + +# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html +# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate. +# This is faster for dimensions up to 8k, but after that it's much slower due to register spilling. +# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine. 
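# --- Illustrative sanity check (not part of the patch above) ---
# The closed-form backward in _swiglu_bwd_kernel uses
#   d/dx [x * sigmoid(x) * y] = sigmoid(x) * (1 + x * (1 - sigmoid(x))) * y
#   d/dy [x * sigmoid(x) * y] = x * sigmoid(x)
# A CPU-only PyTorch sketch checking those formulas against autograd; swiglu_ref is a
# hypothetical helper, not a function from this diff.
import torch
import torch.nn.functional as F

def swiglu_ref(xy: torch.Tensor) -> torch.Tensor:
    # out = silu(x) * y, where x and y are the two halves of the last dimension
    x, y = xy.chunk(2, dim=-1)
    return F.silu(x) * y

xy = torch.randn(4, 256, dtype=torch.float64, requires_grad=True)
dout = torch.randn(4, 128, dtype=torch.float64)
swiglu_ref(xy).backward(dout)

x, y = xy.detach().chunk(2, dim=-1)
s = torch.sigmoid(x)
manual = torch.cat([s * (1 + x * (1 - s)) * y * dout, x * s * dout], dim=-1)
assert torch.allclose(xy.grad, manual)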
+ +import math +import warnings + +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_fwd, custom_bwd + +import triton +import triton.language as tl + + +def layer_norm_ref( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + dropout_mask=None, + dropout_mask1=None, + upcast=False, +): + dtype = x.dtype + if upcast: + x = x.float() + weight = weight.float() + bias = bias.float() if bias is not None else None + residual = residual.float() if residual is not None else residual + x1 = x1.float() if x1 is not None else None + weight1 = weight1.float() if weight1 is not None else None + bias1 = bias1.float() if bias1 is not None else None + if x1 is not None: + assert rowscale is None, "rowscale is not supported with parallel LayerNorm" + if rowscale is not None: + x = x * rowscale[..., None] + if dropout_p > 0.0: + if dropout_mask is not None: + x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p) + else: + x = F.dropout(x, p=dropout_p) + if x1 is not None: + if dropout_mask1 is not None: + x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p) + else: + x1 = F.dropout(x1, p=dropout_p) + if x1 is not None: + x = x + x1 + if residual is not None: + x = (x + residual).to(x.dtype) + out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to( + dtype + ) + if weight1 is None: + return out if not prenorm else (out, x) + else: + out1 = F.layer_norm( + x.to(weight1.dtype), x.shape[-1:], weight=weight1, bias=bias1, eps=eps + ).to(dtype) + return (out, out1) if not prenorm else (out, out1, x) + + +def rms_norm_ref( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + dropout_mask=None, + dropout_mask1=None, + upcast=False, +): + dtype = x.dtype + if upcast: + x = x.float() + weight = weight.float() + bias = bias.float() if bias is not None else None + residual = residual.float() if residual is not None else residual + x1 = x1.float() if x1 is not None else None + weight1 = weight1.float() if weight1 is not None else None + bias1 = bias1.float() if bias1 is not None else None + if x1 is not None: + assert rowscale is None, "rowscale is not supported with parallel LayerNorm" + if rowscale is not None: + x = x * rowscale[..., None] + if dropout_p > 0.0: + if dropout_mask is not None: + x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p) + else: + x = F.dropout(x, p=dropout_p) + if x1 is not None: + if dropout_mask1 is not None: + x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p) + else: + x1 = F.dropout(x1, p=dropout_p) + if x1 is not None: + x = x + x1 + if residual is not None: + x = (x + residual).to(x.dtype) + rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps) + out = ((x * rstd * weight) + bias if bias is not None else (x * rstd * weight)).to(dtype) + if weight1 is None: + return out if not prenorm else (out, x) + else: + out1 = ((x * rstd * weight1) + bias1 if bias1 is not None else (x * rstd * weight1)).to( + dtype + ) + return (out, out1) if not prenorm else (out, out1, x) + +def config_prune(configs): + + if torch.version.hip: + try: + # set warp size based on gcn architecure + gcn_arch_name = torch.cuda.get_device_properties(0).gcnArchName + if "gfx10" in gcn_arch_name or "gfx11" in gcn_arch_name: + # radeon + warp_size = 32 + else: + # instinct + warp_size = 64 + except AttributeError as e: + # fall back 
to crude method to set warp size + device_name = torch.cuda.get_device_properties(0).name + if 'instinct' in device_name.lower(): + warp_size = 64 + else: + warp_size = 32 + warnings.warn(f"{e}, warp size set to {warp_size} based on device name: {device_name}", UserWarning) + + else: + # cuda + warp_size = 32 + + max_block_sz = 1024 + max_num_warps = max_block_sz // warp_size + pruned_configs = [config for config in configs if config.num_warps <= max_num_warps] + return pruned_configs + +configs_autotune = [ + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ] + +pruned_configs_autotune = config_prune(configs_autotune) + +@triton.autotune( + configs = pruned_configs_autotune, + key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None}) +@triton.heuristics({"HAS_X1": lambda args: args["X1"] is not None}) +@triton.heuristics({"HAS_W1": lambda args: args["W1"] is not None}) +@triton.heuristics({"HAS_B1": lambda args: args["B1"] is not None}) +@triton.jit +def _layer_norm_fwd_1pass_kernel( + X, # pointer to the input + Y, # pointer to the output + W, # pointer to the weights + B, # pointer to the biases + RESIDUAL, # pointer to the residual + X1, + W1, + B1, + Y1, + RESIDUAL_OUT, # pointer to the residual + ROWSCALE, + SEEDS, # Dropout seeds for each row + DROPOUT_MASK, + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_res_row, + stride_res_out_row, + stride_x1_row, + stride_y1_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + dropout_p, # Dropout probability + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_RESIDUAL: tl.constexpr, + STORE_RESIDUAL_OUT: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_DROPOUT: tl.constexpr, + STORE_DROPOUT_MASK: tl.constexpr, + HAS_ROWSCALE: tl.constexpr, + HAS_X1: tl.constexpr, + HAS_W1: tl.constexpr, + HAS_B1: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. 
+ row = tl.program_id(0) + X += row * stride_x_row + Y += row * stride_y_row + if HAS_RESIDUAL: + RESIDUAL += row * stride_res_row + if STORE_RESIDUAL_OUT: + RESIDUAL_OUT += row * stride_res_out_row + if HAS_X1: + X1 += row * stride_x1_row + if HAS_W1: + Y1 += row * stride_y1_row + # Compute mean and variance + cols = tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) + if HAS_ROWSCALE: + rowscale = tl.load(ROWSCALE + row).to(tl.float32) + x *= rowscale + if HAS_DROPOUT: + # Compute dropout mask + # 7 rounds is good enough, and reduces register pressure + keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + x = tl.where(keep_mask, x / (1.0 - dropout_p), 0.0) + if STORE_DROPOUT_MASK: + tl.store(DROPOUT_MASK + row * N + cols, keep_mask, mask=cols < N) + if HAS_X1: + x1 = tl.load(X1 + cols, mask=cols < N, other=0.0).to(tl.float32) + if HAS_ROWSCALE: + rowscale = tl.load(ROWSCALE + M + row).to(tl.float32) + x1 *= rowscale + if HAS_DROPOUT: + # Compute dropout mask + # 7 rounds is good enough, and reduces register pressure + keep_mask = ( + tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + ) + x1 = tl.where(keep_mask, x1 / (1.0 - dropout_p), 0.0) + if STORE_DROPOUT_MASK: + tl.store(DROPOUT_MASK + (M + row) * N + cols, keep_mask, mask=cols < N) + x += x1 + if HAS_RESIDUAL: + residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32) + x += residual + if STORE_RESIDUAL_OUT: + tl.store(RESIDUAL_OUT + cols, x, mask=cols < N) + if not IS_RMS_NORM: + mean = tl.sum(x, axis=0) / N + tl.store(Mean + row, mean) + xbar = tl.where(cols < N, x - mean, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + else: + xbar = tl.where(cols < N, x, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + tl.store(Rstd + row, rstd) + # Normalize and apply linear transformation + mask = cols < N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if HAS_BIAS: + b = tl.load(B + cols, mask=mask).to(tl.float32) + x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + y = x_hat * w + b if HAS_BIAS else x_hat * w + # Write output + tl.store(Y + cols, y, mask=mask) + if HAS_W1: + w1 = tl.load(W1 + cols, mask=mask).to(tl.float32) + if HAS_B1: + b1 = tl.load(B1 + cols, mask=mask).to(tl.float32) + y1 = x_hat * w1 + b1 if HAS_B1 else x_hat * w1 + tl.store(Y1 + cols, y1, mask=mask) + + +def _layer_norm_fwd( + x, + weight, + bias, + eps, + residual=None, + x1=None, + weight1=None, + bias1=None, + dropout_p=0.0, + rowscale=None, + out_dtype=None, + residual_dtype=None, + is_rms_norm=False, + return_dropout_mask=False, +): + if residual is not None: + residual_dtype = residual.dtype + M, N = x.shape + assert x.stride(-1) == 1 + if residual is not None: + assert residual.stride(-1) == 1 + assert residual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + if x1 is not None: + assert x1.shape == x.shape + assert rowscale is None + assert x1.stride(-1) == 1 + if weight1 is not None: + assert weight1.shape == (N,) + assert weight1.stride(-1) == 1 + if bias1 is not None: + assert bias1.shape == (N,) + assert bias1.stride(-1) == 1 + if rowscale is not None: + assert rowscale.is_contiguous() + assert rowscale.shape == (M,) + # allocate output + y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype) + assert y.stride(-1) == 1 + if weight1 is not None: + y1 = 
torch.empty_like(y) + assert y1.stride(-1) == 1 + else: + y1 = None + if ( + residual is not None + or (residual_dtype is not None and residual_dtype != x.dtype) + or dropout_p > 0.0 + or rowscale is not None + or x1 is not None + ): + residual_out = torch.empty( + M, N, device=x.device, dtype=residual_dtype if residual_dtype is not None else x.dtype + ) + assert residual_out.stride(-1) == 1 + else: + residual_out = None + mean = torch.empty((M,), dtype=torch.float32, device=x.device) if not is_rms_norm else None + rstd = torch.empty((M,), dtype=torch.float32, device=x.device) + if dropout_p > 0.0: + seeds = torch.randint( + 2**32, (M if x1 is None else 2 * M,), device=x.device, dtype=torch.int64 + ) + else: + seeds = None + if return_dropout_mask and dropout_p > 0.0: + dropout_mask = torch.empty(M if x1 is None else 2 * M, N, device=x.device, dtype=torch.bool) + else: + dropout_mask = None + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + with torch.cuda.device(x.device.index): + _layer_norm_fwd_1pass_kernel[(M,)]( + x, + y, + weight, + bias, + residual, + x1, + weight1, + bias1, + y1, + residual_out, + rowscale, + seeds, + dropout_mask, + mean, + rstd, + x.stride(0), + y.stride(0), + residual.stride(0) if residual is not None else 0, + residual_out.stride(0) if residual_out is not None else 0, + x1.stride(0) if x1 is not None else 0, + y1.stride(0) if y1 is not None else 0, + M, + N, + eps, + dropout_p, + is_rms_norm, + BLOCK_N, + residual is not None, + residual_out is not None, + bias is not None, + dropout_p > 0.0, + dropout_mask is not None, + rowscale is not None, + ) + # residual_out is None if residual is None and residual_dtype == input_dtype and dropout_p == 0.0 + if dropout_mask is not None and x1 is not None: + dropout_mask, dropout_mask1 = dropout_mask.tensor_split(2, dim=0) + else: + dropout_mask1 = None + return ( + y, + y1, + mean, + rstd, + residual_out if residual_out is not None else x, + seeds, + dropout_mask, + dropout_mask1, + ) + + +@triton.autotune( + configs=pruned_configs_autotune, + key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS", "HAS_DROPOUT"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None}) +# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None}) +@triton.heuristics({"HAS_ROWSCALE": lambda args: args["ROWSCALE"] is not None}) +@triton.heuristics({"HAS_DY1": lambda args: args["DY1"] is not None}) +@triton.heuristics({"HAS_DX1": lambda args: args["DX1"] is not None}) +@triton.heuristics({"HAS_B1": lambda args: args["DB1"] is not None}) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) +@triton.jit +def _layer_norm_bwd_kernel( + X, # pointer to the input + W, # pointer to the weights + B, # pointer to the biases + Y, # pointer to the output to be recomputed + DY, # pointer to the output gradient + DX, # pointer to the input gradient + DW, # pointer to the partial sum of weights gradient + DB, # pointer to the partial sum of biases gradient + DRESIDUAL, + W1, + DY1, + DX1, + DW1, + DB1, + DRESIDUAL_IN, + ROWSCALE, + SEEDS, + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, 
+ stride_dy_row, + stride_dx_row, + stride_dres_row, + stride_dy1_row, + stride_dx1_row, + stride_dres_in_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + dropout_p, + rows_per_program, + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_DRESIDUAL: tl.constexpr, + STORE_DRESIDUAL: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_DROPOUT: tl.constexpr, + HAS_ROWSCALE: tl.constexpr, + HAS_DY1: tl.constexpr, + HAS_DX1: tl.constexpr, + HAS_B1: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, +): + # Map the program id to the elements of X, DX, and DY it should compute. + row_block_id = tl.program_id(0) + row_start = row_block_id * rows_per_program + # Do not early exit if row_start >= M, because we need to write DW and DB + cols = tl.arange(0, BLOCK_N) + mask = cols < N + X += row_start * stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += row_start * stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += row_start * stride_dres_in_row + DY += row_start * stride_dy_row + DX += row_start * stride_dx_row + if HAS_DY1: + DY1 += row_start * stride_dy1_row + if HAS_DX1: + DX1 += row_start * stride_dx1_row + if RECOMPUTE_OUTPUT: + Y += row_start * stride_y_row + w = tl.load(W + cols, mask=mask).to(tl.float32) + if RECOMPUTE_OUTPUT and HAS_BIAS: + b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32) + if HAS_DY1: + w1 = tl.load(W1 + cols, mask=mask).to(tl.float32) + dw = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_BIAS: + db = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_DY1: + dw1 = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_B1: + db1 = tl.zeros((BLOCK_N,), dtype=tl.float32) + row_end = min((row_block_id + 1) * rows_per_program, M) + for row in range(row_start, row_end): + # Load data to SRAM + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + if HAS_DY1: + dy1 = tl.load(DY1 + cols, mask=mask, other=0).to(tl.float32) + if not IS_RMS_NORM: + mean = tl.load(Mean + row) + rstd = tl.load(Rstd + row) + # Compute dx + xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + xhat = tl.where(mask, xhat, 0.0) + if RECOMPUTE_OUTPUT: + y = xhat * w + b if HAS_BIAS else xhat * w + tl.store(Y + cols, y, mask=mask) + wdy = w * dy + dw += dy * xhat + if HAS_BIAS: + db += dy + if HAS_DY1: + wdy += w1 * dy1 + dw1 += dy1 * xhat + if HAS_B1: + db1 += dy1 + if not IS_RMS_NORM: + c1 = tl.sum(xhat * wdy, axis=0) / N + c2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * c1 + c2)) * rstd + else: + c1 = tl.sum(xhat * wdy, axis=0) / N + dx = (wdy - xhat * c1) * rstd + if HAS_DRESIDUAL: + dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32) + dx += dres + # Write dx + if STORE_DRESIDUAL: + tl.store(DRESIDUAL_IN + cols, dx, mask=mask) + if HAS_DX1: + if HAS_DROPOUT: + keep_mask = ( + tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + ) + dx1 = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0) + else: + dx1 = dx + tl.store(DX1 + cols, dx1, mask=mask) + if HAS_DROPOUT: + keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p + dx = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0) + if HAS_ROWSCALE: + rowscale = tl.load(ROWSCALE + row).to(tl.float32) + dx *= rowscale + tl.store(DX + cols, dx, mask=mask) + + X += stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += stride_dres_in_row + if RECOMPUTE_OUTPUT: + Y += stride_y_row + DY += stride_dy_row + DX += 
stride_dx_row + if HAS_DY1: + DY1 += stride_dy1_row + if HAS_DX1: + DX1 += stride_dx1_row + tl.store(DW + row_block_id * N + cols, dw, mask=mask) + if HAS_BIAS: + tl.store(DB + row_block_id * N + cols, db, mask=mask) + if HAS_DY1: + tl.store(DW1 + row_block_id * N + cols, dw1, mask=mask) + if HAS_B1: + tl.store(DB1 + row_block_id * N + cols, db1, mask=mask) + + +def _layer_norm_bwd( + dy, + x, + weight, + bias, + eps, + mean, + rstd, + dresidual=None, + dy1=None, + weight1=None, + bias1=None, + seeds=None, + dropout_p=0.0, + rowscale=None, + has_residual=False, + has_x1=False, + is_rms_norm=False, + x_dtype=None, + recompute_output=False, +): + M, N = x.shape + assert x.stride(-1) == 1 + assert dy.stride(-1) == 1 + assert dy.shape == (M, N) + if dresidual is not None: + assert dresidual.stride(-1) == 1 + assert dresidual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + if dy1 is not None: + assert weight1 is not None + assert dy1.shape == dy.shape + assert dy1.stride(-1) == 1 + if weight1 is not None: + assert weight1.shape == (N,) + assert weight1.stride(-1) == 1 + if bias1 is not None: + assert bias1.shape == (N,) + assert bias1.stride(-1) == 1 + if seeds is not None: + assert seeds.is_contiguous() + assert seeds.shape == (M if not has_x1 else M * 2,) + if rowscale is not None: + assert rowscale.is_contiguous() + assert rowscale.shape == (M,) + # allocate output + dx = ( + torch.empty_like(x) + if x_dtype is None + else torch.empty(M, N, dtype=x_dtype, device=x.device) + ) + dresidual_in = ( + torch.empty_like(x) + if has_residual + and (dx.dtype != x.dtype or dropout_p > 0.0 or rowscale is not None or has_x1) + else None + ) + dx1 = torch.empty_like(dx) if (has_x1 and dropout_p > 0.0) else None + y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None + if recompute_output: + assert weight1 is None, "recompute_output is not supported with parallel LayerNorm" + + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device) + _db = ( + torch.empty((sm_count, N), dtype=torch.float32, device=bias.device) + if bias is not None + else None + ) + _dw1 = torch.empty_like(_dw) if weight1 is not None else None + _db1 = torch.empty_like(_db) if bias1 is not None else None + rows_per_program = math.ceil(M / sm_count) + grid = (sm_count,) + with torch.cuda.device(x.device.index): + _layer_norm_bwd_kernel[grid]( + x, + weight, + bias, + y, + dy, + dx, + _dw, + _db, + dresidual, + weight1, + dy1, + dx1, + _dw1, + _db1, + dresidual_in, + rowscale, + seeds, + mean, + rstd, + x.stride(0), + 0 if not recompute_output else y.stride(0), + dy.stride(0), + dx.stride(0), + dresidual.stride(0) if dresidual is not None else 0, + dy1.stride(0) if dy1 is not None else 0, + dx1.stride(0) if dx1 is not None else 0, + dresidual_in.stride(0) if dresidual_in is not None else 0, + M, + N, + eps, + dropout_p, + rows_per_program, + is_rms_norm, + BLOCK_N, + dresidual is not None, + dresidual_in is not None, + bias is not None, + dropout_p > 0.0, + ) + dw = _dw.sum(0).to(weight.dtype) + db = _db.sum(0).to(bias.dtype) if bias is not None else 
None + dw1 = _dw1.sum(0).to(weight1.dtype) if weight1 is not None else None + db1 = _db1.sum(0).to(bias1.dtype) if bias1 is not None else None + # Don't need to compute dresidual_in separately in this case + if has_residual and dx.dtype == x.dtype and dropout_p == 0.0 and rowscale is None: + dresidual_in = dx + if has_x1 and dropout_p == 0.0: + dx1 = dx + return ( + (dx, dw, db, dresidual_in, dx1, dw1, db1) + if not recompute_output + else (dx, dw, db, dresidual_in, dx1, dw1, db1, y) + ) + + +class LayerNormFn(torch.autograd.Function): + @staticmethod + def forward( + ctx, + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + return_dropout_mask=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + if x1 is not None: + assert x1.shape == x_shape_og + assert rowscale is None, "rowscale is not supported with parallel LayerNorm" + x1 = x1.reshape(-1, x1.shape[-1]) + if x1.stride(-1) != 1: + x1 = x1.contiguous() + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + if weight1 is not None: + weight1 = weight1.contiguous() + if bias1 is not None: + bias1 = bias1.contiguous() + if rowscale is not None: + rowscale = rowscale.reshape(-1).contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd( + x, + weight, + bias, + eps, + residual, + x1, + weight1, + bias1, + dropout_p=dropout_p, + rowscale=rowscale, + residual_dtype=residual_dtype, + is_rms_norm=is_rms_norm, + return_dropout_mask=return_dropout_mask, + ) + ctx.save_for_backward( + residual_out, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd + ) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.dropout_p = dropout_p + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.has_x1 = x1 is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + y = y.reshape(x_shape_og) + y1 = y1.reshape(x_shape_og) if y1 is not None else None + residual_out = residual_out.reshape(x_shape_og) if residual_out is not None else None + dropout_mask = dropout_mask.reshape(x_shape_og) if dropout_mask is not None else None + dropout_mask1 = dropout_mask1.reshape(x_shape_og) if dropout_mask1 is not None else None + if not return_dropout_mask: + if weight1 is None: + return y if not prenorm else (y, residual_out) + else: + return (y, y1) if not prenorm else (y, y1, residual_out) + else: + if weight1 is None: + return ( + (y, dropout_mask, dropout_mask1) + if not prenorm + else (y, residual_out, dropout_mask, dropout_mask1) + ) + else: + return ( + (y, y1, dropout_mask, dropout_mask1) + if not prenorm + else (y, y1, residual_out, dropout_mask, dropout_mask1) + ) + + @staticmethod + def backward(ctx, dy, *args): + x, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd = ctx.saved_tensors + dy = dy.reshape(-1, dy.shape[-1]) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if weight1 is not None: + dy1, args = args[0], args[1:] + dy1 = dy1.reshape(-1, dy1.shape[-1]) + if dy1.stride(-1) != 1: + dy1 
= dy1.contiguous() + assert dy1.shape == x.shape + else: + dy1 = None + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dw, db, dresidual_in, dx1, dw1, db1 = _layer_norm_bwd( + dy, + x, + weight, + bias, + ctx.eps, + mean, + rstd, + dresidual, + dy1, + weight1, + bias1, + seeds, + ctx.dropout_p, + rowscale, + ctx.has_residual, + ctx.has_x1, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + ) + return ( + dx.reshape(ctx.x_shape_og), + dw, + db, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + dx1.reshape(ctx.x_shape_og) if dx1 is not None else None, + dw1, + db1, + None, + None, + None, + None, + None, + None, + None, + ) + + +def layer_norm_fn( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + return_dropout_mask=False, +): + return LayerNormFn.apply( + x, + weight, + bias, + residual, + x1, + weight1, + bias1, + eps, + dropout_p, + rowscale, + prenorm, + residual_in_fp32, + is_rms_norm, + return_dropout_mask, + ) + + +def rms_norm_fn( + x, + weight, + bias, + residual=None, + x1=None, + weight1=None, + bias1=None, + eps=1e-6, + dropout_p=0.0, + rowscale=None, + prenorm=False, + residual_in_fp32=False, + return_dropout_mask=False, +): + return LayerNormFn.apply( + x, + weight, + bias, + residual, + x1, + weight1, + bias1, + eps, + dropout_p, + rowscale, + prenorm, + residual_in_fp32, + True, + return_dropout_mask, + ) + + +class RMSNorm(torch.nn.Module): + + def __init__(self, hidden_size, eps=1e-5, dropout_p=0.0, device=None, dtype=None): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + if dropout_p > 0.0: + self.drop = torch.nn.Dropout(dropout_p) + else: + self.drop = None + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.register_parameter("bias", None) + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + + def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False): + return rms_norm_fn( + x, + self.weight, + self.bias, + residual=residual, + eps=self.eps, + dropout_p=self.drop.p if self.drop is not None and self.training else 0.0, + prenorm=prenorm, + residual_in_fp32=residual_in_fp32, + ) + + +class LayerNormLinearFn(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward( + ctx, + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + norm_weight = norm_weight.contiguous() + if norm_bias is not None: + norm_bias = norm_bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, _, mean, rstd, residual_out, *rest = _layer_norm_fwd( + x, + norm_weight, + norm_bias, + eps, + residual, + out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(), + 
residual_dtype=residual_dtype, + is_rms_norm=is_rms_norm, + ) + y = y.reshape(x_shape_og) + dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype + linear_weight = linear_weight.to(dtype) + linear_bias = linear_bias.to(dtype) if linear_bias is not None else None + out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias) + # We don't store y, will be recomputed in the backward pass to save memory + ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + ctx.linear_bias_is_none = linear_bias is None + return out if not prenorm else (out, residual_out.reshape(x_shape_og)) + + @staticmethod + @custom_bwd + def backward(ctx, dout, *args): + x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors + dout = dout.reshape(-1, dout.shape[-1]) + dy = F.linear(dout, linear_weight.t()) + dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dnorm_weight, dnorm_bias, dresidual_in, _, _, _, y = _layer_norm_bwd( + dy, + x, + norm_weight, + norm_bias, + ctx.eps, + mean, + rstd, + dresidual=dresidual, + has_residual=ctx.has_residual, + is_rms_norm=ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + recompute_output=True, + ) + dlinear_weight = torch.einsum("bo,bi->oi", dout, y) + return ( + dx.reshape(ctx.x_shape_og), + dnorm_weight, + dnorm_bias, + dlinear_weight, + dlinear_bias, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_linear_fn( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormLinearFn.apply( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual, + eps, + prenorm, + residual_in_fp32, + is_rms_norm, + ) diff --git a/mamba/mamba_ssm/ops/triton/layernorm_gated.py b/mamba/mamba_ssm/ops/triton/layernorm_gated.py new file mode 100644 index 0000000000000000000000000000000000000000..de4b2f4815f6fa9d80291491e3826251f50ff5ad --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/layernorm_gated.py @@ -0,0 +1,437 @@ +# Copyright (c) 2024, Tri Dao. +# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html +# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate. +# This backward pass is faster for dimensions up to 8k, but after that it's much slower due to register spilling. +# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine. 
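# --- Illustrative usage sketch (not part of the patch above) ---
# The prenorm path of layer_norm_fn / rms_norm_fn returns both the normalized output and the
# updated residual stream that the next block consumes. The pure-PyTorch layer_norm_ref above
# shows the same contract and runs on CPU; the fused Triton path has the same interface but
# needs a GPU. Assumes the package is importable as mamba_ssm.
import torch
import torch.nn.functional as F
from mamba_ssm.ops.triton.layer_norm import layer_norm_ref

batch, seqlen, d = 2, 8, 64
x = torch.randn(batch, seqlen, d)
residual = torch.randn(batch, seqlen, d)
weight, bias = torch.ones(d), torch.zeros(d)

out, new_residual = layer_norm_ref(x, weight, bias, residual=residual, prenorm=True)
assert torch.allclose(new_residual, x + residual)    # residual stream is updated before the norm
assert torch.allclose(out, F.layer_norm(new_residual, (d,), weight, bias, eps=1e-6))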
+ +import math + +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange + + +def rms_norm_ref(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, upcast=True): + dtype = x.dtype + N = x.shape[-1] + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + z = z.float() if z is not None else z + if z is not None and not norm_before_gate: + x = x * F.silu(z) + if group_size is None: + rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps) + out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight) + else: + x_group = rearrange(x, "... (g d) -> ... g d", d=group_size) + rstd = 1 / torch.sqrt((x_group.square()).mean(dim=-1, keepdim=True) + eps) + out = rearrange(x_group * rstd, "... g d -> ... (g d)") * weight + if bias is not None: + out = out + bias + if z is not None and norm_before_gate: + out *= F.silu(z) + return out.to(dtype) + + +@triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["Z"] is not None}) +@triton.jit +def _layer_norm_fwd_1pass_kernel( + X, # pointer to the input + Y, # pointer to the output + W, # pointer to the weights + B, # pointer to the biases + Z, # pointer to the other branch + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_z_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + BLOCK_N: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_Z: tl.constexpr, + NORM_BEFORE_GATE: tl.constexpr, + IS_RMS_NORM: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. + row = tl.program_id(0) + group = tl.program_id(1) + X += row * stride_x_row + group * N + Y += row * stride_y_row + group * N + if HAS_Z: + Z += row * stride_z_row + group * N + if not IS_RMS_NORM: + Mean += group * M + Rstd += group * M + W += group * N + if HAS_BIAS: + B += group * N + # Compute mean and variance + cols = tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32) + if HAS_Z and not NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=cols < N).to(tl.float32) + x *= z * tl.sigmoid(z) + if not IS_RMS_NORM: + mean = tl.sum(x, axis=0) / N + tl.store(Mean + row, mean) + xbar = tl.where(cols < N, x - mean, 0.) + var = tl.sum(xbar * xbar, axis=0) / N + else: + xbar = tl.where(cols < N, x, 0.) 
+ var = tl.sum(xbar * xbar, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + tl.store(Rstd + row, rstd) + # Normalize and apply linear transformation + mask = cols < N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if HAS_BIAS: + b = tl.load(B + cols, mask=mask).to(tl.float32) + x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + y = x_hat * w + b if HAS_BIAS else x_hat * w + if HAS_Z and NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=mask).to(tl.float32) + y *= z * tl.sigmoid(z) + # Write output + tl.store(Y + cols, y, mask=mask) + + +def _layer_norm_fwd(x, weight, bias, eps, z=None, out=None, group_size=None, norm_before_gate=True, is_rms_norm=False): + M, N = x.shape + if group_size is None: + group_size = N + assert N % group_size == 0 + ngroups = N // group_size + assert x.stride(-1) == 1 + if z is not None: + assert z.stride(-1) == 1 + assert z.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + if out is not None: + assert out.shape == x.shape + else: + out = torch.empty_like(x) + assert out.stride(-1) == 1 + mean = torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device) if not is_rms_norm else None + rstd = torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device) + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(group_size)) + if group_size > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + # heuristics for number of warps + num_warps = min(max(BLOCK_N // 256, 1), 8) + grid = (M, ngroups) + with torch.cuda.device(x.device.index): + _layer_norm_fwd_1pass_kernel[grid](x, out, weight, bias, z, mean, rstd, + x.stride(0), out.stride(0), z.stride(0) if z is not None else 0, + M, group_size, eps, + BLOCK_N=BLOCK_N, + NORM_BEFORE_GATE=norm_before_gate, + IS_RMS_NORM=is_rms_norm, + num_warps=num_warps) + return out, mean, rstd + + + +@triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["Z"] is not None}) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) +@triton.jit +def _layer_norm_bwd_kernel( + X, # pointer to the input + W, # pointer to the weights + B, # pointer to the biases + Z, # pointer to the other branch + Y, # pointer to the output to be recomputed + DY, # pointer to the output gradient + DX, # pointer to the input gradient + DW, # pointer to the partial sum of weights gradient + DB, # pointer to the partial sum of biases gradient + DZ, # pointer to the other branch + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_z_row, + stride_y_row, + stride_dy_row, + stride_dx_row, + stride_dz_row, + stride_dw_row, + stride_db_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + rows_per_program, + NORM_BEFORE_GATE: tl.constexpr, + IS_RMS_NORM: tl.constexpr, + HAS_BIAS: tl.constexpr, + HAS_Z: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, + BLOCK_N: tl.constexpr, +): + # Map the program id to the elements of X, DX, and DY it should compute. 
+ row_block_id = tl.program_id(0) + group = tl.program_id(1) + row_start = row_block_id * rows_per_program + cols = tl.arange(0, BLOCK_N) + mask = cols < N + X += row_start * stride_x_row + group * N + if HAS_Z: + Z += row_start * stride_z_row + group * N + DZ += row_start * stride_dz_row + group * N + DY += row_start * stride_dy_row + group * N + DX += row_start * stride_dx_row + group * N + if RECOMPUTE_OUTPUT: + Y += row_start * stride_y_row + group * N + if not IS_RMS_NORM: + Mean += group * M + Rstd += group * M + W += group * N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if (RECOMPUTE_OUTPUT or HAS_Z) and HAS_BIAS: + B += group * N + b = tl.load(B + cols, mask=mask, other=0.).to(tl.float32) + dw = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_BIAS: + db = tl.zeros((BLOCK_N,), dtype=tl.float32) + row_end = min((row_block_id + 1) * rows_per_program, M) + for row in range(row_start, row_end): + # Load data to SRAM + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + if not IS_RMS_NORM: + mean = tl.load(Mean + row) + if HAS_Z and not NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=mask, other=0.).to(tl.float32) + x_og = x + x = x_og * z * tl.sigmoid(z) + rstd = tl.load(Rstd + row) + # Compute dx + xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + xhat = tl.where(mask, xhat, 0.) + if HAS_Z and NORM_BEFORE_GATE: + z = tl.load(Z + cols, mask=mask, other=0.).to(tl.float32) + z_sigmoid = tl.sigmoid(z) + y = xhat * w + b if HAS_BIAS else xhat * w + if RECOMPUTE_OUTPUT: + tl.store(Y + cols, y * z * z_sigmoid, mask=mask) + dz = dy * y * z_sigmoid * (1 + z * (1 - z_sigmoid)) + tl.store(DZ + cols, dz, mask=mask) + dy *= z * z_sigmoid + else: + if RECOMPUTE_OUTPUT: + y = xhat * w + b if HAS_BIAS else xhat * w + tl.store(Y + cols, y, mask=mask) + wdy = w * dy + c1 = tl.sum(xhat * wdy, axis=0) / N + if not IS_RMS_NORM: + c2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * c1 + c2)) * rstd + else: + dx = (wdy - xhat * c1) * rstd + dw += dy * xhat + if HAS_BIAS: + db += dy + if HAS_Z and not NORM_BEFORE_GATE: + z_sigmoid = tl.sigmoid(z) + dz = dx * x_og * z_sigmoid * (1 + z * (1 - z_sigmoid)) + tl.store(DZ + cols, dz, mask=mask) + dx *= z * z_sigmoid + # Write dx + tl.store(DX + cols, dx, mask=mask) + + X += stride_x_row + if HAS_Z: + Z += stride_z_row + DZ += stride_dz_row + if RECOMPUTE_OUTPUT: + Y += stride_y_row + DY += stride_dy_row + DX += stride_dx_row + tl.store(DW + row_block_id * stride_dw_row + group * N + cols, dw, mask=mask) + if HAS_BIAS: + tl.store(DB + row_block_id * stride_db_row + group * N + cols, db, mask=mask) + + +def _layer_norm_bwd(dy, x, weight, bias, eps, mean, rstd, z=None, group_size=None, + norm_before_gate=True, is_rms_norm=False, recompute_output=False, dz=None, out=None): + M, N = x.shape + if group_size is None: + group_size = N + assert N % group_size == 0 + ngroups = N // group_size + assert x.stride(-1) == 1 + assert dy.stride(-1) == 1 + assert dy.shape == (M, N) + if z is not None: + assert z.stride(-1) == 1 + assert z.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + dx = torch.empty_like(x) + if dz is not None: + assert z is not None + assert dz.shape == z.shape + assert dz.stride(-1) == 1 + else: + dz = torch.empty_like(z) if z is not None else None + if recompute_output: + if out is None: + out = torch.empty_like(x) + assert out.shape == x.shape + 
+ # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(group_size)) + if group_size > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + # heuristics for number of warps + num_warps = min(max(BLOCK_N // 256, 1), 8) + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + # If group size is small (e.g., 64), we're only using 1 warp. So having just 108 programs + # would limit the occupancy. + nrow_groups = math.ceil(sm_count * math.ceil(4 / num_warps) / ngroups) + _dw = torch.empty((nrow_groups, N), dtype=torch.float32, device=weight.device) + _db = torch.empty((nrow_groups, N), dtype=torch.float32, device=bias.device) if bias is not None else None + rows_per_program = math.ceil(M / nrow_groups) + grid = (nrow_groups, ngroups) + with torch.cuda.device(x.device.index): + _layer_norm_bwd_kernel[grid](x, weight, bias, z, out if recompute_output else None, + dy, dx, _dw, _db, dz, mean, rstd, + x.stride(0), + z.stride(0) if z is not None else 0, + 0 if not recompute_output else out.stride(0), + dy.stride(0), dx.stride(0), + dz.stride(0) if dz is not None else 0, + _dw.stride(0), + _db.stride(0) if _db is not None else 0, + M, group_size, eps, + rows_per_program, + BLOCK_N=BLOCK_N, + NORM_BEFORE_GATE=norm_before_gate, + IS_RMS_NORM=is_rms_norm, + num_warps=num_warps) + dw = _dw.sum(0).to(weight.dtype) + db = _db.sum(0).to(bias.dtype) if bias is not None else None + return (dx, dw, db, dz) if not recompute_output else (dx, dw, db, dz, out) + + +class LayerNormFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, + is_rms_norm=False): + """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z)) + """ + + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if z is not None: + assert z.shape == x_shape_og + z = z.reshape(-1, z.shape[-1]) + if z.stride(-1) != 1: + z = z.contiguous() + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + y, mean, rstd = _layer_norm_fwd(x, weight, bias, eps, z=z, group_size=group_size, norm_before_gate=norm_before_gate, is_rms_norm=is_rms_norm) + ctx.save_for_backward(x, weight, bias, mean, rstd, z) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.group_size = group_size + ctx.norm_before_gate = norm_before_gate + ctx.is_rms_norm = is_rms_norm + return y.reshape(x_shape_og) + + @staticmethod + def backward(ctx, dy): + x, weight, bias, mean, rstd, z = ctx.saved_tensors + dy = dy.reshape(-1, dy.shape[-1]) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + dx, dw, db, dz = _layer_norm_bwd(dy, x, weight, bias, ctx.eps, mean, rstd, z, ctx.group_size, + ctx.norm_before_gate, ctx.is_rms_norm) + return dx.reshape(ctx.x_shape_og), dw, db, dz.reshape(ctx.x_shape_og) if dz is not None else None, None, None, None, None + + +def layernorm_fn(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, is_rms_norm=False): + return LayerNormFn.apply(x, weight, bias, z, eps, group_size, norm_before_gate, is_rms_norm) + + +def rmsnorm_fn(x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True): + return LayerNormFn.apply(x, weight, bias, z, eps, group_size, norm_before_gate, True) + + +class LayerNorm(torch.nn.Module): + + def __init__(self, 
hidden_size, eps=1e-5, group_size=None, norm_before_gate=True, device=None, dtype=None): + """If group_size is not None, we do GroupNorm with each group having group_size elements. + group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group). + """ + + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.bias = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.group_size = group_size + self.norm_before_gate = norm_before_gate + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + torch.nn.init.zeros_(self.bias) + + def forward(self, x, z=None): + """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z)) + """ + return layernorm_fn(x, self.weight, self.bias, z=z, group_size=self.group_size, eps=self.eps, + norm_before_gate=self.norm_before_gate) + + +class RMSNorm(torch.nn.Module): + + def __init__(self, hidden_size, eps=1e-5, group_size=None, norm_before_gate=True, device=None, dtype=None): + """If group_size is not None, we do GroupNorm with each group having group_size elements. + group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group). + """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.register_parameter("bias", None) + self.group_size = group_size + self.norm_before_gate = norm_before_gate + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + + def forward(self, x, z=None): + """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z)) + """ + return rmsnorm_fn(x, self.weight, self.bias, z=z, eps=self.eps, group_size=self.group_size, + norm_before_gate=self.norm_before_gate) diff --git a/mamba/mamba_ssm/ops/triton/selective_state_update.py b/mamba/mamba_ssm/ops/triton/selective_state_update.py new file mode 100644 index 0000000000000000000000000000000000000000..bc78de90ec60304e38c3c86b44c49bf086650084 --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/selective_state_update.py @@ -0,0 +1,265 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
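# --- Illustrative usage sketch (not part of the patch above) ---
# layernorm_gated computes norm(x) * silu(z) when norm_before_gate=True, and norm(x * silu(z))
# otherwise; the CPU reference rms_norm_ref above makes the two orderings explicit. Assumes the
# package is importable as mamba_ssm; rmsnorm below is a hypothetical helper for the check.
import torch
import torch.nn.functional as F
from mamba_ssm.ops.triton.layernorm_gated import rms_norm_ref

batch, d = 4, 64
x, z, w = torch.randn(batch, d), torch.randn(batch, d), torch.randn(d)

def rmsnorm(t, eps=1e-6):
    return t * torch.rsqrt(t.square().mean(-1, keepdim=True) + eps)

out_a = rms_norm_ref(x, w, None, z=z, norm_before_gate=True)    # rmsnorm(x) * w * silu(z)
out_b = rms_norm_ref(x, w, None, z=z, norm_before_gate=False)   # rmsnorm(x * silu(z)) * w
assert torch.allclose(out_a, rmsnorm(x) * w * F.silu(z), atol=1e-5)
assert torch.allclose(out_b, rmsnorm(x * F.silu(z)) * w, atol=1e-5)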
+ +"""We want triton==2.1.0 or triton==2.2.0 or triton==2.3.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.softplus import softplus + + +@triton.heuristics({"HAS_DT_BIAS": lambda args: args["dt_bias_ptr"] is not None}) +@triton.heuristics({"HAS_D": lambda args: args["D_ptr"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["z_ptr"] is not None}) +@triton.heuristics({"BLOCK_SIZE_DSTATE": lambda args: triton.next_power_of_2(args["dstate"])}) +@triton.jit +def _selective_scan_update_kernel( + # Pointers to matrices + state_ptr, x_ptr, dt_ptr, dt_bias_ptr, A_ptr, B_ptr, C_ptr, D_ptr, z_ptr, out_ptr, + # Matrix dimensions + batch, nheads, dim, dstate, nheads_ngroups_ratio, + # Strides + stride_state_batch, stride_state_head, stride_state_dim, stride_state_dstate, + stride_x_batch, stride_x_head, stride_x_dim, + stride_dt_batch, stride_dt_head, stride_dt_dim, + stride_dt_bias_head, stride_dt_bias_dim, + stride_A_head, stride_A_dim, stride_A_dstate, + stride_B_batch, stride_B_group, stride_B_dstate, + stride_C_batch, stride_C_group, stride_C_dstate, + stride_D_head, stride_D_dim, + stride_z_batch, stride_z_head, stride_z_dim, + stride_out_batch, stride_out_head, stride_out_dim, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + TIE_HDIM: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + HAS_D: tl.constexpr, + HAS_Z: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_m = tl.program_id(axis=0) + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + state_ptr += pid_b * stride_state_batch + pid_h * stride_state_head + x_ptr += pid_b * stride_x_batch + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_h * stride_dt_head + if HAS_DT_BIAS: + dt_bias_ptr += pid_h * stride_dt_bias_head + A_ptr += pid_h * stride_A_head + B_ptr += pid_b * stride_B_batch + (pid_h // nheads_ngroups_ratio) * stride_B_group + C_ptr += pid_b * stride_C_batch + (pid_h // nheads_ngroups_ratio) * stride_C_group + if HAS_Z: + z_ptr += pid_b * stride_z_batch + pid_h * stride_z_head + out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_DSTATE) + state_ptrs = state_ptr + (offs_m[:, None] * stride_state_dim + offs_n[None, :] * stride_state_dstate) + x_ptrs = x_ptr + offs_m * stride_x_dim + dt_ptrs = dt_ptr + offs_m * stride_dt_dim + if HAS_DT_BIAS: + dt_bias_ptrs = dt_bias_ptr + offs_m * stride_dt_bias_dim + if HAS_D: + D_ptr += pid_h * stride_D_head + A_ptrs = A_ptr + (offs_m[:, None] * stride_A_dim + offs_n[None, :] * stride_A_dstate) + B_ptrs = B_ptr + offs_n * stride_B_dstate + C_ptrs = C_ptr + offs_n * stride_C_dstate + if HAS_D: + D_ptrs = D_ptr + offs_m * stride_D_dim + if HAS_Z: + z_ptrs = z_ptr + offs_m * stride_z_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + + state = tl.load(state_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0) + x = tl.load(x_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if not TIE_HDIM: + dt = tl.load(dt_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt += tl.load(dt_bias_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if DT_SOFTPLUS: + dt = softplus(dt) + A = tl.load(A_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + dA = tl.exp(A * dt[:, None]) + else: + dt = 
tl.load(dt_ptr).to(tl.float32) + if HAS_DT_BIAS: + dt += tl.load(dt_bias_ptr).to(tl.float32) + if DT_SOFTPLUS: + dt = softplus(dt) + A = tl.load(A_ptr).to(tl.float32) + dA = tl.exp(A * dt) # scalar, not a matrix + + B = tl.load(B_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + C = tl.load(C_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + if HAS_D: + D = tl.load(D_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_Z: + z = tl.load(z_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + + if not TIE_HDIM: + dB = B[None, :] * dt[:, None] + else: + dB = B * dt # vector of size (dstate,) + state = state * dA + dB * x[:, None] + tl.store(state_ptrs, state, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate)) + out = tl.sum(state * C[None, :], axis=1) + if HAS_D: + out += x * D + if HAS_Z: + out *= z * tl.sigmoid(z) + tl.store(out_ptrs, out, mask=offs_m < dim) + + +def selective_state_update(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) or (batch, nheads, dim, dstate) + x: (batch, dim) or (batch, nheads, dim) + dt: (batch, dim) or (batch, nheads, dim) + A: (dim, dstate) or (nheads, dim, dstate) + B: (batch, dstate) or (batch, ngroups, dstate) + C: (batch, dstate) or (batch, ngroups, dstate) + D: (dim,) or (nheads, dim) + z: (batch, dim) or (batch, nheads, dim) + dt_bias: (dim,) or (nheads, dim) + Return: + out: (batch, dim) or (batch, nheads, dim) + """ + has_heads = state.dim() > 3 + if state.dim() == 3: + state = state.unsqueeze(1) + if x.dim() == 2: + x = x.unsqueeze(1) + if dt.dim() == 2: + dt = dt.unsqueeze(1) + if A.dim() == 2: + A = A.unsqueeze(0) + if B.dim() == 2: + B = B.unsqueeze(1) + if C.dim() == 2: + C = C.unsqueeze(1) + if D is not None and D.dim() == 1: + D = D.unsqueeze(0) + if z is not None and z.dim() == 2: + z = z.unsqueeze(1) + if dt_bias is not None and dt_bias.dim() == 1: + dt_bias = dt_bias.unsqueeze(0) + batch, nheads, dim, dstate = state.shape + assert x.shape == (batch, nheads, dim) + assert dt.shape == x.shape + assert A.shape == (nheads, dim, dstate) + ngroups = B.shape[1] + assert nheads % ngroups == 0, "nheads must be divisible by ngroups" + assert B.shape == (batch, ngroups, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (nheads, dim) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (nheads, dim) + out = torch.empty_like(x) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE_M']), batch, nheads) + z_strides = ((z.stride(0), z.stride(1), z.stride(2)) if z is not None else (0, 0, 0)) + # We don't want autotune since it will overwrite the state + # We instead tune by hand. 
+ BLOCK_SIZE_M, num_warps = ((32, 4) if dstate <= 16 + else ((16, 4) if dstate <= 32 else + ((8, 4) if dstate <= 64 else + ((4, 4) if dstate <= 128 else + ((4, 8)))))) + tie_hdim = A.stride(-1) == 0 and A.stride(-2) == 0 and dt.stride(-1) == 0 and dt_bias.stride(-1) == 0 + with torch.cuda.device(x.device.index): + _selective_scan_update_kernel[grid]( + state, x, dt, dt_bias, A, B, C, D, z, out, + batch, nheads, dim, dstate, nheads // ngroups, + state.stride(0), state.stride(1), state.stride(2), state.stride(3), + x.stride(0), x.stride(1), x.stride(2), + dt.stride(0), dt.stride(1), dt.stride(2), + *(dt_bias.stride(0), dt_bias.stride(1)) if dt_bias is not None else 0, + A.stride(0), A.stride(1), A.stride(2), + B.stride(0), B.stride(1), B.stride(2), + C.stride(0), C.stride(1), C.stride(2), + *(D.stride(0), D.stride(1)) if D is not None else 0, + z_strides[0], z_strides[1], z_strides[2], + out.stride(0), out.stride(1), out.stride(2), + dt_softplus, + tie_hdim, + BLOCK_SIZE_M, + num_warps=num_warps, + ) + if not has_heads: + out = out.squeeze(1) + return out + + +def selective_state_update_ref(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) or (batch, nheads, dim, dstate) + x: (batch, dim) or (batch, nheads, dim) + dt: (batch, dim) or (batch, nheads, dim) + A: (dim, dstate) or (nheads, dim, dstate) + B: (batch, dstate) or (batch, ngroups, dstate) + C: (batch, dstate) or (batch, ngroups, dstate) + D: (dim,) or (nheads, dim) + z: (batch, dim) or (batch, nheads, dim) + dt_bias: (dim,) or (nheads, dim) + Return: + out: (batch, dim) or (batch, nheads, dim) + """ + has_heads = state.dim() > 3 + if state.dim() == 3: + state = state.unsqueeze(1) + if x.dim() == 2: + x = x.unsqueeze(1) + if dt.dim() == 2: + dt = dt.unsqueeze(1) + if A.dim() == 2: + A = A.unsqueeze(0) + if B.dim() == 2: + B = B.unsqueeze(1) + if C.dim() == 2: + C = C.unsqueeze(1) + if D is not None and D.dim() == 1: + D = D.unsqueeze(0) + if z is not None and z.dim() == 2: + z = z.unsqueeze(1) + if dt_bias is not None and dt_bias.dim() == 1: + dt_bias = dt_bias.unsqueeze(0) + batch, nheads, dim, dstate = state.shape + assert x.shape == (batch, nheads, dim) + assert dt.shape == x.shape + assert A.shape == (nheads, dim, dstate) + ngroups = B.shape[1] + assert nheads % ngroups == 0, "nheads must be divisible by ngroups" + assert B.shape == (batch, ngroups, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (nheads, dim) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (nheads, dim) + dt = dt + dt_bias + dt = F.softplus(dt) if dt_softplus else dt + dA = torch.exp(rearrange(dt, "b h d -> b h d 1") * A) # (batch, nheads, dim, dstate) + B = repeat(B, "b g n -> b (g h) n", h=nheads // ngroups) # (batch, nheads, dstate) + C = repeat(C, "b g n -> b (g h) n", h=nheads // ngroups) # (batch, nheads, dstate) + dB = rearrange(dt, "b h d -> b h d 1") * rearrange(B, "b h n -> b h 1 n") # (batch, nheads, dim, dstate) + state.copy_(state * dA + dB * rearrange(x, "b h d -> b h d 1")) # (batch, dim, dstate + out = torch.einsum("bhdn,bhn->bhd", state.to(C.dtype), C) + if D is not None: + out += (x * D).to(out.dtype) + out = (out if z is None else out * F.silu(z)).to(x.dtype) + if not has_heads: + out = out.squeeze(1) + return out diff --git a/mamba/mamba_ssm/ops/triton/softplus.py b/mamba/mamba_ssm/ops/triton/softplus.py new file mode 100644 index 
0000000000000000000000000000000000000000..de68b46189178903432715557cdf525f7b644fc0 --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/softplus.py @@ -0,0 +1,17 @@ +import triton +import triton.language as tl +from packaging import version + +TRITON3 = version.parse(triton.__version__) >= version.parse("3.0.0") + + +if TRITON3: + @triton.jit + def softplus(dt): + dt = tl.where(dt <= 20.0, tl.math.log(tl.math.exp(dt) + 1), dt) + return dt +else: + @triton.jit + def softplus(dt): + dt = tl.where(dt <= 20.0, tl.math.log1p(tl.exp(dt)), dt) + return dt \ No newline at end of file diff --git a/mamba/mamba_ssm/ops/triton/ssd_bmm.py b/mamba/mamba_ssm/ops/triton/ssd_bmm.py new file mode 100644 index 0000000000000000000000000000000000000000..48fd4f063e7796ceea772c21956b7bbdcbf1d196 --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/ssd_bmm.py @@ -0,0 +1,262 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['chunk_size', 'K', 'IS_CAUSAL'], +) +@triton.jit +def _bmm_chunk_fwd_kernel( + # Pointers to matrices + a_ptr, b_ptr, out_ptr, seq_idx_ptr, + # Matrix dimensions + seqlen, chunk_size, K, ngroups, + stride_a_batch, stride_a_seqlen, stride_a_head, stride_ak, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_bk, + stride_out_batch, stride_out_chunk, stride_out_head, stride_outm, stride_outn, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + IS_CAUSAL: tl.constexpr, + dot_dtype: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_ch = tl.program_id(axis=2) + pid_c = pid_ch // ngroups + pid_h = pid_ch - pid_c * ngroups + num_pid_n = tl.cdiv(chunk_size, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + if IS_CAUSAL: + if pid_n * BLOCK_SIZE_N >= (pid_m + 1) * BLOCK_SIZE_M: + return + a_ptr += pid_b * stride_a_batch + pid_c * chunk_size * stride_a_seqlen + pid_h * stride_a_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + pid_h * stride_b_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + 
tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = a_ptr + (offs_m[:, None] * stride_a_seqlen + offs_k[None, :] * stride_ak) + b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_n[None, :] * stride_b_seqlen) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0).to(dot_dtype) + b = tl.load(b_ptrs, mask=(offs_k[:, None] < K - k * BLOCK_SIZE_K) & (offs_n[None, :] < chunk_size_limit), other=0.0).to(dot_dtype) + acc += tl.dot(a, b) + a_ptrs += BLOCK_SIZE_K * stride_ak + b_ptrs += BLOCK_SIZE_K * stride_bk + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + if HAS_SEQ_IDX: + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_n = tl.load(seq_idx_ptr + offs_n * stride_seq_idx_seqlen, mask=offs_n < chunk_size_limit, other=-2) + acc = tl.where(seq_idx_m[:, None] == seq_idx_n[None, :], acc, 0.0) + out = acc.to(out_ptr.dtype.element_ty) + + out_ptr += pid_b * stride_out_batch + pid_c * stride_out_chunk + pid_h * stride_out_head + out_ptrs = out_ptr + (stride_outm * offs_m[:, None] + offs_n[None, :] * stride_outn) + tl.store(out_ptrs, out, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_CS': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_CS': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_CS': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_CS': 32}, num_stages=4, num_warps=2), + ], + key=['chunk_size', 'K'], +) +@triton.jit +def _bmm_chunk_bwd_kernel( + # Pointers to matrices + a_ptr, dout_ptr, db_ptr, res_ptr, + # Matrix dimensions + seqlen, chunk_size, K, ngroups, + stride_a_batch, stride_a_seqlen, stride_a_head, stride_ak, + stride_dout_batch, stride_dout_chunk, stride_dout_head, stride_dout_csize_m, stride_dout_csize_n, + stride_db_batch, stride_db_seqlen, stride_db_head, stride_db_k, + stride_res_batch, stride_res_seqlen, stride_res_head, stride_res_k, + # Meta-parameters + dot_dtype: tl.constexpr, + HAS_RESIDUAL: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_CS: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_ch = tl.program_id(axis=2) + pid_c = pid_ch // ngroups + pid_h = pid_ch - pid_c * ngroups + num_pid_n = tl.cdiv(K, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = 
tl.program_id(axis=0) % num_pid_n + + a_ptr += pid_b * stride_a_batch + pid_c * chunk_size * stride_a_seqlen + pid_h * stride_a_head + dout_ptr += pid_b * stride_dout_batch + pid_c * stride_dout_chunk + pid_h * stride_dout_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_cs = tl.arange(0, BLOCK_SIZE_CS) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_csize_n + offs_cs[None, :] * stride_dout_csize_m) + a_ptrs = a_ptr + (offs_cs[:, None] * stride_a_seqlen + offs_n[None, :] * stride_ak) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for cs in range(0, tl.cdiv(chunk_size_limit, BLOCK_SIZE_CS)): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_cs[None, :] < chunk_size_limit - cs * BLOCK_SIZE_CS), other=0.0).to(dot_dtype) + a = tl.load(a_ptrs, mask=(offs_cs[:, None] < chunk_size_limit - cs * BLOCK_SIZE_CS) & (offs_n[None, :] < K), other=0.0).to(dot_dtype) + acc += tl.dot(dout, a) + dout_ptrs += BLOCK_SIZE_CS * stride_dout_csize_m + a_ptrs += BLOCK_SIZE_CS * stride_a_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + if HAS_RESIDUAL: + res_ptr += pid_b * stride_res_batch + pid_c * chunk_size * stride_res_seqlen + pid_h * stride_res_head + res_ptrs = res_ptr + (offs_m[:, None] * stride_res_seqlen + offs_n[None, :] * stride_res_k) + res = tl.load(res_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < K)).to(tl.float32) + acc += res + db = acc.to(db_ptr.dtype.element_ty) + + db_ptr += pid_b * stride_db_batch + pid_c * chunk_size * stride_db_seqlen + pid_h * stride_db_head + db_ptrs = db_ptr + (offs_m[:, None] * stride_db_seqlen + offs_n[None, :] * stride_db_k) + tl.store(db_ptrs, db, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < K)) + + +def _bmm_chunk_fwd(a, b, chunk_size, seq_idx=None, causal=False, output_dtype=None): + """ + Argument: + a: (batch, seqlen, k) or (batch, seqlen, ngroups, k) + b: (batch, seqlen, k) or (batch, seqlen, ngroups, k) + seq_idx: (batch, seqlen) or None. out[i, j] for seq_idx[i] != seq_idx[j] will be zeroed out. + causal: if True, then out[i, j] for i > j will be arbitrary, only out[i, j] for i <= j are + guaranteed to be correct. + Return: + out: (batch, nchunks, chunk_size, chunk_size) or (batch, nchunks, ngroups, chunk_size, chunk_size) + """ + # Check constraints. + has_groups = a.dim() == 4 + if not has_groups: + batch, seqlen, k = a.shape + else: + batch, seqlen, ngroups, k = a.shape + assert b.shape == a.shape + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if a.stride(-1) != 1 and a.stride(1) != 1: + a = a.contiguous() + if b.stride(-1) != 1 and b.stride(1) != 1: + b = b.contiguous() + nchunks = math.ceil(seqlen / chunk_size) + # Allocates output. 
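+    # One (chunk_size, chunk_size) score matrix is produced per chunk (and per group for 4D
+    # inputs); the grid below assigns each program a (BLOCK_SIZE_M, BLOCK_SIZE_N) tile of that
+    # matrix for a given (batch, chunk * group) index.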
+ out_dtype = a.dtype if output_dtype is None else output_dtype + out = torch.empty((batch, nchunks, chunk_size, chunk_size) if not has_groups else (batch, nchunks, ngroups, chunk_size, chunk_size), + device=a.device, dtype=out_dtype) + dot_dtype = (tl.bfloat16 if a.dtype == torch.bfloat16 or b.dtype == torch.bfloat16 else + (tl.float16 if a.dtype == torch.float16 or b.dtype == torch.float16 else tl.float32)) + grid = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(chunk_size, META['BLOCK_SIZE_N']), + batch, nchunks if not has_groups else nchunks * ngroups) + with torch.cuda.device(a.device.index): + _bmm_chunk_fwd_kernel[grid]( + a, b, out, seq_idx, + seqlen, chunk_size, k, ngroups if has_groups else 1, + a.stride(0), a.stride(1), 0 if not has_groups else a.stride(2), a.stride(-1), + b.stride(0), b.stride(1), 0 if not has_groups else b.stride(2), b.stride(-1), + out.stride(0), out.stride(1), 0 if not has_groups else out.stride(2), out.stride(-2), out.stride(-1), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + causal, + dot_dtype, + HAS_SEQ_IDX=seq_idx is not None, + ) + return out + + +def _bmm_chunk_bwd(a, dout, residual=None, out=None): + """ + Argument: + a: (batch, seqlen, k) or (batch, seqlen, ngroups, k) + dout: (batch, nchunks, chunk_size, chunk_size) or (batch, nchunks, ngroups, chunk_size, chunk_size) + residual: (batch, seqlen, k) or (batch, seqlen, ngroups, k) + Return: + out: (batch, seqlen, k) or (batch, seqlen, ngroups, k) + + If there was seq_idx in the fwd pass, then dout[i, j] for seq_idx[i] != seq_idx[j] should already be + zeroed out before calling this function. + """ + # Check constraints. + has_groups = a.dim() == 4 + if not has_groups: + batch, seqlen, k = a.shape + else: + batch, seqlen, ngroups, k = a.shape + nchunks, chunk_size = dout.shape[1], dout.shape[-1] + if a.stride(-1) != 1 and a.stride(-2) != 1: + a = a.contiguous() + if dout.stride(-1) != 1 and dout.stride(-2) != 1: + dout = dout.contiguous() + if residual is not None: + assert residual.shape == (batch, seqlen, k) if not has_groups else (batch, seqlen, ngroups, k) + if residual.stride(-1) != 1 and residual.stride(1) != 1: + residual = residual.contiguous() + # Allocates output. 
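+    # Per chunk this computes db = dout^T @ a (note the transposed dout strides in the kernel)
+    # and optionally adds `residual`; a preallocated `out` with the same shape as `a` can be
+    # passed in so the result is written in place.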
+ if out is not None: + assert out.shape == a.shape + assert out.stride(-1) == 1 or out.stride(1) == 1 + else: + out = torch.empty_like(a) + dot_dtype = (tl.bfloat16 if a.dtype == torch.bfloat16 or dout.dtype == torch.bfloat16 else + (tl.float16 if a.dtype == torch.float16 or dout.dtype == torch.float16 else tl.float32)) + grid = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(k, META['BLOCK_SIZE_N']), batch, + nchunks if not has_groups else nchunks * ngroups) + residual_strides = ((residual.stride(0), residual.stride(1), 0 if not has_groups else residual.stride(2), + residual.stride(-1)) + if residual is not None else (0, 0, 0, 0)) + with torch.cuda.device(a.device.index): + _bmm_chunk_bwd_kernel[grid]( + a, dout, out, residual, + seqlen, chunk_size, k, ngroups if has_groups else 1, + a.stride(0), a.stride(1), 0 if not has_groups else a.stride(2), a.stride(-1), + dout.stride(0), dout.stride(1), 0 if not has_groups else dout.stride(2), dout.stride(-2), dout.stride(-1), + out.stride(0), out.stride(1), 0 if not has_groups else out.stride(2), out.stride(-1), + residual_strides[0], residual_strides[1], residual_strides[2], residual_strides[3], + dot_dtype, + HAS_RESIDUAL=residual is not None, + ) + return out diff --git a/mamba/mamba_ssm/ops/triton/ssd_chunk_scan.py b/mamba/mamba_ssm/ops/triton/ssd_chunk_scan.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa3a934615b85c09b4eb90f04f0f25caaea5980 --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/ssd_chunk_scan.py @@ -0,0 +1,1829 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +from packaging import version + +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.ssd_bmm import _bmm_chunk_fwd, _bmm_chunk_bwd + +TRITON_22 = version.parse(triton.__version__) >= version.parse('2.2.0') + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['chunk_size', 'hdim', 'dstate', 'IS_CAUSAL'], +) +@triton.jit +def _chunk_scan_fwd_kernel( + # Pointers to matrices + cb_ptr, x_ptr, z_ptr, out_ptr, out_x_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, C_ptr, prev_states_ptr, D_ptr, + # 
Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_z_batch, stride_z_seqlen, stride_z_head, stride_z_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_states_batch, stride_states_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_D_head, + # Meta-parameters + IS_CAUSAL: tl.constexpr, + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_Z: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, + IS_TRITON_22: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + (pid_h // nheads_ngroups_ratio) * stride_C_head + prev_states_ptr += pid_b * stride_states_batch + pid_c * stride_states_chunk + pid_h * stride_states_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Without the if (pid_c > -1), with Triton 2.1.0, I get + # Assertion `!(srcMmaLayout && dstMmaLayout) && "Unexpected mma -> mm a layout conversion"' failed. 
+ # With Triton 2.2.0, this works + if IS_TRITON_22 or pid_c > -1: + # Faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + offs_k_dstate = tl.arange(0, BLOCK_SIZE_DSTATE if BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_k_dstate[None, :] * stride_C_dstate) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_states_hdim + offs_k_dstate[:, None] * stride_states_dstate) + if not HAS_SEQ_IDX: + scale_m = tl.exp(dA_cs_m) + else: + scale_m = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + if BLOCK_SIZE_DSTATE <= 128: + C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k_dstate[None, :] < dstate), other=0.0) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k_dstate[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + prev_states = prev_states.to(C_ptr.dtype.element_ty) + acc = tl.dot(C, prev_states) * scale_m[:, None] + else: + for k in range(0, dstate, BLOCK_SIZE_K): + C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k_dstate[None, :] < dstate - k), other=0.0) + # C = (C * scale_m[:, None]).to(C_ptr.dtype.element_ty) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k_dstate[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + prev_states = prev_states.to(C_ptr.dtype.element_ty) + acc += tl.dot(C, prev_states) + C_ptrs += BLOCK_SIZE_K + prev_states_ptrs += BLOCK_SIZE_K + acc *= scale_m[:, None] + + offs_k = tl.arange(0, BLOCK_SIZE_K) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_k[None, :] * stride_cb_csize_k) + x_ptrs = x_ptr + (offs_k[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_k * stride_dt_csize + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + K_MAX = chunk_size_limit if not IS_CAUSAL else min((pid_m + 1) * BLOCK_SIZE_M, chunk_size_limit) + for k in range(0, K_MAX, BLOCK_SIZE_K): + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_k[None, :] < chunk_size - k), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size - k, other=0.0).to(tl.float32) + # If there's seq_idx, we already set cb[i, j] = 0 for seq_idx[i] != seq_idx[j]. + # So we don't need masking wrt seq_idx here. 
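+        # Each entry cb[m, k] is weighted by exp(dA_cumsum[m] - dA_cumsum[k]) * dt[k], i.e. the
+        # decay an input at position k accumulates by the time it reaches position m of the chunk;
+        # together with the causal mask below (when IS_CAUSAL) this forms the lower-triangular
+        # intra-chunk operator applied to x.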
+ cb *= tl.exp((dA_cs_m[:, None] - dA_cs_k[None, :])) + dt_k = tl.load(dt_ptrs, mask=offs_k < chunk_size - k, other=0.0).to(tl.float32) + cb *= dt_k + if IS_CAUSAL: + mask = offs_m[:, None] >= k + offs_k[None, :] + cb = tl.where(mask, cb, 0.0) + cb = cb.to(x_ptr.dtype.element_ty) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < hdim), other=0.0) + acc += tl.dot(cb, x) + cb_ptrs += BLOCK_SIZE_K * stride_cb_csize_k + x_ptrs += BLOCK_SIZE_K * stride_x_seqlen + dt_ptrs += BLOCK_SIZE_K * stride_dt_csize + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + offs_out_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_out_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + if HAS_D: + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + x_residual = tl.load(x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim), + mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + acc += x_residual * D + + if HAS_Z: + out_x_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + out_x_ptrs = out_x_ptr + (stride_out_seqlen * offs_out_m[:, None] + offs_out_n[None, :]) + tl.store(out_x_ptrs, acc, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim)) + + z_ptr += pid_b * stride_z_batch + pid_c * chunk_size * stride_z_seqlen + pid_h * stride_z_head + z_ptrs = z_ptr + (stride_z_seqlen * offs_out_m[:, None] + stride_z_hdim * offs_out_n[None, :]) + z = tl.load(z_ptrs, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim), other=0.0).to(tl.float32) + acc *= z * tl.sigmoid(z) + + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + out_ptrs = out_ptr + (stride_out_seqlen * offs_out_m[:, None] + offs_out_n[None, :] * stride_out_hdim) + tl.store(out_ptrs, acc, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim)) + + +@triton.autotune( + configs=[ + # triton.Config({'BLOCK_SIZE_N': 256}, num_stages=4, num_warps=4), + # triton.Config({'BLOCK_SIZE_N': 128}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_N': 64}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_N': 64}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=8), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_scan_fwd_kernel_wip( + # Pointers to matrices + cb_ptr, x_ptr, z_ptr, out_ptr, out_x_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, C_ptr, B_ptr, prev_states_ptr, D_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_z_batch, stride_z_seqlen, stride_z_head, stride_z_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_B_batch, stride_B_seqlen, stride_B_head, stride_B_dstate, + 
stride_states_batch, stride_states_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_D_head, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_Z: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_n = tl.program_id(axis=0) + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + (pid_h // nheads_ngroups_ratio) * stride_C_head + B_ptr += pid_b * stride_B_batch + pid_c * chunk_size * stride_B_seqlen + (pid_h // nheads_ngroups_ratio) * stride_B_head + prev_states_ptr += pid_b * stride_states_batch + pid_c * stride_states_chunk + pid_h * stride_states_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + + offs_m = tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k_dstate = tl.arange(0, BLOCK_SIZE_DSTATE) + + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_k_dstate[None, :] * stride_C_dstate) + B_ptrs = B_ptr + (offs_m[None, :] * stride_B_seqlen + offs_k_dstate[:, None] * stride_B_dstate) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_states_hdim + offs_k_dstate[:, None] * stride_states_dstate) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_m[None, :] * stride_cb_csize_k) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + out_ptrs = out_ptr + (offs_m[:, None] * stride_out_seqlen + offs_n[None, :] * stride_out_hdim) + + prev_states = tl.load(prev_states_ptrs, mask=(offs_k_dstate[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + # if pid_c == 0: + # if pid_b == 0: + # if pid_h == 0: + # tl.device_print("", prev_states) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + # dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # scale_m = tl.exp(dA_cs_m) + # C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k_dstate[None, :] < dstate), other=0.0) + # acc = tl.dot(C, prev_states.to(C_ptr.dtype.element_ty)) * scale_m[:, None] + # cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_m[None, :] < chunk_size), other=0.0).to(tl.float32) + # cb *= tl.exp((dA_cs_m[:, None] - dA_cs_m[None, :])) + # dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # cb *= dt_m + # mask = offs_m[:, None] >= offs_m[None, :] + # cb = tl.where(mask, cb, 0.0) + # cb = cb.to(x_ptr.dtype.element_ty) + # x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0) + # acc += tl.dot(cb, x) + # if HAS_D: + # if D_HAS_HDIM: + # D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, 
mask=offs_n < hdim, other=0.0).to(tl.float32) + # else: + # D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + # acc += x.to(tl.float32) * D + # tl.store(out_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + for start_m in range(0, chunk_size_limit, BLOCK_SIZE_M): + start_m = tl.multiple_of(start_m, BLOCK_SIZE_M) + dA_cs_m = tl.load(dA_cumsum_ptr + (start_m + offs_m) * stride_dA_cs_csize, mask=offs_m < chunk_size - start_m, other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr + start_m - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + (start_m + offs_m) * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit - start_m, other=-1) + if not HAS_SEQ_IDX: + scale_m = tl.exp(dA_cs_m) + else: + scale_m = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + C = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit - start_m) & (offs_k_dstate[None, :] < dstate), other=0.0) + acc = tl.dot(C, prev_states.to(C_ptr.dtype.element_ty)) * scale_m[:, None] + # cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size - start_m) & (offs_m[None, :] < chunk_size - start_m), other=0.0).to(tl.float32) + # cb *= tl.exp((dA_cs_m[:, None] - dA_cs_m[None, :])) + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size - start_m, other=0.0).to(tl.float32) + # cb *= dt_m + # mask = offs_m[:, None] >= offs_m[None, :] + # cb = tl.where(mask, cb, 0.0) + # cb = cb.to(x_ptr.dtype.element_ty) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit - start_m) & (offs_n[None, :] < hdim), other=0.0) + # acc += tl.dot(cb, x) + + if HAS_D: + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + acc += x.to(tl.float32) * D + + # if HAS_Z: + # out_x_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + # out_x_ptrs = out_x_ptr + (stride_out_seqlen * offs_out_m[:, None] + offs_out_n[None, :]) + # tl.store(out_x_ptrs, acc, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim)) + + # z_ptr += pid_b * stride_z_batch + pid_c * chunk_size * stride_z_seqlen + pid_h * stride_z_head + # z_ptrs = z_ptr + (stride_z_seqlen * offs_out_m[:, None] + stride_z_hdim * offs_out_n[None, :]) + # z = tl.load(z_ptrs, mask=(offs_out_m[:, None] < chunk_size_limit) & (offs_out_n[None, :] < hdim), other=0.0).to(tl.float32) + # acc *= z * tl.sigmoid(z) + + tl.store(out_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit - start_m) & (offs_n[None, :] < hdim)) + + # TODO: this is not correct, and quite a bit slower + if start_m + BLOCK_SIZE_M < chunk_size_limit: + # B = tl.load(B_ptrs, mask=(offs_m[None, :] < chunk_size_limit - start_m) & (offs_k_dstate[:, None] < dstate), other=0.0).to(tl.float32) + B = tl.load(B_ptrs, mask=(offs_m[None, :] < chunk_size_limit - start_m) & (offs_k_dstate[:, None] < dstate), other=0.0) + dA_cs_last = tl.load(dA_cumsum_ptr + (start_m + BLOCK_SIZE_M) * stride_dA_cs_csize).to(tl.float32) + # TODO: seq_idx + scale = tl.exp((dA_cs_last - dA_cs_m)) * dt_m + # B *= scale + B = B.to(x_ptr.dtype.element_ty) + tmp = tl.dot(B, x) + prev_states += tmp.to(prev_states.dtype) + + C_ptrs += BLOCK_SIZE_M * stride_C_seqlen + B_ptrs += BLOCK_SIZE_M * stride_B_seqlen + cb_ptrs += BLOCK_SIZE_M * stride_cb_csize_m + BLOCK_SIZE_M * stride_cb_csize_k + x_ptrs += BLOCK_SIZE_M * stride_x_seqlen + dt_ptrs += BLOCK_SIZE_M * stride_dt_csize + 
out_ptrs += BLOCK_SIZE_M * stride_out_seqlen + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32}), + triton.Config({'BLOCK_SIZE_M': 64}), + triton.Config({'BLOCK_SIZE_M': 128}), + triton.Config({'BLOCK_SIZE_M': 256}), + ], + key=["chunk_size", "hdim"], +) +@triton.jit +def _chunk_scan_bwd_dz_kernel( + # Pointers to matrices + dout_ptr, out_ptr, z_ptr, x_ptr, D_ptr, outz_ptr, dz_ptr, dout_x_ptr, dD_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_z_batch, stride_z_seqlen, stride_z_head, stride_z_hdim, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_D_head, + stride_outz_batch, stride_outz_seqlen, stride_outz_head, stride_outz_hdim, + stride_dz_batch, stride_dz_seqlen, stride_dz_head, stride_dz_hdim, + stride_doutx_batch, stride_doutx_seqlen, stride_doutx_head, stride_doutx_hdim, + stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_csize, stride_dD_hdim, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_DDACS: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dout_x_ptr += pid_b * stride_doutx_batch + pid_c * chunk_size * stride_doutx_seqlen + pid_h * stride_doutx_head + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + z_ptr += pid_b * stride_z_batch + pid_c * chunk_size * stride_z_seqlen + pid_h * stride_z_head + dz_ptr += pid_b * stride_dz_batch + pid_c * chunk_size * stride_dz_seqlen + pid_h * stride_dz_head + if RECOMPUTE_OUTPUT: + outz_ptr += pid_b * stride_outz_batch + pid_c * chunk_size * stride_outz_seqlen + pid_h * stride_outz_head + if HAS_DDACS: + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + if HAS_D: + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_N) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dout_x_ptrs = dout_x_ptr + (offs_m[:, None] * stride_doutx_seqlen + offs_n[None, :] * stride_doutx_hdim) + out_ptrs = out_ptr + (offs_m[:, None] * stride_out_seqlen + offs_n[None, :] * stride_out_hdim) + z_ptrs = z_ptr + (offs_m[:, None] * stride_z_seqlen + offs_n[None, :] * stride_z_hdim) + dz_ptrs = dz_ptr + (offs_m[:, None] * stride_dz_seqlen + offs_n[None, :] * stride_dz_hdim) + if RECOMPUTE_OUTPUT: + outz_ptrs = outz_ptr + (offs_m[:, None] * stride_outz_seqlen + offs_n[None, :] * stride_outz_hdim) + if HAS_D: + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + if D_HAS_HDIM: + dD_ptrs = dD_ptr + offs_n * stride_dD_hdim + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] 
< chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + out = tl.load(out_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + z = tl.load(z_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + z_sigmoid = tl.sigmoid(z) + if RECOMPUTE_OUTPUT: + outz = out * z * z_sigmoid + tl.store(outz_ptrs, outz, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + dz = dout * out * z_sigmoid * (1 + z * (1 - z_sigmoid)) + tl.store(dz_ptrs, dz, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + dout *= z * z_sigmoid + tl.store(dout_x_ptrs, dout, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + if HAS_D: + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + dD = tl.sum(dout * x, axis=0) + tl.store(dD_ptrs, dD, mask=offs_n < hdim) + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + dD = tl.sum(dout * x) + tl.store(dD_ptr, dD) + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + out -= x * D + if HAS_DDACS: + ddA_cs = tl.sum(dout * out, axis=1) + tl.store(ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['hdim', 'dstate', 'chunk_size'], +) +@triton.jit +def _chunk_scan_bwd_dstates_kernel( + # Pointers to matrices + dout_ptr, c_ptr, dprev_states_ptr, dA_cumsum_ptr, seq_idx_ptr, + # Matrix dimensions + hdim, dstate, chunk_size, + batch, seqlen, nchunks, nheads_ngroups_ratio, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_c_batch, stride_c_seqlen, stride_c_head, stride_c_dstate, + stride_dprev_states_batch, stride_dprev_states_chunk, stride_dprev_states_head, stride_dprev_states_hdim, stride_dprev_states_dstate, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + c_ptr += pid_b * stride_c_batch + pid_c * chunk_size * stride_c_seqlen + (pid_h // 
nheads_ngroups_ratio) * stride_c_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_hdim + offs_k[None, :] * stride_dout_seqlen) + c_ptrs = c_ptr + (offs_n[None, :] * stride_c_dstate + offs_k[:, None] * stride_c_seqlen) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs = seq_idx_ptr + offs_k * stride_seq_idx_seqlen + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + for k in range(0, chunk_size_limit, BLOCK_SIZE_K): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < hdim) & (offs_k[None, :] < chunk_size_limit - k), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size - k, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale_k = tl.exp(dA_cs_k) + else: + seq_idx_k = tl.load(seq_idx_ptrs, mask=offs_k < chunk_size_limit - k, other=-1) + scale_k = tl.where(seq_idx_k == seq_idx_prev, tl.exp(dA_cs_k), 0.0) + dout = (dout * scale_k).to(dout_ptr.dtype.element_ty) + c = tl.load(c_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < dstate), other=0.0) + acc += tl.dot(dout, c) + dout_ptrs += BLOCK_SIZE_K * stride_dout_seqlen + c_ptrs += BLOCK_SIZE_K * stride_c_seqlen + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs += BLOCK_SIZE_K * stride_seq_idx_seqlen + out = acc.to(dprev_states_ptr.dtype.element_ty) + + dprev_states_ptr += pid_b * stride_dprev_states_batch + pid_c * stride_dprev_states_chunk + pid_h * stride_dprev_states_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dprev_states_ptrs = dprev_states_ptr + (offs_m[:, None] * stride_dprev_states_hdim + offs_n[None, :] * stride_dprev_states_dstate) + tl.store(dprev_states_ptrs, out, mask=(offs_m[:, None] < hdim) & (offs_n[None, :] < dstate)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, 
pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'dstate', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_dc_kernel( + # Pointers to matrices + dout_ptr, prev_states_ptr, C_ptr, dA_cumsum_ptr, seq_idx_ptr, + dc_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, dstate, hdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_prev_states_batch, stride_prev_states_chunk, stride_prev_states_head, stride_prev_states_hdim, stride_prev_states_dstate, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_dc_batch, stride_dc_seqlen, stride_dc_split, stride_dc_group, stride_dc_dstate, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_DDA_CS: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_sg = tl.program_id(axis=2) + pid_s = pid_sg // ngroups + pid_g = pid_sg - pid_s * ngroups + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dout_head + dc_ptr += pid_b * stride_dc_batch + pid_c * chunk_size * stride_dc_seqlen + pid_g * stride_dc_group + pid_s * stride_dc_split + prev_states_ptr += pid_b * stride_prev_states_batch + pid_c * stride_prev_states_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_prev_states_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dA_cs_head + if HAS_DDA_CS: + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + pid_g * stride_C_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_ddA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_prev_states_dstate + offs_k[:, None] * stride_prev_states_hdim) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + if HAS_DDA_CS: + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_n[None, :] * stride_C_dstate) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_DDA_CS: + c = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + 
nheads_iter = min(nheads_per_program, nheads // ngroups - pid_s * nheads_per_program) + for h in range(nheads_iter): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0) + prev_states = prev_states.to(dout_ptrs.dtype.element_ty) + dc = tl.dot(dout, prev_states) + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_m) + else: + scale = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + dc *= scale[:, None] + if HAS_DDA_CS: + ddA_cs = tl.sum(dc * c, axis=1) + tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + acc += dc + dout_ptrs += stride_dout_head + prev_states_ptrs += stride_prev_states_head + dA_cumsum_ptrs += stride_dA_cs_head + if HAS_DDA_CS: + ddA_cumsum_ptrs += stride_ddA_cs_head + # if HAS_SEQ_IDX: + # seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + # seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + # acc = tl.where(seq_idx_m[:, None] == seq_idx_prev, acc, 0.0) + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dc_ptrs = dc_ptr + (offs_m[:, None] * stride_dc_seqlen + offs_n[None, :] * stride_dc_dstate) + tl.store(dc_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + ], + key=['chunk_size', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_dx_kernel( + # Pointers to matrices + x_ptr, cb_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, D_ptr, + dx_ptr, ddt_ptr, # dD_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, 
stride_dA_cs_csize, + stride_D_head, + stride_dx_batch, stride_dx_seqlen, stride_dx_head, stride_dx_hdim, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + # stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_hdim, stride_dD_csize, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + # if HAS_D: + # dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_k[None, :] * stride_cb_csize_k) + dout_ptrs = dout_ptr + (offs_k[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + # Idk why limiting K_MAX gives wrong results, is it a Triton bug? + # K_MAX = min((pid_m + 1) * BLOCK_SIZE_M, chunk_size_limit) + K_MAX = chunk_size_limit + for k in range(0, K_MAX, BLOCK_SIZE_K): + # For some reason setting mask to (offs_m[:, None] < chunk_size_limit) is much slower + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_k[None, :] < K_MAX - k), other=0.0) + dout = tl.load(dout_ptrs, mask=(offs_k[:, None] < K_MAX - k) & (offs_n[None, :] < hdim), other=0.0) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < K_MAX - k, other=0.0).to(tl.float32) + cb *= tl.exp(dA_cs_k[None, :] - dA_cs_m[:, None]) + # If we don't have the (k + offs_k[None, :] < K_MAX) mask, for indices outside this range, + # we might have dA_cs_m = 0.0 and dA_cs_k very negative, and tl.exp will return inf. + # Multiplying with cb, which is 0.0 outside the range, will make the result NaN. + # This will cause NaN in acc, and hence NaN in dx and ddt. 
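+        # The first clause of the mask keeps only columns k >= m: dx at position m gathers gradient
+        # from positions at or after m, the transpose of the causal forward scan; the second clause
+        # is the in-range guard motivated in the comment above.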
+ mask = (k + offs_k[None, :] >= offs_m[:, None]) & (k + offs_k[None, :] < K_MAX) + cb = tl.where(mask, cb, 0.0) + cb = cb.to(dout_ptr.dtype.element_ty) + acc += tl.dot(cb, dout) + cb_ptrs += BLOCK_SIZE_K * stride_cb_csize_k + dout_ptrs += BLOCK_SIZE_K * stride_dout_seqlen + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + dx = acc * dt_m[:, None] + dx_ptr += pid_b * stride_dx_batch + pid_c * chunk_size * stride_dx_seqlen + pid_h * stride_dx_head + dx_ptrs = dx_ptr + (offs_m[:, None] * stride_dx_seqlen + offs_n[None, :] * stride_dx_hdim) + if HAS_D: + dout_res_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dout_res = tl.load(dout_res_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + dx += dout_res * D + tl.store(dx_ptrs, dx, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + ddt = tl.sum(acc * x, axis=1) + ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + + # if HAS_D: + # dout_new_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_csize + offs_n[None, :] * stride_dout_hdim) + # dout = tl.load(dout_new_ptrs, mask=(offs_m[:, None] < M) & (offs_n[None, :] < N), other=0.0).to(tl.float32) + # dD = tl.sum(x * dout, axis=0) + # tl.store(dD_ptr + offs_n * stride_dD_hdim, dD, mask=offs_n < N) + + +# Disabling HAS_DDA_CS for now since it's much slower +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 16}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 64}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 128}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 16}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 64}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 128}, num_stages=4, num_warps=8), + ], + key=['chunk_size', 'hdim'], +) +# @triton.heuristics({"BLOCK_SIZE_N": lambda args: max(triton.next_power_of_2(args["chunk_size"]), 16)}) +# @triton.heuristics({"BLOCK_SIZE_N": lambda args: 32}) +@triton.jit +def _chunk_scan_bwd_dcb_kernel( + # Pointers to matrices + x_ptr, dout_ptr, cb_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + dcb_ptr, ddA_cumsum_ptr, + # 
Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_n, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_dcb_batch, stride_dcb_chunk, stride_dcb_split, stride_dcb_group, stride_dcb_csize_m, stride_dcb_csize_n, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize_m, stride_ddA_cs_csize_n, + # Meta-parameters + HAS_DDA_CS: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_sg = tl.program_id(axis=2) + pid_s = pid_sg // ngroups + pid_g = pid_sg - pid_s * ngroups + num_pid_n = tl.cdiv(chunk_size, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_x_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dA_cs_head + if HAS_DDA_CS: + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + pid_g * stride_cb_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_ddA_cs_head + pid_m * stride_ddA_cs_csize_m + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_n * stride_dt_csize + if HAS_DDA_CS: + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_n * stride_ddA_cs_csize_n + + if pid_n * BLOCK_SIZE_N >= (pid_m + 1) * BLOCK_SIZE_M: + dcb_ptr += pid_b * stride_dcb_batch + pid_c * stride_dcb_chunk + pid_g * stride_dcb_group + pid_s * stride_dcb_split + dcb_ptrs = dcb_ptr + (offs_m[:, None] * stride_dcb_csize_m + offs_n[None, :] * stride_dcb_csize_n) + tl.store(dcb_ptrs, tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=dcb_ptr.dtype.element_ty), mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size)) + return + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + chunk_size_limit_n = min(chunk_size_limit, (pid_m + 1) * BLOCK_SIZE_M) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_DDA_CS: + cb = tl.load(cb_ptrs, 
mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size), other=0.0).to(tl.float32) + nheads_iter = min(nheads_per_program, nheads // ngroups - pid_s * nheads_per_program) + for h in range(nheads_iter): + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n), other=0.0) + dcb = tl.dot(dout, x) + dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size, other=0.0).to(tl.float32) + dcb *= dt_n + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + dA_cs_n = tl.load(dA_cumsum_ptr + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size_limit, other=0.0).to(tl.float32) + dcb *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + if HAS_DDA_CS: + tl.static_assert(not HAS_SEQ_IDX, "HAS_SEQ_IDX not supported with HAS_DDA_CS yet") + ddA_cs = dcb * cb + mask = offs_m[:, None] >= offs_n[None, :] + 1 + ddA_cs = tl.where(mask, ddA_cs, 0.0) + ddA_cs = tl.cumsum(ddA_cs, axis=1) + ddA_cs = tl.where(mask, ddA_cs, 0.0) + ddA_cs = tl.sum(ddA_cs, axis=0) + tl.store(ddA_cumsum_ptrs + stride_ddA_cs_csize_n, ddA_cs, mask=offs_n < chunk_size - 1) + tl.store(ddA_cumsum_ptr, 0.0) + acc += dcb + dout_ptrs += stride_dout_head + x_ptrs += stride_x_head + dt_ptrs += stride_dt_head + dA_cumsum_ptr += stride_dA_cs_head + if HAS_DDA_CS: + ddA_cumsum_ptr += stride_ddA_cs_head + ddA_cumsum_ptrs += stride_ddA_cs_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + if HAS_SEQ_IDX: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_n = tl.load(seq_idx_ptr + offs_n * stride_seq_idx_seqlen, mask=offs_n < chunk_size_limit, other=-2) + acc = tl.where(seq_idx_m[:, None] == seq_idx_n[None, :], acc, 0.0) + mask = offs_m[:, None] >= offs_n[None, :] + acc = tl.where(mask, acc, 0.0) + dcb_ptr += pid_b * stride_dcb_batch + pid_c * stride_dcb_chunk + pid_g * stride_dcb_group + pid_s * stride_dcb_split + dcb_ptrs = dcb_ptr + (offs_m[:, None] * stride_dcb_csize_m + offs_n[None, :] * stride_dcb_csize_n) + tl.store(dcb_ptrs, acc, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size)) + + +# Not numerically stable and should not be used. Leaving here for reference. 
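+# (That kernel recovers ddA directly as sum(dout * out) over the head dimension, optionally
+# minus dt * ddt, i.e. the formula noted in ChunkScanFn.backward; the
+# _chunk_scan_bwd_ddAcs_stable kernels below rebuild the same quantity from the chunked
+# cb / dA_cumsum terms instead.)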
+@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32}), + triton.Config({'BLOCK_SIZE_M': 64}), + triton.Config({'BLOCK_SIZE_M': 128}), + triton.Config({'BLOCK_SIZE_M': 256}), + ], + key=["chunk_size", "hdim"], +) +@triton.jit +def _chunk_scan_bwd_ddAcs_unstable_kernel( + # Pointers to matrices + dout_ptr, out_ptr, dt_ptr, ddt_ptr, x_ptr, D_ptr, + ddA_cumsum_ptr, dD_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_out_batch, stride_out_seqlen, stride_out_head, stride_out_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_D_head, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_csize, stride_dD_hdim, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + SUBTRACT_DDTDT: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + out_ptr += pid_b * stride_out_batch + pid_c * chunk_size * stride_out_seqlen + pid_h * stride_out_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + if HAS_D: + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_N) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + out_ptrs = out_ptr + (offs_m[:, None] * stride_out_seqlen + offs_n[None, :] * stride_out_hdim) + if HAS_D: + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + if D_HAS_HDIM: + dD_ptrs = dD_ptr + offs_n * stride_dD_hdim + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + out = tl.load(out_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if HAS_D: + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + dD = tl.sum(dout * x, axis=0) + tl.store(dD_ptrs, dD, mask=offs_n < hdim) + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + dD = tl.sum(dout * x) + tl.store(dD_ptr, dD) + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + out -= x * D + ddA_cs = tl.sum(dout * out, axis=1) + if SUBTRACT_DDTDT: + dt = tl.load(dt_ptr + offs_m * stride_dt_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + ddt = tl.load(ddt_ptr + offs_m * stride_ddt_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + ddA_cs -= 
dt * ddt + tl.store(ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size) + + +@triton.autotune( + configs=[ + # triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), + # triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 16}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 16}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64}, num_stages=4, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 128}, num_stages=4, num_warps=8), + ], + key=['chunk_size', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_ddAcs_stable_kernel_old( + # Pointers to matrices + x_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, cb_ptr, + ddAcs_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_n, + stride_ddAcs_batch, stride_ddAcs_chunk, stride_ddAcs_head, stride_ddAcs_csize_m, stride_ddAcs_csize_n, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(chunk_size, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_n * stride_dt_csize + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + + chunk_size_limit = min(chunk_size, 
seqlen - pid_c * chunk_size) + chunk_size_limit_n = min(chunk_size_limit, (pid_m + 1) * BLOCK_SIZE_M) + # Doing a matmul loop with cumsum later on will cause Triton to crash + # Instead we do just one big matmul + # acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + # for k in range(0, hdim, BLOCK_SIZE_K): + # dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim - k), other=0.0) + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim - k) & (offs_n[None, :] < chunk_size_limit), other=0.0) + # acc += tl.dot(dout, x) + # dout_ptrs += BLOCK_SIZE_K * stride_dout_hdim + # x_ptrs += BLOCK_SIZE_K * stride_x_hdim + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n), other=0.0) + acc = tl.dot(dout, x) + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size), other=0.0).to(tl.float32) + acc *= cb + dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size, other=0.0).to(tl.float32) + acc *= dt_n + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dA_cs_n = tl.load(dA_cumsum_ptr + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size, other=0.0).to(tl.float32) + acc *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + mask = offs_m[:, None] >= offs_n[None, :] + 1 + acc = tl.where(mask, acc, 0.0) + acc = tl.cumsum(acc, axis=1) + acc = tl.where(mask, acc, 0.0) + ddA_cs = tl.sum(acc, axis=0) + ddAcs_ptr += pid_b * stride_ddAcs_batch + pid_c * stride_ddAcs_chunk + pid_h * stride_ddAcs_head + pid_m * stride_ddAcs_csize_m + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + ddAcs_ptrs = ddAcs_ptr + offs_n * stride_ddAcs_csize_n + tl.store(ddAcs_ptrs + stride_ddAcs_csize_n, ddA_cs, mask=offs_n < chunk_size - 1) + tl.store(ddAcs_ptr, 0.0) + + # offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, 64) + # offs_k = tl.arange(0, BLOCK_SIZE_K) + # dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + # x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + # dt_ptrs = dt_ptr + offs_n * stride_dt_csize + # cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + + # chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + # chunk_size_limit_n = min(chunk_size_limit, (pid_m + 1) * BLOCK_SIZE_M) + # rowsum = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32) + # dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + # dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # ddAcs_ptr += pid_b * stride_ddAcs_batch + pid_c * stride_ddAcs_chunk + pid_h * stride_ddAcs_head + pid_m * stride_ddAcs_csize_m + # ddAcs_ptrs = ddAcs_ptr + offs_n * stride_ddAcs_csize_n + # for n in range(0, chunk_size_limit_n, 64): + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n - n), other=0.0) + # acc = tl.dot(dout, x) + # cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size - n), other=0.0).to(tl.float32) + # acc *= cb + # dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size - n, other=0.0).to(tl.float32) + # acc *= dt_n + # dA_cs_n = tl.load(dA_cumsum_ptr + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size - n, other=0.0).to(tl.float32) 
+ # acc *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + # mask = offs_m[:, None] >= offs_n[None, :] + 1 + n + # acc = tl.where(mask, acc, 0.0) + # acc = tl.cumsum(acc, axis=1) + # acc = tl.where(mask, acc, 0.0) + # ddA_cs = tl.sum(acc, axis=0) + # tl.store(ddAcs_ptrs, ddA_cs, mask=offs_n < chunk_size - 1 - n) + # # tl.store(ddAcs_ptr, 0.0) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4), + ], + key=['chunk_size', 'hdim'], +) +@triton.jit +def _chunk_scan_bwd_ddAcs_stable_kernel( + # Pointers to matrices + x_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, cb_ptr, + ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_n, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize_m, stride_ddA_cs_csize_n, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + pid_m * stride_ddA_cs_csize_m + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + x_ptrs = x_ptr + (offs_n[None, :] * stride_x_seqlen + offs_k[:, None] * stride_x_hdim) + dt_ptrs = dt_ptr + offs_n * stride_dt_csize + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_n[None, :] * stride_cb_csize_n) + ddAcs_ptrs = ddA_cumsum_ptr + offs_n * stride_ddA_cs_csize_n + tl.store(ddA_cumsum_ptr, 0.0) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + rowsum = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32) + dout = 
tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + # Actually hi is (pid_m + 1) * BLOCK_SIZE_M - 1 but subtracting 1 makes it slower + lo, hi = 0, (pid_m + 1) * BLOCK_SIZE_M + # lo, hi = 0, chunk_size + for start_n in range(lo, hi, BLOCK_SIZE_N): + start_n = tl.multiple_of(start_n, BLOCK_SIZE_N) + # Doing a matmul loop with cumsum later on will cause Triton to crash + # Instead we do just one big matmul + # acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + # for k in range(0, hdim, BLOCK_SIZE_K): + # dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim - k), other=0.0) + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim - k) & (offs_n[None, :] < chunk_size_limit), other=0.0) + # acc += tl.dot(dout, x) + # dout_ptrs += BLOCK_SIZE_K * stride_dout_hdim + # x_ptrs += BLOCK_SIZE_K * stride_x_hdim + # x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit_n), other=0.0) + x = tl.load(x_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < chunk_size_limit - start_n), other=0.0) + acc = tl.dot(dout, x) + dt_n = tl.load(dt_ptrs, mask=offs_n < chunk_size - start_n, other=0.0).to(tl.float32) + acc *= dt_n + # If there's seq_idx, we already zero'ed out cb[i, j] for seq_idx[i] != seq_idx[j] + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_n[None, :] < chunk_size - start_n), other=0.0).to(tl.float32) + acc *= cb + dA_cs_n = tl.load(dA_cumsum_ptr + start_n + offs_n * stride_dA_cs_csize, mask=offs_n < chunk_size - start_n, other=0.0).to(tl.float32) + acc *= tl.exp(dA_cs_m[:, None] - dA_cs_n[None, :]) + mask = offs_m[:, None] >= start_n + offs_n[None, :] + 1 + acc = tl.where(mask, acc, 0.0) + rowsum_new = rowsum + tl.sum(acc, axis=1) + acc = rowsum[:, None] + tl.cumsum(acc, axis=1) + rowsum = rowsum_new + acc = tl.where(mask, acc, 0.0) + ddA_cs = tl.sum(acc, axis=0) + tl.store(ddAcs_ptrs + stride_ddA_cs_csize_n, ddA_cs, mask=offs_n < chunk_size - start_n - 1) + x_ptrs += BLOCK_SIZE_N * stride_x_seqlen + dt_ptrs += BLOCK_SIZE_N * stride_dt_csize + cb_ptrs += BLOCK_SIZE_N * stride_cb_csize_n + ddAcs_ptrs += BLOCK_SIZE_N * stride_ddA_cs_csize_n + + # Need to zero out the rest, since we'll be summing the rows together + for start_n in range(hi, chunk_size, BLOCK_SIZE_N): + tl.store(ddAcs_ptrs + stride_ddA_cs_csize_n, tl.zeros((BLOCK_SIZE_N,), dtype=tl.float32), mask=offs_n < chunk_size - start_n - 1) + ddAcs_ptrs += BLOCK_SIZE_N * stride_ddA_cs_csize_n + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'dstate', 'hdim'], +) +@triton.jit +def 
_chunk_scan_bwd_ddAcs_prev_kernel( + # Pointers to matrices + dout_ptr, prev_states_ptr, C_ptr, dA_cumsum_ptr, seq_idx_ptr, + ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, dstate, hdim, + batch, seqlen, nchunks, nheads_ngroups_ratio, + # Strides + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_prev_states_batch, stride_prev_states_chunk, stride_prev_states_head, stride_prev_states_hdim, stride_prev_states_dstate, + stride_C_batch, stride_C_seqlen, stride_C_head, stride_C_dstate, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + prev_states_ptr += pid_b * stride_prev_states_batch + pid_c * stride_prev_states_chunk + pid_h * stride_prev_states_head + C_ptr += pid_b * stride_C_batch + pid_c * chunk_size * stride_C_seqlen + (pid_h // nheads_ngroups_ratio) * stride_C_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + dout_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_k[None, :] * stride_dout_hdim) + prev_states_ptrs = prev_states_ptr + (offs_n[None, :] * stride_prev_states_dstate + offs_k[:, None] * stride_prev_states_hdim) + C_ptrs = C_ptr + (offs_m[:, None] * stride_C_seqlen + offs_n[None, :] * stride_C_dstate) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + dout = tl.load(dout_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + prev_states = tl.load(prev_states_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0) + prev_states = prev_states.to(dout_ptrs.dtype.element_ty) + acc = tl.dot(dout, prev_states) + c = tl.load(C_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + ddA_cs = tl.sum(acc * c, axis=1) + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_m) + if HAS_SEQ_IDX: + seq_idx_prev = tl.load(seq_idx_ptr - stride_seq_idx_seqlen, mask=pid_c >= 1, other=0) + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + scale = tl.where(seq_idx_m == seq_idx_prev, tl.exp(dA_cs_m), 0.0) + ddA_cs *= scale + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + + +def 
_chunk_scan_fwd(cb, x, dt, dA_cumsum, C, states, D=None, z=None, seq_idx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = C.shape + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert states.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + # Allocates output. + out = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + if z is not None: + out_x = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + assert out_x.stride() == out.stride() + else: + out_x = None + grid = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + z_strides = ((z.stride(0), z.stride(1), z.stride(2), z.stride(3)) + if z is not None else (0, 0, 0, 0)) + _chunk_scan_fwd_kernel[grid]( + cb, x, z, out, out_x, dt, dA_cumsum, seq_idx, C, states, D, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + z_strides[0], z_strides[1], z_strides[2], z_strides[3], + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), states.stride(4), + D.stride(0) if D is not None else 0, + True, + D is not None, + D.dim() == 2 if D is not None else True, + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + HAS_Z=z is not None, + HAS_SEQ_IDX=seq_idx is not None, + IS_TRITON_22=TRITON_22, + ) + return out, out_x + + +def _chunk_scan_fwd_wip(cb, x, dt, dA_cumsum, C, B, states, D=None, z=None, seq_idx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = C.shape + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + assert B.shape == C.shape + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert states.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + # Allocates output. 
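+    # As in _chunk_scan_fwd above: out_x is only allocated when z is given, and the assert
+    # below checks that it shares out's strides.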
+ out = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + if z is not None: + out_x = torch.empty(batch, seqlen, nheads, headdim, device=x.device, dtype=x.dtype) + assert out_x.stride() == out.stride() + else: + out_x = None + grid = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_N']), batch * nchunks, nheads) + z_strides = ((z.stride(0), z.stride(1), z.stride(2), z.stride(3)) + if z is not None else (0, 0, 0, 0)) + _chunk_scan_fwd_kernel_wip[grid]( + cb, x, z, out, out_x, dt, dA_cumsum, seq_idx, C, B, states, D, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + z_strides[0], z_strides[1], z_strides[2], z_strides[3], + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), states.stride(4), + D.stride(0) if D is not None else 0, + D is not None, + D.dim() == 2 if D is not None else True, + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + BLOCK_SIZE_M=128, + HAS_Z=z is not None, + HAS_SEQ_IDX=seq_idx is not None, + ) + return out, out_x + + +def _chunk_scan_bwd_dz(x, z, out, dout, chunk_size, has_ddAcs=True, D=None, dz=None, recompute_output=False): + batch, seqlen, nheads, headdim = x.shape + assert z.shape == x.shape + assert out.shape == x.shape + assert dout.shape == out.shape + nchunks = math.ceil(seqlen / chunk_size) + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert D.stride(-1) == 1 + if has_ddAcs: + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=x.device, dtype=torch.float32) + if D is not None: + BLOCK_SIZE_min = 32 + dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_min), batch, nchunks, nheads, + headdim if D.dim() == 2 else 1, device=D.device, dtype=torch.float32) + else: + dD = None + if dz is not None: + assert dz.shape == z.shape + else: + dz = torch.empty_like(z) + if recompute_output: + outz = torch.empty_like(x) + dout_x = torch.empty_like(dout) + dD_strides = ((dD.stride(0), dD.stride(1), dD.stride(2), dD.stride(3), dD.stride(4)) + if D is not None else (0, 0, 0, 0, 0)) + grid_dz = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_dz_kernel[grid_dz]( + dout, out, z, x, D, outz if recompute_output else None, + dz, dout_x, dD, ddA_cumsum if has_ddAcs else None, + chunk_size, headdim, + batch, seqlen, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + z.stride(0), z.stride(1), z.stride(2), z.stride(3), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + D.stride(0) if D is not None else 0, + *((outz.stride(0), outz.stride(1), outz.stride(2), outz.stride(3)) if recompute_output else (0, 0, 0, 0)), + dz.stride(0), dz.stride(1), dz.stride(2), dz.stride(3), + dout_x.stride(0), dout_x.stride(1), dout_x.stride(2), dout_x.stride(3), + dD_strides[1], dD_strides[2], dD_strides[3], dD_strides[0], dD_strides[4], + *((ddA_cumsum.stride(0), 
ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3)) + if has_ddAcs else (0, 0, 0, 0)), + D is not None, + D.dim() == 2 if D is not None else True, + has_ddAcs, + BLOCK_SIZE_N=max(triton.next_power_of_2(headdim), 16), + RECOMPUTE_OUTPUT=recompute_output, + ) + if D is not None: + BLOCK_SIZE_actual = _chunk_scan_bwd_dz_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + if D.dim() == 1: + dD = rearrange(dD, "h 1 -> h") + return_vals = (dz, dout_x, dD, ddA_cumsum) if has_ddAcs else (dz, dout_x, dD) + return return_vals if not recompute_output else (*return_vals, outz) + + +def _chunk_scan_bwd_dstates(C, dA_cumsum, dout, seq_idx=None, dtype=None): + batch, seqlen, nheads, headdim = dout.shape + _, _, nchunks, chunk_size = dA_cumsum.shape + _, _, ngroups, dstate = C.shape + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + dtype = C.dtype if dtype is None else dtype + dprev_states = torch.empty(batch, nchunks, nheads, headdim, dstate, device=C.device, dtype=dtype) + grid_dstates = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(C.device.index): + _chunk_scan_bwd_dstates_kernel[grid_dstates]( + dout, C, dprev_states, dA_cumsum, seq_idx, + headdim, dstate, chunk_size, + batch, seqlen, nchunks, nheads // ngroups, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + dprev_states.stride(0), dprev_states.stride(1), dprev_states.stride(2), dprev_states.stride(3), dprev_states.stride(4), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + HAS_SEQ_IDX=seq_idx is not None, + ) + return dprev_states + + +def _chunk_scan_bwd_dC(prev_states, dA_cumsum, dout, seq_idx=None, C=None, ngroups=1): + batch, nchunks, nheads, headdim, dstate = prev_states.shape + _, seqlen, _, _ = dout.shape + _, _, _, chunk_size = dA_cumsum.shape + assert prev_states.shape == (batch, nchunks, nheads, headdim, dstate) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == (batch, seqlen, nheads, headdim) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if C is not None: + assert C.shape == (batch, seqlen, ngroups, dstate) + C_strides = (C.stride(0), C.stride(1), C.stride(2), C.stride(3)) + ddA_cumsum_prev = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + ddA_cumsum_prev_strides = (ddA_cumsum_prev.stride(0), ddA_cumsum_prev.stride(2), ddA_cumsum_prev.stride(1), ddA_cumsum_prev.stride(3)) + else: + C_strides = (0, 0, 0, 0) + ddA_cumsum_prev = None + ddA_cumsum_prev_strides = (0, 0, 0, 0) + nheads_ngroups_ratio = nheads // ngroups + sm_count = torch.cuda.get_device_properties(dout.device).multi_processor_count + nheads_per_program = max(min(math.ceil(batch * nchunks * nheads / sm_count), nheads_ngroups_ratio), 1) + nsplits = triton.cdiv(nheads_ngroups_ratio, nheads_per_program) + dC = torch.empty(batch, seqlen, nsplits, ngroups, dstate, device=dout.device, dtype=torch.float32) + grid_dc = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) 
* triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nsplits * ngroups) + with torch.cuda.device(dout.device.index): + _chunk_scan_bwd_dc_kernel[grid_dc]( + dout, prev_states, C, dA_cumsum, seq_idx, dC, ddA_cumsum_prev, + chunk_size, dstate, headdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + prev_states.stride(0), prev_states.stride(1), prev_states.stride(2), prev_states.stride(3), prev_states.stride(4), + *C_strides, + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dC.stride(0), dC.stride(1), dC.stride(2), dC.stride(3), dC.stride(4), + *ddA_cumsum_prev_strides, + HAS_DDA_CS=ddA_cumsum_prev is not None, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + dC = dC.sum(2) + return dC if C is None else (dC, ddA_cumsum_prev) + + +def _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=None, CB=None, ngroups=1): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dout.shape == x.shape + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if CB is not None: + assert CB.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + CB_strides = (CB.stride(0), CB.stride(1), CB.stride(2), CB.stride(3), CB.stride(4)) + BLOCK_SIZE_M_min = 16 + ddA_cumsum = torch.empty(batch, nheads, nchunks, triton.cdiv(chunk_size, BLOCK_SIZE_M_min), + chunk_size, device=x.device, dtype=torch.float32) + ddA_cumsum_strides = (ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), ddA_cumsum.stride(4)) + else: + CB_strides = (0, 0, 0, 0, 0) + ddA_cumsum = None + ddA_cumsum_strides = (0, 0, 0, 0, 0) + nheads_ngroups_ratio = nheads // ngroups + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + nheads_per_program = max(min(math.ceil(batch * nchunks * nheads / sm_count), nheads_ngroups_ratio), 1) + nsplits = triton.cdiv(nheads_ngroups_ratio, nheads_per_program) + dcb = torch.empty(batch, nchunks, nsplits, ngroups, chunk_size, chunk_size, device=x.device, dtype=torch.float32) + grid_dcb = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(chunk_size, META['BLOCK_SIZE_N']), + batch * nchunks, nsplits * ngroups) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_dcb_kernel[grid_dcb]( + x, dout, CB, dt, dA_cumsum, seq_idx, dcb, ddA_cumsum, + chunk_size, headdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + *CB_strides, + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dcb.stride(0), dcb.stride(1), dcb.stride(2), dcb.stride(3), dcb.stride(4), dcb.stride(5), + *ddA_cumsum_strides, + HAS_DDA_CS=ddA_cumsum is not None, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + dcb = dcb.sum(2) + if ddA_cumsum is not None: + BLOCK_SIZE_M_actual = _chunk_scan_bwd_dcb_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_M_actual - 1) // BLOCK_SIZE_M_actual + ddA_cumsum = 
ddA_cumsum[:, :, :, :n_valid_blocks].sum(dim=3) + return dcb if CB is None else (dcb, ddA_cumsum) + + +def _chunk_scan_bwd_dx(cb, x, dt, dA_cumsum, dout, D=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + ngroups = cb.shape[2] + assert nheads % ngroups == 0 + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dout.shape == x.shape + # if D is not None: + # BLOCK_SIZE_M_min = 32 + # dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_M_min), batch, nchunks, nheads, headdim, device=D.device, dtype=torch.float32) + # else: + # dD = None + dx = torch.empty_like(x) + ddt = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + grid_dx = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_dx_kernel[grid_dx]( + x, cb, dout, dt, dA_cumsum, D, dx, ddt, # dD, + chunk_size, headdim, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(-1), cb.stride(-2), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + D.stride(0) if D is not None else 0, + dx.stride(0), dx.stride(1), dx.stride(2), dx.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + # dD.stride(1) if dD is not None else 0, dD.stride(2) if dD is not None else 0, dD.stride(3) if dD is not None else 0, dD.stride(4) if dD is not None else 0, dD.stride(0) if dD is not None else 0, + D is not None, + D.dim() == 2 if D is not None else True, + ) + # if D is not None: + # BLOCK_SIZE_actual = _chunk_scan_bwd_dx_kernel.best_config.kwargs["BLOCK_SIZE_M"] + # n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + # dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + return dx, ddt.to(dtype=dt.dtype) + + +def _chunk_scan_bwd_ddAcs_unstable(x, dt, out, dout, ddt, D=None, subtract_ddtdt=True): + """Not numerically stable and should not be used. Leaving here for reference. 
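+    Computes ddA_cumsum as the sum over headdim of dout * out at each position (removing the
+    x * D contribution from out when D is given), optionally minus ddt * dt; this is the
+    formula noted in ChunkScanFn.backward.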
+ """ + + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert ddt.shape == dt.shape + assert out.shape == x.shape + assert dout.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + ddA_cumsum = torch.empty_like(dt) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + if D is not None: # Triton gives wrong results if we write to the same location + BLOCK_SIZE_min = 32 + dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_min), batch, nchunks, nheads, + headdim if D.dim() == 2 else 1, device=D.device, dtype=torch.float32) + else: + dD = None + dD_strides = ((dD.stride(0), dD.stride(1), dD.stride(2), dD.stride(3), dD.stride(4)) + if D is not None else (0, 0, 0, 0, 0)) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_ddAcs_unstable_kernel[grid_ddtcs]( + dout, out, dt, ddt, x, D, ddA_cumsum, dD, + chunk_size, headdim, + batch, seqlen, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + D.stride(0) if D is not None else 0, + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), + dD_strides[1], dD_strides[2], dD_strides[3], dD_strides[0], dD_strides[4], + D is not None, + D.dim() == 2 if D is not None else True, + subtract_ddtdt, + BLOCK_SIZE_N=max(triton.next_power_of_2(headdim), 16), + ) + if D is not None: + BLOCK_SIZE_actual = _chunk_scan_bwd_ddAcs_unstable_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + if D.dim() == 1: + dD = rearrange(dD, "h 1 -> h") + return ddA_cumsum, dD + + +def _chunk_scan_bwd_ddAcs_stable_old(x, dt, dA_cumsum, dout, cb): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == x.shape + assert dA_cumsum.shape == dt.shape + ngroups = cb.shape[2] + assert nheads % ngroups == 0 + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + BLOCK_SIZE_M_min = 16 + ddA_cumsum = torch.empty(batch, nheads, nchunks, triton.cdiv(chunk_size, BLOCK_SIZE_M_min), + chunk_size, device=x.device, dtype=torch.float32) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_ddAcs_stable_kernel_old[grid_ddtcs]( + x, dout, dt, dA_cumsum, cb, ddA_cumsum, + chunk_size, headdim, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), ddA_cumsum.stride(4), + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + BLOCK_SIZE_N=max(triton.next_power_of_2(chunk_size), 16), + ) + BLOCK_SIZE_M_actual = 
_chunk_scan_bwd_ddAcs_stable_kernel_old.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_M_actual - 1) // BLOCK_SIZE_M_actual + ddA_cumsum = ddA_cumsum[:, :, :, :n_valid_blocks].sum(dim=3) + return ddA_cumsum + + +def _chunk_scan_bwd_ddAcs_stable(x, dt, dA_cumsum, dout, cb): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == x.shape + assert dA_cumsum.shape == dt.shape + ngroups = cb.shape[2] + assert nheads % ngroups == 0 + assert cb.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + BLOCK_SIZE_M_min = 32 + ddA_cumsum = torch.empty(batch, nheads, nchunks, triton.cdiv(chunk_size, BLOCK_SIZE_M_min), + chunk_size, device=x.device, dtype=torch.float32) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']), batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_bwd_ddAcs_stable_kernel[grid_ddtcs]( + x, dout, dt, dA_cumsum, cb, ddA_cumsum, + chunk_size, headdim, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + cb.stride(0), cb.stride(1), cb.stride(2), cb.stride(3), cb.stride(4), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), ddA_cumsum.stride(4), + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + BLOCK_SIZE_M_actual = _chunk_scan_bwd_ddAcs_stable_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_M_actual - 1) // BLOCK_SIZE_M_actual + ddA_cumsum = ddA_cumsum[:, :, :, :n_valid_blocks].sum(dim=3) + return ddA_cumsum + + +def _chunk_scan_bwd_ddAcs_prev(prev_states, C, dout, dA_cumsum, seq_idx=None): + batch, nchunks, nheads, headdim, dstate = prev_states.shape + _, seqlen, _, _ = dout.shape + _, _, _, chunk_size = dA_cumsum.shape + assert prev_states.shape == (batch, nchunks, nheads, headdim, dstate) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert dout.shape == (batch, seqlen, nheads, headdim) + ngroups = C.shape[2] + assert nheads % ngroups == 0 + assert C.shape == (batch, seqlen, ngroups, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + ddA_cumsum_prev = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + grid_ddAcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(dout.device.index): + _chunk_scan_bwd_ddAcs_prev_kernel[grid_ddAcs]( + dout, prev_states, C, dA_cumsum, seq_idx, ddA_cumsum_prev, + chunk_size, dstate, headdim, + batch, seqlen, nchunks, nheads // ngroups, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + prev_states.stride(0), prev_states.stride(1), prev_states.stride(2), prev_states.stride(3), prev_states.stride(4), + C.stride(0), C.stride(1), C.stride(2), C.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + ddA_cumsum_prev.stride(0), ddA_cumsum_prev.stride(2), ddA_cumsum_prev.stride(1), ddA_cumsum_prev.stride(3), + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) 
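+    # Each program atomically adds its dstate-block contribution into ddA_cumsum_prev, which
+    # the autotune pre_hook (init_to_zero) zeroes beforehand.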
+ return ddA_cumsum_prev + + +class ChunkScanFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, B, C, x, dt, dA_cumsum, prev_states, D=None, z=None): + # Check constraints. + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + assert B.shape == (batch, seqlen, ngroups, dstate) + _, _, nchunks, chunk_size = dt.shape + assert seqlen == nchunks * chunk_size + assert C.shape == B.shape + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + assert prev_states.shape == (batch, nchunks, nheads, headdim, dstate) + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if x.stride(-1) != 1 and x.stride(1) != 1: # Either M or K dimension should be contiguous + x = x.contiguous() + if z is not None and z.stride(-1) != 1 and z.stride(1) != 1: # Either M or K dimension should be contiguous + z = z.contiguous() + if D is not None and D.stride(-1) != 1: + D = D.contiguous() + CB = _bmm_chunk_fwd(C, B, chunk_size) + out, out_x = _chunk_scan_fwd(CB, x, dt, dA_cumsum, C, prev_states, D=D, z=z) + ctx.save_for_backward(out if z is None else out_x, B, C, CB, x, dt, dA_cumsum, prev_states, D, z) + return out + + @staticmethod + def backward(ctx, dout): + if dout.stride(-1) != 1: + dout = dout.contiguous() + out, B, C, CB, x, dt, dA_cumsum, prev_states, D, z = ctx.saved_tensors + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert dout.shape == (batch, seqlen, nheads, headdim) + if z is not None: + dz, dout, dD, ddA_cumsum = _chunk_scan_bwd_dz(x, z, out, dout, chunk_size=chunk_size, D=D) + else: + dz = None + dprev_states = _chunk_scan_bwd_dstates(C, dA_cumsum, dout, dtype=prev_states.dtype) + dC = _chunk_scan_bwd_dC(prev_states, dA_cumsum, dout, ngroups=ngroups) + dC = dC.to(C.dtype) + dCB = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, ngroups=ngroups) + dCB = dCB.to(CB.dtype) + dB = _bmm_chunk_bwd(C, dCB) + dC = _bmm_chunk_bwd(B, rearrange(dCB, "... l s -> ... s l"), residual=dC) + dx, ddt = _chunk_scan_bwd_dx(CB, x, dt, dA_cumsum, dout, D=D) + # Formula for ddA_cumsum, assuming out is the output of the forward pass before adding x * D. + # ddA_cumsum = torch.einsum("bclhp,bclhp->bhcl", out.float(), dout.float()) - ddt * dt + if z is not None: + ddA_cumsum -= ddt * dt + else: # If z is not None, we already calculated ddA_cumsum and dD when computing dz + ddA_cumsum, dD = _chunk_scan_bwd_ddAcs_unstable(x, dt, out, dout, ddt, D=D) + ddA_cumsum = ddA_cumsum.to(dA_cumsum.dtype) + return dB, dC, dx, ddt, ddA_cumsum, dprev_states, dD, dz + + +def chunk_scan(B, C, x, dt, dA_cumsum, prev_states, D=None, z=None): + """ + prev_states contains the initial_states at index 0, and the state for the next-to-last chunk at index -1. 
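+    Each chunk is processed independently given its entry of prev_states: the causal
+    within-chunk term (C B^T weighted by dt and exp(dA_cumsum[l] - dA_cumsum[s])) is added to
+    C @ prev_states scaled by exp(dA_cumsum), as in chunk_scan_ref below.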
+ Argument: + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + prev_states: (batch, nchunks, nheads, headdim, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + return ChunkScanFn.apply(B, C, x, dt, dA_cumsum, prev_states, D, z) + + +def chunk_scan_ref(B, C, x, dt, dA_cumsum, prev_states, D=None, z=None): + """ + Argument: + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + prev_states: (batch, nchunks, nheads, headdim, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + assert B.shape == (batch, seqlen, ngroups, dstate) + _, _, nchunks, chunk_size = dt.shape + assert seqlen == nchunks * chunk_size + assert C.shape == B.shape + B = repeat(B, "b l g d -> b l (g h) d", h=nheads // ngroups) + C = repeat(C, "b l g d -> b l (g h) d", h=nheads // ngroups) + CB = torch.einsum("bclhn,bcshn->bchls", rearrange(C, "b (c l) h n -> b c l h n", c=nchunks), + rearrange(B, "b (c s) h n -> b c s h n", c=nchunks)) + # (batch, nheads, nchunks, chunksize, chunksize) + dt_segment_sum = dA_cumsum[:, :, :, :, None] - dA_cumsum[:, :, :, None, :] + decay = torch.exp(dt_segment_sum) + scores_decay = CB * rearrange(decay, "b h c l s -> b c h l s") + causal_mask = torch.tril(torch.ones(chunk_size, chunk_size, device=x.device, dtype=bool), diagonal=0) + scores_decay = scores_decay.masked_fill(~causal_mask, 0) + out = torch.einsum('bchls,bhcs,bcshp->bclhp', scores_decay.to(x.dtype), dt.to(x.dtype), + rearrange(x, "b (c s) h p -> b c s h p", c=nchunks)) + state_decay_out = torch.exp(rearrange(dA_cumsum, "b h c l -> b c l h 1")) + out_prev = torch.einsum('bclhn,bchpn->bclhp', rearrange(C, "b (c l) h n -> b c l h n", c=nchunks), + prev_states.to(C.dtype)) * state_decay_out + out = out + out_prev + out = rearrange(out, "b c l h p -> b (c l) h p") + if D is not None: + if D.dim() == 1: + D = rearrange(D, "h -> h 1") + out = out + x * D + return out if z is None else out * F.silu(z) diff --git a/mamba/mamba_ssm/ops/triton/ssd_chunk_state.py b/mamba/mamba_ssm/ops/triton/ssd_chunk_state.py new file mode 100644 index 0000000000000000000000000000000000000000..c4971c5f5a6fdc0fca92e115ae2c0b9319b8107a --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/ssd_chunk_state.py @@ -0,0 +1,988 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
+ +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.softplus import softplus + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_H': 1}), + triton.Config({'BLOCK_SIZE_H': 2}), + triton.Config({'BLOCK_SIZE_H': 4}), + triton.Config({'BLOCK_SIZE_H': 8}), + triton.Config({'BLOCK_SIZE_H': 16}), + triton.Config({'BLOCK_SIZE_H': 32}), + triton.Config({'BLOCK_SIZE_H': 64}), + ], + key=['chunk_size', 'nheads'], +) +@triton.jit +def _chunk_cumsum_fwd_kernel( + # Pointers to matrices + dt_ptr, A_ptr, dt_bias_ptr, dt_out_ptr, dA_cumsum_ptr, + # Matrix dimension + batch, seqlen, nheads, chunk_size, + dt_min, dt_max, + # Strides + stride_dt_batch, stride_dt_seqlen, stride_dt_head, + stride_A_head, + stride_dt_bias_head, + stride_dt_out_batch, stride_dt_out_chunk, stride_dt_out_head, stride_dt_out_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + BLOCK_SIZE_H: tl.constexpr, BLOCK_SIZE_CHUNK: tl.constexpr, +): + pid_b = tl.program_id(axis=0) + pid_c = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + dt_ptr += pid_b * stride_dt_batch + pid_c * chunk_size * stride_dt_seqlen + dt_out_ptr += pid_b * stride_dt_out_batch + pid_c * stride_dt_out_chunk + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + + offs_h = pid_h * BLOCK_SIZE_H + tl.arange(0, BLOCK_SIZE_H) + offs_c = tl.arange(0, BLOCK_SIZE_CHUNK) + dt_ptrs = dt_ptr + (offs_h[:, None] * stride_dt_head + offs_c[None, :] * stride_dt_seqlen) + A_ptrs = A_ptr + offs_h * stride_A_head + dt_out_ptrs = dt_out_ptr + (offs_h[:, None] * stride_dt_out_head + offs_c[None, :] * stride_dt_out_csize) + dA_cs_ptrs = dA_cumsum_ptr + (offs_h[:, None] * stride_dA_cs_head + offs_c[None, :] * stride_dA_cs_csize) + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + dt = tl.load(dt_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt_bias = tl.load(dt_bias_ptr + offs_h * stride_dt_bias_head, mask=offs_h < nheads, other=0.0).to(tl.float32) + dt += dt_bias[:, None] + if DT_SOFTPLUS: + dt = softplus(dt) + # As of Triton 2.2.0, tl.clamp is not available yet + # dt = tl.clamp(dt, dt_min, dt_max) + dt = tl.minimum(tl.maximum(dt, dt_min), dt_max) + dt = tl.where((offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), dt, 0.0) + tl.store(dt_out_ptrs, dt, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size)) + A = tl.load(A_ptrs, mask=offs_h < nheads, other=0.0).to(tl.float32) + dA = dt * A[:, None] + dA_cs = tl.cumsum(dA, axis=1) + tl.store(dA_cs_ptrs, dA_cs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_H': 1}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 2}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 4}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 8}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 16}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + 
triton.Config({'BLOCK_SIZE_H': 32}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + triton.Config({'BLOCK_SIZE_H': 64}, pre_hook=init_to_zero(["dA_ptr", "ddt_bias_ptr"])), + ], + key=['chunk_size', 'nheads'], +) +@triton.jit +def _chunk_cumsum_bwd_kernel( + # Pointers to matrices + ddA_ptr, ddt_out_ptr, dt_ptr, A_ptr, dt_bias_ptr, + ddt_ptr, dA_ptr, ddt_bias_ptr, + # Matrix dimensions + batch, seqlen, nheads, chunk_size, + dt_min, dt_max, + # Strides + stride_ddA_batch, stride_ddA_chunk, stride_ddA_head, stride_ddA_csize, + stride_ddt_out_batch, stride_ddt_out_chunk, stride_ddt_out_head, stride_ddt_out_csize, + stride_dt_batch, stride_dt_seqlen, stride_dt_head, + stride_A_head, + stride_dt_bias_head, + stride_ddt_batch, stride_ddt_seqlen, stride_ddt_head, + stride_dA_head, + stride_ddt_bias_head, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + BLOCK_SIZE_H: tl.constexpr, BLOCK_SIZE_CHUNK: tl.constexpr, +): + pid_b = tl.program_id(axis=0) + pid_c = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + ddt_out_ptr += pid_b * stride_ddt_out_batch + pid_c * stride_ddt_out_chunk + ddA_ptr += pid_b * stride_ddA_batch + pid_c * stride_ddA_chunk + dt_ptr += pid_b * stride_dt_batch + pid_c * chunk_size * stride_dt_seqlen + ddt_ptr += pid_b * stride_ddt_batch + pid_c * chunk_size * stride_ddt_seqlen + + offs_h = pid_h * BLOCK_SIZE_H + tl.arange(0, BLOCK_SIZE_H) + offs_c = tl.arange(0, BLOCK_SIZE_CHUNK) + ddt_out_ptrs = ddt_out_ptr + (offs_h[:, None] * stride_ddt_out_head + offs_c[None, :] * stride_ddt_out_csize) + ddA_ptrs = ddA_ptr + (offs_h[:, None] * stride_ddA_head + offs_c[None, :] * stride_ddA_csize) + dt_ptrs = dt_ptr + (offs_h[:, None] * stride_dt_head + offs_c[None, :] * stride_dt_seqlen) + ddt_ptrs = ddt_ptr + (offs_h[:, None] * stride_ddt_head + offs_c[None, :] * stride_ddt_seqlen) + A_ptrs = A_ptr + offs_h * stride_A_head + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + ddA = tl.load(ddA_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + ddt_out = tl.load(ddt_out_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + A = tl.load(A_ptrs, mask=offs_h < nheads, other=0.0).to(tl.float32) + ddt = ddA * A[:, None] + ddt_out + dt = tl.load(dt_ptrs, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt_bias = tl.load(dt_bias_ptr + offs_h * stride_dt_bias_head, mask=offs_h < nheads, other=0.0).to(tl.float32) + dt += dt_bias[:, None] + if DT_SOFTPLUS: + dt_presoftplus = dt + dt = softplus(dt) + clamp_mask = (dt < dt_min) | (dt > dt_max) + # As of Triton 2.2.0, tl.clamp is not available yet + # dt = tl.clamp(dt, dt_min, dt_max) + dt = tl.minimum(tl.maximum(dt, dt_min), dt_max) + dt = tl.where((offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), dt, 0.0) + ddt = tl.where((offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit), ddt, 0.0) + ddt = tl.where(clamp_mask, 0.0, ddt) + if DT_SOFTPLUS: + ddt = tl.where(dt_presoftplus <= 20.0, ddt * tl.sigmoid(dt_presoftplus), ddt) + tl.store(ddt_ptrs, ddt, mask=(offs_h[:, None] < nheads) & (offs_c[None, :] < chunk_size_limit)) + dA = tl.sum(ddA * dt, axis=1) + tl.atomic_add(dA_ptr + offs_h * stride_dA_head, dA, mask=offs_h < nheads) + if HAS_DT_BIAS: + ddt_bias = tl.sum(ddt, axis=1) + tl.atomic_add(ddt_bias_ptr + offs_h * stride_ddt_bias_head, ddt_bias, mask=offs_h < nheads) + + +@triton.autotune( + 
configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['hdim', 'dstate', 'chunk_size'], +) +@triton.jit +def _chunk_state_fwd_kernel( + # Pointers to matrices + x_ptr, b_ptr, states_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + # Matrix dimensions + hdim, dstate, chunk_size, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_states_batch, stride_states_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_hdim + offs_k[None, :] * stride_x_seqlen) + b_ptrs = b_ptr + (offs_n[None, :] * stride_b_dstate + offs_k[:, None] * stride_b_seqlen) + dt_ptrs = dt_ptr + offs_k * stride_dt_csize + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs = seq_idx_ptr + offs_k * stride_seq_idx_seqlen + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + if HAS_SEQ_IDX: + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, chunk_size_limit, BLOCK_SIZE_K): + x = tl.load(x_ptrs, 
mask=(offs_m[:, None] < hdim) & (offs_k[None, :] < chunk_size_limit - k), other=0.0) + b = tl.load(b_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_k = tl.load(seq_idx_ptrs, mask=offs_k < chunk_size_limit - k, other=-1) + dt_k = tl.load(dt_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp((dA_cs_last - dA_cs_k)) * dt_k + else: + scale = tl.where(seq_idx_k == seq_idx_last, tl.exp((dA_cs_last - dA_cs_k)) * dt_k, 0.0) + b *= scale[:, None] + b = b.to(x_ptr.dtype.element_ty) + acc += tl.dot(x, b) + x_ptrs += BLOCK_SIZE_K * stride_x_seqlen + b_ptrs += BLOCK_SIZE_K * stride_b_seqlen + dt_ptrs += BLOCK_SIZE_K * stride_dt_csize + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + if HAS_SEQ_IDX: + seq_idx_ptrs += BLOCK_SIZE_K * stride_seq_idx_seqlen + states = acc.to(states_ptr.dtype.element_ty) + + states_ptr += pid_b * stride_states_batch + pid_c * stride_states_chunk + pid_h * stride_states_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + states_ptrs = states_ptr + (offs_m[:, None] * stride_states_hdim + offs_n[None, :] * stride_states_dstate) + c_mask = (offs_m[:, None] < hdim) & (offs_n[None, :] < dstate) + tl.store(states_ptrs, states, mask=c_mask) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr", "ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_state_bwd_dx_kernel( + # Pointers to matrices + x_ptr, b_ptr, dstates_ptr, dt_ptr, dA_cumsum_ptr, + dx_ptr, ddt_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dstates_batch, stride_dstates_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_dt_batch, 
stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_dx_batch, stride_dx_seqlen, stride_dx_head, stride_dx_hdim, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + pid_h * stride_states_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + # Faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + offs_k = tl.arange(0, BLOCK_SIZE_DSTATE if BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_k[None, :] * stride_b_dstate) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_states_hdim + offs_k[:, None] * stride_states_dstate) + if BLOCK_SIZE_DSTATE <= 128: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc = tl.dot(b, dstates) + else: + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, dstate, BLOCK_SIZE_K): + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate - k), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc += tl.dot(b, dstates) + b_ptrs += BLOCK_SIZE_K * stride_b_dstate + dstates_ptrs += BLOCK_SIZE_K * stride_states_dstate + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + acc *= tl.exp(dA_cs_last - dA_cs_m)[:, None] + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & 
(offs_n[None, :] < hdim), other=0.0).to(tl.float32) + ddt = tl.sum(acc * x, axis=1) + ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + ddA_cs = -(ddt * dt_m) + ddA_cs_last = -tl.sum(ddA_cs) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + tl.atomic_add(ddA_cumsum_ptr + (chunk_size - 1) * stride_ddA_cs_csize, ddA_cs_last) + + dx = (acc * dt_m[:, None]).to(dx_ptr.dtype.element_ty) + dx_ptr += pid_b * stride_dx_batch + pid_c * chunk_size * stride_dx_seqlen + pid_h * stride_dx_head + dx_ptrs = dx_ptr + (offs_m[:, None] * stride_dx_seqlen + offs_n[None, :] * stride_dx_hdim) + tl.store(dx_ptrs, dx, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'dstate', 'hdim'], +) +@triton.jit +def _chunk_state_bwd_db_kernel( + # Pointers to matrices + x_ptr, dstates_ptr, b_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + db_ptr, ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, dstate, hdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_dstates_batch, stride_dstates_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_db_batch, stride_db_seqlen, stride_db_split, stride_db_group, stride_db_dstate, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_DDA_CS: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_sg = tl.program_id(axis=2) + pid_s = pid_sg // ngroups + pid_g = pid_sg - pid_s * ngroups + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_x_head + db_ptr += pid_b * stride_db_batch + pid_c * chunk_size * stride_db_seqlen + pid_g * 
stride_db_group + pid_s * stride_db_split + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_states_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_dA_cs_head + if HAS_DDA_CS: + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + pid_g * stride_b_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + (pid_g * (nheads // ngroups) + pid_s * nheads_per_program) * stride_ddA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_k[None, :] * stride_x_hdim) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_states_dstate + offs_k[:, None] * stride_states_hdim) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dA_cumsum_ptrs = dA_cumsum_ptr + offs_m * stride_dA_cs_csize + if HAS_DDA_CS: + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_n[None, :] * stride_b_dstate) + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + if HAS_DDA_CS: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + if HAS_SEQ_IDX: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + nheads_iter = min(nheads_per_program, nheads // ngroups - pid_s * nheads_per_program) + for h in range(nheads_iter): + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < hdim), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0) + dstates = dstates.to(x_ptrs.dtype.element_ty) + db = tl.dot(x, dstates) + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dA_cs_m = tl.load(dA_cumsum_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_last - dA_cs_m) + else: + scale = tl.where(seq_idx_m == seq_idx_last, tl.exp(dA_cs_last - dA_cs_m), 0.0) + db *= (scale * dt_m)[:, None] + if HAS_DDA_CS: + # This is the gradient wrt (dA_cs_last - dA_cs_m), i.e. 
the exclusive reverse cumsum + ddA_cs = tl.sum(db * b, axis=1) + tl.atomic_add(ddA_cumsum_ptrs + stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size - 1) + acc += db + x_ptrs += stride_x_head + dstates_ptrs += stride_states_head + dt_ptrs += stride_dt_head + dA_cumsum_ptr += stride_dA_cs_head + dA_cumsum_ptrs += stride_dA_cs_head + if HAS_DDA_CS: + ddA_cumsum_ptrs += stride_ddA_cs_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + # if HAS_SEQ_IDX: + # seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + # seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + # acc = tl.where(seq_idx_m[:, None] == seq_idx_last, acc, 0.0) + db_ptrs = db_ptr + (offs_m[:, None] * stride_db_seqlen + offs_n[None, :] * stride_db_dstate) + tl.store(db_ptrs, acc, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < dstate)) + + +@triton.autotune( + configs=[ + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + # triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=8, pre_hook=init_to_zero(["ddA_cumsum_ptr"])), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_state_bwd_ddAcs_stable_kernel( + # Pointers to 
matrices + x_ptr, b_ptr, dstates_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, + ddA_cumsum_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dstates_batch, stride_dstates_chunk, stride_states_head, stride_states_hdim, stride_states_dstate, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, stride_ddA_cs_csize, + # Meta-parameters + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + pid_h * stride_states_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddA_cumsum_ptr += pid_b * stride_ddA_cs_batch + pid_c * stride_ddA_cs_chunk + pid_h * stride_ddA_cs_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + # Faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + offs_k = tl.arange(0, BLOCK_SIZE_DSTATE if BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_k[None, :] * stride_b_dstate) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_states_hdim + offs_k[:, None] * stride_states_dstate) + if BLOCK_SIZE_DSTATE <= 128: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc = tl.dot(b, dstates) + else: + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, dstate, BLOCK_SIZE_K): + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_k[None, :] < dstate - k), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_k[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc += tl.dot(b, dstates) + b_ptrs += BLOCK_SIZE_K * stride_b_dstate + dstates_ptrs += BLOCK_SIZE_K * stride_states_dstate + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size 
- 1) * stride_dA_cs_csize).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_last - dA_cs_m) + else: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1) + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + scale = tl.where(seq_idx_m == seq_idx_last, tl.exp(dA_cs_last - dA_cs_m), 0.0) + acc *= scale[:, None] + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size, other=0.0).to(tl.float32) + ddt = tl.sum(acc * x, axis=1) + # ddA_cs = -(ddt * dt_m) + # Triton 2.2.0 errors if we have the cumsum here, so we just write it out + # then call torch.cumsum outside this kernel. + # ddA_cs = tl.cumsum(ddt * dt_m) + ddA_cs = ddt * dt_m + ddA_cumsum_ptrs = ddA_cumsum_ptr + offs_m * stride_ddA_cs_csize + # tl.atomic_add(ddA_cumsum_ptrs, ddA_cs, mask=offs_m < chunk_size) + tl.atomic_add(ddA_cumsum_ptrs + stride_ddA_cs_csize, ddA_cs, mask=offs_m < chunk_size - 1) + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=2), + ], + key=['hdim', 'dstate', 'chunk_size'], +) +@triton.jit +def _chunk_state_varlen_kernel( + # Pointers to matrices + x_ptr, b_ptr, dt_ptr, dA_cumsum_ptr, chunk_states_ptr, cu_seqlens_ptr, states_ptr, + # Matrix dimensions + hdim, dstate, chunk_size, + seqlen, nheads_ngroups_ratio, + # Strides + stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_chunk_states_chunk, stride_chunk_states_head, stride_chunk_states_hdim, stride_chunk_states_dstate, + stride_states_batch, stride_states_head, stride_states_hdim, stride_states_dstate, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(dstate, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + end_idx = tl.load(cu_seqlens_ptr + pid_b + 1) + pid_c = (end_idx - 1) // chunk_size + b_ptr += pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + x_ptr += pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + dt_ptr += pid_c * stride_dt_chunk + pid_h * stride_dt_head + 
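+    # (pid_c above is the chunk containing this sequence's last token; contributions
+    # from earlier complete chunks are added at the end from chunk_states.)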
dA_cumsum_ptr += pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + chunk_states_ptr += pid_c * stride_chunk_states_chunk + pid_h * stride_chunk_states_head + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_hdim + offs_k[None, :] * stride_x_seqlen) + b_ptrs = b_ptr + (offs_n[None, :] * stride_b_dstate + offs_k[:, None] * stride_b_seqlen) + dt_ptrs = dt_ptr + offs_k * stride_dt_csize + dA_cs_last = tl.load(dA_cumsum_ptr + (end_idx - pid_c * chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + + chunk_size_limit = end_idx - pid_c * chunk_size + start_idx = tl.load(cu_seqlens_ptr + pid_b) + start_idx_cur = tl.maximum(start_idx - pid_c * chunk_size, 0) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, chunk_size_limit, BLOCK_SIZE_K): + x = tl.load(x_ptrs, mask=(offs_m[:, None] < hdim) & (offs_k[None, :] < chunk_size_limit - k) & (offs_k[None, :] >= start_idx_cur - k), other=0.0) + b = tl.load(b_ptrs, mask=(offs_k[:, None] < chunk_size_limit - k) & (offs_n[None, :] < dstate) & (offs_k[:, None] >= start_idx_cur - k), other=0.0).to(tl.float32) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + dt_k = tl.load(dt_ptrs, mask=offs_k < chunk_size_limit - k, other=0.0).to(tl.float32) + scale = tl.where((offs_k >= start_idx_cur - k) & (offs_k < chunk_size_limit - k), + tl.exp((dA_cs_last - dA_cs_k)) * dt_k, 0.0) + b *= scale[:, None] + b = b.to(x_ptr.dtype.element_ty) + acc += tl.dot(x, b) + x_ptrs += BLOCK_SIZE_K * stride_x_seqlen + b_ptrs += BLOCK_SIZE_K * stride_b_seqlen + dt_ptrs += BLOCK_SIZE_K * stride_dt_csize + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + # If the sequence starts after the last chunk idx, we don't need to add the contribution from the last chunk + if start_idx < pid_c * chunk_size: + chunk_states_ptrs = chunk_states_ptr + (offs_m[:, None] * stride_chunk_states_hdim + offs_n[None, :] * stride_chunk_states_dstate) + chunk_states = tl.load(chunk_states_ptrs, mask=(offs_m[:, None] < hdim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + # scale = tl.where(start_idx < pid_c * chunk_size, tl.exp(dA_cs_last), 0.0) + scale = tl.exp(dA_cs_last) + acc += chunk_states * scale + + states = acc.to(states_ptr.dtype.element_ty) + + states_ptr += pid_b * stride_states_batch + pid_h * stride_states_head + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + states_ptrs = states_ptr + (offs_m[:, None] * stride_states_hdim + offs_n[None, :] * stride_states_dstate) + c_mask = (offs_m[:, None] < hdim) & (offs_n[None, :] < dstate) + tl.store(states_ptrs, states, mask=c_mask) + + +def _chunk_cumsum_fwd(dt, A, chunk_size, dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf"))): + batch, seqlen, nheads = dt.shape + assert A.shape == (nheads,) + if dt_bias is not None: + assert dt_bias.shape == (nheads,) + nchunks = math.ceil(seqlen / chunk_size) + dt_out = torch.empty(batch, nheads, nchunks, chunk_size, device=dt.device, dtype=torch.float32) + dA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=dt.device, dtype=torch.float32) + grid_chunk_cs = lambda META: (batch, nchunks, triton.cdiv(nheads, META['BLOCK_SIZE_H'])) + with torch.cuda.device(dt.device.index): + 
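+        # One program per (batch, chunk, block of heads): the kernel applies the optional
+        # dt bias, softplus and clamping to [dt_min, dt_max], writes the resulting dt to
+        # dt_out, and writes the within-chunk cumulative sum of dt * A to dA_cumsum
+        # (both in fp32).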
_chunk_cumsum_fwd_kernel[grid_chunk_cs]( + dt, A, dt_bias, dt_out, dA_cumsum, + batch, seqlen, nheads, chunk_size, + dt_limit[0], dt_limit[1], + dt.stride(0), dt.stride(1), dt.stride(2), + A.stride(0), + dt_bias.stride(0) if dt_bias is not None else 0, + dt_out.stride(0), dt_out.stride(2), dt_out.stride(1), dt_out.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + dt_softplus, + HAS_DT_BIAS=dt_bias is not None, + BLOCK_SIZE_CHUNK=triton.next_power_of_2(chunk_size), + ) + return dA_cumsum, dt_out + + +def _chunk_cumsum_bwd(ddA, ddt_out, dt, A, dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf")), ddt=None): + batch, seqlen, nheads = dt.shape + _, _, nchunks, chunk_size = ddA.shape + assert ddA.shape == (batch, nheads, nchunks, chunk_size) + assert ddt_out.shape == (batch, nheads, nchunks, chunk_size) + assert A.shape == (nheads,) + if dt_bias is not None: + assert dt_bias.shape == (nheads,) + ddt_bias = torch.empty_like(dt_bias, dtype=torch.float32) + else: + ddt_bias = None + if ddt is not None: + assert ddt.shape == dt.shape + else: + ddt = torch.empty_like(dt) + dA = torch.empty_like(A, dtype=torch.float32) + grid_chunk_cs = lambda META: (batch, nchunks, triton.cdiv(nheads, META['BLOCK_SIZE_H'])) + with torch.cuda.device(dt.device.index): + _chunk_cumsum_bwd_kernel[grid_chunk_cs]( + ddA, ddt_out, dt, A, dt_bias, ddt, dA, ddt_bias, + batch, seqlen, nheads, chunk_size, + dt_limit[0], dt_limit[1], + ddA.stride(0), ddA.stride(2), ddA.stride(1), ddA.stride(3), + ddt_out.stride(0), ddt_out.stride(2), ddt_out.stride(1), ddt_out.stride(3), + dt.stride(0), dt.stride(1), dt.stride(2), + A.stride(0), + dt_bias.stride(0) if dt_bias is not None else 0, + ddt.stride(0), ddt.stride(1), ddt.stride(2), + dA.stride(0), + ddt_bias.stride(0) if ddt_bias is not None else 0, + dt_softplus, + HAS_DT_BIAS=dt_bias is not None, + BLOCK_SIZE_CHUNK=triton.next_power_of_2(chunk_size), + ) + return ddt, dA, ddt_bias + + +def _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=None, states=None, states_in_fp32=True): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if states is not None: + assert states.shape == (batch, nchunks, nheads, headdim, dstate) + else: + states_dtype = torch.float32 if states_in_fp32 else B.dtype + states = torch.empty((batch, nchunks, nheads, headdim, dstate), device=x.device, dtype=states_dtype) + grid = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_fwd_kernel[grid]( + x, B, states, dt, dA_cumsum, seq_idx, + headdim, dstate, chunk_size, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(-1), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), states.stride(4), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + HAS_SEQ_IDX=seq_idx is not None, + ) + return states + + +def _chunk_state_bwd_dx(B, x, dt, dA_cumsum, 
dstates, dx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if dx is not None: + assert dx.shape == x.shape + else: + dx = torch.empty_like(x) + ddt = torch.empty(batch, nheads, nchunks, chunk_size, device=dt.device, dtype=torch.float32) + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=dA_cumsum.device, dtype=torch.float32) + grid_dx = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_bwd_dx_kernel[grid_dx]( + x, B, dstates, dt, dA_cumsum, dx, ddt, ddA_cumsum, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(-1), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), dstates.stride(4), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + dx.stride(0), dx.stride(1), dx.stride(2), dx.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + ) + return dx, ddt.to(dt.dtype), ddA_cumsum.to(dA_cumsum.dtype) + + +def _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=None, B=None, ngroups=1): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + dstate = dstates.shape[-1] + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if B is not None: + assert B.shape == (batch, seqlen, ngroups, dstate) + B_strides = (B.stride(0), B.stride(1), B.stride(2), B.stride(3)) + # Use torch.empty since the Triton kernel will call init_to_zero + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=x.device, dtype=torch.float32) + ddA_cumsum_strides = (ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3)) + else: + B_strides = (0, 0, 0, 0) + ddA_cumsum = None + ddA_cumsum_strides = (0, 0, 0, 0) + nheads_ngroups_ratio = nheads // ngroups + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + nheads_per_program = max(min(math.ceil(batch * nchunks * nheads / sm_count), nheads_ngroups_ratio), 1) + nsplits = triton.cdiv(nheads_ngroups_ratio, nheads_per_program) + dB = torch.empty(batch, seqlen, nsplits, ngroups, dstate, device=x.device, dtype=torch.float32) + grid_db = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch * nchunks, nsplits * ngroups) + with torch.cuda.device(x.device.index): + _chunk_state_bwd_db_kernel[grid_db]( + x, dstates, B, dt, dA_cumsum, seq_idx, dB, ddA_cumsum, + chunk_size, dstate, headdim, + batch, seqlen, nheads, nheads_per_program, ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), 
dstates.stride(4), + *B_strides, + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dB.stride(0), dB.stride(1), dB.stride(2), dB.stride(3), dB.stride(4), + *ddA_cumsum_strides, + HAS_DDA_CS=ddA_cumsum is not None, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_K=max(triton.next_power_of_2(headdim), 16), + ) + dB = dB.sum(2) + if ddA_cumsum is not None: + # The first element of ddA_cumsum is always zero, since that dA_cumsum does not contribute + # to the state of the chunk. + # torch.cumsum(ddA_cumsum[..., 1:], dim=-1, out=ddA_cumsum[..., 1:]) + # But it's easier to just do the cumsum for all elements, the result will be the same. + torch.cumsum(ddA_cumsum, dim=-1, out=ddA_cumsum) + return dB if B is None else (dB, ddA_cumsum) + + +def _chunk_state_bwd_ddAcs_stable(B, x, dt, dA_cumsum, dstates, seq_idx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + # Use torch.empty since the Triton kernel will call init_to_zero + ddA_cumsum = torch.empty(batch, nheads, nchunks, chunk_size, device=x.device, dtype=torch.float32) + grid_ddtcs = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_bwd_ddAcs_stable_kernel[grid_ddtcs]( + x, B, dstates, dt, dA_cumsum, seq_idx, ddA_cumsum, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + B.stride(0), B.stride(1), B.stride(2), B.stride(-1), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), dstates.stride(4), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + ddA_cumsum.stride(0), ddA_cumsum.stride(2), ddA_cumsum.stride(1), ddA_cumsum.stride(3), + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_M=max(triton.next_power_of_2(chunk_size), 16), + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + ) + torch.cumsum(ddA_cumsum[..., 1:], dim=-1, out=ddA_cumsum[..., 1:]) + return ddA_cumsum + + +def chunk_state_varlen(B, x, dt, dA_cumsum, cu_seqlens, chunk_states): + total_seqlen, nheads, headdim = x.shape + _, nchunks, chunk_size = dt.shape + _, ngroups, dstate = B.shape + batch = cu_seqlens.shape[0] - 1 + cu_seqlens = cu_seqlens.contiguous() + assert nheads % ngroups == 0 + assert B.shape == (total_seqlen, ngroups, dstate) + assert dt.shape == (nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert chunk_states.shape == (nchunks, nheads, headdim, dstate) + states = torch.empty(batch, nheads, headdim, dstate, dtype=chunk_states.dtype, device=chunk_states.device) + grid = lambda META: (triton.cdiv(headdim, META['BLOCK_SIZE_M']) * triton.cdiv(dstate, META['BLOCK_SIZE_N']), + batch, nheads) + with torch.cuda.device(x.device.index): + _chunk_state_varlen_kernel[grid]( + x, B, dt, 
dA_cumsum, chunk_states, cu_seqlens, states, + headdim, dstate, chunk_size, + total_seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), + B.stride(0), B.stride(1), B.stride(2), + dt.stride(1), dt.stride(0), dt.stride(2), + dA_cumsum.stride(1), dA_cumsum.stride(0), dA_cumsum.stride(2), + chunk_states.stride(0), chunk_states.stride(1), chunk_states.stride(2), chunk_states.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), + ) + return states + + +class ChunkStateFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, B, x, dt, dA_cumsum, states_in_fp32=True): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + assert seqlen <= nchunks * chunk_size + _, _, ngroups, dstate = B.shape + assert B.shape == (batch, seqlen, ngroups, dstate) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + if B.stride(-1) != 1: + B = B.contiguous() + if x.stride(-1) != 1 and x.stride(1) != 1: # Either M or K dimension should be contiguous + x = x.contiguous() + states = _chunk_state_fwd(B, x, dt, dA_cumsum, states_in_fp32=states_in_fp32) + ctx.save_for_backward(B, x, dt, dA_cumsum) + return states + + @staticmethod + def backward(ctx, dstates): + B, x, dt, dA_cumsum = ctx.saved_tensors + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if dstates.stride(-1) != 1: + dstates = dstates.contiguous() + dx, ddt, ddA_cumsum = _chunk_state_bwd_dx(B, x, dt, dA_cumsum, dstates) + dB = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, ngroups=ngroups) + dB = dB.to(B.dtype) + return dB, dx, ddt, ddA_cumsum, None + + +def chunk_state(B, x, dt, dA_cumsum, states_in_fp32=True): + """ + Argument: + B: (batch, seqlen, ngroups, headdim) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + Return: + states: (batch, nchunks, nheads, headdim, dstate) + """ + return ChunkStateFn.apply(B, x, dt, dA_cumsum, states_in_fp32) + + +def chunk_state_ref(B, x, dt, dA_cumsum): + """ + Argument: + B: (batch, seqlen, ngroups, headdim) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) + dA_cumsum: (batch, nheads, nchunks, chunk_size) + Return: + states: (batch, nchunks, nheads, headdim, dstate) + """ + # Check constraints. + batch, seqlen, nheads, headdim = x.shape + dstate = B.shape[-1] + _, _, nchunks, chunk_size = dt.shape + assert seqlen <= nchunks * chunk_size + assert x.shape == (batch, seqlen, nheads, headdim) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + ngroups = B.shape[2] + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + B = repeat(B, "b l g d -> b l (g h) d", h=nheads // ngroups) + assert dA_cumsum.shape == (batch, nheads, nchunks, chunk_size) + if seqlen < nchunks * chunk_size: + x = F.pad(x, (0, 0, 0, 0, 0, nchunks * chunk_size - seqlen)) + B = F.pad(B, (0, 0, 0, 0, 0, nchunks * chunk_size - seqlen)) + x = rearrange(x, "b (c l) h p -> b c l h p", l=chunk_size) + B = rearrange(B, "b (c l) ... 
-> b c l ...", l=chunk_size) + decay_states = torch.exp((dA_cumsum[:, :, :, -1:] - dA_cumsum)) + return torch.einsum("bclhn,bhcl,bhcl,bclhp->bchpn", B.to(x.dtype), decay_states.to(x.dtype), dt.to(x.dtype), x) diff --git a/mamba/mamba_ssm/ops/triton/ssd_combined.py b/mamba/mamba_ssm/ops/triton/ssd_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..77d207152cdd2d9f18f5d6be6824cc0ef4637fdd --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/ssd_combined.py @@ -0,0 +1,981 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. + +"""We want triton==2.1.0 or 2.2.0 for this +""" + +from typing import Optional + +import math +from packaging import version + +import torch +import torch.nn.functional as F +from torch import Tensor +from torch.cuda.amp import custom_bwd, custom_fwd + +import triton +import triton.language as tl + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn + import causal_conv1d_cuda +except ImportError: + causal_conv1d_fn, causal_conv1d_cuda = None, None + +from mamba_ssm.ops.triton.ssd_bmm import _bmm_chunk_fwd, _bmm_chunk_bwd +from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_cumsum_fwd, _chunk_cumsum_bwd +from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_state_fwd, _chunk_state_bwd_db +from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_state_bwd_ddAcs_stable +from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state, chunk_state_ref +from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state_varlen +from mamba_ssm.ops.triton.ssd_state_passing import _state_passing_fwd, _state_passing_bwd +from mamba_ssm.ops.triton.ssd_state_passing import state_passing, state_passing_ref +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_fwd, _chunk_scan_bwd_dz, _chunk_scan_bwd_dstates +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_dC, _chunk_scan_bwd_dcb +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_ddAcs_stable +from mamba_ssm.ops.triton.ssd_chunk_scan import chunk_scan, chunk_scan_ref +from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_ddAcs_prev +from mamba_ssm.ops.triton.layernorm_gated import rmsnorm_fn, _layer_norm_fwd, _layer_norm_bwd +from mamba_ssm.ops.triton.k_activations import _swiglu_fwd, _swiglu_bwd + +TRITON_22 = version.parse(triton.__version__) >= version.parse('2.2.0') + + +def init_to_zero(names): + return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None] + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 
32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])), + ], + key=['chunk_size', 'hdim', 'dstate'], +) +@triton.jit +def _chunk_scan_chunk_state_bwd_dx_kernel( + # Pointers to matrices + x_ptr, cb_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, D_ptr, + b_ptr, dstates_ptr, + dx_ptr, ddt_ptr, dD_ptr, + # Matrix dimensions + chunk_size, hdim, dstate, + batch, seqlen, nheads_ngroups_ratio, + # Strides + stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim, + stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k, + stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim, + stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_D_head, + stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate, + stride_dstates_batch, stride_dstates_chunk, stride_dstates_head, stride_dstates_hdim, stride_dstates_dstate, + stride_dx_batch, stride_dx_seqlen, stride_dx_head, stride_dx_hdim, + stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize, + stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_csize, stride_dD_hdim, + # Meta-parameters + HAS_D: tl.constexpr, + D_HAS_HDIM: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, + IS_TRITON_22: tl.constexpr, +): + pid_bc = tl.program_id(axis=1) + pid_c = pid_bc // batch + pid_b = pid_bc - pid_c * batch + pid_h = tl.program_id(axis=2) + num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N) + pid_m = tl.program_id(axis=0) // num_pid_n + pid_n = tl.program_id(axis=0) % num_pid_n + x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head + cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head + dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head + dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head + ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head + dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head + b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head + dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + pid_h * stride_dstates_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + + chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size) + + acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + + dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32) + if not HAS_SEQ_IDX: + scale = tl.exp(dA_cs_last - dA_cs_m) + else: + seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, 
other=-1) + seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen) + scale = tl.where(seq_idx_m == seq_idx_last, tl.exp(dA_cs_last - dA_cs_m), 0.0) + # Might be faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128 + # However, we're getting error with the Triton compiler 2.1.0 for that code path: + # Unexpected mma -> mma layout conversion + # Triton 2.2.0 fixes this + offs_dstate = tl.arange(0, BLOCK_SIZE_DSTATE if IS_TRITON_22 and BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K) + b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_dstate[None, :] * stride_b_dstate) + dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_dstates_hdim + offs_dstate[:, None] * stride_dstates_dstate) + if IS_TRITON_22 and BLOCK_SIZE_DSTATE <= 128: + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_dstate[None, :] < dstate), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_dstate[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc = tl.dot(b, dstates) * scale[:, None] + else: + for k in range(0, dstate, BLOCK_SIZE_K): + b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_dstate[None, :] < dstate - k), other=0.0) + dstates = tl.load(dstates_ptrs, mask=(offs_dstate[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0) + dstates = dstates.to(b_ptr.dtype.element_ty) + acc += tl.dot(b, dstates) + b_ptrs += BLOCK_SIZE_K * stride_b_dstate + dstates_ptrs += BLOCK_SIZE_K * stride_dstates_dstate + acc *= scale[:, None] + + # x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + # x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + # dt_ptrs = dt_ptr + offs_m * stride_dt_csize + # dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + # ddt = tl.sum(acc * x, axis=1) * dt_m + # ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + # tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + + offs_k = tl.arange(0, BLOCK_SIZE_K) + cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_k[None, :] * stride_cb_csize_k) + dout_ptrs = dout_ptr + (offs_k[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize + K_MAX = chunk_size_limit + K_MIN = pid_m * BLOCK_SIZE_M + cb_ptrs += K_MIN * stride_cb_csize_k + dout_ptrs += K_MIN * stride_dout_seqlen + dA_cumsum_ptrs += K_MIN * stride_dA_cs_csize + for k in range(K_MIN, K_MAX, BLOCK_SIZE_K): + k = tl.multiple_of(k, BLOCK_SIZE_K) + # For some reason setting mask to (offs_m[:, None] < chunk_size_limit) is much slower + cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_k[None, :] < K_MAX - k), other=0.0) + dout = tl.load(dout_ptrs, mask=(offs_k[:, None] < K_MAX - k) & (offs_n[None, :] < hdim), other=0.0) + dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < K_MAX - k, other=0.0).to(tl.float32) + cb *= tl.exp(dA_cs_k[None, :] - dA_cs_m[:, None]) + # If we don't have the (k + offs_k[None, :] < K_MAX) mask, for indices outside this range, + # we might have dA_cs_m = 0.0 and dA_cs_k very negative, and tl.exp will return inf. + # Multiplying with cb, which is 0.0 outside the range, will make the result NaN. + # This will cause NaN in acc, and hence NaN in dx and ddt. 
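+        # The (k + offs_k >= offs_m) part of the mask enforces causality in the backward
+        # direction: within the chunk, x at position m only affected outputs at positions
+        # k >= m, so only those dout positions contribute to dx.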
+ mask = (k + offs_k[None, :] >= offs_m[:, None]) & (k + offs_k[None, :] < K_MAX) + cb = tl.where(mask, cb, 0.0) + cb = cb.to(dout_ptr.dtype.element_ty) + acc += tl.dot(cb, dout) + cb_ptrs += BLOCK_SIZE_K * stride_cb_csize_k + dout_ptrs += BLOCK_SIZE_K * stride_dout_seqlen + dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + dt_ptrs = dt_ptr + offs_m * stride_dt_csize + dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32) + dx = acc * dt_m[:, None] + dx_ptr += pid_b * stride_dx_batch + pid_c * chunk_size * stride_dx_seqlen + pid_h * stride_dx_head + dx_ptrs = dx_ptr + (offs_m[:, None] * stride_dx_seqlen + offs_n[None, :] * stride_dx_hdim) + if HAS_D: + dout_res_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim) + dout_res = tl.load(dout_res_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if D_HAS_HDIM: + D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32) + else: + D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32) + dx += dout_res * D + tl.store(dx_ptrs, dx, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim)) + + x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim) + x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32) + if HAS_D: + dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize + if D_HAS_HDIM: + dD_ptrs = dD_ptr + offs_n * stride_dD_hdim + dD = tl.sum(dout_res * x, axis=0) + tl.store(dD_ptrs, dD, mask=offs_n < hdim) + else: + dD = tl.sum(dout_res * x) + tl.store(dD_ptr, dD) + ddt = tl.sum(acc * x, axis=1) + ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize + tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size) + + +def _chunk_scan_chunk_state_bwd_dx(x, dt, dA_cumsum, B, CB, dout, dstates, D=None, seq_idx=None, dx=None): + batch, seqlen, nheads, headdim = x.shape + _, _, nchunks, chunk_size = dt.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert CB.shape == (batch, nchunks, ngroups, chunk_size, chunk_size) + assert dt.shape == (batch, nheads, nchunks, chunk_size) + assert dA_cumsum.shape == dt.shape + assert dout.shape == x.shape + assert dstates.shape == (batch, nchunks, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + assert D.stride(-1) == 1 + BLOCK_SIZE_min = 32 + dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_min), batch, nchunks, nheads, + headdim if D.dim() == 2 else 1, device=D.device, dtype=torch.float32) + else: + dD = None + dD_strides = ((dD.stride(0), dD.stride(1), dD.stride(2), dD.stride(3), dD.stride(4)) + if D is not None else (0, 0, 0, 0, 0)) + if dx is None: + dx = torch.empty_like(x) + else: + assert dx.shape == x.shape + ddt = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32) + grid_dx = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']), + batch * nchunks, nheads) + with torch.cuda.device(x.device.index): + _chunk_scan_chunk_state_bwd_dx_kernel[grid_dx]( + x, CB, dout, dt, dA_cumsum, 
seq_idx, D, B, dstates, dx, ddt, dD, + chunk_size, headdim, dstate, + batch, seqlen, nheads // ngroups, + x.stride(0), x.stride(1), x.stride(2), x.stride(3), + CB.stride(0), CB.stride(1), CB.stride(2), CB.stride(-1), CB.stride(-2), + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3), + dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + D.stride(0) if D is not None else 0, + B.stride(0), B.stride(1), B.stride(2), B.stride(3), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), dstates.stride(4), + dx.stride(0), dx.stride(1), dx.stride(2), dx.stride(3), + ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3), + dD_strides[1], dD_strides[2], dD_strides[3], dD_strides[0], dD_strides[4], + D is not None, + D.dim() == 2 if D is not None else True, + HAS_SEQ_IDX=seq_idx is not None, + BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16), + IS_TRITON_22=TRITON_22 + ) + if D is not None: + BLOCK_SIZE_actual = _chunk_scan_chunk_state_bwd_dx_kernel.best_config.kwargs["BLOCK_SIZE_M"] + n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype) + if D.dim() == 1: + dD = rearrange(dD, "h 1 -> h") + return dx, ddt.to(dtype=dt.dtype), dD + + +def _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf"))): + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert x.shape == (batch, seqlen, nheads, headdim) + assert dt.shape == (batch, seqlen, nheads) + assert A.shape == (nheads,) + assert C.shape == B.shape + if z is not None: + assert z.shape == x.shape + if D is not None: + assert D.shape == (nheads, headdim) or D.shape == (nheads,) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if x.stride(-1) != 1 and x.stride(1) != 1: # Either M or K dimension should be contiguous + x = x.contiguous() + if z is not None and z.stride(-1) != 1 and z.stride(1) != 1: # Either M or K dimension should be contiguous + z = z.contiguous() + if D is not None and D.stride(-1) != 1: + D = D.contiguous() + if initial_states is not None: + assert initial_states.shape == (batch, nheads, headdim, dstate) + # # (batch, nchunks, chunk_size, chunk_size) or (batch, nchunks, nheads, chunk_size, chunk_size) + # dA_cumsum_tmp0, dt_tmp0 = _chunk_cumsum_fwd(dt[:, :147], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus) + # dA_cumsum_tmp1, dt_tmp1 = _chunk_cumsum_fwd(dt[:, 147:], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus) + # dA_cumsum_tmp2, dt_tmp2 = _chunk_cumsum_fwd(dt[:, 147:256], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus) + dA_cumsum, dt = _chunk_cumsum_fwd(dt, A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit) + states = _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=seq_idx, states_in_fp32=True) + # states_tmp0 = _chunk_state_fwd(B[:, :147], x[:, :147], dt_tmp0, dA_cumsum_tmp0, states_in_fp32=True) + # states_tmp1 = _chunk_state_fwd(B[:, 147:], x[:, 147:], dt_tmp1, dA_cumsum_tmp1, states_in_fp32=True) + # states_tmp2 = 
_chunk_state_fwd(B[:, 147:256], x[:, 147:256], dt_tmp2, dA_cumsum_tmp2, states_in_fp32=True) + states, final_states = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1], + initial_states=rearrange(initial_states, "... p n -> ... (p n)") if initial_states is not None else None, + seq_idx=seq_idx, chunk_size=chunk_size, out_dtype=C.dtype) + states, final_states = [rearrange(t, "... (p n) -> ... p n", n=dstate) for t in [states, final_states]] + # states_tmp0 = rearrange(_state_passing_fwd(rearrange(states_tmp0, "... p n -> ... (p n)"), dA_cumsum_tmp0[:, :, :, -1], chunk_size=chunk_size), "... (p n) -> ... p n", n=dstate) + # states_tmp1 = rearrange(_state_passing_fwd(rearrange(states_tmp1, "... p n -> ... (p n)"), dA_cumsum_tmp1[:, :, :, -1], chunk_size=chunk_size), "... (p n) -> ... p n", n=dstate) + CB = _bmm_chunk_fwd(C, B, chunk_size, seq_idx=seq_idx, output_dtype=torch.float32) + out, out_x = _chunk_scan_fwd(CB, x, dt, dA_cumsum, C, states, D=D, z=z, seq_idx=seq_idx) + if cu_seqlens is None: + return out, out_x, dt, dA_cumsum, states, final_states + else: + assert batch == 1, "passing cu_seqlens to get the varlen states is only supported if batch dimension is 1" + varlen_states = chunk_state_varlen(B.squeeze(0), x.squeeze(0), dt.squeeze(0), dA_cumsum.squeeze(0), + cu_seqlens, states.squeeze(0)) + return out, out_x, dt, dA_cumsum, states, final_states, varlen_states + + +def _mamba_chunk_scan_combined_bwd(dout, x, dt, A, B, C, out, chunk_size, D=None, z=None, + dt_bias=None, initial_states=None, dfinal_states=None, seq_idx=None, dt_softplus=False, + dt_limit=(0.0, float("inf")), + dx=None, ddt=None, dB=None, dC=None, dz=None, recompute_output=False): + if dout.stride(-1) != 1: + dout = dout.contiguous() + batch, seqlen, nheads, headdim = x.shape + nchunks = math.ceil(seqlen / chunk_size) + _, _, ngroups, dstate = B.shape + assert dout.shape == (batch, seqlen, nheads, headdim) + assert dt.shape == (batch, seqlen, nheads) + assert A.shape == (nheads,) + assert nheads % ngroups == 0 + assert B.shape == (batch, seqlen, ngroups, dstate) + assert C.shape == B.shape + assert out.shape == x.shape + if initial_states is not None: + assert initial_states.shape == (batch, nheads, headdim, dstate) + if seq_idx is not None: + assert seq_idx.shape == (batch, seqlen) + if dx is not None: + assert dx.shape == x.shape + if dB is not None: + assert dB.shape == B.shape + dB_given = dB + else: + dB_given = torch.empty_like(B) + if dC is not None: + assert dC.shape == C.shape + dC_given = dC + else: + dC_given = torch.empty_like(C) + if dz is not None: + assert z is not None + assert dz.shape == z.shape + if ddt is not None: + assert ddt.shape == dt.shape + ddt_given = ddt + else: + ddt_given = torch.empty_like(dt) + # TD: For some reason Triton (2.1.0 and 2.2.0) errors with + # "[CUDA]: invalid device context" (e.g. during varlne test), and cloning makes it work. Idk why. + dt_in = dt.clone() + dA_cumsum, dt = _chunk_cumsum_fwd(dt_in, A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus, + dt_limit=dt_limit) + CB = _bmm_chunk_fwd(C, B, chunk_size, seq_idx=seq_idx, output_dtype=torch.float32) + states = _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=seq_idx, states_in_fp32=True) + states, _ = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1], + initial_states=rearrange(initial_states, "... p n -> ... (p n)") if initial_states is not None else None, + seq_idx=seq_idx, chunk_size=chunk_size) + states = rearrange(states, "... (p n) -> ... 
p n", n=dstate) + if z is not None: + dz, dout, dD, *rest = _chunk_scan_bwd_dz(x, z, out, dout, chunk_size=chunk_size, has_ddAcs=False, D=D, dz=dz, recompute_output=recompute_output) + outz = rest[0] if recompute_output else out + else: + dz = None + outz = out + dstates = _chunk_scan_bwd_dstates(C, dA_cumsum, dout, seq_idx=seq_idx, dtype=states.dtype) + # dstates has length nchunks, containing the gradient to initial states at index 0 and + # gradient to the states of chunk (nchunks - 2) at index (nchunks - 1) + # Do computation in fp32 but convert dstates and states to fp16/bf16 since dstates and states + # will be used in matmul in the next kernels. + dstates, ddA_chunk_cumsum, dinitial_states, states = _state_passing_bwd( + rearrange(states, "... p n -> ... (p n)"), + dA_cumsum[:, :, :, -1], + rearrange(dstates, "... p n -> ... (p n)"), + dfinal_states=rearrange(dfinal_states, "... p n -> ... (p n)") if dfinal_states is not None else None, + seq_idx=seq_idx, + has_initial_states=initial_states is not None, + dstates_dtype=x.dtype, + states_dtype=x.dtype, + chunk_size=chunk_size, + ) + # dstates has length nchunks, containing the gradient to states of chunk 0 at index 0 and + # gradient to the final states at index (nchunks - 1) + # states has length nchunks, containing the initial states at index 0 and the state for chunk (nchunks - 2) at index (nchunks - 1) + # The final states is not stored. + states = rearrange(states, "... (p n) -> ... p n", n=dstate) + dstates = rearrange(dstates, "... (p n) -> ... p n", n=dstate) + dinitial_states = rearrange(dinitial_states, "... (p n) -> ... p n", n=dstate) if dinitial_states is not None else None + dx, ddt, dD_from_x = _chunk_scan_chunk_state_bwd_dx(x, dt, dA_cumsum, B, CB, dout, dstates, D=D, seq_idx=seq_idx, dx=dx) + # dB = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=seq_idx, ngroups=ngroups) + dB, ddA_next = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=seq_idx, B=B, ngroups=ngroups) + # dC = _chunk_scan_bwd_dC(states[:, :-1].to(x.dtype), dA_cumsum, dout, seq_idx=seq_idx, ngroups=ngroups) + dC, ddA_cumsum_prev = _chunk_scan_bwd_dC(states.to(x.dtype), dA_cumsum, dout, seq_idx=seq_idx, C=C, ngroups=ngroups) + # Computing ddA with the dcb kernel is much slower, so we're not using it for now + dCB = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=seq_idx, ngroups=ngroups) + # dCB, ddA_tmp = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=seq_idx, CB=CB, ngroups=ngroups) + dCB = dCB.to(CB.dtype) + _bmm_chunk_bwd(C, dCB, residual=dB, out=dB_given) + _bmm_chunk_bwd(B, rearrange(dCB, "... l s -> ... s l"), residual=dC, out=dC_given) + # If we have z, then dout_x is recomputed in fp32 so dD = (dout_x * x).sum() is more accurate + # than dD_from_x = (dout_x * x).sum() where dout_x is in fp16/bf16 + if z is None: + dD = dD_from_x + # Formula for ddA_cumsum, assuming out is the output of the forward pass before adding x * D. + # ddA_cumsum = torch.einsum("bclhp,bclhp->bhcl", out.float(), dout.float()) - ddt * dt + # However, this is numerically unstable: when we do the reverse cumsum on ddA_cumsum, there might + # be a lot of underflow. 
+ + # This is already done as part of bwd_dC kernel + # ddA_cumsum_prev = _chunk_scan_bwd_ddAcs_prev(states[:, :-1], C, dout, dA_cumsum, seq_idx=seq_idx) + ddA_cumsum_prev[..., -1] += ddA_chunk_cumsum + ddA_prev = ddA_cumsum_prev.flip([-1]).cumsum(dim=-1).flip([-1]) + # This is already done as part of bwd_dB kernel + # ddA_next = _chunk_state_bwd_ddAcs_stable(B, x, dt, dA_cumsum, dstates, seq_idx=seq_idx) + # We don't need to pass in seq_idx because CB also zeros out entries where seq_idx[i] != seq_idx[j] + ddA = _chunk_scan_bwd_ddAcs_stable(x, dt, dA_cumsum, dout, CB) + ddA += ddA_next + ddA_prev + + ddt_given, dA, ddt_bias = _chunk_cumsum_bwd(ddA, ddt, dt_in, A, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit, ddt=ddt_given) + + # These 2 lines are just to test ddt and dA being computed by old code + # _, dA = selective_scan_bwd(dout, x, dt, A, B, C, D=D.float(), z=z) + # ddt_given.copy_(ddt) + + return_vals = (dx, ddt_given, dA, dB_given, dC_given, dD, dz, ddt_bias, dinitial_states) + return return_vals if not recompute_output else (*return_vals, outz) + + +def selective_scan_bwd(dout, x, dt, A, B, C, D=None, z=None): + """ + Argument: + dout: (batch, seqlen, nheads, headdim) + x: (batch, seqlen, nheads, headdim) + dt: (batch, nheads, nchunks, chunk_size) or (batch, nheads, headdim, nchunks, chunk_size) + A: (nheads) or (dim, dstate) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + import selective_scan + + batch, seqlen, nheads, headdim = x.shape + chunk_size = dt.shape[-1] + _, _, ngroups, dstate = B.shape + assert nheads % ngroups == 0 + x = rearrange(x, "b l h p -> b (h p) l") + squeeze_dt = dt.dim() == 4 + if dt.dim() == 4: + dt = repeat(dt, "b h c l -> b h p c l", p=headdim) + dt = rearrange(dt, "b h p c l -> b (h p) (c l)", p=headdim) + squeeze_A = A.dim() == 1 + if A.dim() == 1: + A = repeat(A, "h -> (h p) n", p=headdim, n=dstate).to(dtype=torch.float32) + else: + A = A.to(dtype=torch.float32) + B = rearrange(B, "b l g n -> b g n l") + C = rearrange(C, "b l g n -> b g n l") + if D is not None: + if D.dim() == 2: + D = rearrange(D, "h p -> (h p)") + else: + D = repeat(D, "h -> (h p)", p=headdim) + if z is not None: + z = rearrange(z, "b l h p -> b (h p) l") + + if x.stride(-1) != 1: + x = x.contiguous() + if dt.stride(-1) != 1: + dt = dt.contiguous() + if D is not None: + D = D.contiguous() + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if z is not None and z.stride(-1) != 1: + z = z.contiguous() + _, intermediate, *rest = selective_scan.fwd(x, dt.to(dtype=x.dtype), A, B, C, D, z, None, False) + if z is not None: + out = rest[0] + else: + out = None + + dout = rearrange(dout, "b l h p -> b (h p) l") + + if dout.stride(-1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan with the backward of chunk). + # Here we just pass in None and dz will be allocated in the C++ code. 
+ _, ddt, dA, *rest = selective_scan.bwd( + x, dt.to(dtype=x.dtype), A, B, C, D, z, None, dout, intermediate, out, None, False, + False # option to recompute out_z, not used here + ) + ddt = rearrange(ddt, "b (h p) (c l) -> b h p c l", p=headdim, l=chunk_size) + if squeeze_dt: + ddt = ddt.float().sum(dim=2) + if squeeze_A: + dA = rearrange(dA, "(h p) n -> h p n", p=headdim).sum(dim=(1, 2)) + return ddt, dA + + +class MambaChunkScanCombinedFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf")), return_final_states=False, return_varlen_states=False): + ctx.dt_dtype = dt.dtype + if not return_varlen_states: + cu_seqlens = None + else: + assert cu_seqlens is not None, "cu_seqlens must be provided if return_varlen_states is True" + out, out_x, dt_out, dA_cumsum, states, final_states, *rest = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, cu_seqlens=cu_seqlens, dt_softplus=dt_softplus, dt_limit=dt_limit) + ctx.save_for_backward(out if z is None else out_x, x, dt, dA_cumsum, A, B, C, D, z, dt_bias, initial_states, seq_idx) + ctx.dt_softplus = dt_softplus + ctx.chunk_size = chunk_size + ctx.dt_limit = dt_limit + ctx.return_final_states = return_final_states + ctx.return_varlen_states = return_varlen_states + if not return_varlen_states: + return out if not return_final_states else (out, final_states) + else: + varlen_states = rest[0] + return (out, varlen_states) if not return_final_states else (out, final_states, varlen_states) + + @staticmethod + def backward(ctx, dout, *args): + out, x, dt, dA_cumsum, A, B, C, D, z, dt_bias, initial_states, seq_idx = ctx.saved_tensors + assert not ctx.return_varlen_states, "return_varlen_states is not supported in backward" + dfinal_states = args[0] if ctx.return_final_states else None + dx, ddt, dA, dB, dC, dD, dz, ddt_bias, dinitial_states = _mamba_chunk_scan_combined_bwd(dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=ctx.dt_softplus, dt_limit=ctx.dt_limit) + return dx, ddt, dA, dB, dC, None, dD, dz, ddt_bias, dinitial_states, None, None, None, None, None, None + + +def mamba_chunk_scan_combined(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf")), return_final_states=False, return_varlen_states=False): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: (batch, seqlen, nheads) + A: (nheads) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + chunk_size: int + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) + initial_states: (batch, nheads, headdim, dstate) + seq_idx: (batch, seqlen) + cu_seqlens: (num_sequences + 1) or None, only used if return_varlen_states is True + dt_softplus: Whether to apply softplus to dt + Return: + out: (batch, seqlen, nheads, headdim) + """ + return MambaChunkScanCombinedFn.apply(x, dt, A, B, C, chunk_size, D, z, dt_bias, initial_states, seq_idx, cu_seqlens, dt_softplus, dt_limit, return_final_states, return_varlen_states) + + +def mamba_chunk_scan(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: 
(batch, seqlen, nheads) + A: (nheads) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) + Return: + out: (batch, seqlen, nheads, headdim) + """ + batch, seqlen, nheads, headdim = x.shape + dstate = B.shape[-1] + if seqlen % chunk_size != 0: + dt = F.pad(dt, (0, 0, 0, chunk_size - seqlen % chunk_size)) + dt = rearrange(dt, "b (c l) h -> b h c l", l=chunk_size) + dt = dt.float() # We want high precision for this before cumsum + if dt_bias is not None: + dt = dt + rearrange(dt_bias, "h -> h 1 1") + if dt_softplus: + dt = F.softplus(dt) + dA = dt * rearrange(A, "h -> h 1 1") + dA = dt * rearrange(A, "h -> h 1 1") + dA_cumsum = torch.cumsum(dA, dim=-1) + # 1. Compute the state for each chunk + states = chunk_state(B, x, dt, dA_cumsum, states_in_fp32=True) + # 2. Pass the state to all the chunks by weighted cumsum. + states = rearrange(state_passing(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1])[0], + "... (p n) -> ... p n", n=dstate) + # 3. Compute the output for each chunk + out = chunk_scan(B, C, x, dt, dA_cumsum, states, D=D, z=z) + return out + + +def ssd_chunk_scan_combined_ref(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: (batch, seqlen, nheads) + A: (nheads) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) + Return: + out: (batch, seqlen, nheads, headdim) + """ + batch, seqlen, nheads, headdim = x.shape + dstate = B.shape[-1] + if seqlen % chunk_size != 0: + dt = F.pad(dt, (0, 0, 0, chunk_size - seqlen % chunk_size)) + dt = rearrange(dt, "b (c l) h -> b h c l", l=chunk_size) + dt = dt.float() # We want high precision for this before cumsum + if dt_bias is not None: + dt = dt + rearrange(dt_bias, "h -> h 1 1") + if dt_softplus: + dt = F.softplus(dt) + dA = dt * rearrange(A, "h -> h 1 1") + dA_cumsum = torch.cumsum(dA, dim=-1) + # 1. Compute the state for each chunk + states = chunk_state_ref(B, x, dt, dA_cumsum) + states_dtype = states.dtype + if states.dtype not in [torch.float32, torch.float64]: + states = states.to(torch.float32) + # 2. Pass the state to all the chunks by weighted cumsum. + # state_passing_ref is much less numerically stable + states = rearrange(state_passing_ref(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1])[0], + "... (p n) -> ... p n", n=dstate) + states = states.to(states_dtype) + # 3. 
Compute the output for each chunk + out = chunk_scan_ref(B, C, x, dt, dA_cumsum, states, D=D, z=z) + return out + + +def ssd_selective_scan(x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf"))): + """ + Argument: + x: (batch, seqlen, nheads, headdim) + dt: (batch, seqlen, nheads) or (batch, seqlen, nheads, headdim) + A: (nheads) or (dim, dstate) + B: (batch, seqlen, ngroups, dstate) + C: (batch, seqlen, ngroups, dstate) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, nheads, headdim) + dt_bias: (nheads,) or (nheads, headdim) + Return: + out: (batch, seqlen, nheads, headdim) + """ + from mamba_ssm.ops.selective_scan_interface import selective_scan_fn + + batch, seqlen, nheads, headdim = x.shape + _, _, ngroups, dstate = B.shape + x = rearrange(x, "b l h p -> b (h p) l") + if dt.dim() == 3: + dt = repeat(dt, "b l h -> b l h p", p=headdim) + dt = rearrange(dt, "b l h p -> b (h p) l") + if A.dim() == 1: + A = repeat(A, "h -> (h p) n", p=headdim, n=dstate).to(dtype=torch.float32) + else: + A = A.to(dtype=torch.float32) + B = rearrange(B, "b l g n -> b g n l") + C = rearrange(C, "b l g n -> b g n l") + if D is not None: + if D.dim() == 2: + D = rearrange(D, "h p -> (h p)") + else: + D = repeat(D, "h -> (h p)", p=headdim) + if z is not None: + z = rearrange(z, "b l h p -> b (h p) l") + if dt_bias is not None: + if dt_bias.dim() == 1: + dt_bias = repeat(dt_bias, "h -> h p", p=headdim) + dt_bias = rearrange(dt_bias, "h p -> (h p)") + if dt_limit != (0.0, float("inf")): + if dt_bias is not None: + dt = dt + rearrange(dt_bias, "d -> d 1") + if dt_softplus: + dt = F.softplus(dt) + dt = dt.clamp(min=dt_limit[0], max=dt_limit[1]).to(x.dtype) + dt_bias = None + dt_softplus = None + out = selective_scan_fn(x, dt, A, B, C, D=D, z=z, delta_bias=dt_bias, delta_softplus=dt_softplus) + return rearrange(out, "b (h p) l -> b l h p", p=headdim) + + +def mamba_conv1d_scan_ref(xBC, conv1d_weight, conv1d_bias, dt, A, chunk_size, D=None, z=None, + dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf")), + activation="silu", headdim=None, ngroups=1): + """ + Argument: + xBC: (batch, seqlen, dim + 2 * ngroups * dstate) where dim == nheads * headdim + conv1d_weight: (dim + 2 * ngroups * dstate, width) + conv1d_bias: (dim + 2 * ngroups * dstate,) + dt: (batch, seqlen, nheads) or (batch, seqlen, nheads, headdim) + A: (nheads) + D: (nheads, headdim) or (nheads,) + z: (batch, seqlen, dim) + dt_bias: (nheads) or (nheads, headdim) + headdim: if D is 1D and z is None, headdim must be passed in + Return: + out: (batch, seqlen, dim) + """ + batch, seqlen, nheads = dt.shape[:3] + assert nheads % ngroups == 0 + if z is not None: + dim = z.shape[-1] + assert dim % nheads == 0 + headdim = dim // nheads + else: + if D.dim() == 1: + assert headdim is not None + else: + headdim = D.shape[1] + dim = nheads * headdim + xBC = rearrange(causal_conv1d_fn(rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, activation=activation), + "b d s -> b s d") + dstate = (xBC.shape[-1] - dim) // ngroups // 2 + x, B, C = torch.split(xBC, [dim, ngroups * dstate, ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ngroups) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) if z is not None else None + out = ssd_selective_scan(x, dt.to(x.dtype), A, B, C, D=D.float(), z=z, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit) + return rearrange(out, "b s h p 
-> b s (h p)") + + +class MambaSplitConv1dScanCombinedFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states=None, seq_idx=None, dt_limit=(0.0, float("inf")), return_final_states=False, activation="silu", + rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, + ngroups=1, norm_before_gate=True): + assert activation in [None, "silu", "swish"] + if D.dim() == 1: + assert headdim is not None + nheads, = D.shape + else: + nheads, headdim = D.shape + batch, seqlen, _ = zxbcdt.shape + dim = nheads * headdim + assert nheads % ngroups == 0 + dstate = (conv1d_weight.shape[0] - dim) // ngroups // 2 + d_nonssm = (zxbcdt.shape[-1] - 2 * dim - 2 * ngroups * dstate - nheads) // 2 + assert d_nonssm >= 0 + assert zxbcdt.shape == (batch, seqlen, 2 * d_nonssm + 2 * dim + 2 * ngroups * dstate + nheads) + assert dt_bias.shape == (nheads,) + assert A.shape == (nheads,) + zx0, z, xBC, dt = torch.split(zxbcdt, [2 * d_nonssm, dim, dim + ngroups * dstate * 2, nheads], dim=-1) + seq_idx = seq_idx.contiguous() if seq_idx is not None else None + xBC_conv = rearrange( + causal_conv1d_cuda.causal_conv1d_fwd(rearrange(xBC, "b s d -> b d s"), + conv1d_weight, conv1d_bias, seq_idx, None, None, activation in ["silu", "swish"]), + "b d s -> b s d" + ) + x, B, C = torch.split(xBC_conv, [dim, ngroups * dstate, ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ngroups) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) if z is not None else None + if rmsnorm_weight is None: + out, out_x, dt_out, dA_cumsum, states, final_states = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size=chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=dt_limit) + out = rearrange(out, "b s h p -> b s (h p)") + rstd = None + if d_nonssm > 0: + out = torch.cat([_swiglu_fwd(zx0), out], dim=-1) + else: + out_x, _, dt_out, dA_cumsum, states, final_states = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size=chunk_size, D=D, z=None, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=dt_limit) + # reshape input data into 2D tensor + x_rms = rearrange(out_x, "b s h p -> (b s) (h p)") + z_rms = rearrange(z, "b s h p -> (b s) (h p)") + rmsnorm_weight = rmsnorm_weight.contiguous() + if d_nonssm == 0: + out = None + else: + out01 = torch.empty((batch, seqlen, d_nonssm + dim), dtype=x_rms.dtype, device=x_rms.device) + out = rearrange(out01[..., d_nonssm:], "b s d -> (b s) d") + _swiglu_fwd(zx0, out=out01[..., :d_nonssm]) + out, _, rstd = _layer_norm_fwd(x_rms, rmsnorm_weight, None, rmsnorm_eps, z_rms, out=out, + group_size=dim // ngroups, + norm_before_gate=norm_before_gate, is_rms_norm=True) + if d_nonssm == 0: + out = rearrange(out, "(b s) d -> b s d", b=batch) + else: + out = out01 + ctx.outproj_weight_dtype = outproj_weight.dtype if outproj_weight is not None else None + if outproj_weight is not None: + if torch.is_autocast_enabled(): + dtype = torch.get_autocast_gpu_dtype() + out, outproj_weight = out.to(dtype), outproj_weight.to(dtype) + outproj_bias = outproj_bias.to(dtype) if outproj_bias is not None else None + out = F.linear(out, outproj_weight, outproj_bias) + else: + assert outproj_bias is None + ctx.save_for_backward(zxbcdt, conv1d_weight, conv1d_bias, + out_x, A, D, 
dt_bias, initial_states, seq_idx, rmsnorm_weight, rstd, outproj_weight, outproj_bias) + ctx.dt_limit = dt_limit + ctx.return_final_states = return_final_states + ctx.activation = activation + ctx.rmsnorm_eps = rmsnorm_eps + ctx.norm_before_gate = norm_before_gate + ctx.chunk_size = chunk_size + ctx.headdim = headdim + ctx.ngroups = ngroups + return out if not return_final_states else (out, final_states) + + @staticmethod + @custom_bwd + def backward(ctx, dout, *args): + zxbcdt, conv1d_weight, conv1d_bias, out, A, D, dt_bias, initial_states, seq_idx, rmsnorm_weight, rstd, outproj_weight, outproj_bias = ctx.saved_tensors + dfinal_states = args[0] if ctx.return_final_states else None + headdim = ctx.headdim + nheads = D.shape[0] + dim = nheads * headdim + assert nheads % ctx.ngroups == 0 + dstate = (conv1d_weight.shape[0] - dim) // ctx.ngroups // 2 + d_nonssm = (zxbcdt.shape[-1] - 2 * dim - 2 * ctx.ngroups * dstate - nheads) // 2 + assert d_nonssm >= 0 + recompute_output = outproj_weight is not None + if recompute_output: + out_recompute = torch.empty(*out.shape[:2], d_nonssm + dim, device=out.device, dtype=out.dtype) + out0_recompute, out1_recompute = out_recompute.split([d_nonssm, dim], dim=-1) + zx0, z, xBC, dt = torch.split(zxbcdt, [2 * d_nonssm, dim, dim + 2 * ctx.ngroups * dstate, nheads], dim=-1) + # Recompute x, B, C + xBC_conv = rearrange( + causal_conv1d_cuda.causal_conv1d_fwd(rearrange(xBC, "b s d -> b d s"), + conv1d_weight, conv1d_bias, seq_idx, None, None, ctx.activation in ["silu", "swish"]), + "b d s -> b s d" + ) + x, B, C = torch.split(xBC_conv, [dim, ctx.ngroups * dstate, ctx.ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ctx.ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ctx.ngroups) + dzxbcdt = torch.empty_like(zxbcdt) + dzx0, dz, dxBC_given, ddt_given = torch.split(dzxbcdt, [2 * d_nonssm, dim, dim + 2 * ctx.ngroups * dstate, nheads], dim=-1) + dxBC = torch.empty_like(xBC) + dx, dB, dC = torch.split(dxBC, [dim, ctx.ngroups * dstate, ctx.ngroups * dstate], dim=-1) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) + dx = rearrange(dx, "b l (h p) -> b l h p", h=nheads) + dB = rearrange(dB, "b l (g n) -> b l g n", g=ctx.ngroups) + dC = rearrange(dC, "b l (g n) -> b l g n", g=ctx.ngroups) + if outproj_weight is not None: + dout_og = dout + dout = F.linear(dout, outproj_weight.t()) + if d_nonssm > 0: + dout0, dout = dout.split([d_nonssm, dim], dim=-1) + _swiglu_bwd(zx0, dout0, dxy=dzx0, recompute_output=True, out=out0_recompute) + dout = rearrange(dout, "b s (h p) -> b s h p", p=headdim) + if rmsnorm_weight is None: + dz = rearrange(dz, "b l (h p) -> b l h p", h=nheads) + dx, ddt, dA, dB, dC, dD, dz, ddt_bias, dinitial_states, *rest = _mamba_chunk_scan_combined_bwd( + dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=ctx.dt_limit, dx=dx, ddt=ddt_given, dB=dB, dC=dC, dz=dz, recompute_output=recompute_output + ) + out_for_linear = rearrange(rest[0], "b s h p -> b s (h p)") if recompute_output else None + drmsnorm_weight = None + else: + batch = dout.shape[0] + dy_rms = rearrange(dout, "b s h p -> (b s) (h p)") + dz = rearrange(dz, "b l d -> (b l) d") + x_rms = rearrange(out, "b s h p -> (b s) (h p)") + z_rms = rearrange(z, "b s h p -> (b s) (h p)") + out1_recompute = rearrange(out1_recompute, "b s d -> (b s) d") if recompute_output else None + dout, drmsnorm_weight, _, 
dz, *rest = _layer_norm_bwd(dy_rms, x_rms, rmsnorm_weight, None, ctx.rmsnorm_eps, None, rstd, z_rms, norm_before_gate=ctx.norm_before_gate, is_rms_norm=True, recompute_output=recompute_output, dz=dz, out=out1_recompute if recompute_output else None) + out_for_linear = out_recompute if recompute_output else None + dout = rearrange(dout, "(b s) (h p) -> b s h p", b=batch, p=headdim) + dx, ddt, dA, dB, dC, dD, _, ddt_bias, dinitial_states = _mamba_chunk_scan_combined_bwd( + dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=None, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=ctx.dt_limit, dx=dx, ddt=ddt_given, dB=dB, dC=dC + ) + + if outproj_weight is not None: + doutproj_weight = torch.einsum("bso,bsd->od", dout_og, out_for_linear) + doutproj_bias = dout_og.sum(dim=(0, 1)) if outproj_bias is not None else None + else: + doutproj_weight, doutproj_bias = None, None + dxBC_given = rearrange(dxBC_given, "b s d -> b d s") + dxBC_given, dweight, dbias, *_ = causal_conv1d_cuda.causal_conv1d_bwd( + rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, + rearrange(dxBC, "b s d -> b d s"), seq_idx, None, None, dxBC_given, False, ctx.activation in ["silu", "swish"] + ) + dxBC_given = rearrange(dxBC_given, "b d s -> b s d") + return dzxbcdt, dweight, dbias, ddt_bias, dA, dD, None, dinitial_states, None, None, None, None, drmsnorm_weight, None, doutproj_weight, doutproj_bias, None, None, None + + +def mamba_split_conv1d_scan_combined(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states=None, seq_idx=None, dt_limit=(0.0, float("inf")), return_final_states=False, activation="silu", rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, ngroups=1, norm_before_gate=True): + """ + Argument: + zxbcdt: (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) where dim == nheads * headdim + conv1d_weight: (dim + 2 * ngroups * dstate, width) + conv1d_bias: (dim + 2 * ngroups * dstate,) + dt_bias: (nheads,) + A: (nheads) + D: (nheads, headdim) or (nheads,) + initial_states: (batch, nheads, headdim, dstate) + seq_idx: (batch, seqlen), int32 + rmsnorm_weight: (dim,) + outproj_weight: (out_dim, dim) + outproj_bias: (out_dim,) + headdim: if D is 1D, headdim must be passed in + norm_before_gate: if True, we do RMSNorm(x) * F.silu(z). If False, we do RMSNorm(x * F.silu(z)) + Return: + out: (batch, seqlen, dim) + """ + return MambaSplitConv1dScanCombinedFn.apply(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states, seq_idx, dt_limit, return_final_states, activation, rmsnorm_weight, rmsnorm_eps, outproj_weight, outproj_bias, headdim, ngroups, norm_before_gate) + + +def mamba_split_conv1d_scan_ref(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, dt_limit=(0.0, float("inf")), activation="silu", rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, ngroups=1, norm_before_gate=True): + """ + Argument: + zxbcdt: (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) where dim == nheads * headdim + conv1d_weight: (dim + 2 * ngroups * dstate, width) + conv1d_bias: (dim + 2 * ngroups * dstate,) + dt_bias: (nheads,) + A: (nheads) + D: (nheads, headdim) or (nheads,) + rmsnorm_weight: (dim,) + outproj_weight: (out_dim, dim) + outproj_bias: (out_dim,) + headdim: if D is 1D, headdim must be passed in + norm_before_gate: if True, we do RMSNorm(x) * F.silu(z). 
If False, we do RMSNorm(x * F.silu(z)) + Return: + out: (batch, seqlen, dim) + """ + if D.dim() == 1: + assert headdim is not None + nheads, = D.shape + else: + nheads, headdim = D.shape + assert nheads % ngroups == 0 + batch, seqlen, _ = zxbcdt.shape + dim = nheads * headdim + dstate = (zxbcdt.shape[-1] - 2 * dim - nheads) // ngroups // 2 + assert zxbcdt.shape == (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) + assert dt_bias.shape == (nheads,) + assert A.shape == (nheads,) + if rmsnorm_weight is not None: + assert rmsnorm_weight.shape == (dim,) + z, xBC, dt = torch.split(zxbcdt, [dim, dim + 2 * ngroups * dstate, nheads], dim=-1) + xBC = rearrange(causal_conv1d_fn(rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, activation=activation), + "b d s -> b s d") + x, B, C = torch.split(xBC, [dim, ngroups * dstate, ngroups * dstate], dim=-1) + x = rearrange(x, "b l (h p) -> b l h p", h=nheads) + B = rearrange(B, "b l (g n) -> b l g n", g=ngroups) + C = rearrange(C, "b l (g n) -> b l g n", g=ngroups) + z = rearrange(z, "b l (h p) -> b l h p", h=nheads) + out = ssd_selective_scan(x, dt.to(x.dtype), A, B, C, D=D.float(), + z=z if rmsnorm_weight is None else None, dt_bias=dt_bias, dt_softplus=True, dt_limit=dt_limit) + out = rearrange(out, "b s h p -> b s (h p)") + if rmsnorm_weight is not None: + out = rmsnorm_fn(out, rmsnorm_weight, None, z=rearrange(z, "b l h p -> b l (h p)"), eps=rmsnorm_eps, + norm_before_gate=norm_before_gate) + if outproj_weight is not None: + out = F.linear(out, outproj_weight, outproj_bias) + return out + diff --git a/mamba/mamba_ssm/ops/triton/ssd_state_passing.py b/mamba/mamba_ssm/ops/triton/ssd_state_passing.py new file mode 100644 index 0000000000000000000000000000000000000000..63863b8236e1c091741c9faeb6f4a41376fc5b42 --- /dev/null +++ b/mamba/mamba_ssm/ops/triton/ssd_state_passing.py @@ -0,0 +1,348 @@ +# Copyright (c) 2024, Tri Dao, Albert Gu. 
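+
+# The forward kernel in this file implements the chunk-level recurrence
+#     passed_state[c] = exp(dA_chunk_cumsum[c - 1]) * passed_state[c - 1] + states[c - 1]
+# with passed_state[0] equal to the initial states (or zeros). `out` holds
+# passed_state[0 .. nchunks - 1] and `final_states` holds the value obtained after folding in
+# the last chunk. `state_passing_ref` at the bottom of this file is an equivalent, but less
+# numerically stable, pure-PyTorch reference.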
+ +"""We want triton==2.1.0 or 2.2.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE': 64}), + triton.Config({'BLOCK_SIZE': 128}), + triton.Config({'BLOCK_SIZE': 256}), + triton.Config({'BLOCK_SIZE': 512}), + triton.Config({'BLOCK_SIZE': 1024}), + triton.Config({'BLOCK_SIZE': 2048}), + ], + key=['dim'], +) +@triton.jit +def _state_passing_fwd_kernel( + # Pointers to matrices + states_ptr, out_ptr, final_states_ptr, dA_cs_ptr, initstates_ptr, seq_idx_ptr, + # Matrix dimensions + dim, nchunks, seqlen, chunk_size, + # Strides + stride_states_batch, stride_states_chunk, stride_states_head, stride_states_dim, + stride_out_batch, stride_out_chunk, stride_out_head, stride_out_dim, + stride_final_states_batch, stride_final_states_head, stride_final_states_dim, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, + stride_initstates_batch, stride_initstates_head, stride_initstates_dim, + stride_seq_idx_batch, stride_seq_idx_seqlen, + # Meta-parameters + HAS_INITSTATES: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + states_ptr += pid_b * stride_states_batch + pid_h * stride_states_head + dA_cs_ptr += pid_b * stride_dA_cs_batch + pid_h * stride_dA_cs_head + out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + final_states_ptr += pid_b * stride_final_states_batch + pid_h * stride_final_states_head + if HAS_INITSTATES: + initstates_ptr += pid_b * stride_initstates_batch + pid_h * stride_initstates_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + + offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + states_ptrs = states_ptr + offs_m * stride_states_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + final_states_ptrs = final_states_ptr + offs_m * stride_final_states_dim + + if not HAS_INITSTATES: + states = tl.zeros((BLOCK_SIZE, ), dtype=tl.float32) + else: + initstates_ptrs = initstates_ptr + offs_m * stride_initstates_dim + states = tl.load(initstates_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + tl.store(out_ptrs, states, mask=offs_m < dim) + out_ptrs += stride_out_chunk + seq_idx = 0 + for c in range(nchunks): + new_states = tl.load(states_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dA_cs = tl.load(dA_cs_ptr).to(tl.float32) + scale = tl.exp(dA_cs) + if HAS_SEQ_IDX: + seq_idx_new = tl.load(seq_idx_ptr + (min((c + 1) * chunk_size, seqlen) - 1) * stride_seq_idx_seqlen) + scale = tl.where(seq_idx_new == seq_idx, scale, 0.0) + seq_idx = seq_idx_new + states = scale * states + new_states + if c < nchunks - 1: + tl.store(out_ptrs, states, mask=offs_m < dim) + else: + tl.store(final_states_ptrs, states, mask=offs_m < dim) + states_ptrs += stride_states_chunk + dA_cs_ptr += stride_dA_cs_chunk + out_ptrs += stride_out_chunk + + +@triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE': 64}), + triton.Config({'BLOCK_SIZE': 128}), + triton.Config({'BLOCK_SIZE': 256}), + triton.Config({'BLOCK_SIZE': 512}), + triton.Config({'BLOCK_SIZE': 1024}), + triton.Config({'BLOCK_SIZE': 2048}), + ], + key=['dim'], +) +@triton.jit +def _state_passing_bwd_kernel( + # Pointers to matrices + dout_ptr, out_ptr, dA_cs_ptr, dfinal_states_ptr, seq_idx_ptr, + dstates_ptr, ddA_cs_ptr, dinitstates_ptr, states_converted_ptr, + # Matrix dimensions + dim, nchunks, seqlen, 
chunk_size, + # Strides + stride_dout_batch, stride_dout_chunk, stride_dout_head, stride_dout_dim, + stride_out_batch, stride_out_chunk, stride_out_head, stride_out_dim, + stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, + stride_dfinal_states_batch, stride_dfinal_states_head, stride_dfinal_states_dim, + stride_seq_idx_batch, stride_seq_idx_seqlen, + stride_dstates_batch, stride_dstates_chunk, stride_dstates_head, stride_dstates_dim, + stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head, + stride_dinitstates_batch, stride_dinitstates_head, stride_dinitstates_dim, + # Meta-parameters + CONVERT_STATES: tl.constexpr, + HAS_DFINAL_STATES: tl.constexpr, + HAS_DINITSTATES: tl.constexpr, + HAS_SEQ_IDX: tl.constexpr, + BLOCK_SIZE: tl.constexpr, +): + pid_b = tl.program_id(axis=1) + pid_h = tl.program_id(axis=2) + pid_m = tl.program_id(axis=0) + dstates_ptr += pid_b * stride_dstates_batch + pid_h * stride_dstates_head + (nchunks - 1) * stride_dstates_chunk + dA_cs_ptr += pid_b * stride_dA_cs_batch + pid_h * stride_dA_cs_head + (nchunks - 1) * stride_dA_cs_chunk + ddA_cs_ptr += pid_b * stride_ddA_cs_batch + pid_h * stride_ddA_cs_head + (nchunks - 1) * stride_ddA_cs_chunk + pid_m + out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + (nchunks - 1) * stride_out_chunk + dout_ptr += pid_b * stride_dout_batch + pid_h * stride_dout_head + (nchunks - 1) * stride_dout_chunk + if CONVERT_STATES: + states_converted_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + (nchunks - 1) * stride_out_chunk + if HAS_DFINAL_STATES: + dfinal_states_ptr += pid_b * stride_dfinal_states_batch + pid_h * stride_dfinal_states_head + if HAS_DINITSTATES: + dinitstates_ptr += pid_b * stride_dinitstates_batch + pid_h * stride_dinitstates_head + if HAS_SEQ_IDX: + seq_idx_ptr += pid_b * stride_seq_idx_batch + + offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + dstates_ptrs = dstates_ptr + offs_m * stride_dstates_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + dout_ptrs = dout_ptr + offs_m * stride_dout_dim + if CONVERT_STATES: + states_converted_ptrs = states_converted_ptr + offs_m * stride_out_dim + + if HAS_DFINAL_STATES: + dstates = tl.load(dfinal_states_ptr + offs_m * stride_dfinal_states_dim, mask=offs_m < dim, other=0.0).to(tl.float32) + else: + dstates = tl.zeros((BLOCK_SIZE, ), dtype=tl.float32) + tl.store(dstates_ptrs, dstates, mask=offs_m < dim) + if HAS_SEQ_IDX: + seq_idx = tl.load(seq_idx_ptr + (seqlen - 1) * stride_seq_idx_seqlen) + dstates_ptrs -= stride_dstates_chunk + for c in range(nchunks - 1): + dA_cs = tl.load(dA_cs_ptr).to(tl.float32) + scale = tl.exp(dA_cs) + if HAS_SEQ_IDX: + seq_idx_new = tl.load(seq_idx_ptr + (((nchunks - c - 1) * chunk_size - 1) * stride_seq_idx_seqlen)) + scale = tl.where(seq_idx_new == seq_idx, scale, 0.0) + seq_idx = seq_idx_new + out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if CONVERT_STATES: + tl.store(states_converted_ptrs, out, mask=offs_m < dim) + ddA = tl.sum(out * dstates) * scale + tl.store(ddA_cs_ptr, ddA) + dout = tl.load(dout_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dstates = scale * dstates + dout + tl.store(dstates_ptrs, dstates, mask=offs_m < dim) + dout_ptrs -= stride_dout_chunk + dstates_ptrs -= stride_dstates_chunk + dA_cs_ptr -= stride_dA_cs_chunk + ddA_cs_ptr -= stride_ddA_cs_chunk + out_ptrs -= stride_out_chunk + if CONVERT_STATES: + states_converted_ptrs -= stride_out_chunk + if CONVERT_STATES: + out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + 
tl.store(states_converted_ptrs, out, mask=offs_m < dim) + if not HAS_DINITSTATES: + tl.store(ddA_cs_ptr, 0.0) + else: + dA_cs = tl.load(dA_cs_ptr).to(tl.float32) + scale = tl.exp(dA_cs) + if HAS_SEQ_IDX: + scale = tl.where(seq_idx == 0, scale, 0.0) + out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + ddA = tl.sum(out * dstates) * scale + tl.store(ddA_cs_ptr, ddA) + dout = tl.load(dout_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dstates = scale * dstates + dout + tl.store(dinitstates_ptr + offs_m * stride_dinitstates_dim, dstates, mask=offs_m < dim) + + +def _state_passing_fwd(states, dA_chunk_cumsum, initial_states=None, seq_idx=None, chunk_size=None, + out_dtype=None): + batch, nchunks, nheads, dim = states.shape + assert dA_chunk_cumsum.shape == (batch, nheads, nchunks) + if initial_states is not None: + assert initial_states.shape == (batch, nheads, dim) + if seq_idx is not None: + assert chunk_size is not None + seqlen = seq_idx.shape[-1] + assert seq_idx.shape == (batch, seqlen) + out_dtype = states.dtype if out_dtype is None else out_dtype + out = torch.empty((batch, nchunks, nheads, dim), device=states.device, dtype=out_dtype) + final_states = torch.empty((batch, nheads, dim), device=states.device, dtype=torch.float32) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE']), batch, nheads) + with torch.cuda.device(states.device.index): + _state_passing_fwd_kernel[grid]( + states, out, final_states, dA_chunk_cumsum, initial_states, seq_idx, + dim, nchunks, seqlen if seq_idx is not None else 0, chunk_size if seq_idx is not None else 0, + states.stride(0), states.stride(1), states.stride(2), states.stride(3), + out.stride(0), out.stride(1), out.stride(2), out.stride(3), + final_states.stride(0), final_states.stride(1), final_states.stride(2), + dA_chunk_cumsum.stride(0), dA_chunk_cumsum.stride(2), dA_chunk_cumsum.stride(1), + *((initial_states.stride(0), initial_states.stride(1), initial_states.stride(2)) + if initial_states is not None else (0, 0, 0)), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + HAS_INITSTATES=initial_states is not None, + HAS_SEQ_IDX=seq_idx is not None, + ) + return out, final_states + + +def _state_passing_bwd( + states, dA_chunk_cumsum, dout, dfinal_states=None, seq_idx=None, has_initial_states=None, + dstates_dtype=None, states_dtype=None, chunk_size=None +): + """ + states contains the initial_states at index 0. The final states are not included in states. 
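+    Shapes (matching the assertions below):
+        states:          (batch, nchunks, nheads, dim)
+        dA_chunk_cumsum: (batch, nheads, nchunks)
+        dout:            (batch, nchunks, nheads, dim)
+        dfinal_states:   (batch, nheads, dim), if given
+    Returns dstates (same shape as dout), ddA_chunk_cumsum of shape (batch, nheads, nchunks),
+    dinitstates (or None when has_initial_states is falsy), and additionally the states
+    converted to states_dtype when states_dtype is passed.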
+ """ + batch, nchunks, nheads, dim = states.shape + assert dA_chunk_cumsum.shape == (batch, nheads, nchunks) + assert dout.shape == (batch, nchunks, nheads, dim) + if seq_idx is not None: + assert chunk_size is not None + seqlen = seq_idx.shape[-1] + assert seq_idx.shape == (batch, seqlen) + dstates = torch.empty_like(dout, dtype=dstates_dtype if dstates_dtype is not None else dout.dtype) + if states_dtype is not None and states_dtype != states.dtype: + states_converted = torch.empty_like(states, dtype=dstates_dtype if dstates_dtype is not None else dout.dtype) + assert states_converted.stride() == states.stride() + else: + states_converted = None + if has_initial_states: + dinitstates = torch.empty_like(dstates[:, 0]) + else: + dinitstates = None + if dfinal_states is not None: + assert dfinal_states.shape == (batch, nheads, dim) + BLOCK_SIZE_min = 64 + n_blocks = (dim + BLOCK_SIZE_min - 1) // BLOCK_SIZE_min + ddA_chunk_cumsum = torch.empty(batch, nheads, nchunks, n_blocks, + dtype=torch.float32, device=dA_chunk_cumsum.device) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE']), batch, nheads) + with torch.cuda.device(dout.device.index): + _state_passing_bwd_kernel[grid]( + dout, states, dA_chunk_cumsum, dfinal_states, seq_idx, + dstates, ddA_chunk_cumsum, dinitstates, states_converted, + dim, nchunks, seqlen if seq_idx is not None else 0, chunk_size if seq_idx is not None else 0, + dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3), + states.stride(0), states.stride(1), states.stride(2), states.stride(3), + dA_chunk_cumsum.stride(0), dA_chunk_cumsum.stride(2), dA_chunk_cumsum.stride(1), + *((dfinal_states.stride(0), dfinal_states.stride(1), dfinal_states.stride(2)) + if dfinal_states is not None else (0, 0, 0)), + *((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)), + dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), + ddA_chunk_cumsum.stride(0), ddA_chunk_cumsum.stride(2), ddA_chunk_cumsum.stride(1), + *((dinitstates.stride(0), dinitstates.stride(1), dinitstates.stride(2)) + if dinitstates is not None else (0, 0, 0)), + CONVERT_STATES=states_converted is not None, + HAS_DFINAL_STATES=dfinal_states is not None, + HAS_DINITSTATES=dinitstates is not None, + HAS_SEQ_IDX=seq_idx is not None, + ) + BLOCK_SIZE_actual = _state_passing_bwd_kernel.best_config.kwargs["BLOCK_SIZE"] + n_valid_blocks = (dim + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual + ddA_chunk_cumsum = ddA_chunk_cumsum[..., :n_valid_blocks].sum(dim=-1).to(dtype=dA_chunk_cumsum.dtype) + if states_dtype is not None and states_dtype == states.dtype: + states_converted = states + return (dstates, ddA_chunk_cumsum, dinitstates) if states_dtype is None else (dstates, ddA_chunk_cumsum, dinitstates, states_converted) + + +class StatePassingFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, states, dA_chunk_cumsum, initial_states=None): + batch, nchunks, nheads, dim = states.shape + assert dA_chunk_cumsum.shape == (batch, nheads, nchunks) + if states.stride(-1) != 1: + states = states.contiguous() + out, final_states = _state_passing_fwd(states, dA_chunk_cumsum, initial_states) + ctx.save_for_backward(out, dA_chunk_cumsum) + ctx.has_initial_states = initial_states is not None + return out, final_states + + @staticmethod + def backward(ctx, dout, dfinal_states): + out, dA_chunk_cumsum = ctx.saved_tensors + batch, nchunks, nheads, dim = out.shape + assert dout.shape == (batch, nchunks, nheads, dim) + assert dA_chunk_cumsum.shape == (batch, nheads, 
nchunks) + assert dfinal_states.shape == (batch, nheads, dim) + if dout.stride(-1) != 1: + dout = dout.contiguous() + dstates, ddA_chunk_cumsum, dinitstates = _state_passing_bwd( + out, dA_chunk_cumsum, dout, dfinal_states=dfinal_states , has_initial_states=ctx.has_initial_states + ) + return dstates, ddA_chunk_cumsum, dinitstates + + +def state_passing(states, dA_chunk_cumsum, initial_states=None): + """ + Argument: + states: (batch, nchunks, nheads, dim) + dA_chunk_cumsum: (batch, nheads, nchunks) + initial_states: (batch, nheads, dim) + Return: + out: (batch, nchunks, nheads, dim) + final_states: (batch, nheads, dim) + """ + return StatePassingFn.apply(states, dA_chunk_cumsum, initial_states) + + +def state_passing_ref(states, dA_chunk_cumsum, initial_states=None): + """ + Argument: + states: (batch, nchunks, nheads, dim) + dA_chunk_cumsum: (batch, nheads, nchunks) + initial_states: (batch, nheads, dim) + Return: + out: (batch, nchunks, nheads, dim) + final_states: (batch, nheads, dim) + """ + if initial_states is None: + initial_states = torch.zeros_like(states[:, 0]) + states = torch.cat([rearrange(initial_states, "b h d -> b 1 h d"), states], dim=1) + dA_chunk_cumsum = F.pad(dA_chunk_cumsum, (1, 0)) + dA_chunk_cumsum = torch.cumsum(dA_chunk_cumsum, dim=-1) + nchunks = dA_chunk_cumsum.shape[-1] + # (batch, nheads, nchunks, nchunks) + dt_chunk_segment_sum = dA_chunk_cumsum[:, :, :, None] - dA_chunk_cumsum[:, :, None, :] + # (batch, nheads, nchunks, nchunks) + decay_chunk = torch.exp(dt_chunk_segment_sum) + causal_mask = torch.tril(torch.ones(nchunks, nchunks, device=states.device, dtype=bool), diagonal=0) + decay_chunk = decay_chunk.masked_fill(~causal_mask, 0) + out = torch.einsum("bhzc,bchd->bzhd", decay_chunk.to(dtype=states.dtype), states) + return out[:, :-1], out[:, -1] diff --git a/mamba/mamba_ssm/utils/__init__.py b/mamba/mamba_ssm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mamba/mamba_ssm/utils/generation.py b/mamba/mamba_ssm/utils/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..74abead94003bdbdeee954ca3e4f4f38ea7ac9fc --- /dev/null +++ b/mamba/mamba_ssm/utils/generation.py @@ -0,0 +1,387 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. +import gc +import time +from collections import namedtuple +from dataclasses import dataclass, field +from functools import partial +from typing import Callable, Optional, Sequence, Union + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat +from torch import Tensor +from torch.profiler import ProfilerActivity, profile, record_function +from transformers.generation import GreedySearchDecoderOnlyOutput, SampleDecoderOnlyOutput, TextStreamer + + +@dataclass +class InferenceParams: + """Inference parameters that are passed to the main model in order + to efficienly calculate and store the context during inference.""" + + max_seqlen: int + max_batch_size: int + seqlen_offset: int = 0 + batch_size_offset: int = 0 + key_value_memory_dict: dict = field(default_factory=dict) + lengths_per_sample: Optional[Tensor] = None + + def reset(self, max_seqlen, max_batch_size): + self.max_seqlen = max_seqlen + self.max_batch_size = max_batch_size + self.seqlen_offset = 0 + if self.lengths_per_sample is not None: + self.lengths_per_sample.zero_() + + +def modify_logits_for_min_p_filtering(logits, min_p): + """Set the logits for none min_p values to -inf. 
Done in-place.""" + if min_p <= 0.0 or min_p >= 1.0: + return + indices_to_remove = logits < min_p + logits.masked_fill_(indices_to_remove, float("-Inf")) +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L231 +def modify_logits_for_top_k_filtering(logits, top_k): + """Set the logits for none top-k values to -inf. Done in-place.""" + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits.masked_fill_(indices_to_remove, float("-Inf")) + + +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L170 +def modify_logits_for_top_p_filtering(logits, top_p): + """Set the logits for none top-p values to -inf. Done in-place.""" + if top_p <= 0.0 or top_p >= 1.0: + return + # First sort and calculate cumulative sum of probabilities. + sorted_logits, sorted_indices = torch.sort(logits, descending=False) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs <= (1 - top_p) + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter( + 1, sorted_indices, sorted_indices_to_remove + ) + logits.masked_fill_(indices_to_remove, float("-inf")) + + +def modify_logit_for_repetition_penalty(logits, prev_output_tokens, repetition_penalty=1.0): + """Apply repetition penalty. See https://arxiv.org/abs/1909.05858 + logits: (batch_size, vocab_size) + prev_output_tokens: (batch_size, seq_len) + """ + if repetition_penalty == 1.0: + return logits + score = torch.gather(logits, 1, prev_output_tokens) + # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability + score = torch.where(score < 0, score * repetition_penalty, score / repetition_penalty) + logits.scatter_(1, prev_output_tokens, score) + return logits + + +def sample(logits, top_k=1, top_p=0.0, min_p=0.0, temperature=1.0): + """Sample from top-k logits. + Arguments: + logits: Tensor of shape (batch_size, vocab_size) + """ + if top_k == 1: # Short-circuit for greedy decoding + return logits.argmax(dim=-1) + else: + if top_p > 0.0: + assert top_p <= 1.0, "top-p should be in (0, 1]." 
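+        # From here on: if top_k > 0, the logits are first restricted to the top-k candidates
+        # and top-p filtering is applied within that subset; if top_k == 0, either min-p
+        # filtering (when min_p > 0) or top-p filtering over the full vocabulary is applied
+        # before sampling.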
+ if top_k > 0: + top_k = min(top_k, logits.size(-1)) # Safety check + logits_top, indices = torch.topk(logits, top_k, dim=-1) + if temperature != 1.0: + logits_top /= temperature + modify_logits_for_top_p_filtering(logits_top, top_p) + return indices[ + torch.arange(indices.shape[0], device=indices.device), + torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1), + ] + else: + if min_p > 0.0: + logits_top = logits.clone() + max_prob = logits_top[..., 0].item() + min_prob = max_prob * min_p + modify_logits_for_min_p_filtering(logits_top, min_prob) + if temperature != 1.0: + logits_top /= temperature + return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1) + # Clone so that when we modify for top_p we don't change the original logits + logits_top = logits / temperature if temperature != 1.0 else logits.clone() + modify_logits_for_top_p_filtering(logits_top, top_p) + return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze( + dim=-1 + ) + + +@torch.inference_mode() +def decode( + input_ids, + model, + max_length, + top_k=1, + top_p=0.0, + min_p=0.0, + temperature=1.0, + repetition_penalty=1.0, + eos_token_id=None, + teacher_outputs=None, + vocab_size=None, + cg=False, + enable_timing=False, + streamer: Optional[TextStreamer] = None +): + """Decoding, either greedy or with top-k or top-p sampling. + If top-k = 0, don't limit the number of candidates (pure sampling). + Top-k and top-p can be used together. If top_k > 0 and top_p > 0, then top-k is applied first, + then top-p. + We assume that all sequences in the same batch have the same length. + + Arguments: + input_ids: (batch, seq_len) + max_length: int + teacher_outputs (optional): (batch, seq_len). If provided, instead of sampling from the + logits, the next token is taken from the teacher_outputs. Useful for testing. 
+ Returns: GreedySearchDecoderOnlyOutput or SampleDecoderOnlyOutput, with the following fields: + sequences: (batch, max_length) + scores: tuples of (batch, vocab_size) + """ + if streamer is not None: + streamer.put(input_ids.cpu()) + + batch_size, seqlen_og = input_ids.shape + teacher_output_len = teacher_outputs.shape[1] if teacher_outputs is not None else 0 + if cg: + if not hasattr(model, "_decoding_cache"): + model._decoding_cache = None + model._decoding_cache = update_graph_cache( + model, + model._decoding_cache, + batch_size, + seqlen_og, + max_length, + ) + inference_params = model._decoding_cache.inference_params + inference_params.reset(max_length, batch_size) + else: + inference_params = InferenceParams(max_seqlen=max_length, max_batch_size=batch_size) + + def get_logits(input_ids, inference_params): + decoding = inference_params.seqlen_offset > 0 + if decoding: + position_ids = torch.full( + (batch_size, 1), + inference_params.seqlen_offset, + dtype=torch.long, + device=input_ids.device, + ) + else: + position_ids = None + if not cg or not decoding: + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=1, + ).logits.squeeze(dim=1) + else: + logits = model._decoding_cache.run( + input_ids, position_ids, inference_params.seqlen_offset + ).squeeze(dim=1) + return logits[..., :vocab_size] if vocab_size is not None else logits + + def sample_tokens(logits, inference_params): + if teacher_outputs is None or teacher_output_len <= inference_params.seqlen_offset: + token = sample(logits, top_k=top_k, top_p=top_p, min_p=min_p, temperature=temperature) + else: + token = teacher_outputs[:, inference_params.seqlen_offset] + # return rearrange(token, "b -> b 1") + return token.unsqueeze(1) + + def should_stop(current_token, inference_params): + if inference_params.seqlen_offset == 0: + return False + if eos_token_id is not None and (current_token == eos_token_id).all(): + return True + if inference_params.seqlen_offset >= max_length - 1: + return True + return False + + start = torch.cuda.Event(enable_timing=enable_timing) + end = torch.cuda.Event(enable_timing=enable_timing) + + if enable_timing: + start.record() + scores, sequences = [], [input_ids] + sequences_cat = input_ids + while not should_stop(sequences[-1], inference_params): + scores.append(get_logits(sequences[-1], inference_params)) + inference_params.seqlen_offset += sequences[-1].shape[1] + if repetition_penalty == 1.0: + sampled_tokens = sample_tokens(scores[-1], inference_params) + else: + logits = modify_logit_for_repetition_penalty( + scores[-1].clone(), sequences_cat, repetition_penalty + ) + sampled_tokens = sample_tokens(logits, inference_params) + sequences_cat = torch.cat([sequences_cat, sampled_tokens], dim=1) + sequences.append(sampled_tokens) + if streamer is not None: + streamer.put(sampled_tokens.cpu()) + if streamer is not None: + streamer.end() + if enable_timing: + end.record() + torch.cuda.synchronize() + print(f"Prompt processing + decoding time: {(start.elapsed_time(end)):.0f}ms") + output_cls = GreedySearchDecoderOnlyOutput if top_k == 1 else SampleDecoderOnlyOutput + return output_cls(sequences=torch.cat(sequences, dim=1), scores=tuple(scores)) + + +class GenerationMixin: + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + raise NotImplementedError + + def generate( + self, + input_ids, + max_length, + top_k=1, + top_p=0.0, + min_p=0.0, + temperature=1.0, + return_dict_in_generate=False, + 
output_scores=False, + **kwargs, + ): + output = decode( + input_ids, self, max_length, top_k=top_k, top_p=top_p, min_p = min_p, temperature=temperature, **kwargs + ) + if not output_scores: + output.scores = None + return output if return_dict_in_generate else output.sequences + + +@dataclass +class DecodingCGCache: + max_batch_size: int = 0 + max_seqlen: int = 0 + device = None + dtype = None + callables: dict = field(default_factory=dict) + mempool = None + inference_params: Optional[InferenceParams] = None + run: Optional[Callable] = None + + +@torch.inference_mode() +def update_graph_cache( + model, + cache, + batch_size, + seqlen_og, + max_seqlen, + decoding_seqlens=(1,), + dtype=None, + n_warmups=2, +): + if cache is None: + cache = DecodingCGCache() + param_example = next(iter(model.parameters())) + device = param_example.device + if dtype is None: + dtype = param_example.dtype + if ( + (device, dtype) != (cache.device, cache.dtype) + or batch_size > cache.max_batch_size + or max_seqlen > cache.max_seqlen + ): # Invalidate the cache + cache.callables = {} + cache.mempool = None + cache.inference_params = None + gc.collect() + cache.device, cache.dtype = device, dtype + cache.max_batch_size, cache.max_seqlen = batch_size, max_seqlen + assert hasattr(model, "allocate_inference_cache"), "CUDA graph decoding requires that the model has a method allocate_inference_cache" + inf_cache = model.allocate_inference_cache(batch_size, max_seqlen, dtype) + lengths_per_sample = torch.full((batch_size,), seqlen_og, dtype=torch.int32, device=device) + cache.inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + seqlen_offset=seqlen_og, + key_value_memory_dict=inf_cache, + lengths_per_sample=lengths_per_sample, + ) + cache.mempool = torch.cuda.graphs.graph_pool_handle() + for decoding_seqlen in decoding_seqlens: + if (batch_size, decoding_seqlen) not in cache.callables: + cache.callables[batch_size, decoding_seqlen] = capture_graph( + model, + cache.inference_params, + batch_size, + max_seqlen, + decoding_seqlen=decoding_seqlen, + mempool=cache.mempool, + n_warmups=n_warmups, + ) + + def dispatch(input_ids, position_ids, seqlen): + batch_size, decoding_seqlen = input_ids.shape[:2] + return cache.callables[batch_size, decoding_seqlen](input_ids, position_ids, seqlen) + + cache.run = dispatch + cache.inference_params.seqlen_offset = 0 # Reset so it's not confusing + return cache + + +def capture_graph( + model, inference_params, batch_size, max_seqlen, decoding_seqlen=1, mempool=None, n_warmups=2 +): + device = next(iter(model.parameters())).device + input_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + position_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + seqlen_offset_og = inference_params.seqlen_offset + inference_params.seqlen_offset = max_seqlen - decoding_seqlen + inference_params.lengths_per_sample[:] = inference_params.seqlen_offset + + # Warmup before capture + s = torch.cuda.Stream() + s.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(s): + for _ in range(n_warmups): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + s.synchronize() + # This might be needed for correctness if we run with NCCL_GRAPH_MIXING_SUPPORT=0, + # which requires that graph launch and non-captured launch to not overlap (I think, + # that's how I interpret the documentation). 
I'm not sure if this is required. + if torch.distributed.is_initialized(): + torch.distributed.barrier() + torch.cuda.current_stream().wait_stream(s) + # Captures the graph + # To allow capture, automatically sets a side stream as the current stream in the context + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, pool=mempool): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + + def run(new_input_ids, new_position_ids, seqlen): + inference_params.lengths_per_sample[:] = seqlen + input_ids.copy_(new_input_ids) + position_ids.copy_(new_position_ids) + graph.replay() + return logits.clone() + + inference_params.seqlen_offset = seqlen_offset_og + return run diff --git a/mamba/mamba_ssm/utils/hf.py b/mamba/mamba_ssm/utils/hf.py new file mode 100644 index 0000000000000000000000000000000000000000..0d7555acddbd260636d1d14d5bd6324f6af0056a --- /dev/null +++ b/mamba/mamba_ssm/utils/hf.py @@ -0,0 +1,23 @@ +import json + +import torch + +from transformers.utils import WEIGHTS_NAME, CONFIG_NAME +from transformers.utils.hub import cached_file + + +def load_config_hf(model_name): + resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False) + return json.load(open(resolved_archive_file)) + + +def load_state_dict_hf(model_name, device=None, dtype=None): + # If not fp32, then we don't want to load directly to the GPU + mapped_device = "cpu" if dtype not in [torch.float32, None] else device + resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False) + return torch.load(resolved_archive_file, map_location=mapped_device) + # Convert dtype before moving to GPU to save memory + if dtype is not None: + state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()} + state_dict = {k: v.to(device=device) for k, v in state_dict.items()} + return state_dict diff --git a/mamba/pyproject.toml b/mamba/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..8703ad3513d2155eb8124b03cecbbc9d622cb0b0 --- /dev/null +++ b/mamba/pyproject.toml @@ -0,0 +1,46 @@ +[project] +name = "mamba_ssm" +description = "Mamba state-space model" +readme = "README.md" +authors = [ + { name = "Tri Dao", email = "tri@tridao.me" }, + { name = "Albert Gu", email = "agu@cs.cmu.edu" } +] +requires-python = ">= 3.7" +dynamic = ["version"] +license = { file = "LICENSE" } # Include a LICENSE file in your repo +keywords = ["cuda", "pytorch", "state-space model"] +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: BSD License", + "Operating System :: Unix" +] +dependencies = [ + "torch", + "ninja", + "einops", + "triton", + "transformers", + "packaging", + "setuptools>=61.0.0", +] +urls = { name = "Repository", url = "https://github.com/state-spaces/mamba"} + +[project.optional-dependencies] +causal-conv1d = [ + "causal-conv1d>=1.2.0" +] +dev = [ + "pytest" +] + + +[build-system] +requires = [ + "setuptools>=61.0.0", + "wheel", + "torch", + "packaging", + "ninja", +] +build-backend = "setuptools.build_meta" diff --git a/mamba/rocm_patch/rocm6_0.patch b/mamba/rocm_patch/rocm6_0.patch new file mode 100644 index 0000000000000000000000000000000000000000..e1fa60d42b4e8972e638aac8ec3aacdf49b52946 --- /dev/null +++ b/mamba/rocm_patch/rocm6_0.patch @@ -0,0 +1,56 @@ +--- /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h 2023-12-12 20:11:48.000000000 +0000 ++++ rocm_update_files/amd_hip_bf16.h 
2024-05-20 17:40:26.983349079 +0000 +@@ -137,7 +137,7 @@ + * \ingroup HIP_INTRINSIC_BFLOAT16_CONV + * \brief Converts float to bfloat16 + */ +-__HOST_DEVICE__ __hip_bfloat16 __float2bfloat16(float f) { ++__HOST_DEVICE__ static inline __hip_bfloat16 __float2bfloat16(float f) { + __hip_bfloat16 ret; + union { + float fp32; +@@ -181,7 +181,7 @@ + * \ingroup HIP_INTRINSIC_BFLOAT162_CONV + * \brief Converts and moves bfloat162 to float2 + */ +-__HOST_DEVICE__ float2 __bfloat1622float2(const __hip_bfloat162 a) { ++__HOST_DEVICE__ static inline float2 __bfloat1622float2(const __hip_bfloat162 a) { + return float2{__bfloat162float(a.x), __bfloat162float(a.y)}; + } + +@@ -209,7 +209,7 @@ + * \ingroup HIP_INTRINSIC_BFLOAT162_CONV + * \brief Convert double to __hip_bfloat16 + */ +-__HOST_DEVICE__ __hip_bfloat16 __double2bfloat16(const double a) { ++__HOST_DEVICE__ static inline __hip_bfloat16 __double2bfloat16(const double a) { + return __float2bfloat16((float)a); + } + +@@ -217,7 +217,7 @@ + * \ingroup HIP_INTRINSIC_BFLOAT162_CONV + * \brief Convert float2 to __hip_bfloat162 + */ +-__HOST_DEVICE__ __hip_bfloat162 __float22bfloat162_rn(const float2 a) { ++__HOST_DEVICE__ static inline __hip_bfloat162 __float22bfloat162_rn(const float2 a) { + return __hip_bfloat162{__float2bfloat16(a.x), __float2bfloat16(a.y)}; + } + +@@ -247,7 +247,7 @@ + * \ingroup HIP_INTRINSIC_BFLOAT162_CONV + * \brief Converts high 16 bits of __hip_bfloat162 to float and returns the result + */ +-__HOST_DEVICE__ float __high2float(const __hip_bfloat162 a) { return __bfloat162float(a.y); } ++__HOST_DEVICE__ static inline float __high2float(const __hip_bfloat162 a) { return __bfloat162float(a.y); } + + /** + * \ingroup HIP_INTRINSIC_BFLOAT162_CONV +@@ -275,7 +275,7 @@ + * \ingroup HIP_INTRINSIC_BFLOAT162_CONV + * \brief Converts low 16 bits of __hip_bfloat162 to float and returns the result + */ +-__HOST_DEVICE__ float __low2float(const __hip_bfloat162 a) { return __bfloat162float(a.x); } ++__HOST_DEVICE__ static inline float __low2float(const __hip_bfloat162 a) { return __bfloat162float(a.x); } + + /** + * \ingroup HIP_INTRINSIC_BFLOAT162_CONV diff --git a/mamba/setup.py b/mamba/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..83930675ba28344e914049ccb9f6b33b71cef495 --- /dev/null +++ b/mamba/setup.py @@ -0,0 +1,278 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. 
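The rocm6_0.patch above marks the HIP bfloat16 conversion helpers `static inline`, presumably so the header can be included from several translation units without duplicate-definition link errors on ROCm 6.0. A rough, standalone way to check whether the installed ROCm needs the patch (mirroring the `hipcc --version` probe that setup.py performs below, and assuming `hipcc` lives under `$ROCM_PATH/bin` or `/opt/rocm/bin`) could look like:

```python
# Rough sketch (not part of the build): probe the installed HIP version the same
# way setup.py does, to decide whether rocm_patch/rocm6_0.patch is needed.
import os
import subprocess
from packaging.version import Version, parse

def installed_hip_version():
    hipcc = os.path.join(os.environ.get("ROCM_PATH", "/opt/rocm"), "bin", "hipcc")
    try:
        out = subprocess.check_output([hipcc, "--version"], universal_newlines=True)
    except (OSError, subprocess.CalledProcessError):
        return None
    for line in out.splitlines():
        if "HIP version" in line:
            # The last token on this line is the version; normalize the local part.
            return parse(line.split()[-1].rstrip("-").replace("-", "+"))
    return None

hip = installed_hip_version()
if hip is not None and Version("6.0") <= hip < Version("6.1"):
    print("ROCm 6.0 detected: apply rocm_patch/rocm6_0.patch before building.")
```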
+import sys +import warnings +import os +import re +import ast +from pathlib import Path +from packaging.version import parse, Version +import platform +import shutil + +from setuptools import setup, find_packages +import subprocess + +import urllib.request +import urllib.error +from wheel.bdist_wheel import bdist_wheel as _bdist_wheel + +import torch +from torch.utils.cpp_extension import ( + BuildExtension, + CUDAExtension, + HIP_HOME +) + + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + + +# ninja build does not work unless include_dirs are abs path +this_dir = os.path.dirname(os.path.abspath(__file__)) + +PACKAGE_NAME = "mamba_ssm" + +BASE_WHEEL_URL = "https://github.com/state-spaces/mamba/releases/download/{tag_name}/{wheel_name}" + +# FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels +# SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation +FORCE_BUILD = os.getenv("MAMBA_FORCE_BUILD", "FALSE") == "TRUE" +SKIP_CUDA_BUILD = os.getenv("MAMBA_SKIP_CUDA_BUILD", "FALSE") == "TRUE" +# For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI +FORCE_CXX11_ABI = os.getenv("MAMBA_FORCE_CXX11_ABI", "FALSE") == "TRUE" + + +def get_platform(): + """ + Returns the platform name as used in wheel filenames. + """ + if sys.platform.startswith("linux"): + return "linux_x86_64" + elif sys.platform == "darwin": + mac_version = ".".join(platform.mac_ver()[0].split(".")[:2]) + return f"macosx_{mac_version}_x86_64" + elif sys.platform == "win32": + return "win_amd64" + else: + raise ValueError("Unsupported platform: {}".format(sys.platform)) + + + + +def get_hip_version(rocm_dir): + + hipcc_bin = "hipcc" if rocm_dir is None else os.path.join(rocm_dir, "bin", "hipcc") + try: + raw_output = subprocess.check_output( + [hipcc_bin, "--version"], universal_newlines=True + ) + except Exception as e: + print( + f"hip installation not found: {e} ROCM_PATH={os.environ.get('ROCM_PATH')}" + ) + return None, None + + for line in raw_output.split("\n"): + if "HIP version" in line: + rocm_version = parse(line.split()[-1].rstrip('-').replace('-', '+')) # local version is not parsed correctly + return line, rocm_version + + return None, None + + +def get_torch_hip_version(): + + if torch.version.hip: + return parse(torch.version.hip.split()[-1].rstrip('-').replace('-', '+')) + else: + return None + + +def check_if_hip_home_none(global_option: str) -> None: + + if HIP_HOME is not None: + return + # warn instead of error because user could be downloading prebuilt wheels, so hipcc won't be necessary + # in that case. + warnings.warn( + f"{global_option} was requested, but hipcc was not found. Are you sure your environment has hipcc available?" + ) + + + +def append_nvcc_threads(nvcc_extra_args): + return nvcc_extra_args + ["--threads", "4"] + + +cmdclass = {} +ext_modules = [] + + +HIP_BUILD = bool(torch.version.hip) + +if not SKIP_CUDA_BUILD: + print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__)) + TORCH_MAJOR = int(torch.__version__.split(".")[0]) + TORCH_MINOR = int(torch.__version__.split(".")[1]) + + cc_flag = [] + + if HIP_BUILD: + check_if_hip_home_none(PACKAGE_NAME) + + rocm_home = os.getenv("ROCM_PATH") + _, hip_version = get_hip_version(rocm_home) + + if HIP_HOME is not None: + if hip_version < Version("6.0"): + raise RuntimeError( + f"{PACKAGE_NAME} is only supported on ROCm 6.0 and above. 
" + "Note: make sure HIP has a supported version by running hipcc --version." + ) + if hip_version == Version("6.0"): + warnings.warn( + f"{PACKAGE_NAME} requires a patch to be applied when running on ROCm 6.0. " + "Refer to the README.md for detailed instructions.", + UserWarning + ) + + cc_flag.append("-DBUILD_PYTHON_PACKAGE") + + else: + + cc_flag.append("-gencode") + cc_flag.append("arch=compute_53,code=sm_53") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_62,code=sm_62") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_70,code=sm_70") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_72,code=sm_72") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_87,code=sm_87") + + + # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as + # torch._C._GLIBCXX_USE_CXX11_ABI + # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920 + if FORCE_CXX11_ABI: + torch._C._GLIBCXX_USE_CXX11_ABI = True + + if HIP_BUILD: + + extra_compile_args = { + "cxx": ["-O3", "-std=c++17"], + } + else: + extra_compile_args = { + "cxx": ["-O3", "-std=c++17"], + } + + +def get_package_version(): + with open(Path(this_dir) / PACKAGE_NAME / "__init__.py", "r") as f: + version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE) + public_version = ast.literal_eval(version_match.group(1)) + local_version = os.environ.get("MAMBA_LOCAL_VERSION") + if local_version: + return f"{public_version}+{local_version}" + else: + return str(public_version) + + +def get_wheel_url(): + # Determine the version numbers that will be used to determine the correct wheel + torch_version_raw = parse(torch.__version__) + + if HIP_BUILD: + # We're using the HIP version used to build torch, not the one currently installed + torch_hip_version = get_torch_hip_version() + hip_ver = f"{torch_hip_version.major}{torch_hip_version.minor}" + else: + # We're using the CUDA version used to build torch, not the one currently installed + # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME) + torch_cuda_version = parse(torch.version.cuda) + # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2 + # to save CI time. Minor versions should be compatible. 
+ torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2") + cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}" + + gpu_compute_version = hip_ver if HIP_BUILD else cuda_version + cuda_or_hip = "hip" if HIP_BUILD else "cu" + + python_version = f"cp{sys.version_info.major}{sys.version_info.minor}" + platform_name = get_platform() + mamba_ssm_version = get_package_version() + torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}" + cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper() + + # Determine wheel URL based on CUDA version, torch version, python version and OS + wheel_filename = f"{PACKAGE_NAME}-{mamba_ssm_version}+{cuda_or_hip}{gpu_compute_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl" + wheel_url = BASE_WHEEL_URL.format( + tag_name=f"v{mamba_ssm_version}", wheel_name=wheel_filename + ) + return wheel_url, wheel_filename + + +class CachedWheelsCommand(_bdist_wheel): + """ + The CachedWheelsCommand plugs into the default bdist wheel, which is ran by pip when it cannot + find an existing wheel (which is currently the case for all installs). We use + the environment parameters to detect whether there is already a pre-built version of a compatible + wheel available and short-circuits the standard full build pipeline. + """ + + def run(self): + if FORCE_BUILD: + return super().run() + + wheel_url, wheel_filename = get_wheel_url() + print("Guessing wheel URL: ", wheel_url) + try: + urllib.request.urlretrieve(wheel_url, wheel_filename) + + # Make the archive + # Lifted from the root wheel processing command + # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85 + if not os.path.exists(self.dist_dir): + os.makedirs(self.dist_dir) + + impl_tag, abi_tag, plat_tag = self.get_tag() + archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}" + + wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl") + print("Raw wheel path", wheel_path) + shutil.move(wheel_filename, wheel_path) + except urllib.error.HTTPError: + print("Precompiled wheel not found. Building from source...") + # If the wheel could not be downloaded, build from source + super().run() + +setup( + name=PACKAGE_NAME, + version=get_package_version(), + packages=find_packages( + exclude=( + "build", + "csrc", + "include", + "tests", + "dist", + "docs", + "benchmarks", + "mamba_ssm.egg-info", + ) + ), + long_description=long_description, + long_description_content_type="text/markdown", + + ext_modules=ext_modules, + cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension} + if ext_modules + else { + "bdist_wheel": CachedWheelsCommand, + } +) diff --git a/mamba/tests/ops/test_selective_scan.py b/mamba/tests/ops/test_selective_scan.py new file mode 100644 index 0000000000000000000000000000000000000000..8a834b3c93267d05f81c3e5156b35622a3c2d956 --- /dev/null +++ b/mamba/tests/ops/test_selective_scan.py @@ -0,0 +1,247 @@ +# Copyright (C) 2023, Tri Dao. 
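Before the tests, one note on the build logic above: the prebuilt wheel that `CachedWheelsCommand` tries to fetch is named entirely from the local environment. A rough CUDA-only sketch of the same naming scheme (mirroring `get_wheel_url`, assuming a CUDA build of torch; the version string below is a placeholder, since the real one is read from `mamba_ssm/__init__.py`) is:

```python
# CUDA-only sketch of the wheel name/URL scheme used by get_wheel_url() above.
# `version` is a placeholder; real builds read __version__ from the package.
import sys
import torch
from packaging.version import parse

def guess_wheel_url(version):
    torch_ver = parse(torch.__version__)
    cuda = parse(torch.version.cuda)      # CUDA version torch was built with
    # setup.py collapses CUDA 11.x -> 11.8 and 12.x -> 12.2 to limit CI builds.
    cuda = parse("11.8") if cuda.major == 11 else parse("12.2")
    py = f"cp{sys.version_info.major}{sys.version_info.minor}"
    abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper()
    wheel = (f"mamba_ssm-{version}+cu{cuda.major}{cuda.minor}"
             f"torch{torch_ver.major}.{torch_ver.minor}cxx11abi{abi}"
             f"-{py}-{py}-linux_x86_64.whl")
    return f"https://github.com/state-spaces/mamba/releases/download/v{version}/{wheel}"

print(guess_wheel_url("X.Y.Z"))  # placeholder version
```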
+ +import math + +import torch +import torch.nn.functional as F +import pytest + +from einops import rearrange + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, selective_scan_ref +from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, mamba_inner_ref + + +# @pytest.mark.parametrize('wtype', [torch.float32, torch.complex64]) +@pytest.mark.parametrize('wtype', [torch.float32]) +# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('itype', [torch.float32]) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096]) +@pytest.mark.parametrize('seqlen', [128, 256, 512, 1024, 2048, 4096]) +# @pytest.mark.parametrize('seqlen', [128]) +# @pytest.mark.parametrize("return_last_state", [False, True]) +@pytest.mark.parametrize("return_last_state", [True]) +# @pytest.mark.parametrize('has_delta_bias', [False, True]) +@pytest.mark.parametrize('has_delta_bias', [True]) +# @pytest.mark.parametrize('delta_softplus', [False, True]) +@pytest.mark.parametrize('delta_softplus', [True]) +# @pytest.mark.parametrize('has_z', [False, True]) +@pytest.mark.parametrize('has_z', [True]) +# @pytest.mark.parametrize('has_D', [False, True]) +@pytest.mark.parametrize('has_D', [True]) +@pytest.mark.parametrize("varBC_groups", [1, 2]) +# @pytest.mark.parametrize("varBC_groups", [1]) +# @pytest.mark.parametrize("is_variable_C", [False, True]) +@pytest.mark.parametrize("is_variable_C", [True]) +# @pytest.mark.parametrize("is_variable_B", [False, True]) +@pytest.mark.parametrize("is_variable_B", [True]) +def test_selective_scan(is_variable_B, is_variable_C, varBC_groups, has_D, has_z, has_delta_bias, + delta_softplus, return_last_state, seqlen, itype, wtype): + if varBC_groups > 1 and (not is_variable_B or not is_variable_C): + pytest.skip() # This config is not applicable + device = 'cuda' + rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 3e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + if has_z: # If we have z, the errors on the weights seem higher + rtolw = max(rtolw, rtol) + atolw = max(atolw, atol) + # set seed + torch.random.manual_seed(0) + batch_size = 2 + dim = 4 + dstate = 8 + is_complex = wtype == torch.complex64 + A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + if not is_variable_B: + B_shape = (dim, dstate) + elif varBC_groups == 1: + B_shape = (batch_size, dstate, seqlen if not is_complex else seqlen * 2) + else: + B_shape = (batch_size, varBC_groups, dstate, seqlen if not is_complex else seqlen * 2) + B = torch.randn(*B_shape, device=device, dtype=wtype if not is_variable_B else itype, + requires_grad=True) + if not is_variable_C: + C_shape = (dim, dstate) + elif varBC_groups == 1: + C_shape = (batch_size, dstate, seqlen if not is_complex else seqlen * 2) + else: + C_shape = (batch_size, varBC_groups, dstate, seqlen if not is_complex else seqlen * 2) + C = torch.randn(*C_shape, device=device, dtype=wtype if not is_variable_C else itype, + requires_grad=True) + if has_D: + D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + else: + D = None + if has_z: + z = torch.randn(batch_size, dim, seqlen, device=device, dtype=itype, requires_grad=True) + else: + z = None + if has_delta_bias: + delta_bias = (0.5 * torch.rand(dim, device=device, dtype=torch.float32)).requires_grad_() + else: + delta_bias = None + u = torch.randn(batch_size, dim, seqlen, device=device, 
dtype=itype, requires_grad=True) + delta = (0.5 * torch.rand(batch_size, dim, seqlen, device=device, dtype=itype)).requires_grad_() + A_ref = A.detach().clone().requires_grad_() + B_ref = B.detach().clone().requires_grad_() + C_ref = C.detach().clone().requires_grad_() + D_ref = D.detach().clone().requires_grad_() if D is not None else None + z_ref = z.detach().clone().requires_grad_() if z is not None else None + u_ref = u.detach().clone().requires_grad_() + delta_ref = delta.detach().clone().requires_grad_() + delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None + out, *rest = selective_scan_fn( + u, delta, A, B, C, D, z=z, + delta_bias=delta_bias, delta_softplus=delta_softplus, + return_last_state=return_last_state + ) + if return_last_state: + state = rest[0] + out_ref, *rest = selective_scan_ref( + u_ref, delta_ref, A_ref, B_ref, C_ref, D_ref, z=z_ref, + delta_bias=delta_bias_ref, delta_softplus=delta_softplus, + return_last_state=return_last_state + ) + if return_last_state: + state_ref = rest[0] + # dA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + # dt_u = delta * u + + print(f'Output max diff: {(out - out_ref).abs().max().item()}') + print(f'Output mean diff: {(out - out_ref).abs().mean().item()}') + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + if return_last_state: + print(f'State max diff: {(state - state_ref).abs().max().item()}') + assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) + + g = torch.randn_like(out) + out_ref.backward(g) + out.backward(g) + + print(f'du max diff: {(u.grad - u_ref.grad).abs().max().item()}') + print(f'ddelta max diff: {(delta.grad - delta_ref.grad).abs().max().item()}') + print(f'dA max diff: {(A.grad - A_ref.grad).abs().max().item()}') + print(f'dB max diff: {(B.grad - B_ref.grad).abs().max().item()}') + print(f'dC max diff: {(C.grad - C_ref.grad).abs().max().item()}') + if has_D: + print(f'dD max diff: {(D.grad - D_ref.grad).abs().max().item()}') + if has_z: + print(f'dz max diff: {(z.grad - z_ref.grad).abs().max().item()}') + if has_delta_bias: + print(f'ddelta_bias max diff: {(delta_bias.grad - delta_bias_ref.grad).abs().max().item()}') + + assert torch.allclose(u.grad, u_ref.grad.to(dtype=itype), rtol=rtol * 2, atol=atol * 2) + assert torch.allclose(delta.grad, delta_ref.grad.to(dtype=itype), rtol=rtol * 5, atol=atol * 10) + assert torch.allclose(A.grad, A_ref.grad, rtol=rtolw, atol=atolw * 5) + assert torch.allclose(B.grad, B_ref.grad, rtol=rtolw if not is_variable_B else rtol, + atol=atolw if not is_variable_B else atol) + assert torch.allclose(C.grad, C_ref.grad, rtol=rtolw if not is_variable_C else rtol, + atol=atolw if not is_variable_C else atol) + if has_D: + assert torch.allclose(D.grad, D_ref.grad, rtol=rtolw, atol=atolw) + if has_z: + assert torch.allclose(z.grad, z_ref.grad, rtol=rtolw, atol=atolw) + if has_delta_bias: + assert torch.allclose(delta_bias.grad, delta_bias_ref.grad, rtol=rtolw, atol=atolw) + + +@pytest.mark.parametrize('wtype', [torch.float32, torch.complex64]) +# @pytest.mark.parametrize('wtype', [torch.complex64]) +# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('itype', [torch.float32]) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096]) +@pytest.mark.parametrize('seqlen', [128]) +@pytest.mark.parametrize("is_variable_C", [False, True]) +# @pytest.mark.parametrize("is_variable_C", [False]) 
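For orientation, the test above checks `selective_scan_fn` against `selective_scan_ref`; the recurrence being verified is essentially `h_t = exp(delta_t * A) * h_{t-1} + delta_t * B_t * u_t` with `y_t = C_t h_t + D u_t`, optionally gated by `silu(z)`. The sketch below is a minimal pure-PyTorch illustration of that math for the real-valued, variable-B/C, single-group case; it is an illustration only, not the library's reference implementation.

```python
# Minimal pure-PyTorch illustration of the recurrence tested above (real-valued,
# variable B/C, single group). Not the library's selective_scan_ref.
import torch
import torch.nn.functional as F

def selective_scan_sketch(u, delta, A, B, C, D=None, z=None,
                          delta_bias=None, delta_softplus=False):
    # u, delta, z: (batch, dim, seqlen);  A: (dim, dstate)
    # B, C: (batch, dstate, seqlen);      D, delta_bias: (dim,)
    if delta_bias is not None:
        delta = delta + delta_bias[None, :, None]
    if delta_softplus:
        delta = F.softplus(delta)
    batch, dim, seqlen = u.shape
    dstate = A.shape[1]
    h = u.new_zeros(batch, dim, dstate, dtype=torch.float32)
    ys = []
    for t in range(seqlen):
        dA = torch.exp(delta[:, :, t, None] * A)                       # (b, d, n)
        dBu = delta[:, :, t, None] * B[:, None, :, t] * u[:, :, t, None]
        h = dA * h + dBu                                               # state update
        ys.append((h * C[:, None, :, t]).sum(dim=-1))                  # y_t: (b, d)
    out = torch.stack(ys, dim=-1)                                      # (b, d, l)
    if D is not None:
        out = out + u * D[:, None]
    if z is not None:
        out = out * F.silu(z)
    return out.to(u.dtype), h                                          # output, last state
```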
+@pytest.mark.parametrize("is_variable_B", [False, True]) +# @pytest.mark.parametrize("is_variable_B", [True]) +def test_mamba_inner_fn(is_variable_B, is_variable_C, seqlen, itype, wtype): + device = 'cuda' + rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 3e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + # If we have z, the errors on the weights seem higher + rtolw = max(rtolw, rtol) + atolw = max(atolw, atol) + # set seed + torch.random.manual_seed(0) + batch_size = 2 + dim = 768 + dstate = 8 + dt_rank = 48 + is_complex = wtype == torch.complex64 + xz = torch.randn(batch_size, 2 * dim, seqlen, device=device, dtype=itype, requires_grad=True) + conv1d_weight = torch.randn(dim, 1, 3, device=device, dtype=torch.float32, requires_grad=True) + conv1d_bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + x_proj_weight = torch.randn(dt_rank + (bool(is_variable_B) + bool(is_variable_C)) * dstate + * (1 if not is_complex else 2), + dim, device=device, dtype=itype, requires_grad=True) + delta_proj_weight = torch.randn(dim, dt_rank, device=device, dtype=itype, requires_grad=True) + out_proj_weight = torch.randn(dim // 2, dim, device=device, dtype=itype, requires_grad=True) + out_proj_bias = None + A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + B = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_B else None) + C = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_C else None) + D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + delta_bias = (0.5 * torch.rand(dim, device=device, dtype=torch.float32)).requires_grad_() + B_proj_bias = None + C_proj_bias = None + xz_ref = xz.detach().clone().requires_grad_() + conv1d_weight_ref = conv1d_weight.detach().clone().requires_grad_() + conv1d_bias_ref = conv1d_bias.detach().clone().requires_grad_() + x_proj_weight_ref = x_proj_weight.detach().clone().requires_grad_() + delta_proj_weight_ref = delta_proj_weight.detach().clone().requires_grad_() + out_proj_weight_ref = out_proj_weight.detach().clone().requires_grad_() + out_proj_bias_ref = (out_proj_bias.detach().clone().requires_grad_() + if out_proj_bias is not None else None) + A_ref = A.detach().clone().requires_grad_() + B_ref = B.detach().clone().requires_grad_() if B is not None else None + C_ref = C.detach().clone().requires_grad_() if C is not None else None + D_ref = D.detach().clone().requires_grad_() + delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None + out = mamba_inner_fn(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B, C, D, delta_bias=delta_bias, delta_softplus=True) + out_ref = mamba_inner_ref(xz_ref, conv1d_weight_ref, conv1d_bias_ref, x_proj_weight_ref, + delta_proj_weight_ref, out_proj_weight_ref, out_proj_bias_ref, + A_ref, B_ref, C_ref, D_ref, + delta_bias=delta_bias_ref, delta_softplus=True) + # dA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + # dt_u = delta * u + + print(f'Output max diff: {(out - out_ref).abs().max().item()}') + print(f'Output mean diff: {(out - out_ref).abs().mean().item()}') + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + + g = torch.randn_like(out) + out_ref.backward(g) + out.backward(g) + + print(f'dxz max diff: {(xz.grad - xz_ref.grad).abs().max().item()}') + print(f'dA max diff: {(A.grad - 
A_ref.grad).abs().max().item()}') + if not is_variable_B: + print(f'dB max diff: {(B.grad - B_ref.grad).abs().max().item()}') + if not is_variable_C: + print(f'dC max diff: {(C.grad - C_ref.grad).abs().max().item()}') + print(f'dD max diff: {(D.grad - D_ref.grad).abs().max().item()}') + print(f'ddelta_bias max diff: {(delta_bias.grad - delta_bias_ref.grad).abs().max().item()}') + print(f'dout_proj_weight max diff: {(out_proj_weight.grad - out_proj_weight_ref.grad).abs().max().item()}') + print(f'ddelta_proj_weight max diff: {(delta_proj_weight.grad - delta_proj_weight_ref.grad).abs().max().item()}') + print(f'dx_proj_weight max diff: {(x_proj_weight.grad - x_proj_weight_ref.grad).abs().max().item()}') + print(f'dconv1d_weight max diff: {(conv1d_weight.grad - conv1d_weight_ref.grad).abs().max().item()}') + print(f'dconv1d_bias max diff: {(conv1d_bias.grad - conv1d_bias_ref.grad).abs().max().item()}') + + # assert torch.allclose(xz.grad, xz_ref.grad.to(dtype=itype), rtol=rtol * 2, atol=atol * 2) + # assert torch.allclose(delta.grad, delta_ref.grad.to(dtype=itype), rtol=rtol * 5, atol=atol * 10) + # assert torch.allclose(A.grad, A_ref.grad, rtol=rtolw, atol=atolw * 5) + # assert torch.allclose(B.grad, B_ref.grad, rtol=rtolw if not is_variable_B else rtol, + # atol=atolw if not is_variable_B else atol) + # assert torch.allclose(C.grad, C_ref.grad, rtol=rtolw if not is_variable_C else rtol, + # atol=atolw if not is_variable_C else atol) + # assert torch.allclose(D.grad, D_ref.grad, rtol=rtolw, atol=atolw) + # assert torch.allclose(delta_bias.grad, delta_bias_ref.grad, rtol=rtolw, atol=atolw) diff --git a/mamba/tests/ops/triton/test_layernorm_gated.py b/mamba/tests/ops/triton/test_layernorm_gated.py new file mode 100644 index 0000000000000000000000000000000000000000..de669e85b3fed009b5538621e62bf10b6f2cb9e4 --- /dev/null +++ b/mamba/tests/ops/triton/test_layernorm_gated.py @@ -0,0 +1,103 @@ +import math + +import torch +import torch.nn.functional as F + +import pytest + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.layernorm_gated import layernorm_fn, rms_norm_ref + + +@pytest.mark.parametrize("norm_before_gate", [True, False]) +# @pytest.mark.parametrize("norm_before_gate", [False]) +@pytest.mark.parametrize("has_group", [False, True]) +# @pytest.mark.parametrize("has_group", [False]) +@pytest.mark.parametrize("is_rms_norm", [False, True]) +# @pytest.mark.parametrize("is_rms_norm", [True]) +@pytest.mark.parametrize("has_z", [False, True]) +# @pytest.mark.parametrize("has_z", [True]) +@pytest.mark.parametrize("has_bias", [False, True]) +# @pytest.mark.parametrize("has_bias", [False]) +# @pytest.mark.parametrize('dtype', [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('dtype', [torch.float16]) +# @pytest.mark.parametrize("wtype", [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize("wtype", [torch.float32]) +@pytest.mark.parametrize('d', [2048, 4096]) +# @pytest.mark.parametrize('d', [4096]) +def test_layer_norm_gated(d, dtype, wtype, has_bias, has_z, is_rms_norm, has_group, norm_before_gate): + if not has_z and not norm_before_gate: + pytest.skip() + if not norm_before_gate and not is_rms_norm: # Reference LN isn't implemented for this case yet + pytest.skip() + device = 'cuda' + rtol, atol = (1e-5, 1e-5) if dtype == torch.float32 else (1e-2, 8e-3) + group_size = None if not has_group else 64 + # set seed + torch.random.manual_seed(0) + batch = 16 + seqlen = 1024 + x = torch.randn(batch, seqlen, d, dtype=dtype, device=device, 
requires_grad=True) + if has_z: + z = torch.randn(batch, seqlen, d, dtype=dtype, device=device, requires_grad=True) + else: + z = None + weight = torch.randn(d, dtype=wtype, device=device, requires_grad=True) + if has_bias: + bias = torch.randn(d, dtype=wtype, device=device, requires_grad=True) + else: + bias = None + x_ref = x.detach().clone().requires_grad_() + x_pt = x.detach().clone().requires_grad_() + z_ref = z.detach().clone().requires_grad_() if z is not None else None + z_pt = z.detach().clone().requires_grad_() if z is not None else None + weight_ref = weight.detach().clone().requires_grad_() + weight_pt = weight.detach().clone().requires_grad_() + bias_ref = bias.detach().clone().requires_grad_() if bias is not None else None + bias_pt = bias.detach().clone().requires_grad_() if bias is not None else None + out = layernorm_fn(x, weight, bias, z=z, eps=1e-5, group_size=group_size, norm_before_gate=norm_before_gate, + is_rms_norm=is_rms_norm) + if not is_rms_norm: + if not has_group: + out_ref = F.layer_norm(x_ref.float(), (d,), weight=weight_ref.float(), bias=bias_ref.float() if bias_ref is not None else None, eps=1e-5) + out_pt = F.layer_norm(x_pt.to(wtype), (d,), weight=weight_pt, bias=bias_pt, eps=1e-5) + else: + out_ref = rearrange(F.layer_norm(rearrange(x_ref, "... (g d) -> ... g d", d=group_size).float(), (group_size,), eps=1e-5), "... g d -> ... (g d)") * weight_ref.float() + if has_bias: + out_ref = out_ref + bias_ref.float() + out_pt = rearrange(F.layer_norm(rearrange(x_pt, "... (g d) -> ... g d", d=group_size), (group_size,), eps=1e-5), "... g d -> ... (g d)") * weight_pt + if has_bias: + out_pt = out_pt + bias_pt + if has_z and norm_before_gate: + out_ref = out_ref * F.silu(z_ref.float()) + out_pt = out_pt * F.silu(z_pt) + else: + out_ref = rms_norm_ref(x_ref, weight_ref, bias_ref, z=z_ref, eps=1e-5, group_size=group_size, + norm_before_gate=norm_before_gate) + out_pt = rms_norm_ref(x_pt, weight_pt, bias_pt, z=z_pt, eps=1e-5, group_size=group_size, + norm_before_gate=norm_before_gate, upcast=False) + print(f"Max diff = {(out - out_ref).abs().max().item()}") + print(f"Max diff Pytorch = {(out_pt - out_ref).abs().max().item()}") + assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + atol + + g = torch.randn_like(out) + out.backward(g) + out_ref.backward(g) + out_pt.backward(g) + print(f"Max dx diff = {(x.grad - x_ref.grad).abs().max().item()}") + print(f"Max dx diff Pytorch = {(x_pt.grad - x_ref.grad).abs().max().item()}") + if has_z: + print(f"Max dz diff = {(z.grad - z_ref.grad).abs().max().item()}") + print(f"Max dz diff Pytorch = {(z_pt.grad - z_ref.grad).abs().max().item()}") + print(f"Max dw diff = {(weight.grad - weight_ref.grad).abs().max().item()}") + print(f"Max dw diff Pytorch = {(weight_pt.grad - weight_ref.grad).abs().max().item()}") + if has_bias: + print(f"Max db diff = {(bias.grad - bias_ref.grad).abs().max().item()}") + print(f"Max db diff Pytorch = {(bias_pt.grad - bias_ref.grad).abs().max().item()}") + assert (x.grad - x_ref.grad).abs().max().item() <= 2 * (x_pt.grad - x_ref.grad).abs().max().item() + atol + if has_z: + assert (z.grad - z_ref.grad).abs().max().item() <= 2 * (z_pt.grad - z_ref.grad).abs().max().item() + atol + assert (weight.grad - weight_ref.grad).abs().max().item() <= 2 * (weight_pt.grad - weight_ref.grad).abs().max().item() + atol + if has_bias: + assert (bias.grad - bias_ref.grad).abs().max().item() <= 2 * (bias_pt.grad - bias_ref.grad).abs().max().item() + atol diff --git 
a/mamba/tests/ops/triton/test_selective_state_update.py b/mamba/tests/ops/triton/test_selective_state_update.py new file mode 100644 index 0000000000000000000000000000000000000000..e81807ae510ded87ba04ae1652c62e2880cb8379 --- /dev/null +++ b/mamba/tests/ops/triton/test_selective_state_update.py @@ -0,0 +1,102 @@ +# Copyright (C) 2023, Tri Dao. + +import math + +import torch +import torch.nn.functional as F +import pytest + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.selective_state_update import selective_state_update, selective_state_update_ref + + +@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16]) +# @pytest.mark.parametrize('itype', [torch.float16]) +@pytest.mark.parametrize("has_z", [False, True]) +# @pytest.mark.parametrize('has_z', [True]) +@pytest.mark.parametrize("dstate", [16, 32, 64]) +# @pytest.mark.parametrize("dstate", [16]) +@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) +# @pytest.mark.parametrize("dim", [2048]) +def test_selective_state_update(dim, dstate, has_z, itype): + device = "cuda" + rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2) + if itype == torch.bfloat16: + rtol, atol = 1e-2, 5e-2 + if torch.version.hip: + atol *= 2 + # set seed + torch.random.manual_seed(0) + batch_size = 2 + state = torch.randn(batch_size, dim, dstate, dtype=itype, device=device) + x = torch.randn(batch_size, dim, device=device, dtype=itype) + dt = torch.randn(batch_size, dim, device=device, dtype=itype) + dt_bias = torch.rand(dim, device=device) - 4.0 + A = -torch.rand(dim, dstate, device=device) - 1.0 + B = torch.randn(batch_size, dstate, device=device) + C = torch.randn(batch_size, dstate, device=device) + D = torch.randn(dim, device=device) + if has_z: + z = torch.randn_like(x) + else: + z = None + state_ref = state.detach().clone() + out = selective_state_update(state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + out_ref = selective_state_update_ref(state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + + print(f"Output max diff: {(out - out_ref).abs().max().item()}") + print(f"Output mean diff: {(out - out_ref).abs().mean().item()}") + assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + + +@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16]) +# @pytest.mark.parametrize('itype', [torch.float16]) +@pytest.mark.parametrize("has_z", [False, True]) +# @pytest.mark.parametrize('has_z', [True]) +@pytest.mark.parametrize("tie_hdim", [False, True]) +# @pytest.mark.parametrize('tie_hdim', [True]) +@pytest.mark.parametrize("ngroups", [1, 2, 4]) +# @pytest.mark.parametrize("ngroups", [2]) +@pytest.mark.parametrize("dstate", [16, 32, 64]) +# @pytest.mark.parametrize("dstate", [16]) +@pytest.mark.parametrize("dim", [2048, 4096]) +# @pytest.mark.parametrize("dim", [2048]) +def test_selective_state_update_with_heads(dim, dstate, ngroups, has_z, tie_hdim, itype): + device = "cuda" + rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 3e-2) + if itype == torch.bfloat16: + rtol, atol = 1e-2, 1e-1 + # set seed + torch.random.manual_seed(0) + batch_size = 2 + headdim = 64 + nheads = dim // headdim + state = torch.randn(batch_size, nheads, headdim, dstate, dtype=itype, device=device) + x = torch.randn(batch_size, nheads, headdim, device=device, dtype=itype) + if not tie_hdim: + dt = torch.randn(batch_size, nheads, headdim, device=device, dtype=itype) + dt_bias = 
torch.rand(nheads, headdim, device=device) - 4.0 + A = -torch.rand(nheads, headdim, dstate, device=device) - 1.0 + D = torch.randn(nheads, headdim, device=device) + else: + dt = repeat(torch.randn(batch_size, nheads, device=device, dtype=itype), "b h -> b h p", p=headdim) + dt_bias = repeat(torch.rand(nheads, device=device) - 4.0, "h -> h p", p=headdim) + A = repeat(-torch.rand(nheads, device=device) - 1.0, "h -> h p n", p=headdim, n=dstate) + D = repeat(torch.randn(nheads, device=device), "h -> h p", p=headdim) + B = torch.randn(batch_size, ngroups, dstate, device=device) + C = torch.randn(batch_size, ngroups, dstate, device=device) + if has_z: + z = torch.randn_like(x) + else: + z = None + state_ref = state.detach().clone() + state_og = state.detach().clone() + out = selective_state_update(state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + out_ref = selective_state_update_ref(state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + + print(f"Output max diff: {(out - out_ref).abs().max().item()}") + print(f"Output mean diff: {(out - out_ref).abs().mean().item()}") + assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) diff --git a/mamba/tests/ops/triton/test_ssd.py b/mamba/tests/ops/triton/test_ssd.py new file mode 100644 index 0000000000000000000000000000000000000000..d45152d677903c85a73abeb5833a476d40eed602 --- /dev/null +++ b/mamba/tests/ops/triton/test_ssd.py @@ -0,0 +1,78 @@ +import math + +import torch +import torch.nn.functional as F + +import pytest + +from einops import rearrange, repeat + +from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state, chunk_state_ref +from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_cumsum_fwd, _chunk_state_fwd +from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state_varlen +from mamba_ssm.ops.triton.ssd_state_passing import state_passing, state_passing_ref +from mamba_ssm.ops.triton.ssd_state_passing import _state_passing_fwd +from mamba_ssm.ops.triton.ssd_chunk_scan import chunk_scan, chunk_scan_ref +from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_chunk_scan, ssd_chunk_scan_combined_ref, ssd_selective_scan +from mamba_ssm.ops.triton.ssd_combined import mamba_split_conv1d_scan_combined, mamba_split_conv1d_scan_ref + + +def detach_clone(*args): + return tuple([arg.detach().clone().requires_grad_() if arg is not None else None for arg in args]) + + +@pytest.mark.parametrize('dtype', [torch.float32, torch.float16, torch.bfloat16]) +# @pytest.mark.parametrize('dtype', [torch.bfloat16]) +@pytest.mark.parametrize('ngroups', [1, 2, 8, "max"]) +# @pytest.mark.parametrize('ngroups', [1]) +@pytest.mark.parametrize('chunk_size', [64, 128]) +# @pytest.mark.parametrize('chunk_size', [128]) +def test_chunk_state_varlen(chunk_size, ngroups, dtype): + device = 'cuda' + rtol, atol = (1e-2, 3e-3) + # set seed + torch.random.manual_seed(chunk_size + (ngroups if ngroups != "max" else 64)) + batch = 300 + seqlens = torch.randint(1, 200, (batch,), device=device) + # batch = 3 + # seqlens = torch.tensor([201, 56, 5], device=device) + cu_seqlens = F.pad(seqlens.cumsum(0), (1, 0)) + total_seqlen = seqlens.sum().item() + seq_idx = torch.cat([torch.full((s,), i, dtype=torch.int32, device=device) for i, s in enumerate(seqlens)], dim=0).unsqueeze(0) + dim = 4096 + # dim = 64 + headdim = 64 + # dim = 32 + dstate = 32 + assert dim % headdim == 0 + nheads = dim // headdim + if ngroups == "max": + ngroups = nheads + assert nheads % 
ngroups == 0 + B = torch.randn(total_seqlen, ngroups, dstate, dtype=dtype, device=device) / 5 + x = torch.randn(total_seqlen, nheads, headdim, dtype=dtype, device=device) + A = -0.1 * (torch.rand(nheads, device=device)) + dt = F.softplus(torch.randn(total_seqlen, nheads, device=device, dtype=torch.float32) - 4) + dA_cumsum, dt_rounded = _chunk_cumsum_fwd(dt.unsqueeze(0), A, chunk_size) + chunk_states = _chunk_state_fwd(B.unsqueeze(0), x.unsqueeze(0), dt_rounded, dA_cumsum, seq_idx=seq_idx) + chunk_states, _ = _state_passing_fwd(rearrange(chunk_states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1], + seq_idx=seq_idx, chunk_size=chunk_size) + chunk_states = rearrange(chunk_states, "... (p n) -> ... p n", n=dstate) + chunk_states = chunk_states.squeeze(0) + dA_cumsum = dA_cumsum.squeeze(0) + dt_rounded = dt_rounded.squeeze(0) + out = chunk_state_varlen(B, x, dt_rounded, dA_cumsum, cu_seqlens, chunk_states) + out_ref = [] + for b in range(batch): + x_s = x[cu_seqlens[b]:cu_seqlens[b + 1]].unsqueeze(0) + B_s = B[cu_seqlens[b]:cu_seqlens[b + 1]].unsqueeze(0) + dt_s = dt[cu_seqlens[b]:cu_seqlens[b + 1]].unsqueeze(0) + dA_cumsum_s, dt_rounded_s = _chunk_cumsum_fwd(dt_s, A, chunk_size) + states = chunk_state(B_s, x_s, dt_rounded_s, dA_cumsum_s) + _, final_states = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum_s[:, :, :, -1], + chunk_size=chunk_size) + final_states = rearrange(final_states, "... (p n) -> ... p n", n=dstate) + out_ref.append(final_states) + out_ref = torch.cat(out_ref, dim=0) + print(f"Max diff = {(out - out_ref).abs().max().item()}") + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) diff --git a/mamba/tests/test_generation.py b/mamba/tests/test_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..77e1aedfa1e909510e156341837f02fcd09ab74f --- /dev/null +++ b/mamba/tests/test_generation.py @@ -0,0 +1,113 @@ +import torch +import torch.nn.functional as F + +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel +from mamba_ssm.models.config_mamba import MambaConfig +from mamba_ssm.utils.generation import InferenceParams + +import pytest + +from einops import rearrange, repeat + + +def test_generation(): + batch = 3 + seqlen = 20 + device = "cuda" + dtype = torch.float16 + + config = MambaConfig( + d_model=1024, + n_layer=4, + vocab_size=50277, + ssm_cfg=dict(layer="Mamba2"), + rms_norm=True, + residual_in_fp32=True, + fused_add_norm=True, + pad_vocab_size_multiple=16, + ) + torch.manual_seed(2357) + model = MambaLMHeadModel(config, device=device, dtype=dtype) + x = torch.randint(0, 1000, (batch, seqlen), device=device, dtype=torch.long) + out_ref = model(x).logits + prompt_len = seqlen // 2 + out = model.generate( + input_ids = x[:, :prompt_len], max_length=seqlen, output_scores=True, return_dict_in_generate=True, + cg=True, # Can turn off CUDA graph for easier debugging + # instead of sampling, we take output tokens from x, to get logits for testing + # For actual generation, don't pass in teacher_outputs + teacher_outputs=x, + ) + out_scores = torch.stack(out.scores, dim=1) + print(f"Max diff: {(out_scores - out_ref[:, prompt_len - 1: -1]).abs().max()}") + assert torch.allclose(out_scores, out_ref[:, prompt_len - 1: -1], rtol=1e-3, atol=1e-2) + + +def test_generation_varlen(): + seqlens = [170, 65, 100] + genlen = 20 + total_seqlen = sum(seqlens) + device = "cuda" + dtype = torch.float16 + + config = MambaConfig( + d_model=1024, + n_layer=4, + vocab_size=50277, + ssm_cfg=dict(layer="Mamba2"), + rms_norm=True, 
+ residual_in_fp32=True, + fused_add_norm=True, + pad_vocab_size_multiple=16, + ) + torch.manual_seed(2357) + model = MambaLMHeadModel(config, device=device, dtype=dtype) + xs = [torch.randint(0, 1000, (1, seqlen), device=device, dtype=torch.long) for seqlen in seqlens] + + # Reference 1: Forward pass with seq_idx + x = torch.cat(xs, dim=1) + seq_idx = torch.cat([torch.full((ids.shape[1],), i, dtype=torch.int32, device=device) + for i, ids in enumerate(xs)], dim=0).unsqueeze(0) + cu_seqlens = F.pad(torch.tensor(seqlens, device=device, dtype=torch.int32).cumsum(dim=0), (1, 0)) + out_ref = model(x, seq_idx=seq_idx).logits + # Only take the last @genlen logits of each sequence + out_ref = torch.cat([out_ref[:, cu_seqlens[i + 1] - genlen - 1:cu_seqlens[i + 1] - 1] + for i in range(len(seqlens))], dim=0) + + # Reference 2: Generate the last @genlen tokens of each sequence in a for loop + out_loop = [] + for input_ids in xs: + out = model.generate( + input_ids=input_ids[:, :-genlen], max_length=input_ids.shape[1], output_scores=True, + return_dict_in_generate=True, cg=True, teacher_outputs=input_ids, + ).scores + out_loop.append(torch.stack(out, dim=1)) + out_loop = torch.cat(out_loop, dim=0) + print(f"Max diff between ref1 and ref2: {(out_loop - out_ref).abs().max()}") + + # Varlen generation + input_ids = torch.cat([ids[:, :-genlen] for ids in xs], dim=1) + prompt_seqlens = [seqlen - genlen for seqlen in seqlens] + cu_seqlens = F.pad(torch.tensor(prompt_seqlens, device=device, dtype=torch.int32).cumsum(dim=0), (1, 0)) + seq_idx = torch.cat([torch.full((seqlen,), i, dtype=torch.int32, device=device) + for i, seqlen in enumerate(prompt_seqlens)], dim=0).unsqueeze(0) + inference_params = InferenceParams(max_seqlen=2048, max_batch_size=len(seqlens)) + + scores, sequences = [], [] + # Both seq_idx and cu_seqlens must be passed in for varlen generation + logits = model(input_ids, inference_params=inference_params, seq_idx=seq_idx, cu_seqlens=cu_seqlens).logits + logits = rearrange(logits[0, cu_seqlens[1:] - 1], "b d -> b 1 d") + scores.append(logits) + # In practice we should sample. In this case we take from the teacher_output for testing + sampled_tokens = rearrange(torch.stack([ids[0, -genlen] for ids in xs], dim=0), "b -> b 1") + sequences.append(sampled_tokens) + for i in range(1, genlen): + inference_params.seqlen_offset += 1 + logits = model(sampled_tokens, inference_params=inference_params, num_last_tokens=1).logits + scores.append(logits) + # In practice we should sample. 
In this case we take from the teacher_output for testing + sampled_tokens = rearrange(torch.stack([ids[0, -genlen + i] for ids in xs], dim=0), "b -> b 1") + sequences.append(sampled_tokens) + out_varlen = torch.cat(scores, dim=1) + print(f"Max diff: {(out_varlen - out_ref).abs().max()}") + assert (out_varlen - out_ref).abs().max() < 2 * (out_loop - out_ref).abs().max() diff --git a/models/.DS_Store b/models/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..9adc067ea3ec308a83b34b59044446457478de8b Binary files /dev/null and b/models/.DS_Store differ diff --git a/models/rwkv-6-world-1b6/.gitattributes b/models/rwkv-6-world-1b6/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b --- /dev/null +++ b/models/rwkv-6-world-1b6/.gitattributes @@ -0,0 +1,35 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/models/rwkv-6-world-1b6/README.md b/models/rwkv-6-world-1b6/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0a05edbb65f8b5589171a4c1d7f5ad244666e846 --- /dev/null +++ b/models/rwkv-6-world-1b6/README.md @@ -0,0 +1,174 @@ +### Run Huggingface RWKV6 World Model + +> origin pth weight from https://huggingface.co/BlinkDL/rwkv-6-world/blob/main/RWKV-x060-World-1B6-v2.1-20240328-ctx4096.pth . + +#### CPU + +```python +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + +def generate_prompt(instruction, input=""): + instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n') + input = input.strip().replace('\r\n','\n').replace('\n\n','\n') + if input: + return f"""Instruction: {instruction} + +Input: {input} + +Response:""" + else: + return f"""User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. 
+ +User: {instruction} + +Assistant:""" + + +model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-6-world-1b6", trust_remote_code=True).to(torch.float32) +tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-6-world-1b6", trust_remote_code=True, padding_side='left', pad_token="") + +text = "请介绍北京的旅游景点" +prompt = generate_prompt(text) + +inputs = tokenizer(prompt, return_tensors="pt") +output = model.generate(inputs["input_ids"], max_new_tokens=333, do_sample=True, temperature=1.0, top_p=0.3, top_k=0, ) +print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True)) +``` + +output: + +```shell +User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. + +User: 请介绍北京的旅游景点 + +Assistant: 北京是中国的首都,拥有众多的旅游景点,以下是其中一些著名的景点: +1. 故宫:位于北京市中心,是明清两代的皇宫,内有大量的文物和艺术品。 +2. 天安门广场:是中国最著名的广场之一,是中国人民政治协商会议的旧址,也是中国人民政治协商会议的中心。 +3. 颐和园:是中国古代皇家园林之一,有着悠久的历史和丰富的文化内涵。 +4. 长城:是中国古代的一道长城,全长约万里,是中国最著名的旅游景点之一。 +5. 北京大学:是中国著名的高等教育机构之一,有着悠久的历史和丰富的文化内涵。 +6. 北京动物园:是中国最大的动物园之一,有着丰富的动物资源和丰富的文化内涵。 +7. 故宫博物院:是中国最著名的博物馆之一,收藏了大量的文物和艺术品,是中国最重要的文化遗产之一。 +8. 天坛:是中国古代皇家 +``` + +#### GPU + +```python +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + +def generate_prompt(instruction, input=""): + instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n') + input = input.strip().replace('\r\n','\n').replace('\n\n','\n') + if input: + return f"""Instruction: {instruction} + +Input: {input} + +Response:""" + else: + return f"""User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. + +User: {instruction} + +Assistant:""" + + +model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-6-world-1b6", trust_remote_code=True, torch_dtype=torch.float16).to(0) +tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-6-world-1b6", trust_remote_code=True, padding_side='left', pad_token="") + +text = "介绍一下大熊猫" +prompt = generate_prompt(text) + +inputs = tokenizer(prompt, return_tensors="pt").to(0) +output = model.generate(inputs["input_ids"], max_new_tokens=128, do_sample=True, temperature=1.0, top_p=0.3, top_k=0, ) +print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True)) +``` + +output: + +```shell +User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. + +User: 介绍一下大熊猫 + +Assistant: 大熊猫是一种中国特有的哺乳动物,也是中国的国宝之一。它们的外貌特征是圆形的黑白相间的身体,有着黑色的毛发和白色的耳朵。大熊猫的食物主要是竹子,它们会在竹林中寻找竹子,并且会将竹子放在竹笼中进行储存。大熊猫的寿命约为20至30年,但由于栖息地的丧失和人类活动的 +``` + +#### Batch Inference + +```python +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + +def generate_prompt(instruction, input=""): + instruction = instruction.strip().replace('\r\n', '\n').replace('\n\n', '\n') + input = input.strip().replace('\r\n', '\n').replace('\n\n', '\n') + if input: + return f"""Instruction: {instruction} + +Input: {input} + +Response:""" + else: + return f"""User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. 
+ +User: {instruction} + +Assistant:""" + +model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-6-world-1b6", trust_remote_code=True).to(torch.float32) +tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-6-world-1b6", trust_remote_code=True, padding_side='left', pad_token="") + +texts = ["请介绍北京的旅游景点", "介绍一下大熊猫", "乌兰察布"] +prompts = [generate_prompt(text) for text in texts] + +inputs = tokenizer(prompts, return_tensors="pt", padding=True) +outputs = model.generate(inputs["input_ids"], max_new_tokens=128, do_sample=True, temperature=1.0, top_p=0.3, top_k=0, ) + +for output in outputs: + print(tokenizer.decode(output.tolist(), skip_special_tokens=True)) + +``` + +output: + +```shell +User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. + +User: 请介绍北京的旅游景点 + +Assistant: 北京是中国的首都,拥有丰富的旅游资源和历史文化遗产。以下是一些北京的旅游景点: +1. 故宫:位于北京市中心,是明清两代的皇宫,是中国最大的古代宫殿建筑群之一。 +2. 天安门广场:位于北京市中心,是中国最著名的城市广场之一,也是中国最大的城市广场。 +3. 颐和 +User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. + +User: 介绍一下大熊猫 + +Assistant: 大熊猫是一种生活在中国中部地区的哺乳动物,也是中国的国宝之一。它们的外貌特征是圆形的黑白相间的身体,有着黑色的毛发和圆圆的眼睛。大熊猫是一种濒危物种,目前只有在野外的几个保护区才能看到它们的身影。大熊猫的食物主要是竹子,它们会在竹子上寻找食物,并且可以通 +User: hi + +Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it. + +User: 乌兰察布 + +Assistant: 乌兰察布是中国新疆维吾尔自治区的一个县级市,位于新疆维吾尔自治区中部,是新疆的第二大城市。乌兰察布市是新疆的第一大城市,也是新疆的重要城市之一。乌兰察布市是新疆的经济中心,也是新疆的重要交通枢纽之一。乌兰察布市的人口约为2.5万人,其中汉族占绝大多数。乌 +``` \ No newline at end of file diff --git a/models/rwkv-6-world-1b6/added_tokens.json b/models/rwkv-6-world-1b6/added_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4da57d323dd080527dfb2df57e789e586a47d750 --- /dev/null +++ b/models/rwkv-6-world-1b6/added_tokens.json @@ -0,0 +1,3 @@ +{ + "": 0 +} diff --git a/models/rwkv-6-world-1b6/config.json b/models/rwkv-6-world-1b6/config.json new file mode 100644 index 0000000000000000000000000000000000000000..cc390dd450b75d1ad6a8e4e5edc9fe8e2edd2f0c --- /dev/null +++ b/models/rwkv-6-world-1b6/config.json @@ -0,0 +1,25 @@ +{ + "architectures": [ + "Rwkv6ForCausalLM" + ], + "auto_map": { + "AutoConfig": "configuration_rwkv6.Rwkv6Config", + "AutoModelForCausalLM": "modeling_rwkv6.Rwkv6ForCausalLM" + }, + "attention_hidden_size": 2048, + "bos_token_id": 0, + "eos_token_id": 0, + "head_size": 64, + "head_size_divisor": 8, + "hidden_size": 2048, + "intermediate_size": null, + "layer_norm_epsilon": 1e-05, + "model_type": "rwkv6", + "num_attention_heads": 64, + "num_hidden_layers": 24, + "rescale_every": 6, + "tie_word_embeddings": false, + "transformers_version": "4.34.0", + "use_cache": true, + "vocab_size": 65536 +} diff --git a/models/rwkv-6-world-1b6/configuration_rwkv6.py b/models/rwkv-6-world-1b6/configuration_rwkv6.py new file mode 100644 index 0000000000000000000000000000000000000000..d39e99edacb3d04e34aab5fbcee6f076cfc637ea --- /dev/null +++ b/models/rwkv-6-world-1b6/configuration_rwkv6.py @@ -0,0 +1,118 @@ +# coding=utf-8 +# Copyright 2023 The OpenAI Team Authors and HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" RWKV configuration""" + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + +RWKV6_PRETRAINED_CONFIG_ARCHIVE_MAP = {} + + +class Rwkv6Config(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`Rwkv6Model`]. It is used to instantiate a RWKV6 + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the RWVK-4 + [RWKV/rwkv-5-world-1b5](https://huggingface.co/RWKV/rwkv-5-world-1b5) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 65536): + Vocabulary size of the RWKV6 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`Rwkv6Model`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the embeddings and hidden states. + num_hidden_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the model. + attention_hidden_size (`int`, *optional*): + Dimensionality of the attention hidden states. Will default to `hidden_size` if unset. + num_attention_heads (`int`, *optional*, defaults to 64): + The attention heads to use in rwkv6 self_attention module. + head_size (`int`, *optional*, defaults to 64): head_size of rwkv6 self_attention module. + intermediate_size (`int`, *optional*): + Dimensionality of the inner feed-forward layers. Will default to 4 times `hidden_size` if unset. + layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): + The epsilon to use in the layer normalization layers. + bos_token_id (`int`, *optional*, defaults to 0): + The id of the beginning of sentence token in the vocabulary. Defaults to 0. + eos_token_id (`int`, *optional*, defaults to 0): + The id of the end of sentence token in the vocabulary. Defaults to 0. + rescale_every (`int`, *optional*, defaults to 6): + At inference, the hidden states (and weights of the correponding output layers) are divided by 2 every + `rescale_every` layer. If set to 0 or a negative number, no rescale is done. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether or not to tie the word embeddings with the input token embeddings. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last state. 
+ + + Example: + + ```python + >>> from transformers import Rwkv6Config, Rwkv6Model + + >>> # Initializing a Rwkv6 configuration + >>> configuration = Rwkv6Config() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = Rwkv6Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "rwkv6" + + def __init__( + self, + vocab_size=65536, + hidden_size=768, + num_hidden_layers=24, + attention_hidden_size=None, + head_size=64, + head_size_divisor=8, + intermediate_size=None, + layer_norm_epsilon=1e-5, + bos_token_id=0, + eos_token_id=0, + rescale_every=6, + tie_word_embeddings=False, + use_cache=True, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size + self.head_size = head_size + self.head_size_divisor = head_size_divisor + self.intermediate_size = None + self.layer_norm_epsilon = layer_norm_epsilon + self.rescale_every = rescale_every + self.use_cache = use_cache + + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + + super().__init__( + tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs + ) diff --git a/models/rwkv-6-world-1b6/generation_config.json b/models/rwkv-6-world-1b6/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..25063660a7ea5f01c455d0780080e8f84900e583 --- /dev/null +++ b/models/rwkv-6-world-1b6/generation_config.json @@ -0,0 +1,12 @@ +{ + "chat_format": "chatml", + "eos_token_id": 0, + "pad_token_id": 0, + "max_window_size": 2048, + "max_new_tokens": 2048, + "do_sample": true, + "top_k": 0, + "top_p": 0.1, + "repetition_penalty": 1.0, + "transformers_version": "4.31.1" +} \ No newline at end of file diff --git a/models/rwkv-6-world-1b6/hf_rwkv_tokenizer.py b/models/rwkv-6-world-1b6/hf_rwkv_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..569cf659aa4bd2bc9c52fe8c304b2e091c5359bd --- /dev/null +++ b/models/rwkv-6-world-1b6/hf_rwkv_tokenizer.py @@ -0,0 +1,279 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
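As a quick illustration of how the configuration pieces above fit together (a minimal sketch, not part of the checkpoint, assuming `transformers` is installed and `configuration_rwkv6.py` is importable from the model directory), the 1b6 settings from the shipped `config.json` can be reproduced by hand; note that `attention_hidden_size` falls back to `hidden_size` when it is not given explicitly:

```python
# Hypothetical sketch: build the rwkv-6-world-1b6 configuration from the values in config.json.
from configuration_rwkv6 import Rwkv6Config

config = Rwkv6Config(
    vocab_size=65536,        # RWKV world vocabulary
    hidden_size=2048,        # width of the 1b6 checkpoint
    num_hidden_layers=24,
    head_size=64,
    head_size_divisor=8,
    layer_norm_epsilon=1e-5,
    rescale_every=6,         # hidden states are divided by 2 every 6 layers at inference
    tie_word_embeddings=False,
    use_cache=True,
)

# attention_hidden_size defaults to hidden_size when left unset
assert config.attention_hidden_size == config.hidden_size == 2048
```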
+"""Tokenization classes for RWKV6.""" + +import os +import re +from typing import TYPE_CHECKING, List, Optional, Tuple + +from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer +from transformers.utils import logging + + +if TYPE_CHECKING: + pass + +logger = logging.get_logger(__name__) + + +VOCAB_FILES_NAMES = { + "vocab_file": "rwkv_vocab_v20230424.txt", +} + +class TRIE: + __slots__ = tuple("ch,to,values,front".split(",")) + to: list + values: set + + def __init__(self, front=None, ch=None): + self.ch = ch + self.to = [None for ch in range(256)] + self.values = set() + self.front = front + + def __repr__(self): + fr = self + ret = [] + while fr != None: + if fr.ch != None: + ret.append(fr.ch) + fr = fr.front + return "" % (ret[::-1], self.values) + + def add(self, key: bytes, idx: int = 0, val=None): + if idx == len(key): + if val is None: + val = key + self.values.add(val) + return self + ch = key[idx] + if self.to[ch] is None: + self.to[ch] = TRIE(front=self, ch=ch) + return self.to[ch].add(key, idx=idx + 1, val=val) + + def find_longest(self, key: bytes, idx: int = 0): + u: TRIE = self + ch: int = key[idx] + + while u.to[ch] is not None: + u = u.to[ch] + idx += 1 + if u.values: + ret = idx, u, u.values + if idx == len(key): + break + ch = key[idx] + return ret + + +class RWKV_TOKENIZER: + def __init__(self, file_name): + self.idx2token = {} + sorted = [] # must be already sorted + with open(file_name, "r", encoding="utf-8") as f: + lines = f.readlines() + for l in lines: + idx = int(l[: l.index(" ")]) + x = eval(l[l.index(" ") : l.rindex(" ")]) + x = x.encode("utf-8") if isinstance(x, str) else x + assert isinstance(x, bytes) + + assert len(x) == int(l[l.rindex(" ") :]) + sorted += [x] + self.idx2token[idx] = x + + self.token2idx = {} + for k, v in self.idx2token.items(): + self.token2idx[v] = int(k) + + self.root = TRIE() + for t, i in self.token2idx.items(): + _ = self.root.add(t, val=(t, i)) + + def encodeBytes(self, src: bytes): + idx: int = 0 + tokens = [] + while idx < len(src): + _idx: int = idx + idx, _, values = self.root.find_longest(src, idx) + assert idx != _idx + _, token = next(iter(values)) + tokens.append(token) + return tokens + + def decodeBytes(self, tokens): + return b"".join(map(lambda i: self.idx2token[i], tokens)) + + def encode(self, src): + if isinstance(src, str): + return [self.encodeBytes(src.encode("utf-8"))] + elif isinstance(src, list): + return [self.encodeBytes(s.encode("utf-8")) for s in src] + + def decode(self, tokens): + return [self.decodeBytes(batch).decode("utf-8") for batch in tokens] + # try: + # return self.decodeBytes(tokens).decode('utf-8') + # except: + # return '\ufffd' # bad utf-8 + + def printTokens(self, tokens): + for i in tokens: + s = self.idx2token[i] + try: + s = s.decode("utf-8") + except: + pass + print(f"{repr(s)}{i}", end=" ") + print() + + +class Rwkv6Tokenizer(PreTrainedTokenizer): + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, vocab_file, bos_token="", eos_token="", unk_token="", **kwargs + ): + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" + " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + + if "add_bos_token" in kwargs: + self.add_bos_token = kwargs["add_bos_token"] + else: + self.add_bos_token = False + self.trie_tokenizer = RWKV_TOKENIZER(vocab_file) + vocab = self.trie_tokenizer.token2idx + self.encoder = vocab + self.decoder = {v: k for k, v in vocab.items()} + self._added_tokens_decoder = {0: AddedToken(str(bos_token))} + super().__init__( + bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs + ) + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + vocab = {str(self.convert_ids_to_tokens(i)): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text, split_special_tokens=False): + # return self.wordpiece_tokenizer.tokenize(text.encode("utf-8")) + return self.trie_tokenizer.encode(text)[0] + + def _convert_token_to_id(self, token): + return token + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (byte) using the vocab.""" + token = self.decoder.get(index, self.unk_token) + if isinstance(token, (bytes)): + token = token.decode("utf-8", errors="replace") + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (bytes) in a single string. Additional tokens are encoded to bytes""" + out_string = b"".join( + [k.encode(errors="replace") if isinstance(k, str) else k for k in tokens] + ).decode("utf-8") + return out_string + + def save_vocabulary( + self, save_directory: str, filename_prefix: Optional[str] = None + ) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + "vocab.txt", + ) + else: + vocab_file = ( + filename_prefix + "-" if filename_prefix else "" + ) + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted( + self.encoder.items(), key=lambda kv: kv[1] + ): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(str(token) + "\n") + index += 1 + return (vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + if self.add_bos_token: + bos_token_ids = [self.bos_token_id] + else: + bos_token_ids = [] + + output = bos_token_ids + token_ids_0 + + if token_ids_1 is None: + return output + + return output + bos_token_ids + token_ids_1 + + def get_special_tokens_mask( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None, + already_has_special_tokens: bool = False, + ) -> List[int]: + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. 
+ + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=True, + ) + + if not self.add_bos_token: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=False, + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) diff --git a/models/rwkv-6-world-1b6/modeling_rwkv6.py b/models/rwkv-6-world-1b6/modeling_rwkv6.py new file mode 100644 index 0000000000000000000000000000000000000000..6daf95420f1712612da74ee91c95ebfabca62dd0 --- /dev/null +++ b/models/rwkv-6-world-1b6/modeling_rwkv6.py @@ -0,0 +1,746 @@ +# coding=utf-8 +# Copyright 2024 The RWKV team and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch RWKV6 World model.""" + +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +from pathlib import Path + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_ninja_available, + is_torch_cuda_available, + logging, +) + +from .configuration_rwkv6 import Rwkv6Config +try: + from fla.ops.rwkv6.recurrent_fuse import fused_recurrent_rwkv6 +except ImportError: + print("Required module is not installed. Please install it using the following commands:") + print("pip install -U git+https://github.com/sustcsonglin/flash-linear-attention") + print("Additionally, ensure you have the correct version of Triton installed:") + print("pip install triton==2.2.0") + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "RWKV/rwkv-6-world-1b6" +_CONFIG_FOR_DOC = "Rwkv6Config" + +def rwkv6_linear_attention_cpu(receptance, key, value, time_decay, time_first, state): + # For CPU fallback. Will be slower and probably take more memory than the custom CUDA kernel if not executed + # within a torch.no_grad. 
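    # What the loop below computes, per head and per time step t (shapes after the reshapes:
    # receptance/value are (batch, num_heads, seq_length, head_size), key is
    # (batch, num_heads, head_size, seq_length), state is (batch, num_heads, head_size, head_size)):
    #     y_t = r_t @ (time_first * (k_t v_t^T) + S_{t-1})
    #     S_t = k_t v_t^T + w_t * S_{t-1}
    # where w_t = exp(-exp(time_decay_t)) is the per-channel decay, so the running state decays
    # multiplicatively while the current token receives the extra time_first bonus.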
+ batch, seq_length, _ = receptance.shape + num_heads, head_size = time_first.shape + key = key.float().view(batch, seq_length, num_heads, head_size).transpose(1, 2).transpose(-2, -1) + value = value.float().view(batch, seq_length, num_heads, head_size).transpose(1, 2) + receptance = receptance.float().view(batch, seq_length, num_heads, head_size).transpose(1, 2) + time_decay = torch.exp(-torch.exp(time_decay.float())).view(batch, seq_length, num_heads, head_size).permute(0, 2, 3, 1) + time_first = time_first.float().reshape(-1, 1, 1).reshape(num_heads, -1, 1) + out = torch.zeros_like(key).reshape(batch, seq_length, num_heads, head_size) + + for current_index in range(seq_length): + current_receptance = receptance[:, :, current_index:current_index+1, :] + current_key = key[:, :, :, current_index:current_index+1] + current_value = value[:, :, current_index:current_index+1, :] + current_time_decay = time_decay[:, :, :, current_index:current_index+1] + attention_output = current_key @ current_value + out[:, current_index] = (current_receptance @ (time_first * attention_output + state)).squeeze(2) + with torch.no_grad(): + state = attention_output + current_time_decay * state + + return out, state + +def rwkv6_linear_attention( + training, + receptance, + key, + value, + time_decay, + time_first, + state, +): + no_cuda = any(t.device.type != "cuda" for t in [time_decay, time_first, receptance, key, value]) + # Launching the CUDA kernel for just one token will actually be slower (there is no for loop in the CPU version + # in this case). + one_token = key.size(1) == 1 + if not training or no_cuda or one_token: + return rwkv6_linear_attention_cpu( + receptance, key, value, time_decay, time_first, state + ) + else: + batch, seq_length, _ = receptance.shape + num_heads, head_size = time_first.shape + key = key.float().view(batch, seq_length, num_heads, head_size).transpose(1, 2) # B, T, H, K -> B, H, T, K + value = value.float().view(batch, seq_length, num_heads, head_size).transpose(1, 2) # B, T, H, K - > B, H, T, V + receptance = receptance.float().view(batch, seq_length, num_heads, head_size).transpose(1, 2) # B, H, T, K + time_decay = -torch.exp(time_decay.float()).view(batch, seq_length, num_heads, head_size).permute(0, 2, 1, 3) # B, T, H, K -> B, H, T, K + time_first = time_first.float().reshape(num_heads, head_size) # H, K + out, state = fused_recurrent_rwkv6(receptance, key, value, time_decay, time_first, scale=1.0, initial_state=state, output_final_state=True) + return out.transpose(1, 2), state + + +class Rwkv6SelfAttention(nn.Module): + def __init__(self, config, layer_id=0): + super().__init__() + self.config = config + self.layer_id = layer_id + hidden_size = config.hidden_size + attention_hidden_size = config.attention_hidden_size + self.attention_hidden_size = attention_hidden_size + head_size = config.head_size + num_heads = attention_hidden_size // head_size + + self.time_maa_x = nn.Parameter(torch.empty(1, 1, hidden_size)) + self.time_maa_w = nn.Parameter(torch.empty(1, 1, hidden_size)) + self.time_maa_k = nn.Parameter(torch.empty(1, 1, hidden_size)) + self.time_maa_v = nn.Parameter(torch.empty(1, 1, hidden_size)) + self.time_maa_r = nn.Parameter(torch.empty(1, 1, hidden_size)) + self.time_maa_g = nn.Parameter(torch.empty(1, 1, hidden_size)) + + TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX for w,k,v,r,g + self.time_maa_w1 = nn.Parameter(torch.empty(hidden_size, TIME_MIX_EXTRA_DIM*5)) + self.time_maa_w2 = nn.Parameter(torch.empty(5, TIME_MIX_EXTRA_DIM, hidden_size)) + + 
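        # time_maa_w1 (hidden_size, 5 * TIME_MIX_EXTRA_DIM) and time_maa_w2
        # (5, TIME_MIX_EXTRA_DIM, hidden_size) form a low-rank projection of the token-shift
        # delta; in extract_key_value it yields five per-token offsets (mw, mk, mv, mr, mg)
        # that are added to the static time_maa_{w,k,v,r,g} coefficients, making the mix
        # between the current and the previous hidden state data dependent. The
        # time_decay_w1 / time_decay_w2 pair below plays the same role for the per-token decay.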
self.time_decay = nn.Parameter(torch.empty(1, 1, attention_hidden_size)) + + TIME_DECAY_EXTRA_DIM = 64 + self.time_decay_w1 = nn.Parameter(torch.empty(hidden_size, TIME_DECAY_EXTRA_DIM)) + self.time_decay_w2 = nn.Parameter(torch.empty(TIME_DECAY_EXTRA_DIM, attention_hidden_size)) + + self.time_faaaa = nn.Parameter(torch.empty(num_heads, config.head_size)) + + + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + self.receptance = nn.Linear(hidden_size, attention_hidden_size, bias=False) + self.key = nn.Linear(hidden_size, attention_hidden_size, bias=False) + self.value = nn.Linear(hidden_size, attention_hidden_size, bias=False) + self.gate = nn.Linear(hidden_size, attention_hidden_size, bias=False) + self.output = nn.Linear(attention_hidden_size, hidden_size, bias=False) + self.ln_x = nn.GroupNorm(num_heads, hidden_size, eps=(1e-5)*(config.head_size_divisor**2)) + + def extract_key_value(self, hidden, state=None): + # Mix hidden with the previous timestep to produce key, value, receptance + if hidden.size(1) == 1 and state is not None: + shifted = state[0][:, :, self.layer_id] + else: + shifted = self.time_shift(hidden) + if state is not None: + shifted[:, 0] = state[0][:, :, self.layer_id] + if len(shifted.size()) == 2: + shifted = shifted.unsqueeze(1) + + x = hidden + + B, T, C = hidden.shape + + xx = shifted - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) + + time_decay = x + xx * (self.time_maa_w + mw) + key = x + xx * (self.time_maa_k + mk) + value = x + xx * (self.time_maa_v + mv) + receptance = x + xx * (self.time_maa_r + mr) + gate = x + xx * (self.time_maa_g + mg) + + receptance = self.receptance(receptance) + key = self.key(key) + value = self.value(value) + gate = F.silu(self.gate(gate)) + + time_decay = torch.tanh(time_decay @ self.time_decay_w1) @ self.time_decay_w2 + time_decay = self.time_decay + time_decay + + if state is not None: + state[0][:, :, self.layer_id] = hidden[:, -1] + + return receptance, key, value, gate, time_decay, state + + def forward(self, hidden, state=None, use_cache=False, seq_mode=True): + receptance, key, value, gate, time_decay, state = self.extract_key_value(hidden, state=state) + + B,T,C = receptance.shape + H, S = self.time_faaaa.shape + + layer_state = state[1][:, :, :, :, self.layer_id] if state is not None else None + out, layer_state = rwkv6_linear_attention( + self.training, receptance, key, value, time_decay, self.time_faaaa, layer_state, + ) + + if layer_state is not None: + state[1][:, :, :, :, self.layer_id] = layer_state + + out = out.reshape(B * T, H * S) + out = F.group_norm(out, num_groups=H, weight=self.ln_x.weight.to(out.dtype), bias=self.ln_x.bias.to(out.dtype), eps=self.ln_x.eps).reshape(B, T, H * S) + out = out.to(dtype=hidden.dtype) * gate + out = self.output(out) + return out, state + + +class Rwkv6FeedForward(nn.Module): + def __init__(self, config, layer_id=0): + super().__init__() + self.config = config + self.layer_id = layer_id + hidden_size = config.hidden_size + # https://github.com/BlinkDL/RWKV-LM/blob/3db37a72356b736966ddd377268f02b80963af3f/RWKV-v4neo/train.py#L168 + intermediate_size = ( + config.intermediate_size + if config.intermediate_size is not None + else int((config.hidden_size * 3.5) // 32 * 32) + ) + + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + self.time_maa_k = nn.Parameter(torch.empty(1, 1, hidden_size)) + self.time_maa_r = 
nn.Parameter(torch.empty(1, 1, hidden_size)) + + self.key = nn.Linear(hidden_size, intermediate_size, bias=False) + self.receptance = nn.Linear(hidden_size, hidden_size, bias=False) + self.value = nn.Linear(intermediate_size, hidden_size, bias=False) + + def forward(self, hidden, state=None): + if hidden.size(1) == 1 and state is not None: + shifted = state[2][:, :, self.layer_id] + else: + shifted = self.time_shift(hidden) + if state is not None: + shifted[:, 0] = state[2][:, :, self.layer_id] + if len(shifted.size()) == 2: + shifted = shifted.unsqueeze(1) + + delta_hidden_to_shifted = shifted - hidden + key = hidden + delta_hidden_to_shifted * self.time_maa_k + receptance = hidden + delta_hidden_to_shifted * self.time_maa_r + + key = torch.square(torch.relu(self.key(key))) + value = self.value(key) + receptance = torch.sigmoid(self.receptance(receptance)) + + if state is not None: + state[2][:, :, self.layer_id] = hidden[:, -1] + + return receptance * value, state + + +class Rwkv6Block(nn.Module): + def __init__(self, config, layer_id): + super().__init__() + self.config = config + self.layer_id = layer_id + + if layer_id == 0: + self.pre_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) + + self.ln1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) + self.ln2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) + + self.attention = Rwkv6SelfAttention(config, layer_id) + self.feed_forward = Rwkv6FeedForward(config, layer_id) + + def forward(self, hidden, state=None, use_cache=False, output_attentions=False, seq_mode=True): + if self.layer_id == 0: + hidden = self.pre_ln(hidden) + attention, state = self.attention(self.ln1(hidden), state=state, use_cache=use_cache, seq_mode=seq_mode) + hidden = hidden + attention + + feed_forward, state = self.feed_forward(self.ln2(hidden), state=state) + hidden = hidden + feed_forward + + outputs = (hidden, state) + if output_attentions: + outputs += (attention,) + else: + outputs += (None,) + + return outputs + + +class Rwkv6PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = Rwkv6Config + base_model_prefix = "rwkv6" + _no_split_modules = ["Rwkv6Block"] + _keep_in_fp32_modules = ["time_decay", "time_first"] + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, Rwkv6SelfAttention): + layer_id = module.layer_id + num_hidden_layers = module.config.num_hidden_layers + hidden_size = module.config.hidden_size + attention_hidden_size = module.attention_hidden_size + head_size = module.config.head_size + num_heads = attention_hidden_size // head_size + + ratio_0_to_1 = layer_id / (num_hidden_layers - 1) # 0 to 1 + ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0 + + time_weight = torch.tensor( + [i / hidden_size for i in range(hidden_size)], + dtype=module.time_maa_k.dtype, + device=module.time_maa_k.device, + ) + time_weight = time_weight[None, None, :] + + decay_speed = [ + -6.0 + 5.0 * (h / (attention_hidden_size - 1)) ** (0.7 + 1.3 * ratio_0_to_1) + for h in range(attention_hidden_size) + ] + decay_speed = torch.tensor(decay_speed, dtype=module.time_decay.dtype, device=module.time_decay.device) + tmp = torch.tensor( + [ + (1.0 - (i / (attention_hidden_size - 1.0))) * ratio_0_to_1 + 0.1 * ((i + 1) % 3 - 1) + for i in range(attention_hidden_size) + ], + dtype=module.time_faaaa.dtype, + device=module.time_faaaa.device, + ) + + with torch.no_grad(): + module.time_maa_x.data = 1.0 - torch.pow(time_weight, ratio_1_to_almost0) + module.time_maa_w.data = 1.0 - torch.pow(time_weight, ratio_1_to_almost0) + module.time_maa_k.data = 1.0 - torch.pow(time_weight, ratio_1_to_almost0) + module.time_maa_v.data = 1.0 - (torch.pow(time_weight, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) + module.time_maa_r.data = 1.0 - torch.pow(time_weight, 0.5 * ratio_1_to_almost0) + module.time_maa_g.data = 1.0 - torch.pow(time_weight, 0.5 * ratio_1_to_almost0) + + TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX for w,k,v,r,g + module.time_maa_w1.data = torch.zeros(hidden_size, TIME_MIX_EXTRA_DIM*5, dtype=module.time_maa_w1.dtype, device=module.time_maa_w1.device).uniform_(-1e-4, 1e-4) + module.time_maa_w2.data = torch.zeros(5, TIME_MIX_EXTRA_DIM, hidden_size, dtype=module.time_maa_w2.dtype, device=module.time_maa_w2.device).uniform_(-1e-4, 1e-4) + + TIME_DECAY_EXTRA_DIM = 64 + module.time_decay_w1.data = torch.zeros(hidden_size, TIME_DECAY_EXTRA_DIM, dtype=module.time_decay_w1.dtype, device=module.time_decay_w1.device).uniform_(-1e-4, 1e-4) + module.time_decay_w2.data = torch.zeros(TIME_DECAY_EXTRA_DIM, attention_hidden_size, dtype=module.time_decay_w2.dtype, device=module.time_decay_w2.device).uniform_(-1e-4, 1e-4) + + module.time_decay.data = decay_speed.reshape(num_heads, head_size) + module.time_faaaa.data = tmp.reshape(num_heads, head_size) + + elif isinstance(module, Rwkv6FeedForward): + layer_id = module.layer_id + num_hidden_layers = module.config.num_hidden_layers + hidden_size = module.config.hidden_size + + ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0 + + time_weight = torch.tensor( + [i / hidden_size for i in range(hidden_size)], + dtype=module.time_maa_k.dtype, + device=module.time_maa_k.device, + ) + time_weight = time_weight[None, None, :] + + with torch.no_grad(): + module.time_maa_k.data = 1.0 - torch.pow(time_weight, ratio_1_to_almost0) + module.time_maa_r.data = 1.0 - torch.pow(time_weight, ratio_1_to_almost0) + + +@dataclass +class Rwkv6Output(ModelOutput): + """ + Class for the RWKV model outputs. 
+ + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`): + The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to + avoid providing the old `input_ids`. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of + the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + state: Optional[List[torch.FloatTensor]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class Rwkv6CausalLMOutput(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`): + The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to + avoid providing the old `input_ids`. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of + the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + state: Optional[List[torch.FloatTensor]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +RWKV6_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) + subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to + general usage and behavior. + + Parameters: + config ([`Rwkv6Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +RWKV6_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length` if `past_key_values` is `None` else + `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input + sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their + past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input + IDs?](../glossary#input-ids) + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + state (tuple of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`, *optional*): + If passed along, the model uses the previous state in all the blocks (which will give the output for the + `input_ids` provided as if the model add `state_input_ids + input_ids` as context). + use_cache (`bool`, *optional*): + If set to `True`, the last state is returned and can be used to quickly generate the next logits. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare RWKV6 Model transformer outputting raw hidden-states without any specific head on top.", + RWKV6_START_DOCSTRING, +) +class Rwkv6Model(Rwkv6PreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size) + self.blocks = nn.ModuleList([Rwkv6Block(config, layer_id=idx) for idx in range(config.num_hidden_layers)]) + self.ln_out = nn.LayerNorm(config.hidden_size) + + self.layers_are_rescaled = False + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings + + def set_input_embeddings(self, new_embeddings): + self.embeddings = new_embeddings + + @add_start_docstrings_to_model_forward(RWKV6_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=Rwkv6Output, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, # noqa + inputs_embeds: Optional[torch.FloatTensor] = None, + state: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Rwkv6Output]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + # FIXME - training is supportable with the CUDA code + # rwkv6 only support inference in huggingface. + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.training == self.layers_are_rescaled and ( + self.embeddings.weight.dtype == torch.float16 or self.embeddings.weight.dtype == torch.bfloat16 + ): + self._rescale_layers() + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is None and inputs_embeds is None: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embeddings(input_ids) + + if state is None: + state = [] + head_size = self.config.head_size + num_heads = self.config.attention_hidden_size // head_size + state_attn_x = torch.zeros( + (inputs_embeds.size(0), self.config.hidden_size, self.config.num_hidden_layers), + dtype=inputs_embeds.dtype, + requires_grad=False, + device=inputs_embeds.device, + ).contiguous() + state_attn_kv = torch.zeros( + ( + inputs_embeds.size(0), + num_heads, + head_size, + head_size, + self.config.num_hidden_layers, + ), + dtype=torch.float32, + requires_grad=False, + device=inputs_embeds.device, + ).contiguous() + state_ffn_x = torch.zeros( + (inputs_embeds.size(0), self.config.hidden_size, self.config.num_hidden_layers), + dtype=inputs_embeds.dtype, + requires_grad=False, + device=inputs_embeds.device, + ).contiguous() + state.append(state_attn_x) + state.append(state_attn_kv) + state.append(state_ffn_x) + + seq_mode = inputs_embeds.shape[1] > 1 + hidden_states = inputs_embeds + + all_self_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + for idx, block in 
enumerate(self.blocks): + hidden_states, state, attentions = block( + hidden_states, state=state, use_cache=use_cache, output_attentions=output_attentions, seq_mode=seq_mode + ) + if ( + self.layers_are_rescaled + and self.config.rescale_every > 0 + and (idx + 1) % self.config.rescale_every == 0 + ): + hidden_states = hidden_states / 2 + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if output_attentions: + all_self_attentions = all_self_attentions + (attentions,) + + hidden_states = self.ln_out(hidden_states) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return (hidden_states, state, all_hidden_states, all_self_attentions) + + return Rwkv6Output( + last_hidden_state=hidden_states, + state=state, + hidden_states=all_hidden_states, # None + attentions=all_self_attentions, # None + ) + + def _rescale_layers(self): + # Layers should be rescaled for inference only. + if self.layers_are_rescaled == (not self.training): + return + if self.config.rescale_every > 0: + with torch.no_grad(): + for block_id, block in enumerate(self.blocks): + if self.training: + block.attention.output.weight.mul_(2 ** int(block_id // self.config.rescale_every)) + block.feed_forward.value.weight.mul_(2 ** int(block_id // self.config.rescale_every)) + else: + # Deal with quantization statistics + if hasattr(block.attention.output.weight, "SCB"): + block.attention.output.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every)) + block.feed_forward.value.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every)) + elif hasattr(block.attention.output.weight, "quant_state"): + self._bnb_4bit_dequantize_and_rescale(block.attention.output, block_id) + self._bnb_4bit_dequantize_and_rescale(block.feed_forward.value, block_id) + else: + block.attention.output.weight.div_(2 ** int(block_id // self.config.rescale_every)) + block.feed_forward.value.weight.div_(2 ** int(block_id // self.config.rescale_every)) + + self.layers_are_rescaled = not self.training + + def _bnb_4bit_dequantize_and_rescale(self, target_layer, block_id): + r""" + Perform the dequantization and rescaling of the weights of a given layer. After that operation the layer will + be quantized again. + """ + if not is_bitsandbytes_available(): + raise ImportError("Please install bitsandbytes to use this method.") + import bitsandbytes as bnb + + dequant_weights = bnb.functional.dequantize_4bit(target_layer.weight.data, target_layer.weight.quant_state) + + dequant_weights.div_(2 ** int(block_id // self.config.rescale_every)) + + # re-quantize the model: + # we need to put it first on CPU then back to the device + # this will create an overhead :/ + # We set requires_grad=False as we cannot compute gradients on top of 4bit parameters anyway and to avoid + # bugs with bnb + quant_weight = bnb.nn.Params4bit(dequant_weights.to("cpu"), requires_grad=False).to(dequant_weights.device) + setattr(target_layer, "weight", quant_weight) + + +# copied from HuggingFace https://github.com/huggingface/transformers/blob/main/src/transformers/models/rwkv/modeling_rwkv.py +@add_start_docstrings( + """ + The RWKV6 Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). 
+ """, + RWKV6_START_DOCSTRING, +) +class Rwkv6ForCausalLM(Rwkv6PreTrainedModel): + _tied_weights_keys = ["head.weight"] + + def __init__(self, config): + super().__init__(config) + self.rwkv = Rwkv6Model(config) + self.head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.head + + def set_output_embeddings(self, new_embeddings): + self.head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, state=None, inputs_embeds=None, **kwargs): + # only last token for inputs_ids if the state is passed along. + if state is not None: + input_ids = input_ids[:, -1].unsqueeze(-1) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and state is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs["state"] = state + return model_inputs + + @add_start_docstrings_to_model_forward(RWKV6_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=Rwkv6CausalLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + state: Optional[List[torch.FloatTensor]] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Rwkv6CausalLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.rwkv( + input_ids, + inputs_embeds=inputs_embeds, + state=state, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = outputs[0] + + logits = self.head(hidden_states) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return Rwkv6CausalLMOutput( + loss=loss, + logits=logits, + state=outputs.state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/models/rwkv-6-world-1b6/pytorch_model.bin b/models/rwkv-6-world-1b6/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..9f4822d40f791f36b745cd266d5ef3ed2be777aa --- /dev/null +++ b/models/rwkv-6-world-1b6/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:609ffca33ff73d53bf059f7336396dc39bfe76764d3b263429ee5f2933688993 +size 3199826561 diff --git a/models/rwkv-6-world-1b6/rwkv_vocab_v20230424.txt b/models/rwkv-6-world-1b6/rwkv_vocab_v20230424.txt new file mode 100644 index 0000000000000000000000000000000000000000..b8c3f7260d2cac8b83bfbe7bc43f462fef8c4012 --- /dev/null +++ b/models/rwkv-6-world-1b6/rwkv_vocab_v20230424.txt @@ -0,0 +1,65529 @@ +1 '\x00' 1 +2 '\x01' 1 +3 '\x02' 1 +4 '\x03' 1 +5 '\x04' 1 +6 '\x05' 1 +7 '\x06' 1 +8 '\x07' 1 +9 '\x08' 1 +10 '\t' 1 +11 '\n' 1 +12 '\x0b' 1 +13 '\x0c' 1 +14 '\r' 1 +15 '\x0e' 1 +16 '\x0f' 1 +17 '\x10' 1 +18 '\x11' 1 +19 '\x12' 1 +20 '\x13' 1 +21 '\x14' 1 +22 '\x15' 1 +23 '\x16' 1 +24 '\x17' 1 +25 '\x18' 1 +26 '\x19' 1 +27 '\x1a' 1 +28 '\x1b' 1 +29 '\x1c' 1 +30 '\x1d' 1 +31 '\x1e' 1 +32 '\x1f' 1 +33 ' ' 1 +34 '!' 1 +35 '"' 1 +36 '#' 1 +37 '$' 1 +38 '%' 1 +39 '&' 1 +40 "'" 1 +41 '(' 1 +42 ')' 1 +43 '*' 1 +44 '+' 1 +45 ',' 1 +46 '-' 1 +47 '.' 1 +48 '/' 1 +49 '0' 1 +50 '1' 1 +51 '2' 1 +52 '3' 1 +53 '4' 1 +54 '5' 1 +55 '6' 1 +56 '7' 1 +57 '8' 1 +58 '9' 1 +59 ':' 1 +60 ';' 1 +61 '<' 1 +62 '=' 1 +63 '>' 1 +64 '?' 
[Added file content: a byte-level tokenizer vocabulary listing, one added line per token in the form `<token id> <token repr> <byte length>`. Token ids 65 through 256 are single-byte tokens (printable ASCII such as '@', 'A' … 'z', '{', '~', then raw bytes b'\x80' … b'\xff'); ids 257 through 3319 are two-byte tokens (whitespace pairs, punctuation pairs, digit pairs, letter bigrams, accented and non-Latin characters, and two-byte UTF-8 prefixes); ids from 3320 onward are three-byte tokens (leading-space bigrams, triple whitespace, and similar), continuing through at least id 5135 at the end of this span. The full per-token listing is not reproduced here.]
3 +5136 ' أ' 3 +5137 ' إ' 3 +5138 ' ا' 3 +5139 ' ب' 3 +5140 ' ت' 3 +5141 ' ج' 3 +5142 ' ح' 3 +5143 ' خ' 3 +5144 ' د' 3 +5145 ' ر' 3 +5146 ' س' 3 +5147 ' ش' 3 +5148 ' ص' 3 +5149 ' ع' 3 +5150 ' ف' 3 +5151 ' ق' 3 +5152 ' ك' 3 +5153 ' ل' 3 +5154 ' م' 3 +5155 ' ن' 3 +5156 ' ه' 3 +5157 ' و' 3 +5158 ' ي' 3 +5159 ' پ' 3 +5160 ' ک' 3 +5161 ' گ' 3 +5162 ' ی' 3 +5163 '!!!' 3 +5164 '!")' 3 +5165 '!",' 3 +5166 "!'," 3 +5167 '!),' 3 +5168 '!).' 3 +5169 '!--' 3 +5170 '"""' 3 +5171 '"",' 3 +5172 '"))' 3 +5173 '"),' 3 +5174 '").' 3 +5175 '"):' 3 +5176 '");' 3 +5177 '")]' 3 +5178 '","' 3 +5179 '"--' 3 +5180 '"/>' 3 +5181 '":"' 3 +5182 '":[' 3 +5183 '":{' 3 +5184 '"' 3 +5186 '">&' 3 +5187 '">\'' 3 +5188 '"><' 3 +5189 '"?>' 3 +5190 '"])' 3 +5191 '"],' 3 +5192 '"].' 3 +5193 '"]:' 3 +5194 '"];' 3 +5195 '"][' 3 +5196 '"]]' 3 +5197 '"]}' 3 +5198 '"})' 3 +5199 '"},' 3 +5200 '"}}' 3 +5201 '###' 3 +5202 '%),' 3 +5203 '%).' 3 +5204 '\'",' 3 +5205 "'''" 3 +5206 "')(" 3 +5207 "'))" 3 +5208 "')," 3 +5209 "')." 3 +5210 "'):" 3 +5211 "');" 3 +5212 "')[" 3 +5213 "')]" 3 +5214 "','" 3 +5215 "':'" 3 +5216 "'" 3 +5218 "'><" 3 +5219 "'])" 3 +5220 "']*" 3 +5221 "']," 3 +5222 "']." 3 +5223 "']:" 3 +5224 "'];" 3 +5225 "']=" 3 +5226 "'][" 3 +5227 "']]" 3 +5228 "']}" 3 +5229 "'ll" 3 +5230 "'re" 3 +5231 "'ve" 3 +5232 "'})" 3 +5233 "'}," 3 +5234 '(""' 3 +5235 '("#' 3 +5236 '("%' 3 +5237 '("+' 3 +5238 '(",' 3 +5239 '("-' 3 +5240 '(".' 3 +5241 '("/' 3 +5242 '(":' 3 +5243 '("<' 3 +5244 '("@' 3 +5245 '("\\' 3 +5246 '($_' 3 +5247 "('#" 3 +5248 "('$" 3 +5249 "('," 3 +5250 "('-" 3 +5251 "('." 3 +5252 "('/" 3 +5253 "(':" 3 +5254 "('<" 3 +5255 "('@" 3 +5256 "('[" 3 +5257 "('\\" 3 +5258 "('^" 3 +5259 "('_" 3 +5260 "(('" 3 +5261 '(((' 3 +5262 '(()' 3 +5263 '()"' 3 +5264 '()(' 3 +5265 '())' 3 +5266 '(),' 3 +5267 '().' 3 +5268 '():' 3 +5269 '();' 3 +5270 '()[' 3 +5271 '()]' 3 +5272 '()`' 3 +5273 '(){' 3 +5274 '()}' 3 +5275 '(*)' 3 +5276 '(**' 3 +5277 '(?:' 3 +5278 '(@"' 3 +5279 '(["' 3 +5280 "(['" 3 +5281 '([[' 3 +5282 '([\\' 3 +5283 '([^' 3 +5284 '(__' 3 +5285 "({'" 3 +5286 ')")' 3 +5287 ')",' 3 +5288 ')".' 3 +5289 ')">' 3 +5290 ")'," 3 +5291 ')(?' 3 +5292 ')))' 3 +5293 '))*' 3 +5294 ')),' 3 +5295 ')).' 3 +5296 '))/' 3 +5297 ')):' 3 +5298 '));' 3 +5299 '))?' 3 +5300 '))\\' 3 +5301 '))]' 3 +5302 ')){' 3 +5303 ')*(' 3 +5304 ')**' 3 +5305 ')+(' 3 +5306 '),(' 3 +5307 '),\\' 3 +5308 ')--' 3 +5309 ')->' 3 +5310 ')."' 3 +5311 ')..' 3 +5312 ').[' 3 +5313 ').\\' 3 +5314 ')/(' 3 +5315 ');\\' 3 +5316 ')' 3 +5349 '->_' 3 +5350 '.""' 3 +5351 '.")' 3 +5352 '.",' 3 +5353 '."[' 3 +5354 '.$$' 3 +5355 '.\'"' 3 +5356 ".''" 3 +5357 ".')" 3 +5358 ".'," 3 +5359 '.),' 3 +5360 '.).' 3 +5361 '.--' 3 +5362 '...' 3 +5363 '../' 3 +5364 '.' 3 +5371 "/')" 3 +5372 "/'," 3 +5373 '/*!' 3 +5374 '/**' 3 +5375 '/*.' 3 +5376 '///' 3 +5377 '/>.' 3 +5378 '/__' 3 +5379 ':")' 3 +5380 ':",' 3 +5381 ":')" 3 +5382 ":'," 3 +5383 ':**' 3 +5384 ':--' 3 +5385 '://' 3 +5386 ':' 3 +5393 ';")' 3 +5431 '>",' 3 +5432 '>";' 3 +5433 ">')" 3 +5434 ">'," 3 +5435 ">';" 3 +5436 '>()' 3 +5437 '>).' 3 +5438 '>::' 3 +5439 '>>>' 3 +5441 '>{{' 3 +5442 '?",' 3 +5443 "?'," 3 +5444 '?),' 3 +5445 '?).' 3 +5446 '???' 
3 +5447 'AAA' 3 +5448 'ABA' 3 +5449 'ABC' 3 +5450 'ABI' 3 +5451 'ABS' 3 +5452 'ACA' 3 +5453 'ACC' 3 +5454 'ACE' 3 +5455 'ACH' 3 +5456 'ACK' 3 +5457 'ACP' 3 +5458 'ACS' 3 +5459 'ACT' 3 +5460 'ADA' 3 +5461 'ADC' 3 +5462 'ADD' 3 +5463 'ADE' 3 +5464 'ADO' 3 +5465 'ADS' 3 +5466 'AES' 3 +5467 'AFF' 3 +5468 'AFP' 3 +5469 'AGE' 3 +5470 'AGG' 3 +5471 'AIL' 3 +5472 'AIN' 3 +5473 'AIR' 3 +5474 'ALA' 3 +5475 'ALE' 3 +5476 'ALK' 3 +5477 'ALL' 3 +5478 'ALS' 3 +5479 'ALT' 3 +5480 'AMA' 3 +5481 'AMB' 3 +5482 'AMD' 3 +5483 'AME' 3 +5484 'AMI' 3 +5485 'AML' 3 +5486 'AMP' 3 +5487 'AMS' 3 +5488 'ANA' 3 +5489 'ANC' 3 +5490 'AND' 3 +5491 'ANE' 3 +5492 'ANG' 3 +5493 'ANI' 3 +5494 'ANK' 3 +5495 'ANN' 3 +5496 'ANO' 3 +5497 'ANS' 3 +5498 'ANT' 3 +5499 'ANY' 3 +5500 'APE' 3 +5501 'APH' 3 +5502 'API' 3 +5503 'APP' 3 +5504 'APS' 3 +5505 'ARA' 3 +5506 'ARB' 3 +5507 'ARC' 3 +5508 'ARD' 3 +5509 'ARE' 3 +5510 'ARG' 3 +5511 'ARI' 3 +5512 'ARK' 3 +5513 'ARM' 3 +5514 'ARN' 3 +5515 'ARP' 3 +5516 'ARR' 3 +5517 'ARS' 3 +5518 'ART' 3 +5519 'ARY' 3 +5520 'ASA' 3 +5521 'ASC' 3 +5522 'ASE' 3 +5523 'ASH' 3 +5524 'ASK' 3 +5525 'ASM' 3 +5526 'ASP' 3 +5527 'ASS' 3 +5528 'AST' 3 +5529 'ASY' 3 +5530 'ATA' 3 +5531 'ATE' 3 +5532 'ATH' 3 +5533 'ATI' 3 +5534 'ATO' 3 +5535 'ATS' 3 +5536 'ATT' 3 +5537 'AUD' 3 +5538 'AUT' 3 +5539 'AVA' 3 +5540 'AVE' 3 +5541 'AWS' 3 +5542 'Abs' 3 +5543 'Acc' 3 +5544 'Ack' 3 +5545 'Act' 3 +5546 'Ada' 3 +5547 'Add' 3 +5548 'Adj' 3 +5549 'Adv' 3 +5550 'Aff' 3 +5551 'Age' 3 +5552 'Agg' 3 +5553 'Air' 3 +5554 'Akt' 3 +5555 'Ald' 3 +5556 'Ale' 3 +5557 'Alg' 3 +5558 'Ali' 3 +5559 'All' 3 +5560 'Alt' 3 +5561 'Amb' 3 +5562 'Amy' 3 +5563 'And' 3 +5564 'Ang' 3 +5565 'Ann' 3 +5566 'Ans' 3 +5567 'Ant' 3 +5568 'Any' 3 +5569 'Api' 3 +5570 'App' 3 +5571 'Apr' 3 +5572 'Aqu' 3 +5573 'Arc' 3 +5574 'Are' 3 +5575 'Arg' 3 +5576 'Ari' 3 +5577 'Arm' 3 +5578 'Arn' 3 +5579 'Arr' 3 +5580 'Art' 3 +5581 'Asc' 3 +5582 'Ash' 3 +5583 'Ask' 3 +5584 'Asp' 3 +5585 'Ass' 3 +5586 'Ast' 3 +5587 'Ath' 3 +5588 'Atl' 3 +5589 'Att' 3 +5590 'Aud' 3 +5591 'Aug' 3 +5592 'Aut' 3 +5593 'Aux' 3 +5594 'Avg' 3 +5595 'Aws' 3 +5596 'BAD' 3 +5597 'BAL' 3 +5598 'BAR' 3 +5599 'BAS' 3 +5600 'BAT' 3 +5601 'BBC' 3 +5602 'BER' 3 +5603 'BIG' 3 +5604 'BIN' 3 +5605 'BIT' 3 +5606 'BLE' 3 +5607 'BMI' 3 +5608 'BOT' 3 +5609 'BOX' 3 +5610 'BRE' 3 +5611 'BSD' 3 +5612 'BUF' 3 +5613 'BUG' 3 +5614 'BUR' 3 +5615 'BUS' 3 +5616 'Bab' 3 +5617 'Bad' 3 +5618 'Bag' 3 +5619 'Bah' 3 +5620 'Bal' 3 +5621 'Ban' 3 +5622 'Bar' 3 +5623 'Bas' 3 +5624 'Bat' 3 +5625 'Bay' 3 +5626 'Bbb' 3 +5627 'Bed' 3 +5628 'Bel' 3 +5629 'Ben' 3 +5630 'Ber' 3 +5631 'Bes' 3 +5632 'Bet' 3 +5633 'Bib' 3 +5634 'Bid' 3 +5635 'Big' 3 +5636 'Bin' 3 +5637 'Bio' 3 +5638 'Bir' 3 +5639 'Bit' 3 +5640 'Blo' 3 +5641 'Bob' 3 +5642 'Bol' 3 +5643 'Bon' 3 +5644 'Bor' 3 +5645 'Bot' 3 +5646 'Bow' 3 +5647 'Box' 3 +5648 'Boy' 3 +5649 'Bra' 3 +5650 'Bre' 3 +5651 'Bro' 3 +5652 'Btn' 3 +5653 'Buf' 3 +5654 'Bug' 3 +5655 'Bul' 3 +5656 'Bur' 3 +5657 'Bus' 3 +5658 'But' 3 +5659 'Buy' 3 +5660 'CAC' 3 +5661 'CAD' 3 +5662 'CAL' 3 +5663 'CAM' 3 +5664 'CAN' 3 +5665 'CAP' 3 +5666 'CAR' 3 +5667 'CAS' 3 +5668 'CAT' 3 +5669 'CBC' 3 +5670 'CBS' 3 +5671 'CCA' 3 +5672 'CCC' 3 +5673 'CDC' 3 +5674 'CDF' 3 +5675 'CEL' 3 +5676 'CEO' 3 +5677 'CEP' 3 +5678 'CER' 3 +5679 'CES' 3 +5680 'CFG' 3 +5681 'CHA' 3 +5682 'CHE' 3 +5683 'CHO' 3 +5684 'CHR' 3 +5685 'CID' 3 +5686 'CLA' 3 +5687 'CLC' 3 +5688 'CLE' 3 +5689 'CLI' 3 +5690 'CLK' 3 +5691 'CLS' 3 +5692 'CLU' 3 +5693 'CMD' 3 +5694 'CMS' 3 +5695 'CNN' 3 +5696 'CNT' 3 +5697 'COD' 3 +5698 'COL' 3 +5699 'COM' 3 +5700 
'CON' 3 +5701 'COR' 3 +5702 'COS' 3 +5703 'CPP' 3 +5704 'CPU' 3 +5705 'CRC' 3 +5706 'CRE' 3 +5707 'CSI' 3 +5708 'CSS' 3 +5709 'CSV' 3 +5710 'CTC' 3 +5711 'CTL' 3 +5712 'CTT' 3 +5713 'CTX' 3 +5714 'CUR' 3 +5715 'Cab' 3 +5716 'Cad' 3 +5717 'Cal' 3 +5718 'Cam' 3 +5719 'Can' 3 +5720 'Cap' 3 +5721 'Car' 3 +5722 'Cas' 3 +5723 'Cat' 3 +5724 'Cel' 3 +5725 'Cfg' 3 +5726 'Cha' 3 +5727 'Che' 3 +5728 'Chi' 3 +5729 'Cho' 3 +5730 'Cir' 3 +5731 'Cit' 3 +5732 'Cla' 3 +5733 'Cle' 3 +5734 'Cli' 3 +5735 'Clo' 3 +5736 'Cmd' 3 +5737 'Cnt' 3 +5738 'CoV' 3 +5739 'Cod' 3 +5740 'Cog' 3 +5741 'Col' 3 +5742 'Com' 3 +5743 'Con' 3 +5744 'Cop' 3 +5745 'Cor' 3 +5746 'Cos' 3 +5747 'Cov' 3 +5748 'Cre' 3 +5749 'Cro' 3 +5750 'Css' 3 +5751 'Csv' 3 +5752 'Ctr' 3 +5753 'Ctx' 3 +5754 'Cur' 3 +5755 'Cut' 3 +5756 'DAC' 3 +5757 'DAG' 3 +5758 'DAO' 3 +5759 'DAT' 3 +5760 'DAY' 3 +5761 'DBC' 3 +5762 'DEC' 3 +5763 'DED' 3 +5764 'DEF' 3 +5765 'DEL' 3 +5766 'DEM' 3 +5767 'DEN' 3 +5768 'DEP' 3 +5769 'DER' 3 +5770 'DES' 3 +5771 'DET' 3 +5772 'DEV' 3 +5773 'DEX' 3 +5774 'DIC' 3 +5775 'DIG' 3 +5776 'DIM' 3 +5777 'DIR' 3 +5778 'DIS' 3 +5779 'DIV' 3 +5780 'DLL' 3 +5781 'DNA' 3 +5782 'DNS' 3 +5783 'DOC' 3 +5784 'DOM' 3 +5785 'DON' 3 +5786 'DOT' 3 +5787 'DTD' 3 +5788 'DVD' 3 +5789 'Dal' 3 +5790 'Dam' 3 +5791 'Dan' 3 +5792 'Dao' 3 +5793 'Dar' 3 +5794 'Das' 3 +5795 'Dat' 3 +5796 'Dav' 3 +5797 'Day' 3 +5798 'Deb' 3 +5799 'Dec' 3 +5800 'Def' 3 +5801 'Deg' 3 +5802 'Del' 3 +5803 'Dem' 3 +5804 'Den' 3 +5805 'Dep' 3 +5806 'Der' 3 +5807 'Des' 3 +5808 'Det' 3 +5809 'Dev' 3 +5810 'Dic' 3 +5811 'Did' 3 +5812 'Die' 3 +5813 'Dig' 3 +5814 'Dim' 3 +5815 'Dir' 3 +5816 'Dis' 3 +5817 'Div' 3 +5818 'Dlg' 3 +5819 'Doc' 3 +5820 'Dog' 3 +5821 'Dom' 3 +5822 'Don' 3 +5823 'Dot' 3 +5824 'Dou' 3 +5825 'Dry' 3 +5826 'Dub' 3 +5827 'Due' 3 +5828 'Dup' 3 +5829 'Dur' 3 +5830 'Dyn' 3 +5831 'Dé' 3 +5832 'EAR' 3 +5833 'ECD' 3 +5834 'ECK' 3 +5835 'ECT' 3 +5836 'EEE' 3 +5837 'EEK' 3 +5838 'EFF' 3 +5839 'ELD' 3 +5840 'ELE' 3 +5841 'ELL' 3 +5842 'ELS' 3 +5843 'ELY' 3 +5844 'EMA' 3 +5845 'EMP' 3 +5846 'ENA' 3 +5847 'ENC' 3 +5848 'END' 3 +5849 'ENE' 3 +5850 'ENG' 3 +5851 'ENO' 3 +5852 'ENS' 3 +5853 'ENT' 3 +5854 'ENV' 3 +5855 'EOF' 3 +5856 'EPS' 3 +5857 'ERA' 3 +5858 'ERC' 3 +5859 'ERE' 3 +5860 'ERN' 3 +5861 'ERO' 3 +5862 'ERR' 3 +5863 'ERS' 3 +5864 'ERT' 3 +5865 'ERV' 3 +5866 'ERY' 3 +5867 'ESA' 3 +5868 'ESC' 3 +5869 'ESH' 3 +5870 'ESP' 3 +5871 'ESS' 3 +5872 'EST' 3 +5873 'ETA' 3 +5874 'ETH' 3 +5875 'ETS' 3 +5876 'EUR' 3 +5877 'EXP' 3 +5878 'EXT' 3 +5879 'Ear' 3 +5880 'Eff' 3 +5881 'Ele' 3 +5882 'Ell' 3 +5883 'Emb' 3 +5884 'Emp' 3 +5885 'Enc' 3 +5886 'End' 3 +5887 'Eng' 3 +5888 'Enh' 3 +5889 'Ent' 3 +5890 'Env' 3 +5891 'Equ' 3 +5892 'Err' 3 +5893 'Esc' 3 +5894 'Esp' 3 +5895 'Ess' 3 +5896 'Est' 3 +5897 'Eth' 3 +5898 'Exc' 3 +5899 'Exp' 3 +5900 'Ext' 3 +5901 'Eye' 3 +5902 'FER' 3 +5903 'FET' 3 +5904 'FFF' 3 +5905 'FFT' 3 +5906 'FIG' 3 +5907 'FIL' 3 +5908 'FIN' 3 +5909 'FIR' 3 +5910 'FIT' 3 +5911 'FIX' 3 +5912 'FLO' 3 +5913 'FOR' 3 +5914 'FUN' 3 +5915 'Fab' 3 +5916 'Fac' 3 +5917 'Fal' 3 +5918 'Fan' 3 +5919 'Far' 3 +5920 'Fat' 3 +5921 'Feb' 3 +5922 'Fed' 3 +5923 'Fel' 3 +5924 'Fer' 3 +5925 'Few' 3 +5926 'Fig' 3 +5927 'Fil' 3 +5928 'Fin' 3 +5929 'Fit' 3 +5930 'Fix' 3 +5931 'Flo' 3 +5932 'Flu' 3 +5933 'Fly' 3 +5934 'Fmt' 3 +5935 'Foo' 3 +5936 'For' 3 +5937 'Fox' 3 +5938 'Fra' 3 +5939 'Fre' 3 +5940 'Fri' 3 +5941 'Fun' 3 +5942 'GAL' 3 +5943 'GAN' 3 +5944 'GAT' 3 +5945 'GBT' 3 +5946 'GCC' 3 +5947 'GEN' 3 +5948 'GER' 3 +5949 'GES' 3 +5950 'GET' 3 +5951 'GHz' 3 +5952 'GIN' 3 +5953 'GIS' 3 +5954 
'GIT' 3 +5955 'GLE' 3 +5956 'GMT' 3 +5957 'GNU' 3 +5958 'GPL' 3 +5959 'GPS' 3 +5960 'GPU' 3 +5961 'GRA' 3 +5962 'GRE' 3 +5963 'GRO' 3 +5964 'GRP' 3 +5965 'GUI' 3 +5966 'Gab' 3 +5967 'Gal' 3 +5968 'Gap' 3 +5969 'Gar' 3 +5970 'Gas' 3 +5971 'GeV' 3 +5972 'Gem' 3 +5973 'Gen' 3 +5974 'Geo' 3 +5975 'Ger' 3 +5976 'Get' 3 +5977 'Gib' 3 +5978 'Gil' 3 +5979 'Git' 3 +5980 'God' 3 +5981 'Got' 3 +5982 'Gra' 3 +5983 'Gre' 3 +5984 'Gro' 3 +5985 'Gui' 3 +5986 'Gun' 3 +5987 'Guy' 3 +5988 'HAL' 3 +5989 'HAS' 3 +5990 'HEL' 3 +5991 'HER' 3 +5992 'HIV' 3 +5993 'HOW' 3 +5994 'Had' 3 +5995 'Hal' 3 +5996 'Ham' 3 +5997 'Han' 3 +5998 'Har' 3 +5999 'Has' 3 +6000 'Haw' 3 +6001 'Hay' 3 +6002 'Haz' 3 +6003 'Hel' 3 +6004 'Hen' 3 +6005 'Her' 3 +6006 'Hex' 3 +6007 'Hey' 3 +6008 'Hig' 3 +6009 'Hip' 3 +6010 'His' 3 +6011 'Hit' 3 +6012 'Hol' 3 +6013 'Hom' 3 +6014 'Hon' 3 +6015 'Hop' 3 +6016 'Hor' 3 +6017 'Hot' 3 +6018 'How' 3 +6019 'Hub' 3 +6020 'Hum' 3 +6021 'IAL' 3 +6022 'IAN' 3 +6023 'IAS' 3 +6024 'IBM' 3 +6025 'ICA' 3 +6026 'ICC' 3 +6027 'ICE' 3 +6028 'ICH' 3 +6029 'ICI' 3 +6030 'ICK' 3 +6031 'ICO' 3 +6032 'ICS' 3 +6033 'ICT' 3 +6034 'IDA' 3 +6035 'IDD' 3 +6036 'IDE' 3 +6037 'IDI' 3 +6038 'IDS' 3 +6039 'IDs' 3 +6040 'IED' 3 +6041 'IER' 3 +6042 'IES' 3 +6043 'IEW' 3 +6044 'IFE' 3 +6045 'IFF' 3 +6046 'IFI' 3 +6047 'IFT' 3 +6048 'IFY' 3 +6049 'IGH' 3 +6050 'IGN' 3 +6051 'III' 3 +6052 'ILD' 3 +6053 'ILE' 3 +6054 'ILL' 3 +6055 'ILS' 3 +6056 'ILY' 3 +6057 'IMA' 3 +6058 'IME' 3 +6059 'IMG' 3 +6060 'IMO' 3 +6061 'IMP' 3 +6062 'IMS' 3 +6063 'INA' 3 +6064 'INC' 3 +6065 'IND' 3 +6066 'INE' 3 +6067 'INF' 3 +6068 'ING' 3 +6069 'INI' 3 +6070 'INK' 3 +6071 'INO' 3 +6072 'INS' 3 +6073 'INT' 3 +6074 'ION' 3 +6075 'IOR' 3 +6076 'IOS' 3 +6077 'IPA' 3 +6078 'IPP' 3 +6079 'IPS' 3 +6080 'IPT' 3 +6081 'IPV' 3 +6082 'IPv' 3 +6083 'IRA' 3 +6084 'IRC' 3 +6085 'IRD' 3 +6086 'IRE' 3 +6087 'IRS' 3 +6088 'IRT' 3 +6089 'ISA' 3 +6090 'ISC' 3 +6091 'ISE' 3 +6092 'ISH' 3 +6093 'ISM' 3 +6094 'ISO' 3 +6095 'ISP' 3 +6096 'ISS' 3 +6097 'IST' 3 +6098 'ITA' 3 +6099 'ITE' 3 +6100 'ITH' 3 +6101 'ITS' 3 +6102 'ITT' 3 +6103 'ITY' 3 +6104 'IUM' 3 +6105 'IVE' 3 +6106 'IZE' 3 +6107 'Ice' 3 +6108 'Ich' 3 +6109 'Ide' 3 +6110 'Ids' 3 +6111 'Idx' 3 +6112 'Ign' 3 +6113 'Ill' 3 +6114 'Img' 3 +6115 'Imm' 3 +6116 'Imp' 3 +6117 'Inc' 3 +6118 'Ind' 3 +6119 'Inf' 3 +6120 'Ing' 3 +6121 'Ini' 3 +6122 'Ins' 3 +6123 'Int' 3 +6124 'Inv' 3 +6125 'Ion' 3 +6126 'Isa' 3 +6127 'Isn' 3 +6128 'Iso' 3 +6129 'Iss' 3 +6130 'Its' 3 +6131 'JOB' 3 +6132 'JPG' 3 +6133 'Jac' 3 +6134 'Jam' 3 +6135 'Jan' 3 +6136 'Jar' 3 +6137 'Jay' 3 +6138 'Jen' 3 +6139 'Jer' 3 +6140 'Jet' 3 +6141 'Jim' 3 +6142 'Job' 3 +6143 'Joe' 3 +6144 'Joh' 3 +6145 'Jon' 3 +6146 'Jos' 3 +6147 'Joy' 3 +6148 'Jud' 3 +6149 'Jul' 3 +6150 'Jun' 3 +6151 'KEN' 3 +6152 'KER' 3 +6153 'KEY' 3 +6154 'Kal' 3 +6155 'Kam' 3 +6156 'Kar' 3 +6157 'Kat' 3 +6158 'Kay' 3 +6159 'Ken' 3 +6160 'Ker' 3 +6161 'Key' 3 +6162 'Kim' 3 +6163 'Kin' 3 +6164 'Kir' 3 +6165 'Kit' 3 +6166 'Kon' 3 +6167 'LAB' 3 +6168 'LAN' 3 +6169 'LAR' 3 +6170 'LAS' 3 +6171 'LAT' 3 +6172 'LAY' 3 +6173 'LED' 3 +6174 'LEN' 3 +6175 'LER' 3 +6176 'LES' 3 +6177 'LET' 3 +6178 'LEV' 3 +6179 'LEX' 3 +6180 'LEY' 3 +6181 'LIB' 3 +6182 'LIN' 3 +6183 'LOB' 3 +6184 'LOC' 3 +6185 'LOG' 3 +6186 'LOS' 3 +6187 'LOW' 3 +6188 'Lab' 3 +6189 'Lag' 3 +6190 'Lam' 3 +6191 'Lap' 3 +6192 'Lar' 3 +6193 'Las' 3 +6194 'Lat' 3 +6195 'Law' 3 +6196 'Lay' 3 +6197 'Lbl' 3 +6198 'Lee' 3 +6199 'Leg' 3 +6200 'Len' 3 +6201 'Les' 3 +6202 'Let' 3 +6203 'Lev' 3 +6204 'Lew' 3 +6205 'Lex' 3 +6206 'Lib' 3 +6207 'Lic' 3 
+6208 'Lie' 3 +6209 'Lif' 3 +6210 'Lik' 3 +6211 'Lim' 3 +6212 'Lin' 3 +6213 'Lip' 3 +6214 'Lit' 3 +6215 'Lng' 3 +6216 'Loc' 3 +6217 'Log' 3 +6218 'Lon' 3 +6219 'Los' 3 +6220 'Lot' 3 +6221 'Lou' 3 +6222 'Low' 3 +6223 'Lua' 3 +6224 'Luc' 3 +6225 'Lux' 3 +6226 'MAC' 3 +6227 'MAG' 3 +6228 'MAL' 3 +6229 'MAN' 3 +6230 'MAP' 3 +6231 'MAR' 3 +6232 'MAS' 3 +6233 'MAT' 3 +6234 'MAX' 3 +6235 'MED' 3 +6236 'MEM' 3 +6237 'MEN' 3 +6238 'MER' 3 +6239 'MES' 3 +6240 'MET' 3 +6241 'MHz' 3 +6242 'MIC' 3 +6243 'MIN' 3 +6244 'MIS' 3 +6245 'MIT' 3 +6246 'MIX' 3 +6247 'MLE' 3 +6248 'MLP' 3 +6249 'MOD' 3 +6250 'MON' 3 +6251 'MOS' 3 +6252 'MOV' 3 +6253 'MPI' 3 +6254 'MPL' 3 +6255 'MRI' 3 +6256 'MSC' 3 +6257 'MSE' 3 +6258 'MSG' 3 +6259 'Mac' 3 +6260 'Mad' 3 +6261 'Mag' 3 +6262 'Mah' 3 +6263 'Mal' 3 +6264 'Man' 3 +6265 'Map' 3 +6266 'Mar' 3 +6267 'Mas' 3 +6268 'Mat' 3 +6269 'Max' 3 +6270 'May' 3 +6271 'McC' 3 +6272 'Med' 3 +6273 'Meg' 3 +6274 'Mel' 3 +6275 'Mem' 3 +6276 'Men' 3 +6277 'Mer' 3 +6278 'Mes' 3 +6279 'Met' 3 +6280 'Mex' 3 +6281 'Mgr' 3 +6282 'Mic' 3 +6283 'Mid' 3 +6284 'Mil' 3 +6285 'Min' 3 +6286 'Mir' 3 +6287 'Mis' 3 +6288 'Mit' 3 +6289 'Mix' 3 +6290 'Mob' 3 +6291 'Mod' 3 +6292 'Moh' 3 +6293 'Mol' 3 +6294 'Mom' 3 +6295 'Mon' 3 +6296 'Mor' 3 +6297 'Mot' 3 +6298 'Mov' 3 +6299 'Mrs' 3 +6300 'Msg' 3 +6301 'Mul' 3 +6302 'Mur' 3 +6303 'Mus' 3 +6304 'Mut' 3 +6305 'Mvc' 3 +6306 'NAL' 3 +6307 'NAM' 3 +6308 'NAS' 3 +6309 'NAT' 3 +6310 'NBC' 3 +6311 'NEL' 3 +6312 'NER' 3 +6313 'NES' 3 +6314 'NET' 3 +6315 'NEW' 3 +6316 'NON' 3 +6317 'NOR' 3 +6318 'NOT' 3 +6319 'NOW' 3 +6320 'NPC' 3 +6321 'NUM' 3 +6322 'NaN' 3 +6323 'Nam' 3 +6324 'Nan' 3 +6325 'Nat' 3 +6326 'Nav' 3 +6327 'Neg' 3 +6328 'Net' 3 +6329 'New' 3 +6330 'Nic' 3 +6331 'Nik' 3 +6332 'Nil' 3 +6333 'Nit' 3 +6334 'Nom' 3 +6335 'Non' 3 +6336 'Nor' 3 +6337 'Nos' 3 +6338 'Not' 3 +6339 'Nov' 3 +6340 'Now' 3 +6341 'Num' 3 +6342 'OBJ' 3 +6343 'OCI' 3 +6344 'OCK' 3 +6345 'OCT' 3 +6346 'ODE' 3 +6347 'ODO' 3 +6348 'ODY' 3 +6349 'OFF' 3 +6350 'OID' 3 +6351 'OLD' 3 +6352 'OME' 3 +6353 'ONA' 3 +6354 'OND' 3 +6355 'ONE' 3 +6356 'ONG' 3 +6357 'ONS' 3 +6358 'ONT' 3 +6359 'OPS' 3 +6360 'OPT' 3 +6361 'ORA' 3 +6362 'ORD' 3 +6363 'ORE' 3 +6364 'ORG' 3 +6365 'ORK' 3 +6366 'ORM' 3 +6367 'ORN' 3 +6368 'ORS' 3 +6369 'ORT' 3 +6370 'ORY' 3 +6371 'OSE' 3 +6372 'OSS' 3 +6373 'OST' 3 +6374 'OTA' 3 +6375 'OTE' 3 +6376 'OTH' 3 +6377 'OTO' 3 +6378 'OTP' 3 +6379 'OTS' 3 +6380 'OTT' 3 +6381 'OUR' 3 +6382 'OUS' 3 +6383 'OUT' 3 +6384 'OVA' 3 +6385 'OVE' 3 +6386 'OWN' 3 +6387 'Obj' 3 +6388 'Obs' 3 +6389 'Occ' 3 +6390 'Oct' 3 +6391 'Off' 3 +6392 'Old' 3 +6393 'One' 3 +6394 'Ont' 3 +6395 'Opp' 3 +6396 'Ops' 3 +6397 'Opt' 3 +6398 'Ord' 3 +6399 'Org' 3 +6400 'Ori' 3 +6401 'Our' 3 +6402 'Out' 3 +6403 'Own' 3 +6404 'PAD' 3 +6405 'PAN' 3 +6406 'PAR' 3 +6407 'PAS' 3 +6408 'PAT' 3 +6409 'PBS' 3 +6410 'PCA' 3 +6411 'PCI' 3 +6412 'PCM' 3 +6413 'PCR' 3 +6414 'PDF' 3 +6415 'PED' 3 +6416 'PEG' 3 +6417 'PER' 3 +6418 'PET' 3 +6419 'PHA' 3 +6420 'PHP' 3 +6421 'PIC' 3 +6422 'PID' 3 +6423 'PIN' 3 +6424 'PIO' 3 +6425 'PIP' 3 +6426 'PLA' 3 +6427 'PLC' 3 +6428 'PLE' 3 +6429 'PNG' 3 +6430 'POL' 3 +6431 'POP' 3 +6432 'POR' 3 +6433 'POS' 3 +6434 'PRE' 3 +6435 'PRI' 3 +6436 'PRO' 3 +6437 'PTR' 3 +6438 'PUT' 3 +6439 'PWM' 3 +6440 'Pac' 3 +6441 'Pad' 3 +6442 'Pag' 3 +6443 'Pak' 3 +6444 'Pal' 3 +6445 'Pan' 3 +6446 'Pap' 3 +6447 'Par' 3 +6448 'Pas' 3 +6449 'Pat' 3 +6450 'Pay' 3 +6451 'Pdf' 3 +6452 'Ped' 3 +6453 'Pen' 3 +6454 'Per' 3 +6455 'Pet' 3 +6456 'Phi' 3 +6457 'Pic' 3 +6458 'Pie' 3 +6459 'Pin' 3 +6460 'Pix' 3 +6461 'Pod' 
3 +6462 'Pol' 3 +6463 'Pop' 3 +6464 'Por' 3 +6465 'Pos' 3 +6466 'Pot' 3 +6467 'Pow' 3 +6468 'Pre' 3 +6469 'Pri' 3 +6470 'Pro' 3 +6471 'Psi' 3 +6472 'Ptr' 3 +6473 'Pub' 3 +6474 'Pur' 3 +6475 'Put' 3 +6476 'QUE' 3 +6477 'Qty' 3 +6478 'Que' 3 +6479 'Qui' 3 +6480 'RAD' 3 +6481 'RAL' 3 +6482 'RAM' 3 +6483 'RAN' 3 +6484 'RAW' 3 +6485 'RAY' 3 +6486 'REC' 3 +6487 'RED' 3 +6488 'REE' 3 +6489 'REF' 3 +6490 'REG' 3 +6491 'REL' 3 +6492 'REM' 3 +6493 'REN' 3 +6494 'REP' 3 +6495 'REQ' 3 +6496 'RES' 3 +6497 'RET' 3 +6498 'RFC' 3 +6499 'RGB' 3 +6500 'RIC' 3 +6501 'RIX' 3 +6502 'RMS' 3 +6503 'RNA' 3 +6504 'RNN' 3 +6505 'ROC' 3 +6506 'ROI' 3 +6507 'ROL' 3 +6508 'ROM' 3 +6509 'RON' 3 +6510 'ROP' 3 +6511 'ROS' 3 +6512 'ROT' 3 +6513 'ROW' 3 +6514 'RPC' 3 +6515 'RSA' 3 +6516 'RSS' 3 +6517 'RTC' 3 +6518 'RUN' 3 +6519 'Rab' 3 +6520 'Rad' 3 +6521 'Ram' 3 +6522 'Rat' 3 +6523 'Raw' 3 +6524 'Ray' 3 +6525 'Rec' 3 +6526 'Red' 3 +6527 'Ref' 3 +6528 'Reg' 3 +6529 'Rel' 3 +6530 'Rem' 3 +6531 'Ren' 3 +6532 'Rep' 3 +6533 'Req' 3 +6534 'Res' 3 +6535 'Ret' 3 +6536 'Rev' 3 +6537 'Rew' 3 +6538 'Ric' 3 +6539 'Rob' 3 +6540 'Rod' 3 +6541 'Rol' 3 +6542 'Rom' 3 +6543 'Ron' 3 +6544 'Ros' 3 +6545 'Rot' 3 +6546 'Row' 3 +6547 'Roy' 3 +6548 'Rub' 3 +6549 'Run' 3 +6550 'Ré' 3 +6551 'SAM' 3 +6552 'SAN' 3 +6553 'SAT' 3 +6554 'SCH' 3 +6555 'SCI' 3 +6556 'SCO' 3 +6557 'SCR' 3 +6558 'SDK' 3 +6559 'SDL' 3 +6560 'SEC' 3 +6561 'SED' 3 +6562 'SEE' 3 +6563 'SEG' 3 +6564 'SEL' 3 +6565 'SEM' 3 +6566 'SEP' 3 +6567 'SEQ' 3 +6568 'SER' 3 +6569 'SET' 3 +6570 'SHA' 3 +6571 'SID' 3 +6572 'SIG' 3 +6573 'SIM' 3 +6574 'SMS' 3 +6575 'SNP' 3 +6576 'SOC' 3 +6577 'SOL' 3 +6578 'SON' 3 +6579 'SPE' 3 +6580 'SPI' 3 +6581 'SQL' 3 +6582 'SRC' 3 +6583 'SSH' 3 +6584 'SSL' 3 +6585 'STA' 3 +6586 'STD' 3 +6587 'STE' 3 +6588 'STM' 3 +6589 'STR' 3 +6590 'STS' 3 +6591 'SUB' 3 +6592 'SUM' 3 +6593 'SUP' 3 +6594 'SUR' 3 +6595 'SVG' 3 +6596 'SYS' 3 +6597 'Sab' 3 +6598 'Sac' 3 +6599 'Sad' 3 +6600 'Saf' 3 +6601 'Sal' 3 +6602 'Sam' 3 +6603 'San' 3 +6604 'Sar' 3 +6605 'Sat' 3 +6606 'Sav' 3 +6607 'Say' 3 +6608 'Sch' 3 +6609 'Sci' 3 +6610 'Sdk' 3 +6611 'Sea' 3 +6612 'Sec' 3 +6613 'See' 3 +6614 'Seg' 3 +6615 'Sel' 3 +6616 'Sem' 3 +6617 'Sen' 3 +6618 'Sep' 3 +6619 'Seq' 3 +6620 'Ser' 3 +6621 'Set' 3 +6622 'Sex' 3 +6623 'Sha' 3 +6624 'She' 3 +6625 'Sid' 3 +6626 'Sig' 3 +6627 'Sil' 3 +6628 'Sim' 3 +6629 'Sin' 3 +6630 'Sir' 3 +6631 'Sit' 3 +6632 'Six' 3 +6633 'Sky' 3 +6634 'Soc' 3 +6635 'Sol' 3 +6636 'Son' 3 +6637 'Sou' 3 +6638 'Spe' 3 +6639 'Spl' 3 +6640 'Spr' 3 +6641 'Spy' 3 +6642 'Sql' 3 +6643 'Squ' 3 +6644 'Src' 3 +6645 'Sta' 3 +6646 'Std' 3 +6647 'Ste' 3 +6648 'Sto' 3 +6649 'Str' 3 +6650 'Sty' 3 +6651 'Sub' 3 +6652 'Suc' 3 +6653 'Sud' 3 +6654 'Sum' 3 +6655 'Sun' 3 +6656 'Sup' 3 +6657 'Sur' 3 +6658 'Sus' 3 +6659 'Sym' 3 +6660 'Syn' 3 +6661 'Sys' 3 +6662 'TAB' 3 +6663 'TAG' 3 +6664 'TCP' 3 +6665 'TED' 3 +6666 'TEM' 3 +6667 'TER' 3 +6668 'TES' 3 +6669 'TEX' 3 +6670 'THE' 3 +6671 'TIM' 3 +6672 'TLS' 3 +6673 'TMP' 3 +6674 'TON' 3 +6675 'TOP' 3 +6676 'TOR' 3 +6677 'TRA' 3 +6678 'TRY' 3 +6679 'Tab' 3 +6680 'Tag' 3 +6681 'Tai' 3 +6682 'Tak' 3 +6683 'Tal' 3 +6684 'Tam' 3 +6685 'Tan' 3 +6686 'Tap' 3 +6687 'Tar' 3 +6688 'Tax' 3 +6689 'TeV' 3 +6690 'TeX' 3 +6691 'Ted' 3 +6692 'Tek' 3 +6693 'Tel' 3 +6694 'Tem' 3 +6695 'Ten' 3 +6696 'Ter' 3 +6697 'Tes' 3 +6698 'Tex' 3 +6699 'The' 3 +6700 'Thu' 3 +6701 'Tim' 3 +6702 'Tip' 3 +6703 'Tit' 3 +6704 'Tmp' 3 +6705 'Tok' 3 +6706 'Tom' 3 +6707 'Ton' 3 +6708 'Too' 3 +6709 'Top' 3 +6710 'Tor' 3 +6711 'Tot' 3 +6712 'Toy' 3 +6713 'Tra' 3 +6714 'Tre' 3 +6715 'Tri' 
3 +6716 'Tro' 3 +6717 'Try' 3 +6718 'Tue' 3 +6719 'Tur' 3 +6720 'Two' 3 +6721 'Txt' 3 +6722 'Typ' 3 +6723 'UAL' 3 +6724 'UCK' 3 +6725 'UCT' 3 +6726 'UDP' 3 +6727 'UES' 3 +6728 'UFF' 3 +6729 'UGH' 3 +6730 'UID' 3 +6731 'UIT' 3 +6732 'ULD' 3 +6733 'ULE' 3 +6734 'ULL' 3 +6735 'ULT' 3 +6736 'UME' 3 +6737 'UMN' 3 +6738 'UMP' 3 +6739 'UNC' 3 +6740 'UND' 3 +6741 'UNE' 3 +6742 'UNK' 3 +6743 'UNT' 3 +6744 'URA' 3 +6745 'URE' 3 +6746 'URI' 3 +6747 'URL' 3 +6748 'URN' 3 +6749 'URS' 3 +6750 'USA' 3 +6751 'USB' 3 +6752 'USD' 3 +6753 'USE' 3 +6754 'USH' 3 +6755 'USS' 3 +6756 'UST' 3 +6757 'UTC' 3 +6758 'UTE' 3 +6759 'UTF' 3 +6760 'UTH' 3 +6761 'Uid' 3 +6762 'Ult' 3 +6763 'Und' 3 +6764 'Uni' 3 +6765 'Uns' 3 +6766 'Uri' 3 +6767 'Url' 3 +6768 'Use' 3 +6769 'Usu' 3 +6770 'VAL' 3 +6771 'VAR' 3 +6772 'VED' 3 +6773 'VEL' 3 +6774 'VEN' 3 +6775 'VER' 3 +6776 'VES' 3 +6777 'VIC' 3 +6778 'VID' 3 +6779 'VIE' 3 +6780 'VII' 3 +6781 'VIS' 3 +6782 'VOL' 3 +6783 'VPN' 3 +6784 'Vac' 3 +6785 'Val' 3 +6786 'Van' 3 +6787 'Var' 3 +6788 'Vec' 3 +6789 'Vel' 3 +6790 'Ven' 3 +6791 'Ver' 3 +6792 'Via' 3 +6793 'Vin' 3 +6794 'Vir' 3 +6795 'Vis' 3 +6796 'Vol' 3 +6797 'WAR' 3 +6798 'WAY' 3 +6799 'WEB' 3 +6800 'WER' 3 +6801 'WHO' 3 +6802 'WID' 3 +6803 'WIN' 3 +6804 'WOR' 3 +6805 'Wal' 3 +6806 'War' 3 +6807 'Was' 3 +6808 'Wat' 3 +6809 'Way' 3 +6810 'Web' 3 +6811 'Wed' 3 +6812 'Wel' 3 +6813 'Who' 3 +6814 'Why' 3 +6815 'Wik' 3 +6816 'Wil' 3 +6817 'Win' 3 +6818 'Wol' 3 +6819 'Won' 3 +6820 'Wow' 3 +6821 'XML' 3 +6822 'XXX' 3 +6823 'XYZ' 3 +6824 'Xiv' 3 +6825 'Xml' 3 +6826 'YES' 3 +6827 'YLE' 3 +6828 'YOU' 3 +6829 'YPE' 3 +6830 'YYY' 3 +6831 'Yes' 3 +6832 'Yet' 3 +6833 'You' 3 +6834 'ZIP' 3 +6835 'Zen' 3 +6836 'Zip' 3 +6837 "['_" 3 +6838 '[:,' 3 +6839 '[:-' 3 +6840 '[:]' 3 +6841 "[['" 3 +6842 '[])' 3 +6843 '[],' 3 +6844 '[]{' 3 +6845 '\\""' 3 +6846 '\\",' 3 +6847 '\\":' 3 +6848 '\\">' 3 +6849 '\\\\\\' 3 +6850 '\\}$' 3 +6851 ']")' 3 +6852 ']",' 3 +6853 "]'," 3 +6854 '](#' 3 +6855 ']))' 3 +6856 ']),' 3 +6857 ']).' 3 +6858 ']):' 3 +6859 ']);' 3 +6860 '],[' 3 +6861 ']->' 3 +6862 '].[' 3 +6863 ']="' 3 +6864 ']["' 3 +6865 "]['" 3 +6866 ']\\\\' 3 +6867 ']])' 3 +6868 ']],' 3 +6869 ']];' 3 +6870 ']},' 3 +6871 '^{+' 3 +6872 '^{-' 3 +6873 '^{\\' 3 +6874 '_("' 3 +6875 "_('" 3 +6876 '_->' 3 +6877 '__(' 3 +6878 '__)' 3 +6879 '__,' 3 +6880 '__.' 3 +6881 '___' 3 +6882 '_{\\' 3 +6883 '`).' 
3 +6884 '```' 3 +6885 'aaa' 3 +6886 'aab' 3 +6887 'aan' 3 +6888 'aar' 3 +6889 'aba' 3 +6890 'abb' 3 +6891 'abc' 3 +6892 'abd' 3 +6893 'abe' 3 +6894 'abi' 3 +6895 'abl' 3 +6896 'abo' 3 +6897 'abr' 3 +6898 'abs' 3 +6899 'aby' 3 +6900 'aca' 3 +6901 'acc' 3 +6902 'ace' 3 +6903 'ach' 3 +6904 'aci' 3 +6905 'ack' 3 +6906 'acl' 3 +6907 'aco' 3 +6908 'acs' 3 +6909 'act' 3 +6910 'acy' 3 +6911 'ada' 3 +6912 'adb' 3 +6913 'add' 3 +6914 'ade' 3 +6915 'adh' 3 +6916 'adi' 3 +6917 'adj' 3 +6918 'adm' 3 +6919 'ado' 3 +6920 'adr' 3 +6921 'ads' 3 +6922 'adt' 3 +6923 'adu' 3 +6924 'adv' 3 +6925 'ady' 3 +6926 'aea' 3 +6927 'ael' 3 +6928 'aes' 3 +6929 'afa' 3 +6930 'afe' 3 +6931 'aff' 3 +6932 'afi' 3 +6933 'aft' 3 +6934 'aga' 3 +6935 'age' 3 +6936 'agg' 3 +6937 'agh' 3 +6938 'agi' 3 +6939 'agn' 3 +6940 'ago' 3 +6941 'agr' 3 +6942 'ags' 3 +6943 'agt' 3 +6944 'agu' 3 +6945 'agy' 3 +6946 'aha' 3 +6947 'ahi' 3 +6948 'ahl' 3 +6949 'ahn' 3 +6950 'aho' 3 +6951 'ahr' 3 +6952 'ahu' 3 +6953 'aic' 3 +6954 'aid' 3 +6955 'ail' 3 +6956 'aim' 3 +6957 'ain' 3 +6958 'air' 3 +6959 'ais' 3 +6960 'ait' 3 +6961 'aja' 3 +6962 'aje' 3 +6963 'aji' 3 +6964 'ajo' 3 +6965 'aju' 3 +6966 'aka' 3 +6967 'ake' 3 +6968 'akh' 3 +6969 'aki' 3 +6970 'akk' 3 +6971 'ako' 3 +6972 'aks' 3 +6973 'akt' 3 +6974 'aku' 3 +6975 'aky' 3 +6976 'ala' 3 +6977 'alc' 3 +6978 'ald' 3 +6979 'ale' 3 +6980 'alf' 3 +6981 'alg' 3 +6982 'ali' 3 +6983 'alk' 3 +6984 'all' 3 +6985 'alm' 3 +6986 'alo' 3 +6987 'als' 3 +6988 'alt' 3 +6989 'alu' 3 +6990 'aly' 3 +6991 'ama' 3 +6992 'amb' 3 +6993 'amd' 3 +6994 'ame' 3 +6995 'ami' 3 +6996 'aml' 3 +6997 'amm' 3 +6998 'amo' 3 +6999 'amp' 3 +7000 'ams' 3 +7001 'amt' 3 +7002 'amy' 3 +7003 'ana' 3 +7004 'anc' 3 +7005 'and' 3 +7006 'ane' 3 +7007 'ang' 3 +7008 'anh' 3 +7009 'ani' 3 +7010 'anj' 3 +7011 'ank' 3 +7012 'ann' 3 +7013 'ano' 3 +7014 'ans' 3 +7015 'ant' 3 +7016 'anu' 3 +7017 'any' 3 +7018 'anz' 3 +7019 'aos' 3 +7020 'apa' 3 +7021 'ape' 3 +7022 'aph' 3 +7023 'api' 3 +7024 'apk' 3 +7025 'apo' 3 +7026 'app' 3 +7027 'apr' 3 +7028 'aps' 3 +7029 'apt' 3 +7030 'apy' 3 +7031 'aqu' 3 +7032 'ara' 3 +7033 'arb' 3 +7034 'arc' 3 +7035 'ard' 3 +7036 'are' 3 +7037 'arf' 3 +7038 'arg' 3 +7039 'ari' 3 +7040 'ark' 3 +7041 'arl' 3 +7042 'arm' 3 +7043 'arn' 3 +7044 'aro' 3 +7045 'arp' 3 +7046 'arr' 3 +7047 'ars' 3 +7048 'art' 3 +7049 'aru' 3 +7050 'ary' 3 +7051 'asa' 3 +7052 'asc' 3 +7053 'ase' 3 +7054 'ash' 3 +7055 'asi' 3 +7056 'ask' 3 +7057 'asm' 3 +7058 'aso' 3 +7059 'asp' 3 +7060 'ass' 3 +7061 'ast' 3 +7062 'asu' 3 +7063 'asy' 3 +7064 'asz' 3 +7065 'ata' 3 +7066 'ate' 3 +7067 'ath' 3 +7068 'ati' 3 +7069 'atl' 3 +7070 'ato' 3 +7071 'atr' 3 +7072 'ats' 3 +7073 'att' 3 +7074 'atu' 3 +7075 'aty' 3 +7076 'atz' 3 +7077 'auc' 3 +7078 'aud' 3 +7079 'auf' 3 +7080 'aug' 3 +7081 'aul' 3 +7082 'aur' 3 +7083 'aus' 3 +7084 'aut' 3 +7085 'aux' 3 +7086 'ava' 3 +7087 'ave' 3 +7088 'avg' 3 +7089 'avi' 3 +7090 'avo' 3 +7091 'avy' 3 +7092 'awa' 3 +7093 'awi' 3 +7094 'awk' 3 +7095 'awn' 3 +7096 'aws' 3 +7097 'awt' 3 +7098 'axe' 3 +7099 'axy' 3 +7100 'aya' 3 +7101 'aye' 3 +7102 'ays' 3 +7103 'aza' 3 +7104 'aze' 3 +7105 'azi' 3 +7106 'azo' 3 +7107 'azu' 3 +7108 'azy' 3 +7109 'azz' 3 +7110 'añ' 3 +7111 'ać' 3 +7112 'ał' 3 +7113 'aż' 3 +7114 'bab' 3 +7115 'bac' 3 +7116 'bad' 3 +7117 'bag' 3 +7118 'bah' 3 +7119 'bai' 3 +7120 'bak' 3 +7121 'bal' 3 +7122 'bam' 3 +7123 'ban' 3 +7124 'bar' 3 +7125 'bas' 3 +7126 'bat' 3 +7127 'bau' 3 +7128 'bay' 3 +7129 'baz' 3 +7130 'bbc' 3 +7131 'bbe' 3 +7132 'bdd' 3 +7133 'bec' 3 +7134 'bed' 3 +7135 'bee' 3 +7136 'bef' 3 +7137 'beg' 3 
+7138 'beh' 3 +7139 'bei' 3 +7140 'bek' 3 +7141 'bel' 3 +7142 'ben' 3 +7143 'ber' 3 +7144 'bes' 3 +7145 'bet' 3 +7146 'bey' 3 +7147 'bfd' 3 +7148 'bia' 3 +7149 'bib' 3 +7150 'bic' 3 +7151 'bid' 3 +7152 'bie' 3 +7153 'big' 3 +7154 'bil' 3 +7155 'bin' 3 +7156 'bio' 3 +7157 'bir' 3 +7158 'bis' 3 +7159 'bit' 3 +7160 'biz' 3 +7161 'bla' 3 +7162 'ble' 3 +7163 'blk' 3 +7164 'blo' 3 +7165 'blr' 3 +7166 'bly' 3 +7167 'bmp' 3 +7168 'bnb' 3 +7169 'boa' 3 +7170 'bob' 3 +7171 'bol' 3 +7172 'bon' 3 +7173 'boo' 3 +7174 'bor' 3 +7175 'bos' 3 +7176 'bot' 3 +7177 'bow' 3 +7178 'box' 3 +7179 'boy' 3 +7180 'bps' 3 +7181 'bra' 3 +7182 'bre' 3 +7183 'bro' 3 +7184 'bru' 3 +7185 'bsd' 3 +7186 'bst' 3 +7187 'btn' 3 +7188 'bud' 3 +7189 'buf' 3 +7190 'bug' 3 +7191 'bul' 3 +7192 'bum' 3 +7193 'bur' 3 +7194 'bus' 3 +7195 'but' 3 +7196 'buy' 3 +7197 'bye' 3 +7198 'bys' 3 +7199 'bé' 3 +7200 'bü' 3 +7201 'bě' 3 +7202 'cab' 3 +7203 'cac' 3 +7204 'cad' 3 +7205 'cal' 3 +7206 'cam' 3 +7207 'can' 3 +7208 'cap' 3 +7209 'car' 3 +7210 'cas' 3 +7211 'cat' 3 +7212 'cca' 3 +7213 'ccc' 3 +7214 'cci' 3 +7215 'cco' 3 +7216 'cdf' 3 +7217 'cdn' 3 +7218 'cea' 3 +7219 'ced' 3 +7220 'cel' 3 +7221 'cem' 3 +7222 'cen' 3 +7223 'cep' 3 +7224 'cer' 3 +7225 'ces' 3 +7226 'ceu' 3 +7227 'cfg' 3 +7228 'cgi' 3 +7229 'cha' 3 +7230 'che' 3 +7231 'chi' 3 +7232 'chk' 3 +7233 'chl' 3 +7234 'chn' 3 +7235 'cho' 3 +7236 'chr' 3 +7237 'chs' 3 +7238 'cht' 3 +7239 'chu' 3 +7240 'chy' 3 +7241 'cia' 3 +7242 'cid' 3 +7243 'cie' 3 +7244 'cig' 3 +7245 'cii' 3 +7246 'cil' 3 +7247 'cin' 3 +7248 'cio' 3 +7249 'cip' 3 +7250 'cir' 3 +7251 'cis' 3 +7252 'cit' 3 +7253 'cke' 3 +7254 'cki' 3 +7255 'cko' 3 +7256 'cks' 3 +7257 'cla' 3 +7258 'cle' 3 +7259 'clf' 3 +7260 'cli' 3 +7261 'clk' 3 +7262 'clo' 3 +7263 'cls' 3 +7264 'cmb' 3 +7265 'cmd' 3 +7266 'cmp' 3 +7267 'cms' 3 +7268 'cnt' 3 +7269 'cod' 3 +7270 'coe' 3 +7271 'col' 3 +7272 'com' 3 +7273 'con' 3 +7274 'cop' 3 +7275 'cor' 3 +7276 'cos' 3 +7277 'cot' 3 +7278 'cou' 3 +7279 'cov' 3 +7280 'cow' 3 +7281 'cox' 3 +7282 'cpp' 3 +7283 'cpu' 3 +7284 'cpy' 3 +7285 'cra' 3 +7286 'crc' 3 +7287 'cre' 3 +7288 'cri' 3 +7289 'cro' 3 +7290 'cru' 3 +7291 'cry' 3 +7292 'csr' 3 +7293 'css' 3 +7294 'csv' 3 +7295 'cta' 3 +7296 'ctl' 3 +7297 'ctr' 3 +7298 'ctu' 3 +7299 'ctx' 3 +7300 'cub' 3 +7301 'cue' 3 +7302 'cul' 3 +7303 'cum' 3 +7304 'cup' 3 +7305 'cur' 3 +7306 'cus' 3 +7307 'cut' 3 +7308 'cwd' 3 +7309 'czy' 3 +7310 'cé' 3 +7311 'cí' 3 +7312 'dac' 3 +7313 'dad' 3 +7314 'dag' 3 +7315 'dal' 3 +7316 'dam' 3 +7317 'dan' 3 +7318 'dao' 3 +7319 'dap' 3 +7320 'dar' 3 +7321 'das' 3 +7322 'dat' 3 +7323 'dav' 3 +7324 'day' 3 +7325 'dbc' 3 +7326 'dbg' 3 +7327 'dbl' 3 +7328 'ddd' 3 +7329 'dea' 3 +7330 'deb' 3 +7331 'dec' 3 +7332 'ded' 3 +7333 'dee' 3 +7334 'def' 3 +7335 'deg' 3 +7336 'dek' 3 +7337 'del' 3 +7338 'dem' 3 +7339 'den' 3 +7340 'dep' 3 +7341 'der' 3 +7342 'des' 3 +7343 'det' 3 +7344 'dev' 3 +7345 'dex' 3 +7346 'dez' 3 +7347 'dfs' 3 +7348 'dia' 3 +7349 'dic' 3 +7350 'did' 3 +7351 'die' 3 +7352 'dif' 3 +7353 'dig' 3 +7354 'dil' 3 +7355 'dim' 3 +7356 'din' 3 +7357 'dio' 3 +7358 'dip' 3 +7359 'dir' 3 +7360 'dis' 3 +7361 'dit' 3 +7362 'div' 3 +7363 'dle' 3 +7364 'dll' 3 +7365 'dna' 3 +7366 'dob' 3 +7367 'doc' 3 +7368 'dof' 3 +7369 'dog' 3 +7370 'doi' 3 +7371 'dol' 3 +7372 'dom' 3 +7373 'don' 3 +7374 'dor' 3 +7375 'dos' 3 +7376 'dot' 3 +7377 'dou' 3 +7378 'dpi' 3 +7379 'dra' 3 +7380 'dre' 3 +7381 'dri' 3 +7382 'dro' 3 +7383 'drv' 3 +7384 'dry' 3 +7385 'dst' 3 +7386 'dtd' 3 +7387 'duc' 3 +7388 'due' 3 +7389 'dup' 3 +7390 'dur' 3 +7391 'dyn' 3 
+7392 'ead' 3 +7393 'eah' 3 +7394 'ean' 3 +7395 'ear' 3 +7396 'eas' 3 +7397 'eat' 3 +7398 'eau' 3 +7399 'eba' 3 +7400 'ebb' 3 +7401 'eca' 3 +7402 'ecc' 3 +7403 'ecd' 3 +7404 'ece' 3 +7405 'ech' 3 +7406 'eck' 3 +7407 'ecl' 3 +7408 'eco' 3 +7409 'ecs' 3 +7410 'ect' 3 +7411 'eda' 3 +7412 'edd' 3 +7413 'ede' 3 +7414 'edi' 3 +7415 'edo' 3 +7416 'eds' 3 +7417 'edu' 3 +7418 'edy' 3 +7419 'eed' 3 +7420 'een' 3 +7421 'eer' 3 +7422 'ees' 3 +7423 'efe' 3 +7424 'eff' 3 +7425 'eft' 3 +7426 'ega' 3 +7427 'egg' 3 +7428 'ego' 3 +7429 'egr' 3 +7430 'egu' 3 +7431 'eil' 3 +7432 'ein' 3 +7433 'eka' 3 +7434 'eki' 3 +7435 'eks' 3 +7436 'ekt' 3 +7437 'ela' 3 +7438 'eld' 3 +7439 'ele' 3 +7440 'elf' 3 +7441 'eli' 3 +7442 'ell' 3 +7443 'elm' 3 +7444 'eln' 3 +7445 'elo' 3 +7446 'elp' 3 +7447 'els' 3 +7448 'elt' 3 +7449 'elu' 3 +7450 'ely' 3 +7451 'ema' 3 +7452 'emb' 3 +7453 'eme' 3 +7454 'emi' 3 +7455 'emn' 3 +7456 'emo' 3 +7457 'emp' 3 +7458 'ems' 3 +7459 'emu' 3 +7460 'emy' 3 +7461 'ena' 3 +7462 'enc' 3 +7463 'end' 3 +7464 'ene' 3 +7465 'enf' 3 +7466 'eng' 3 +7467 'enh' 3 +7468 'eni' 3 +7469 'enk' 3 +7470 'enn' 3 +7471 'eno' 3 +7472 'ens' 3 +7473 'ent' 3 +7474 'enu' 3 +7475 'env' 3 +7476 'eny' 3 +7477 'enz' 3 +7478 'eof' 3 +7479 'eon' 3 +7480 'eor' 3 +7481 'eph' 3 +7482 'epi' 3 +7483 'eps' 3 +7484 'ept' 3 +7485 'eqn' 3 +7486 'equ' 3 +7487 'era' 3 +7488 'erb' 3 +7489 'erc' 3 +7490 'erd' 3 +7491 'ere' 3 +7492 'erg' 3 +7493 'eri' 3 +7494 'erk' 3 +7495 'erm' 3 +7496 'ern' 3 +7497 'ero' 3 +7498 'erp' 3 +7499 'err' 3 +7500 'ers' 3 +7501 'ert' 3 +7502 'erv' 3 +7503 'ery' 3 +7504 'esa' 3 +7505 'esc' 3 +7506 'ese' 3 +7507 'esh' 3 +7508 'esi' 3 +7509 'esk' 3 +7510 'eso' 3 +7511 'esp' 3 +7512 'ess' 3 +7513 'est' 3 +7514 'esy' 3 +7515 'eta' 3 +7516 'etc' 3 +7517 'ete' 3 +7518 'eth' 3 +7519 'eti' 3 +7520 'eto' 3 +7521 'etr' 3 +7522 'ets' 3 +7523 'ett' 3 +7524 'etu' 3 +7525 'ety' 3 +7526 'etz' 3 +7527 'eur' 3 +7528 'eus' 3 +7529 'eva' 3 +7530 'eve' 3 +7531 'evt' 3 +7532 'ews' 3 +7533 'exc' 3 +7534 'exe' 3 +7535 'exp' 3 +7536 'ext' 3 +7537 'eye' 3 +7538 'fab' 3 +7539 'fac' 3 +7540 'fal' 3 +7541 'fan' 3 +7542 'far' 3 +7543 'fas' 3 +7544 'fat' 3 +7545 'fav' 3 +7546 'fax' 3 +7547 'feb' 3 +7548 'fed' 3 +7549 'fee' 3 +7550 'fel' 3 +7551 'fem' 3 +7552 'fen' 3 +7553 'fer' 3 +7554 'fet' 3 +7555 'few' 3 +7556 'ffe' 3 +7557 'fff' 3 +7558 'ffi' 3 +7559 'fft' 3 +7560 'fib' 3 +7561 'fic' 3 +7562 'fid' 3 +7563 'fif' 3 +7564 'fig' 3 +7565 'fil' 3 +7566 'fin' 3 +7567 'fir' 3 +7568 'fit' 3 +7569 'fix' 3 +7570 'fld' 3 +7571 'fle' 3 +7572 'flo' 3 +7573 'flu' 3 +7574 'fly' 3 +7575 'fmt' 3 +7576 'fol' 3 +7577 'fon' 3 +7578 'foo' 3 +7579 'for' 3 +7580 'fos' 3 +7581 'fox' 3 +7582 'fra' 3 +7583 'fre' 3 +7584 'fri' 3 +7585 'frm' 3 +7586 'fro' 3 +7587 'fst' 3 +7588 'fte' 3 +7589 'ftp' 3 +7590 'fts' 3 +7591 'fty' 3 +7592 'ful' 3 +7593 'fun' 3 +7594 'fur' 3 +7595 'fut' 3 +7596 'fé' 3 +7597 'fø' 3 +7598 'fü' 3 +7599 'gae' 3 +7600 'gal' 3 +7601 'gam' 3 +7602 'gan' 3 +7603 'gap' 3 +7604 'gar' 3 +7605 'gas' 3 +7606 'gat' 3 +7607 'gay' 3 +7608 'gca' 3 +7609 'gcc' 3 +7610 'gcd' 3 +7611 'geb' 3 +7612 'ged' 3 +7613 'geh' 3 +7614 'gel' 3 +7615 'gem' 3 +7616 'gen' 3 +7617 'geo' 3 +7618 'geq' 3 +7619 'ger' 3 +7620 'ges' 3 +7621 'get' 3 +7622 'gew' 3 +7623 'gex' 3 +7624 'ght' 3 +7625 'gia' 3 +7626 'gid' 3 +7627 'gie' 3 +7628 'gif' 3 +7629 'gil' 3 +7630 'gin' 3 +7631 'gio' 3 +7632 'gis' 3 +7633 'git' 3 +7634 'gle' 3 +7635 'gly' 3 +7636 'gmt' 3 +7637 'gnu' 3 +7638 'god' 3 +7639 'gol' 3 +7640 'gom' 3 +7641 'gon' 3 +7642 'gor' 3 +7643 'gos' 3 +7644 'got' 3 +7645 'gov' 3 
+7646 'gow' 3 +7647 'gpu' 3 +7648 'gra' 3 +7649 'gre' 3 +7650 'gro' 3 +7651 'grp' 3 +7652 'gru' 3 +7653 'gte' 3 +7654 'gtk' 3 +7655 'gua' 3 +7656 'gue' 3 +7657 'gui' 3 +7658 'gun' 3 +7659 'gut' 3 +7660 'hab' 3 +7661 'had' 3 +7662 'hai' 3 +7663 'hal' 3 +7664 'ham' 3 +7665 'han' 3 +7666 'hao' 3 +7667 'hap' 3 +7668 'har' 3 +7669 'has' 3 +7670 'hat' 3 +7671 'hav' 3 +7672 'haw' 3 +7673 'hay' 3 +7674 'haz' 3 +7675 'hdr' 3 +7676 'hea' 3 +7677 'hed' 3 +7678 'hee' 3 +7679 'hei' 3 +7680 'hel' 3 +7681 'hem' 3 +7682 'hen' 3 +7683 'hep' 3 +7684 'her' 3 +7685 'hes' 3 +7686 'het' 3 +7687 'hev' 3 +7688 'hew' 3 +7689 'hex' 3 +7690 'hey' 3 +7691 'hib' 3 +7692 'hic' 3 +7693 'hid' 3 +7694 'hig' 3 +7695 'hil' 3 +7696 'him' 3 +7697 'hin' 3 +7698 'hip' 3 +7699 'hir' 3 +7700 'his' 3 +7701 'hit' 3 +7702 'hma' 3 +7703 'hoc' 3 +7704 'hod' 3 +7705 'hoe' 3 +7706 'hof' 3 +7707 'hog' 3 +7708 'hol' 3 +7709 'hom' 3 +7710 'hon' 3 +7711 'hop' 3 +7712 'hor' 3 +7713 'hos' 3 +7714 'hot' 3 +7715 'hou' 3 +7716 'hov' 3 +7717 'how' 3 +7718 'hpp' 3 +7719 'hra' 3 +7720 'hta' 3 +7721 'hti' 3 +7722 'htm' 3 +7723 'htt' 3 +7724 'hua' 3 +7725 'hub' 3 +7726 'hue' 3 +7727 'hui' 3 +7728 'hum' 3 +7729 'hur' 3 +7730 'hus' 3 +7731 'hyd' 3 +7732 'hyp' 3 +7733 'há' 3 +7734 'hã' 3 +7735 'hä' 3 +7736 'hé' 3 +7737 'hö' 3 +7738 'iOS' 3 +7739 'iac' 3 +7740 'iae' 3 +7741 'iah' 3 +7742 'iak' 3 +7743 'ial' 3 +7744 'iam' 3 +7745 'ian' 3 +7746 'iao' 3 +7747 'iar' 3 +7748 'ias' 3 +7749 'iat' 3 +7750 'iaz' 3 +7751 'iba' 3 +7752 'ibe' 3 +7753 'ibi' 3 +7754 'ibo' 3 +7755 'ibr' 3 +7756 'ibu' 3 +7757 'ica' 3 +7758 'icc' 3 +7759 'ice' 3 +7760 'ich' 3 +7761 'ici' 3 +7762 'ick' 3 +7763 'icl' 3 +7764 'ico' 3 +7765 'ics' 3 +7766 'ict' 3 +7767 'icy' 3 +7768 'icz' 3 +7769 'ida' 3 +7770 'idd' 3 +7771 'ide' 3 +7772 'idi' 3 +7773 'idl' 3 +7774 'ido' 3 +7775 'ids' 3 +7776 'idx' 3 +7777 'idy' 3 +7778 'iec' 3 +7779 'ied' 3 +7780 'ief' 3 +7781 'ieg' 3 +7782 'iei' 3 +7783 'iej' 3 +7784 'iek' 3 +7785 'iel' 3 +7786 'iem' 3 +7787 'ien' 3 +7788 'ier' 3 +7789 'ies' 3 +7790 'iet' 3 +7791 'ieu' 3 +7792 'iev' 3 +7793 'iew' 3 +7794 'iez' 3 +7795 'ifa' 3 +7796 'ife' 3 +7797 'iff' 3 +7798 'ifi' 3 +7799 'ifs' 3 +7800 'ift' 3 +7801 'ify' 3 +7802 'iga' 3 +7803 'ige' 3 +7804 'igg' 3 +7805 'igh' 3 +7806 'igi' 3 +7807 'igl' 3 +7808 'igm' 3 +7809 'ign' 3 +7810 'igo' 3 +7811 'igr' 3 +7812 'igs' 3 +7813 'igt' 3 +7814 'igu' 3 +7815 'iii' 3 +7816 'ija' 3 +7817 'ije' 3 +7818 'iji' 3 +7819 'ijk' 3 +7820 'ijn' 3 +7821 'ijo' 3 +7822 'iju' 3 +7823 'ika' 3 +7824 'ike' 3 +7825 'ikh' 3 +7826 'iki' 3 +7827 'ikk' 3 +7828 'iko' 3 +7829 'iks' 3 +7830 'ikt' 3 +7831 'iku' 3 +7832 'ila' 3 +7833 'ild' 3 +7834 'ile' 3 +7835 'ili' 3 +7836 'ilk' 3 +7837 'ill' 3 +7838 'ilo' 3 +7839 'ils' 3 +7840 'ilt' 3 +7841 'ily' 3 +7842 'ima' 3 +7843 'imb' 3 +7844 'ime' 3 +7845 'img' 3 +7846 'imi' 3 +7847 'imm' 3 +7848 'imo' 3 +7849 'imp' 3 +7850 'ims' 3 +7851 'ina' 3 +7852 'inc' 3 +7853 'ind' 3 +7854 'ine' 3 +7855 'inf' 3 +7856 'ing' 3 +7857 'inh' 3 +7858 'ini' 3 +7859 'inj' 3 +7860 'ink' 3 +7861 'inn' 3 +7862 'ino' 3 +7863 'inp' 3 +7864 'ins' 3 +7865 'int' 3 +7866 'inu' 3 +7867 'inv' 3 +7868 'inx' 3 +7869 'iny' 3 +7870 'inz' 3 +7871 'iod' 3 +7872 'iol' 3 +7873 'iom' 3 +7874 'ion' 3 +7875 'iop' 3 +7876 'ior' 3 +7877 'ios' 3 +7878 'iot' 3 +7879 'iou' 3 +7880 'iov' 3 +7881 'iox' 3 +7882 'ipa' 3 +7883 'ipe' 3 +7884 'iph' 3 +7885 'ipl' 3 +7886 'ipo' 3 +7887 'ipp' 3 +7888 'ips' 3 +7889 'ipt' 3 +7890 'ipv' 3 +7891 'ipy' 3 +7892 'iqu' 3 +7893 'ira' 3 +7894 'irc' 3 +7895 'ird' 3 +7896 'ire' 3 +7897 'iri' 3 +7898 'irk' 3 +7899 'irl' 3 
+7900 'irm' 3 +7901 'iro' 3 +7902 'irq' 3 +7903 'irs' 3 +7904 'irt' 3 +7905 'iry' 3 +7906 'isa' 3 +7907 'isc' 3 +7908 'isd' 3 +7909 'ise' 3 +7910 'isf' 3 +7911 'ish' 3 +7912 'isi' 3 +7913 'isk' 3 +7914 'isl' 3 +7915 'ism' 3 +7916 'iso' 3 +7917 'isp' 3 +7918 'iss' 3 +7919 'ist' 3 +7920 'isu' 3 +7921 'isy' 3 +7922 'isz' 3 +7923 'ita' 3 +7924 'ite' 3 +7925 'ith' 3 +7926 'iti' 3 +7927 'itm' 3 +7928 'ito' 3 +7929 'itr' 3 +7930 'its' 3 +7931 'itt' 3 +7932 'itu' 3 +7933 'ity' 3 +7934 'itz' 3 +7935 'ium' 3 +7936 'ius' 3 +7937 'iva' 3 +7938 'ive' 3 +7939 'ivi' 3 +7940 'ivo' 3 +7941 'ivy' 3 +7942 'ixa' 3 +7943 'ixo' 3 +7944 'iya' 3 +7945 'iza' 3 +7946 'ize' 3 +7947 'izi' 3 +7948 'izo' 3 +7949 'izu' 3 +7950 'izz' 3 +7951 'iß' 3 +7952 'ié' 3 +7953 'ië' 3 +7954 'ió' 3 +7955 'ią' 3 +7956 'ić' 3 +7957 'ič' 3 +7958 'ię' 3 +7959 'ił' 3 +7960 'iş' 3 +7961 'iš' 3 +7962 'jab' 3 +7963 'jac' 3 +7964 'jad' 3 +7965 'jah' 3 +7966 'jak' 3 +7967 'jal' 3 +7968 'jam' 3 +7969 'jan' 3 +7970 'jar' 3 +7971 'jas' 3 +7972 'jav' 3 +7973 'jax' 3 +7974 'jay' 3 +7975 'jdk' 3 +7976 'jee' 3 +7977 'jel' 3 +7978 'jem' 3 +7979 'jen' 3 +7980 'jer' 3 +7981 'jes' 3 +7982 'jet' 3 +7983 'jid' 3 +7984 'jin' 3 +7985 'jis' 3 +7986 'jit' 3 +7987 'job' 3 +7988 'jon' 3 +7989 'jor' 3 +7990 'jos' 3 +7991 'jou' 3 +7992 'joy' 3 +7993 'jpg' 3 +7994 'jsp' 3 +7995 'jud' 3 +7996 'jug' 3 +7997 'jul' 3 +7998 'jun' 3 +7999 'jur' 3 +8000 'jà' 3 +8001 'jä' 3 +8002 'jö' 3 +8003 'jø' 3 +8004 'ją' 3 +8005 'ję' 3 +8006 'kal' 3 +8007 'kan' 3 +8008 'kap' 3 +8009 'kar' 3 +8010 'kas' 3 +8011 'kat' 3 +8012 'ked' 3 +8013 'kee' 3 +8014 'keh' 3 +8015 'kel' 3 +8016 'ken' 3 +8017 'ker' 3 +8018 'kes' 3 +8019 'ket' 3 +8020 'key' 3 +8021 'kid' 3 +8022 'kie' 3 +8023 'kil' 3 +8024 'kim' 3 +8025 'kin' 3 +8026 'kip' 3 +8027 'kir' 3 +8028 'kit' 3 +8029 'kle' 3 +8030 'kok' 3 +8031 'kol' 3 +8032 'kom' 3 +8033 'kon' 3 +8034 'kop' 3 +8035 'kor' 3 +8036 'kos' 3 +8037 'kov' 3 +8038 'kow' 3 +8039 'ksi' 3 +8040 'kte' 3 +8041 'kun' 3 +8042 'kur' 3 +8043 'kus' 3 +8044 'ká' 3 +8045 'kä' 3 +8046 'ké' 3 +8047 'kö' 3 +8048 'ką' 3 +8049 'kę' 3 +8050 'lab' 3 +8051 'lac' 3 +8052 'lad' 3 +8053 'lag' 3 +8054 'lah' 3 +8055 'lam' 3 +8056 'lan' 3 +8057 'lap' 3 +8058 'lar' 3 +8059 'las' 3 +8060 'lat' 3 +8061 'lav' 3 +8062 'law' 3 +8063 'lay' 3 +8064 'lbl' 3 +8065 'lea' 3 +8066 'lec' 3 +8067 'led' 3 +8068 'lee' 3 +8069 'lef' 3 +8070 'leg' 3 +8071 'lei' 3 +8072 'lek' 3 +8073 'lem' 3 +8074 'len' 3 +8075 'lep' 3 +8076 'leq' 3 +8077 'ler' 3 +8078 'les' 3 +8079 'let' 3 +8080 'lev' 3 +8081 'lew' 3 +8082 'lex' 3 +8083 'ley' 3 +8084 'lez' 3 +8085 'lia' 3 +8086 'lib' 3 +8087 'lic' 3 +8088 'lid' 3 +8089 'lie' 3 +8090 'lif' 3 +8091 'lig' 3 +8092 'lij' 3 +8093 'lik' 3 +8094 'lim' 3 +8095 'lin' 3 +8096 'lio' 3 +8097 'lip' 3 +8098 'lis' 3 +8099 'lit' 3 +8100 'liv' 3 +8101 'lla' 3 +8102 'lle' 3 +8103 'lli' 3 +8104 'llo' 3 +8105 'lng' 3 +8106 'lob' 3 +8107 'loc' 3 +8108 'lod' 3 +8109 'loe' 3 +8110 'log' 3 +8111 'lon' 3 +8112 'loo' 3 +8113 'lop' 3 +8114 'lor' 3 +8115 'los' 3 +8116 'lot' 3 +8117 'lou' 3 +8118 'lov' 3 +8119 'low' 3 +8120 'loy' 3 +8121 'lst' 3 +8122 'lua' 3 +8123 'luc' 3 +8124 'lum' 3 +8125 'lun' 3 +8126 'lus' 3 +8127 'lut' 3 +8128 'lux' 3 +8129 'lvl' 3 +8130 'lyn' 3 +8131 'lys' 3 +8132 'là' 3 +8133 'lá' 3 +8134 'lä' 3 +8135 'lé' 3 +8136 'ló' 3 +8137 'lö' 3 +8138 'lą' 3 +8139 'lı' 3 +8140 'mAh' 3 +8141 'mac' 3 +8142 'mad' 3 +8143 'mag' 3 +8144 'mai' 3 +8145 'maj' 3 +8146 'mak' 3 +8147 'mal' 3 +8148 'man' 3 +8149 'map' 3 +8150 'mar' 3 +8151 'mas' 3 +8152 'mat' 3 +8153 'max' 3 +8154 'may' 3 +8155 'maz' 3 
+8156 'mbH' 3 +8157 'med' 3 +8158 'meg' 3 +8159 'mek' 3 +8160 'mel' 3 +8161 'mem' 3 +8162 'men' 3 +8163 'mer' 3 +8164 'mes' 3 +8165 'met' 3 +8166 'mez' 3 +8167 'mgr' 3 +8168 'mia' 3 +8169 'mic' 3 +8170 'mid' 3 +8171 'mie' 3 +8172 'mil' 3 +8173 'mim' 3 +8174 'min' 3 +8175 'mir' 3 +8176 'mis' 3 +8177 'mit' 3 +8178 'mix' 3 +8179 'mma' 3 +8180 'mmm' 3 +8181 'mob' 3 +8182 'mod' 3 +8183 'mol' 3 +8184 'mom' 3 +8185 'mon' 3 +8186 'mor' 3 +8187 'mos' 3 +8188 'mot' 3 +8189 'mov' 3 +8190 'moz' 3 +8191 'mph' 3 +8192 'mpi' 3 +8193 'mpl' 3 +8194 'mse' 3 +8195 'msg' 3 +8196 'mud' 3 +8197 'mul' 3 +8198 'mun' 3 +8199 'mur' 3 +8200 'mus' 3 +8201 'mut' 3 +8202 'mux' 3 +8203 'mys' 3 +8204 'mé' 3 +8205 'nad' 3 +8206 'nah' 3 +8207 'nai' 3 +8208 'nak' 3 +8209 'nal' 3 +8210 'nam' 3 +8211 'nan' 3 +8212 'nap' 3 +8213 'nar' 3 +8214 'nas' 3 +8215 'nat' 3 +8216 'nav' 3 +8217 'nbr' 3 +8218 'nce' 3 +8219 'nda' 3 +8220 'nds' 3 +8221 'nea' 3 +8222 'ned' 3 +8223 'nee' 3 +8224 'neg' 3 +8225 'neh' 3 +8226 'nej' 3 +8227 'nek' 3 +8228 'nel' 3 +8229 'nem' 3 +8230 'nen' 3 +8231 'neo' 3 +8232 'neq' 3 +8233 'ner' 3 +8234 'nes' 3 +8235 'net' 3 +8236 'neu' 3 +8237 'new' 3 +8238 'nex' 3 +8239 'ney' 3 +8240 'nez' 3 +8241 'nia' 3 +8242 'nic' 3 +8243 'nie' 3 +8244 'nih' 3 +8245 'nik' 3 +8246 'nil' 3 +8247 'nim' 3 +8248 'nin' 3 +8249 'nio' 3 +8250 'nis' 3 +8251 'nit' 3 +8252 'nob' 3 +8253 'noc' 3 +8254 'nod' 3 +8255 'nom' 3 +8256 'non' 3 +8257 'nop' 3 +8258 'nor' 3 +8259 'nos' 3 +8260 'not' 3 +8261 'nou' 3 +8262 'nov' 3 +8263 'now' 3 +8264 'nox' 3 +8265 'npc' 3 +8266 'npm' 3 +8267 'npy' 3 +8268 'nth' 3 +8269 'num' 3 +8270 'nut' 3 +8271 'nya' 3 +8272 'ná' 3 +8273 'né' 3 +8274 'ní' 3 +8275 'ný' 3 +8276 'ną' 3 +8277 'nę' 3 +8278 'ně' 3 +8279 'oad' 3 +8280 'oba' 3 +8281 'obb' 3 +8282 'obe' 3 +8283 'obi' 3 +8284 'obj' 3 +8285 'obl' 3 +8286 'obo' 3 +8287 'obs' 3 +8288 'oby' 3 +8289 'oca' 3 +8290 'occ' 3 +8291 'oce' 3 +8292 'och' 3 +8293 'oci' 3 +8294 'ock' 3 +8295 'ocl' 3 +8296 'oco' 3 +8297 'ocr' 3 +8298 'ocs' 3 +8299 'oct' 3 +8300 'ocy' 3 +8301 'oda' 3 +8302 'odb' 3 +8303 'odd' 3 +8304 'ode' 3 +8305 'odi' 3 +8306 'odo' 3 +8307 'ods' 3 +8308 'ody' 3 +8309 'oen' 3 +8310 'oes' 3 +8311 'off' 3 +8312 'ofs' 3 +8313 'oft' 3 +8314 'oga' 3 +8315 'oge' 3 +8316 'ogg' 3 +8317 'ogh' 3 +8318 'ogi' 3 +8319 'ogl' 3 +8320 'ogn' 3 +8321 'ogo' 3 +8322 'ogr' 3 +8323 'ogs' 3 +8324 'ogy' 3 +8325 'ohl' 3 +8326 'ohn' 3 +8327 'oho' 3 +8328 'oid' 3 +8329 'oil' 3 +8330 'oin' 3 +8331 'oir' 3 +8332 'ois' 3 +8333 'oit' 3 +8334 'oka' 3 +8335 'oke' 3 +8336 'oki' 3 +8337 'oko' 3 +8338 'oks' 3 +8339 'oku' 3 +8340 'oky' 3 +8341 'ola' 3 +8342 'old' 3 +8343 'ole' 3 +8344 'olf' 3 +8345 'oli' 3 +8346 'olk' 3 +8347 'oll' 3 +8348 'oln' 3 +8349 'olo' 3 +8350 'ols' 3 +8351 'olt' 3 +8352 'olu' 3 +8353 'oly' 3 +8354 'oma' 3 +8355 'omb' 3 +8356 'ome' 3 +8357 'omi' 3 +8358 'omm' 3 +8359 'omo' 3 +8360 'omp' 3 +8361 'oms' 3 +8362 'omy' 3 +8363 'ona' 3 +8364 'onc' 3 +8365 'ond' 3 +8366 'one' 3 +8367 'ong' 3 +8368 'oni' 3 +8369 'onn' 3 +8370 'ono' 3 +8371 'ons' 3 +8372 'ont' 3 +8373 'ony' 3 +8374 'onz' 3 +8375 'ood' 3 +8376 'ook' 3 +8377 'ool' 3 +8378 'oom' 3 +8379 'oon' 3 +8380 'ooo' 3 +8381 'oop' 3 +8382 'oor' 3 +8383 'oot' 3 +8384 'opa' 3 +8385 'ope' 3 +8386 'opf' 3 +8387 'oph' 3 +8388 'opi' 3 +8389 'opl' 3 +8390 'opo' 3 +8391 'opp' 3 +8392 'ops' 3 +8393 'opt' 3 +8394 'opy' 3 +8395 'ora' 3 +8396 'orb' 3 +8397 'orc' 3 +8398 'ord' 3 +8399 'ore' 3 +8400 'orf' 3 +8401 'org' 3 +8402 'ori' 3 +8403 'ork' 3 +8404 'orm' 3 +8405 'orn' 3 +8406 'oro' 3 +8407 'orp' 3 +8408 'orr' 3 +8409 'ors' 3 +8410 
'ort' 3 +8411 'oru' 3 +8412 'ory' 3 +8413 'osa' 3 +8414 'osc' 3 +8415 'ose' 3 +8416 'osh' 3 +8417 'osi' 3 +8418 'oso' 3 +8419 'osp' 3 +8420 'oss' 3 +8421 'ost' 3 +8422 'ota' 3 +8423 'ote' 3 +8424 'oth' 3 +8425 'oti' 3 +8426 'oto' 3 +8427 'ots' 3 +8428 'ott' 3 +8429 'oty' 3 +8430 'oub' 3 +8431 'oud' 3 +8432 'oug' 3 +8433 'oui' 3 +8434 'ouk' 3 +8435 'oul' 3 +8436 'oun' 3 +8437 'oup' 3 +8438 'our' 3 +8439 'ous' 3 +8440 'out' 3 +8441 'ouv' 3 +8442 'oux' 3 +8443 'ova' 3 +8444 'ove' 3 +8445 'ovi' 3 +8446 'ovo' 3 +8447 'ovy' 3 +8448 'owa' 3 +8449 'owe' 3 +8450 'owi' 3 +8451 'owl' 3 +8452 'own' 3 +8453 'owo' 3 +8454 'ows' 3 +8455 'owy' 3 +8456 'oxy' 3 +8457 'oya' 3 +8458 'oyo' 3 +8459 'ozo' 3 +8460 'ozy' 3 +8461 'oł' 3 +8462 'pac' 3 +8463 'pad' 3 +8464 'pag' 3 +8465 'pak' 3 +8466 'pal' 3 +8467 'pan' 3 +8468 'pap' 3 +8469 'par' 3 +8470 'pas' 3 +8471 'pat' 3 +8472 'pay' 3 +8473 'pci' 3 +8474 'pdb' 3 +8475 'pdf' 3 +8476 'pec' 3 +8477 'ped' 3 +8478 'pee' 3 +8479 'peg' 3 +8480 'pei' 3 +8481 'pel' 3 +8482 'pem' 3 +8483 'pen' 3 +8484 'per' 3 +8485 'pes' 3 +8486 'pet' 3 +8487 'pex' 3 +8488 'pez' 3 +8489 'pha' 3 +8490 'phe' 3 +8491 'phi' 3 +8492 'php' 3 +8493 'phy' 3 +8494 'pic' 3 +8495 'pid' 3 +8496 'pie' 3 +8497 'pig' 3 +8498 'pin' 3 +8499 'pio' 3 +8500 'pip' 3 +8501 'pir' 3 +8502 'pis' 3 +8503 'pit' 3 +8504 'pix' 3 +8505 'pkg' 3 +8506 'pkl' 3 +8507 'pla' 3 +8508 'ple' 3 +8509 'plt' 3 +8510 'ply' 3 +8511 'png' 3 +8512 'pod' 3 +8513 'pol' 3 +8514 'pom' 3 +8515 'pon' 3 +8516 'pop' 3 +8517 'por' 3 +8518 'pos' 3 +8519 'pot' 3 +8520 'pow' 3 +8521 'ppa' 3 +8522 'ppe' 3 +8523 'ppo' 3 +8524 'pps' 3 +8525 'ppy' 3 +8526 'pra' 3 +8527 'pre' 3 +8528 'pri' 3 +8529 'pro' 3 +8530 'psi' 3 +8531 'psy' 3 +8532 'pta' 3 +8533 'pte' 3 +8534 'pth' 3 +8535 'pto' 3 +8536 'ptr' 3 +8537 'pts' 3 +8538 'pty' 3 +8539 'pub' 3 +8540 'pul' 3 +8541 'pun' 3 +8542 'pur' 3 +8543 'pus' 3 +8544 'put' 3 +8545 'pwd' 3 +8546 'qrt' 3 +8547 'qty' 3 +8548 'qua' 3 +8549 'que' 3 +8550 'qui' 3 +8551 'quo' 3 +8552 'rab' 3 +8553 'rac' 3 +8554 'rad' 3 +8555 'rae' 3 +8556 'raf' 3 +8557 'rag' 3 +8558 'rah' 3 +8559 'rai' 3 +8560 'raj' 3 +8561 'rak' 3 +8562 'ral' 3 +8563 'ram' 3 +8564 'ran' 3 +8565 'rap' 3 +8566 'raq' 3 +8567 'rar' 3 +8568 'ras' 3 +8569 'rat' 3 +8570 'rav' 3 +8571 'raw' 3 +8572 'rax' 3 +8573 'ray' 3 +8574 'raz' 3 +8575 'rdf' 3 +8576 'rea' 3 +8577 'reb' 3 +8578 'rec' 3 +8579 'red' 3 +8580 'ree' 3 +8581 'ref' 3 +8582 'reg' 3 +8583 'reh' 3 +8584 'rei' 3 +8585 'rek' 3 +8586 'rel' 3 +8587 'rem' 3 +8588 'ren' 3 +8589 'reo' 3 +8590 'rep' 3 +8591 'req' 3 +8592 'rer' 3 +8593 'res' 3 +8594 'ret' 3 +8595 'reu' 3 +8596 'rev' 3 +8597 'rew' 3 +8598 'rex' 3 +8599 'rey' 3 +8600 'rez' 3 +8601 'rgb' 3 +8602 'rho' 3 +8603 'rhs' 3 +8604 'ria' 3 +8605 'rib' 3 +8606 'ric' 3 +8607 'rid' 3 +8608 'rie' 3 +8609 'rif' 3 +8610 'rig' 3 +8611 'rij' 3 +8612 'rik' 3 +8613 'ril' 3 +8614 'rim' 3 +8615 'rin' 3 +8616 'rio' 3 +8617 'rip' 3 +8618 'rir' 3 +8619 'ris' 3 +8620 'rit' 3 +8621 'riv' 3 +8622 'rix' 3 +8623 'riz' 3 +8624 'rms' 3 +8625 'rna' 3 +8626 'rnd' 3 +8627 'rng' 3 +8628 'rnn' 3 +8629 'rob' 3 +8630 'roc' 3 +8631 'rod' 3 +8632 'roe' 3 +8633 'rog' 3 +8634 'roi' 3 +8635 'rok' 3 +8636 'rol' 3 +8637 'rom' 3 +8638 'ron' 3 +8639 'rop' 3 +8640 'ror' 3 +8641 'ros' 3 +8642 'rot' 3 +8643 'rou' 3 +8644 'rov' 3 +8645 'row' 3 +8646 'rox' 3 +8647 'roy' 3 +8648 'roz' 3 +8649 'rpc' 3 +8650 'rpm' 3 +8651 'rsa' 3 +8652 'rsp' 3 +8653 'rss' 3 +8654 'rst' 3 +8655 'rtl' 3 +8656 'rub' 3 +8657 'rud' 3 +8658 'rue' 3 +8659 'rug' 3 +8660 'rum' 3 +8661 'run' 3 +8662 'rup' 3 +8663 'rus' 3 +8664 
'rut' 3 +8665 'ryn' 3 +8666 'rys' 3 +8667 'rà' 3 +8668 'rá' 3 +8669 'rä' 3 +8670 'rå' 3 +8671 'ré' 3 +8672 'rí' 3 +8673 'ró' 3 +8674 'rę' 3 +8675 'sac' 3 +8676 'sad' 3 +8677 'saf' 3 +8678 'sal' 3 +8679 'sam' 3 +8680 'san' 3 +8681 'sar' 3 +8682 'sas' 3 +8683 'sat' 3 +8684 'sav' 3 +8685 'saw' 3 +8686 'say' 3 +8687 'sce' 3 +8688 'sch' 3 +8689 'sci' 3 +8690 'scr' 3 +8691 'sdk' 3 +8692 'sea' 3 +8693 'sec' 3 +8694 'sed' 3 +8695 'see' 3 +8696 'seg' 3 +8697 'sei' 3 +8698 'sek' 3 +8699 'sel' 3 +8700 'sem' 3 +8701 'sen' 3 +8702 'sep' 3 +8703 'seq' 3 +8704 'ser' 3 +8705 'ses' 3 +8706 'set' 3 +8707 'sex' 3 +8708 'sey' 3 +8709 'sez' 3 +8710 'sha' 3 +8711 'she' 3 +8712 'shi' 3 +8713 'shr' 3 +8714 'sic' 3 +8715 'sid' 3 +8716 'sie' 3 +8717 'sig' 3 +8718 'sil' 3 +8719 'sim' 3 +8720 'sin' 3 +8721 'sis' 3 +8722 'sit' 3 +8723 'six' 3 +8724 'ska' 3 +8725 'ske' 3 +8726 'ski' 3 +8727 'sku' 3 +8728 'sky' 3 +8729 'snd' 3 +8730 'soc' 3 +8731 'sof' 3 +8732 'sol' 3 +8733 'som' 3 +8734 'son' 3 +8735 'sor' 3 +8736 'sov' 3 +8737 'spe' 3 +8738 'spi' 3 +8739 'spl' 3 +8740 'spo' 3 +8741 'spr' 3 +8742 'spy' 3 +8743 'sql' 3 +8744 'squ' 3 +8745 'src' 3 +8746 'ssa' 3 +8747 'ssh' 3 +8748 'ssl' 3 +8749 'sta' 3 +8750 'std' 3 +8751 'ste' 3 +8752 'sth' 3 +8753 'sti' 3 +8754 'stm' 3 +8755 'sto' 3 +8756 'str' 3 +8757 'sts' 3 +8758 'stu' 3 +8759 'sty' 3 +8760 'sub' 3 +8761 'suc' 3 +8762 'sum' 3 +8763 'sun' 3 +8764 'sup' 3 +8765 'sur' 3 +8766 'sus' 3 +8767 'svg' 3 +8768 'svn' 3 +8769 'swe' 3 +8770 'sym' 3 +8771 'syn' 3 +8772 'sys' 3 +8773 'tab' 3 +8774 'tag' 3 +8775 'tah' 3 +8776 'tal' 3 +8777 'tam' 3 +8778 'tan' 3 +8779 'tap' 3 +8780 'tar' 3 +8781 'tas' 3 +8782 'tat' 3 +8783 'tau' 3 +8784 'tax' 3 +8785 'tbl' 3 +8786 'tcp' 3 +8787 'tea' 3 +8788 'tec' 3 +8789 'ted' 3 +8790 'tee' 3 +8791 'tek' 3 +8792 'tel' 3 +8793 'tem' 3 +8794 'ten' 3 +8795 'ter' 3 +8796 'tes' 3 +8797 'tet' 3 +8798 'tex' 3 +8799 'tgt' 3 +8800 'tha' 3 +8801 'the' 3 +8802 'thi' 3 +8803 'thm' 3 +8804 'thr' 3 +8805 'ths' 3 +8806 'thy' 3 +8807 'tic' 3 +8808 'tid' 3 +8809 'tie' 3 +8810 'tif' 3 +8811 'tig' 3 +8812 'tik' 3 +8813 'til' 3 +8814 'tim' 3 +8815 'tin' 3 +8816 'tip' 3 +8817 'tis' 3 +8818 'tit' 3 +8819 'tle' 3 +8820 'tls' 3 +8821 'tml' 3 +8822 'tmp' 3 +8823 'toc' 3 +8824 'tod' 3 +8825 'tok' 3 +8826 'tol' 3 +8827 'tom' 3 +8828 'ton' 3 +8829 'too' 3 +8830 'top' 3 +8831 'tor' 3 +8832 'tos' 3 +8833 'tot' 3 +8834 'tow' 3 +8835 'tpl' 3 +8836 'tra' 3 +8837 'tre' 3 +8838 'tri' 3 +8839 'trl' 3 +8840 'tro' 3 +8841 'tru' 3 +8842 'try' 3 +8843 'tte' 3 +8844 'tti' 3 +8845 'ttl' 3 +8846 'ttp' 3 +8847 'tty' 3 +8848 'tum' 3 +8849 'tun' 3 +8850 'tur' 3 +8851 'two' 3 +8852 'txt' 3 +8853 'typ' 3 +8854 'té' 3 +8855 'tó' 3 +8856 'ual' 3 +8857 'uan' 3 +8858 'uar' 3 +8859 'uba' 3 +8860 'ubb' 3 +8861 'ube' 3 +8862 'ubi' 3 +8863 'ubl' 3 +8864 'ubs' 3 +8865 'uby' 3 +8866 'uca' 3 +8867 'ucc' 3 +8868 'uce' 3 +8869 'uch' 3 +8870 'uci' 3 +8871 'uck' 3 +8872 'uct' 3 +8873 'uda' 3 +8874 'udd' 3 +8875 'ude' 3 +8876 'udi' 3 +8877 'udo' 3 +8878 'uds' 3 +8879 'ued' 3 +8880 'uel' 3 +8881 'uen' 3 +8882 'uer' 3 +8883 'ues' 3 +8884 'uet' 3 +8885 'uez' 3 +8886 'ufe' 3 +8887 'uff' 3 +8888 'uga' 3 +8889 'uge' 3 +8890 'ugg' 3 +8891 'ugh' 3 +8892 'ugi' 3 +8893 'ugo' 3 +8894 'ugs' 3 +8895 'ugu' 3 +8896 'uid' 3 +8897 'uil' 3 +8898 'uin' 3 +8899 'uir' 3 +8900 'uis' 3 +8901 'uit' 3 +8902 'uje' 3 +8903 'uka' 3 +8904 'uke' 3 +8905 'uki' 3 +8906 'uko' 3 +8907 'uks' 3 +8908 'uku' 3 +8909 'ula' 3 +8910 'uld' 3 +8911 'ule' 3 +8912 'ulf' 3 +8913 'uli' 3 +8914 'ulk' 3 +8915 'ull' 3 +8916 'ulo' 3 +8917 'ulp' 3 +8918 'uls' 3 
+8919 'ult' 3 +8920 'ulu' 3 +8921 'uly' 3 +8922 'uma' 3 +8923 'umb' 3 +8924 'ume' 3 +8925 'umi' 3 +8926 'uml' 3 +8927 'umm' 3 +8928 'umn' 3 +8929 'umo' 3 +8930 'ump' 3 +8931 'ums' 3 +8932 'umu' 3 +8933 'una' 3 +8934 'unc' 3 +8935 'und' 3 +8936 'une' 3 +8937 'ung' 3 +8938 'uni' 3 +8939 'unj' 3 +8940 'unk' 3 +8941 'unn' 3 +8942 'uno' 3 +8943 'uns' 3 +8944 'unt' 3 +8945 'upa' 3 +8946 'upe' 3 +8947 'upo' 3 +8948 'upp' 3 +8949 'ups' 3 +8950 'upt' 3 +8951 'ura' 3 +8952 'urb' 3 +8953 'urd' 3 +8954 'ure' 3 +8955 'urf' 3 +8956 'urg' 3 +8957 'uri' 3 +8958 'urk' 3 +8959 'url' 3 +8960 'urm' 3 +8961 'urn' 3 +8962 'uro' 3 +8963 'urr' 3 +8964 'urs' 3 +8965 'urt' 3 +8966 'uru' 3 +8967 'ury' 3 +8968 'usa' 3 +8969 'usb' 3 +8970 'usc' 3 +8971 'use' 3 +8972 'ush' 3 +8973 'usi' 3 +8974 'usk' 3 +8975 'uso' 3 +8976 'usp' 3 +8977 'usr' 3 +8978 'uss' 3 +8979 'ust' 3 +8980 'usu' 3 +8981 'usz' 3 +8982 'uta' 3 +8983 'utc' 3 +8984 'ute' 3 +8985 'utf' 3 +8986 'uth' 3 +8987 'uti' 3 +8988 'utm' 3 +8989 'uto' 3 +8990 'uts' 3 +8991 'utt' 3 +8992 'uty' 3 +8993 'utz' 3 +8994 'uum' 3 +8995 'uve' 3 +8996 'uvo' 3 +8997 'uxe' 3 +8998 'uya' 3 +8999 'uzz' 3 +9000 'uß' 3 +9001 'ué' 3 +9002 'uí' 3 +9003 'už' 3 +9004 'vac' 3 +9005 'vae' 3 +9006 'val' 3 +9007 'van' 3 +9008 'var' 3 +9009 'vas' 3 +9010 'vat' 3 +9011 'vec' 3 +9012 'ved' 3 +9013 'vee' 3 +9014 'veg' 3 +9015 'veh' 3 +9016 'vel' 3 +9017 'ven' 3 +9018 'ver' 3 +9019 'ves' 3 +9020 'vet' 3 +9021 'vex' 3 +9022 'vey' 3 +9023 'vez' 3 +9024 'via' 3 +9025 'vic' 3 +9026 'vid' 3 +9027 'vie' 3 +9028 'vig' 3 +9029 'vii' 3 +9030 'vik' 3 +9031 'vil' 3 +9032 'vim' 3 +9033 'vin' 3 +9034 'vio' 3 +9035 'vip' 3 +9036 'vir' 3 +9037 'vis' 3 +9038 'vit' 3 +9039 'viv' 3 +9040 'viz' 3 +9041 'voc' 3 +9042 'vod' 3 +9043 'vol' 3 +9044 'von' 3 +9045 'vor' 3 +9046 'vos' 3 +9047 'vox' 3 +9048 'voy' 3 +9049 'vre' 3 +9050 'vue' 3 +9051 'vá' 3 +9052 'vä' 3 +9053 'vé' 3 +9054 'ví' 3 +9055 'vě' 3 +9056 'wal' 3 +9057 'wan' 3 +9058 'wap' 3 +9059 'war' 3 +9060 'was' 3 +9061 'wat' 3 +9062 'wav' 3 +9063 'way' 3 +9064 'web' 3 +9065 'wed' 3 +9066 'weg' 3 +9067 'wei' 3 +9068 'wel' 3 +9069 'wen' 3 +9070 'wer' 3 +9071 'wet' 3 +9072 'whe' 3 +9073 'who' 3 +9074 'why' 3 +9075 'wid' 3 +9076 'wie' 3 +9077 'wig' 3 +9078 'wik' 3 +9079 'wil' 3 +9080 'win' 3 +9081 'wis' 3 +9082 'wit' 3 +9083 'wol' 3 +9084 'won' 3 +9085 'wor' 3 +9086 'www' 3 +9087 'wyn' 3 +9088 'xFF' 3 +9089 'xcb' 3 +9090 'xed' 3 +9091 'xes' 3 +9092 'xfe' 3 +9093 'xff' 3 +9094 'xhr' 3 +9095 'xia' 3 +9096 'xic' 3 +9097 'xim' 3 +9098 'xin' 3 +9099 'xis' 3 +9100 'xit' 3 +9101 'xiv' 3 +9102 'xls' 3 +9103 'xml' 3 +9104 'xon' 3 +9105 'xor' 3 +9106 'xsd' 3 +9107 'xsl' 3 +9108 'xxx' 3 +9109 'xyz' 3 +9110 'yah' 3 +9111 'yal' 3 +9112 'yam' 3 +9113 'yan' 3 +9114 'yar' 3 +9115 'yaw' 3 +9116 'ych' 3 +9117 'ycl' 3 +9118 'yel' 3 +9119 'yen' 3 +9120 'yer' 3 +9121 'yes' 3 +9122 'yet' 3 +9123 'yla' 3 +9124 'yle' 3 +9125 'yll' 3 +9126 'yme' 3 +9127 'yml' 3 +9128 'yna' 3 +9129 'ync' 3 +9130 'yne' 3 +9131 'ynn' 3 +9132 'ynt' 3 +9133 'yon' 3 +9134 'yor' 3 +9135 'you' 3 +9136 'ype' 3 +9137 'yre' 3 +9138 'ysi' 3 +9139 'yst' 3 +9140 'ysz' 3 +9141 'yth' 3 +9142 'yun' 3 +9143 'yyy' 3 +9144 'yó' 3 +9145 'zag' 3 +9146 'zak' 3 +9147 'zan' 3 +9148 'zar' 3 +9149 'zas' 3 +9150 'zed' 3 +9151 'zee' 3 +9152 'zej' 3 +9153 'zek' 3 +9154 'zel' 3 +9155 'zem' 3 +9156 'zen' 3 +9157 'zer' 3 +9158 'zes' 3 +9159 'zet' 3 +9160 'zew' 3 +9161 'zia' 3 +9162 'zie' 3 +9163 'zig' 3 +9164 'zik' 3 +9165 'zin' 3 +9166 'zip' 3 +9167 'zon' 3 +9168 'zor' 3 +9169 'zos' 3 +9170 'zyk' 3 +9171 'zym' 3 +9172 'zza' 3 +9173 
'zzi' 3 +9174 'zzo' 3 +9175 'zá' 3 +9176 'zó' 3 +9177 'zą' 3 +9178 'zę' 3 +9179 'ző' 3 +9180 '{{\\' 3 +9181 '{})' 3 +9182 '{},' 3 +9183 '{}.' 3 +9184 '{}\\' 3 +9185 '{}_' 3 +9186 '}")' 3 +9187 '}",' 3 +9188 '}$$' 3 +9189 "}')" 3 +9190 "}'," 3 +9191 '}))' 3 +9192 '}),' 3 +9193 '}).' 3 +9194 '});' 3 +9195 '})\\' 3 +9196 '},"' 3 +9197 '},{' 3 +9198 '}.{' 3 +9199 '}/{' 3 +9200 '}:{' 3 +9201 '}%' 4 +19277 ' \'"\'' 4 +19278 " '#'" 4 +19279 " '''" 4 +19280 " '')" 4 +19281 " ''," 4 +19282 " '';" 4 +19283 " '*'" 4 +19284 " '-'" 4 +19285 " '--" 4 +19286 " './" 4 +19287 " '/'" 4 +19288 " ':'" 4 +19289 " '' 4 +19312 ' ...' 4 +19313 ' ../' 4 +19314 ' /**' 4 +19315 ' ///' 4 +19316 ' /><' 4 +19317 ' :-)' 4 +19318 ' <%=' 4 +19319 ' <--' 4 +19320 ' ===' 4 +19321 ' ==>' 4 +19322 ' >>>' 4 +19323 ' ???' 4 +19324 ' AAA' 4 +19325 ' AAP' 4 +19326 ' ABC' 4 +19327 ' ABI' 4 +19328 ' ABS' 4 +19329 ' ACC' 4 +19330 ' ACE' 4 +19331 ' ACK' 4 +19332 ' ACL' 4 +19333 ' ACS' 4 +19334 ' ACT' 4 +19335 ' ADA' 4 +19336 ' ADC' 4 +19337 ' ADD' 4 +19338 ' AES' 4 +19339 ' AFC' 4 +19340 ' AFL' 4 +19341 ' AFP' 4 +19342 ' AIR' 4 +19343 ' ALL' 4 +19344 ' ALS' 4 +19345 ' ALT' 4 +19346 ' AMD' 4 +19347 ' AMP' 4 +19348 ' ANC' 4 +19349 ' AND' 4 +19350 ' ANN' 4 +19351 ' ANY' 4 +19352 ' APC' 4 +19353 ' API' 4 +19354 ' APP' 4 +19355 ' APR' 4 +19356 ' ARE' 4 +19357 ' ARG' 4 +19358 ' ARM' 4 +19359 ' ART' 4 +19360 ' ASC' 4 +19361 ' ASD' 4 +19362 ' ASE' 4 +19363 ' ASF' 4 +19364 ' ASP' 4 +19365 ' ASS' 4 +19366 ' AST' 4 +19367 ' ATM' 4 +19368 ' ATP' 4 +19369 ' ATT' 4 +19370 ' AUT' 4 +19371 ' AWS' 4 +19372 ' Abb' 4 +19373 ' Abd' 4 +19374 ' Abe' 4 +19375 ' Abl' 4 +19376 ' Abr' 4 +19377 ' Abs' 4 +19378 ' Abu' 4 +19379 ' Acc' 4 +19380 ' Ace' 4 +19381 ' Ach' 4 +19382 ' Act' 4 +19383 ' Ada' 4 +19384 ' Add' 4 +19385 ' Ade' 4 +19386 ' Adv' 4 +19387 ' Aer' 4 +19388 ' Aff' 4 +19389 ' Afr' 4 +19390 ' Age' 4 +19391 ' Agg' 4 +19392 ' Agr' 4 +19393 ' Agu' 4 +19394 ' Aid' 4 +19395 ' Aim' 4 +19396 ' Ain' 4 +19397 ' Air' 4 +19398 ' Akt' 4 +19399 ' Ala' 4 +19400 ' Alb' 4 +19401 ' Alc' 4 +19402 ' Ald' 4 +19403 ' Ale' 4 +19404 ' Alf' 4 +19405 ' Alg' 4 +19406 ' Ali' 4 +19407 ' All' 4 +19408 ' Alo' 4 +19409 ' Als' 4 +19410 ' Alt' 4 +19411 ' Ama' 4 +19412 ' Amb' 4 +19413 ' Amp' 4 +19414 ' Amy' 4 +19415 ' Ana' 4 +19416 ' Anc' 4 +19417 ' And' 4 +19418 ' Ang' 4 +19419 ' Ank' 4 +19420 ' Ann' 4 +19421 ' Ans' 4 +19422 ' Ant' 4 +19423 ' Any' 4 +19424 ' Aph' 4 +19425 ' Api' 4 +19426 ' App' 4 +19427 ' Apr' 4 +19428 ' Aqu' 4 +19429 ' Ara' 4 +19430 ' Arc' 4 +19431 ' Are' 4 +19432 ' Arg' 4 +19433 ' Ari' 4 +19434 ' Ark' 4 +19435 ' Arm' 4 +19436 ' Arn' 4 +19437 ' Arr' 4 +19438 ' Ars' 4 +19439 ' Art' 4 +19440 ' Asc' 4 +19441 ' Ash' 4 +19442 ' Ask' 4 +19443 ' Asp' 4 +19444 ' Ass' 4 +19445 ' Ast' 4 +19446 ' Ath' 4 +19447 ' Atl' 4 +19448 ' Att' 4 +19449 ' Aub' 4 +19450 ' Aud' 4 +19451 ' Auf' 4 +19452 ' Aug' 4 +19453 ' Aur' 4 +19454 ' Aus' 4 +19455 ' Aut' 4 +19456 ' Aux' 4 +19457 ' Ave' 4 +19458 ' Aβ' 4 +19459 ' BAL' 4 +19460 ' BAR' 4 +19461 ' BAS' 4 +19462 ' BAT' 4 +19463 ' BBB' 4 +19464 ' BBC' 4 +19465 ' BCE' 4 +19466 ' BEL' 4 +19467 ' BET' 4 +19468 ' BIG' 4 +19469 ' BIN' 4 +19470 ' BIT' 4 +19471 ' BJP' 4 +19472 ' BMC' 4 +19473 ' BMI' 4 +19474 ' BMP' 4 +19475 ' BMW' 4 +19476 ' BRE' 4 +19477 ' BSD' 4 +19478 ' BTC' 4 +19479 ' BUS' 4 +19480 ' BUT' 4 +19481 ' Bab' 4 +19482 ' Bac' 4 +19483 ' Bad' 4 +19484 ' Bag' 4 +19485 ' Bah' 4 +19486 ' Bai' 4 +19487 ' Bak' 4 +19488 ' Bal' 4 +19489 ' Bam' 4 +19490 ' Ban' 4 +19491 ' Bar' 4 +19492 ' Bas' 4 +19493 ' Bat' 4 +19494 ' Bau' 4 +19495 ' Bav' 4 +19496 
' Bay' 4 +19497 ' Baz' 4 +19498 ' Bea' 4 +19499 ' Bec' 4 +19500 ' Bed' 4 +19501 ' Bee' 4 +19502 ' Beg' 4 +19503 ' Beh' 4 +19504 ' Bei' 4 +19505 ' Bek' 4 +19506 ' Bel' 4 +19507 ' Ben' 4 +19508 ' Ber' 4 +19509 ' Bes' 4 +19510 ' Bet' 4 +19511 ' Bew' 4 +19512 ' Bey' 4 +19513 ' Bez' 4 +19514 ' Bib' 4 +19515 ' Bid' 4 +19516 ' Big' 4 +19517 ' Bij' 4 +19518 ' Bil' 4 +19519 ' Bin' 4 +19520 ' Bio' 4 +19521 ' Bir' 4 +19522 ' Bis' 4 +19523 ' Bit' 4 +19524 ' Ble' 4 +19525 ' Blo' 4 +19526 ' Blu' 4 +19527 ' Bob' 4 +19528 ' Bod' 4 +19529 ' Bog' 4 +19530 ' Boh' 4 +19531 ' Bol' 4 +19532 ' Bom' 4 +19533 ' Bon' 4 +19534 ' Bor' 4 +19535 ' Bos' 4 +19536 ' Bot' 4 +19537 ' Bou' 4 +19538 ' Bow' 4 +19539 ' Box' 4 +19540 ' Boy' 4 +19541 ' Bra' 4 +19542 ' Bre' 4 +19543 ' Bri' 4 +19544 ' Bro' 4 +19545 ' Bru' 4 +19546 ' Bry' 4 +19547 ' Buc' 4 +19548 ' Bud' 4 +19549 ' Bug' 4 +19550 ' Buk' 4 +19551 ' Bul' 4 +19552 ' Bun' 4 +19553 ' Bur' 4 +19554 ' Bus' 4 +19555 ' But' 4 +19556 ' Buy' 4 +19557 ' Byr' 4 +19558 ' Byz' 4 +19559 ' Bé' 4 +19560 ' Bö' 4 +19561 ' Bü' 4 +19562 ' CAB' 4 +19563 ' CAD' 4 +19564 ' CAL' 4 +19565 ' CAM' 4 +19566 ' CAN' 4 +19567 ' CAP' 4 +19568 ' CAR' 4 +19569 ' CAS' 4 +19570 ' CAT' 4 +19571 ' CBC' 4 +19572 ' CBD' 4 +19573 ' CBS' 4 +19574 ' CCC' 4 +19575 ' CCD' 4 +19576 ' CCT' 4 +19577 ' CDC' 4 +19578 ' CDs' 4 +19579 ' CEO' 4 +19580 ' CES' 4 +19581 ' CGI' 4 +19582 ' CHE' 4 +19583 ' CHO' 4 +19584 ' CIA' 4 +19585 ' CID' 4 +19586 ' CIF' 4 +19587 ' CIS' 4 +19588 ' CIT' 4 +19589 ' CLA' 4 +19590 ' CLI' 4 +19591 ' CMD' 4 +19592 ' CMS' 4 +19593 ' CNN' 4 +19594 ' CNS' 4 +19595 ' COL' 4 +19596 ' COM' 4 +19597 ' CON' 4 +19598 ' COP' 4 +19599 ' COR' 4 +19600 ' COS' 4 +19601 ' CPR' 4 +19602 ' CPU' 4 +19603 ' CRC' 4 +19604 ' CRE' 4 +19605 ' CRM' 4 +19606 ' CSR' 4 +19607 ' CSS' 4 +19608 ' CST' 4 +19609 ' CSV' 4 +19610 ' CTR' 4 +19611 ' CUR' 4 +19612 ' Cab' 4 +19613 ' Cad' 4 +19614 ' Caf' 4 +19615 ' Cal' 4 +19616 ' Cam' 4 +19617 ' Can' 4 +19618 ' Cap' 4 +19619 ' Car' 4 +19620 ' Cas' 4 +19621 ' Cat' 4 +19622 ' Cav' 4 +19623 ' Cay' 4 +19624 ' Cec' 4 +19625 ' Ced' 4 +19626 ' Cel' 4 +19627 ' Cer' 4 +19628 ' Ces' 4 +19629 ' Cet' 4 +19630 ' Cha' 4 +19631 ' Che' 4 +19632 ' Chi' 4 +19633 ' Cho' 4 +19634 ' Chr' 4 +19635 ' Chu' 4 +19636 ' Cic' 4 +19637 ' Cin' 4 +19638 ' Cir' 4 +19639 ' Cit' 4 +19640 ' Civ' 4 +19641 ' Cla' 4 +19642 ' Cle' 4 +19643 ' Cli' 4 +19644 ' Clo' 4 +19645 ' Cly' 4 +19646 ' Cob' 4 +19647 ' Coc' 4 +19648 ' Cod' 4 +19649 ' Coh' 4 +19650 ' Col' 4 +19651 ' Com' 4 +19652 ' Con' 4 +19653 ' Cop' 4 +19654 ' Cor' 4 +19655 ' Cos' 4 +19656 ' Cot' 4 +19657 ' Cou' 4 +19658 ' Cov' 4 +19659 ' Cow' 4 +19660 ' Cox' 4 +19661 ' Coy' 4 +19662 ' Cra' 4 +19663 ' Cre' 4 +19664 ' Cri' 4 +19665 ' Cro' 4 +19666 ' Cru' 4 +19667 ' Cry' 4 +19668 ' Cub' 4 +19669 ' Cul' 4 +19670 ' Cum' 4 +19671 ' Cup' 4 +19672 ' Cur' 4 +19673 ' Cut' 4 +19674 ' Cyr' 4 +19675 ' DAC' 4 +19676 ' DAG' 4 +19677 ' DAM' 4 +19678 ' DAR' 4 +19679 ' DAT' 4 +19680 ' DAY' 4 +19681 ' DDR' 4 +19682 ' DEA' 4 +19683 ' DEC' 4 +19684 ' DEF' 4 +19685 ' DEL' 4 +19686 ' DEM' 4 +19687 ' DEN' 4 +19688 ' DEP' 4 +19689 ' DES' 4 +19690 ' DET' 4 +19691 ' DEV' 4 +19692 ' DFS' 4 +19693 ' DHS' 4 +19694 ' DID' 4 +19695 ' DIG' 4 +19696 ' DIR' 4 +19697 ' DIS' 4 +19698 ' DIV' 4 +19699 ' DIY' 4 +19700 ' DLL' 4 +19701 ' DNA' 4 +19702 ' DNS' 4 +19703 ' DOC' 4 +19704 ' DOI' 4 +19705 ' DOM' 4 +19706 ' DON' 4 +19707 ' DOS' 4 +19708 ' DOT' 4 +19709 ' DSL' 4 +19710 ' DSM' 4 +19711 ' DVD' 4 +19712 ' Dad' 4 +19713 ' Dag' 4 +19714 ' Dah' 4 +19715 ' Dai' 4 +19716 ' Dak' 4 +19717 ' Dal' 4 +19718 ' 
Dam' 4 +19719 ' Dan' 4 +19720 ' Dar' 4 +19721 ' Das' 4 +19722 ' Dat' 4 +19723 ' Dav' 4 +19724 ' Daw' 4 +19725 ' Day' 4 +19726 ' Deb' 4 +19727 ' Dec' 4 +19728 ' Ded' 4 +19729 ' Dee' 4 +19730 ' Def' 4 +19731 ' Deg' 4 +19732 ' Dek' 4 +19733 ' Del' 4 +19734 ' Dem' 4 +19735 ' Den' 4 +19736 ' Dep' 4 +19737 ' Der' 4 +19738 ' Des' 4 +19739 ' Det' 4 +19740 ' Dev' 4 +19741 ' Dew' 4 +19742 ' Dex' 4 +19743 ' Dez' 4 +19744 ' Dia' 4 +19745 ' Did' 4 +19746 ' Die' 4 +19747 ' Dig' 4 +19748 ' Dil' 4 +19749 ' Dim' 4 +19750 ' Din' 4 +19751 ' Dip' 4 +19752 ' Dir' 4 +19753 ' Dis' 4 +19754 ' Dit' 4 +19755 ' Div' 4 +19756 ' Dix' 4 +19757 ' Dob' 4 +19758 ' Doc' 4 +19759 ' Dod' 4 +19760 ' Doe' 4 +19761 ' Dog' 4 +19762 ' Dok' 4 +19763 ' Dol' 4 +19764 ' Dom' 4 +19765 ' Don' 4 +19766 ' Dop' 4 +19767 ' Dor' 4 +19768 ' Dos' 4 +19769 ' Dot' 4 +19770 ' Dou' 4 +19771 ' Dow' 4 +19772 ' Dra' 4 +19773 ' Dre' 4 +19774 ' Dro' 4 +19775 ' Dru' 4 +19776 ' Dry' 4 +19777 ' Dub' 4 +19778 ' Duc' 4 +19779 ' Dud' 4 +19780 ' Due' 4 +19781 ' Dul' 4 +19782 ' Dum' 4 +19783 ' Dun' 4 +19784 ' Duo' 4 +19785 ' Dup' 4 +19786 ' Dur' 4 +19787 ' Dyn' 4 +19788 ' Dé' 4 +19789 ' Dí' 4 +19790 ' ECM' 4 +19791 ' EEG' 4 +19792 ' EMP' 4 +19793 ' EMS' 4 +19794 ' END' 4 +19795 ' ENG' 4 +19796 ' EOF' 4 +19797 ' EOS' 4 +19798 ' EPA' 4 +19799 ' EPS' 4 +19800 ' ERA' 4 +19801 ' ERR' 4 +19802 ' ESA' 4 +19803 ' ESC' 4 +19804 ' ESP' 4 +19805 ' EST' 4 +19806 ' ETH' 4 +19807 ' EUR' 4 +19808 ' EXP' 4 +19809 ' EXT' 4 +19810 ' Ear' 4 +19811 ' Eat' 4 +19812 ' Eck' 4 +19813 ' Eco' 4 +19814 ' Edd' 4 +19815 ' Edu' 4 +19816 ' Eff' 4 +19817 ' Egg' 4 +19818 ' Ein' 4 +19819 ' Eld' 4 +19820 ' Ele' 4 +19821 ' Eli' 4 +19822 ' Ell' 4 +19823 ' Emb' 4 +19824 ' Emp' 4 +19825 ' Enc' 4 +19826 ' End' 4 +19827 ' Eng' 4 +19828 ' Enh' 4 +19829 ' Ens' 4 +19830 ' Ent' 4 +19831 ' Env' 4 +19832 ' Eph' 4 +19833 ' Equ' 4 +19834 ' Era' 4 +19835 ' Erd' 4 +19836 ' Ern' 4 +19837 ' Err' 4 +19838 ' Esc' 4 +19839 ' Esp' 4 +19840 ' Ess' 4 +19841 ' Est' 4 +19842 ' Eth' 4 +19843 ' Eug' 4 +19844 ' Eur' 4 +19845 ' Eva' 4 +19846 ' Eve' 4 +19847 ' Exc' 4 +19848 ' Exp' 4 +19849 ' Ext' 4 +19850 ' Eye' 4 +19851 ' FAA' 4 +19852 ' FAC' 4 +19853 ' FAQ' 4 +19854 ' FAR' 4 +19855 ' FAT' 4 +19856 ' FBI' 4 +19857 ' FCC' 4 +19858 ' FDA' 4 +19859 ' FFT' 4 +19860 ' FIF' 4 +19861 ' FIG' 4 +19862 ' FIL' 4 +19863 ' FIN' 4 +19864 ' FIR' 4 +19865 ' FIT' 4 +19866 ' FIX' 4 +19867 ' FOR' 4 +19868 ' FOX' 4 +19869 ' FPS' 4 +19870 ' FTP' 4 +19871 ' FUN' 4 +19872 ' Fab' 4 +19873 ' Fac' 4 +19874 ' Fah' 4 +19875 ' Fal' 4 +19876 ' Fam' 4 +19877 ' Fan' 4 +19878 ' Far' 4 +19879 ' Fas' 4 +19880 ' Fat' 4 +19881 ' Fay' 4 +19882 ' Feb' 4 +19883 ' Fed' 4 +19884 ' Fel' 4 +19885 ' Fem' 4 +19886 ' Fen' 4 +19887 ' Fer' 4 +19888 ' Fet' 4 +19889 ' Few' 4 +19890 ' Fib' 4 +19891 ' Fif' 4 +19892 ' Fig' 4 +19893 ' Fil' 4 +19894 ' Fin' 4 +19895 ' Fir' 4 +19896 ' Fit' 4 +19897 ' Fix' 4 +19898 ' Fla' 4 +19899 ' Fle' 4 +19900 ' Flo' 4 +19901 ' Flu' 4 +19902 ' Fly' 4 +19903 ' Fog' 4 +19904 ' Fol' 4 +19905 ' Fon' 4 +19906 ' Foo' 4 +19907 ' For' 4 +19908 ' Fot' 4 +19909 ' Fou' 4 +19910 ' Fox' 4 +19911 ' Fra' 4 +19912 ' Fre' 4 +19913 ' Fri' 4 +19914 ' Fro' 4 +19915 ' Fry' 4 +19916 ' Fuj' 4 +19917 ' Fuk' 4 +19918 ' Ful' 4 +19919 ' Fun' 4 +19920 ' Fur' 4 +19921 ' Fut' 4 +19922 ' Fé' 4 +19923 ' GAM' 4 +19924 ' GCC' 4 +19925 ' GDP' 4 +19926 ' GEN' 4 +19927 ' GET' 4 +19928 ' GFP' 4 +19929 ' GHz' 4 +19930 ' GMT' 4 +19931 ' GNU' 4 +19932 ' GOD' 4 +19933 ' GOP' 4 +19934 ' GPL' 4 +19935 ' GPS' 4 +19936 ' GPU' 4 +19937 ' GRE' 4 +19938 ' GRO' 4 +19939 ' GSM' 4 +19940 ' GST' 
4 +19941 ' GUI' 4 +19942 ' Gab' 4 +19943 ' Gad' 4 +19944 ' Gal' 4 +19945 ' Gam' 4 +19946 ' Gan' 4 +19947 ' Gap' 4 +19948 ' Gar' 4 +19949 ' Gas' 4 +19950 ' Gat' 4 +19951 ' Gay' 4 +19952 ' Gaz' 4 +19953 ' GeV' 4 +19954 ' Geb' 4 +19955 ' Ged' 4 +19956 ' Geg' 4 +19957 ' Gel' 4 +19958 ' Gem' 4 +19959 ' Gen' 4 +19960 ' Geo' 4 +19961 ' Ger' 4 +19962 ' Ges' 4 +19963 ' Get' 4 +19964 ' Gew' 4 +19965 ' Gib' 4 +19966 ' Gig' 4 +19967 ' Gil' 4 +19968 ' Gin' 4 +19969 ' Gir' 4 +19970 ' Git' 4 +19971 ' Gle' 4 +19972 ' Gly' 4 +19973 ' Gob' 4 +19974 ' God' 4 +19975 ' Gol' 4 +19976 ' Gon' 4 +19977 ' Gor' 4 +19978 ' Gos' 4 +19979 ' Got' 4 +19980 ' Gov' 4 +19981 ' Gow' 4 +19982 ' Gra' 4 +19983 ' Gre' 4 +19984 ' Gri' 4 +19985 ' Gro' 4 +19986 ' Gru' 4 +19987 ' Gtk' 4 +19988 ' Gul' 4 +19989 ' Gum' 4 +19990 ' Gun' 4 +19991 ' Gur' 4 +19992 ' Gus' 4 +19993 ' Gut' 4 +19994 ' Guy' 4 +19995 ' Gym' 4 +19996 ' Gä' 4 +19997 ' Gé' 4 +19998 ' Gó' 4 +19999 ' Gö' 4 +20000 ' Gü' 4 +20001 ' HAL' 4 +20002 ' HAR' 4 +20003 ' HAS' 4 +20004 ' HBO' 4 +20005 ' HEL' 4 +20006 ' HER' 4 +20007 ' HIS' 4 +20008 ' HIV' 4 +20009 ' HMS' 4 +20010 ' HOW' 4 +20011 ' HPV' 4 +20012 ' HTC' 4 +20013 ' Hab' 4 +20014 ' Had' 4 +20015 ' Hag' 4 +20016 ' Hai' 4 +20017 ' Haj' 4 +20018 ' Hak' 4 +20019 ' Hal' 4 +20020 ' Ham' 4 +20021 ' Han' 4 +20022 ' Har' 4 +20023 ' Has' 4 +20024 ' Hat' 4 +20025 ' Hav' 4 +20026 ' Haw' 4 +20027 ' Hay' 4 +20028 ' Haz' 4 +20029 ' Heb' 4 +20030 ' Hed' 4 +20031 ' Hel' 4 +20032 ' Hem' 4 +20033 ' Hen' 4 +20034 ' Hep' 4 +20035 ' Her' 4 +20036 ' Het' 4 +20037 ' Hew' 4 +20038 ' Hex' 4 +20039 ' Hey' 4 +20040 ' Hib' 4 +20041 ' Hig' 4 +20042 ' Hij' 4 +20043 ' Hil' 4 +20044 ' Him' 4 +20045 ' Hin' 4 +20046 ' Hip' 4 +20047 ' Hir' 4 +20048 ' His' 4 +20049 ' Hit' 4 +20050 ' Hmm' 4 +20051 ' Hob' 4 +20052 ' Hod' 4 +20053 ' Hof' 4 +20054 ' Hog' 4 +20055 ' Hol' 4 +20056 ' Hom' 4 +20057 ' Hon' 4 +20058 ' Hop' 4 +20059 ' Hor' 4 +20060 ' Hos' 4 +20061 ' Hot' 4 +20062 ' Hou' 4 +20063 ' How' 4 +20064 ' Hoy' 4 +20065 ' Hua' 4 +20066 ' Hub' 4 +20067 ' Hud' 4 +20068 ' Hug' 4 +20069 ' Hum' 4 +20070 ' Hun' 4 +20071 ' Hur' 4 +20072 ' Hus' 4 +20073 ' Hut' 4 +20074 ' Hyp' 4 +20075 ' Hä' 4 +20076 ' Hö' 4 +20077 ' IBM' 4 +20078 ' ICC' 4 +20079 ' ICE' 4 +20080 ' ICO' 4 +20081 ' ICT' 4 +20082 ' ICU' 4 +20083 ' IDE' 4 +20084 ' IDs' 4 +20085 ' III' 4 +20086 ' IIS' 4 +20087 ' IMF' 4 +20088 ' IMP' 4 +20089 ' INC' 4 +20090 ' IND' 4 +20091 ' INF' 4 +20092 ' INS' 4 +20093 ' INT' 4 +20094 ' IPA' 4 +20095 ' IPO' 4 +20096 ' IPS' 4 +20097 ' IPv' 4 +20098 ' IRA' 4 +20099 ' IRC' 4 +20100 ' IRS' 4 +20101 ' ISO' 4 +20102 ' ISP' 4 +20103 ' ISS' 4 +20104 ' ITE' 4 +20105 ' ITS' 4 +20106 ' Ian' 4 +20107 ' Ice' 4 +20108 ' Ich' 4 +20109 ' Ide' 4 +20110 ' Ign' 4 +20111 ' Ill' 4 +20112 ' Ils' 4 +20113 ' Imm' 4 +20114 ' Imp' 4 +20115 ' Inc' 4 +20116 ' Ind' 4 +20117 ' Inf' 4 +20118 ' Ing' 4 +20119 ' Ink' 4 +20120 ' Inn' 4 +20121 ' Ins' 4 +20122 ' Int' 4 +20123 ' Inv' 4 +20124 ' IoT' 4 +20125 ' Ion' 4 +20126 ' Ips' 4 +20127 ' Ira' 4 +20128 ' Isa' 4 +20129 ' Ish' 4 +20130 ' Isl' 4 +20131 ' Isn' 4 +20132 ' Iss' 4 +20133 ' Ist' 4 +20134 ' Its' 4 +20135 ' Ivy' 4 +20136 ' JVM' 4 +20137 ' Jab' 4 +20138 ' Jac' 4 +20139 ' Jag' 4 +20140 ' Jah' 4 +20141 ' Jak' 4 +20142 ' Jal' 4 +20143 ' Jam' 4 +20144 ' Jan' 4 +20145 ' Jar' 4 +20146 ' Jas' 4 +20147 ' Jaw' 4 +20148 ' Jay' 4 +20149 ' Jed' 4 +20150 ' Jen' 4 +20151 ' Jer' 4 +20152 ' Jes' 4 +20153 ' Jet' 4 +20154 ' Jew' 4 +20155 ' Jim' 4 +20156 ' Jin' 4 +20157 ' Job' 4 +20158 ' Joe' 4 +20159 ' Joh' 4 +20160 ' Jon' 4 +20161 ' Jos' 4 +20162 ' Joy' 4 +20163 
' Jub' 4 +20164 ' Jud' 4 +20165 ' Jug' 4 +20166 ' Jul' 4 +20167 ' Jun' 4 +20168 ' Jur' 4 +20169 ' Já' 4 +20170 ' Jó' 4 +20171 ' KDE' 4 +20172 ' KEY' 4 +20173 ' Kab' 4 +20174 ' Kad' 4 +20175 ' Kag' 4 +20176 ' Kah' 4 +20177 ' Kai' 4 +20178 ' Kak' 4 +20179 ' Kal' 4 +20180 ' Kam' 4 +20181 ' Kan' 4 +20182 ' Kap' 4 +20183 ' Kar' 4 +20184 ' Kas' 4 +20185 ' Kat' 4 +20186 ' Kaw' 4 +20187 ' Kay' 4 +20188 ' Kaz' 4 +20189 ' Kel' 4 +20190 ' Kem' 4 +20191 ' Ken' 4 +20192 ' Ker' 4 +20193 ' Kes' 4 +20194 ' Ket' 4 +20195 ' Key' 4 +20196 ' Kid' 4 +20197 ' Kil' 4 +20198 ' Kim' 4 +20199 ' Kin' 4 +20200 ' Kir' 4 +20201 ' Kit' 4 +20202 ' Kle' 4 +20203 ' Kob' 4 +20204 ' Kod' 4 +20205 ' Koh' 4 +20206 ' Kok' 4 +20207 ' Kol' 4 +20208 ' Kom' 4 +20209 ' Kon' 4 +20210 ' Kop' 4 +20211 ' Kor' 4 +20212 ' Kos' 4 +20213 ' Kot' 4 +20214 ' Kou' 4 +20215 ' Kov' 4 +20216 ' Kra' 4 +20217 ' Kre' 4 +20218 ' Kro' 4 +20219 ' Kub' 4 +20220 ' Kul' 4 +20221 ' Kum' 4 +20222 ' Kun' 4 +20223 ' Kur' 4 +20224 ' Kut' 4 +20225 ' Kö' 4 +20226 ' Kü' 4 +20227 ' LAB' 4 +20228 ' LAN' 4 +20229 ' LAP' 4 +20230 ' LAS' 4 +20231 ' LAT' 4 +20232 ' LAW' 4 +20233 ' LCD' 4 +20234 ' LDL' 4 +20235 ' LED' 4 +20236 ' LEG' 4 +20237 ' LET' 4 +20238 ' LIB' 4 +20239 ' LIM' 4 +20240 ' LIN' 4 +20241 ' LLC' 4 +20242 ' LLP' 4 +20243 ' LOC' 4 +20244 ' LOG' 4 +20245 ' LOL' 4 +20246 ' LOS' 4 +20247 ' LOT' 4 +20248 ' LPS' 4 +20249 ' LSD' 4 +20250 ' LTE' 4 +20251 ' Lab' 4 +20252 ' Lac' 4 +20253 ' Lad' 4 +20254 ' Laf' 4 +20255 ' Lag' 4 +20256 ' Lah' 4 +20257 ' Lak' 4 +20258 ' Lal' 4 +20259 ' Lam' 4 +20260 ' Lan' 4 +20261 ' Lap' 4 +20262 ' Lar' 4 +20263 ' Las' 4 +20264 ' Lat' 4 +20265 ' Lau' 4 +20266 ' Lav' 4 +20267 ' Law' 4 +20268 ' Lay' 4 +20269 ' Laz' 4 +20270 ' Leb' 4 +20271 ' Lec' 4 +20272 ' Led' 4 +20273 ' Lee' 4 +20274 ' Leg' 4 +20275 ' Leh' 4 +20276 ' Lei' 4 +20277 ' Lem' 4 +20278 ' Len' 4 +20279 ' Leo' 4 +20280 ' Ler' 4 +20281 ' Les' 4 +20282 ' Let' 4 +20283 ' Lev' 4 +20284 ' Lew' 4 +20285 ' Lex' 4 +20286 ' Ley' 4 +20287 ' Lia' 4 +20288 ' Lib' 4 +20289 ' Lic' 4 +20290 ' Lid' 4 +20291 ' Lie' 4 +20292 ' Lif' 4 +20293 ' Lig' 4 +20294 ' Lik' 4 +20295 ' Lil' 4 +20296 ' Lim' 4 +20297 ' Lin' 4 +20298 ' Lip' 4 +20299 ' Lis' 4 +20300 ' Lit' 4 +20301 ' Liu' 4 +20302 ' Liv' 4 +20303 ' Liz' 4 +20304 ' Lob' 4 +20305 ' Loc' 4 +20306 ' Log' 4 +20307 ' Lok' 4 +20308 ' Lon' 4 +20309 ' Lor' 4 +20310 ' Los' 4 +20311 ' Lot' 4 +20312 ' Lou' 4 +20313 ' Lov' 4 +20314 ' Low' 4 +20315 ' Ltd' 4 +20316 ' Lua' 4 +20317 ' Lub' 4 +20318 ' Luc' 4 +20319 ' Lud' 4 +20320 ' Lug' 4 +20321 ' Luk' 4 +20322 ' Lum' 4 +20323 ' Lun' 4 +20324 ' Lup' 4 +20325 ' Lux' 4 +20326 ' Lyn' 4 +20327 ' Lys' 4 +20328 ' Lé' 4 +20329 ' Lö' 4 +20330 ' Lü' 4 +20331 ' MAC' 4 +20332 ' MAG' 4 +20333 ' MAL' 4 +20334 ' MAN' 4 +20335 ' MAP' 4 +20336 ' MAR' 4 +20337 ' MAS' 4 +20338 ' MAT' 4 +20339 ' MAX' 4 +20340 ' MAY' 4 +20341 ' MBA' 4 +20342 ' MED' 4 +20343 ' MEM' 4 +20344 ' MEN' 4 +20345 ' MEP' 4 +20346 ' MER' 4 +20347 ' MET' 4 +20348 ' MHz' 4 +20349 ' MIC' 4 +20350 ' MID' 4 +20351 ' MIL' 4 +20352 ' MIN' 4 +20353 ' MIS' 4 +20354 ' MIT' 4 +20355 ' MLB' 4 +20356 ' MLP' 4 +20357 ' MLS' 4 +20358 ' MMA' 4 +20359 ' MMP' 4 +20360 ' MOD' 4 +20361 ' MON' 4 +20362 ' MOR' 4 +20363 ' MOS' 4 +20364 ' MOT' 4 +20365 ' MPI' 4 +20366 ' MPs' 4 +20367 ' MRI' 4 +20368 ' MSC' 4 +20369 ' MSE' 4 +20370 ' MSG' 4 +20371 ' MSM' 4 +20372 ' MTV' 4 +20373 ' MUS' 4 +20374 ' MVC' 4 +20375 ' MVP' 4 +20376 ' Mac' 4 +20377 ' Mad' 4 +20378 ' Mae' 4 +20379 ' Mag' 4 +20380 ' Mah' 4 +20381 ' Mai' 4 +20382 ' Maj' 4 +20383 ' Mak' 4 +20384 ' Mal' 4 +20385 ' Mam' 4 
+20386 ' Man' 4 +20387 ' Mao' 4 +20388 ' Map' 4 +20389 ' Mar' 4 +20390 ' Mas' 4 +20391 ' Mat' 4 +20392 ' Mau' 4 +20393 ' Max' 4 +20394 ' May' 4 +20395 ' Maz' 4 +20396 ' McC' 4 +20397 ' McD' 4 +20398 ' McG' 4 +20399 ' McK' 4 +20400 ' McL' 4 +20401 ' McN' 4 +20402 ' MeV' 4 +20403 ' Med' 4 +20404 ' Meg' 4 +20405 ' Meh' 4 +20406 ' Mei' 4 +20407 ' Mel' 4 +20408 ' Mem' 4 +20409 ' Men' 4 +20410 ' Mer' 4 +20411 ' Mes' 4 +20412 ' Met' 4 +20413 ' Mex' 4 +20414 ' Mey' 4 +20415 ' Mia' 4 +20416 ' Mic' 4 +20417 ' Mid' 4 +20418 ' Mig' 4 +20419 ' Mik' 4 +20420 ' Mil' 4 +20421 ' Mim' 4 +20422 ' Min' 4 +20423 ' Mir' 4 +20424 ' Mis' 4 +20425 ' Mit' 4 +20426 ' Mix' 4 +20427 ' Miy' 4 +20428 ' Miz' 4 +20429 ' Mob' 4 +20430 ' Mod' 4 +20431 ' Mog' 4 +20432 ' Moh' 4 +20433 ' Mol' 4 +20434 ' Mom' 4 +20435 ' Mon' 4 +20436 ' Mor' 4 +20437 ' Mos' 4 +20438 ' Mot' 4 +20439 ' Mou' 4 +20440 ' Mov' 4 +20441 ' Moy' 4 +20442 ' Moz' 4 +20443 ' Mrs' 4 +20444 ' Msg' 4 +20445 ' Mud' 4 +20446 ' Mug' 4 +20447 ' Muk' 4 +20448 ' Mul' 4 +20449 ' Mum' 4 +20450 ' Mun' 4 +20451 ' Mur' 4 +20452 ' Mus' 4 +20453 ' Mut' 4 +20454 ' Mys' 4 +20455 ' Mé' 4 +20456 ' Mö' 4 +20457 ' Mü' 4 +20458 ' NAD' 4 +20459 ' NAS' 4 +20460 ' NAT' 4 +20461 ' NBA' 4 +20462 ' NBC' 4 +20463 ' NEC' 4 +20464 ' NET' 4 +20465 ' NEW' 4 +20466 ' NFC' 4 +20467 ' NFL' 4 +20468 ' NGC' 4 +20469 ' NGO' 4 +20470 ' NHL' 4 +20471 ' NHS' 4 +20472 ' NIC' 4 +20473 ' NIH' 4 +20474 ' NON' 4 +20475 ' NOR' 4 +20476 ' NOT' 4 +20477 ' NOW' 4 +20478 ' NPC' 4 +20479 ' NPR' 4 +20480 ' NRA' 4 +20481 ' NSA' 4 +20482 ' NSW' 4 +20483 ' NUM' 4 +20484 ' NYC' 4 +20485 ' NaN' 4 +20486 ' Nab' 4 +20487 ' Nad' 4 +20488 ' Nag' 4 +20489 ' Nah' 4 +20490 ' Naj' 4 +20491 ' Nak' 4 +20492 ' Nam' 4 +20493 ' Nan' 4 +20494 ' Nap' 4 +20495 ' Nar' 4 +20496 ' Nas' 4 +20497 ' Nat' 4 +20498 ' Nav' 4 +20499 ' Naz' 4 +20500 ' Neb' 4 +20501 ' Nec' 4 +20502 ' Ned' 4 +20503 ' Neg' 4 +20504 ' Nel' 4 +20505 ' Nem' 4 +20506 ' Neo' 4 +20507 ' Nep' 4 +20508 ' Ner' 4 +20509 ' Net' 4 +20510 ' Neu' 4 +20511 ' Nev' 4 +20512 ' New' 4 +20513 ' Nex' 4 +20514 ' Nic' 4 +20515 ' Nie' 4 +20516 ' Nig' 4 +20517 ' Nik' 4 +20518 ' Nil' 4 +20519 ' Nim' 4 +20520 ' Nin' 4 +20521 ' Nit' 4 +20522 ' Nob' 4 +20523 ' Nom' 4 +20524 ' Non' 4 +20525 ' Nor' 4 +20526 ' Nos' 4 +20527 ' Not' 4 +20528 ' Nou' 4 +20529 ' Nov' 4 +20530 ' Now' 4 +20531 ' Nug' 4 +20532 ' Num' 4 +20533 ' Nun' 4 +20534 ' Nur' 4 +20535 ' Nut' 4 +20536 ' Nä' 4 +20537 ' Né' 4 +20538 ' OCD' 4 +20539 ' OCT' 4 +20540 ' OFF' 4 +20541 ' ONE' 4 +20542 ' OPT' 4 +20543 ' OUR' 4 +20544 ' OUT' 4 +20545 ' Oak' 4 +20546 ' Obj' 4 +20547 ' Obl' 4 +20548 ' Obs' 4 +20549 ' Occ' 4 +20550 ' Oct' 4 +20551 ' Odd' 4 +20552 ' Off' 4 +20553 ' Oil' 4 +20554 ' Old' 4 +20555 ' Ole' 4 +20556 ' One' 4 +20557 ' Ont' 4 +20558 ' Opp' 4 +20559 ' Ops' 4 +20560 ' Opt' 4 +20561 ' Orb' 4 +20562 ' Ord' 4 +20563 ' Ore' 4 +20564 ' Org' 4 +20565 ' Ori' 4 +20566 ' Orn' 4 +20567 ' Ort' 4 +20568 ' Osc' 4 +20569 ' Ost' 4 +20570 ' Ott' 4 +20571 ' Our' 4 +20572 ' Out' 4 +20573 ' Own' 4 +20574 ' PAC' 4 +20575 ' PAD' 4 +20576 ' PAL' 4 +20577 ' PAN' 4 +20578 ' PAR' 4 +20579 ' PAS' 4 +20580 ' PAT' 4 +20581 ' PAY' 4 +20582 ' PBS' 4 +20583 ' PCA' 4 +20584 ' PCB' 4 +20585 ' PCI' 4 +20586 ' PCR' 4 +20587 ' PCs' 4 +20588 ' PDB' 4 +20589 ' PDF' 4 +20590 ' PDO' 4 +20591 ' PDT' 4 +20592 ' PEM' 4 +20593 ' PER' 4 +20594 ' PET' 4 +20595 ' PHP' 4 +20596 ' PHY' 4 +20597 ' PID' 4 +20598 ' PIL' 4 +20599 ' PIN' 4 +20600 ' PLA' 4 +20601 ' PLC' 4 +20602 ' PLL' 4 +20603 ' PMC' 4 +20604 ' PNG' 4 +20605 ' POL' 4 +20606 ' POP' 4 +20607 ' POS' 4 +20608 
' PPP' 4 +20609 ' PRE' 4 +20610 ' PRI' 4 +20611 ' PRO' 4 +20612 ' PSA' 4 +20613 ' PSD' 4 +20614 ' PST' 4 +20615 ' PUR' 4 +20616 ' PUT' 4 +20617 ' PVC' 4 +20618 ' Pac' 4 +20619 ' Pad' 4 +20620 ' Pag' 4 +20621 ' Pak' 4 +20622 ' Pal' 4 +20623 ' Pam' 4 +20624 ' Pan' 4 +20625 ' Pap' 4 +20626 ' Par' 4 +20627 ' Pas' 4 +20628 ' Pat' 4 +20629 ' Pav' 4 +20630 ' Paw' 4 +20631 ' Pay' 4 +20632 ' Paz' 4 +20633 ' Pdf' 4 +20634 ' Pec' 4 +20635 ' Ped' 4 +20636 ' Peg' 4 +20637 ' Pel' 4 +20638 ' Pen' 4 +20639 ' Pep' 4 +20640 ' Per' 4 +20641 ' Pes' 4 +20642 ' Pet' 4 +20643 ' PhD' 4 +20644 ' Phi' 4 +20645 ' Pho' 4 +20646 ' Pic' 4 +20647 ' Pie' 4 +20648 ' Pig' 4 +20649 ' Pik' 4 +20650 ' Pil' 4 +20651 ' Pin' 4 +20652 ' Pip' 4 +20653 ' Pir' 4 +20654 ' Pis' 4 +20655 ' Pit' 4 +20656 ' Pix' 4 +20657 ' Ple' 4 +20658 ' Ply' 4 +20659 ' Pod' 4 +20660 ' Pok' 4 +20661 ' Pol' 4 +20662 ' Pom' 4 +20663 ' Pon' 4 +20664 ' Pop' 4 +20665 ' Por' 4 +20666 ' Pos' 4 +20667 ' Pot' 4 +20668 ' Pow' 4 +20669 ' Poz' 4 +20670 ' Pra' 4 +20671 ' Pre' 4 +20672 ' Pri' 4 +20673 ' Pro' 4 +20674 ' Psy' 4 +20675 ' Pub' 4 +20676 ' Pul' 4 +20677 ' Pun' 4 +20678 ' Pur' 4 +20679 ' Put' 4 +20680 ' Pé' 4 +20681 ' QUE' 4 +20682 ' Que' 4 +20683 ' Qui' 4 +20684 ' RAD' 4 +20685 ' RAF' 4 +20686 ' RAM' 4 +20687 ' RAW' 4 +20688 ' RBI' 4 +20689 ' REC' 4 +20690 ' RED' 4 +20691 ' REF' 4 +20692 ' REG' 4 +20693 ' REL' 4 +20694 ' REM' 4 +20695 ' RES' 4 +20696 ' RET' 4 +20697 ' RFC' 4 +20698 ' RGB' 4 +20699 ' RIP' 4 +20700 ' RMS' 4 +20701 ' RNA' 4 +20702 ' ROC' 4 +20703 ' ROI' 4 +20704 ' ROM' 4 +20705 ' ROS' 4 +20706 ' ROT' 4 +20707 ' RPC' 4 +20708 ' RPG' 4 +20709 ' RPM' 4 +20710 ' RSA' 4 +20711 ' RSS' 4 +20712 ' RUN' 4 +20713 ' Rab' 4 +20714 ' Rac' 4 +20715 ' Rad' 4 +20716 ' Raf' 4 +20717 ' Rag' 4 +20718 ' Rah' 4 +20719 ' Raj' 4 +20720 ' Rak' 4 +20721 ' Ram' 4 +20722 ' Ran' 4 +20723 ' Rao' 4 +20724 ' Rap' 4 +20725 ' Ras' 4 +20726 ' Rat' 4 +20727 ' Rav' 4 +20728 ' Raw' 4 +20729 ' Ray' 4 +20730 ' Raz' 4 +20731 ' Reb' 4 +20732 ' Rec' 4 +20733 ' Red' 4 +20734 ' Ref' 4 +20735 ' Reg' 4 +20736 ' Rei' 4 +20737 ' Rel' 4 +20738 ' Rem' 4 +20739 ' Ren' 4 +20740 ' Rep' 4 +20741 ' Res' 4 +20742 ' Ret' 4 +20743 ' Rev' 4 +20744 ' Rew' 4 +20745 ' Rex' 4 +20746 ' Rey' 4 +20747 ' Rhe' 4 +20748 ' Rib' 4 +20749 ' Ric' 4 +20750 ' Rid' 4 +20751 ' Rif' 4 +20752 ' Rig' 4 +20753 ' Rim' 4 +20754 ' Rin' 4 +20755 ' Rio' 4 +20756 ' Rip' 4 +20757 ' Ris' 4 +20758 ' Rit' 4 +20759 ' Riv' 4 +20760 ' Rob' 4 +20761 ' Roc' 4 +20762 ' Rod' 4 +20763 ' Rog' 4 +20764 ' Roh' 4 +20765 ' Rol' 4 +20766 ' Rom' 4 +20767 ' Ron' 4 +20768 ' Ros' 4 +20769 ' Rot' 4 +20770 ' Rou' 4 +20771 ' Row' 4 +20772 ' Rox' 4 +20773 ' Roy' 4 +20774 ' Rub' 4 +20775 ' Rud' 4 +20776 ' Rue' 4 +20777 ' Rug' 4 +20778 ' Rum' 4 +20779 ' Run' 4 +20780 ' Rus' 4 +20781 ' Rut' 4 +20782 ' Ré' 4 +20783 ' Rö' 4 +20784 ' SAF' 4 +20785 ' SAL' 4 +20786 ' SAM' 4 +20787 ' SAN' 4 +20788 ' SAP' 4 +20789 ' SAR' 4 +20790 ' SAS' 4 +20791 ' SAT' 4 +20792 ' SCH' 4 +20793 ' SCI' 4 +20794 ' SCM' 4 +20795 ' SCO' 4 +20796 ' SDK' 4 +20797 ' SDL' 4 +20798 ' SDS' 4 +20799 ' SEC' 4 +20800 ' SEE' 4 +20801 ' SEL' 4 +20802 ' SEM' 4 +20803 ' SEO' 4 +20804 ' SER' 4 +20805 ' SES' 4 +20806 ' SET' 4 +20807 ' SGD' 4 +20808 ' SHA' 4 +20809 ' SHE' 4 +20810 ' SHO' 4 +20811 ' SIG' 4 +20812 ' SIL' 4 +20813 ' SIM' 4 +20814 ' SIP' 4 +20815 ' SMB' 4 +20816 ' SMS' 4 +20817 ' SNP' 4 +20818 ' SOC' 4 +20819 ' SOL' 4 +20820 ' SOM' 4 +20821 ' SOS' 4 +20822 ' SPD' 4 +20823 ' SPE' 4 +20824 ' SPI' 4 +20825 ' SPR' 4 +20826 ' SQL' 4 +20827 ' SSD' 4 +20828 ' SSH' 4 +20829 ' SSL' 4 +20830 ' 
STD' 4 +20831 ' STE' 4 +20832 ' STR' 4 +20833 ' SUB' 4 +20834 ' SUM' 4 +20835 ' SUN' 4 +20836 ' SUP' 4 +20837 ' SUR' 4 +20838 ' SUS' 4 +20839 ' SUV' 4 +20840 ' SVG' 4 +20841 ' SVM' 4 +20842 ' Sab' 4 +20843 ' Sac' 4 +20844 ' Sad' 4 +20845 ' Saf' 4 +20846 ' Sag' 4 +20847 ' Sah' 4 +20848 ' Sai' 4 +20849 ' Sak' 4 +20850 ' Sal' 4 +20851 ' Sam' 4 +20852 ' San' 4 +20853 ' Sap' 4 +20854 ' Sar' 4 +20855 ' Sas' 4 +20856 ' Sat' 4 +20857 ' Sau' 4 +20858 ' Sav' 4 +20859 ' Saw' 4 +20860 ' Sax' 4 +20861 ' Say' 4 +20862 ' Sch' 4 +20863 ' Sci' 4 +20864 ' Sco' 4 +20865 ' Scr' 4 +20866 ' Sea' 4 +20867 ' Sec' 4 +20868 ' Sed' 4 +20869 ' See' 4 +20870 ' Seg' 4 +20871 ' Sek' 4 +20872 ' Sel' 4 +20873 ' Sem' 4 +20874 ' Sen' 4 +20875 ' Sep' 4 +20876 ' Seq' 4 +20877 ' Ser' 4 +20878 ' Ses' 4 +20879 ' Set' 4 +20880 ' Sew' 4 +20881 ' Sex' 4 +20882 ' Sey' 4 +20883 ' Sgt' 4 +20884 ' Sha' 4 +20885 ' She' 4 +20886 ' Shi' 4 +20887 ' Sho' 4 +20888 ' Sic' 4 +20889 ' Sid' 4 +20890 ' Sie' 4 +20891 ' Sig' 4 +20892 ' Sik' 4 +20893 ' Sil' 4 +20894 ' Sim' 4 +20895 ' Sin' 4 +20896 ' Sir' 4 +20897 ' Sit' 4 +20898 ' Six' 4 +20899 ' Ske' 4 +20900 ' Ski' 4 +20901 ' Sky' 4 +20902 ' Sob' 4 +20903 ' Soc' 4 +20904 ' Sof' 4 +20905 ' Sok' 4 +20906 ' Sol' 4 +20907 ' Som' 4 +20908 ' Son' 4 +20909 ' Sor' 4 +20910 ' Sou' 4 +20911 ' Sov' 4 +20912 ' Sox' 4 +20913 ' Soy' 4 +20914 ' Spa' 4 +20915 ' Spe' 4 +20916 ' Spl' 4 +20917 ' Spo' 4 +20918 ' Spr' 4 +20919 ' Spy' 4 +20920 ' Sql' 4 +20921 ' Squ' 4 +20922 ' Sri' 4 +20923 ' Sta' 4 +20924 ' Ste' 4 +20925 ' Sto' 4 +20926 ' Str' 4 +20927 ' Sty' 4 +20928 ' Sub' 4 +20929 ' Suc' 4 +20930 ' Sud' 4 +20931 ' Sue' 4 +20932 ' Sug' 4 +20933 ' Suk' 4 +20934 ' Sul' 4 +20935 ' Sum' 4 +20936 ' Sun' 4 +20937 ' Sup' 4 +20938 ' Sur' 4 +20939 ' Sus' 4 +20940 ' Suz' 4 +20941 ' Swe' 4 +20942 ' Syd' 4 +20943 ' Syl' 4 +20944 ' Sym' 4 +20945 ' Syn' 4 +20946 ' Sys' 4 +20947 ' Sé' 4 +20948 ' Sü' 4 +20949 ' TAB' 4 +20950 ' TAG' 4 +20951 ' TAM' 4 +20952 ' TCP' 4 +20953 ' TED' 4 +20954 ' TEM' 4 +20955 ' TER' 4 +20956 ' THE' 4 +20957 ' TIM' 4 +20958 ' TLS' 4 +20959 ' TOD' 4 +20960 ' TOP' 4 +20961 ' TPP' 4 +20962 ' TRA' 4 +20963 ' TRE' 4 +20964 ' TRI' 4 +20965 ' TWO' 4 +20966 ' Tab' 4 +20967 ' Tac' 4 +20968 ' Tag' 4 +20969 ' Tah' 4 +20970 ' Tai' 4 +20971 ' Taj' 4 +20972 ' Tak' 4 +20973 ' Tal' 4 +20974 ' Tam' 4 +20975 ' Tan' 4 +20976 ' Tao' 4 +20977 ' Tap' 4 +20978 ' Tar' 4 +20979 ' Tas' 4 +20980 ' Tat' 4 +20981 ' Tau' 4 +20982 ' Tax' 4 +20983 ' Tay' 4 +20984 ' Tea' 4 +20985 ' Tec' 4 +20986 ' Ted' 4 +20987 ' Teh' 4 +20988 ' Tek' 4 +20989 ' Tel' 4 +20990 ' Tem' 4 +20991 ' Ten' 4 +20992 ' Ter' 4 +20993 ' Tes' 4 +20994 ' Tet' 4 +20995 ' Tex' 4 +20996 ' The' 4 +20997 ' Thi' 4 +20998 ' Thr' 4 +20999 ' Thu' 4 +21000 ' Thy' 4 +21001 ' Tib' 4 +21002 ' Tie' 4 +21003 ' Tig' 4 +21004 ' Tik' 4 +21005 ' Til' 4 +21006 ' Tim' 4 +21007 ' Tin' 4 +21008 ' Tip' 4 +21009 ' Tir' 4 +21010 ' Tit' 4 +21011 ' Tob' 4 +21012 ' Tod' 4 +21013 ' Tok' 4 +21014 ' Tol' 4 +21015 ' Tom' 4 +21016 ' Ton' 4 +21017 ' Too' 4 +21018 ' Top' 4 +21019 ' Tor' 4 +21020 ' Tos' 4 +21021 ' Tot' 4 +21022 ' Tou' 4 +21023 ' Tow' 4 +21024 ' Toy' 4 +21025 ' Tra' 4 +21026 ' Tre' 4 +21027 ' Tri' 4 +21028 ' Tro' 4 +21029 ' Tru' 4 +21030 ' Try' 4 +21031 ' Tub' 4 +21032 ' Tuc' 4 +21033 ' Tud' 4 +21034 ' Tue' 4 +21035 ' Tul' 4 +21036 ' Tum' 4 +21037 ' Tun' 4 +21038 ' Tur' 4 +21039 ' Tus' 4 +21040 ' Tut' 4 +21041 ' Twe' 4 +21042 ' Two' 4 +21043 ' Typ' 4 +21044 ' Tyr' 4 +21045 ' UAE' 4 +21046 ' UDP' 4 +21047 ' UFC' 4 +21048 ' UFO' 4 +21049 ' UID' 4 +21050 ' UIT' 4 +21051 ' UPS' 4 +21052 ' URI' 
4 +21053 ' URL' 4 +21054 ' USA' 4 +21055 ' USB' 4 +21056 ' USC' 4 +21057 ' USD' 4 +21058 ' USE' 4 +21059 ' USS' 4 +21060 ' UTC' 4 +21061 ' UTF' 4 +21062 ' Uhr' 4 +21063 ' Ult' 4 +21064 ' Una' 4 +21065 ' Und' 4 +21066 ' Une' 4 +21067 ' Ung' 4 +21068 ' Uni' 4 +21069 ' Uns' 4 +21070 ' Unt' 4 +21071 ' Urb' 4 +21072 ' Uri' 4 +21073 ' Url' 4 +21074 ' Urs' 4 +21075 ' Use' 4 +21076 ' Utt' 4 +21077 ' VAL' 4 +21078 ' VAR' 4 +21079 ' VAT' 4 +21080 ' VER' 4 +21081 ' VID' 4 +21082 ' VII' 4 +21083 ' VIP' 4 +21084 ' VIS' 4 +21085 ' VOC' 4 +21086 ' VOL' 4 +21087 ' VPN' 4 +21088 ' Vac' 4 +21089 ' Val' 4 +21090 ' Van' 4 +21091 ' Var' 4 +21092 ' Vas' 4 +21093 ' Vec' 4 +21094 ' Ved' 4 +21095 ' Veg' 4 +21096 ' Veh' 4 +21097 ' Vel' 4 +21098 ' Ven' 4 +21099 ' Ver' 4 +21100 ' Ves' 4 +21101 ' Via' 4 +21102 ' Vic' 4 +21103 ' Vid' 4 +21104 ' Vie' 4 +21105 ' Vig' 4 +21106 ' Vij' 4 +21107 ' Vik' 4 +21108 ' Vil' 4 +21109 ' Vim' 4 +21110 ' Vin' 4 +21111 ' Vir' 4 +21112 ' Vis' 4 +21113 ' Vit' 4 +21114 ' Viv' 4 +21115 ' Voc' 4 +21116 ' Vog' 4 +21117 ' Vol' 4 +21118 ' Von' 4 +21119 ' Vor' 4 +21120 ' Vox' 4 +21121 ' Voy' 4 +21122 ' Vue' 4 +21123 ' Vul' 4 +21124 ' Vé' 4 +21125 ' WAR' 4 +21126 ' WAS' 4 +21127 ' WAY' 4 +21128 ' WEB' 4 +21129 ' WHO' 4 +21130 ' WIN' 4 +21131 ' WIT' 4 +21132 ' WOR' 4 +21133 ' WWE' 4 +21134 ' Wah' 4 +21135 ' Wak' 4 +21136 ' Wal' 4 +21137 ' Wan' 4 +21138 ' War' 4 +21139 ' Was' 4 +21140 ' Wat' 4 +21141 ' Way' 4 +21142 ' Web' 4 +21143 ' Wed' 4 +21144 ' Wei' 4 +21145 ' Wel' 4 +21146 ' Wen' 4 +21147 ' Wer' 4 +21148 ' Wes' 4 +21149 ' Wet' 4 +21150 ' Whe' 4 +21151 ' Who' 4 +21152 ' Why' 4 +21153 ' Wid' 4 +21154 ' Wie' 4 +21155 ' Wii' 4 +21156 ' Wik' 4 +21157 ' Wil' 4 +21158 ' Win' 4 +21159 ' Wir' 4 +21160 ' Wis' 4 +21161 ' Wit' 4 +21162 ' Wol' 4 +21163 ' Won' 4 +21164 ' Woo' 4 +21165 ' Wor' 4 +21166 ' Wow' 4 +21167 ' Wyn' 4 +21168 ' XII' 4 +21169 ' XIV' 4 +21170 ' XML' 4 +21171 ' XOR' 4 +21172 ' XVI' 4 +21173 ' XXX' 4 +21174 ' Xen' 4 +21175 ' Xia' 4 +21176 ' Xin' 4 +21177 ' Xml' 4 +21178 ' YES' 4 +21179 ' YOU' 4 +21180 ' Yad' 4 +21181 ' Yak' 4 +21182 ' Yam' 4 +21183 ' Yan' 4 +21184 ' Yao' 4 +21185 ' Yas' 4 +21186 ' Yes' 4 +21187 ' Yet' 4 +21188 ' Yin' 4 +21189 ' You' 4 +21190 ' Yuk' 4 +21191 ' Yun' 4 +21192 ' Yus' 4 +21193 ' ZIP' 4 +21194 ' Zag' 4 +21195 ' Zah' 4 +21196 ' Zak' 4 +21197 ' Zam' 4 +21198 ' Zap' 4 +21199 ' Zar' 4 +21200 ' Zel' 4 +21201 ' Zen' 4 +21202 ' Zhu' 4 +21203 ' Zig' 4 +21204 ' Zip' 4 +21205 ' Zoe' 4 +21206 ' Zoo' 4 +21207 ' Zum' 4 +21208 ' Zur' 4 +21209 ' Zwe' 4 +21210 ' [],' 4 +21211 ' [];' 4 +21212 ' ___' 4 +21213 ' ``(' 4 +21214 ' ```' 4 +21215 ' aan' 4 +21216 ' abb' 4 +21217 ' abc' 4 +21218 ' abi' 4 +21219 ' abl' 4 +21220 ' abs' 4 +21221 ' aby' 4 +21222 ' acc' 4 +21223 ' ace' 4 +21224 ' ach' 4 +21225 ' acl' 4 +21226 ' act' 4 +21227 ' add' 4 +21228 ' ade' 4 +21229 ' adj' 4 +21230 ' adm' 4 +21231 ' ado' 4 +21232 ' ads' 4 +21233 ' adv' 4 +21234 ' aer' 4 +21235 ' aes' 4 +21236 ' aff' 4 +21237 ' aft' 4 +21238 ' age' 4 +21239 ' agg' 4 +21240 ' ago' 4 +21241 ' agr' 4 +21242 ' aid' 4 +21243 ' ail' 4 +21244 ' aim' 4 +21245 ' ain' 4 +21246 ' air' 4 +21247 ' aka' 4 +21248 ' akt' 4 +21249 ' alb' 4 +21250 ' alc' 4 +21251 ' ald' 4 +21252 ' ale' 4 +21253 ' alg' 4 +21254 ' ali' 4 +21255 ' alk' 4 +21256 ' all' 4 +21257 ' als' 4 +21258 ' alt' 4 +21259 ' ama' 4 +21260 ' amb' 4 +21261 ' ami' 4 +21262 ' amp' 4 +21263 ' ana' 4 +21264 ' anc' 4 +21265 ' and' 4 +21266 ' ang' 4 +21267 ' ank' 4 +21268 ' ann' 4 +21269 ' ano' 4 +21270 ' ans' 4 +21271 ' ant' 4 +21272 ' anx' 4 +21273 ' any' 4 +21274 ' aos' 4 
+21275 ' aph' 4 +21276 ' api' 4 +21277 ' apo' 4 +21278 ' app' 4 +21279 ' apr' 4 +21280 ' apt' 4 +21281 ' aqu' 4 +21282 ' ara' 4 +21283 ' arb' 4 +21284 ' arc' 4 +21285 ' ard' 4 +21286 ' are' 4 +21287 ' arg' 4 +21288 ' ark' 4 +21289 ' arm' 4 +21290 ' arr' 4 +21291 ' art' 4 +21292 ' asc' 4 +21293 ' ash' 4 +21294 ' asi' 4 +21295 ' ask' 4 +21296 ' asm' 4 +21297 ' asp' 4 +21298 ' ass' 4 +21299 ' ast' 4 +21300 ' ate' 4 +21301 ' ath' 4 +21302 ' atm' 4 +21303 ' att' 4 +21304 ' auc' 4 +21305 ' aud' 4 +21306 ' auf' 4 +21307 ' aug' 4 +21308 ' aur' 4 +21309 ' aus' 4 +21310 ' aut' 4 +21311 ' aux' 4 +21312 ' ave' 4 +21313 ' avg' 4 +21314 ' avo' 4 +21315 ' awa' 4 +21316 ' awe' 4 +21317 ' awk' 4 +21318 ' aws' 4 +21319 ' axe' 4 +21320 ' aç' 4 +21321 ' añ' 4 +21322 ' až' 4 +21323 ' bab' 4 +21324 ' bac' 4 +21325 ' bad' 4 +21326 ' bag' 4 +21327 ' bak' 4 +21328 ' bal' 4 +21329 ' bam' 4 +21330 ' ban' 4 +21331 ' bar' 4 +21332 ' bas' 4 +21333 ' bat' 4 +21334 ' bay' 4 +21335 ' baz' 4 +21336 ' bec' 4 +21337 ' bed' 4 +21338 ' bee' 4 +21339 ' bef' 4 +21340 ' beg' 4 +21341 ' beh' 4 +21342 ' bei' 4 +21343 ' bek' 4 +21344 ' bel' 4 +21345 ' bem' 4 +21346 ' ben' 4 +21347 ' ber' 4 +21348 ' bes' 4 +21349 ' bet' 4 +21350 ' bew' 4 +21351 ' bey' 4 +21352 ' bez' 4 +21353 ' bib' 4 +21354 ' bic' 4 +21355 ' bid' 4 +21356 ' bif' 4 +21357 ' big' 4 +21358 ' bij' 4 +21359 ' bil' 4 +21360 ' bin' 4 +21361 ' bio' 4 +21362 ' bip' 4 +21363 ' bir' 4 +21364 ' bis' 4 +21365 ' bit' 4 +21366 ' biz' 4 +21367 ' bla' 4 +21368 ' ble' 4 +21369 ' blk' 4 +21370 ' blo' 4 +21371 ' bob' 4 +21372 ' bod' 4 +21373 ' bog' 4 +21374 ' bol' 4 +21375 ' bom' 4 +21376 ' bon' 4 +21377 ' boo' 4 +21378 ' bor' 4 +21379 ' bos' 4 +21380 ' bot' 4 +21381 ' bou' 4 +21382 ' bow' 4 +21383 ' box' 4 +21384 ' boy' 4 +21385 ' bra' 4 +21386 ' bre' 4 +21387 ' bri' 4 +21388 ' bro' 4 +21389 ' bru' 4 +21390 ' btn' 4 +21391 ' bub' 4 +21392 ' bud' 4 +21393 ' buf' 4 +21394 ' bug' 4 +21395 ' bul' 4 +21396 ' bum' 4 +21397 ' bun' 4 +21398 ' bur' 4 +21399 ' bus' 4 +21400 ' but' 4 +21401 ' buy' 4 +21402 ' bye' 4 +21403 ' bzw' 4 +21404 ' bä' 4 +21405 ' bé' 4 +21406 ' bý' 4 +21407 ' cab' 4 +21408 ' cac' 4 +21409 ' cad' 4 +21410 ' caf' 4 +21411 ' cal' 4 +21412 ' cam' 4 +21413 ' can' 4 +21414 ' cap' 4 +21415 ' car' 4 +21416 ' cas' 4 +21417 ' cat' 4 +21418 ' cav' 4 +21419 ' cel' 4 +21420 ' cen' 4 +21421 ' cep' 4 +21422 ' cer' 4 +21423 ' ces' 4 +21424 ' cet' 4 +21425 ' cfg' 4 +21426 ' cha' 4 +21427 ' che' 4 +21428 ' chi' 4 +21429 ' chk' 4 +21430 ' cho' 4 +21431 ' chr' 4 +21432 ' cic' 4 +21433 ' cid' 4 +21434 ' cig' 4 +21435 ' cin' 4 +21436 ' cir' 4 +21437 ' cis' 4 +21438 ' cit' 4 +21439 ' civ' 4 +21440 ' cla' 4 +21441 ' cle' 4 +21442 ' cli' 4 +21443 ' clo' 4 +21444 ' cls' 4 +21445 ' cmd' 4 +21446 ' cmp' 4 +21447 ' cnt' 4 +21448 ' cob' 4 +21449 ' coc' 4 +21450 ' cod' 4 +21451 ' cof' 4 +21452 ' cog' 4 +21453 ' coh' 4 +21454 ' col' 4 +21455 ' com' 4 +21456 ' con' 4 +21457 ' cop' 4 +21458 ' cor' 4 +21459 ' cos' 4 +21460 ' cot' 4 +21461 ' cou' 4 +21462 ' cov' 4 +21463 ' cow' 4 +21464 ' coy' 4 +21465 ' cpu' 4 +21466 ' cra' 4 +21467 ' cre' 4 +21468 ' cri' 4 +21469 ' cro' 4 +21470 ' cru' 4 +21471 ' cry' 4 +21472 ' css' 4 +21473 ' csv' 4 +21474 ' ctx' 4 +21475 ' cub' 4 +21476 ' cuc' 4 +21477 ' cue' 4 +21478 ' cui' 4 +21479 ' cul' 4 +21480 ' cum' 4 +21481 ' cup' 4 +21482 ' cur' 4 +21483 ' cus' 4 +21484 ' cut' 4 +21485 ' cyl' 4 +21486 ' cyn' 4 +21487 ' cyt' 4 +21488 ' cá' 4 +21489 ' câ' 4 +21490 ' cé' 4 +21491 ' cí' 4 +21492 ' có' 4 +21493 ' cô' 4 +21494 ' că' 4 +21495 ' cơ' 4 +21496 ' dab' 4 +21497 ' dad' 4 
+21498 ' dag' 4 +21499 ' dah' 4 +21500 ' dai' 4 +21501 ' dal' 4 +21502 ' dam' 4 +21503 ' dan' 4 +21504 ' dao' 4 +21505 ' dar' 4 +21506 ' das' 4 +21507 ' dat' 4 +21508 ' dav' 4 +21509 ' day' 4 +21510 ' dbo' 4 +21511 ' deb' 4 +21512 ' dec' 4 +21513 ' ded' 4 +21514 ' dee' 4 +21515 ' def' 4 +21516 ' deg' 4 +21517 ' dei' 4 +21518 ' dej' 4 +21519 ' del' 4 +21520 ' dem' 4 +21521 ' den' 4 +21522 ' dep' 4 +21523 ' der' 4 +21524 ' des' 4 +21525 ' det' 4 +21526 ' dev' 4 +21527 ' dex' 4 +21528 ' dez' 4 +21529 ' dia' 4 +21530 ' dib' 4 +21531 ' dic' 4 +21532 ' did' 4 +21533 ' die' 4 +21534 ' dif' 4 +21535 ' dig' 4 +21536 ' dil' 4 +21537 ' dim' 4 +21538 ' din' 4 +21539 ' dio' 4 +21540 ' dip' 4 +21541 ' dir' 4 +21542 ' dis' 4 +21543 ' dit' 4 +21544 ' div' 4 +21545 ' diz' 4 +21546 ' dll' 4 +21547 ' dns' 4 +21548 ' dob' 4 +21549 ' doc' 4 +21550 ' dod' 4 +21551 ' dog' 4 +21552 ' doi' 4 +21553 ' dok' 4 +21554 ' dol' 4 +21555 ' dom' 4 +21556 ' don' 4 +21557 ' dop' 4 +21558 ' dor' 4 +21559 ' dos' 4 +21560 ' dot' 4 +21561 ' dou' 4 +21562 ' dow' 4 +21563 ' dpi' 4 +21564 ' dra' 4 +21565 ' dre' 4 +21566 ' dri' 4 +21567 ' dro' 4 +21568 ' dru' 4 +21569 ' dry' 4 +21570 ' dst' 4 +21571 ' dub' 4 +21572 ' due' 4 +21573 ' dug' 4 +21574 ' dum' 4 +21575 ' dun' 4 +21576 ' duo' 4 +21577 ' dup' 4 +21578 ' dur' 4 +21579 ' dus' 4 +21580 ' dut' 4 +21581 ' dye' 4 +21582 ' dyn' 4 +21583 ' dys' 4 +21584 ' dá' 4 +21585 ' då' 4 +21586 ' dé' 4 +21587 ' dí' 4 +21588 ' dó' 4 +21589 ' dü' 4 +21590 ' ear' 4 +21591 ' eas' 4 +21592 ' eat' 4 +21593 ' ecc' 4 +21594 ' ech' 4 +21595 ' ecl' 4 +21596 ' eco' 4 +21597 ' ect' 4 +21598 ' eds' 4 +21599 ' een' 4 +21600 ' eer' 4 +21601 ' eff' 4 +21602 ' egg' 4 +21603 ' ego' 4 +21604 ' egy' 4 +21605 ' eig' 4 +21606 ' ein' 4 +21607 ' ela' 4 +21608 ' ele' 4 +21609 ' elf' 4 +21610 ' ell' 4 +21611 ' els' 4 +21612 ' emb' 4 +21613 ' emo' 4 +21614 ' emp' 4 +21615 ' enc' 4 +21616 ' end' 4 +21617 ' enf' 4 +21618 ' eng' 4 +21619 ' enh' 4 +21620 ' ens' 4 +21621 ' ent' 4 +21622 ' env' 4 +21623 ' eos' 4 +21624 ' eps' 4 +21625 ' equ' 4 +21626 ' era' 4 +21627 ' ere' 4 +21628 ' erf' 4 +21629 ' erg' 4 +21630 ' ern' 4 +21631 ' err' 4 +21632 ' ers' 4 +21633 ' eru' 4 +21634 ' ery' 4 +21635 ' esa' 4 +21636 ' esc' 4 +21637 ' ese' 4 +21638 ' eso' 4 +21639 ' esp' 4 +21640 ' ess' 4 +21641 ' est' 4 +21642 ' eta' 4 +21643 ' etc' 4 +21644 ' eth' 4 +21645 ' ett' 4 +21646 ' eux' 4 +21647 ' eve' 4 +21648 ' evt' 4 +21649 ' exc' 4 +21650 ' exe' 4 +21651 ' exh' 4 +21652 ' exp' 4 +21653 ' ext' 4 +21654 ' eye' 4 +21655 ' fab' 4 +21656 ' fac' 4 +21657 ' fal' 4 +21658 ' fam' 4 +21659 ' fan' 4 +21660 ' far' 4 +21661 ' fas' 4 +21662 ' fat' 4 +21663 ' fav' 4 +21664 ' fax' 4 +21665 ' faz' 4 +21666 ' fed' 4 +21667 ' fee' 4 +21668 ' fel' 4 +21669 ' fem' 4 +21670 ' fen' 4 +21671 ' fer' 4 +21672 ' fet' 4 +21673 ' feu' 4 +21674 ' few' 4 +21675 ' fft' 4 +21676 ' fib' 4 +21677 ' fic' 4 +21678 ' fid' 4 +21679 ' fif' 4 +21680 ' fig' 4 +21681 ' fil' 4 +21682 ' fim' 4 +21683 ' fin' 4 +21684 ' fir' 4 +21685 ' fis' 4 +21686 ' fit' 4 +21687 ' fix' 4 +21688 ' fla' 4 +21689 ' fle' 4 +21690 ' flo' 4 +21691 ' flu' 4 +21692 ' fly' 4 +21693 ' fmt' 4 +21694 ' foc' 4 +21695 ' fog' 4 +21696 ' foi' 4 +21697 ' fol' 4 +21698 ' fon' 4 +21699 ' foo' 4 +21700 ' for' 4 +21701 ' fos' 4 +21702 ' fot' 4 +21703 ' fou' 4 +21704 ' fox' 4 +21705 ' fra' 4 +21706 ' fre' 4 +21707 ' fri' 4 +21708 ' frm' 4 +21709 ' fro' 4 +21710 ' fru' 4 +21711 ' fry' 4 +21712 ' ftp' 4 +21713 ' fue' 4 +21714 ' fug' 4 +21715 ' ful' 4 +21716 ' fun' 4 +21717 ' fur' 4 +21718 ' fus' 4 +21719 ' fut' 4 +21720 ' 
fá' 4 +21721 ' få' 4 +21722 ' fé' 4 +21723 ' fö' 4 +21724 ' fø' 4 +21725 ' fő' 4 +21726 ' gab' 4 +21727 ' gad' 4 +21728 ' gag' 4 +21729 ' gal' 4 +21730 ' gam' 4 +21731 ' gan' 4 +21732 ' gap' 4 +21733 ' gar' 4 +21734 ' gas' 4 +21735 ' gau' 4 +21736 ' gay' 4 +21737 ' gaz' 4 +21738 ' gcc' 4 +21739 ' gcd' 4 +21740 ' geb' 4 +21741 ' ged' 4 +21742 ' gef' 4 +21743 ' geg' 4 +21744 ' gek' 4 +21745 ' gel' 4 +21746 ' gem' 4 +21747 ' gen' 4 +21748 ' geo' 4 +21749 ' ger' 4 +21750 ' ges' 4 +21751 ' get' 4 +21752 ' gew' 4 +21753 ' gez' 4 +21754 ' gib' 4 +21755 ' gid' 4 +21756 ' gif' 4 +21757 ' gig' 4 +21758 ' gin' 4 +21759 ' gir' 4 +21760 ' git' 4 +21761 ' giv' 4 +21762 ' gle' 4 +21763 ' gli' 4 +21764 ' glo' 4 +21765 ' glu' 4 +21766 ' gly' 4 +21767 ' gob' 4 +21768 ' god' 4 +21769 ' gol' 4 +21770 ' gon' 4 +21771 ' gor' 4 +21772 ' got' 4 +21773 ' gou' 4 +21774 ' gpu' 4 +21775 ' gra' 4 +21776 ' gre' 4 +21777 ' gri' 4 +21778 ' gro' 4 +21779 ' gru' 4 +21780 ' gtk' 4 +21781 ' gui' 4 +21782 ' gul' 4 +21783 ' gum' 4 +21784 ' gun' 4 +21785 ' gut' 4 +21786 ' guy' 4 +21787 ' gym' 4 +21788 ' gå' 4 +21789 ' gé' 4 +21790 ' gö' 4 +21791 ' gü' 4 +21792 ' gł' 4 +21793 ' hab' 4 +21794 ' hac' 4 +21795 ' had' 4 +21796 ' hal' 4 +21797 ' ham' 4 +21798 ' han' 4 +21799 ' har' 4 +21800 ' has' 4 +21801 ' hat' 4 +21802 ' hav' 4 +21803 ' haw' 4 +21804 ' hay' 4 +21805 ' haz' 4 +21806 ' hed' 4 +21807 ' hel' 4 +21808 ' hem' 4 +21809 ' hen' 4 +21810 ' hep' 4 +21811 ' her' 4 +21812 ' hes' 4 +21813 ' het' 4 +21814 ' hex' 4 +21815 ' hey' 4 +21816 ' hid' 4 +21817 ' hig' 4 +21818 ' hij' 4 +21819 ' hil' 4 +21820 ' him' 4 +21821 ' hin' 4 +21822 ' hip' 4 +21823 ' his' 4 +21824 ' hit' 4 +21825 ' hob' 4 +21826 ' hoc' 4 +21827 ' hog' 4 +21828 ' hol' 4 +21829 ' hom' 4 +21830 ' hon' 4 +21831 ' hop' 4 +21832 ' hor' 4 +21833 ' hos' 4 +21834 ' hot' 4 +21835 ' how' 4 +21836 ' hrs' 4 +21837 ' htt' 4 +21838 ' hub' 4 +21839 ' hue' 4 +21840 ' hug' 4 +21841 ' huh' 4 +21842 ' hum' 4 +21843 ' hun' 4 +21844 ' hur' 4 +21845 ' hus' 4 +21846 ' hut' 4 +21847 ' hyd' 4 +21848 ' hym' 4 +21849 ' hyp' 4 +21850 ' há' 4 +21851 ' hä' 4 +21852 ' hå' 4 +21853 ' hé' 4 +21854 ' hö' 4 +21855 ' iOS' 4 +21856 ' ice' 4 +21857 ' ich' 4 +21858 ' ici' 4 +21859 ' icy' 4 +21860 ' ide' 4 +21861 ' idi' 4 +21862 ' ids' 4 +21863 ' idx' 4 +21864 ' iff' 4 +21865 ' ign' 4 +21866 ' ihm' 4 +21867 ' ihn' 4 +21868 ' ihr' 4 +21869 ' iii' 4 +21870 ' ile' 4 +21871 ' ili' 4 +21872 ' ill' 4 +21873 ' ils' 4 +21874 ' imb' 4 +21875 ' img' 4 +21876 ' imm' 4 +21877 ' imp' 4 +21878 ' inc' 4 +21879 ' ind' 4 +21880 ' inf' 4 +21881 ' ing' 4 +21882 ' inh' 4 +21883 ' ini' 4 +21884 ' inj' 4 +21885 ' ink' 4 +21886 ' inn' 4 +21887 ' ins' 4 +21888 ' int' 4 +21889 ' inv' 4 +21890 ' iod' 4 +21891 ' ion' 4 +21892 ' ios' 4 +21893 ' ips' 4 +21894 ' ire' 4 +21895 ' irr' 4 +21896 ' isn' 4 +21897 ' iso' 4 +21898 ' iss' 4 +21899 ' ist' 4 +21900 ' ith' 4 +21901 ' itr' 4 +21902 ' its' 4 +21903 ' iç' 4 +21904 ' iş' 4 +21905 ' jab' 4 +21906 ' jac' 4 +21907 ' jag' 4 +21908 ' jak' 4 +21909 ' jal' 4 +21910 ' jam' 4 +21911 ' jan' 4 +21912 ' jap' 4 +21913 ' jar' 4 +21914 ' jas' 4 +21915 ' jav' 4 +21916 ' jaw' 4 +21917 ' jed' 4 +21918 ' jej' 4 +21919 ' jel' 4 +21920 ' jer' 4 +21921 ' jet' 4 +21922 ' jeu' 4 +21923 ' jew' 4 +21924 ' job' 4 +21925 ' jog' 4 +21926 ' jou' 4 +21927 ' joy' 4 +21928 ' jud' 4 +21929 ' jug' 4 +21930 ' jul' 4 +21931 ' jun' 4 +21932 ' jur' 4 +21933 ' jus' 4 +21934 ' já' 4 +21935 ' jä' 4 +21936 ' jó' 4 +21937 ' jú' 4 +21938 ' kHz' 4 +21939 ' kad' 4 +21940 ' kal' 4 +21941 ' kam' 4 +21942 ' kan' 4 +21943 ' kap' 4 
+21944 ' kar' 4 +21945 ' kas' 4 +21946 ' kat' 4 +21947 ' kay' 4 +21948 ' kde' 4 +21949 ' kel' 4 +21950 ' ker' 4 +21951 ' ket' 4 +21952 ' key' 4 +21953 ' kid' 4 +21954 ' kil' 4 +21955 ' kin' 4 +21956 ' kir' 4 +21957 ' kit' 4 +21958 ' kle' 4 +21959 ' kne' 4 +21960 ' kol' 4 +21961 ' kom' 4 +21962 ' kon' 4 +21963 ' kop' 4 +21964 ' kor' 4 +21965 ' kos' 4 +21966 ' kot' 4 +21967 ' kre' 4 +21968 ' kun' 4 +21969 ' kur' 4 +21970 ' kä' 4 +21971 ' ké' 4 +21972 ' kö' 4 +21973 ' kø' 4 +21974 ' kü' 4 +21975 ' kā' 4 +21976 ' lab' 4 +21977 ' lac' 4 +21978 ' lad' 4 +21979 ' lag' 4 +21980 ' lak' 4 +21981 ' lam' 4 +21982 ' lan' 4 +21983 ' lap' 4 +21984 ' lar' 4 +21985 ' las' 4 +21986 ' lat' 4 +21987 ' lav' 4 +21988 ' law' 4 +21989 ' lax' 4 +21990 ' lay' 4 +21991 ' laz' 4 +21992 ' lbl' 4 +21993 ' lbs' 4 +21994 ' led' 4 +21995 ' leg' 4 +21996 ' lei' 4 +21997 ' lem' 4 +21998 ' len' 4 +21999 ' ler' 4 +22000 ' les' 4 +22001 ' let' 4 +22002 ' lev' 4 +22003 ' lex' 4 +22004 ' lhs' 4 +22005 ' lia' 4 +22006 ' lib' 4 +22007 ' lic' 4 +22008 ' lid' 4 +22009 ' lie' 4 +22010 ' lif' 4 +22011 ' lig' 4 +22012 ' lik' 4 +22013 ' lil' 4 +22014 ' lim' 4 +22015 ' lin' 4 +22016 ' lip' 4 +22017 ' lis' 4 +22018 ' lit' 4 +22019 ' liv' 4 +22020 ' lle' 4 +22021 ' lng' 4 +22022 ' lob' 4 +22023 ' loc' 4 +22024 ' lod' 4 +22025 ' log' 4 +22026 ' loi' 4 +22027 ' lok' 4 +22028 ' lol' 4 +22029 ' lon' 4 +22030 ' los' 4 +22031 ' lot' 4 +22032 ' lou' 4 +22033 ' lov' 4 +22034 ' low' 4 +22035 ' lst' 4 +22036 ' lua' 4 +22037 ' lub' 4 +22038 ' luc' 4 +22039 ' lud' 4 +22040 ' lug' 4 +22041 ' lui' 4 +22042 ' lum' 4 +22043 ' lun' 4 +22044 ' lup' 4 +22045 ' lur' 4 +22046 ' lut' 4 +22047 ' lux' 4 +22048 ' lyr' 4 +22049 ' lys' 4 +22050 ' là' 4 +22051 ' lá' 4 +22052 ' lä' 4 +22053 ' lå' 4 +22054 ' læ' 4 +22055 ' lé' 4 +22056 ' lí' 4 +22057 ' lö' 4 +22058 ' lø' 4 +22059 ' mac' 4 +22060 ' mad' 4 +22061 ' mag' 4 +22062 ' mah' 4 +22063 ' mai' 4 +22064 ' maj' 4 +22065 ' mak' 4 +22066 ' mal' 4 +22067 ' mam' 4 +22068 ' man' 4 +22069 ' map' 4 +22070 ' mar' 4 +22071 ' mas' 4 +22072 ' mat' 4 +22073 ' max' 4 +22074 ' may' 4 +22075 ' mec' 4 +22076 ' med' 4 +22077 ' meg' 4 +22078 ' mel' 4 +22079 ' mem' 4 +22080 ' men' 4 +22081 ' mer' 4 +22082 ' mes' 4 +22083 ' met' 4 +22084 ' meu' 4 +22085 ' mex' 4 +22086 ' mez' 4 +22087 ' miR' 4 +22088 ' mic' 4 +22089 ' mid' 4 +22090 ' mie' 4 +22091 ' mig' 4 +22092 ' mij' 4 +22093 ' mil' 4 +22094 ' mim' 4 +22095 ' min' 4 +22096 ' mir' 4 +22097 ' mis' 4 +22098 ' mit' 4 +22099 ' mix' 4 +22100 ' mob' 4 +22101 ' mod' 4 +22102 ' mog' 4 +22103 ' moi' 4 +22104 ' mol' 4 +22105 ' mom' 4 +22106 ' mon' 4 +22107 ' mor' 4 +22108 ' mos' 4 +22109 ' mot' 4 +22110 ' mou' 4 +22111 ' mov' 4 +22112 ' moy' 4 +22113 ' mph' 4 +22114 ' msg' 4 +22115 ' muc' 4 +22116 ' mud' 4 +22117 ' mug' 4 +22118 ' mul' 4 +22119 ' mum' 4 +22120 ' mun' 4 +22121 ' mur' 4 +22122 ' mus' 4 +22123 ' mut' 4 +22124 ' muy' 4 +22125 ' mys' 4 +22126 ' mà' 4 +22127 ' má' 4 +22128 ' mã' 4 +22129 ' må' 4 +22130 ' mé' 4 +22131 ' mí' 4 +22132 ' mó' 4 +22133 ' mô' 4 +22134 ' mö' 4 +22135 ' mø' 4 +22136 ' mú' 4 +22137 ' mü' 4 +22138 ' mě' 4 +22139 ' mű' 4 +22140 ' nab' 4 +22141 ' nad' 4 +22142 ' nag' 4 +22143 ' nah' 4 +22144 ' naj' 4 +22145 ' nak' 4 +22146 ' nal' 4 +22147 ' nam' 4 +22148 ' nan' 4 +22149 ' nap' 4 +22150 ' nar' 4 +22151 ' nas' 4 +22152 ' nat' 4 +22153 ' nau' 4 +22154 ' nav' 4 +22155 ' naz' 4 +22156 ' neb' 4 +22157 ' nec' 4 +22158 ' ned' 4 +22159 ' neg' 4 +22160 ' nel' 4 +22161 ' nem' 4 +22162 ' nen' 4 +22163 ' neo' 4 +22164 ' nep' 4 +22165 ' ner' 4 +22166 ' net' 4 +22167 ' neu' 4 
+22168 ' new' 4 +22169 ' nex' 4 +22170 ' nib' 4 +22171 ' nic' 4 +22172 ' nid' 4 +22173 ' nie' 4 +22174 ' nig' 4 +22175 ' nil' 4 +22176 ' nim' 4 +22177 ' nin' 4 +22178 ' nit' 4 +22179 ' nob' 4 +22180 ' noc' 4 +22181 ' nod' 4 +22182 ' nog' 4 +22183 ' nom' 4 +22184 ' non' 4 +22185 ' nor' 4 +22186 ' nos' 4 +22187 ' not' 4 +22188 ' nou' 4 +22189 ' nov' 4 +22190 ' now' 4 +22191 ' npc' 4 +22192 ' npm' 4 +22193 ' nth' 4 +22194 ' nud' 4 +22195 ' nue' 4 +22196 ' num' 4 +22197 ' nun' 4 +22198 ' nur' 4 +22199 ' nut' 4 +22200 ' ná' 4 +22201 ' nä' 4 +22202 ' nå' 4 +22203 ' né' 4 +22204 ' në' 4 +22205 ' nó' 4 +22206 ' nú' 4 +22207 ' ně' 4 +22208 ' oak' 4 +22209 ' obj' 4 +22210 ' obl' 4 +22211 ' obs' 4 +22212 ' obt' 4 +22213 ' occ' 4 +22214 ' och' 4 +22215 ' oct' 4 +22216 ' odd' 4 +22217 ' ode' 4 +22218 ' off' 4 +22219 ' oft' 4 +22220 ' oil' 4 +22221 ' old' 4 +22222 ' ole' 4 +22223 ' oli' 4 +22224 ' omn' 4 +22225 ' onc' 4 +22226 ' one' 4 +22227 ' ont' 4 +22228 ' ook' 4 +22229 ' opp' 4 +22230 ' ops' 4 +22231 ' opt' 4 +22232 ' ora' 4 +22233 ' orb' 4 +22234 ' ord' 4 +22235 ' ore' 4 +22236 ' org' 4 +22237 ' ori' 4 +22238 ' orn' 4 +22239 ' oro' 4 +22240 ' ort' 4 +22241 ' osc' 4 +22242 ' osm' 4 +22243 ' osp' 4 +22244 ' oss' 4 +22245 ' ost' 4 +22246 ' ott' 4 +22247 ' oun' 4 +22248 ' our' 4 +22249 ' out' 4 +22250 ' owe' 4 +22251 ' own' 4 +22252 ' oxy' 4 +22253 ' où' 4 +22254 ' pac' 4 +22255 ' pad' 4 +22256 ' pag' 4 +22257 ' pai' 4 +22258 ' pak' 4 +22259 ' pal' 4 +22260 ' pam' 4 +22261 ' pan' 4 +22262 ' pap' 4 +22263 ' par' 4 +22264 ' pas' 4 +22265 ' pat' 4 +22266 ' pau' 4 +22267 ' pav' 4 +22268 ' paw' 4 +22269 ' pay' 4 +22270 ' pdf' 4 +22271 ' pec' 4 +22272 ' ped' 4 +22273 ' peg' 4 +22274 ' pel' 4 +22275 ' pem' 4 +22276 ' pen' 4 +22277 ' pep' 4 +22278 ' per' 4 +22279 ' pes' 4 +22280 ' pet' 4 +22281 ' peu' 4 +22282 ' phi' 4 +22283 ' php' 4 +22284 ' phr' 4 +22285 ' phy' 4 +22286 ' pic' 4 +22287 ' pid' 4 +22288 ' pie' 4 +22289 ' pig' 4 +22290 ' pil' 4 +22291 ' pin' 4 +22292 ' pip' 4 +22293 ' pir' 4 +22294 ' pis' 4 +22295 ' pit' 4 +22296 ' piv' 4 +22297 ' pix' 4 +22298 ' pkg' 4 +22299 ' pla' 4 +22300 ' ple' 4 +22301 ' plt' 4 +22302 ' ply' 4 +22303 ' png' 4 +22304 ' pob' 4 +22305 ' poc' 4 +22306 ' pod' 4 +22307 ' pog' 4 +22308 ' poi' 4 +22309 ' pok' 4 +22310 ' pol' 4 +22311 ' pom' 4 +22312 ' pon' 4 +22313 ' pop' 4 +22314 ' por' 4 +22315 ' pos' 4 +22316 ' pot' 4 +22317 ' pou' 4 +22318 ' pov' 4 +22319 ' pow' 4 +22320 ' poz' 4 +22321 ' ppm' 4 +22322 ' pra' 4 +22323 ' pre' 4 +22324 ' pri' 4 +22325 ' pro' 4 +22326 ' prz' 4 +22327 ' pse' 4 +22328 ' psi' 4 +22329 ' psy' 4 +22330 ' ptr' 4 +22331 ' pts' 4 +22332 ' pub' 4 +22333 ' pud' 4 +22334 ' pul' 4 +22335 ' pun' 4 +22336 ' pup' 4 +22337 ' pur' 4 +22338 ' pus' 4 +22339 ' put' 4 +22340 ' pyg' 4 +22341 ' pyl' 4 +22342 ' pá' 4 +22343 ' pä' 4 +22344 ' på' 4 +22345 ' pé' 4 +22346 ' pó' 4 +22347 ' pł' 4 +22348 ' př' 4 +22349 ' pů' 4 +22350 ' que' 4 +22351 ' qui' 4 +22352 ' quo' 4 +22353 ' rab' 4 +22354 ' rac' 4 +22355 ' rad' 4 +22356 ' rag' 4 +22357 ' ram' 4 +22358 ' ran' 4 +22359 ' rap' 4 +22360 ' ras' 4 +22361 ' rat' 4 +22362 ' rav' 4 +22363 ' raw' 4 +22364 ' ray' 4 +22365 ' raz' 4 +22366 ' reb' 4 +22367 ' rec' 4 +22368 ' red' 4 +22369 ' ref' 4 +22370 ' reg' 4 +22371 ' rel' 4 +22372 ' rem' 4 +22373 ' ren' 4 +22374 ' rep' 4 +22375 ' req' 4 +22376 ' rer' 4 +22377 ' res' 4 +22378 ' ret' 4 +22379 ' rev' 4 +22380 ' rez' 4 +22381 ' rgb' 4 +22382 ' rhe' 4 +22383 ' rho' 4 +22384 ' rhs' 4 +22385 ' rib' 4 +22386 ' ric' 4 +22387 ' rid' 4 +22388 ' rif' 4 +22389 ' rig' 4 +22390 ' rim' 4 
+22391 ' rin' 4 +22392 ' rip' 4 +22393 ' ris' 4 +22394 ' rit' 4 +22395 ' riv' 4 +22396 ' rms' 4 +22397 ' rng' 4 +22398 ' rob' 4 +22399 ' roc' 4 +22400 ' rod' 4 +22401 ' roi' 4 +22402 ' rol' 4 +22403 ' rom' 4 +22404 ' ros' 4 +22405 ' rot' 4 +22406 ' rou' 4 +22407 ' row' 4 +22408 ' roy' 4 +22409 ' roz' 4 +22410 ' rpm' 4 +22411 ' rst' 4 +22412 ' rub' 4 +22413 ' rud' 4 +22414 ' rue' 4 +22415 ' rug' 4 +22416 ' rul' 4 +22417 ' rum' 4 +22418 ' run' 4 +22419 ' rus' 4 +22420 ' rut' 4 +22421 ' rá' 4 +22422 ' rå' 4 +22423 ' rè' 4 +22424 ' ré' 4 +22425 ' rê' 4 +22426 ' sab' 4 +22427 ' sac' 4 +22428 ' sad' 4 +22429 ' saf' 4 +22430 ' sag' 4 +22431 ' sal' 4 +22432 ' sam' 4 +22433 ' san' 4 +22434 ' sap' 4 +22435 ' sar' 4 +22436 ' sat' 4 +22437 ' sau' 4 +22438 ' sav' 4 +22439 ' saw' 4 +22440 ' sax' 4 +22441 ' say' 4 +22442 ' sch' 4 +22443 ' sci' 4 +22444 ' sco' 4 +22445 ' scr' 4 +22446 ' sea' 4 +22447 ' sec' 4 +22448 ' sed' 4 +22449 ' see' 4 +22450 ' seg' 4 +22451 ' sei' 4 +22452 ' sel' 4 +22453 ' sem' 4 +22454 ' sen' 4 +22455 ' sep' 4 +22456 ' seq' 4 +22457 ' ser' 4 +22458 ' ses' 4 +22459 ' set' 4 +22460 ' seu' 4 +22461 ' sew' 4 +22462 ' sex' 4 +22463 ' sha' 4 +22464 ' she' 4 +22465 ' sho' 4 +22466 ' shr' 4 +22467 ' shy' 4 +22468 ' sia' 4 +22469 ' sib' 4 +22470 ' sic' 4 +22471 ' sid' 4 +22472 ' sie' 4 +22473 ' sig' 4 +22474 ' sil' 4 +22475 ' sim' 4 +22476 ' sin' 4 +22477 ' sip' 4 +22478 ' sir' 4 +22479 ' sis' 4 +22480 ' sit' 4 +22481 ' six' 4 +22482 ' ske' 4 +22483 ' ski' 4 +22484 ' sku' 4 +22485 ' sky' 4 +22486 ' sla' 4 +22487 ' sle' 4 +22488 ' sme' 4 +22489 ' smo' 4 +22490 ' sms' 4 +22491 ' snd' 4 +22492 ' sne' 4 +22493 ' sob' 4 +22494 ' soc' 4 +22495 ' sod' 4 +22496 ' sog' 4 +22497 ' sol' 4 +22498 ' som' 4 +22499 ' son' 4 +22500 ' sop' 4 +22501 ' sor' 4 +22502 ' sou' 4 +22503 ' sow' 4 +22504 ' soy' 4 +22505 ' spa' 4 +22506 ' spe' 4 +22507 ' sph' 4 +22508 ' spl' 4 +22509 ' spo' 4 +22510 ' spp' 4 +22511 ' spr' 4 +22512 ' spy' 4 +22513 ' sql' 4 +22514 ' squ' 4 +22515 ' src' 4 +22516 ' ssh' 4 +22517 ' ssl' 4 +22518 ' sta' 4 +22519 ' std' 4 +22520 ' ste' 4 +22521 ' sto' 4 +22522 ' str' 4 +22523 ' sty' 4 +22524 ' sua' 4 +22525 ' sub' 4 +22526 ' suc' 4 +22527 ' sud' 4 +22528 ' sue' 4 +22529 ' suf' 4 +22530 ' sug' 4 +22531 ' sul' 4 +22532 ' sum' 4 +22533 ' sun' 4 +22534 ' suo' 4 +22535 ' sup' 4 +22536 ' sur' 4 +22537 ' sus' 4 +22538 ' sut' 4 +22539 ' svg' 4 +22540 ' swe' 4 +22541 ' swo' 4 +22542 ' sym' 4 +22543 ' syn' 4 +22544 ' sys' 4 +22545 ' sä' 4 +22546 ' så' 4 +22547 ' sæ' 4 +22548 ' sé' 4 +22549 ' sí' 4 +22550 ' só' 4 +22551 ' sø' 4 +22552 ' sú' 4 +22553 ' sû' 4 +22554 ' sü' 4 +22555 ' să' 4 +22556 ' są' 4 +22557 ' sł' 4 +22558 ' tab' 4 +22559 ' tac' 4 +22560 ' tad' 4 +22561 ' tag' 4 +22562 ' tai' 4 +22563 ' tak' 4 +22564 ' tal' 4 +22565 ' tam' 4 +22566 ' tan' 4 +22567 ' tap' 4 +22568 ' tar' 4 +22569 ' tas' 4 +22570 ' tat' 4 +22571 ' tau' 4 +22572 ' tax' 4 +22573 ' tbl' 4 +22574 ' tcp' 4 +22575 ' tea' 4 +22576 ' tec' 4 +22577 ' ted' 4 +22578 ' tee' 4 +22579 ' tej' 4 +22580 ' tek' 4 +22581 ' tel' 4 +22582 ' tem' 4 +22583 ' ten' 4 +22584 ' ter' 4 +22585 ' tes' 4 +22586 ' tet' 4 +22587 ' tex' 4 +22588 ' tgt' 4 +22589 ' tha' 4 +22590 ' the' 4 +22591 ' thi' 4 +22592 ' tho' 4 +22593 ' thr' 4 +22594 ' thy' 4 +22595 ' tib' 4 +22596 ' tic' 4 +22597 ' tid' 4 +22598 ' tie' 4 +22599 ' til' 4 +22600 ' tim' 4 +22601 ' tin' 4 +22602 ' tip' 4 +22603 ' tir' 4 +22604 ' tit' 4 +22605 ' tmp' 4 +22606 ' tob' 4 +22607 ' toc' 4 +22608 ' tod' 4 +22609 ' toe' 4 +22610 ' tok' 4 +22611 ' tol' 4 +22612 ' tom' 4 +22613 ' ton' 4 
+22614 ' too' 4 +22615 ' top' 4 +22616 ' tor' 4 +22617 ' tot' 4 +22618 ' tou' 4 +22619 ' tow' 4 +22620 ' tox' 4 +22621 ' toy' 4 +22622 ' tra' 4 +22623 ' tre' 4 +22624 ' tri' 4 +22625 ' tro' 4 +22626 ' try' 4 +22627 ' tsp' 4 +22628 ' tub' 4 +22629 ' tud' 4 +22630 ' tug' 4 +22631 ' tul' 4 +22632 ' tum' 4 +22633 ' tun' 4 +22634 ' tup' 4 +22635 ' tur' 4 +22636 ' tut' 4 +22637 ' twe' 4 +22638 ' two' 4 +22639 ' txt' 4 +22640 ' typ' 4 +22641 ' tyr' 4 +22642 ' tá' 4 +22643 ' tä' 4 +22644 ' té' 4 +22645 ' të' 4 +22646 ' tí' 4 +22647 ' tö' 4 +22648 ' tú' 4 +22649 ' uid' 4 +22650 ' uit' 4 +22651 ' ult' 4 +22652 ' uma' 4 +22653 ' umb' 4 +22654 ' una' 4 +22655 ' unb' 4 +22656 ' unc' 4 +22657 ' und' 4 +22658 ' une' 4 +22659 ' unf' 4 +22660 ' ung' 4 +22661 ' unh' 4 +22662 ' uni' 4 +22663 ' unl' 4 +22664 ' unm' 4 +22665 ' uno' 4 +22666 ' uns' 4 +22667 ' unt' 4 +22668 ' unw' 4 +22669 ' upd' 4 +22670 ' upl' 4 +22671 ' upp' 4 +22672 ' ups' 4 +22673 ' upt' 4 +22674 ' urb' 4 +22675 ' ure' 4 +22676 ' urg' 4 +22677 ' uri' 4 +22678 ' url' 4 +22679 ' urn' 4 +22680 ' usb' 4 +22681 ' use' 4 +22682 ' uso' 4 +22683 ' usu' 4 +22684 ' utf' 4 +22685 ' vac' 4 +22686 ' vad' 4 +22687 ' vag' 4 +22688 ' vai' 4 +22689 ' val' 4 +22690 ' van' 4 +22691 ' vap' 4 +22692 ' var' 4 +22693 ' vas' 4 +22694 ' vec' 4 +22695 ' ved' 4 +22696 ' veg' 4 +22697 ' veh' 4 +22698 ' vel' 4 +22699 ' ven' 4 +22700 ' ver' 4 +22701 ' ves' 4 +22702 ' vet' 4 +22703 ' vex' 4 +22704 ' vez' 4 +22705 ' via' 4 +22706 ' vib' 4 +22707 ' vic' 4 +22708 ' vid' 4 +22709 ' vie' 4 +22710 ' vig' 4 +22711 ' vil' 4 +22712 ' vim' 4 +22713 ' vin' 4 +22714 ' vip' 4 +22715 ' vir' 4 +22716 ' vis' 4 +22717 ' vit' 4 +22718 ' viv' 4 +22719 ' viz' 4 +22720 ' voc' 4 +22721 ' vol' 4 +22722 ' vom' 4 +22723 ' von' 4 +22724 ' vor' 4 +22725 ' vos' 4 +22726 ' vot' 4 +22727 ' vou' 4 +22728 ' vow' 4 +22729 ' vox' 4 +22730 ' voy' 4 +22731 ' voz' 4 +22732 ' vra' 4 +22733 ' vue' 4 +22734 ' vul' 4 +22735 ' và' 4 +22736 ' vá' 4 +22737 ' vä' 4 +22738 ' vå' 4 +22739 ' væ' 4 +22740 ' vé' 4 +22741 ' ví' 4 +22742 ' võ' 4 +22743 ' vý' 4 +22744 ' vě' 4 +22745 ' vš' 4 +22746 ' wal' 4 +22747 ' war' 4 +22748 ' was' 4 +22749 ' wat' 4 +22750 ' wav' 4 +22751 ' wax' 4 +22752 ' way' 4 +22753 ' web' 4 +22754 ' wed' 4 +22755 ' wee' 4 +22756 ' weg' 4 +22757 ' wel' 4 +22758 ' wen' 4 +22759 ' wer' 4 +22760 ' wet' 4 +22761 ' whe' 4 +22762 ' who' 4 +22763 ' why' 4 +22764 ' wid' 4 +22765 ' wie' 4 +22766 ' wig' 4 +22767 ' wij' 4 +22768 ' wik' 4 +22769 ' wil' 4 +22770 ' win' 4 +22771 ' wir' 4 +22772 ' wis' 4 +22773 ' wit' 4 +22774 ' wob' 4 +22775 ' wol' 4 +22776 ' wom' 4 +22777 ' won' 4 +22778 ' woo' 4 +22779 ' wor' 4 +22780 ' wow' 4 +22781 ' wra' 4 +22782 ' wre' 4 +22783 ' wsp' 4 +22784 ' wur' 4 +22785 ' www' 4 +22786 ' wä' 4 +22787 ' wł' 4 +22788 ' xen' 4 +22789 ' xml' 4 +22790 ' xxx' 4 +22791 ' xyz' 4 +22792 ' yap' 4 +22793 ' yaw' 4 +22794 ' yen' 4 +22795 ' yer' 4 +22796 ' yes' 4 +22797 ' yet' 4 +22798 ' yog' 4 +22799 ' you' 4 +22800 ' zab' 4 +22801 ' zak' 4 +22802 ' zal' 4 +22803 ' zam' 4 +22804 ' zap' 4 +22805 ' zar' 4 +22806 ' zaw' 4 +22807 ' zen' 4 +22808 ' zer' 4 +22809 ' zig' 4 +22810 ' zij' 4 +22811 ' zip' 4 +22812 ' zon' 4 +22813 ' zoo' 4 +22814 ' zug' 4 +22815 ' zum' 4 +22816 ' zur' 4 +22817 ' zwe' 4 +22818 ' zá' 4 +22819 ' zł' 4 +22820 ' {},' 4 +22821 ' {};' 4 +22822 ' {¶' 4 +22823 ' }).' 4 +22824 ' });' 4 +22825 ' »,' 4 +22826 ' ».' 
4 +22827 ' Ál' 4 +22828 ' Éd' 4 +22829 ' És' 4 +22830 ' Ét' 4 +22831 ' În' 4 +22832 ' às' 4 +22833 ' ál' 4 +22834 ' ár' 4 +22835 ' át' 4 +22836 ' äl' 4 +22837 ' än' 4 +22838 ' är' 4 +22839 ' år' 4 +22840 ' ça' 4 +22841 ' éc' 4 +22842 ' éd' 4 +22843 ' ég' 4 +22844 ' él' 4 +22845 ' én' 4 +22846 ' ép' 4 +22847 ' ér' 4 +22848 ' és' 4 +22849 ' ét' 4 +22850 ' év' 4 +22851 ' éx' 4 +22852 ' în' 4 +22853 ' ór' 4 +22854 ' ön' 4 +22855 ' új' 4 +22856 ' ún' 4 +22857 ' će' 4 +22858 ' či' 4 +22859 ' đi' 4 +22860 ' św' 4 +22861 ' şi' 4 +22862 ' że' 4 +22863 ' ży' 4 +22864 ' že' 4 +22865 ' și' 4 +22866 ' μL' 4 +22867 ' μM' 4 +22868 ' μg' 4 +22869 ' μl' 4 +22870 ' μm' 4 +22871 ' μs' 4 +22872 ' अ' 4 +22873 ' आ' 4 +22874 ' क' 4 +22875 ' ज' 4 +22876 ' त' 4 +22877 ' द' 4 +22878 ' न' 4 +22879 ' प' 4 +22880 ' ब' 4 +22881 ' म' 4 +22882 ' र' 4 +22883 ' ल' 4 +22884 ' व' 4 +22885 ' स' 4 +22886 ' ह' 4 +22887 ' ক' 4 +22888 ' ਦ' 4 +22889 ' ਸ' 4 +22890 ' ப' 4 +22891 ' เ' 4 +22892 ' ở' 4 +22893 ' ἀ' 4 +22894 ' ἐ' 4 +22895 ' \u200b' 4 +22896 ' \u200e' 4 +22897 ' –' 4 +22898 ' —' 4 +22899 ' ―' 4 +22900 ' ‖' 4 +22901 ' ‘' 4 +22902 ' ’' 4 +22903 ' “' 4 +22904 ' ”' 4 +22905 ' „' 4 +22906 ' †' 4 +22907 ' •' 4 +22908 ' …' 4 +22909 ' ′' 4 +22910 ' ›' 4 +22911 ' €' 4 +22912 ' ₹' 4 +22913 ' №' 4 +22914 ' ←' 4 +22915 ' ↑' 4 +22916 ' →' 4 +22917 ' ↓' 4 +22918 ' ↔' 4 +22919 ' ⇒' 4 +22920 ' ⇔' 4 +22921 ' ∀' 4 +22922 ' ∂' 4 +22923 ' ∃' 4 +22924 ' ∅' 4 +22925 ' ∆' 4 +22926 ' ∇' 4 +22927 ' ∈' 4 +22928 ' ∑' 4 +22929 ' −' 4 +22930 ' ∗' 4 +22931 ' ∘' 4 +22932 ' √' 4 +22933 ' ∞' 4 +22934 ' ∧' 4 +22935 ' ∨' 4 +22936 ' ∩' 4 +22937 ' ∪' 4 +22938 ' ∫' 4 +22939 ' ∼' 4 +22940 ' ≃' 4 +22941 ' ≈' 4 +22942 ' ≠' 4 +22943 ' ≡' 4 +22944 ' ≤' 4 +22945 ' ≥' 4 +22946 ' ⊂' 4 +22947 ' ⊆' 4 +22948 ' ⊕' 4 +22949 ' ⊗' 4 +22950 ' ⊥' 4 +22951 ' ⋅' 4 +22952 ' ⋯' 4 +22953 ' │' 4 +22954 ' ├' 4 +22955 ' ╚' 4 +22956 ' █' 4 +22957 ' ░' 4 +22958 ' ■' 4 +22959 ' ►' 4 +22960 ' ●' 4 +22961 ' ★' 4 +22962 ' ♥' 4 +22963 ' ♦' 4 +22964 ' ♪' 4 +22965 ' ✓' 4 +22966 ' ✔' 4 +22967 ' ❤' 4 +22968 ' ⟨' 4 +22969 ' ⟩' 4 +22970 ' 。' 4 +22971 ' 〈' 4 +22972 ' 「' 4 +22973 ' 【' 4 +22974 ' 가' 4 +22975 ' 각' 4 +22976 ' 간' 4 +22977 ' 감' 4 +22978 ' 강' 4 +22979 ' 같' 4 +22980 ' 개' 4 +22981 ' 거' 4 +22982 ' 건' 4 +22983 ' 걸' 4 +22984 ' 검' 4 +22985 ' 것' 4 +22986 ' 게' 4 +22987 ' 결' 4 +22988 ' 경' 4 +22989 ' 계' 4 +22990 ' 고' 4 +22991 ' 공' 4 +22992 ' 과' 4 +22993 ' 관' 4 +22994 ' 광' 4 +22995 ' 교' 4 +22996 ' 구' 4 +22997 ' 국' 4 +22998 ' 군' 4 +22999 ' 권' 4 +23000 ' 규' 4 +23001 ' 그' 4 +23002 ' 근' 4 +23003 ' 금' 4 +23004 ' 기' 4 +23005 ' 김' 4 +23006 ' 나' 4 +23007 ' 날' 4 +23008 ' 남' 4 +23009 ' 내' 4 +23010 ' 네' 4 +23011 ' 노' 4 +23012 ' 높' 4 +23013 ' 누' 4 +23014 ' 눈' 4 +23015 ' 다' 4 +23016 ' 단' 4 +23017 ' 달' 4 +23018 ' 당' 4 +23019 ' 대' 4 +23020 ' 더' 4 +23021 ' 덤' 4 +23022 ' 데' 4 +23023 ' 도' 4 +23024 ' 독' 4 +23025 ' 돌' 4 +23026 ' 동' 4 +23027 ' 되' 4 +23028 ' 된' 4 +23029 ' 두' 4 +23030 ' 뒤' 4 +23031 ' 드' 4 +23032 ' 들' 4 +23033 ' 등' 4 +23034 ' 디' 4 +23035 ' 따' 4 +23036 ' 때' 4 +23037 ' 또' 4 +23038 ' 라' 4 +23039 ' 레' 4 +23040 ' 로' 4 +23041 ' 루' 4 +23042 ' 리' 4 +23043 ' 링' 4 +23044 ' 마' 4 +23045 ' 만' 4 +23046 ' 많' 4 +23047 ' 말' 4 +23048 ' 맞' 4 +23049 ' 매' 4 +23050 ' 메' 4 +23051 ' 명' 4 +23052 ' 모' 4 +23053 ' 목' 4 +23054 ' 못' 4 +23055 ' 무' 4 +23056 ' 문' 4 +23057 ' 물' 4 +23058 ' 미' 4 +23059 ' 민' 4 +23060 ' 및' 4 +23061 ' 바' 4 +23062 ' 박' 4 +23063 ' 반' 4 +23064 ' 받' 4 +23065 ' 발' 4 +23066 ' 밝' 4 +23067 ' 방' 4 +23068 ' 배' 4 +23069 ' 백' 4 +23070 ' 버' 4 +23071 ' 번' 4 +23072 ' 법' 4 +23073 ' 베' 4 +23074 ' 변' 4 +23075 ' 병' 4 +23076 ' 보' 
4 +23077 ' 복' 4 +23078 ' 본' 4 +23079 ' 부' 4 +23080 ' 북' 4 +23081 ' 분' 4 +23082 ' 불' 4 +23083 ' 브' 4 +23084 ' 비' 4 +23085 ' 사' 4 +23086 ' 산' 4 +23087 ' 살' 4 +23088 ' 삼' 4 +23089 ' 상' 4 +23090 ' 새' 4 +23091 ' 생' 4 +23092 ' 서' 4 +23093 ' 선' 4 +23094 ' 설' 4 +23095 ' 성' 4 +23096 ' 세' 4 +23097 ' 소' 4 +23098 ' 속' 4 +23099 ' 손' 4 +23100 ' 수' 4 +23101 ' 순' 4 +23102 ' 스' 4 +23103 ' 승' 4 +23104 ' 시' 4 +23105 ' 신' 4 +23106 ' 실' 4 +23107 ' 심' 4 +23108 ' 아' 4 +23109 ' 안' 4 +23110 ' 않' 4 +23111 ' 알' 4 +23112 ' 앞' 4 +23113 ' 애' 4 +23114 ' 야' 4 +23115 ' 약' 4 +23116 ' 양' 4 +23117 ' 어' 4 +23118 ' 언' 4 +23119 ' 얼' 4 +23120 ' 업' 4 +23121 ' 없' 4 +23122 ' 에' 4 +23123 ' 여' 4 +23124 ' 역' 4 +23125 ' 연' 4 +23126 ' 열' 4 +23127 ' 영' 4 +23128 ' 예' 4 +23129 ' 오' 4 +23130 ' 온' 4 +23131 ' 올' 4 +23132 ' 완' 4 +23133 ' 왕' 4 +23134 ' 외' 4 +23135 ' 요' 4 +23136 ' 용' 4 +23137 ' 우' 4 +23138 ' 운' 4 +23139 ' 원' 4 +23140 ' 월' 4 +23141 ' 위' 4 +23142 ' 유' 4 +23143 ' 음' 4 +23144 ' 의' 4 +23145 ' 이' 4 +23146 ' 인' 4 +23147 ' 일' 4 +23148 ' 임' 4 +23149 ' 입' 4 +23150 ' 있' 4 +23151 ' 자' 4 +23152 ' 작' 4 +23153 ' 잘' 4 +23154 ' 장' 4 +23155 ' 재' 4 +23156 ' 저' 4 +23157 ' 적' 4 +23158 ' 전' 4 +23159 ' 점' 4 +23160 ' 정' 4 +23161 ' 제' 4 +23162 ' 조' 4 +23163 ' 존' 4 +23164 ' 종' 4 +23165 ' 좋' 4 +23166 ' 주' 4 +23167 ' 죽' 4 +23168 ' 준' 4 +23169 ' 중' 4 +23170 ' 증' 4 +23171 ' 지' 4 +23172 ' 직' 4 +23173 ' 진' 4 +23174 ' 집' 4 +23175 ' 차' 4 +23176 ' 참' 4 +23177 ' 창' 4 +23178 ' 찾' 4 +23179 ' 채' 4 +23180 ' 책' 4 +23181 ' 처' 4 +23182 ' 천' 4 +23183 ' 철' 4 +23184 ' 첫' 4 +23185 ' 청' 4 +23186 ' 체' 4 +23187 ' 초' 4 +23188 ' 총' 4 +23189 ' 최' 4 +23190 ' 추' 4 +23191 ' 축' 4 +23192 ' 출' 4 +23193 ' 충' 4 +23194 ' 취' 4 +23195 ' 치' 4 +23196 ' 친' 4 +23197 ' 카' 4 +23198 ' 코' 4 +23199 ' 크' 4 +23200 ' 클' 4 +23201 ' 타' 4 +23202 ' 태' 4 +23203 ' 테' 4 +23204 ' 토' 4 +23205 ' 통' 4 +23206 ' 투' 4 +23207 ' 트' 4 +23208 ' 특' 4 +23209 ' 팀' 4 +23210 ' 파' 4 +23211 ' 판' 4 +23212 ' 패' 4 +23213 ' 페' 4 +23214 ' 편' 4 +23215 ' 평' 4 +23216 ' 포' 4 +23217 ' 표' 4 +23218 ' 프' 4 +23219 ' 플' 4 +23220 ' 피' 4 +23221 ' 필' 4 +23222 ' 하' 4 +23223 ' 학' 4 +23224 ' 한' 4 +23225 ' 할' 4 +23226 ' 함' 4 +23227 ' 합' 4 +23228 ' 항' 4 +23229 ' 해' 4 +23230 ' 했' 4 +23231 ' 행' 4 +23232 ' 현' 4 +23233 ' 형' 4 +23234 ' 호' 4 +23235 ' 화' 4 +23236 ' 확' 4 +23237 ' 환' 4 +23238 ' 활' 4 +23239 ' 황' 4 +23240 ' 회' 4 +23241 ' 후' 4 +23242 ' 히' 4 +23243 ' \ufeff' 4 +23244 ' (' 4 +23245 ' ,' 4 +23246 ' :' 4 +23247 ' �' 4 +23248 '!!!!' 4 +23249 '!");' 4 +23250 '!’' 4 +23251 '!”' 4 +23252 '""""' 4 +23253 '")))' 4 +23254 '")),' 4 +23255 '"));' 4 +23256 '"...' 4 +23257 '"/><' 4 +23258 '":["' 4 +23259 '":{"' 4 +23260 '">' 4 +23264 '"]["' 4 +23265 '"—' 4 +23266 '####' 4 +23267 '$$$$' 4 +23268 '$’' 4 +23269 '%%%%' 4 +23270 "')))" 4 +23271 "'))," 4 +23272 "'))." 4 +23273 "'));" 4 +23274 "')->" 4 +23275 "']))" 4 +23276 "'])," 4 +23277 "'])." 4 +23278 "']):" 4 +23279 "']);" 4 +23280 "']==" 4 +23281 "']['" 4 +23282 "']]," 4 +23283 '("--' 4 +23284 '("./' 4 +23285 "(''," 4 +23286 "('--" 4 +23287 "('./" 4 +23288 '()))' 4 +23289 '()),' 4 +23290 '()).' 4 +23291 '()):' 4 +23292 '());' 4 +23293 '()->' 4 +23294 '()' 4 +23340 '="${' 4 +23341 '="@+' 4 +23342 "='')" 4 +23343 '=-=-' 4 +23344 '====' 4 +23345 '=”' 4 +23346 '>();' 4 +23347 '>>>>' 4 +23348 '?,?,' 4 +23349 '????' 
4 +23350 '?’' 4 +23351 '?”' 4 +23352 '@@@@' 4 +23353 'AAAA' 4 +23354 'ABEL' 4 +23355 'ABLE' 4 +23356 'ACES' 4 +23357 'ACHE' 4 +23358 'ADDR' 4 +23359 'ADER' 4 +23360 'AGES' 4 +23361 'AIDS' 4 +23362 'ALLY' 4 +23363 'ALOG' 4 +23364 'ALSE' 4 +23365 'ALTH' 4 +23366 'AMES' 4 +23367 'ANCE' 4 +23368 'ANGE' 4 +23369 'ANGO' 4 +23370 'ANTS' 4 +23371 'ARCH' 4 +23372 'ARGS' 4 +23373 'ATAL' 4 +23374 'ATCH' 4 +23375 'ATED' 4 +23376 'ATEG' 4 +23377 'ATER' 4 +23378 'ATES' 4 +23379 'ATIC' 4 +23380 'ATOM' 4 +23381 'ATOR' 4 +23382 'ATTR' 4 +23383 'AUTH' 4 +23384 'AUTO' 4 +23385 'Adam' 4 +23386 'Addr' 4 +23387 'Alan' 4 +23388 'Alex' 4 +23389 'Also' 4 +23390 'Anal' 4 +23391 'Andy' 4 +23392 'Anim' 4 +23393 'Anna' 4 +23394 'Anne' 4 +23395 'Anth' 4 +23396 'Anti' 4 +23397 'Appe' 4 +23398 'Apps' 4 +23399 'Arab' 4 +23400 'Arch' 4 +23401 'Area' 4 +23402 'Args' 4 +23403 'Asia' 4 +23404 'Atom' 4 +23405 'Attr' 4 +23406 'Auth' 4 +23407 'Auto' 4 +23408 'Axes' 4 +23409 'Axis' 4 +23410 'BACK' 4 +23411 'BASE' 4 +23412 'BERT' 4 +23413 'BITS' 4 +23414 'BLUE' 4 +23415 'BOOK' 4 +23416 'BOOL' 4 +23417 'BUFF' 4 +23418 'BYTE' 4 +23419 'Baby' 4 +23420 'Back' 4 +23421 'Ball' 4 +23422 'Band' 4 +23423 'Bang' 4 +23424 'Bank' 4 +23425 'Base' 4 +23426 'Beam' 4 +23427 'Bean' 4 +23428 'Beat' 4 +23429 'Bell' 4 +23430 'Bern' 4 +23431 'Bert' 4 +23432 'Best' 4 +23433 'Beta' 4 +23434 'Bias' 4 +23435 'Bill' 4 +23436 'Bind' 4 +23437 'Bits' 4 +23438 'Blob' 4 +23439 'Blog' 4 +23440 'Blue' 4 +23441 'Blur' 4 +23442 'Body' 4 +23443 'Bold' 4 +23444 'Book' 4 +23445 'Bool' 4 +23446 'Boot' 4 +23447 'Born' 4 +23448 'Boss' 4 +23449 'Both' 4 +23450 'Brad' 4 +23451 'Brit' 4 +23452 'Bron' 4 +23453 'Buff' 4 +23454 'Burn' 4 +23455 'ById' 4 +23456 'Byte' 4 +23457 'CADE' 4 +23458 'CALL' 4 +23459 'CASE' 4 +23460 'CAST' 4 +23461 'CCCC' 4 +23462 'CENT' 4 +23463 'CEPT' 4 +23464 'CHAR' 4 +23465 'CLUD' 4 +23466 'CLUS' 4 +23467 'CODE' 4 +23468 'COMM' 4 +23469 'COMP' 4 +23470 'COND' 4 +23471 'CONF' 4 +23472 'CONN' 4 +23473 'CONT' 4 +23474 'COPY' 4 +23475 'CORE' 4 +23476 'COUN' 4 +23477 'CTOR' 4 +23478 'CTRL' 4 +23479 'CUDA' 4 +23480 'Calc' 4 +23481 'Call' 4 +23482 'Camb' 4 +23483 'Camp' 4 +23484 'Cand' 4 +23485 'Capt' 4 +23486 'Card' 4 +23487 'Care' 4 +23488 'Carl' 4 +23489 'Cart' 4 +23490 'Case' 4 +23491 'Cash' 4 +23492 'Cast' 4 +23493 'Cath' 4 +23494 'Cell' 4 +23495 'Cent' 4 +23496 'Cert' 4 +23497 'Chan' 4 +23498 'Chap' 4 +23499 'Char' 4 +23500 'Chat' 4 +23501 'Chem' 4 +23502 'Chen' 4 +23503 'Chip' 4 +23504 'Circ' 4 +23505 'City' 4 +23506 'Clar' 4 +23507 'Clip' 4 +23508 'Club' 4 +23509 'Code' 4 +23510 'Coin' 4 +23511 'Cold' 4 +23512 'Cole' 4 +23513 'Coll' 4 +23514 'Cols' 4 +23515 'Comb' 4 +23516 'Come' 4 +23517 'Comm' 4 +23518 'Comp' 4 +23519 'Cond' 4 +23520 'Conf' 4 +23521 'Cong' 4 +23522 'Conn' 4 +23523 'Cons' 4 +23524 'Cont' 4 +23525 'Conv' 4 +23526 'Cook' 4 +23527 'Cool' 4 +23528 'Copy' 4 +23529 'Core' 4 +23530 'Corn' 4 +23531 'Corp' 4 +23532 'Cost' 4 +23533 'Cour' 4 +23534 'Cred' 4 +23535 'Crit' 4 +23536 'Crop' 4 +23537 'Ctrl' 4 +23538 'Cube' 4 +23539 'Curr' 4 +23540 'DATA' 4 +23541 'DATE' 4 +23542 'DECL' 4 +23543 'DESC' 4 +23544 'DIFF' 4 +23545 'DIST' 4 +23546 'DONE' 4 +23547 'DOWN' 4 +23548 'DRAW' 4 +23549 'DROP' 4 +23550 'Damn' 4 +23551 'Dark' 4 +23552 'Dash' 4 +23553 'Data' 4 +23554 'Date' 4 +23555 'Dave' 4 +23556 'Days' 4 +23557 'Dead' 4 +23558 'Dear' 4 +23559 'Decl' 4 +23560 'Deep' 4 +23561 'Dele' 4 +23562 'Demo' 4 +23563 'Desc' 4 +23564 'Dest' 4 +23565 'Diam' 4 +23566 'Dick' 4 +23567 'Dict' 4 +23568 'Diff' 4 +23569 'Dire' 4 +23570 'Disc' 4 +23571 'Disk' 4 
+23572 'Disp' 4 +23573 'Dist' 4 +23574 'Dock' 4 +23575 'Docs' 4 +23576 'Does' 4 +23577 'Done' 4 +23578 'Door' 4 +23579 'Doug' 4 +23580 'Down' 4 +23581 'Drag' 4 +23582 'Draw' 4 +23583 'Drop' 4 +23584 'Drug' 4 +23585 'Dump' 4 +23586 'EDIT' 4 +23587 'EEEE' 4 +23588 'EGIN' 4 +23589 'EMPL' 4 +23590 'ENCE' 4 +23591 'ENCY' 4 +23592 'ENER' 4 +23593 'ENSE' 4 +23594 'ENTS' 4 +23595 'ERIC' 4 +23596 'ESCO' 4 +23597 'EXEC' 4 +23598 'EXIT' 4 +23599 'Each' 4 +23600 'East' 4 +23601 'Easy' 4 +23602 'Echo' 4 +23603 'Edge' 4 +23604 'Edit' 4 +23605 'Educ' 4 +23606 'Elem' 4 +23607 'Else' 4 +23608 'Emer' 4 +23609 'Emit' 4 +23610 'Enum' 4 +23611 'Eric' 4 +23612 'Euro' 4 +23613 'Eval' 4 +23614 'Even' 4 +23615 'Ever' 4 +23616 'Exec' 4 +23617 'Exit' 4 +23618 'Expl' 4 +23619 'Expr' 4 +23620 'FACE' 4 +23621 'FAIL' 4 +23622 'FAST' 4 +23623 'FFER' 4 +23624 'FFFF' 4 +23625 'FILE' 4 +23626 'FLAG' 4 +23627 'FLOW' 4 +23628 'FONT' 4 +23629 'FORE' 4 +23630 'FORM' 4 +23631 'FREE' 4 +23632 'FROM' 4 +23633 'FULL' 4 +23634 'FUNC' 4 +23635 'Face' 4 +23636 'Fact' 4 +23637 'Fail' 4 +23638 'Fair' 4 +23639 'Fake' 4 +23640 'Fall' 4 +23641 'Farm' 4 +23642 'Fast' 4 +23643 'Feed' 4 +23644 'Feel' 4 +23645 'File' 4 +23646 'Fill' 4 +23647 'Film' 4 +23648 'Find' 4 +23649 'Fine' 4 +23650 'Fire' 4 +23651 'Fish' 4 +23652 'Five' 4 +23653 'Flag' 4 +23654 'Flat' 4 +23655 'Flex' 4 +23656 'Flip' 4 +23657 'Flor' 4 +23658 'Flow' 4 +23659 'Fold' 4 +23660 'Font' 4 +23661 'Food' 4 +23662 'Foot' 4 +23663 'Ford' 4 +23664 'Fore' 4 +23665 'Form' 4 +23666 'Fort' 4 +23667 'Four' 4 +23668 'Frag' 4 +23669 'Fran' 4 +23670 'Fred' 4 +23671 'Free' 4 +23672 'From' 4 +23673 'Fuck' 4 +23674 'Full' 4 +23675 'Func' 4 +23676 'Fund' 4 +23677 'Für' 4 +23678 'GPIO' 4 +23679 'GRAM' 4 +23680 'GUID' 4 +23681 'Gain' 4 +23682 'Game' 4 +23683 'Gary' 4 +23684 'Gate' 4 +23685 'Gene' 4 +23686 'Geom' 4 +23687 'Germ' 4 +23688 'Gest' 4 +23689 'Girl' 4 +23690 'Give' 4 +23691 'Glob' 4 +23692 'Goal' 4 +23693 'Gold' 4 +23694 'Good' 4 +23695 'Grab' 4 +23696 'Grad' 4 +23697 'Gram' 4 +23698 'Gran' 4 +23699 'Gray' 4 +23700 'Greg' 4 +23701 'Grid' 4 +23702 'Grow' 4 +23703 'Guid' 4 +23704 'HAND' 4 +23705 'HASH' 4 +23706 'HEAD' 4 +23707 'HERE' 4 +23708 'HIGH' 4 +23709 'HOME' 4 +23710 'HOST' 4 +23711 'HOUT' 4 +23712 'HTML' 4 +23713 'HTTP' 4 +23714 'Half' 4 +23715 'Hall' 4 +23716 'Hand' 4 +23717 'Hang' 4 +23718 'Hard' 4 +23719 'Hart' 4 +23720 'Hash' 4 +23721 'Have' 4 +23722 'Head' 4 +23723 'Heap' 4 +23724 'Heat' 4 +23725 'Hell' 4 +23726 'Help' 4 +23727 'Here' 4 +23728 'Hero' 4 +23729 'Hide' 4 +23730 'High' 4 +23731 'Hill' 4 +23732 'Hint' 4 +23733 'Hist' 4 +23734 'Hold' 4 +23735 'Holy' 4 +23736 'Home' 4 +23737 'Hong' 4 +23738 'Hook' 4 +23739 'Hope' 4 +23740 'Host' 4 +23741 'Hour' 4 +23742 'Html' 4 +23743 'Http' 4 +23744 'Hung' 4 +23745 'IBLE' 4 +23746 'IBUT' 4 +23747 'ICAL' 4 +23748 'ICAg' 4 +23749 'ICES' 4 +23750 'ICLE' 4 +23751 'ICON' 4 +23752 'IDER' 4 +23753 'IDTH' 4 +23754 'IEEE' 4 +23755 'IENT' 4 +23756 'IFIC' 4 +23757 'IGHT' 4 +23758 'ILED' 4 +23759 'ILLE' 4 +23760 'IMAL' 4 +23761 'IMIT' 4 +23762 'INCT' 4 +23763 'INES' 4 +23764 'INFO' 4 +23765 'INGS' 4 +23766 'INIT' 4 +23767 'INST' 4 +23768 'IONS' 4 +23769 'IOUS' 4 +23770 'IRED' 4 +23771 'IRST' 4 +23772 'ISBN' 4 +23773 'ISON' 4 +23774 'ISTR' 4 +23775 'ISTS' 4 +23776 'ITAL' 4 +23777 'ITCH' 4 +23778 'ITED' 4 +23779 'ITEM' 4 +23780 'ITER' 4 +23781 'ITES' 4 +23782 'ITLE' 4 +23783 'ITOR' 4 +23784 'IVER' 4 +23785 'IZED' 4 +23786 'IZER' 4 +23787 'Icon' 4 +23788 'Idle' 4 +23789 'Impl' 4 +23790 'Infl' 4 +23791 'Info' 4 +23792 'Init' 4 +23793 'Insp' 4 
+23794 'Inst' 4 +23795 'Into' 4 +23796 'Iran' 4 +23797 'Iron' 4 +23798 'Ital' 4 +23799 'Item' 4 +23800 'Iter' 4 +23801 'IÓN' 4 +23802 'JECT' 4 +23803 'JOIN' 4 +23804 'JSON' 4 +23805 'JUST' 4 +23806 'Jack' 4 +23807 'Jane' 4 +23808 'Java' 4 +23809 'Jean' 4 +23810 'Jeff' 4 +23811 'Jess' 4 +23812 'Jobs' 4 +23813 'John' 4 +23814 'Join' 4 +23815 'Jose' 4 +23816 'Josh' 4 +23817 'Json' 4 +23818 'July' 4 +23819 'Jump' 4 +23820 'June' 4 +23821 'Just' 4 +23822 'KEEP' 4 +23823 'Kate' 4 +23824 'Keep' 4 +23825 'Kenn' 4 +23826 'Keys' 4 +23827 'Kill' 4 +23828 'Kind' 4 +23829 'King' 4 +23830 'Know' 4 +23831 'LAND' 4 +23832 'LANG' 4 +23833 'LAST' 4 +23834 'LDAP' 4 +23835 'LEAN' 4 +23836 'LEAR' 4 +23837 'LECT' 4 +23838 'LEFT' 4 +23839 'LETE' 4 +23840 'LINE' 4 +23841 'LINK' 4 +23842 'LIST' 4 +23843 'LOAD' 4 +23844 'LOAT' 4 +23845 'LOCK' 4 +23846 'LONG' 4 +23847 'LOOP' 4 +23848 'LSTM' 4 +23849 'Lady' 4 +23850 'Lake' 4 +23851 'Land' 4 +23852 'Lang' 4 +23853 'Last' 4 +23854 'Late' 4 +23855 'Lazy' 4 +23856 'Lead' 4 +23857 'Leaf' 4 +23858 'Lean' 4 +23859 'Lear' 4 +23860 'Left' 4 +23861 'Leon' 4 +23862 'Less' 4 +23863 'Life' 4 +23864 'Like' 4 +23865 'Line' 4 +23866 'Link' 4 +23867 'List' 4 +23868 'Lite' 4 +23869 'Live' 4 +23870 'Load' 4 +23871 'Lock' 4 +23872 'Logo' 4 +23873 'Long' 4 +23874 'Look' 4 +23875 'Loop' 4 +23876 'Lord' 4 +23877 'Loss' 4 +23878 'Lost' 4 +23879 'Love' 4 +23880 'Luke' 4 +23881 'MAIL' 4 +23882 'MAIN' 4 +23883 'MAKE' 4 +23884 'MARK' 4 +23885 'MASK' 4 +23886 'MBOL' 4 +23887 'MENT' 4 +23888 'MENU' 4 +23889 'MESS' 4 +23890 'META' 4 +23891 'MISS' 4 +23892 'MMMM' 4 +23893 'MODE' 4 +23894 'MORE' 4 +23895 'MULT' 4 +23896 'Mach' 4 +23897 'Made' 4 +23898 'Magn' 4 +23899 'Mail' 4 +23900 'Main' 4 +23901 'Make' 4 +23902 'Male' 4 +23903 'Many' 4 +23904 'Maps' 4 +23905 'Marc' 4 +23906 'Marg' 4 +23907 'Mark' 4 +23908 'Mart' 4 +23909 'Mary' 4 +23910 'Mask' 4 +23911 'Mass' 4 +23912 'Math' 4 +23913 'Matt' 4 +23914 'Mean' 4 +23915 'Meet' 4 +23916 'Memo' 4 +23917 'Menu' 4 +23918 'Merc' 4 +23919 'Mesh' 4 +23920 'Mess' 4 +23921 'Meta' 4 +23922 'Mich' 4 +23923 'Mike' 4 +23924 'Mill' 4 +23925 'Mind' 4 +23926 'Mini' 4 +23927 'Misc' 4 +23928 'Miss' 4 +23929 'Mock' 4 +23930 'Mode' 4 +23931 'Mont' 4 +23932 'Moon' 4 +23933 'More' 4 +23934 'Most' 4 +23935 'Move' 4 +23936 'Much' 4 +23937 'Mult' 4 +23938 'Must' 4 +23939 'NAME' 4 +23940 'NASA' 4 +23941 'NECT' 4 +23942 'NESS' 4 +23943 'NEWS' 4 +23944 'NEXT' 4 +23945 'NING' 4 +23946 'NODE' 4 +23947 'NONE' 4 +23948 'NOTE' 4 +23949 'NULL' 4 +23950 'Name' 4 +23951 'Near' 4 +23952 'Need' 4 +23953 'Neil' 4 +23954 'News' 4 +23955 'Next' 4 +23956 'Nice' 4 +23957 'Nick' 4 +23958 'Node' 4 +23959 'Nome' 4 +23960 'None' 4 +23961 'Norm' 4 +23962 'Note' 4 +23963 'Nova' 4 +23964 'Null' 4 +23965 'Não' 4 +23966 'ONES' 4 +23967 'ONLY' 4 +23968 'OPEN' 4 +23969 'OPER' 4 +23970 'ORIZ' 4 +23971 'OTAL' 4 +23972 'OUND' 4 +23973 'OVER' 4 +23974 'OWER' 4 +23975 'Ohio' 4 +23976 'Okay' 4 +23977 'Once' 4 +23978 'Only' 4 +23979 'Oops' 4 +23980 'Open' 4 +23981 'Oper' 4 +23982 'Opts' 4 +23983 'Orig' 4 +23984 'Over' 4 +23985 'PACK' 4 +23986 'PAGE' 4 +23987 'PART' 4 +23988 'PASS' 4 +23989 'PATH' 4 +23990 'PECT' 4 +23991 'PING' 4 +23992 'PLAY' 4 +23993 'PORT' 4 +23994 'POSE' 4 +23995 'POST' 4 +23996 'PRES' 4 +23997 'PROC' 4 +23998 'PROP' 4 +23999 'PUBL' 4 +24000 'Pack' 4 +24001 'Page' 4 +24002 'Pain' 4 +24003 'Pair' 4 +24004 'Pane' 4 +24005 'Para' 4 +24006 'Park' 4 +24007 'Part' 4 +24008 'Pass' 4 +24009 'Past' 4 +24010 'Path' 4 +24011 'Paul' 4 +24012 'Pear' 4 +24013 'Peer' 4 +24014 'Perm' 4 +24015 'Pers' 4 
+24016 'Phil' 4 +24017 'Phot' 4 +24018 'Phys' 4 +24019 'Pick' 4 +24020 'Pier' 4 +24021 'Ping' 4 +24022 'Pipe' 4 +24023 'Plan' 4 +24024 'Play' 4 +24025 'Plot' 4 +24026 'Plug' 4 +24027 'Plus' 4 +24028 'Poll' 4 +24029 'Poly' 4 +24030 'Pont' 4 +24031 'Pool' 4 +24032 'Poor' 4 +24033 'Port' 4 +24034 'Pose' 4 +24035 'Poss' 4 +24036 'Post' 4 +24037 'Pour' 4 +24038 'Prec' 4 +24039 'Pred' 4 +24040 'Pref' 4 +24041 'Prem' 4 +24042 'Prep' 4 +24043 'Pres' 4 +24044 'Prev' 4 +24045 'Prim' 4 +24046 'Priv' 4 +24047 'Prob' 4 +24048 'Proc' 4 +24049 'Prod' 4 +24050 'Prof' 4 +24051 'Prog' 4 +24052 'Proj' 4 +24053 'Prom' 4 +24054 'Prop' 4 +24055 'Pros' 4 +24056 'Prot' 4 +24057 'Prov' 4 +24058 'Pull' 4 +24059 'Pure' 4 +24060 'Push' 4 +24061 'QUAL' 4 +24062 'Quad' 4 +24063 'Qual' 4 +24064 'Quit' 4 +24065 'Qué' 4 +24066 'RATE' 4 +24067 'READ' 4 +24068 'REAL' 4 +24069 'REAM' 4 +24070 'RECT' 4 +24071 'RENT' 4 +24072 'REPL' 4 +24073 'REQU' 4 +24074 'RESH' 4 +24075 'RESS' 4 +24076 'REST' 4 +24077 'RGBA' 4 +24078 'RIPT' 4 +24079 'RNAs' 4 +24080 'ROLE' 4 +24081 'ROLL' 4 +24082 'ROOT' 4 +24083 'ROUP' 4 +24084 'ROUT' 4 +24085 'Race' 4 +24086 'Radi' 4 +24087 'Rail' 4 +24088 'Rain' 4 +24089 'Rand' 4 +24090 'Rank' 4 +24091 'Rate' 4 +24092 'ReLU' 4 +24093 'Read' 4 +24094 'Real' 4 +24095 'Rece' 4 +24096 'Rect' 4 +24097 'Repo' 4 +24098 'Resp' 4 +24099 'Rest' 4 +24100 'Rich' 4 +24101 'Rick' 4 +24102 'Ring' 4 +24103 'Risk' 4 +24104 'Road' 4 +24105 'Rock' 4 +24106 'Role' 4 +24107 'Roll' 4 +24108 'Room' 4 +24109 'Root' 4 +24110 'Rose' 4 +24111 'Ross' 4 +24112 'Rout' 4 +24113 'Rows' 4 +24114 'Ruby' 4 +24115 'Rule' 4 +24116 'Russ' 4 +24117 'Ryan' 4 +24118 'SAME' 4 +24119 'SCAN' 4 +24120 'SELF' 4 +24121 'SENT' 4 +24122 'SEQU' 4 +24123 'SHOT' 4 +24124 'SIGN' 4 +24125 'SION' 4 +24126 'SIZE' 4 +24127 'SKIP' 4 +24128 'SMTP' 4 +24129 'SPEC' 4 +24130 'STAR' 4 +24131 'STAT' 4 +24132 'STEM' 4 +24133 'STEP' 4 +24134 'STER' 4 +24135 'STIT' 4 +24136 'STOP' 4 +24137 'STRU' 4 +24138 'Safe' 4 +24139 'Sale' 4 +24140 'Salt' 4 +24141 'Same' 4 +24142 'Sand' 4 +24143 'Sans' 4 +24144 'Save' 4 +24145 'Scal' 4 +24146 'Scan' 4 +24147 'Sche' 4 +24148 'Seed' 4 +24149 'Seek' 4 +24150 'Self' 4 +24151 'Sell' 4 +24152 'Send' 4 +24153 'Sent' 4 +24154 'Sept' 4 +24155 'Sequ' 4 +24156 'Serv' 4 +24157 'Sets' 4 +24158 'Shar' 4 +24159 'Sher' 4 +24160 'Ship' 4 +24161 'Shop' 4 +24162 'Shot' 4 +24163 'Show' 4 +24164 'Side' 4 +24165 'Sign' 4 +24166 'Sing' 4 +24167 'Sink' 4 +24168 'Site' 4 +24169 'Size' 4 +24170 'Skin' 4 +24171 'Skip' 4 +24172 'Slot' 4 +24173 'Slow' 4 +24174 'Snap' 4 +24175 'Snow' 4 +24176 'Soft' 4 +24177 'Sold' 4 +24178 'Some' 4 +24179 'Song' 4 +24180 'Sony' 4 +24181 'Soon' 4 +24182 'Sort' 4 +24183 'Soup' 4 +24184 'Span' 4 +24185 'Spec' 4 +24186 'Spin' 4 +24187 'Spot' 4 +24188 'Stan' 4 +24189 'Star' 4 +24190 'Stat' 4 +24191 'Stay' 4 +24192 'Step' 4 +24193 'Stmt' 4 +24194 'Stop' 4 +24195 'Stra' 4 +24196 'Stre' 4 +24197 'Stub' 4 +24198 'Stud' 4 +24199 'Such' 4 +24200 'Suit' 4 +24201 'Supp' 4 +24202 'Sure' 4 +24203 'Swap' 4 +24204 'Sync' 4 +24205 'TAIN' 4 +24206 'TASK' 4 +24207 'TEMP' 4 +24208 'TERN' 4 +24209 'TEST' 4 +24210 'TEXT' 4 +24211 'THER' 4 +24212 'THIS' 4 +24213 'THON' 4 +24214 'TIME' 4 +24215 'TING' 4 +24216 'TION' 4 +24217 'TODO' 4 +24218 'TOOL' 4 +24219 'TRAN' 4 +24220 'TRUE' 4 +24221 'TYPE' 4 +24222 'Tabs' 4 +24223 'Tags' 4 +24224 'Tail' 4 +24225 'Take' 4 +24226 'Talk' 4 +24227 'Tang' 4 +24228 'Task' 4 +24229 'Team' 4 +24230 'Tech' 4 +24231 'Tele' 4 +24232 'Tell' 4 +24233 'Temp' 4 +24234 'Term' 4 +24235 'Test' 4 +24236 'Text' 4 +24237 'Than' 4 
+24238 'That' 4 +24239 'Then' 4 +24240 'Ther' 4 +24241 'They' 4 +24242 'This' 4 +24243 'Thus' 4 +24244 'Tick' 4 +24245 'Tile' 4 +24246 'Time' 4 +24247 'Tipo' 4 +24248 'Tips' 4 +24249 'Todo' 4 +24250 'Tony' 4 +24251 'Tool' 4 +24252 'Tour' 4 +24253 'Town' 4 +24254 'Trad' 4 +24255 'Tree' 4 +24256 'Trim' 4 +24257 'Trip' 4 +24258 'True' 4 +24259 'Tube' 4 +24260 'Turn' 4 +24261 'Type' 4 +24262 'Tên' 4 +24263 'UBLE' 4 +24264 'UILD' 4 +24265 'UINT' 4 +24266 'UInt' 4 +24267 'ULAR' 4 +24268 'UNIT' 4 +24269 'URAL' 4 +24270 'URES' 4 +24271 'USED' 4 +24272 'USER' 4 +24273 'UUID' 4 +24274 'Uint' 4 +24275 'Undo' 4 +24276 'Unit' 4 +24277 'Unix' 4 +24278 'Upon' 4 +24279 'Urls' 4 +24280 'Used' 4 +24281 'User' 4 +24282 'Util' 4 +24283 'VARI' 4 +24284 'VENT' 4 +24285 'VERS' 4 +24286 'VERT' 4 +24287 'VICE' 4 +24288 'VIEW' 4 +24289 'Vari' 4 +24290 'Vars' 4 +24291 'Verb' 4 +24292 'Vers' 4 +24293 'Vert' 4 +24294 'Very' 4 +24295 'Vict' 4 +24296 'Viet' 4 +24297 'View' 4 +24298 'Vill' 4 +24299 'Viol' 4 +24300 'Void' 4 +24301 'Vote' 4 +24302 'Vous' 4 +24303 'WAIT' 4 +24304 'WARD' 4 +24305 'WARE' 4 +24306 'WARN' 4 +24307 'WAYS' 4 +24308 'WEEN' 4 +24309 'WHAT' 4 +24310 'WISE' 4 +24311 'WITH' 4 +24312 'WORD' 4 +24313 'WORK' 4 +24314 'Wait' 4 +24315 'Walk' 4 +24316 'Wall' 4 +24317 'Wang' 4 +24318 'Want' 4 +24319 'Warn' 4 +24320 'Wave' 4 +24321 'Weak' 4 +24322 'Week' 4 +24323 'Well' 4 +24324 'Were' 4 +24325 'West' 4 +24326 'What' 4 +24327 'When' 4 +24328 'Whit' 4 +24329 'Wide' 4 +24330 'Wiki' 4 +24331 'Wild' 4 +24332 'Will' 4 +24333 'Wind' 4 +24334 'Wire' 4 +24335 'With' 4 +24336 'Wolf' 4 +24337 'Wood' 4 +24338 'Word' 4 +24339 'Work' 4 +24340 'Wrap' 4 +24341 'Writ' 4 +24342 'XXXX' 4 +24343 'YEAR' 4 +24344 'YYYY' 4 +24345 'Yang' 4 +24346 'Yeah' 4 +24347 'Year' 4 +24348 'York' 4 +24349 'Your' 4 +24350 'ZERO' 4 +24351 'ZONE' 4 +24352 'Zero' 4 +24353 'Zone' 4 +24354 'Zoom' 4 +24355 '\\\\\\\\' 4 +24356 '])))' 4 +24357 '])),' 4 +24358 ']));' 4 +24359 '^^^^' 4 +24360 '^−' 4 +24361 '__("' 4 +24362 '__()' 4 +24363 '____' 4 +24364 'aaaa' 4 +24365 'abad' 4 +24366 'abal' 4 +24367 'aban' 4 +24368 'abar' 4 +24369 'abbr' 4 +24370 'abcd' 4 +24371 'abei' 4 +24372 'abel' 4 +24373 'aben' 4 +24374 'aber' 4 +24375 'abet' 4 +24376 'abil' 4 +24377 'abin' 4 +24378 'abis' 4 +24379 'abit' 4 +24380 'abla' 4 +24381 'able' 4 +24382 'ablo' 4 +24383 'ably' 4 +24384 'abol' 4 +24385 'abor' 4 +24386 'abul' 4 +24387 'abus' 4 +24388 'abwe' 4 +24389 'acao' 4 +24390 'acas' 4 +24391 'acci' 4 +24392 'acco' 4 +24393 'acea' 4 +24394 'aced' 4 +24395 'acer' 4 +24396 'aces' 4 +24397 'acet' 4 +24398 'acey' 4 +24399 'acha' 4 +24400 'ache' 4 +24401 'achi' 4 +24402 'acho' 4 +24403 'acht' 4 +24404 'achu' 4 +24405 'achy' 4 +24406 'acia' 4 +24407 'acic' 4 +24408 'acid' 4 +24409 'acin' 4 +24410 'acio' 4 +24411 'acks' 4 +24412 'acle' 4 +24413 'acon' 4 +24414 'acos' 4 +24415 'acre' 4 +24416 'acro' 4 +24417 'acts' 4 +24418 'acus' 4 +24419 'ací' 4 +24420 'adal' 4 +24421 'adam' 4 +24422 'adan' 4 +24423 'adas' 4 +24424 'aday' 4 +24425 'addr' 4 +24426 'addy' 4 +24427 'aded' 4 +24428 'adel' 4 +24429 'adem' 4 +24430 'aden' 4 +24431 'ader' 4 +24432 'ades' 4 +24433 'adia' 4 +24434 'adic' 4 +24435 'adin' 4 +24436 'adir' 4 +24437 'adoc' 4 +24438 'ador' 4 +24439 'ados' 4 +24440 'adow' 4 +24441 'adó' 4 +24442 'aeda' 4 +24443 'afen' 4 +24444 'affe' 4 +24445 'afia' 4 +24446 'afka' 4 +24447 'afé' 4 +24448 'agan' 4 +24449 'agar' 4 +24450 'agas' 4 +24451 'aged' 4 +24452 'agem' 4 +24453 'agen' 4 +24454 'ager' 4 +24455 'ages' 4 +24456 'agic' 4 +24457 'agin' 4 +24458 'agit' 4 +24459 'agle' 4 
+24460 'agli' 4 +24461 'agma' 4 +24462 'agna' 4 +24463 'agne' 4 +24464 'agog' 4 +24465 'agon' 4 +24466 'agos' 4 +24467 'agra' 4 +24468 'agua' 4 +24469 'ague' 4 +24470 'agus' 4 +24471 'ahan' 4 +24472 'ahoo' 4 +24473 'aign' 4 +24474 'ails' 4 +24475 'aily' 4 +24476 'aina' 4 +24477 'aine' 4 +24478 'ains' 4 +24479 'aint' 4 +24480 'aird' 4 +24481 'aire' 4 +24482 'airo' 4 +24483 'airs' 4 +24484 'airy' 4 +24485 'aise' 4 +24486 'aisy' 4 +24487 'ajan' 4 +24488 'ajas' 4 +24489 'ajax' 4 +24490 'ajes' 4 +24491 'ajor' 4 +24492 'ają' 4 +24493 'akan' 4 +24494 'aked' 4 +24495 'aken' 4 +24496 'aker' 4 +24497 'akes' 4 +24498 'akia' 4 +24499 'akin' 4 +24500 'akis' 4 +24501 'akov' 4 +24502 'alam' 4 +24503 'alan' 4 +24504 'alar' 4 +24505 'aldi' 4 +24506 'aldo' 4 +24507 'aleb' 4 +24508 'aled' 4 +24509 'alem' 4 +24510 'alen' 4 +24511 'aler' 4 +24512 'ales' 4 +24513 'alex' 4 +24514 'aley' 4 +24515 'alez' 4 +24516 'algo' 4 +24517 'alia' 4 +24518 'alin' 4 +24519 'alis' 4 +24520 'alla' 4 +24521 'alle' 4 +24522 'alli' 4 +24523 'allo' 4 +24524 'alls' 4 +24525 'ally' 4 +24526 'alog' 4 +24527 'alom' 4 +24528 'alon' 4 +24529 'alph' 4 +24530 'alsa' 4 +24531 'alse' 4 +24532 'also' 4 +24533 'alta' 4 +24534 'alth' 4 +24535 'alty' 4 +24536 'alus' 4 +24537 'amac' 4 +24538 'aman' 4 +24539 'amar' 4 +24540 'amas' 4 +24541 'amat' 4 +24542 'amaz' 4 +24543 'amba' 4 +24544 'ambi' 4 +24545 'ambo' 4 +24546 'amed' 4 +24547 'amel' 4 +24548 'amen' 4 +24549 'amer' 4 +24550 'ames' 4 +24551 'amic' 4 +24552 'amil' 4 +24553 'amin' 4 +24554 'amis' 4 +24555 'amma' 4 +24556 'amon' 4 +24557 'amos' 4 +24558 'ampa' 4 +24559 'ampl' 4 +24560 'amps' 4 +24561 'amus' 4 +24562 'anal' 4 +24563 'anan' 4 +24564 'anas' 4 +24565 'anca' 4 +24566 'ance' 4 +24567 'anch' 4 +24568 'anco' 4 +24569 'ancy' 4 +24570 'anda' 4 +24571 'ande' 4 +24572 'andi' 4 +24573 'ando' 4 +24574 'andr' 4 +24575 'ands' 4 +24576 'andy' 4 +24577 'aned' 4 +24578 'anel' 4 +24579 'anes' 4 +24580 'aney' 4 +24581 'anga' 4 +24582 'ange' 4 +24583 'angi' 4 +24584 'ango' 4 +24585 'angs' 4 +24586 'angu' 4 +24587 'ania' 4 +24588 'anic' 4 +24589 'anie' 4 +24590 'anim' 4 +24591 'anja' 4 +24592 'anje' 4 +24593 'anka' 4 +24594 'anke' 4 +24595 'anks' 4 +24596 'anna' 4 +24597 'anne' 4 +24598 'anni' 4 +24599 'anno' 4 +24600 'anny' 4 +24601 'anol' 4 +24602 'anon' 4 +24603 'anor' 4 +24604 'anos' 4 +24605 'anse' 4 +24606 'ansi' 4 +24607 'ansk' 4 +24608 'anst' 4 +24609 'answ' 4 +24610 'anta' 4 +24611 'ante' 4 +24612 'anth' 4 +24613 'anti' 4 +24614 'anto' 4 +24615 'ants' 4 +24616 'antz' 4 +24617 'anus' 4 +24618 'anut' 4 +24619 'anya' 4 +24620 'anye' 4 +24621 'anyl' 4 +24622 'anza' 4 +24623 'anç' 4 +24624 'apan' 4 +24625 'apat' 4 +24626 'aped' 4 +24627 'aper' 4 +24628 'apes' 4 +24629 'apid' 4 +24630 'apis' 4 +24631 'apon' 4 +24632 'apor' 4 +24633 'appa' 4 +24634 'appe' 4 +24635 'appl' 4 +24636 'apps' 4 +24637 'appy' 4 +24638 'apro' 4 +24639 'apse' 4 +24640 'apur' 4 +24641 'aque' 4 +24642 'arak' 4 +24643 'aram' 4 +24644 'aran' 4 +24645 'aras' 4 +24646 'arat' 4 +24647 'arch' 4 +24648 'arda' 4 +24649 'arde' 4 +24650 'ardi' 4 +24651 'ardo' 4 +24652 'ards' 4 +24653 'area' 4 +24654 'ared' 4 +24655 'arel' 4 +24656 'arem' 4 +24657 'aren' 4 +24658 'arer' 4 +24659 'ares' 4 +24660 'aret' 4 +24661 'arez' 4 +24662 'arga' 4 +24663 'arge' 4 +24664 'argo' 4 +24665 'args' 4 +24666 'argv' 4 +24667 'aria' 4 +24668 'arie' 4 +24669 'arin' 4 +24670 'ario' 4 +24671 'aris' 4 +24672 'arks' 4 +24673 'arlo' 4 +24674 'arly' 4 +24675 'arma' 4 +24676 'arms' 4 +24677 'arna' 4 +24678 'aron' 4 +24679 'aroo' 4 +24680 'arra' 4 +24681 'arri' 4 
+24682 'arro' 4 +24683 'arry' 4 +24684 'arse' 4 +24685 'arta' 4 +24686 'arte' 4 +24687 'arth' 4 +24688 'arti' 4 +24689 'arto' 4 +24690 'arts' 4 +24691 'arty' 4 +24692 'artz' 4 +24693 'arum' 4 +24694 'arus' 4 +24695 'arya' 4 +24696 'aryl' 4 +24697 'ará' 4 +24698 'aré' 4 +24699 'arı' 4 +24700 'asan' 4 +24701 'asar' 4 +24702 'asci' 4 +24703 'asco' 4 +24704 'ased' 4 +24705 'aser' 4 +24706 'ases' 4 +24707 'aset' 4 +24708 'asha' 4 +24709 'ashi' 4 +24710 'asia' 4 +24711 'asic' 4 +24712 'asin' 4 +24713 'asio' 4 +24714 'asis' 4 +24715 'aska' 4 +24716 'asks' 4 +24717 'asma' 4 +24718 'ason' 4 +24719 'aspx' 4 +24720 'assa' 4 +24721 'asse' 4 +24722 'assi' 4 +24723 'asso' 4 +24724 'assy' 4 +24725 'asta' 4 +24726 'aste' 4 +24727 'asti' 4 +24728 'asto' 4 +24729 'astr' 4 +24730 'asts' 4 +24731 'asty' 4 +24732 'asus' 4 +24733 'atal' 4 +24734 'atan' 4 +24735 'atar' 4 +24736 'atas' 4 +24737 'atch' 4 +24738 'ated' 4 +24739 'ateg' 4 +24740 'atel' 4 +24741 'atem' 4 +24742 'aten' 4 +24743 'ater' 4 +24744 'ates' 4 +24745 'atex' 4 +24746 'atha' 4 +24747 'athe' 4 +24748 'athi' 4 +24749 'aths' 4 +24750 'athy' 4 +24751 'atia' 4 +24752 'atic' 4 +24753 'atie' 4 +24754 'atif' 4 +24755 'atin' 4 +24756 'atio' 4 +24757 'atis' 4 +24758 'ativ' 4 +24759 'atol' 4 +24760 'atom' 4 +24761 'aton' 4 +24762 'ator' 4 +24763 'atos' 4 +24764 'atra' 4 +24765 'atre' 4 +24766 'atri' 4 +24767 'atro' 4 +24768 'atsu' 4 +24769 'atta' 4 +24770 'atte' 4 +24771 'atti' 4 +24772 'attn' 4 +24773 'atto' 4 +24774 'attr' 4 +24775 'atts' 4 +24776 'atum' 4 +24777 'atur' 4 +24778 'atus' 4 +24779 'ató' 4 +24780 'ată' 4 +24781 'auch' 4 +24782 'audi' 4 +24783 'auer' 4 +24784 'auff' 4 +24785 'auge' 4 +24786 'augh' 4 +24787 'ault' 4 +24788 'aupt' 4 +24789 'aura' 4 +24790 'ause' 4 +24791 'auss' 4 +24792 'auth' 4 +24793 'auto' 4 +24794 'aval' 4 +24795 'avan' 4 +24796 'avar' 4 +24797 'avas' 4 +24798 'aved' 4 +24799 'avel' 4 +24800 'aven' 4 +24801 'aver' 4 +24802 'aves' 4 +24803 'avez' 4 +24804 'avia' 4 +24805 'avid' 4 +24806 'avig' 4 +24807 'avin' 4 +24808 'avis' 4 +24809 'avor' 4 +24810 'away' 4 +24811 'awks' 4 +24812 'axes' 4 +24813 'axis' 4 +24814 'axon' 4 +24815 'ayan' 4 +24816 'ayed' 4 +24817 'ayer' 4 +24818 'azar' 4 +24819 'azed' 4 +24820 'azer' 4 +24821 'azon' 4 +24822 'azzi' 4 +24823 'azzo' 4 +24824 'ază' 4 +24825 'aña' 4 +24826 'ała' 4 +24827 'ało' 4 +24828 'ały' 4 +24829 'baby' 4 +24830 'bach' 4 +24831 'back' 4 +24832 'bage' 4 +24833 'bags' 4 +24834 'ball' 4 +24835 'band' 4 +24836 'bane' 4 +24837 'bang' 4 +24838 'bank' 4 +24839 'bara' 4 +24840 'bard' 4 +24841 'bare' 4 +24842 'bars' 4 +24843 'bart' 4 +24844 'base' 4 +24845 'bash' 4 +24846 'bast' 4 +24847 'bath' 4 +24848 'baum' 4 +24849 'bbbb' 4 +24850 'bben' 4 +24851 'bbox' 4 +24852 'beam' 4 +24853 'bean' 4 +24854 'bear' 4 +24855 'beat' 4 +24856 'beck' 4 +24857 'been' 4 +24858 'beer' 4 +24859 'beit' 4 +24860 'bell' 4 +24861 'belt' 4 +24862 'bere' 4 +24863 'berg' 4 +24864 'bern' 4 +24865 'bers' 4 +24866 'bert' 4 +24867 'bery' 4 +24868 'best' 4 +24869 'beta' 4 +24870 'beth' 4 +24871 'bial' 4 +24872 'bian' 4 +24873 'bias' 4 +24874 'bies' 4 +24875 'bigg' 4 +24876 'bike' 4 +24877 'bild' 4 +24878 'bill' 4 +24879 'bilt' 4 +24880 'bind' 4 +24881 'bing' 4 +24882 'bins' 4 +24883 'bios' 4 +24884 'bird' 4 +24885 'bish' 4 +24886 'bits' 4 +24887 'bió' 4 +24888 'blah' 4 +24889 'bled' 4 +24890 'blem' 4 +24891 'bler' 4 +24892 'bles' 4 +24893 'blic' 4 +24894 'blob' 4 +24895 'blog' 4 +24896 'blue' 4 +24897 'blur' 4 +24898 'boat' 4 +24899 'body' 4 +24900 'bold' 4 +24901 'bole' 4 +24902 'bolt' 4 +24903 'bomb' 4 +24904 'bond' 
4 +24905 'bone' 4 +24906 'bons' 4 +24907 'book' 4 +24908 'bool' 4 +24909 'boot' 4 +24910 'borg' 4 +24911 'born' 4 +24912 'boro' 4 +24913 'bose' 4 +24914 'boss' 4 +24915 'both' 4 +24916 'bour' 4 +24917 'bove' 4 +24918 'bows' 4 +24919 'boys' 4 +24920 'bral' 4 +24921 'bran' 4 +24922 'bras' 4 +24923 'bred' 4 +24924 'brew' 4 +24925 'brid' 4 +24926 'bris' 4 +24927 'brit' 4 +24928 'bron' 4 +24929 'brow' 4 +24930 'buch' 4 +24931 'buck' 4 +24932 'buff' 4 +24933 'bugs' 4 +24934 'bulk' 4 +24935 'bull' 4 +24936 'bund' 4 +24937 'burg' 4 +24938 'burn' 4 +24939 'bury' 4 +24940 'busy' 4 +24941 'byte' 4 +24942 'ból' 4 +24943 'cade' 4 +24944 'cake' 4 +24945 'calc' 4 +24946 'cale' 4 +24947 'call' 4 +24948 'came' 4 +24949 'camp' 4 +24950 'cano' 4 +24951 'cant' 4 +24952 'cape' 4 +24953 'caps' 4 +24954 'capt' 4 +24955 'carb' 4 +24956 'card' 4 +24957 'care' 4 +24958 'cars' 4 +24959 'cart' 4 +24960 'case' 4 +24961 'cash' 4 +24962 'cast' 4 +24963 'cate' 4 +24964 'cats' 4 +24965 'cccc' 4 +24966 'cdot' 4 +24967 'cean' 4 +24968 'ceed' 4 +24969 'ceil' 4 +24970 'cele' 4 +24971 'cell' 4 +24972 'cent' 4 +24973 'cept' 4 +24974 'cern' 4 +24975 'cers' 4 +24976 'cert' 4 +24977 'cery' 4 +24978 'ceso' 4 +24979 'cess' 4 +24980 'chal' 4 +24981 'chan' 4 +24982 'chap' 4 +24983 'char' 4 +24984 'chas' 4 +24985 'chat' 4 +24986 'ched' 4 +24987 'chel' 4 +24988 'chem' 4 +24989 'chen' 4 +24990 'cher' 4 +24991 'ches' 4 +24992 'chet' 4 +24993 'chev' 4 +24994 'chez' 4 +24995 'chia' 4 +24996 'chie' 4 +24997 'chin' 4 +24998 'chio' 4 +24999 'chip' 4 +25000 'chor' 4 +25001 'chos' 4 +25002 'chte' 4 +25003 'chts' 4 +25004 'chus' 4 +25005 'ché' 4 +25006 'cial' 4 +25007 'cias' 4 +25008 'cido' 4 +25009 'cies' 4 +25010 'cing' 4 +25011 'cion' 4 +25012 'cipl' 4 +25013 'circ' 4 +25014 'cite' 4 +25015 'city' 4 +25016 'cium' 4 +25017 'ció' 4 +25018 'cker' 4 +25019 'cket' 4 +25020 'ckpt' 4 +25021 'clam' 4 +25022 'clar' 4 +25023 'clas' 4 +25024 'cler' 4 +25025 'cles' 4 +25026 'clic' 4 +25027 'clin' 4 +25028 'clip' 4 +25029 'clos' 4 +25030 'club' 4 +25031 'clud' 4 +25032 'clus' 4 +25033 'coal' 4 +25034 'coat' 4 +25035 'cock' 4 +25036 'code' 4 +25037 'coef' 4 +25038 'coin' 4 +25039 'cola' 4 +25040 'cold' 4 +25041 'cole' 4 +25042 'coli' 4 +25043 'coll' 4 +25044 'colm' 4 +25045 'colo' 4 +25046 'cols' 4 +25047 'coma' 4 +25048 'comb' 4 +25049 'come' 4 +25050 'comm' 4 +25051 'como' 4 +25052 'comp' 4 +25053 'conc' 4 +25054 'cond' 4 +25055 'cone' 4 +25056 'conf' 4 +25057 'cong' 4 +25058 'coni' 4 +25059 'conj' 4 +25060 'conn' 4 +25061 'cono' 4 +25062 'cons' 4 +25063 'cont' 4 +25064 'conv' 4 +25065 'cook' 4 +25066 'cool' 4 +25067 'cope' 4 +25068 'copy' 4 +25069 'cord' 4 +25070 'core' 4 +25071 'corn' 4 +25072 'corp' 4 +25073 'corr' 4 +25074 'cost' 4 +25075 'cott' 4 +25076 'cour' 4 +25077 'cout' 4 +25078 'cred' 4 +25079 'cret' 4 +25080 'crib' 4 +25081 'crit' 4 +25082 'cron' 4 +25083 'crop' 4 +25084 'crow' 4 +25085 'csrf' 4 +25086 'ctic' 4 +25087 'ctor' 4 +25088 'ctrl' 4 +25089 'cube' 4 +25090 'cuda' 4 +25091 'cule' 4 +25092 'culo' 4 +25093 'cult' 4 +25094 'curl' 4 +25095 'curr' 4 +25096 'cuts' 4 +25097 'cyan' 4 +25098 'cycl' 4 +25099 'ców' 4 +25100 'dade' 4 +25101 'dain' 4 +25102 'dale' 4 +25103 'damn' 4 +25104 'dark' 4 +25105 'dash' 4 +25106 'data' 4 +25107 'date' 4 +25108 'days' 4 +25109 'dddd' 4 +25110 'dden' 4 +25111 'dead' 4 +25112 'deal' 4 +25113 'deck' 4 +25114 'decl' 4 +25115 'deen' 4 +25116 'deep' 4 +25117 'demo' 4 +25118 'dens' 4 +25119 'dent' 4 +25120 'dept' 4 +25121 'dera' 4 +25122 'dere' 4 +25123 'dern' 4 +25124 'derr' 4 +25125 'ders' 4 +25126 'desc' 4 
+25127 'desk' 4 +25128 'dess' 4 +25129 'dest' 4 +25130 'diag' 4 +25131 'dial' 4 +25132 'dian' 4 +25133 'dice' 4 +25134 'dict' 4 +25135 'dies' 4 +25136 'diff' 4 +25137 'digo' 4 +25138 'dims' 4 +25139 'ding' 4 +25140 'dire' 4 +25141 'disc' 4 +25142 'disk' 4 +25143 'disp' 4 +25144 'diss' 4 +25145 'dist' 4 +25146 'doch' 4 +25147 'dock' 4 +25148 'docs' 4 +25149 'does' 4 +25150 'dogs' 4 +25151 'done' 4 +25152 'dong' 4 +25153 'dont' 4 +25154 'door' 4 +25155 'dorf' 4 +25156 'dose' 4 +25157 'dots' 4 +25158 'down' 4 +25159 'drag' 4 +25160 'draw' 4 +25161 'drop' 4 +25162 'drug' 4 +25163 'dual' 4 +25164 'duce' 4 +25165 'duct' 4 +25166 'duit' 4 +25167 'dule' 4 +25168 'dump' 4 +25169 'dust' 4 +25170 'duty' 4 +25171 'each' 4 +25172 'ears' 4 +25173 'east' 4 +25174 'easy' 4 +25175 'ebra' 4 +25176 'ecal' 4 +25177 'eced' 4 +25178 'eces' 4 +25179 'echa' 4 +25180 'echo' 4 +25181 'ects' 4 +25182 'edad' 4 +25183 'edar' 4 +25184 'eday' 4 +25185 'eded' 4 +25186 'edef' 4 +25187 'eden' 4 +25188 'eder' 4 +25189 'edes' 4 +25190 'edge' 4 +25191 'edia' 4 +25192 'edic' 4 +25193 'edin' 4 +25194 'edit' 4 +25195 'edly' 4 +25196 'edom' 4 +25197 'edor' 4 +25198 'educ' 4 +25199 'eeee' 4 +25200 'eful' 4 +25201 'egal' 4 +25202 'egan' 4 +25203 'egen' 4 +25204 'eger' 4 +25205 'egin' 4 +25206 'eing' 4 +25207 'eken' 4 +25208 'eker' 4 +25209 'eled' 4 +25210 'elem' 4 +25211 'elen' 4 +25212 'eler' 4 +25213 'eles' 4 +25214 'elia' 4 +25215 'elic' 4 +25216 'elif' 4 +25217 'elig' 4 +25218 'elim' 4 +25219 'elin' 4 +25220 'ella' 4 +25221 'elle' 4 +25222 'elli' 4 +25223 'ello' 4 +25224 'ells' 4 +25225 'ellt' 4 +25226 'elly' 4 +25227 'elon' 4 +25228 'elor' 4 +25229 'else' 4 +25230 'elta' 4 +25231 'elve' 4 +25232 'eman' 4 +25233 'emas' 4 +25234 'emat' 4 +25235 'emed' 4 +25236 'emen' 4 +25237 'emer' 4 +25238 'emes' 4 +25239 'emet' 4 +25240 'emia' 4 +25241 'emic' 4 +25242 'emin' 4 +25243 'emis' 4 +25244 'emit' 4 +25245 'emon' 4 +25246 'emos' 4 +25247 'empl' 4 +25248 'empt' 4 +25249 'enas' 4 +25250 'ence' 4 +25251 'ench' 4 +25252 'enci' 4 +25253 'ency' 4 +25254 'enda' 4 +25255 'ende' 4 +25256 'endi' 4 +25257 'endl' 4 +25258 'endo' 4 +25259 'ends' 4 +25260 'ened' 4 +25261 'eneg' 4 +25262 'enem' 4 +25263 'enen' 4 +25264 'ener' 4 +25265 'enes' 4 +25266 'enet' 4 +25267 'enez' 4 +25268 'enge' 4 +25269 'engl' 4 +25270 'engo' 4 +25271 'engu' 4 +25272 'enia' 4 +25273 'enic' 4 +25274 'enig' 4 +25275 'enis' 4 +25276 'enix' 4 +25277 'enko' 4 +25278 'enna' 4 +25279 'enne' 4 +25280 'enny' 4 +25281 'enos' 4 +25282 'ensa' 4 +25283 'ense' 4 +25284 'enso' 4 +25285 'enta' 4 +25286 'ente' 4 +25287 'enth' 4 +25288 'enti' 4 +25289 'ento' 4 +25290 'entr' 4 +25291 'ents' 4 +25292 'enty' 4 +25293 'enum' 4 +25294 'enza' 4 +25295 'enç' 4 +25296 'ení' 4 +25297 'eous' 4 +25298 'epad' 4 +25299 'eper' 4 +25300 'eral' 4 +25301 'eras' 4 +25302 'erca' 4 +25303 'erce' 4 +25304 'erea' 4 +25305 'ered' 4 +25306 'eree' 4 +25307 'ereg' 4 +25308 'erek' 4 +25309 'eren' 4 +25310 'erer' 4 +25311 'eres' 4 +25312 'erez' 4 +25313 'erge' 4 +25314 'ergy' 4 +25315 'eria' 4 +25316 'eric' 4 +25317 'erie' 4 +25318 'ermo' 4 +25319 'erna' 4 +25320 'erne' 4 +25321 'erno' 4 +25322 'eron' 4 +25323 'eros' 4 +25324 'erra' 4 +25325 'erre' 4 +25326 'erro' 4 +25327 'erry' 4 +25328 'erta' 4 +25329 'erte' 4 +25330 'erto' 4 +25331 'erts' 4 +25332 'erty' 4 +25333 'erva' 4 +25334 'erve' 4 +25335 'esan' 4 +25336 'esar' 4 +25337 'esch' 4 +25338 'esen' 4 +25339 'eses' 4 +25340 'esis' 4 +25341 'eson' 4 +25342 'essa' 4 +25343 'esse' 4 +25344 'esso' 4 +25345 'esta' 4 +25346 'este' 4 +25347 'esti' 4 +25348 'esto' 4 
+25349 'estr' 4 +25350 'ests' 4 +25351 'esty' 4 +25352 'etag' 4 +25353 'etal' 4 +25354 'etas' 4 +25355 'etch' 4 +25356 'eted' 4 +25357 'eten' 4 +25358 'eter' 4 +25359 'etes' 4 +25360 'ethe' 4 +25361 'etic' 4 +25362 'eton' 4 +25363 'etra' 4 +25364 'etro' 4 +25365 'etry' 4 +25366 'etta' 4 +25367 'ette' 4 +25368 'etti' 4 +25369 'etto' 4 +25370 'etur' 4 +25371 'etus' 4 +25372 'etzt' 4 +25373 'età' 4 +25374 'eurs' 4 +25375 'eval' 4 +25376 'even' 4 +25377 'ever' 4 +25378 'evil' 4 +25379 'evin' 4 +25380 'eway' 4 +25381 'exam' 4 +25382 'exec' 4 +25383 'exit' 4 +25384 'expl' 4 +25385 'expr' 4 +25386 'extr' 4 +25387 'eyed' 4 +25388 'eyer' 4 +25389 'face' 4 +25390 'fact' 4 +25391 'fade' 4 +25392 'fail' 4 +25393 'fair' 4 +25394 'fake' 4 +25395 'fall' 4 +25396 'fang' 4 +25397 'fant' 4 +25398 'fare' 4 +25399 'farm' 4 +25400 'fast' 4 +25401 'feas' 4 +25402 'feat' 4 +25403 'fect' 4 +25404 'feed' 4 +25405 'feel' 4 +25406 'feit' 4 +25407 'feld' 4 +25408 'felt' 4 +25409 'fern' 4 +25410 'fers' 4 +25411 'fert' 4 +25412 'fest' 4 +25413 'ffee' 4 +25414 'ffen' 4 +25415 'ffer' 4 +25416 'ffff' 4 +25417 'ffic' 4 +25418 'fica' 4 +25419 'fico' 4 +25420 'file' 4 +25421 'fill' 4 +25422 'film' 4 +25423 'find' 4 +25424 'fine' 4 +25425 'fire' 4 +25426 'firm' 4 +25427 'fish' 4 +25428 'fits' 4 +25429 'five' 4 +25430 'flag' 4 +25431 'flat' 4 +25432 'flex' 4 +25433 'flip' 4 +25434 'flix' 4 +25435 'flow' 4 +25436 'flux' 4 +25437 'foil' 4 +25438 'fois' 4 +25439 'fold' 4 +25440 'folk' 4 +25441 'fono' 4 +25442 'font' 4 +25443 'fony' 4 +25444 'food' 4 +25445 'foot' 4 +25446 'ford' 4 +25447 'fore' 4 +25448 'fork' 4 +25449 'form' 4 +25450 'fort' 4 +25451 'four' 4 +25452 'frac' 4 +25453 'frag' 4 +25454 'frak' 4 +25455 'fram' 4 +25456 'fred' 4 +25457 'free' 4 +25458 'freq' 4 +25459 'frey' 4 +25460 'from' 4 +25461 'ften' 4 +25462 'fter' 4 +25463 'fuel' 4 +25464 'full' 4 +25465 'func' 4 +25466 'fund' 4 +25467 'furt' 4 +25468 'fusc' 4 +25469 'fuse' 4 +25470 'fér' 4 +25471 'för' 4 +25472 'füg' 4 +25473 'füh' 4 +25474 'für' 4 +25475 'gado' 4 +25476 'gage' 4 +25477 'gain' 4 +25478 'game' 4 +25479 'gang' 4 +25480 'gard' 4 +25481 'gart' 4 +25482 'gary' 4 +25483 'gate' 4 +25484 'gear' 4 +25485 'geme' 4 +25486 'gems' 4 +25487 'gend' 4 +25488 'gene' 4 +25489 'gens' 4 +25490 'gent' 4 +25491 'geom' 4 +25492 'geon' 4 +25493 'gers' 4 +25494 'gery' 4 +25495 'gest' 4 +25496 'getX' 4 +25497 'gets' 4 +25498 'gett' 4 +25499 'gger' 4 +25500 'ggle' 4 +25501 'ghan' 4 +25502 'gian' 4 +25503 'gift' 4 +25504 'ging' 4 +25505 'gins' 4 +25506 'ginx' 4 +25507 'girl' 4 +25508 'gium' 4 +25509 'give' 4 +25510 'glob' 4 +25511 'glut' 4 +25512 'goal' 4 +25513 'gold' 4 +25514 'gone' 4 +25515 'good' 4 +25516 'goog' 4 +25517 'goto' 4 +25518 'gpio' 4 +25519 'grab' 4 +25520 'grad' 4 +25521 'gram' 4 +25522 'gran' 4 +25523 'grat' 4 +25524 'grav' 4 +25525 'gray' 4 +25526 'gree' 4 +25527 'greg' 4 +25528 'gren' 4 +25529 'grep' 4 +25530 'gres' 4 +25531 'grey' 4 +25532 'grid' 4 +25533 'grow' 4 +25534 'gré' 4 +25535 'gså' 4 +25536 'guid' 4 +25537 'guns' 4 +25538 'gypt' 4 +25539 'gzip' 4 +25540 'habi' 4 +25541 'hack' 4 +25542 'haft' 4 +25543 'hair' 4 +25544 'halb' 4 +25545 'half' 4 +25546 'hall' 4 +25547 'halt' 4 +25548 'hand' 4 +25549 'hang' 4 +25550 'hani' 4 +25551 'hape' 4 +25552 'happ' 4 +25553 'haps' 4 +25554 'hard' 4 +25555 'hare' 4 +25556 'harm' 4 +25557 'hart' 4 +25558 'hash' 4 +25559 'hatt' 4 +25560 'haul' 4 +25561 'haus' 4 +25562 'have' 4 +25563 'havi' 4 +25564 'hbar' 4 +25565 'hbox' 4 +25566 'head' 4 +25567 'heal' 4 +25568 'heap' 4 +25569 'heat' 4 +25570 'heck' 4 +25571 
'heed' 4 +25572 'heel' 4 +25573 'heet' 4 +25574 'heid' 4 +25575 'heim' 4 +25576 'heit' 4 +25577 'held' 4 +25578 'helf' 4 +25579 'hell' 4 +25580 'helm' 4 +25581 'help' 4 +25582 'hend' 4 +25583 'hene' 4 +25584 'heng' 4 +25585 'hens' 4 +25586 'here' 4 +25587 'hern' 4 +25588 'hero' 4 +25589 'hers' 4 +25590 'hest' 4 +25591 'heur' 4 +25592 'hide' 4 +25593 'hift' 4 +25594 'high' 4 +25595 'hill' 4 +25596 'hind' 4 +25597 'hing' 4 +25598 'hint' 4 +25599 'hips' 4 +25600 'hire' 4 +25601 'hist' 4 +25602 'hive' 4 +25603 'hlen' 4 +25604 'hler' 4 +25605 'hoff' 4 +25606 'hold' 4 +25607 'hole' 4 +25608 'holm' 4 +25609 'home' 4 +25610 'hood' 4 +25611 'hook' 4 +25612 'hope' 4 +25613 'hora' 4 +25614 'horn' 4 +25615 'hors' 4 +25616 'hort' 4 +25617 'host' 4 +25618 'hots' 4 +25619 'hour' 4 +25620 'href' 4 +25621 'html' 4 +25622 'hton' 4 +25623 'http' 4 +25624 'hung' 4 +25625 'hydr' 4 +25626 'hyth' 4 +25627 'ház' 4 +25628 'hés' 4 +25629 'hör' 4 +25630 'iada' 4 +25631 'iage' 4 +25632 'iais' 4 +25633 'iale' 4 +25634 'ials' 4 +25635 'iami' 4 +25636 'iamo' 4 +25637 'iams' 4 +25638 'iana' 4 +25639 'iane' 4 +25640 'iang' 4 +25641 'iani' 4 +25642 'iano' 4 +25643 'ians' 4 +25644 'iant' 4 +25645 'iary' 4 +25646 'iasm' 4 +25647 'iate' 4 +25648 'iał' 4 +25649 'ibal' 4 +25650 'iban' 4 +25651 'ibel' 4 +25652 'iben' 4 +25653 'iber' 4 +25654 'ibia' 4 +25655 'ibil' 4 +25656 'ible' 4 +25657 'ibli' 4 +25658 'ibly' 4 +25659 'ibus' 4 +25660 'ical' 4 +25661 'ican' 4 +25662 'icar' 4 +25663 'icas' 4 +25664 'iced' 4 +25665 'icer' 4 +25666 'ices' 4 +25667 'icha' 4 +25668 'iche' 4 +25669 'ichi' 4 +25670 'icho' 4 +25671 'icht' 4 +25672 'icia' 4 +25673 'icio' 4 +25674 'icip' 4 +25675 'icit' 4 +25676 'icki' 4 +25677 'icks' 4 +25678 'icky' 4 +25679 'icle' 4 +25680 'icol' 4 +25681 'icon' 4 +25682 'icos' 4 +25683 'icro' 4 +25684 'icts' 4 +25685 'icul' 4 +25686 'icum' 4 +25687 'icus' 4 +25688 'icut' 4 +25689 'ică' 4 +25690 'idad' 4 +25691 'idae' 4 +25692 'idal' 4 +25693 'idan' 4 +25694 'idas' 4 +25695 'iday' 4 +25696 'iddy' 4 +25697 'idea' 4 +25698 'ided' 4 +25699 'idel' 4 +25700 'iden' 4 +25701 'ideo' 4 +25702 'ider' 4 +25703 'ides' 4 +25704 'idge' 4 +25705 'idia' 4 +25706 'idin' 4 +25707 'idis' 4 +25708 'idle' 4 +25709 'idor' 4 +25710 'idos' 4 +25711 'idth' 4 +25712 'idé' 4 +25713 'iece' 4 +25714 'iego' 4 +25715 'ield' 4 +25716 'iele' 4 +25717 'iels' 4 +25718 'iene' 4 +25719 'iens' 4 +25720 'ient' 4 +25721 'iera' 4 +25722 'iere' 4 +25723 'ieri' 4 +25724 'iero' 4 +25725 'iers' 4 +25726 'iert' 4 +25727 'iese' 4 +25728 'iest' 4 +25729 'iets' 4 +25730 'iety' 4 +25731 'ieur' 4 +25732 'ieux' 4 +25733 'ieve' 4 +25734 'ieß' 4 +25735 'ież' 4 +25736 'ifar' 4 +25737 'ifax' 4 +25738 'ifen' 4 +25739 'ifer' 4 +25740 'iffe' 4 +25741 'iffs' 4 +25742 'ific' 4 +25743 'ifie' 4 +25744 'ifik' 4 +25745 'ifle' 4 +25746 'ifth' 4 +25747 'ifts' 4 +25748 'ifty' 4 +25749 'iful' 4 +25750 'igan' 4 +25751 'igar' 4 +25752 'igen' 4 +25753 'iger' 4 +25754 'iges' 4 +25755 'ighb' 4 +25756 'ight' 4 +25757 'igin' 4 +25758 'igma' 4 +25759 'igne' 4 +25760 'igon' 4 +25761 'igor' 4 +25762 'igos' 4 +25763 'igua' 4 +25764 'igue' 4 +25765 'ihad' 4 +25766 'ikal' 4 +25767 'ikan' 4 +25768 'iked' 4 +25769 'ikel' 4 +25770 'iken' 4 +25771 'iker' 4 +25772 'ikes' 4 +25773 'ikit' 4 +25774 'ikon' 4 +25775 'ikov' 4 +25776 'ilar' 4 +25777 'ilda' 4 +25778 'ilde' 4 +25779 'iled' 4 +25780 'ilee' 4 +25781 'ilen' 4 +25782 'iler' 4 +25783 'iles' 4 +25784 'ilet' 4 +25785 'iley' 4 +25786 'ilia' 4 +25787 'ilib' 4 +25788 'ilic' 4 +25789 'ilin' 4 +25790 'ilio' 4 +25791 'ilis' 4 +25792 'ilit' 4 +25793 'illa' 4 
+25794 'ille' 4 +25795 'illi' 4 +25796 'illo' 4 +25797 'ills' 4 +25798 'illy' 4 +25799 'iloc' 4 +25800 'ilog' 4 +25801 'ilon' 4 +25802 'ilor' 4 +25803 'ilos' 4 +25804 'ilot' 4 +25805 'ilst' 4 +25806 'ilty' 4 +25807 'ilus' 4 +25808 'ilyn' 4 +25809 'ilà' 4 +25810 'imag' 4 +25811 'imal' 4 +25812 'iman' 4 +25813 'imap' 4 +25814 'imar' 4 +25815 'imas' 4 +25816 'imat' 4 +25817 'imed' 4 +25818 'imen' 4 +25819 'imer' 4 +25820 'imes' 4 +25821 'imet' 4 +25822 'imin' 4 +25823 'imir' 4 +25824 'imit' 4 +25825 'imon' 4 +25826 'imos' 4 +25827 'impl' 4 +25828 'imum' 4 +25829 'imus' 4 +25830 'inae' 4 +25831 'inal' 4 +25832 'inar' 4 +25833 'inas' 4 +25834 'ince' 4 +25835 'inch' 4 +25836 'inci' 4 +25837 'incl' 4 +25838 'inct' 4 +25839 'inda' 4 +25840 'inde' 4 +25841 'indi' 4 +25842 'indo' 4 +25843 'inds' 4 +25844 'indu' 4 +25845 'indy' 4 +25846 'inea' 4 +25847 'ined' 4 +25848 'inee' 4 +25849 'inel' 4 +25850 'inem' 4 +25851 'inen' 4 +25852 'iner' 4 +25853 'ines' 4 +25854 'inet' 4 +25855 'inez' 4 +25856 'infl' 4 +25857 'info' 4 +25858 'inge' 4 +25859 'ingo' 4 +25860 'ings' 4 +25861 'ingt' 4 +25862 'ingu' 4 +25863 'inha' 4 +25864 'inho' 4 +25865 'inia' 4 +25866 'inic' 4 +25867 'inin' 4 +25868 'inis' 4 +25869 'init' 4 +25870 'iniz' 4 +25871 'inja' 4 +25872 'inka' 4 +25873 'inki' 4 +25874 'inks' 4 +25875 'inky' 4 +25876 'inoa' 4 +25877 'inos' 4 +25878 'inqu' 4 +25879 'insi' 4 +25880 'insk' 4 +25881 'insn' 4 +25882 'insp' 4 +25883 'inst' 4 +25884 'inta' 4 +25885 'inte' 4 +25886 'inth' 4 +25887 'into' 4 +25888 'intr' 4 +25889 'ints' 4 +25890 'inue' 4 +25891 'inus' 4 +25892 'inux' 4 +25893 'iné' 4 +25894 'iona' 4 +25895 'ione' 4 +25896 'ioni' 4 +25897 'ions' 4 +25898 'iors' 4 +25899 'ioso' 4 +25900 'iota' 4 +25901 'iour' 4 +25902 'ious' 4 +25903 'ipal' 4 +25904 'iped' 4 +25905 'ipeg' 4 +25906 'ipel' 4 +25907 'iper' 4 +25908 'ipes' 4 +25909 'iple' 4 +25910 'ippi' 4 +25911 'ippy' 4 +25912 'ipro' 4 +25913 'ipse' 4 +25914 'ique' 4 +25915 'iral' 4 +25916 'iran' 4 +25917 'iras' 4 +25918 'irds' 4 +25919 'ired' 4 +25920 'iren' 4 +25921 'ires' 4 +25922 'irez' 4 +25923 'irie' 4 +25924 'iris' 4 +25925 'irit' 4 +25926 'irms' 4 +25927 'iron' 4 +25928 'iros' 4 +25929 'irse' 4 +25930 'irst' 4 +25931 'irth' 4 +25932 'irts' 4 +25933 'irty' 4 +25934 'irus' 4 +25935 'irá' 4 +25936 'isan' 4 +25937 'isas' 4 +25938 'isch' 4 +25939 'isco' 4 +25940 'ised' 4 +25941 'isel' 4 +25942 'isen' 4 +25943 'iser' 4 +25944 'ises' 4 +25945 'iset' 4 +25946 'isha' 4 +25947 'ishi' 4 +25948 'isia' 4 +25949 'isin' 4 +25950 'isis' 4 +25951 'iska' 4 +25952 'iske' 4 +25953 'isko' 4 +25954 'isks' 4 +25955 'isle' 4 +25956 'isma' 4 +25957 'isme' 4 +25958 'ismo' 4 +25959 'isms' 4 +25960 'isol' 4 +25961 'ison' 4 +25962 'isor' 4 +25963 'issa' 4 +25964 'isse' 4 +25965 'issy' 4 +25966 'ista' 4 +25967 'iste' 4 +25968 'isti' 4 +25969 'isto' 4 +25970 'istr' 4 +25971 'ists' 4 +25972 'isty' 4 +25973 'isé' 4 +25974 'ital' 4 +25975 'itan' 4 +25976 'itar' 4 +25977 'itas' 4 +25978 'itat' 4 +25979 'itch' 4 +25980 'ited' 4 +25981 'itel' 4 +25982 'item' 4 +25983 'iten' 4 +25984 'iter' 4 +25985 'ites' 4 +25986 'itet' 4 +25987 'ithe' 4 +25988 'itia' 4 +25989 'itic' 4 +25990 'itin' 4 +25991 'itis' 4 +25992 'itle' 4 +25993 'itol' 4 +25994 'iton' 4 +25995 'itor' 4 +25996 'itos' 4 +25997 'itro' 4 +25998 'itsu' 4 +25999 'itta' 4 +26000 'itte' 4 +26001 'itti' 4 +26002 'itto' 4 +26003 'itty' 4 +26004 'itud' 4 +26005 'itus' 4 +26006 'ità' 4 +26007 'itä' 4 +26008 'ité' 4 +26009 'ită' 4 +26010 'ival' 4 +26011 'ivan' 4 +26012 'ivar' 4 +26013 'ivas' 4 +26014 'ived' 4 +26015 'ivel' 4 +26016 
'iven' 4 +26017 'iver' 4 +26018 'ives' 4 +26019 'ivia' 4 +26020 'ivic' 4 +26021 'ivid' 4 +26022 'ivil' 4 +26023 'ivir' 4 +26024 'ivos' 4 +26025 'ivot' 4 +26026 'ixed' 4 +26027 'ixel' 4 +26028 'ixin' 4 +26029 'ixon' 4 +26030 'izar' 4 +26031 'ized' 4 +26032 'izen' 4 +26033 'izer' 4 +26034 'izes' 4 +26035 'izia' 4 +26036 'izin' 4 +26037 'izio' 4 +26038 'izon' 4 +26039 'izza' 4 +26040 'ião' 4 +26041 'iça' 4 +26042 'ién' 4 +26043 'ión' 4 +26044 'jack' 4 +26045 'jang' 4 +26046 'java' 4 +26047 'jdbc' 4 +26048 'ject' 4 +26049 'jest' 4 +26050 'jets' 4 +26051 'jian' 4 +26052 'jing' 4 +26053 'jira' 4 +26054 'jobs' 4 +26055 'john' 4 +26056 'join' 4 +26057 'jong' 4 +26058 'jour' 4 +26059 'jpeg' 4 +26060 'json' 4 +26061 'jump' 4 +26062 'jury' 4 +26063 'just' 4 +26064 'ják' 4 +26065 'ján' 4 +26066 'ját' 4 +26067 'jär' 4 +26068 'jön' 4 +26069 'jör' 4 +26070 'jąc' 4 +26071 'kań' 4 +26072 'keep' 4 +26073 'kees' 4 +26074 'kehr' 4 +26075 'keit' 4 +26076 'kern' 4 +26077 'kers' 4 +26078 'keys' 4 +26079 'kick' 4 +26080 'kids' 4 +26081 'kill' 4 +26082 'kind' 4 +26083 'king' 4 +26084 'kins' 4 +26085 'know' 4 +26086 'krit' 4 +26087 'ktop' 4 +26088 'ktor' 4 +26089 'któ' 4 +26090 'ków' 4 +26091 'lace' 4 +26092 'lage' 4 +26093 'laim' 4 +26094 'lain' 4 +26095 'lake' 4 +26096 'land' 4 +26097 'lane' 4 +26098 'lang' 4 +26099 'larg' 4 +26100 'lash' 4 +26101 'lass' 4 +26102 'last' 4 +26103 'late' 4 +26104 'laus' 4 +26105 'laws' 4 +26106 'lazy' 4 +26107 'ldap' 4 +26108 'lder' 4 +26109 'lead' 4 +26110 'leaf' 4 +26111 'lean' 4 +26112 'lear' 4 +26113 'leck' 4 +26114 'lect' 4 +26115 'leen' 4 +26116 'leep' 4 +26117 'leet' 4 +26118 'left' 4 +26119 'lege' 4 +26120 'lein' 4 +26121 'lems' 4 +26122 'lene' 4 +26123 'lens' 4 +26124 'leon' 4 +26125 'lers' 4 +26126 'lesh' 4 +26127 'less' 4 +26128 'lest' 4 +26129 'lete' 4 +26130 'lets' 4 +26131 'lett' 4 +26132 'leur' 4 +26133 'leys' 4 +26134 'libc' 4 +26135 'libs' 4 +26136 'lica' 4 +26137 'lice' 4 +26138 'lich' 4 +26139 'lick' 4 +26140 'lict' 4 +26141 'lied' 4 +26142 'lier' 4 +26143 'lies' 4 +26144 'life' 4 +26145 'lift' 4 +26146 'liga' 4 +26147 'ligt' 4 +26148 'like' 4 +26149 'lime' 4 +26150 'line' 4 +26151 'ling' 4 +26152 'link' 4 +26153 'lint' 4 +26154 'lion' 4 +26155 'liqu' 4 +26156 'lish' 4 +26157 'list' 4 +26158 'lite' 4 +26159 'live' 4 +26160 'ller' 4 +26161 'lles' 4 +26162 'llvm' 4 +26163 'load' 4 +26164 'loan' 4 +26165 'loat' 4 +26166 'lock' 4 +26167 'logo' 4 +26168 'logs' 4 +26169 'loid' 4 +26170 'long' 4 +26171 'lood' 4 +26172 'look' 4 +26173 'loop' 4 +26174 'loor' 4 +26175 'lord' 4 +26176 'lose' 4 +26177 'loss' 4 +26178 'lost' 4 +26179 'lots' 4 +26180 'love' 4 +26181 'loyd' 4 +26182 'luck' 4 +26183 'lund' 4 +26184 'lung' 4 +26185 'lymp' 4 +26186 'lyph' 4 +26187 'lán' 4 +26188 'lär' 4 +26189 'läu' 4 +26190 'lès' 4 +26191 'lés' 4 +26192 'lês' 4 +26193 'mach' 4 +26194 'made' 4 +26195 'mage' 4 +26196 'magn' 4 +26197 'maid' 4 +26198 'mail' 4 +26199 'main' 4 +26200 'make' 4 +26201 'male' 4 +26202 'mall' 4 +26203 'mana' 4 +26204 'mand' 4 +26205 'mani' 4 +26206 'mann' 4 +26207 'mans' 4 +26208 'mant' 4 +26209 'many' 4 +26210 'maps' 4 +26211 'mare' 4 +26212 'mark' 4 +26213 'mars' 4 +26214 'mart' 4 +26215 'mary' 4 +26216 'mask' 4 +26217 'mass' 4 +26218 'mast' 4 +26219 'mate' 4 +26220 'math' 4 +26221 'maze' 4 +26222 'mber' 4 +26223 'mbox' 4 +26224 'meal' 4 +26225 'mean' 4 +26226 'meas' 4 +26227 'medi' 4 +26228 'meet' 4 +26229 'mega' 4 +26230 'memb' 4 +26231 'memo' 4 +26232 'meno' 4 +26233 'mens' 4 +26234 'ment' 4 +26235 'menu' 4 +26236 'merc' 4 +26237 'mere' 4 +26238 'mers' 4 +26239 'mesh' 
4 +26240 'mess' 4 +26241 'meta' 4 +26242 'meth' 4 +26243 'midi' 4 +26244 'midt' 4 +26245 'mile' 4 +26246 'mill' 4 +26247 'mime' 4 +26248 'mina' 4 +26249 'mind' 4 +26250 'mine' 4 +26251 'ming' 4 +26252 'mini' 4 +26253 'mino' 4 +26254 'mins' 4 +26255 'mint' 4 +26256 'misc' 4 +26257 'mise' 4 +26258 'miss' 4 +26259 'mist' 4 +26260 'mite' 4 +26261 'mith' 4 +26262 'mits' 4 +26263 'mitt' 4 +26264 'mium' 4 +26265 'mlin' 4 +26266 'mock' 4 +26267 'mode' 4 +26268 'moil' 4 +26269 'mond' 4 +26270 'mong' 4 +26271 'mono' 4 +26272 'mons' 4 +26273 'mont' 4 +26274 'mony' 4 +26275 'moon' 4 +26276 'more' 4 +26277 'mort' 4 +26278 'most' 4 +26279 'move' 4 +26280 'mpeg' 4 +26281 'msgs' 4 +26282 'much' 4 +26283 'mult' 4 +26284 'mund' 4 +26285 'must' 4 +26286 'mute' 4 +26287 'nail' 4 +26288 'nals' 4 +26289 'nama' 4 +26290 'name' 4 +26291 'nant' 4 +26292 'nbsp' 4 +26293 'ncia' 4 +26294 'ndef' 4 +26295 'nder' 4 +26296 'ndim' 4 +26297 'near' 4 +26298 'neau' 4 +26299 'neck' 4 +26300 'nect' 4 +26301 'need' 4 +26302 'nego' 4 +26303 'nell' 4 +26304 'nels' 4 +26305 'nerg' 4 +26306 'ners' 4 +26307 'ness' 4 +26308 'nest' 4 +26309 'nets' 4 +26310 'nett' 4 +26311 'neum' 4 +26312 'neur' 4 +26313 'neut' 4 +26314 'news' 4 +26315 'next' 4 +26316 'neys' 4 +26317 'nger' 4 +26318 'nice' 4 +26319 'nick' 4 +26320 'nier' 4 +26321 'nine' 4 +26322 'ning' 4 +26323 'nist' 4 +26324 'nię' 4 +26325 'node' 4 +26326 'nome' 4 +26327 'none' 4 +26328 'noon' 4 +26329 'noop' 4 +26330 'norm' 4 +26331 'nose' 4 +26332 'nost' 4 +26333 'note' 4 +26334 'noun' 4 +26335 'nova' 4 +26336 'nown' 4 +26337 'nsic' 4 +26338 'nten' 4 +26339 'nton' 4 +26340 'null' 4 +26341 'nung' 4 +26342 'nuts' 4 +26343 'née' 4 +26344 'nés' 4 +26345 'ník' 4 +26346 'ním' 4 +26347 'oard' 4 +26348 'obal' 4 +26349 'obar' 4 +26350 'obby' 4 +26351 'ober' 4 +26352 'obia' 4 +26353 'obic' 4 +26354 'obil' 4 +26355 'oble' 4 +26356 'obox' 4 +26357 'obra' 4 +26358 'obre' 4 +26359 'obuf' 4 +26360 'ocal' 4 +26361 'ocar' 4 +26362 'occo' 4 +26363 'oche' 4 +26364 'ocks' 4 +26365 'ocoa' 4 +26366 'ocol' 4 +26367 'ocom' 4 +26368 'ocon' 4 +26369 'ocre' 4 +26370 'ocus' 4 +26371 'ocê' 4 +26372 'odal' 4 +26373 'oday' 4 +26374 'oded' 4 +26375 'odel' 4 +26376 'odem' 4 +26377 'oden' 4 +26378 'oder' 4 +26379 'odes' 4 +26380 'odge' 4 +26381 'odia' 4 +26382 'odic' 4 +26383 'odom' 4 +26384 'odon' 4 +26385 'odor' 4 +26386 'odos' 4 +26387 'odot' 4 +26388 'odox' 4 +26389 'odus' 4 +26390 'offs' 4 +26391 'ogan' 4 +26392 'ogel' 4 +26393 'ogen' 4 +26394 'ogle' 4 +26395 'ogly' 4 +26396 'ogne' 4 +26397 'ogon' 4 +26398 'ogra' 4 +26399 'ogue' 4 +26400 'ohan' 4 +26401 'oids' 4 +26402 'oine' 4 +26403 'oint' 4 +26404 'oire' 4 +26405 'oise' 4 +26406 'oked' 4 +26407 'oken' 4 +26408 'oker' 4 +26409 'okes' 4 +26410 'okia' 4 +26411 'okie' 4 +26412 'okin' 4 +26413 'olan' 4 +26414 'olar' 4 +26415 'olas' 4 +26416 'olds' 4 +26417 'oled' 4 +26418 'olem' 4 +26419 'olen' 4 +26420 'oler' 4 +26421 'oles' 4 +26422 'oley' 4 +26423 'olia' 4 +26424 'olic' 4 +26425 'olid' 4 +26426 'olin' 4 +26427 'olip' 4 +26428 'olis' 4 +26429 'olit' 4 +26430 'olla' 4 +26431 'ollo' 4 +26432 'olly' 4 +26433 'olog' 4 +26434 'olon' 4 +26435 'olor' 4 +26436 'olph' 4 +26437 'olta' 4 +26438 'olve' 4 +26439 'omal' 4 +26440 'oman' 4 +26441 'omas' 4 +26442 'omat' 4 +26443 'ombo' 4 +26444 'omed' 4 +26445 'omen' 4 +26446 'omer' 4 +26447 'omes' 4 +26448 'omet' 4 +26449 'omez' 4 +26450 'omic' 4 +26451 'omin' 4 +26452 'omit' 4 +26453 'omon' 4 +26454 'onal' 4 +26455 'onas' 4 +26456 'once' 4 +26457 'onda' 4 +26458 'onde' 4 +26459 'ondo' 4 +26460 'onds' 4 +26461 'oned' 4 +26462 
'onel' 4 +26463 'onen' 4 +26464 'oner' 4 +26465 'ones' 4 +26466 'onet' 4 +26467 'oney' 4 +26468 'onga' 4 +26469 'onge' 4 +26470 'ongo' 4 +26471 'ongs' 4 +26472 'onia' 4 +26473 'onic' 4 +26474 'onio' 4 +26475 'onis' 4 +26476 'only' 4 +26477 'onna' 4 +26478 'onne' 4 +26479 'onom' 4 +26480 'onse' 4 +26481 'onso' 4 +26482 'onte' 4 +26483 'onto' 4 +26484 'onym' 4 +26485 'ooks' 4 +26486 'ools' 4 +26487 'oons' 4 +26488 'oooo' 4 +26489 'oops' 4 +26490 'ooth' 4 +26491 'opal' 4 +26492 'oped' 4 +26493 'open' 4 +26494 'oper' 4 +26495 'opes' 4 +26496 'opez' 4 +26497 'ophe' 4 +26498 'ophy' 4 +26499 'opia' 4 +26500 'opic' 4 +26501 'opin' 4 +26502 'ople' 4 +26503 'opol' 4 +26504 'opor' 4 +26505 'opot' 4 +26506 'oppy' 4 +26507 'opro' 4 +26508 'opsy' 4 +26509 'opts' 4 +26510 'opus' 4 +26511 'oque' 4 +26512 'oral' 4 +26513 'oran' 4 +26514 'oras' 4 +26515 'orce' 4 +26516 'orch' 4 +26517 'orde' 4 +26518 'ordo' 4 +26519 'ords' 4 +26520 'orea' 4 +26521 'ored' 4 +26522 'orem' 4 +26523 'oren' 4 +26524 'orer' 4 +26525 'ores' 4 +26526 'oret' 4 +26527 'orge' 4 +26528 'oria' 4 +26529 'oric' 4 +26530 'orie' 4 +26531 'orig' 4 +26532 'orin' 4 +26533 'orio' 4 +26534 'oris' 4 +26535 'orks' 4 +26536 'orld' 4 +26537 'orna' 4 +26538 'orne' 4 +26539 'orno' 4 +26540 'orns' 4 +26541 'oron' 4 +26542 'orph' 4 +26543 'orro' 4 +26544 'orry' 4 +26545 'orse' 4 +26546 'orsi' 4 +26547 'orsk' 4 +26548 'orst' 4 +26549 'orta' 4 +26550 'orte' 4 +26551 'orth' 4 +26552 'orts' 4 +26553 'orum' 4 +26554 'orus' 4 +26555 'osal' 4 +26556 'osas' 4 +26557 'osed' 4 +26558 'osen' 4 +26559 'oser' 4 +26560 'oses' 4 +26561 'osex' 4 +26562 'oshi' 4 +26563 'osin' 4 +26564 'osis' 4 +26565 'osit' 4 +26566 'osos' 4 +26567 'osph' 4 +26568 'ossa' 4 +26569 'osse' 4 +26570 'osta' 4 +26571 'oste' 4 +26572 'osti' 4 +26573 'osto' 4 +26574 'otal' 4 +26575 'oted' 4 +26576 'oten' 4 +26577 'oter' 4 +26578 'otes' 4 +26579 'othe' 4 +26580 'otho' 4 +26581 'othy' 4 +26582 'otic' 4 +26583 'otin' 4 +26584 'otle' 4 +26585 'otom' 4 +26586 'oton' 4 +26587 'otor' 4 +26588 'otos' 4 +26589 'otta' 4 +26590 'otte' 4 +26591 'otti' 4 +26592 'otto' 4 +26593 'otyp' 4 +26594 'ouch' 4 +26595 'oufl' 4 +26596 'ough' 4 +26597 'ould' 4 +26598 'ound' 4 +26599 'ount' 4 +26600 'oupe' 4 +26601 'ourd' 4 +26602 'oure' 4 +26603 'ourg' 4 +26604 'ouri' 4 +26605 'ourn' 4 +26606 'ours' 4 +26607 'ourt' 4 +26608 'ouse' 4 +26609 'ouss' 4 +26610 'oust' 4 +26611 'oute' 4 +26612 'outh' 4 +26613 'outs' 4 +26614 'ouve' 4 +26615 'oval' 4 +26616 'ovan' 4 +26617 'oved' 4 +26618 'oven' 4 +26619 'over' 4 +26620 'oves' 4 +26621 'ovic' 4 +26622 'ovie' 4 +26623 'ová' 4 +26624 'ové' 4 +26625 'ový' 4 +26626 'ově' 4 +26627 'owan' 4 +26628 'owed' 4 +26629 'owel' 4 +26630 'ower' 4 +26631 'ową' 4 +26632 'oxel' 4 +26633 'oxic' 4 +26634 'oxid' 4 +26635 'oyal' 4 +26636 'oyer' 4 +26637 'oyle' 4 +26638 'pace' 4 +26639 'pack' 4 +26640 'page' 4 +26641 'paid' 4 +26642 'pain' 4 +26643 'pair' 4 +26644 'pand' 4 +26645 'para' 4 +26646 'pard' 4 +26647 'pare' 4 +26648 'park' 4 +26649 'pars' 4 +26650 'part' 4 +26651 'pass' 4 +26652 'past' 4 +26653 'path' 4 +26654 'pdev' 4 +26655 'peak' 4 +26656 'pear' 4 +26657 'peat' 4 +26658 'pect' 4 +26659 'peed' 4 +26660 'peek' 4 +26661 'peer' 4 +26662 'pell' 4 +26663 'pend' 4 +26664 'pent' 4 +26665 'perc' 4 +26666 'perf' 4 +26667 'peri' 4 +26668 'perl' 4 +26669 'perm' 4 +26670 'perp' 4 +26671 'pers' 4 +26672 'pert' 4 +26673 'phal' 4 +26674 'phan' 4 +26675 'phas' 4 +26676 'phen' 4 +26677 'pher' 4 +26678 'phia' 4 +26679 'phil' 4 +26680 'phin' 4 +26681 'phis' 4 +26682 'phon' 4 +26683 'phot' 4 +26684 'phys' 
4 +26685 'pick' 4 +26686 'pies' 4 +26687 'pile' 4 +26688 'pine' 4 +26689 'ping' 4 +26690 'pink' 4 +26691 'pins' 4 +26692 'pipe' 4 +26693 'pire' 4 +26694 'pite' 4 +26695 'plan' 4 +26696 'plat' 4 +26697 'play' 4 +26698 'pled' 4 +26699 'pler' 4 +26700 'ples' 4 +26701 'plet' 4 +26702 'plex' 4 +26703 'plic' 4 +26704 'plit' 4 +26705 'plot' 4 +26706 'ploy' 4 +26707 'plug' 4 +26708 'plus' 4 +26709 'pmod' 4 +26710 'poke' 4 +26711 'pole' 4 +26712 'poll' 4 +26713 'poly' 4 +26714 'pond' 4 +26715 'pone' 4 +26716 'pong' 4 +26717 'pons' 4 +26718 'pool' 4 +26719 'poon' 4 +26720 'pora' 4 +26721 'port' 4 +26722 'pose' 4 +26723 'poss' 4 +26724 'post' 4 +26725 'pour' 4 +26726 'pped' 4 +26727 'ppen' 4 +26728 'pper' 4 +26729 'prec' 4 +26730 'pred' 4 +26731 'pref' 4 +26732 'prem' 4 +26733 'prep' 4 +26734 'pres' 4 +26735 'pret' 4 +26736 'prev' 4 +26737 'pril' 4 +26738 'prim' 4 +26739 'prit' 4 +26740 'priv' 4 +26741 'prob' 4 +26742 'proc' 4 +26743 'prod' 4 +26744 'prof' 4 +26745 'prog' 4 +26746 'proj' 4 +26747 'prom' 4 +26748 'pron' 4 +26749 'prop' 4 +26750 'prot' 4 +26751 'prov' 4 +26752 'prox' 4 +26753 'prus' 4 +26754 'prü' 4 +26755 'pson' 4 +26756 'ptic' 4 +26757 'pton' 4 +26758 'publ' 4 +26759 'pull' 4 +26760 'punk' 4 +26761 'pure' 4 +26762 'push' 4 +26763 'pute' 4 +26764 'qing' 4 +26765 'quad' 4 +26766 'qual' 4 +26767 'quan' 4 +26768 'quar' 4 +26769 'quat' 4 +26770 'quee' 4 +26771 'quel' 4 +26772 'quer' 4 +26773 'ques' 4 +26774 'quet' 4 +26775 'quez' 4 +26776 'quia' 4 +26777 'quin' 4 +26778 'quir' 4 +26779 'quis' 4 +26780 'quit' 4 +26781 'quiz' 4 +26782 'quot' 4 +26783 'qué' 4 +26784 'race' 4 +26785 'rack' 4 +26786 'ract' 4 +26787 'rada' 4 +26788 'rade' 4 +26789 'radi' 4 +26790 'rado' 4 +26791 'rael' 4 +26792 'raft' 4 +26793 'rage' 4 +26794 'raid' 4 +26795 'rail' 4 +26796 'rain' 4 +26797 'rais' 4 +26798 'rait' 4 +26799 'rale' 4 +26800 'rama' 4 +26801 'rame' 4 +26802 'rams' 4 +26803 'rand' 4 +26804 'rane' 4 +26805 'rang' 4 +26806 'rank' 4 +26807 'rano' 4 +26808 'rans' 4 +26809 'rant' 4 +26810 'raph' 4 +26811 'rare' 4 +26812 'rary' 4 +26813 'rase' 4 +26814 'rast' 4 +26815 'rate' 4 +26816 'rats' 4 +26817 'raud' 4 +26818 'rawl' 4 +26819 'rawn' 4 +26820 'rays' 4 +26821 'read' 4 +26822 'reak' 4 +26823 'real' 4 +26824 'ream' 4 +26825 'reas' 4 +26826 'reat' 4 +26827 'rece' 4 +26828 'reci' 4 +26829 'reck' 4 +26830 'rect' 4 +26831 'recv' 4 +26832 'rede' 4 +26833 'redi' 4 +26834 'redo' 4 +26835 'redu' 4 +26836 'reed' 4 +26837 'reek' 4 +26838 'reen' 4 +26839 'rees' 4 +26840 'reet' 4 +26841 'refs' 4 +26842 'regn' 4 +26843 'regs' 4 +26844 'reib' 4 +26845 'rein' 4 +26846 'rell' 4 +26847 'rels' 4 +26848 'relu' 4 +26849 'reme' 4 +26850 'rena' 4 +26851 'rend' 4 +26852 'rene' 4 +26853 'reno' 4 +26854 'rens' 4 +26855 'rent' 4 +26856 'reon' 4 +26857 'repo' 4 +26858 'repr' 4 +26859 'requ' 4 +26860 'rera' 4 +26861 'rero' 4 +26862 'resa' 4 +26863 'rese' 4 +26864 'resh' 4 +26865 'reso' 4 +26866 'resp' 4 +26867 'ress' 4 +26868 'rest' 4 +26869 'reta' 4 +26870 'rete' 4 +26871 'rets' 4 +26872 'rett' 4 +26873 'reve' 4 +26874 'rgba' 4 +26875 'riad' 4 +26876 'rial' 4 +26877 'rian' 4 +26878 'rias' 4 +26879 'rica' 4 +26880 'rice' 4 +26881 'rich' 4 +26882 'rick' 4 +26883 'rico' 4 +26884 'rics' 4 +26885 'rict' 4 +26886 'ride' 4 +26887 'ried' 4 +26888 'rief' 4 +26889 'riel' 4 +26890 'rien' 4 +26891 'rier' 4 +26892 'ries' 4 +26893 'riet' 4 +26894 'rift' 4 +26895 'rika' 4 +26896 'rike' 4 +26897 'rile' 4 +26898 'rimp' 4 +26899 'rina' 4 +26900 'rine' 4 +26901 'ring' 4 +26902 'rink' 4 +26903 'rint' 4 +26904 'rior' 4 +26905 'rios' 4 +26906 'riot' 4 
+26907 'ripp' 4 +26908 'ript' 4 +26909 'rire' 4 +26910 'rise' 4 +26911 'rish' 4 +26912 'risk' 4 +26913 'rist' 4 +26914 'rite' 4 +26915 'rito' 4 +26916 'ritt' 4 +26917 'ritz' 4 +26918 'rium' 4 +26919 'rive' 4 +26920 'rió' 4 +26921 'road' 4 +26922 'robe' 4 +26923 'rock' 4 +26924 'rodu' 4 +26925 'roid' 4 +26926 'rois' 4 +26927 'roit' 4 +26928 'roke' 4 +26929 'role' 4 +26930 'roll' 4 +26931 'roma' 4 +26932 'rome' 4 +26933 'romy' 4 +26934 'rone' 4 +26935 'rong' 4 +26936 'rons' 4 +26937 'ront' 4 +26938 'room' 4 +26939 'root' 4 +26940 'roph' 4 +26941 'rops' 4 +26942 'ropy' 4 +26943 'rors' 4 +26944 'rose' 4 +26945 'ross' 4 +26946 'rost' 4 +26947 'rote' 4 +26948 'rots' 4 +26949 'rott' 4 +26950 'roup' 4 +26951 'rous' 4 +26952 'rout' 4 +26953 'rove' 4 +26954 'rown' 4 +26955 'rows' 4 +26956 'rror' 4 +26957 'ruby' 4 +26958 'ruce' 4 +26959 'ruck' 4 +26960 'ruct' 4 +26961 'ruit' 4 +26962 'rule' 4 +26963 'runs' 4 +26964 'rupt' 4 +26965 'rust' 4 +26966 'ryan' 4 +26967 'rypt' 4 +26968 'rás' 4 +26969 'rän' 4 +26970 'rès' 4 +26971 'rée' 4 +26972 'rés' 4 +26973 'rét' 4 +26974 'ría' 4 +26975 'ród' 4 +26976 'rón' 4 +26977 'safe' 4 +26978 'said' 4 +26979 'sale' 4 +26980 'salt' 4 +26981 'same' 4 +26982 'samp' 4 +26983 'sand' 4 +26984 'sans' 4 +26985 'save' 4 +26986 'scal' 4 +26987 'scan' 4 +26988 'scar' 4 +26989 'sche' 4 +26990 'scre' 4 +26991 'scri' 4 +26992 'seat' 4 +26993 'seau' 4 +26994 'sect' 4 +26995 'seed' 4 +26996 'seek' 4 +26997 'seen' 4 +26998 'sein' 4 +26999 'self' 4 +27000 'sell' 4 +27001 'semb' 4 +27002 'semi' 4 +27003 'send' 4 +27004 'sens' 4 +27005 'sent' 4 +27006 'sequ' 4 +27007 'sers' 4 +27008 'sert' 4 +27009 'serv' 4 +27010 'sess' 4 +27011 'sets' 4 +27012 'sett' 4 +27013 'seud' 4 +27014 'shal' 4 +27015 'shan' 4 +27016 'shaw' 4 +27017 'ship' 4 +27018 'shit' 4 +27019 'shop' 4 +27020 'shot' 4 +27021 'show' 4 +27022 'shut' 4 +27023 'side' 4 +27024 'sign' 4 +27025 'sime' 4 +27026 'simp' 4 +27027 'sing' 4 +27028 'sink' 4 +27029 'site' 4 +27030 'size' 4 +27031 'skin' 4 +27032 'skip' 4 +27033 'ská' 4 +27034 'ské' 4 +27035 'ský' 4 +27036 'ską' 4 +27037 'slot' 4 +27038 'slow' 4 +27039 'slug' 4 +27040 'smtp' 4 +27041 'snap' 4 +27042 'snow' 4 +27043 'soap' 4 +27044 'sock' 4 +27045 'soft' 4 +27046 'sold' 4 +27047 'sole' 4 +27048 'some' 4 +27049 'song' 4 +27050 'sono' 4 +27051 'soon' 4 +27052 'sort' 4 +27053 'soup' 4 +27054 'spam' 4 +27055 'span' 4 +27056 'spar' 4 +27057 'spec' 4 +27058 'spin' 4 +27059 'spir' 4 +27060 'spot' 4 +27061 'sqrt' 4 +27062 'sson' 4 +27063 'stab' 4 +27064 'stad' 4 +27065 'stag' 4 +27066 'stal' 4 +27067 'stan' 4 +27068 'star' 4 +27069 'stat' 4 +27070 'stay' 4 +27071 'sted' 4 +27072 'stem' 4 +27073 'sten' 4 +27074 'step' 4 +27075 'ster' 4 +27076 'stic' 4 +27077 'stim' 4 +27078 'stit' 4 +27079 'stmt' 4 +27080 'ston' 4 +27081 'stop' 4 +27082 'stor' 4 +27083 'stra' 4 +27084 'stre' 4 +27085 'stri' 4 +27086 'stro' 4 +27087 'stru' 4 +27088 'stry' 4 +27089 'stub' 4 +27090 'stud' 4 +27091 'stä' 4 +27092 'stå' 4 +27093 'subs' 4 +27094 'succ' 4 +27095 'such' 4 +27096 'sudo' 4 +27097 'suit' 4 +27098 'summ' 4 +27099 'supp' 4 +27100 'sure' 4 +27101 'surf' 4 +27102 'swap' 4 +27103 'swer' 4 +27104 'sync' 4 +27105 'ség' 4 +27106 'tabs' 4 +27107 'tage' 4 +27108 'tags' 4 +27109 'tail' 4 +27110 'tain' 4 +27111 'tait' 4 +27112 'take' 4 +27113 'talk' 4 +27114 'tang' 4 +27115 'tanh' 4 +27116 'tank' 4 +27117 'task' 4 +27118 'tawa' 4 +27119 'tał' 4 +27120 'team' 4 +27121 'tech' 4 +27122 'teen' 4 +27123 'tegr' 4 +27124 'teil' 4 +27125 'tein' 4 +27126 'tele' 4 +27127 'tell' 4 +27128 'temp' 4 +27129 'tent' 4 
+27130 'tera' 4 +27131 'tere' 4 +27132 'term' 4 +27133 'tern' 4 +27134 'tero' 4 +27135 'ters' 4 +27136 'tery' 4 +27137 'test' 4 +27138 'tesy' 4 +27139 'text' 4 +27140 'thal' 4 +27141 'than' 4 +27142 'that' 4 +27143 'thel' 4 +27144 'them' 4 +27145 'then' 4 +27146 'ther' 4 +27147 'thes' 4 +27148 'they' 4 +27149 'thin' 4 +27150 'this' 4 +27151 'thon' 4 +27152 'thor' 4 +27153 'thro' 4 +27154 'thur' 4 +27155 'thus' 4 +27156 'tica' 4 +27157 'tick' 4 +27158 'tico' 4 +27159 'tics' 4 +27160 'tier' 4 +27161 'ties' 4 +27162 'tiff' 4 +27163 'tikz' 4 +27164 'tile' 4 +27165 'time' 4 +27166 'ting' 4 +27167 'tiny' 4 +27168 'tion' 4 +27169 'tipo' 4 +27170 'tips' 4 +27171 'toBe' 4 +27172 'todo' 4 +27173 'tone' 4 +27174 'tons' 4 +27175 'took' 4 +27176 'tool' 4 +27177 'toon' 4 +27178 'tour' 4 +27179 'tout' 4 +27180 'town' 4 +27181 'trac' 4 +27182 'trad' 4 +27183 'trak' 4 +27184 'tran' 4 +27185 'trap' 4 +27186 'tras' 4 +27187 'tree' 4 +27188 'tres' 4 +27189 'trib' 4 +27190 'trie' 4 +27191 'trig' 4 +27192 'trim' 4 +27193 'trip' 4 +27194 'tron' 4 +27195 'true' 4 +27196 'ttes' 4 +27197 'tube' 4 +27198 'ture' 4 +27199 'turn' 4 +27200 'type' 4 +27201 'uala' 4 +27202 'uali' 4 +27203 'uant' 4 +27204 'uart' 4 +27205 'uary' 4 +27206 'uate' 4 +27207 'ubar' 4 +27208 'uben' 4 +27209 'uber' 4 +27210 'ubes' 4 +27211 'ubic' 4 +27212 'uble' 4 +27213 'ubre' 4 +27214 'ucci' 4 +27215 'uced' 4 +27216 'ucer' 4 +27217 'uces' 4 +27218 'ucha' 4 +27219 'uche' 4 +27220 'uchi' 4 +27221 'uchs' 4 +27222 'ucht' 4 +27223 'ucid' 4 +27224 'ucks' 4 +27225 'ucky' 4 +27226 'ucle' 4 +27227 'udad' 4 +27228 'uded' 4 +27229 'uden' 4 +27230 'uder' 4 +27231 'udes' 4 +27232 'udge' 4 +27233 'udio' 4 +27234 'udos' 4 +27235 'uego' 4 +27236 'ueil' 4 +27237 'uela' 4 +27238 'uels' 4 +27239 'uent' 4 +27240 'uers' 4 +27241 'uese' 4 +27242 'uest' 4 +27243 'ueur' 4 +27244 'ufen' 4 +27245 'uffs' 4 +27246 'uffy' 4 +27247 'ugal' 4 +27248 'ugar' 4 +27249 'ugby' 4 +27250 'ugen' 4 +27251 'ught' 4 +27252 'ugin' 4 +27253 'uild' 4 +27254 'uilt' 4 +27255 'uing' 4 +27256 'uins' 4 +27257 'uint' 4 +27258 'uish' 4 +27259 'uite' 4 +27260 'uits' 4 +27261 'uity' 4 +27262 'ují' 4 +27263 'ują' 4 +27264 'ukes' 4 +27265 'ular' 4 +27266 'ulas' 4 +27267 'uled' 4 +27268 'ulen' 4 +27269 'uler' 4 +27270 'ules' 4 +27271 'ulet' 4 +27272 'ulia' 4 +27273 'ulin' 4 +27274 'ulis' 4 +27275 'ulla' 4 +27276 'ulle' 4 +27277 'ulli' 4 +27278 'ulls' 4 +27279 'ully' 4 +27280 'ulos' 4 +27281 'ulpt' 4 +27282 'ulse' 4 +27283 'ulti' 4 +27284 'ults' 4 +27285 'ulty' 4 +27286 'ultz' 4 +27287 'ului' 4 +27288 'ulum' 4 +27289 'ulus' 4 +27290 'umab' 4 +27291 'uman' 4 +27292 'umar' 4 +27293 'umas' 4 +27294 'umat' 4 +27295 'umbn' 4 +27296 'umbo' 4 +27297 'umbs' 4 +27298 'umed' 4 +27299 'umen' 4 +27300 'umer' 4 +27301 'umes' 4 +27302 'umin' 4 +27303 'ummy' 4 +27304 'umni' 4 +27305 'umor' 4 +27306 'umph' 4 +27307 'umps' 4 +27308 'umpy' 4 +27309 'unal' 4 +27310 'unar' 4 +27311 'unas' 4 +27312 'unce' 4 +27313 'unch' 4 +27314 'unci' 4 +27315 'unct' 4 +27316 'unda' 4 +27317 'unde' 4 +27318 'undo' 4 +27319 'unds' 4 +27320 'undy' 4 +27321 'uned' 4 +27322 'uner' 4 +27323 'unes' 4 +27324 'unge' 4 +27325 'ungs' 4 +27326 'unic' 4 +27327 'unik' 4 +27328 'uniq' 4 +27329 'unit' 4 +27330 'unix' 4 +27331 'unks' 4 +27332 'unkt' 4 +27333 'unos' 4 +27334 'unta' 4 +27335 'unte' 4 +27336 'unto' 4 +27337 'unts' 4 +27338 'untu' 4 +27339 'unya' 4 +27340 'uous' 4 +27341 'upal' 4 +27342 'uper' 4 +27343 'upid' 4 +27344 'uple' 4 +27345 'upon' 4 +27346 'urai' 4 +27347 'ural' 4 +27348 'uran' 4 +27349 'uras' 4 +27350 'urch' 4 +27351 'urdy' 4 
+27352 'ured' 4 +27353 'uren' 4 +27354 'urer' 4 +27355 'ures' 4 +27356 'uria' 4 +27357 'uris' 4 +27358 'urls' 4 +27359 'uron' 4 +27360 'urop' 4 +27361 'urre' 4 +27362 'urry' 4 +27363 'urse' 4 +27364 'urst' 4 +27365 'urus' 4 +27366 'usal' 4 +27367 'usat' 4 +27368 'usch' 4 +27369 'used' 4 +27370 'user' 4 +27371 'uses' 4 +27372 'uset' 4 +27373 'ushi' 4 +27374 'usic' 4 +27375 'ussy' 4 +27376 'usta' 4 +27377 'usto' 4 +27378 'ustr' 4 +27379 'utan' 4 +27380 'utar' 4 +27381 'utch' 4 +27382 'uted' 4 +27383 'uten' 4 +27384 'uter' 4 +27385 'utes' 4 +27386 'util' 4 +27387 'utor' 4 +27388 'utos' 4 +27389 'utra' 4 +27390 'utta' 4 +27391 'utto' 4 +27392 'uuid' 4 +27393 'uvre' 4 +27394 'uzzi' 4 +27395 'uzzy' 4 +27396 'ués' 4 +27397 'vais' 4 +27398 'vale' 4 +27399 'vals' 4 +27400 'valu' 4 +27401 'vana' 4 +27402 'vant' 4 +27403 'vard' 4 +27404 'vare' 4 +27405 'vari' 4 +27406 'vars' 4 +27407 'vecs' 4 +27408 'vect' 4 +27409 'veis' 4 +27410 'vell' 4 +27411 'velt' 4 +27412 'vely' 4 +27413 'vens' 4 +27414 'vent' 4 +27415 'verb' 4 +27416 'vere' 4 +27417 'vern' 4 +27418 'vers' 4 +27419 'vert' 4 +27420 'very' 4 +27421 'vest' 4 +27422 'vice' 4 +27423 'vict' 4 +27424 'vide' 4 +27425 'vier' 4 +27426 'view' 4 +27427 'vill' 4 +27428 'vine' 4 +27429 'ving' 4 +27430 'viol' 4 +27431 'virt' 4 +27432 'vity' 4 +27433 'vić' 4 +27434 'vlan' 4 +27435 'void' 4 +27436 'voir' 4 +27437 'voke' 4 +27438 'volt' 4 +27439 'vote' 4 +27440 'vous' 4 +27441 'vron' 4 +27442 'ván' 4 +27443 'vés' 4 +27444 'wait' 4 +27445 'wake' 4 +27446 'wald' 4 +27447 'walk' 4 +27448 'wall' 4 +27449 'wand' 4 +27450 'wang' 4 +27451 'want' 4 +27452 'ward' 4 +27453 'ware' 4 +27454 'warf' 4 +27455 'warm' 4 +27456 'warn' 4 +27457 'wart' 4 +27458 'warz' 4 +27459 'wash' 4 +27460 'wave' 4 +27461 'ways' 4 +27462 'weak' 4 +27463 'wear' 4 +27464 'weed' 4 +27465 'week' 4 +27466 'ween' 4 +27467 'weep' 4 +27468 'weet' 4 +27469 'well' 4 +27470 'wend' 4 +27471 'went' 4 +27472 'were' 4 +27473 'wers' 4 +27474 'wert' 4 +27475 'west' 4 +27476 'what' 4 +27477 'whel' 4 +27478 'when' 4 +27479 'wich' 4 +27480 'wick' 4 +27481 'wide' 4 +27482 'wife' 4 +27483 'wifi' 4 +27484 'wiki' 4 +27485 'wild' 4 +27486 'will' 4 +27487 'wind' 4 +27488 'wine' 4 +27489 'wing' 4 +27490 'wire' 4 +27491 'wise' 4 +27492 'wish' 4 +27493 'with' 4 +27494 'witz' 4 +27495 'wią' 4 +27496 'wię' 4 +27497 'wner' 4 +27498 'wolf' 4 +27499 'wood' 4 +27500 'word' 4 +27501 'work' 4 +27502 'worm' 4 +27503 'wort' 4 +27504 'wrap' 4 +27505 'writ' 4 +27506 'wär' 4 +27507 'wür' 4 +27508 'xico' 4 +27509 'ximo' 4 +27510 'xlim' 4 +27511 'xlsx' 4 +27512 'xmax' 4 +27513 'xton' 4 +27514 'xxxx' 4 +27515 'yaml' 4 +27516 'yang' 4 +27517 'yard' 4 +27518 'ycle' 4 +27519 'ydia' 4 +27520 'ydro' 4 +27521 'year' 4 +27522 'yect' 4 +27523 'yers' 4 +27524 'ygon' 4 +27525 'ying' 4 +27526 'ylan' 4 +27527 'yles' 4 +27528 'ylim' 4 +27529 'ylon' 4 +27530 'ylum' 4 +27531 'ymax' 4 +27532 'ymph' 4 +27533 'ynam' 4 +27534 'ynch' 4 +27535 'ynes' 4 +27536 'yond' 4 +27537 'your' 4 +27538 'yout' 4 +27539 'ypes' 4 +27540 'yrus' 4 +27541 'yses' 4 +27542 'ysis' 4 +27543 'yson' 4 +27544 'ysql' 4 +27545 'ytic' 4 +27546 'yyyy' 4 +27547 'zahl' 4 +27548 'zech' 4 +27549 'zeit' 4 +27550 'zens' 4 +27551 'zent' 4 +27552 'zero' 4 +27553 'zeta' 4 +27554 'zeug' 4 +27555 'zeń' 4 +27556 'ześ' 4 +27557 'zhen' 4 +27558 'zhou' 4 +27559 'zial' 4 +27560 'ziel' 4 +27561 'zier' 4 +27562 'zing' 4 +27563 'ził' 4 +27564 'zone' 4 +27565 'zoom' 4 +27566 'zung' 4 +27567 'zyme' 4 +27568 'zyć' 4 +27569 'zyż' 4 +27570 'zzle' 4 +27571 'zés' 4 +27572 'zös' 4 +27573 'ząd' 4 +27574 'ząt' 4 
+27575 '}}' 5 +32856 '="../' 5 +32857 '=====' 5 +32858 'ABASE' 5 +32859 'ACION' 5 +32860 'ACTER' 5 +32861 'ADMIN' 5 +32862 'ALIGN' 5 +32863 'ALLOW' 5 +32864 'ALTER' 5 +32865 'AMPLE' 5 +32866 'ANNEL' 5 +32867 'ANTLR' 5 +32868 'APTER' 5 +32869 'ARGET' 5 +32870 'ARRAY' 5 +32871 'ASCII' 5 +32872 'ATING' 5 +32873 'ATION' 5 +32874 'ATIVE' 5 +32875 'ATURE' 5 +32876 'About' 5 +32877 'Above' 5 +32878 'Activ' 5 +32879 'Actor' 5 +32880 'Added' 5 +32881 'Addon' 5 +32882 'Admin' 5 +32883 'After' 5 +32884 'Again' 5 +32885 'Agent' 5 +32886 'Alarm' 5 +32887 'Album' 5 +32888 'Alert' 5 +32889 'Alias' 5 +32890 'Alice' 5 +32891 'Align' 5 +32892 'Alive' 5 +32893 'Allen' 5 +32894 'Alloc' 5 +32895 'Allow' 5 +32896 'Along' 5 +32897 'Alpha' 5 +32898 'Alter' 5 +32899 'Among' 5 +32900 'Analy' 5 +32901 'Andre' 5 +32902 'Angel' 5 +32903 'Angle' 5 +32904 'Apart' 5 +32905 'Apple' 5 +32906 'Apply' 5 +32907 'Appro' 5 +32908 'April' 5 +32909 'Arena' 5 +32910 'Arial' 5 +32911 'Armor' 5 +32912 'Array' 5 +32913 'Arrow' 5 +32914 'Asian' 5 +32915 'Asked' 5 +32916 'Asset' 5 +32917 'Async' 5 +32918 'Atlas' 5 +32919 'Attrs' 5 +32920 'Audio' 5 +32921 'Audit' 5 +32922 'Autom' 5 +32923 'Aware' 5 +32924 'Azure' 5 +32925 'BEGIN' 5 +32926 'BLACK' 5 +32927 'BLOCK' 5 +32928 'BOARD' 5 +32929 'BOOST' 5 +32930 'BUILD' 5 +32931 'Based' 5 +32932 'Basic' 5 +32933 'Batch' 5 +32934 'Beans' 5 +32935 'Begin' 5 +32936 'Being' 5 +32937 'Below' 5 +32938 'Berry' 5 +32939 'Billy' 5 +32940 'Birth' 5 +32941 'Black' 5 +32942 'Blank' 5 +32943 'Block' 5 +32944 'Blood' 5 +32945 'Board' 5 +32946 'Bonus' 5 +32947 'Books' 5 +32948 'Boost' 5 +32949 'Bound' 5 +32950 'Brain' 5 +32951 'Brand' 5 +32952 'Break' 5 +32953 'Brian' 5 +32954 'Brien' 5 +32955 'Bring' 5 +32956 'Broad' 5 +32957 'Brown' 5 +32958 'Brush' 5 +32959 'Build' 5 +32960 'Built' 5 +32961 'Bytes' 5 +32962 'Bạn' 5 +32963 'CACHE' 5 +32964 'CCESS' 5 +32965 'CDATA' 5 +32966 'CHANT' 5 +32967 'CHECK' 5 +32968 'CLAIM' 5 +32969 'CLASS' 5 +32970 'CLEAR' 5 +32971 'CLUDE' 5 +32972 'COLOR' 5 +32973 'CONST' 5 +32974 'COUNT' 5 +32975 'COVID' 5 +32976 'CRIPT' 5 +32977 'CRYPT' 5 +32978 'CTION' 5 +32979 'CTYPE' 5 +32980 'Cache' 5 +32981 'Calls' 5 +32982 'Carol' 5 +32983 'Catal' 5 +32984 'Catch' 5 +32985 'Cause' 5 +32986 'Cells' 5 +32987 'Chain' 5 +32988 'Chang' 5 +32989 'Chars' 5 +32990 'Chart' 5 +32991 'Check' 5 +32992 'Chief' 5 +32993 'Child' 5 +32994 'China' 5 +32995 'Chris' 5 +32996 'Chunk' 5 +32997 'Civil' 5 +32998 'Claim' 5 +32999 'Class' 5 +33000 'Clean' 5 +33001 'Clear' 5 +33002 'Click' 5 +33003 'Clock' 5 +33004 'Clone' 5 +33005 'Close' 5 +33006 'Cloud' 5 +33007 'Codec' 5 +33008 'Codes' 5 +33009 'Color' 5 +33010 'Combo' 5 +33011 'Compl' 5 +33012 'Const' 5 +33013 'Contr' 5 +33014 'Coord' 5 +33015 'Could' 5 +33016 'Count' 5 +33017 'Court' 5 +33018 'Cover' 5 +33019 'Craft' 5 +33020 'Creat' 5 +33021 'Cross' 5 +33022 'Crypt' 5 +33023 'Curve' 5 +33024 'Cycle' 5 +33025 'Cómo' 5 +33026 'DEBUG' 5 +33027 'DELAY' 5 +33028 'DEPTH' 5 +33029 'Daily' 5 +33030 'Dates' 5 +33031 'Datum' 5 +33032 'David' 5 +33033 'Davis' 5 +33034 'Death' 5 +33035 'Debug' 5 +33036 'Decor' 5 +33037 'Delay' 5 +33038 'Deleg' 5 +33039 'Delta' 5 +33040 'Dense' 5 +33041 'Depth' 5 +33042 'Digit' 5 +33043 'Dirty' 5 +33044 'Domin' 5 +33045 'Draft' 5 +33046 'Dream' 5 +33047 'Drive' 5 +33048 'Dummy' 5 +33049 'EMAIL' 5 +33050 'EMBER' 5 +33051 'EMENT' 5 +33052 'EMPTY' 5 +33053 'ENAME' 5 +33054 'ENCES' 5 +33055 'ENDER' 5 +33056 'ENGTH' 5 +33057 'ENTER' 5 +33058 'ENTRY' 5 +33059 'EQUAL' 5 +33060 'ERROR' 5 +33061 'ETHER' 5 +33062 'ETHOD' 5 +33063 'EVENT' 5 +33064 
'EXIST' 5 +33065 'Early' 5 +33066 'Earth' 5 +33067 'Edges' 5 +33068 'Eight' 5 +33069 'Elect' 5 +33070 'Email' 5 +33071 'Embed' 5 +33072 'Emily' 5 +33073 'Empty' 5 +33074 'Enjoy' 5 +33075 'Enter' 5 +33076 'Entry' 5 +33077 'Epoch' 5 +33078 'Equal' 5 +33079 'Error' 5 +33080 'Estim' 5 +33081 'Evalu' 5 +33082 'Event' 5 +33083 'Every' 5 +33084 'Exact' 5 +33085 'Excel' 5 +33086 'Exist' 5 +33087 'Extra' 5 +33088 'FALSE' 5 +33089 'FAULT' 5 +33090 'FIELD' 5 +33091 'FILES' 5 +33092 'FIRST' 5 +33093 'FIXME' 5 +33094 'FLAGS' 5 +33095 'FLOAT' 5 +33096 'FOUND' 5 +33097 'FRAME' 5 +33098 'Faces' 5 +33099 'False' 5 +33100 'Fatal' 5 +33101 'Fault' 5 +33102 'Fetch' 5 +33103 'Field' 5 +33104 'Files' 5 +33105 'Final' 5 +33106 'First' 5 +33107 'Fixed' 5 +33108 'Flags' 5 +33109 'Flash' 5 +33110 'Float' 5 +33111 'Floor' 5 +33112 'Flush' 5 +33113 'Focus' 5 +33114 'Force' 5 +33115 'Forms' 5 +33116 'Forum' 5 +33117 'Found' 5 +33118 'Frame' 5 +33119 'Franc' 5 +33120 'Frank' 5 +33121 'Fresh' 5 +33122 'Front' 5 +33123 'GENER' 5 +33124 'GRAPH' 5 +33125 'GREEN' 5 +33126 'GRESS' 5 +33127 'GROUP' 5 +33128 'Games' 5 +33129 'Gamma' 5 +33130 'Gener' 5 +33131 'Genre' 5 +33132 'Georg' 5 +33133 'Getty' 5 +33134 'Ghost' 5 +33135 'Given' 5 +33136 'Glyph' 5 +33137 'Going' 5 +33138 'Grade' 5 +33139 'Grand' 5 +33140 'Grant' 5 +33141 'Graph' 5 +33142 'Great' 5 +33143 'Greek' 5 +33144 'Green' 5 +33145 'Group' 5 +33146 'Guard' 5 +33147 'Guest' 5 +33148 'Guide' 5 +33149 'Guild' 5 +33150 'HTTPS' 5 +33151 'Happy' 5 +33152 'Harry' 5 +33153 'Heart' 5 +33154 'Heavy' 5 +33155 'Hello' 5 +33156 'Henry' 5 +33157 'Hotel' 5 +33158 'Hours' 5 +33159 'House' 5 +33160 'Hover' 5 +33161 'Human' 5 +33162 'Hydro' 5 +33163 'Hyper' 5 +33164 'IDDEN' 5 +33165 'IDDLE' 5 +33166 'IDENT' 5 +33167 'IFIED' 5 +33168 'ILITY' 5 +33169 'IMAGE' 5 +33170 'IMARY' 5 +33171 'INDEX' 5 +33172 'INESS' 5 +33173 'INPUT' 5 +33174 'INTER' 5 +33175 'ISHED' 5 +33176 'ISING' 5 +33177 'ISION' 5 +33178 'ISTER' 5 +33179 'ITIES' 5 +33180 'ITION' 5 +33181 'IVATE' 5 +33182 'IVERS' 5 +33183 'Icons' 5 +33184 'Ident' 5 +33185 'Image' 5 +33186 'Impro' 5 +33187 'Incre' 5 +33188 'Index' 5 +33189 'India' 5 +33190 'Infos' 5 +33191 'Inner' 5 +33192 'Input' 5 +33193 'Instr' 5 +33194 'Intel' 5 +33195 'Inter' 5 +33196 'Intro' 5 +33197 'Islam' 5 +33198 'Issue' 5 +33199 'Items' 5 +33200 'Jacob' 5 +33201 'James' 5 +33202 'Japan' 5 +33203 'Jason' 5 +33204 'Jesus' 5 +33205 'Jimmy' 5 +33206 'Joint' 5 +33207 'Jones' 5 +33208 'Judge' 5 +33209 'KNOWN' 5 +33210 'Kelly' 5 +33211 'Kevin' 5 +33212 'Known' 5 +33213 'Krist' 5 +33214 'LABEL' 5 +33215 'LEASE' 5 +33216 'LEVEL' 5 +33217 'LIGHT' 5 +33218 'LIMIT' 5 +33219 'LOBAL' 5 +33220 'LOCAL' 5 +33221 'LOGIN' 5 +33222 'Label' 5 +33223 'Labor' 5 +33224 'Large' 5 +33225 'Later' 5 +33226 'Latin' 5 +33227 'Laura' 5 +33228 'Layer' 5 +33229 'Leaks' 5 +33230 'Learn' 5 +33231 'Leave' 5 +33232 'Legal' 5 +33233 'Lemma' 5 +33234 'Level' 5 +33235 'Lewis' 5 +33236 'Lexer' 5 +33237 'Light' 5 +33238 'Limit' 5 +33239 'Lines' 5 +33240 'Links' 5 +33241 'Linux' 5 +33242 'Lists' 5 +33243 'Liter' 5 +33244 'Local' 5 +33245 'Logic' 5 +33246 'Login' 5 +33247 'Looks' 5 +33248 'Louis' 5 +33249 'Lower' 5 +33250 'MATCH' 5 +33251 'MENTS' 5 +33252 'MODEL' 5 +33253 'MONTH' 5 +33254 'Macro' 5 +33255 'Magic' 5 +33256 'Major' 5 +33257 'Maker' 5 +33258 'March' 5 +33259 'Marco' 5 +33260 'Maria' 5 +33261 'Marie' 5 +33262 'Mario' 5 +33263 'Match' 5 +33264 'Maybe' 5 +33265 'Mayor' 5 +33266 'Means' 5 +33267 'Media' 5 +33268 'Merge' 5 +33269 'Metal' 5 +33270 'Meter' 5 +33271 'Miami' 5 +33272 'Micro' 5 +33273 
'Minor' 5 +33274 'Mixed' 5 +33275 'Mixin' 5 +33276 'Modal' 5 +33277 'Model' 5 +33278 'Modes' 5 +33279 'Money' 5 +33280 'Mongo' 5 +33281 'Month' 5 +33282 'Motor' 5 +33283 'Mount' 5 +33284 'Mouse' 5 +33285 'Movie' 5 +33286 'Multi' 5 +33287 'Music' 5 +33288 'MySQL' 5 +33289 'Named' 5 +33290 'Names' 5 +33291 'Neill' 5 +33292 'Never' 5 +33293 'Night' 5 +33294 'Nodes' 5 +33295 'Noise' 5 +33296 'North' 5 +33297 'Notes' 5 +33298 'Numer' 5 +33299 'OAuth' 5 +33300 'ODULE' 5 +33301 'ORDER' 5 +33302 'ORMAL' 5 +33303 'OTHER' 5 +33304 'OURCE' 5 +33305 'Obama' 5 +33306 'Occup' 5 +33307 'Offer' 5 +33308 'Olymp' 5 +33309 'Omega' 5 +33310 'Optim' 5 +33311 'Order' 5 +33312 'Organ' 5 +33313 'Other' 5 +33314 'Outer' 5 +33315 'Owner' 5 +33316 'PARAM' 5 +33317 'PATCH' 5 +33318 'PLIED' 5 +33319 'POINT' 5 +33320 'PRESS' 5 +33321 'PRINT' 5 +33322 'PROTO' 5 +33323 'Pager' 5 +33324 'Pages' 5 +33325 'Paint' 5 +33326 'Panel' 5 +33327 'Paper' 5 +33328 'Param' 5 +33329 'Paris' 5 +33330 'Parse' 5 +33331 'Parts' 5 +33332 'Party' 5 +33333 'Paste' 5 +33334 'Patch' 5 +33335 'Paths' 5 +33336 'Pause' 5 +33337 'Peter' 5 +33338 'Phase' 5 +33339 'Phone' 5 +33340 'Photo' 5 +33341 'Piece' 5 +33342 'Pitch' 5 +33343 'Pixel' 5 +33344 'Place' 5 +33345 'Plain' 5 +33346 'Plane' 5 +33347 'Plant' 5 +33348 'Plate' 5 +33349 'Point' 5 +33350 'Polit' 5 +33351 'Popup' 5 +33352 'Posts' 5 +33353 'Power' 5 +33354 'Press' 5 +33355 'Price' 5 +33356 'Prime' 5 +33357 'Print' 5 +33358 'Prior' 5 +33359 'Probe' 5 +33360 'Produ' 5 +33361 'Proof' 5 +33362 'Props' 5 +33363 'Proto' 5 +33364 'Proxy' 5 +33365 'Psych' 5 +33366 'QUERY' 5 +33367 'QUEST' 5 +33368 'Quant' 5 +33369 'Queen' 5 +33370 'Query' 5 +33371 'Quest' 5 +33372 'Queue' 5 +33373 'Quick' 5 +33374 'Quote' 5 +33375 'READY' 5 +33376 'REATE' 5 +33377 'RESET' 5 +33378 'RIGHT' 5 +33379 'ROUND' 5 +33380 'Radio' 5 +33381 'Raise' 5 +33382 'Range' 5 +33383 'Ratio' 5 +33384 'React' 5 +33385 'Ready' 5 +33386 'Refer' 5 +33387 'Regex' 5 +33388 'Reply' 5 +33389 'Reset' 5 +33390 'Retry' 5 +33391 'Right' 5 +33392 'River' 5 +33393 'Robin' 5 +33394 'Robot' 5 +33395 'Roger' 5 +33396 'Roles' 5 +33397 'Roman' 5 +33398 'Round' 5 +33399 'Route' 5 +33400 'Royal' 5 +33401 'Rules' 5 +33402 'SHIFT' 5 +33403 'SHORT' 5 +33404 'SPACE' 5 +33405 'SSION' 5 +33406 'STAND' 5 +33407 'START' 5 +33408 'STATE' 5 +33409 'STORE' 5 +33410 'STYLE' 5 +33411 'Saint' 5 +33412 'Sales' 5 +33413 'Santa' 5 +33414 'Sarah' 5 +33415 'Saved' 5 +33416 'Scale' 5 +33417 'Scene' 5 +33418 'Sched' 5 +33419 'Scope' 5 +33420 'Score' 5 +33421 'Scott' 5 +33422 'Sense' 5 +33423 'Separ' 5 +33424 'Setup' 5 +33425 'Seven' 5 +33426 'Shape' 5 +33427 'Share' 5 +33428 'Sharp' 5 +33429 'Sheet' 5 +33430 'Shell' 5 +33431 'Shift' 5 +33432 'Short' 5 +33433 'Sigma' 5 +33434 'Simon' 5 +33435 'Since' 5 +33436 'Sizer' 5 +33437 'Skill' 5 +33438 'Sleep' 5 +33439 'Slice' 5 +33440 'Slide' 5 +33441 'Small' 5 +33442 'Smart' 5 +33443 'Smith' 5 +33444 'Solar' 5 +33445 'Solid' 5 +33446 'Songs' 5 +33447 'Sorry' 5 +33448 'Sound' 5 +33449 'South' 5 +33450 'Space' 5 +33451 'Spain' 5 +33452 'Spark' 5 +33453 'Spawn' 5 +33454 'Spect' 5 +33455 'Speed' 5 +33456 'Spell' 5 +33457 'Split' 5 +33458 'Sport' 5 +33459 'Stack' 5 +33460 'Staff' 5 +33461 'Stage' 5 +33462 'Stamp' 5 +33463 'Stand' 5 +33464 'Stars' 5 +33465 'Start' 5 +33466 'State' 5 +33467 'Stats' 5 +33468 'Steps' 5 +33469 'Steve' 5 +33470 'Still' 5 +33471 'Stock' 5 +33472 'Stone' 5 +33473 'Store' 5 +33474 'Storm' 5 +33475 'Story' 5 +33476 'Strip' 5 +33477 'Study' 5 +33478 'Style' 5 +33479 'Suite' 5 +33480 'Super' 5 +33481 'Susan' 5 +33482 
'Sweet' 5 +33483 'Swift' 5 +33484 'TABLE' 5 +33485 'TEGER' 5 +33486 'TITLE' 5 +33487 'TOKEN' 5 +33488 'TRACE' 5 +33489 'TRACK' 5 +33490 'TRACT' 5 +33491 'TRAIN' 5 +33492 'TRANS' 5 +33493 'TYPES' 5 +33494 'Table' 5 +33495 'Taken' 5 +33496 'Tasks' 5 +33497 'Techn' 5 +33498 'Terms' 5 +33499 'Tests' 5 +33500 'Texas' 5 +33501 'Thank' 5 +33502 'Their' 5 +33503 'Theme' 5 +33504 'There' 5 +33505 'These' 5 +33506 'Theta' 5 +33507 'Thing' 5 +33508 'Think' 5 +33509 'Third' 5 +33510 'Those' 5 +33511 'Three' 5 +33512 'Throw' 5 +33513 'Thumb' 5 +33514 'Thêm' 5 +33515 'Tiles' 5 +33516 'Timer' 5 +33517 'Times' 5 +33518 'Title' 5 +33519 'ToOne' 5 +33520 'Today' 5 +33521 'Token' 5 +33522 'Tools' 5 +33523 'Topic' 5 +33524 'Total' 5 +33525 'Touch' 5 +33526 'Trace' 5 +33527 'Track' 5 +33528 'Trade' 5 +33529 'Train' 5 +33530 'Trait' 5 +33531 'Trans' 5 +33532 'Trial' 5 +33533 'Trump' 5 +33534 'Trust' 5 +33535 'Truth' 5 +33536 'Tuple' 5 +33537 'Tweet' 5 +33538 'Typed' 5 +33539 'Types' 5 +33540 'UMENT' 5 +33541 'USTOM' 5 +33542 'UTERS' 5 +33543 'UTION' 5 +33544 'Unary' 5 +33545 'Under' 5 +33546 'Union' 5 +33547 'Units' 5 +33548 'Unity' 5 +33549 'Until' 5 +33550 'Upper' 5 +33551 'Urban' 5 +33552 'Usage' 5 +33553 'Users' 5 +33554 'Using' 5 +33555 'Utils' 5 +33556 'VALID' 5 +33557 'VALUE' 5 +33558 'VIDEO' 5 +33559 'VIDIA' 5 +33560 'Valid' 5 +33561 'Valor' 5 +33562 'Value' 5 +33563 'Video' 5 +33564 'Views' 5 +33565 'Visit' 5 +33566 'Você' 5 +33567 'Voice' 5 +33568 'WHERE' 5 +33569 'WHITE' 5 +33570 'WIDTH' 5 +33571 'WRITE' 5 +33572 'Watch' 5 +33573 'Water' 5 +33574 'Wheel' 5 +33575 'Where' 5 +33576 'Which' 5 +33577 'While' 5 +33578 'White' 5 +33579 'Whole' 5 +33580 'Width' 5 +33581 'Women' 5 +33582 'Words' 5 +33583 'Works' 5 +33584 'World' 5 +33585 'Would' 5 +33586 'Write' 5 +33587 'Years' 5 +33588 'Young' 5 +33589 '[:,:,' 5 +33590 '[…]' 5 +33591 '\\":\\"' 5 +33592 '^−^' 5 +33593 'abama' 5 +33594 'abase' 5 +33595 'abbit' 5 +33596 'abeth' 5 +33597 'abled' 5 +33598 'ables' 5 +33599 'abort' 5 +33600 'about' 5 +33601 'above' 5 +33602 'abric' 5 +33603 'accum' 5 +33604 'accur' 5 +33605 'aceae' 5 +33606 'acent' 5 +33607 'acerb' 5 +33608 'aceut' 5 +33609 'ached' 5 +33610 'achel' 5 +33611 'achen' 5 +33612 'acher' 5 +33613 'aches' 5 +33614 'acial' 5 +33615 'acies' 5 +33616 'acing' 5 +33617 'acion' 5 +33618 'acity' 5 +33619 'ació' 5 +33620 'ację' 5 +33621 'acked' 5 +33622 'acker' 5 +33623 'acket' 5 +33624 'acles' 5 +33625 'acons' 5 +33626 'acted' 5 +33627 'acter' 5 +33628 'actic' 5 +33629 'activ' 5 +33630 'actly' 5 +33631 'actor' 5 +33632 'actus' 5 +33633 'acute' 5 +33634 'adapt' 5 +33635 'adata' 5 +33636 'adays' 5 +33637 'addTo' 5 +33638 'added' 5 +33639 'adder' 5 +33640 'addle' 5 +33641 'addon' 5 +33642 'adena' 5 +33643 'adeon' 5 +33644 'adequ' 5 +33645 'aders' 5 +33646 'adesh' 5 +33647 'adian' 5 +33648 'adier' 5 +33649 'adies' 5 +33650 'ading' 5 +33651 'adium' 5 +33652 'admin' 5 +33653 'adoop' 5 +33654 'adora' 5 +33655 'adors' 5 +33656 'adows' 5 +33657 'adult' 5 +33658 'adém' 5 +33659 'afety' 5 +33660 'affer' 5 +33661 'after' 5 +33662 'again' 5 +33663 'agara' 5 +33664 'agens' 5 +33665 'agent' 5 +33666 'agers' 5 +33667 'agged' 5 +33668 'agger' 5 +33669 'aggio' 5 +33670 'agher' 5 +33671 'agine' 5 +33672 'aging' 5 +33673 'agles' 5 +33674 'agner' 5 +33675 'agnet' 5 +33676 'agram' 5 +33677 'agree' 5 +33678 'agrid' 5 +33679 'agues' 5 +33680 'ahead' 5 +33681 'ahoma' 5 +33682 'ahren' 5 +33683 'aient' 5 +33684 'ailed' 5 +33685 'aille' 5 +33686 'ained' 5 +33687 'ainen' 5 +33688 'ainer' 5 +33689 'aines' 5 +33690 'aired' 5 +33691 'aires' 
5 +33692 'aiser' 5 +33693 'aises' 5 +33694 'aison' 5 +33695 'ając' 5 +33696 'akers' 5 +33697 'aking' 5 +33698 'akter' 5 +33699 'aland' 5 +33700 'alarm' 5 +33701 'album' 5 +33702 'alert' 5 +33703 'ależ' 5 +33704 'algia' 5 +33705 'alian' 5 +33706 'alias' 5 +33707 'alice' 5 +33708 'alien' 5 +33709 'align' 5 +33710 'aline' 5 +33711 'aling' 5 +33712 'alion' 5 +33713 'alist' 5 +33714 'ality' 5 +33715 'alive' 5 +33716 'alkyl' 5 +33717 'allah' 5 +33718 'allas' 5 +33719 'alled' 5 +33720 'allel' 5 +33721 'allen' 5 +33722 'aller' 5 +33723 'alles' 5 +33724 'allet' 5 +33725 'allic' 5 +33726 'alloc' 5 +33727 'allow' 5 +33728 'alone' 5 +33729 'along' 5 +33730 'alore' 5 +33731 'alous' 5 +33732 'alpha' 5 +33733 'alter' 5 +33734 'amate' 5 +33735 'ambda' 5 +33736 'amber' 5 +33737 'ambia' 5 +33738 'ambig' 5 +33739 'amble' 5 +33740 'amboo' 5 +33741 'ament' 5 +33742 'amera' 5 +33743 'amide' 5 +33744 'amily' 5 +33745 'amina' 5 +33746 'amine' 5 +33747 'aming' 5 +33748 'amino' 5 +33749 'amins' 5 +33750 'ammad' 5 +33751 'ammed' 5 +33752 'ammer' 5 +33753 'among' 5 +33754 'amoto' 5 +33755 'amour' 5 +33756 'amous' 5 +33757 'amped' 5 +33758 'ample' 5 +33759 'amura' 5 +33760 'analy' 5 +33761 'anced' 5 +33762 'ancel' 5 +33763 'ancer' 5 +33764 'ances' 5 +33765 'anche' 5 +33766 'ancia' 5 +33767 'andal' 5 +33768 'andan' 5 +33769 'andas' 5 +33770 'anded' 5 +33771 'andel' 5 +33772 'anden' 5 +33773 'ander' 5 +33774 'andez' 5 +33775 'andid' 5 +33776 'andin' 5 +33777 'andle' 5 +33778 'andom' 5 +33779 'andon' 5 +33780 'andra' 5 +33781 'andre' 5 +33782 'andro' 5 +33783 'andum' 5 +33784 'anean' 5 +33785 'anese' 5 +33786 'angan' 5 +33787 'anged' 5 +33788 'angel' 5 +33789 'angen' 5 +33790 'anger' 5 +33791 'anges' 5 +33792 'angle' 5 +33793 'anian' 5 +33794 'anine' 5 +33795 'aning' 5 +33796 'anish' 5 +33797 'anity' 5 +33798 'anium' 5 +33799 'anked' 5 +33800 'anmar' 5 +33801 'annah' 5 +33802 'anned' 5 +33803 'annel' 5 +33804 'anner' 5 +33805 'annes' 5 +33806 'annie' 5 +33807 'annon' 5 +33808 'annot' 5 +33809 'anova' 5 +33810 'ansas' 5 +33811 'ansen' 5 +33812 'ansom' 5 +33813 'anson' 5 +33814 'antal' 5 +33815 'antan' 5 +33816 'anted' 5 +33817 'anten' 5 +33818 'anter' 5 +33819 'antes' 5 +33820 'antha' 5 +33821 'antic' 5 +33822 'antis' 5 +33823 'antly' 5 +33824 'antom' 5 +33825 'anton' 5 +33826 'antry' 5 +33827 'anuts' 5 +33828 'anyon' 5 +33829 'ança' 5 +33830 'apers' 5 +33831 'apest' 5 +33832 'apeut' 5 +33833 'aping' 5 +33834 'apons' 5 +33835 'apore' 5 +33836 'apped' 5 +33837 'appen' 5 +33838 'apper' 5 +33839 'apple' 5 +33840 'apply' 5 +33841 'appro' 5 +33842 'apsed' 5 +33843 'apses' 5 +33844 'apter' 5 +33845 'aptic' 5 +33846 'aptop' 5 +33847 'arant' 5 +33848 'archy' 5 +33849 'arded' 5 +33850 'arden' 5 +33851 'ardin' 5 +33852 'ardon' 5 +33853 'areas' 5 +33854 'arena' 5 +33855 'arent' 5 +33856 'arest' 5 +33857 'areth' 5 +33858 'argar' 5 +33859 'arger' 5 +33860 'arget' 5 +33861 'argin' 5 +33862 'argon' 5 +33863 'arial' 5 +33864 'arian' 5 +33865 'arias' 5 +33866 'ariat' 5 +33867 'aries' 5 +33868 'arily' 5 +33869 'arine' 5 +33870 'aring' 5 +33871 'arios' 5 +33872 'arith' 5 +33873 'arity' 5 +33874 'arium' 5 +33875 'arius' 5 +33876 'arked' 5 +33877 'arker' 5 +33878 'armac' 5 +33879 'armed' 5 +33880 'armor' 5 +33881 'array' 5 +33882 'arrow' 5 +33883 'arser' 5 +33884 'arten' 5 +33885 'arter' 5 +33886 'arthy' 5 +33887 'artic' 5 +33888 'arton' 5 +33889 'arxiv' 5 +33890 'aría' 5 +33891 'asaki' 5 +33892 'asant' 5 +33893 'ascal' 5 +33894 'ascii' 5 +33895 'ascus' 5 +33896 'asers' 5 +33897 'ashed' 5 +33898 'ashes' 5 +33899 'asian' 5 +33900 'aside' 5 
+33901 'asing' 5 +33902 'asion' 5 +33903 'asive' 5 +33904 'asket' 5 +33905 'asons' 5 +33906 'asper' 5 +33907 'assed' 5 +33908 'assen' 5 +33909 'asser' 5 +33910 'asses' 5 +33911 'asset' 5 +33912 'assic' 5 +33913 'assin' 5 +33914 'assis' 5 +33915 'assoc' 5 +33916 'asted' 5 +33917 'aster' 5 +33918 'astes' 5 +33919 'astic' 5 +33920 'aston' 5 +33921 'astro' 5 +33922 'asure' 5 +33923 'asury' 5 +33924 'async' 5 +33925 'ataka' 5 +33926 'atche' 5 +33927 'ategy' 5 +33928 'ately' 5 +33929 'atern' 5 +33930 'aters' 5 +33931 'atest' 5 +33932 'ateur' 5 +33933 'atham' 5 +33934 'athan' 5 +33935 'athed' 5 +33936 'ather' 5 +33937 'athom' 5 +33938 'athon' 5 +33939 'atial' 5 +33940 'atica' 5 +33941 'atics' 5 +33942 'atile' 5 +33943 'ating' 5 +33944 'ation' 5 +33945 'atisf' 5 +33946 'atism' 5 +33947 'ativa' 5 +33948 'ative' 5 +33949 'ativo' 5 +33950 'atoes' 5 +33951 'atoms' 5 +33952 'atomy' 5 +33953 'atore' 5 +33954 'atori' 5 +33955 'ators' 5 +33956 'atory' 5 +33957 'atrix' 5 +33958 'atted' 5 +33959 'atten' 5 +33960 'atter' 5 +33961 'attle' 5 +33962 'attrs' 5 +33963 'atura' 5 +33964 'ature' 5 +33965 'atype' 5 +33966 'atég' 5 +33967 'audio' 5 +33968 'audit' 5 +33969 'aught' 5 +33970 'aukee' 5 +33971 'aurus' 5 +33972 'ausal' 5 +33973 'aused' 5 +33974 'auses' 5 +33975 'autom' 5 +33976 'autor' 5 +33977 'autos' 5 +33978 'autre' 5 +33979 'auté' 5 +33980 'avage' 5 +33981 'avail' 5 +33982 'avery' 5 +33983 'avian' 5 +33984 'avier' 5 +33985 'aving' 5 +33986 'avoid' 5 +33987 'avoir' 5 +33988 'avors' 5 +33989 'avour' 5 +33990 'await' 5 +33991 'award' 5 +33992 'aware' 5 +33993 'aways' 5 +33994 'axter' 5 +33995 'ayers' 5 +33996 'aying' 5 +33997 'aylor' 5 +33998 'ayout' 5 +33999 'azard' 5 +34000 'azine' 5 +34001 'azing' 5 +34002 'azole' 5 +34003 'azure' 5 +34004 'babel' 5 +34005 'bably' 5 +34006 'backs' 5 +34007 'badge' 5 +34008 'balls' 5 +34009 'bands' 5 +34010 'banks' 5 +34011 'based' 5 +34012 'basic' 5 +34013 'basis' 5 +34014 'batch' 5 +34015 'beans' 5 +34016 'becca' 5 +34017 'becue' 5 +34018 'begin' 5 +34019 'being' 5 +34020 'below' 5 +34021 'bench' 5 +34022 'benef' 5 +34023 'beros' 5 +34024 'berra' 5 +34025 'berry' 5 +34026 'berta' 5 +34027 'berto' 5 +34028 'binom' 5 +34029 'birds' 5 +34030 'birth' 5 +34031 'bject' 5 +34032 'black' 5 +34033 'blade' 5 +34034 'blank' 5 +34035 'blast' 5 +34036 'blems' 5 +34037 'blind' 5 +34038 'bling' 5 +34039 'block' 5 +34040 'blogs' 5 +34041 'blood' 5 +34042 'boBox' 5 +34043 'board' 5 +34044 'bones' 5 +34045 'books' 5 +34046 'boost' 5 +34047 'borne' 5 +34048 'bound' 5 +34049 'bourg' 5 +34050 'boxes' 5 +34051 'brace' 5 +34052 'brain' 5 +34053 'brand' 5 +34054 'brane' 5 +34055 'bread' 5 +34056 'break' 5 +34057 'brevi' 5 +34058 'brief' 5 +34059 'bring' 5 +34060 'broad' 5 +34061 'brook' 5 +34062 'brown' 5 +34063 'brush' 5 +34064 'bráz' 5 +34065 'bsite' 5 +34066 'bucks' 5 +34067 'build' 5 +34068 'built' 5 +34069 'buntu' 5 +34070 'burgh' 5 +34071 'burst' 5 +34072 'byter' 5 +34073 'bytes' 5 +34074 'cache' 5 +34075 'caffe' 5 +34076 'calls' 5 +34077 'camel' 5 +34078 'cards' 5 +34079 'caret' 5 +34080 'carry' 5 +34081 'cases' 5 +34082 'casts' 5 +34083 'catal' 5 +34084 'catch' 5 +34085 'cause' 5 +34086 'ccess' 5 +34087 'ccion' 5 +34088 'cció' 5 +34089 'ccoli' 5 +34090 'cdnjs' 5 +34091 'cdots' 5 +34092 'ceans' 5 +34093 'cedes' 5 +34094 'ceive' 5 +34095 'cells' 5 +34096 'cence' 5 +34097 'cents' 5 +34098 'cerpt' 5 +34099 'cesso' 5 +34100 'chaft' 5 +34101 'chain' 5 +34102 'chair' 5 +34103 'chang' 5 +34104 'chant' 5 +34105 'charg' 5 +34106 'chars' 5 +34107 'chart' 5 +34108 'check' 5 +34109 'chell' 5 
+34110 'chemy' 5 +34111 'cheon' 5 +34112 'chers' 5 +34113 'chest' 5 +34114 'chief' 5 +34115 'child' 5 +34116 'ching' 5 +34117 'chini' 5 +34118 'chlor' 5 +34119 'chool' 5 +34120 'chrom' 5 +34121 'chron' 5 +34122 'chten' 5 +34123 'chter' 5 +34124 'chunk' 5 +34125 'cible' 5 +34126 'cient' 5 +34127 'civil' 5 +34128 'ción' 5 +34129 'cknow' 5 +34130 'ckså' 5 +34131 'claim' 5 +34132 'clair' 5 +34133 'clamp' 5 +34134 'clang' 5 +34135 'class' 5 +34136 'clave' 5 +34137 'clean' 5 +34138 'clear' 5 +34139 'click' 5 +34140 'cline' 5 +34141 'cling' 5 +34142 'clock' 5 +34143 'clone' 5 +34144 'close' 5 +34145 'cloth' 5 +34146 'cloud' 5 +34147 'clude' 5 +34148 'clust' 5 +34149 'coach' 5 +34150 'codec' 5 +34151 'coded' 5 +34152 'coder' 5 +34153 'codes' 5 +34154 'coeff' 5 +34155 'cohol' 5 +34156 'coins' 5 +34157 'colon' 5 +34158 'color' 5 +34159 'combe' 5 +34160 'combo' 5 +34161 'comed' 5 +34162 'comes' 5 +34163 'comic' 5 +34164 'comma' 5 +34165 'compl' 5 +34166 'conda' 5 +34167 'conde' 5 +34168 'conom' 5 +34169 'const' 5 +34170 'contr' 5 +34171 'coord' 5 +34172 'cores' 5 +34173 'could' 5 +34174 'count' 5 +34175 'court' 5 +34176 'cover' 5 +34177 'craft' 5 +34178 'crawl' 5 +34179 'creat' 5 +34180 'creen' 5 +34181 'crete' 5 +34182 'crets' 5 +34183 'cribe' 5 +34184 'crime' 5 +34185 'cript' 5 +34186 'crire' 5 +34187 'croft' 5 +34188 'cross' 5 +34189 'crypt' 5 +34190 'ctica' 5 +34191 'ction' 5 +34192 'ctors' 5 +34193 'ctype' 5 +34194 'cubic' 5 +34195 'cular' 5 +34196 'cules' 5 +34197 'culos' 5 +34198 'culus' 5 +34199 'curve' 5 +34200 'cycle' 5 +34201 'daily' 5 +34202 'datab' 5 +34203 'datas' 5 +34204 'datat' 5 +34205 'dated' 5 +34206 'dater' 5 +34207 'dates' 5 +34208 'datum' 5 +34209 'death' 5 +34210 'debug' 5 +34211 'decay' 5 +34212 'decor' 5 +34213 'defer' 5 +34214 'defin' 5 +34215 'delay' 5 +34216 'deleg' 5 +34217 'delta' 5 +34218 'denly' 5 +34219 'dense' 5 +34220 'depth' 5 +34221 'deque' 5 +34222 'deriv' 5 +34223 'descr' 5 +34224 'devel' 5 +34225 'dfrac' 5 +34226 'digit' 5 +34227 'dimen' 5 +34228 'dings' 5 +34229 'dirty' 5 +34230 'doesn' 5 +34231 'doing' 5 +34232 'domin' 5 +34233 'doors' 5 +34234 'draft' 5 +34235 'dream' 5 +34236 'drive' 5 +34237 'dtype' 5 +34238 'duced' 5 +34239 'ducer' 5 +34240 'duino' 5 +34241 'dummy' 5 +34242 'earch' 5 +34243 'early' 5 +34244 'earth' 5 +34245 'ebook' 5 +34246 'ecess' 5 +34247 'ectar' 5 +34248 'ected' 5 +34249 'ector' 5 +34250 'edges' 5 +34251 'eding' 5 +34252 'eenth' 5 +34253 'eeper' 5 +34254 'efore' 5 +34255 'eigen' 5 +34256 'eight' 5 +34257 'eking' 5 +34258 'eland' 5 +34259 'elect' 5 +34260 'eless' 5 +34261 'elfth' 5 +34262 'elian' 5 +34263 'elijk' 5 +34264 'eline' 5 +34265 'eling' 5 +34266 'elist' 5 +34267 'elius' 5 +34268 'ellan' 5 +34269 'ellar' 5 +34270 'elled' 5 +34271 'ellen' 5 +34272 'eller' 5 +34273 'elles' 5 +34274 'ellig' 5 +34275 'ellij' 5 +34276 'ellow' 5 +34277 'elman' 5 +34278 'elong' 5 +34279 'elope' 5 +34280 'elsen' 5 +34281 'elson' 5 +34282 'elter' 5 +34283 'elves' 5 +34284 'email' 5 +34285 'emale' 5 +34286 'emann' 5 +34287 'emark' 5 +34288 'embed' 5 +34289 'ember' 5 +34290 'emble' 5 +34291 'embre' 5 +34292 'embro' 5 +34293 'ement' 5 +34294 'emies' 5 +34295 'emoji' 5 +34296 'emory' 5 +34297 'emplo' 5 +34298 'empor' 5 +34299 'empre' 5 +34300 'empty' 5 +34301 'emás' 5 +34302 'ename' 5 +34303 'enant' 5 +34304 'enary' 5 +34305 'enced' 5 +34306 'encer' 5 +34307 'ences' 5 +34308 'encia' 5 +34309 'encil' 5 +34310 'endar' 5 +34311 'endas' 5 +34312 'ended' 5 +34313 'enden' 5 +34314 'ender' 5 +34315 'endez' 5 +34316 'endif' 5 +34317 'endix' 5 +34318 'endor' 5 
+34319 'endra' 5 +34320 'endum' 5 +34321 'eners' 5 +34322 'enery' 5 +34323 'eness' 5 +34324 'enger' 5 +34325 'ength' 5 +34326 'ening' 5 +34327 'enium' 5 +34328 'ennen' 5 +34329 'ennes' 5 +34330 'ennis' 5 +34331 'ensch' 5 +34332 'ensed' 5 +34333 'ensen' 5 +34334 'enser' 5 +34335 'enses' 5 +34336 'ensis' 5 +34337 'enson' 5 +34338 'ensor' 5 +34339 'ensus' 5 +34340 'ental' 5 +34341 'ented' 5 +34342 'enter' 5 +34343 'entes' 5 +34344 'entic' 5 +34345 'entin' 5 +34346 'ently' 5 +34347 'enton' 5 +34348 'entre' 5 +34349 'entry' 5 +34350 'enzie' 5 +34351 'ença' 5 +34352 'epend' 5 +34353 'eping' 5 +34354 'epoch' 5 +34355 'equal' 5 +34356 'equip' 5 +34357 'equiv' 5 +34358 'erala' 5 +34359 'erald' 5 +34360 'erals' 5 +34361 'erase' 5 +34362 'erate' 5 +34363 'ereum' 5 +34364 'ergic' 5 +34365 'ergus' 5 +34366 'erial' 5 +34367 'eries' 5 +34368 'ering' 5 +34369 'erior' 5 +34370 'ermal' 5 +34371 'erman' 5 +34372 'ernal' 5 +34373 'ernel' 5 +34374 'erner' 5 +34375 'errno' 5 +34376 'error' 5 +34377 'ersen' 5 +34378 'erset' 5 +34379 'erson' 5 +34380 'erten' 5 +34381 'erton' 5 +34382 'erved' 5 +34383 'erver' 5 +34384 'erves' 5 +34385 'esian' 5 +34386 'esity' 5 +34387 'esium' 5 +34388 'esome' 5 +34389 'espan' 5 +34390 'esper' 5 +34391 'essed' 5 +34392 'essel' 5 +34393 'essen' 5 +34394 'esser' 5 +34395 'esses' 5 +34396 'essim' 5 +34397 'essor' 5 +34398 'ested' 5 +34399 'ester' 5 +34400 'estic' 5 +34401 'estim' 5 +34402 'eston' 5 +34403 'estre' 5 +34404 'estro' 5 +34405 'etary' 5 +34406 'eteor' 5 +34407 'eters' 5 +34408 'ether' 5 +34409 'ethod' 5 +34410 'ethyl' 5 +34411 'etics' 5 +34412 'eties' 5 +34413 'etime' 5 +34414 'etine' 5 +34415 'eting' 5 +34416 'etric' 5 +34417 'ettel' 5 +34418 'etter' 5 +34419 'ettes' 5 +34420 'ettle' 5 +34421 'etype' 5 +34422 'evalu' 5 +34423 'event' 5 +34424 'every' 5 +34425 'ewnę' 5 +34426 'exact' 5 +34427 'excel' 5 +34428 'exist' 5 +34429 'exper' 5 +34430 'explo' 5 +34431 'extra' 5 +34432 'faces' 5 +34433 'facts' 5 +34434 'faith' 5 +34435 'falls' 5 +34436 'false' 5 +34437 'fasta' 5 +34438 'fatal' 5 +34439 'fault' 5 +34440 'favor' 5 +34441 'fetch' 5 +34442 'ffect' 5 +34443 'ffiti' 5 +34444 'ffset' 5 +34445 'fiber' 5 +34446 'field' 5 +34447 'fight' 5 +34448 'filer' 5 +34449 'files' 5 +34450 'filtr' 5 +34451 'final' 5 +34452 'fires' 5 +34453 'first' 5 +34454 'fixed' 5 +34455 'flags' 5 +34456 'flake' 5 +34457 'flare' 5 +34458 'flash' 5 +34459 'flies' 5 +34460 'float' 5 +34461 'floor' 5 +34462 'flows' 5 +34463 'fluid' 5 +34464 'fluor' 5 +34465 'flush' 5 +34466 'fname' 5 +34467 'focus' 5 +34468 'folio' 5 +34469 'fonts' 5 +34470 'force' 5 +34471 'forge' 5 +34472 'forma' 5 +34473 'forme' 5 +34474 'forms' 5 +34475 'forth' 5 +34476 'forum' 5 +34477 'found' 5 +34478 'frame' 5 +34479 'fresh' 5 +34480 'frica' 5 +34481 'fried' 5 +34482 'front' 5 +34483 'fruit' 5 +34484 'ftime' 5 +34485 'ftype' 5 +34486 'fully' 5 +34487 'führ' 5 +34488 'gaard' 5 +34489 'gable' 5 +34490 'games' 5 +34491 'gamma' 5 +34492 'gauge' 5 +34493 'geant' 5 +34494 'geben' 5 +34495 'gebra' 5 +34496 'gence' 5 +34497 'gency' 5 +34498 'gende' 5 +34499 'gener' 5 +34500 'genes' 5 +34501 'genic' 5 +34502 'genre' 5 +34503 'geois' 5 +34504 'geons' 5 +34505 'gesch' 5 +34506 'getId' 5 +34507 'giene' 5 +34508 'given' 5 +34509 'glass' 5 +34510 'glyph' 5 +34511 'gmail' 5 +34512 'gment' 5 +34513 'goals' 5 +34514 'going' 5 +34515 'grade' 5 +34516 'grams' 5 +34517 'grand' 5 +34518 'grant' 5 +34519 'graph' 5 +34520 'grass' 5 +34521 'grave' 5 +34522 'great' 5 +34523 'green' 5 +34524 'gress' 5 +34525 'group' 5 +34526 'grown' 5 +34527 'grund' 5 
+34528 'guard' 5 +34529 'guess' 5 +34530 'guest' 5 +34531 'guide' 5 +34532 'guild' 5 +34533 'gunta' 5 +34534 'habit' 5 +34535 'hagen' 5 +34536 'hands' 5 +34537 'happy' 5 +34538 'hardt' 5 +34539 'harma' 5 +34540 'ható' 5 +34541 'haust' 5 +34542 'haven' 5 +34543 'heads' 5 +34544 'heard' 5 +34545 'heart' 5 +34546 'heast' 5 +34547 'heavy' 5 +34548 'heets' 5 +34549 'heits' 5 +34550 'hello' 5 +34551 'hemat' 5 +34552 'hemer' 5 +34553 'henyl' 5 +34554 'heres' 5 +34555 'herty' 5 +34556 'heses' 5 +34557 'hesia' 5 +34558 'hesis' 5 +34559 'heter' 5 +34560 'hetic' 5 +34561 'hetti' 5 +34562 'hetto' 5 +34563 'heure' 5 +34564 'hibit' 5 +34565 'hicle' 5 +34566 'hline' 5 +34567 'holds' 5 +34568 'holes' 5 +34569 'homme' 5 +34570 'hooks' 5 +34571 'hores' 5 +34572 'horse' 5 +34573 'hosts' 5 +34574 'hotel' 5 +34575 'hours' 5 +34576 'house' 5 +34577 'hover' 5 +34578 'hower' 5 +34579 'https' 5 +34580 'human' 5 +34581 'hurst' 5 +34582 'hydro' 5 +34583 'hyper' 5 +34584 'hält' 5 +34585 'häng' 5 +34586 'hões' 5 +34587 'iable' 5 +34588 'ially' 5 +34589 'ialog' 5 +34590 'iance' 5 +34591 'iasis' 5 +34592 'iated' 5 +34593 'iates' 5 +34594 'iator' 5 +34595 'iała' 5 +34596 'ibaba' 5 +34597 'ibile' 5 +34598 'ibles' 5 +34599 'iből' 5 +34600 'icago' 5 +34601 'icals' 5 +34602 'icana' 5 +34603 'icans' 5 +34604 'icate' 5 +34605 'ichen' 5 +34606 'icher' 5 +34607 'ichte' 5 +34608 'icial' 5 +34609 'ician' 5 +34610 'icide' 5 +34611 'icine' 5 +34612 'icing' 5 +34613 'icion' 5 +34614 'icios' 5 +34615 'icism' 5 +34616 'icity' 5 +34617 'ició' 5 +34618 'icked' 5 +34619 'icken' 5 +34620 'icker' 5 +34621 'icket' 5 +34622 'ická' 5 +34623 'ické' 5 +34624 'ický' 5 +34625 'icles' 5 +34626 'icode' 5 +34627 'icons' 5 +34628 'icted' 5 +34629 'ictor' 5 +34630 'icult' 5 +34631 'idade' 5 +34632 'idase' 5 +34633 'idata' 5 +34634 'idden' 5 +34635 'iddle' 5 +34636 'ideal' 5 +34637 'ident' 5 +34638 'ideos' 5 +34639 'iders' 5 +34640 'idget' 5 +34641 'idian' 5 +34642 'idine' 5 +34643 'iding' 5 +34644 'idity' 5 +34645 'idium' 5 +34646 'idual' 5 +34647 'idée' 5 +34648 'iedad' 5 +34649 'ieder' 5 +34650 'iegel' 5 +34651 'ielle' 5 +34652 'ience' 5 +34653 'iency' 5 +34654 'iendo' 5 +34655 'ienen' 5 +34656 'ienna' 5 +34657 'ienne' 5 +34658 'iente' 5 +34659 'iento' 5 +34660 'ients' 5 +34661 'ienza' 5 +34662 'ieren' 5 +34663 'ierno' 5 +34664 'ieron' 5 +34665 'ierra' 5 +34666 'ierre' 5 +34667 'ierte' 5 +34668 'ierto' 5 +34669 'iesel' 5 +34670 'iesen' 5 +34671 'ieurs' 5 +34672 'ieval' 5 +34673 'ieved' 5 +34674 'ieves' 5 +34675 'iface' 5 +34676 'ifact' 5 +34677 'ifdef' 5 +34678 'ifest' 5 +34679 'iffer' 5 +34680 'ifica' 5 +34681 'ifice' 5 +34682 'ified' 5 +34683 'ifier' 5 +34684 'ifies' 5 +34685 'ifié' 5 +34686 'ifold' 5 +34687 'iform' 5 +34688 'iforn' 5 +34689 'ifter' 5 +34690 'igate' 5 +34691 'igent' 5 +34692 'igest' 5 +34693 'igger' 5 +34694 'ighed' 5 +34695 'ighth' 5 +34696 'ights' 5 +34697 'igion' 5 +34698 'igmat' 5 +34699 'igned' 5 +34700 'igner' 5 +34701 'ignon' 5 +34702 'igram' 5 +34703 'igung' 5 +34704 'ijing' 5 +34705 'ikawa' 5 +34706 'ikers' 5 +34707 'iking' 5 +34708 'ilage' 5 +34709 'iland' 5 +34710 'ilder' 5 +34711 'ilent' 5 +34712 'ilers' 5 +34713 'ilian' 5 +34714 'iliar' 5 +34715 'ilies' 5 +34716 'iline' 5 +34717 'iling' 5 +34718 'ilion' 5 +34719 'ility' 5 +34720 'illac' 5 +34721 'illar' 5 +34722 'illas' 5 +34723 'illed' 5 +34724 'iller' 5 +34725 'illes' 5 +34726 'illet' 5 +34727 'illin' 5 +34728 'illon' 5 +34729 'illus' 5 +34730 'illé' 5 +34731 'ilogy' 5 +34732 'ilton' 5 +34733 'image' 5 +34734 'imals' 5 +34735 'imate' 5 +34736 'imens' 5 +34737 'iment' 
5 +34738 'imgur' 5 +34739 'imits' 5 +34740 'imize' 5 +34741 'immer' 5 +34742 'imony' 5 +34743 'imore' 5 +34744 'imoto' 5 +34745 'imper' 5 +34746 'imple' 5 +34747 'impro' 5 +34748 'imuth' 5 +34749 'inals' 5 +34750 'iname' 5 +34751 'inand' 5 +34752 'inant' 5 +34753 'inary' 5 +34754 'inate' 5 +34755 'inces' 5 +34756 'incip' 5 +34757 'incre' 5 +34758 'inden' 5 +34759 'inder' 5 +34760 'index' 5 +34761 'indic' 5 +34762 'indle' 5 +34763 'indow' 5 +34764 'indre' 5 +34765 'inear' 5 +34766 'inees' 5 +34767 'inely' 5 +34768 'inent' 5 +34769 'iners' 5 +34770 'inery' 5 +34771 'inese' 5 +34772 'iness' 5 +34773 'infer' 5 +34774 'infra' 5 +34775 'infty' 5 +34776 'ingen' 5 +34777 'inger' 5 +34778 'inges' 5 +34779 'ingle' 5 +34780 'ingly' 5 +34781 'inian' 5 +34782 'ining' 5 +34783 'inion' 5 +34784 'inite' 5 +34785 'inity' 5 +34786 'inkel' 5 +34787 'inker' 5 +34788 'inkle' 5 +34789 'inned' 5 +34790 'innen' 5 +34791 'inner' 5 +34792 'inode' 5 +34793 'inois' 5 +34794 'inous' 5 +34795 'input' 5 +34796 'inset' 5 +34797 'insic' 5 +34798 'inski' 5 +34799 'insky' 5 +34800 'inson' 5 +34801 'instr' 5 +34802 'intel' 5 +34803 'inter' 5 +34804 'inton' 5 +34805 'intro' 5 +34806 'inté' 5 +34807 'iolet' 5 +34808 'ional' 5 +34809 'ioned' 5 +34810 'iones' 5 +34811 'ionic' 5 +34812 'iosis' 5 +34813 'iotic' 5 +34814 'ioxid' 5 +34815 'ipart' 5 +34816 'ipers' 5 +34817 'ipher' 5 +34818 'iples' 5 +34819 'ipped' 5 +34820 'ipper' 5 +34821 'ippet' 5 +34822 'ipple' 5 +34823 'ipzig' 5 +34824 'iques' 5 +34825 'iquid' 5 +34826 'iqué' 5 +34827 'ircle' 5 +34828 'irect' 5 +34829 'iring' 5 +34830 'irmed' 5 +34831 'irror' 5 +34832 'isans' 5 +34833 'iscal' 5 +34834 'ische' 5 +34835 'isers' 5 +34836 'ished' 5 +34837 'isher' 5 +34838 'ishes' 5 +34839 'ishly' 5 +34840 'ishop' 5 +34841 'ising' 5 +34842 'ision' 5 +34843 'isman' 5 +34844 'ismic' 5 +34845 'ismus' 5 +34846 'isnan' 5 +34847 'isode' 5 +34848 'isons' 5 +34849 'issan' 5 +34850 'issen' 5 +34851 'isser' 5 +34852 'isses' 5 +34853 'isset' 5 +34854 'isson' 5 +34855 'issue' 5 +34856 'istan' 5 +34857 'istar' 5 +34858 'istas' 5 +34859 'isted' 5 +34860 'istem' 5 +34861 'isten' 5 +34862 'ister' 5 +34863 'istes' 5 +34864 'istic' 5 +34865 'istik' 5 +34866 'istle' 5 +34867 'istol' 5 +34868 'iston' 5 +34869 'istor' 5 +34870 'istra' 5 +34871 'istro' 5 +34872 'istry' 5 +34873 'istä' 5 +34874 'isure' 5 +34875 'isée' 5 +34876 'isés' 5 +34877 'itage' 5 +34878 'itals' 5 +34879 'itant' 5 +34880 'itary' 5 +34881 'itate' 5 +34882 'itect' 5 +34883 'itely' 5 +34884 'items' 5 +34885 'iterr' 5 +34886 'ither' 5 +34887 'ithub' 5 +34888 'itial' 5 +34889 'ities' 5 +34890 'itime' 5 +34891 'iting' 5 +34892 'ition' 5 +34893 'itive' 5 +34894 'itié' 5 +34895 'itled' 5 +34896 'itles' 5 +34897 'itone' 5 +34898 'itore' 5 +34899 'itori' 5 +34900 'itors' 5 +34901 'itory' 5 +34902 'itsch' 5 +34903 'itted' 5 +34904 'ittee' 5 +34905 'itten' 5 +34906 'itter' 5 +34907 'ittle' 5 +34908 'itude' 5 +34909 'itung' 5 +34910 'iture' 5 +34911 'itzer' 5 +34912 'ität' 5 +34913 'ités' 5 +34914 'ivals' 5 +34915 'ivari' 5 +34916 'ivate' 5 +34917 'iveau' 5 +34918 'ively' 5 +34919 'ivent' 5 +34920 'ivers' 5 +34921 'ivery' 5 +34922 'iving' 5 +34923 'ivism' 5 +34924 'ivist' 5 +34925 'ivity' 5 +34926 'ixels' 5 +34927 'izada' 5 +34928 'izado' 5 +34929 'izard' 5 +34930 'izens' 5 +34931 'izers' 5 +34932 'izing' 5 +34933 'izons' 5 +34934 'izont' 5 +34935 'izoph' 5 +34936 'ième' 5 +34937 'ière' 5 +34938 'jamin' 5 +34939 'jango' 5 +34940 'javax' 5 +34941 'jiang' 5 +34942 'joint' 5 +34943 'jours' 5 +34944 'juana' 5 +34945 'judge' 5 +34946 'junit' 5 +34947 
'juven' 5 +34948 'jähr' 5 +34949 'jší' 5 +34950 'kappa' 5 +34951 'keley' 5 +34952 'keras' 5 +34953 'klass' 5 +34954 'klär' 5 +34955 'known' 5 +34956 'ktion' 5 +34957 'ként' 5 +34958 'label' 5 +34959 'labor' 5 +34960 'laden' 5 +34961 'lando' 5 +34962 'lands' 5 +34963 'lapse' 5 +34964 'large' 5 +34965 'ları' 5 +34966 'lated' 5 +34967 'later' 5 +34968 'latex' 5 +34969 'latin' 5 +34970 'layer' 5 +34971 'ldots' 5 +34972 'leans' 5 +34973 'learn' 5 +34974 'lease' 5 +34975 'least' 5 +34976 'leave' 5 +34977 'ledge' 5 +34978 'legal' 5 +34979 'legen' 5 +34980 'leich' 5 +34981 'leigh' 5 +34982 'leman' 5 +34983 'lemen' 5 +34984 'lemma' 5 +34985 'letal' 5 +34986 'leted' 5 +34987 'letes' 5 +34988 'letic' 5 +34989 'leton' 5 +34990 'lette' 5 +34991 'level' 5 +34992 'lexer' 5 +34993 'lical' 5 +34994 'lices' 5 +34995 'liche' 5 +34996 'licht' 5 +34997 'licit' 5 +34998 'lickr' 5 +34999 'lient' 5 +35000 'liers' 5 +35001 'liest' 5 +35002 'ließ' 5 +35003 'light' 5 +35004 'ligne' 5 +35005 'liked' 5 +35006 'limit' 5 +35007 'lined' 5 +35008 'liner' 5 +35009 'lines' 5 +35010 'lings' 5 +35011 'linha' 5 +35012 'links' 5 +35013 'linux' 5 +35014 'lique' 5 +35015 'lista' 5 +35016 'lists' 5 +35017 'liter' 5 +35018 'lived' 5 +35019 'liver' 5 +35020 'loads' 5 +35021 'lobal' 5 +35022 'local' 5 +35023 'locks' 5 +35024 'logic' 5 +35025 'login' 5 +35026 'loops' 5 +35027 'lopen' 5 +35028 'lords' 5 +35029 'lotte' 5 +35030 'lover' 5 +35031 'lower' 5 +35032 'luent' 5 +35033 'lycer' 5 +35034 'lying' 5 +35035 'länd' 5 +35036 'macro' 5 +35037 'magic' 5 +35038 'mails' 5 +35039 'maint' 5 +35040 'major' 5 +35041 'maker' 5 +35042 'makes' 5 +35043 'mania' 5 +35044 'mares' 5 +35045 'marks' 5 +35046 'ması' 5 +35047 'match' 5 +35048 'mates' 5 +35049 'matic' 5 +35050 'maven' 5 +35051 'maxim' 5 +35052 'maybe' 5 +35053 'means' 5 +35054 'media' 5 +35055 'mente' 5 +35056 'ments' 5 +35057 'merce' 5 +35058 'merge' 5 +35059 'meric' 5 +35060 'metal' 5 +35061 'meter' 5 +35062 'metic' 5 +35063 'metro' 5 +35064 'metry' 5 +35065 'micro' 5 +35066 'might' 5 +35067 'miner' 5 +35068 'minim' 5 +35069 'minor' 5 +35070 'minus' 5 +35071 'mixed' 5 +35072 'mkdir' 5 +35073 'modal' 5 +35074 'model' 5 +35075 'modes' 5 +35076 'money' 5 +35077 'mongo' 5 +35078 'monic' 5 +35079 'month' 5 +35080 'morph' 5 +35081 'motor' 5 +35082 'mount' 5 +35083 'mouse' 5 +35084 'mouth' 5 +35085 'movie' 5 +35086 'multi' 5 +35087 'music' 5 +35088 'mutex' 5 +35089 'mysql' 5 +35090 'même' 5 +35091 'nabla' 5 +35092 'nable' 5 +35093 'naire' 5 +35094 'named' 5 +35095 'names' 5 +35096 'nants' 5 +35097 'natal' 5 +35098 'neath' 5 +35099 'needs' 5 +35100 'negie' 5 +35101 'nelle' 5 +35102 'nergy' 5 +35103 'nesty' 5 +35104 'nette' 5 +35105 'never' 5 +35106 'nginx' 5 +35107 'night' 5 +35108 'nikov' 5 +35109 'nings' 5 +35110 'nodes' 5 +35111 'noise' 5 +35112 'nonce' 5 +35113 'north' 5 +35114 'notes' 5 +35115 'notin' 5 +35116 'nucle' 5 +35117 'numer' 5 +35118 'numpy' 5 +35119 'nyder' 5 +35120 'nées' 5 +35121 'ného' 5 +35122 'ních' 5 +35123 'ního' 5 +35124 'ných' 5 +35125 'oauth' 5 +35126 'obile' 5 +35127 'obody' 5 +35128 'ocado' 5 +35129 'ocamp' 5 +35130 'ocard' 5 +35131 'ocate' 5 +35132 'occup' 5 +35133 'occur' 5 +35134 'occus' 5 +35135 'ocene' 5 +35136 'ocent' 5 +35137 'ocese' 5 +35138 'ochem' 5 +35139 'ocial' 5 +35140 'ocide' 5 +35141 'ocity' 5 +35142 'ocker' 5 +35143 'ocket' 5 +35144 'ockey' 5 +35145 'ocode' 5 +35146 'ocrat' 5 +35147 'ocyan' 5 +35148 'ocyte' 5 +35149 'odies' 5 +35150 'oding' 5 +35151 'odium' 5 +35152 'odont' 5 +35153 'odore' 5 +35154 'odule' 5 +35155 'offee' 5 +35156 'offer' 5 +35157 
'offic' 5 +35158 'often' 5 +35159 'ogene' 5 +35160 'ogens' 5 +35161 'oggle' 5 +35162 'oglob' 5 +35163 'ograf' 5 +35164 'ogram' 5 +35165 'ograp' 5 +35166 'ográ' 5 +35167 'oidal' 5 +35168 'okers' 5 +35169 'oking' 5 +35170 'okrat' 5 +35171 'oland' 5 +35172 'olars' 5 +35173 'olate' 5 +35174 'older' 5 +35175 'olean' 5 +35176 'olics' 5 +35177 'olina' 5 +35178 'oline' 5 +35179 'oling' 5 +35180 'olini' 5 +35181 'olith' 5 +35182 'ollah' 5 +35183 'ollar' 5 +35184 'ollen' 5 +35185 'oller' 5 +35186 'ollow' 5 +35187 'ology' 5 +35188 'olson' 5 +35189 'olulu' 5 +35190 'olute' 5 +35191 'olved' 5 +35192 'olver' 5 +35193 'olves' 5 +35194 'ológ' 5 +35195 'omain' 5 +35196 'omaly' 5 +35197 'ombie' 5 +35198 'omega' 5 +35199 'oment' 5 +35200 'omers' 5 +35201 'omial' 5 +35202 'omics' 5 +35203 'oming' 5 +35204 'ommen' 5 +35205 'omnia' 5 +35206 'omore' 5 +35207 'områ' 5 +35208 'onald' 5 +35209 'onaut' 5 +35210 'onces' 5 +35211 'oncé' 5 +35212 'onder' 5 +35213 'ondon' 5 +35214 'onent' 5 +35215 'onial' 5 +35216 'onian' 5 +35217 'onica' 5 +35218 'onies' 5 +35219 'oning' 5 +35220 'onium' 5 +35221 'onomy' 5 +35222 'onset' 5 +35223 'onyms' 5 +35224 'ookie' 5 +35225 'ooter' 5 +35226 'opard' 5 +35227 'opath' 5 +35228 'openh' 5 +35229 'opens' 5 +35230 'opher' 5 +35231 'ophil' 5 +35232 'ophys' 5 +35233 'opian' 5 +35234 'oping' 5 +35235 'oplan' 5 +35236 'oples' 5 +35237 'oplus' 5 +35238 'opoly' 5 +35239 'oprop' 5 +35240 'opsis' 5 +35241 'opter' 5 +35242 'optic' 5 +35243 'optim' 5 +35244 'orage' 5 +35245 'orama' 5 +35246 'orate' 5 +35247 'orbit' 5 +35248 'ordan' 5 +35249 'orden' 5 +35250 'order' 5 +35251 'ordin' 5 +35252 'ordon' 5 +35253 'oreal' 5 +35254 'orean' 5 +35255 'orest' 5 +35256 'organ' 5 +35257 'orgen' 5 +35258 'orget' 5 +35259 'orial' 5 +35260 'orian' 5 +35261 'ories' 5 +35262 'oring' 5 +35263 'ority' 5 +35264 'ormal' 5 +35265 'orman' 5 +35266 'orney' 5 +35267 'orous' 5 +35268 'orpor' 5 +35269 'orrow' 5 +35270 'ortal' 5 +35271 'orted' 5 +35272 'orter' 5 +35273 'ortex' 5 +35274 'ortho' 5 +35275 'orthy' 5 +35276 'ortic' 5 +35277 'orton' 5 +35278 'ortun' 5 +35279 'osaic' 5 +35280 'osaur' 5 +35281 'osing' 5 +35282 'osion' 5 +35283 'osite' 5 +35284 'osity' 5 +35285 'oslav' 5 +35286 'osome' 5 +35287 'ospel' 5 +35288 'ossip' 5 +35289 'ostat' 5 +35290 'osten' 5 +35291 'oster' 5 +35292 'ostic' 5 +35293 'oston' 5 +35294 'oteca' 5 +35295 'otech' 5 +35296 'oters' 5 +35297 'other' 5 +35298 'otics' 5 +35299 'otide' 5 +35300 'otine' 5 +35301 'oting' 5 +35302 'otion' 5 +35303 'otive' 5 +35304 'otomy' 5 +35305 'otrop' 5 +35306 'otted' 5 +35307 'otten' 5 +35308 'ottom' 5 +35309 'otype' 5 +35310 'ouble' 5 +35311 'ought' 5 +35312 'oulos' 5 +35313 'ounce' 5 +35314 'ounds' 5 +35315 'ounge' 5 +35316 'ounty' 5 +35317 'ource' 5 +35318 'oured' 5 +35319 'ourse' 5 +35320 'oused' 5 +35321 'ousel' 5 +35322 'ouses' 5 +35323 'ously' 5 +35324 'ousse' 5 +35325 'outer' 5 +35326 'ouver' 5 +35327 'overn' 5 +35328 'overs' 5 +35329 'overy' 5 +35330 'ovich' 5 +35331 'oving' 5 +35332 'ović' 5 +35333 'ovsky' 5 +35334 'ować' 5 +35335 'ował' 5 +35336 'owell' 5 +35337 'owing' 5 +35338 'owitz' 5 +35339 'owler' 5 +35340 'owned' 5 +35341 'owner' 5 +35342 'ownik' 5 +35343 'owski' 5 +35344 'oxide' 5 +35345 'ozzá' 5 +35346 'ości' 5 +35347 'paced' 5 +35348 'paces' 5 +35349 'pages' 5 +35350 'paint' 5 +35351 'pairs' 5 +35352 'panel' 5 +35353 'panic' 5 +35354 'paper' 5 +35355 'param' 5 +35356 'paras' 5 +35357 'paren' 5 +35358 'parse' 5 +35359 'parts' 5 +35360 'party' 5 +35361 'paste' 5 +35362 'patch' 5 +35363 'paths' 5 +35364 'pathy' 5 +35365 'pause' 5 +35366 'peace' 5 
+35367 'pedia' 5 +35368 'peech' 5 +35369 'pered' 5 +35370 'peria' 5 +35371 'peror' 5 +35372 'perse' 5 +35373 'perty' 5 +35374 'phalt' 5 +35375 'phant' 5 +35376 'phase' 5 +35377 'pherd' 5 +35378 'phere' 5 +35379 'phins' 5 +35380 'phinx' 5 +35381 'phone' 5 +35382 'phony' 5 +35383 'photo' 5 +35384 'piece' 5 +35385 'pires' 5 +35386 'pitch' 5 +35387 'pivot' 5 +35388 'pixel' 5 +35389 'place' 5 +35390 'plain' 5 +35391 'plane' 5 +35392 'plant' 5 +35393 'plate' 5 +35394 'platz' 5 +35395 'plays' 5 +35396 'pless' 5 +35397 'plete' 5 +35398 'plets' 5 +35399 'plica' 5 +35400 'plied' 5 +35401 'plier' 5 +35402 'plies' 5 +35403 'pline' 5 +35404 'pling' 5 +35405 'plist' 5 +35406 'pload' 5 +35407 'plots' 5 +35408 'point' 5 +35409 'polar' 5 +35410 'polit' 5 +35411 'ponse' 5 +35412 'poons' 5 +35413 'popup' 5 +35414 'porte' 5 +35415 'ports' 5 +35416 'posal' 5 +35417 'posed' 5 +35418 'poser' 5 +35419 'poses' 5 +35420 'posit' 5 +35421 'posix' 5 +35422 'posta' 5 +35423 'posts' 5 +35424 'pound' 5 +35425 'power' 5 +35426 'ppers' 5 +35427 'pping' 5 +35428 'pread' 5 +35429 'press' 5 +35430 'price' 5 +35431 'prime' 5 +35432 'pring' 5 +35433 'print' 5 +35434 'prior' 5 +35435 'prise' 5 +35436 'probe' 5 +35437 'produ' 5 +35438 'promo' 5 +35439 'proof' 5 +35440 'props' 5 +35441 'prote' 5 +35442 'proto' 5 +35443 'prove' 5 +35444 'proxy' 5 +35445 'près' 5 +35446 'prés' 5 +35447 'psych' 5 +35448 'ptide' 5 +35449 'ption' 5 +35450 'ptive' 5 +35451 'ptune' 5 +35452 'pulse' 5 +35453 'punkt' 5 +35454 'puted' 5 +35455 'puter' 5 +35456 'pués' 5 +35457 'qquad' 5 +35458 'quake' 5 +35459 'quant' 5 +35460 'quare' 5 +35461 'quart' 5 +35462 'queda' 5 +35463 'quent' 5 +35464 'query' 5 +35465 'quest' 5 +35466 'queue' 5 +35467 'quick' 5 +35468 'quier' 5 +35469 'quiet' 5 +35470 'quipe' 5 +35471 'quire' 5 +35472 'quiry' 5 +35473 'quist' 5 +35474 'quite' 5 +35475 'quito' 5 +35476 'quivo' 5 +35477 'quota' 5 +35478 'quote' 5 +35479 'rades' 5 +35480 'radio' 5 +35481 'rador' 5 +35482 'ragon' 5 +35483 'raham' 5 +35484 'rails' 5 +35485 'raine' 5 +35486 'rains' 5 +35487 'raint' 5 +35488 'raise' 5 +35489 'raits' 5 +35490 'ramer' 5 +35491 'ramid' 5 +35492 'rance' 5 +35493 'ranch' 5 +35494 'range' 5 +35495 'rapid' 5 +35496 'rases' 5 +35497 'rated' 5 +35498 'rates' 5 +35499 'ratio' 5 +35500 'ravel' 5 +35501 'razil' 5 +35502 'reach' 5 +35503 'react' 5 +35504 'reads' 5 +35505 'ready' 5 +35506 'realm' 5 +35507 'reate' 5 +35508 'recht' 5 +35509 'redit' 5 +35510 'reens' 5 +35511 'refer' 5 +35512 'refix' 5 +35513 'regex' 5 +35514 'regon' 5 +35515 'regor' 5 +35516 'reich' 5 +35517 'reira' 5 +35518 'relax' 5 +35519 'rella' 5 +35520 'rence' 5 +35521 'rench' 5 +35522 'rende' 5 +35523 'renew' 5 +35524 'rente' 5 +35525 'reply' 5 +35526 'repos' 5 +35527 'reset' 5 +35528 'resid' 5 +35529 'resol' 5 +35530 'resse' 5 +35531 'retch' 5 +35532 'reten' 5 +35533 'retry' 5 +35534 'rette' 5 +35535 'reuse' 5 +35536 'riage' 5 +35537 'rians' 5 +35538 'rible' 5 +35539 'ribly' 5 +35540 'rical' 5 +35541 'rices' 5 +35542 'richt' 5 +35543 'ricia' 5 +35544 'ricks' 5 +35545 'rides' 5 +35546 'ridge' 5 +35547 'riend' 5 +35548 'rient' 5 +35549 'riers' 5 +35550 'rieve' 5 +35551 'right' 5 +35552 'rimin' 5 +35553 'ringe' 5 +35554 'rings' 5 +35555 'riors' 5 +35556 'rique' 5 +35557 'rison' 5 +35558 'rists' 5 +35559 'riter' 5 +35560 'rites' 5 +35561 'ritic' 5 +35562 'ritis' 5 +35563 'rival' 5 +35564 'rived' 5 +35565 'river' 5 +35566 'roads' 5 +35567 'robat' 5 +35568 'robot' 5 +35569 'rocal' 5 +35570 'rogen' 5 +35571 'roles' 5 +35572 'rolls' 5 +35573 'rolog' 5 +35574 'romes' 5 +35575 'rones' 5 
+35576 'ronic' 5 +35577 'ronym' 5 +35578 'rooms' 5 +35579 'roots' 5 +35580 'rophe' 5 +35581 'rophy' 5 +35582 'ropic' 5 +35583 'ropol' 5 +35584 'ropri' 5 +35585 'rored' 5 +35586 'rosis' 5 +35587 'rosse' 5 +35588 'rough' 5 +35589 'round' 5 +35590 'route' 5 +35591 'rowse' 5 +35592 'rowth' 5 +35593 'rozen' 5 +35594 'ruary' 5 +35595 'ruits' 5 +35596 'rules' 5 +35597 'rying' 5 +35598 'rypto' 5 +35599 'sales' 5 +35600 'saved' 5 +35601 'sburg' 5 +35602 'scala' 5 +35603 'scale' 5 +35604 'scape' 5 +35605 'scene' 5 +35606 'sched' 5 +35607 'schen' 5 +35608 'scope' 5 +35609 'score' 5 +35610 'scrib' 5 +35611 'sembl' 5 +35612 'senal' 5 +35613 'sense' 5 +35614 'separ' 5 +35615 'serie' 5 +35616 'serve' 5 +35617 'setUp' 5 +35618 'setup' 5 +35619 'seudo' 5 +35620 'seven' 5 +35621 'sever' 5 +35622 'shake' 5 +35623 'shall' 5 +35624 'shape' 5 +35625 'share' 5 +35626 'sharp' 5 +35627 'sheet' 5 +35628 'shelf' 5 +35629 'shell' 5 +35630 'shift' 5 +35631 'shine' 5 +35632 'ships' 5 +35633 'shire' 5 +35634 'shirt' 5 +35635 'shoot' 5 +35636 'shops' 5 +35637 'shore' 5 +35638 'short' 5 +35639 'shots' 5 +35640 'shown' 5 +35641 'shows' 5 +35642 'sible' 5 +35643 'sided' 5 +35644 'sight' 5 +35645 'sigma' 5 +35646 'simeq' 5 +35647 'simpl' 5 +35648 'since' 5 +35649 'sites' 5 +35650 'sized' 5 +35651 'sizes' 5 +35652 'skill' 5 +35653 'skins' 5 +35654 'slack' 5 +35655 'slant' 5 +35656 'slash' 5 +35657 'slave' 5 +35658 'sleep' 5 +35659 'slice' 5 +35660 'slide' 5 +35661 'slope' 5 +35662 'slots' 5 +35663 'small' 5 +35664 'smart' 5 +35665 'smith' 5 +35666 'snake' 5 +35667 'sofar' 5 +35668 'solar' 5 +35669 'solid' 5 +35670 'solve' 5 +35671 'sound' 5 +35672 'south' 5 +35673 'space' 5 +35674 'spark' 5 +35675 'spawn' 5 +35676 'spect' 5 +35677 'speed' 5 +35678 'spell' 5 +35679 'split' 5 +35680 'sport' 5 +35681 'spots' 5 +35682 'stack' 5 +35683 'stadt' 5 +35684 'staff' 5 +35685 'stage' 5 +35686 'stalk' 5 +35687 'stamp' 5 +35688 'stand' 5 +35689 'stant' 5 +35690 'stars' 5 +35691 'start' 5 +35692 'stash' 5 +35693 'state' 5 +35694 'stats' 5 +35695 'stdin' 5 +35696 'stdio' 5 +35697 'stead' 5 +35698 'steel' 5 +35699 'stein' 5 +35700 'stell' 5 +35701 'steps' 5 +35702 'stere' 5 +35703 'sters' 5 +35704 'stery' 5 +35705 'stick' 5 +35706 'still' 5 +35707 'stime' 5 +35708 'stock' 5 +35709 'stone' 5 +35710 'stood' 5 +35711 'store' 5 +35712 'storm' 5 +35713 'story' 5 +35714 'stown' 5 +35715 'strap' 5 +35716 'strip' 5 +35717 'strom' 5 +35718 'study' 5 +35719 'stuff' 5 +35720 'ství' 5 +35721 'style' 5 +35722 'stype' 5 +35723 'stüt' 5 +35724 'subst' 5 +35725 'suite' 5 +35726 'super' 5 +35727 'sweet' 5 +35728 'swers' 5 +35729 'swick' 5 +35730 'swift' 5 +35731 'swing' 5 +35732 'szág' 5 +35733 'table' 5 +35734 'tails' 5 +35735 'taire' 5 +35736 'taken' 5 +35737 'takes' 5 +35738 'tasks' 5 +35739 'tbody' 5 +35740 'techn' 5 +35741 'teger' 5 +35742 'templ' 5 +35743 'temps' 5 +35744 'tered' 5 +35745 'terms' 5 +35746 'terra' 5 +35747 'tests' 5 +35748 'texto' 5 +35749 'texts' 5 +35750 'tfrac' 5 +35751 'thank' 5 +35752 'thead' 5 +35753 'their' 5 +35754 'theme' 5 +35755 'there' 5 +35756 'thern' 5 +35757 'thers' 5 +35758 'these' 5 +35759 'theta' 5 +35760 'thick' 5 +35761 'thing' 5 +35762 'think' 5 +35763 'third' 5 +35764 'thood' 5 +35765 'those' 5 +35766 'three' 5 +35767 'thren' 5 +35768 'throw' 5 +35769 'thumb' 5 +35770 'tical' 5 +35771 'ticks' 5 +35772 'tight' 5 +35773 'tilde' 5 +35774 'tiles' 5 +35775 'timer' 5 +35776 'times' 5 +35777 'tings' 5 +35778 'title' 5 +35779 'tober' 5 +35780 'today' 5 +35781 'todos' 5 +35782 'token' 5 +35783 'tools' 5 +35784 'topic' 5 
+35785 'torch' 5 +35786 'total' 5 +35787 'touch' 5 +35788 'trace' 5 +35789 'track' 5 +35790 'tract' 5 +35791 'trade' 5 +35792 'trail' 5 +35793 'train' 5 +35794 'trait' 5 +35795 'trans' 5 +35796 'trash' 5 +35797 'treat' 5 +35798 'trees' 5 +35799 'trend' 5 +35800 'trial' 5 +35801 'tries' 5 +35802 'tring' 5 +35803 'trunc' 5 +35804 'trust' 5 +35805 'truth' 5 +35806 'tuple' 5 +35807 'tures' 5 +35808 'tweet' 5 +35809 'twist' 5 +35810 'typed' 5 +35811 'types' 5 +35812 'uable' 5 +35813 'ually' 5 +35814 'uario' 5 +35815 'uated' 5 +35816 'uates' 5 +35817 'ubble' 5 +35818 'ubern' 5 +35819 'ubert' 5 +35820 'ublic' 5 +35821 'ublin' 5 +35822 'ubyte' 5 +35823 'uchar' 5 +35824 'uchen' 5 +35825 'ucing' 5 +35826 'ucion' 5 +35827 'ucked' 5 +35828 'ucker' 5 +35829 'ucket' 5 +35830 'uckle' 5 +35831 'uctor' 5 +35832 'uddle' 5 +35833 'udeau' 5 +35834 'udent' 5 +35835 'uding' 5 +35836 'udson' 5 +35837 'uelle' 5 +35838 'uerdo' 5 +35839 'uerto' 5 +35840 'uesta' 5 +35841 'uesto' 5 +35842 'ufact' 5 +35843 'uffed' 5 +35844 'uffer' 5 +35845 'uffix' 5 +35846 'uffle' 5 +35847 'uggle' 5 +35848 'ugins' 5 +35849 'uitar' 5 +35850 'ulant' 5 +35851 'ulate' 5 +35852 'ulent' 5 +35853 'uliar' 5 +35854 'uling' 5 +35855 'ulkan' 5 +35856 'ullah' 5 +35857 'ullen' 5 +35858 'ulner' 5 +35859 'ulong' 5 +35860 'ulose' 5 +35861 'ulous' 5 +35862 'ultan' 5 +35863 'ultur' 5 +35864 'ulté' 5 +35865 'umann' 5 +35866 'umbai' 5 +35867 'umber' 5 +35868 'umble' 5 +35869 'ument' 5 +35870 'umina' 5 +35871 'uming' 5 +35872 'ummer' 5 +35873 'umped' 5 +35874 'umper' 5 +35875 'uncan' 5 +35876 'uncia' 5 +35877 'undai' 5 +35878 'unday' 5 +35879 'undef' 5 +35880 'unden' 5 +35881 'under' 5 +35882 'undle' 5 +35883 'ungal' 5 +35884 'ungen' 5 +35885 'unger' 5 +35886 'ungle' 5 +35887 'uning' 5 +35888 'union' 5 +35889 'units' 5 +35890 'unity' 5 +35891 'unker' 5 +35892 'unned' 5 +35893 'unnel' 5 +35894 'unque' 5 +35895 'unset' 5 +35896 'unted' 5 +35897 'unter' 5 +35898 'until' 5 +35899 'untos' 5 +35900 'uplic' 5 +35901 'upper' 5 +35902 'uracy' 5 +35903 'urate' 5 +35904 'urban' 5 +35905 'urbed' 5 +35906 'ureau' 5 +35907 'urent' 5 +35908 'urers' 5 +35909 'urger' 5 +35910 'uries' 5 +35911 'uring' 5 +35912 'urity' 5 +35913 'urnal' 5 +35914 'urope' 5 +35915 'urous' 5 +35916 'urred' 5 +35917 'ursed' 5 +35918 'urses' 5 +35919 'ursor' 5 +35920 'urtle' 5 +35921 'usage' 5 +35922 'users' 5 +35923 'useum' 5 +35924 'ushed' 5 +35925 'ushes' 5 +35926 'using' 5 +35927 'usion' 5 +35928 'usive' 5 +35929 'ussed' 5 +35930 'ussen' 5 +35931 'ussia' 5 +35932 'usted' 5 +35933 'uster' 5 +35934 'ustin' 5 +35935 'ustom' 5 +35936 'usual' 5 +35937 'utely' 5 +35938 'uters' 5 +35939 'uteur' 5 +35940 'uther' 5 +35941 'utils' 5 +35942 'uting' 5 +35943 'ution' 5 +35944 'utive' 5 +35945 'utors' 5 +35946 'utory' 5 +35947 'utral' 5 +35948 'utsch' 5 +35949 'utter' 5 +35950 'utton' 5 +35951 'uture' 5 +35952 'uyên' 5 +35953 'uzzle' 5 +35954 'vable' 5 +35955 'valid' 5 +35956 'valor' 5 +35957 'value' 5 +35958 'varez' 5 +35959 'vault' 5 +35960 'vdots' 5 +35961 'velle' 5 +35962 'velop' 5 +35963 'venir' 5 +35964 'venth' 5 +35965 'vents' 5 +35966 'venue' 5 +35967 'verbs' 5 +35968 'verse' 5 +35969 'verte' 5 +35970 'verts' 5 +35971 'verty' 5 +35972 'vette' 5 +35973 'video' 5 +35974 'vider' 5 +35975 'vidia' 5 +35976 'views' 5 +35977 'villa' 5 +35978 'ville' 5 +35979 'vious' 5 +35980 'viron' 5 +35981 'virus' 5 +35982 'vised' 5 +35983 'visit' 5 +35984 'visor' 5 +35985 'vival' 5 +35986 'vocab' 5 +35987 'voice' 5 +35988 'votes' 5 +35989 'väst' 5 +35990 'wagen' 5 +35991 'walls' 5 +35992 'wards' 5 +35993 'wares' 5 
+35994 'watch' 5 +35995 'water' 5 +35996 'waves' 5 +35997 'wedge' 5 +35998 'weeks' 5 +35999 'weets' 5 +36000 'weise' 5 +36001 'wheel' 5 +36002 'where' 5 +36003 'which' 5 +36004 'while' 5 +36005 'white' 5 +36006 'whole' 5 +36007 'whose' 5 +36008 'width' 5 +36009 'witch' 5 +36010 'wives' 5 +36011 'wiąz' 5 +36012 'woman' 5 +36013 'women' 5 +36014 'woods' 5 +36015 'words' 5 +36016 'works' 5 +36017 'world' 5 +36018 'worth' 5 +36019 'would' 5 +36020 'write' 5 +36021 'wrong' 5 +36022 'xhtml' 5 +36023 'xiety' 5 +36024 'xmlns' 5 +36025 'xpath' 5 +36026 'xture' 5 +36027 'xygen' 5 +36028 'yahoo' 5 +36029 'yards' 5 +36030 'ycler' 5 +36031 'years' 5 +36032 'yield' 5 +36033 'ylene' 5 +36034 'ylvan' 5 +36035 'ymbol' 5 +36036 'yntax' 5 +36037 'young' 5 +36038 'ystem' 5 +36039 'yster' 5 +36040 'ython' 5 +36041 'ytics' 5 +36042 'zeich' 5 +36043 'zeros' 5 +36044 'ział' 5 +36045 'zilla' 5 +36046 'zione' 5 +36047 'zsche' 5 +36048 '}}_{\\' 5 +36049 'ÇÃO' 5 +36050 'État' 5 +36051 'ában' 5 +36052 'ácil' 5 +36053 'ález' 5 +36054 'ális' 5 +36055 'álva' 5 +36056 'ámos' 5 +36057 'ának' 5 +36058 'ános' 5 +36059 'ání' 5 +36060 'ária' 5 +36061 'ário' 5 +36062 'ások' 5 +36063 'átum' 5 +36064 'ával' 5 +36065 'ável' 5 +36066 'ází' 5 +36067 'ână' 5 +36068 'âtre' 5 +36069 'äche' 5 +36070 'ächs' 5 +36071 'ächt' 5 +36072 'äger' 5 +36073 'ählt' 5 +36074 'äler' 5 +36075 'älle' 5 +36076 'ällt' 5 +36077 'ämä' 5 +36078 'ände' 5 +36079 'änge' 5 +36080 'ären' 5 +36081 'ässt' 5 +36082 'äter' 5 +36083 'ätte' 5 +36084 'ätze' 5 +36085 'äude' 5 +36086 'ään' 5 +36087 'ædia' 5 +36088 'çais' 5 +36089 'çois' 5 +36090 'çoit' 5 +36091 'ção' 5 +36092 'èces' 5 +36093 'èles' 5 +36094 'èmes' 5 +36095 'ènes' 5 +36096 'èque' 5 +36097 'ères' 5 +36098 'ètes' 5 +36099 'ètre' 5 +36100 'èves' 5 +36101 'ébec' 5 +36102 'ében' 5 +36103 'écur' 5 +36104 'éder' 5 +36105 'édia' 5 +36106 'édie' 5 +36107 'édé' 5 +36108 'élé' 5 +36109 'émet' 5 +36110 'émie' 5 +36111 'émon' 5 +36112 'ének' 5 +36113 'énez' 5 +36114 'énom' 5 +36115 'éné' 5 +36116 'éral' 5 +36117 'érer' 5 +36118 'érez' 5 +36119 'éric' 5 +36120 'érie' 5 +36121 'ério' 5 +36122 'éré' 5 +36123 'ésie' 5 +36124 'éső' 5 +36125 'état' 5 +36126 'éter' 5 +36127 'été' 5 +36128 'ével' 5 +36129 'êmes' 5 +36130 'êque' 5 +36131 'êtes' 5 +36132 'être' 5 +36133 'ícia' 5 +36134 'ício' 5 +36135 'ícul' 5 +36136 'ící' 5 +36137 'ígen' 5 +36138 'ília' 5 +36139 'ínez' 5 +36140 'íses' 5 +36141 'ível' 5 +36142 'ître' 5 +36143 'ñana' 5 +36144 'òria' 5 +36145 'ództ' 5 +36146 'ópez' 5 +36147 'ória' 5 +36148 'ório' 5 +36149 'ôtel' 5 +36150 'öder' 5 +36151 'önig' 5 +36152 'öße' 5 +36153 'úmer' 5 +36154 'über' 5 +36155 'ücke' 5 +36156 'ügel' 5 +36157 'ügen' 5 +36158 'ühle' 5 +36159 'ührt' 5 +36160 'üler' 5 +36161 'ület' 5 +36162 'ünst' 5 +36163 'ční' 5 +36164 'ędzy' 5 +36165 'ění' 5 +36166 'ılı' 5 +36167 'ında' 5 +36168 'ını' 5 +36169 'łoż' 5 +36170 'łuż' 5 +36171 'łów' 5 +36172 'ńczy' 5 +36173 'ńska' 5 +36174 'ński' 5 +36175 'ństw' 5 +36176 'ście' 5 +36177 'śnie' 5 +36178 'ště' 5 +36179 'ším' 5 +36180 'ướ' 5 +36181 'ườ' 5 +36182 'ưở' 5 +36183 'ượ' 5 +36184 'ảng' 5 +36185 'ằng' 5 +36186 'ịch' 5 +36187 'ống' 5 +36188 'ồng' 5 +36189 'ụng' 5 +36190 'ứng' 5 +36191 'ững' 5 +36192 '’il' 5 +36193 '’ll' 5 +36194 '’re' 5 +36195 '’ve' 5 +36196 '“No' 5 +36197 '”),' 5 +36198 '”).' 5 +36199 '…..' 
5 +36200 '!",' 5
+36201 ':",
+  "eos_token": "",
+  "pad_token": "",
+  "unk_token": ""
+}
diff --git a/models/rwkv-6-world-1b6/tokenizer_config.json b/models/rwkv-6-world-1b6/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f8f80c8bfcf0f123b9087d973a432e83f98cc84
--- /dev/null
+++ b/models/rwkv-6-world-1b6/tokenizer_config.json
@@ -0,0 +1,12 @@
+{
+  "name_or_path": "rwkv-6-tokenizer",
+  "add_prefix_space": false,
+  "tokenizer_class": "Rwkv6Tokenizer",
+  "use_fast": false,
+  "auto_map": {
+    "AutoTokenizer": [
+      "hf_rwkv_tokenizer.Rwkv6Tokenizer",
+      null
+    ]
+  }
+}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f3aa133af00ec883843c1197e617d13d81c018bb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,146 @@
+torchaudio==2.2.2
+torchvision==0.17.2
+accelerate==0.33.0
+aiohappyeyeballs==2.4.0
+aiohttp==3.10.5
+aiosignal==1.3.1
+anyio==4.4.0
+appnope==0.1.4
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+arrow==1.3.0
+asttokens==2.4.1
+async-lru==2.0.4
+async-timeout==4.0.3
+attrs==24.2.0
+autotune==0.0.3
+babel==2.16.0
+beautifulsoup4==4.12.3
+bleach==6.1.0
+certifi==2024.8.30
+cffi==1.17.0
+charset-normalizer==3.3.2
+cmake==3.30.2
+comm==0.2.2
+datasets==2.21.0
+debugpy==1.8.5
+decorator==5.1.1
+defusedxml==0.7.1
+dill==0.3.8
+docstring_parser==0.16
+einops==0.8.0
+eval_type_backport==0.2.0
+exceptiongroup==1.2.2
+executing==2.0.1
+fastjsonschema==2.20.0
+filelock==3.15.4
+fqdn==1.5.1
+frozenlist==1.4.1
+fsspec==2024.6.1
+GPUtil==1.4.0
+h11==0.14.0
+httpcore==1.0.5
+httpx==0.27.2
+huggingface-hub==0.24.6
+idna==3.8
+importlib_metadata==8.4.0
+ipykernel==6.29.5
+ipython==8.18.1
+ipywidgets==8.1.5
+isoduration==20.11.0
+jedi==0.19.1
+Jinja2==3.1.4
+json5==0.9.25
+jsonpointer==3.0.0
+jsonschema==4.23.0
+jsonschema-specifications==2023.12.1
+jupyter==1.1.1
+jupyter-console==6.6.3
+jupyter-events==0.10.0
+jupyter-lsp==2.2.5
+jupyter_client==8.6.2
+jupyter_core==5.7.2
+jupyter_server==2.14.2
+jupyter_server_terminals==0.5.3
+jupyterlab==4.2.5
+jupyterlab_pygments==0.3.0
+jupyterlab_server==2.27.3
+jupyterlab_widgets==3.0.13
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib-inline==0.1.7
+mdurl==0.1.2
+mistune==3.0.2
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+nbclient==0.10.0
+nbconvert==7.16.4
+nbformat==5.10.4
+nest-asyncio==1.6.0
+networkx==3.2.1
+ninja==1.11.1.1
+notebook==7.2.2
+notebook_shim==0.2.4
+numpy==1.26.4
+overrides==7.7.0
+pandas==2.2.2
+pandocfilters==1.5.1
+parso==0.8.4
+peft==0.12.0
+pexpect==4.9.0
+pillow==10.4.0
+platformdirs==4.2.2
+prometheus_client==0.20.0
+prompt_toolkit==3.0.47
+psutil==6.0.0
+ptyprocess==0.7.0
+pure_eval==0.2.3
+pyarrow==17.0.0
+pybind11==2.13.5
+pycparser==2.22
+Pygments==2.18.0
+python-dateutil==2.9.0.post0
+python-json-logger==2.0.7
+pytz==2024.1
+PyYAML==6.0.2
+pyzmq==26.2.0
+referencing==0.35.1
+regex==2024.7.24
+requests==2.32.3
+rfc3339-validator==0.1.4
+rfc3986-validator==0.1.1
+rich==13.8.0
+rpds-py==0.20.0
+rwkv==0.8.26
+safetensors==0.4.4
+Send2Trash==1.8.3
+shtab==1.7.1
+six==1.16.0
+sniffio==1.3.1
+soupsieve==2.6
+stack-data==0.6.3
+sympy==1.13.2
+terminado==0.18.1
+tinycss2==1.3.0
+tokenizers==0.19.1
+tomli==2.0.1
+tornado==6.4.1
+tqdm==4.66.5
+traitlets==5.14.3
+transformers==4.44.0
+trl==0.10.1
+types-python-dateutil==2.9.0.20240821
+typing_extensions==4.12.2
+tyro==0.8.10
+tzdata==2024.1
+uri-template==1.3.0
+urllib3==2.2.2
+wcwidth==0.2.13
+webcolors==24.8.0
+webencodings==0.5.1
+websocket-client==1.8.0
+widgetsnbextension==4.0.13
+xxhash==3.5.0
+yarl==1.9.6
+zipp==3.20.1
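
For reference, a minimal sketch (not part of the diff) of how the tokenizer_config.json added above is typically consumed: its auto_map entry tells transformers (pinned to 4.44.0 in requirements.txt) to load the custom Rwkv6Tokenizer class from hf_rwkv_tokenizer.py, which is assumed to sit next to the config inside models/rwkv-6-world-1b6; trust_remote_code must be enabled for that resolution, and the local path and prompt below are illustrative only.

# Minimal sketch, assuming the repo is checked out locally and that
# hf_rwkv_tokenizer.py (referenced by auto_map) lives alongside tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "models/rwkv-6-world-1b6",   # local model directory added by this diff
    trust_remote_code=True,      # required so auto_map can import hf_rwkv_tokenizer.Rwkv6Tokenizer
    use_fast=False,              # mirrors "use_fast": false in tokenizer_config.json
)

ids = tok("Hello, RWKV!")["input_ids"]  # encode an illustrative prompt
print(ids)
print(tok.decode(ids))                  # round-trip back to text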