Commit 7a6bcef (1 parent: fe933ac) by Naozumi0512

Init commit

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50)
  1. .github/workflows/pull_format.yml +43 -0
  2. .github/workflows/push_format.yml +57 -0
  3. .gitignore +188 -0
  4. .gitmodules +0 -0
  5. .pre-commit-config.yaml +25 -0
  6. LICENSE +661 -0
  7. app.py +552 -0
  8. attentions.py +464 -0
  9. bert/bert-base-japanese-v3/.gitattributes +34 -0
  10. bert/bert-base-japanese-v3/README.md +53 -0
  11. bert/bert-base-japanese-v3/config.json +19 -0
  12. bert/bert-base-japanese-v3/tokenizer_config.json +10 -0
  13. bert/bert-base-japanese-v3/vocab.txt +0 -0
  14. bert/bert-large-japanese-v2/.gitattributes +34 -0
  15. bert/bert-large-japanese-v2/README.md +53 -0
  16. bert/bert-large-japanese-v2/config.json +19 -0
  17. bert/bert-large-japanese-v2/tokenizer_config.json +10 -0
  18. bert/bert-large-japanese-v2/vocab.txt +0 -0
  19. bert/bert_models.json +14 -0
  20. bert/chinese-roberta-wwm-ext-large/.gitattributes +9 -0
  21. bert/chinese-roberta-wwm-ext-large/README.md +57 -0
  22. bert/chinese-roberta-wwm-ext-large/added_tokens.json +1 -0
  23. bert/chinese-roberta-wwm-ext-large/config.json +28 -0
  24. bert/chinese-roberta-wwm-ext-large/special_tokens_map.json +1 -0
  25. bert/chinese-roberta-wwm-ext-large/tokenizer.json +0 -0
  26. bert/chinese-roberta-wwm-ext-large/tokenizer_config.json +1 -0
  27. bert/chinese-roberta-wwm-ext-large/vocab.txt +0 -0
  28. bert/deberta-v2-large-japanese-char-wwm/.gitattributes +34 -0
  29. bert/deberta-v2-large-japanese-char-wwm/README.md +89 -0
  30. bert/deberta-v2-large-japanese-char-wwm/config.json +37 -0
  31. bert/deberta-v2-large-japanese-char-wwm/special_tokens_map.json +7 -0
  32. bert/deberta-v2-large-japanese-char-wwm/tokenizer_config.json +19 -0
  33. bert/deberta-v2-large-japanese-char-wwm/vocab.txt +0 -0
  34. bert/deberta-v2-large-japanese/.gitattributes +34 -0
  35. bert/deberta-v2-large-japanese/README.md +111 -0
  36. bert/deberta-v2-large-japanese/config.json +38 -0
  37. bert/deberta-v2-large-japanese/special_tokens_map.json +9 -0
  38. bert/deberta-v2-large-japanese/tokenizer.json +0 -0
  39. bert/deberta-v2-large-japanese/tokenizer_config.json +15 -0
  40. bert/deberta-v3-large/.gitattributes +27 -0
  41. bert/deberta-v3-large/README.md +93 -0
  42. bert/deberta-v3-large/config.json +22 -0
  43. bert/deberta-v3-large/generator_config.json +22 -0
  44. bert/deberta-v3-large/tokenizer_config.json +4 -0
  45. bert_gen.py +81 -0
  46. clap_gen.py +64 -0
  47. clap_wrapper.py +49 -0
  48. commons.py +158 -0
  49. compress_model.py +89 -0
  50. config.py +248 -0
.github/workflows/pull_format.yml ADDED
@@ -0,0 +1,43 @@
+name: pull format
+
+on: [pull_request]
+
+permissions:
+  contents: write
+
+jobs:
+  pull_format:
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      matrix:
+        python-version: ["3.10"]
+        os: [ubuntu-latest]
+      fail-fast: false
+
+    continue-on-error: true
+
+    steps:
+      - name: checkout
+        continue-on-error: true
+        uses: actions/checkout@v3
+        with:
+          ref: ${{ github.head_ref }}
+          fetch-depth: 0
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install Black
+        run: pip install "black[jupyter]"
+
+      - name: Run Black
+        # run: black $(git ls-files '*.py')
+        run: black .
+
+      - name: Commit Back
+        uses: stefanzweifel/git-auto-commit-action@v4
+        with:
+          commit_message: Apply Code Formatter Change
.github/workflows/push_format.yml ADDED
@@ -0,0 +1,57 @@
+name: push format
+
+on:
+  push:
+    branches:
+      - master
+      - dev
+
+permissions:
+  contents: write
+  pull-requests: write
+
+jobs:
+  push_format:
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      matrix:
+        python-version: ["3.10"]
+        os: [ubuntu-latest]
+      fail-fast: false
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.ref_name}}
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install Black
+        run: pip install "black[jupyter]"
+
+      - name: Run Black
+        # run: black $(git ls-files '*.py')
+        run: black .
+
+      - name: Commit Back
+        continue-on-error: true
+        id: commitback
+        run: |
+          git config --local user.email "github-actions[bot]@users.noreply.github.com"
+          git config --local user.name "github-actions[bot]"
+          git add --all
+          git commit -m "Format code"
+
+      - name: Create Pull Request
+        if: steps.commitback.outcome == 'success'
+        continue-on-error: true
+        uses: peter-evans/create-pull-request@v5
+        with:
+          delete-branch: true
+          body: Apply Code Formatter Change
+          title: Apply Code Formatter Change
+          commit-message: Automatic code format
.gitignore ADDED
@@ -0,0 +1,188 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+#   .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+#   This is especially recommended for binary packages to ensure reproducibility, and is more
+#   commonly ignored for libraries.
+#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+#   in version control.
+#   https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+#  and can be added to the global gitignore or merged into this file.  For a more nuclear
+#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+.DS_Store
+/models
+/logs
+
+filelists/*
+!/filelists/esd.list
+data/*
+/*.yml
+!/default_config.yml
+/Web/
+/emotional/*/*.bin
+/slm/*/*.bin
+/bert/*/*.bin
+/bert/*/*.h5
+/bert/*/*.model
+/bert/*/*.safetensors
+/bert/*/*.msgpack
+asr_transcript.py
+extract_list.py
+dataset
+/Data
+Model
+raw/
+logs/
+Data/*
+/onnx
+/.vs
.gitmodules ADDED
File without changes
.pre-commit-config.yaml ADDED
@@ -0,0 +1,25 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.1.8
+    hooks:
+      - id: ruff
+        args: [ --fix ]
+
+  - repo: https://github.com/psf/black
+    rev: 23.12.0
+    hooks:
+      - id: black
+
+  - repo: https://github.com/codespell-project/codespell
+    rev: v2.2.6
+    hooks:
+      - id: codespell
+        files: ^.*\.(py|md|rst|yml)$
+        args: [-L=fro]
LICENSE ADDED
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+  Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
app.py ADDED
@@ -0,0 +1,552 @@
+# flake8: noqa: E402
+import os
+import logging
+import re_matching
+from tools.sentence import split_by_language
+
+logging.getLogger("numba").setLevel(logging.WARNING)
+logging.getLogger("markdown_it").setLevel(logging.WARNING)
+logging.getLogger("urllib3").setLevel(logging.WARNING)
+logging.getLogger("matplotlib").setLevel(logging.WARNING)
+
+logging.basicConfig(
+    level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
+)
+
+logger = logging.getLogger(__name__)
+
+import torch
+import utils
+from infer import infer, latest_version, get_net_g, infer_multilang
+import gradio as gr
+import webbrowser
+import numpy as np
+from config import config
+from tools.translate import translate
+import librosa
+
+net_g = None
+
+device = config.webui_config.device
+if device == "mps":
+    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+
+
+def generate_audio(
+    slices,
+    sdp_ratio,
+    noise_scale,
+    noise_scale_w,
+    length_scale,
+    speaker,
+    language,
+    reference_audio,
+    emotion,
+    style_text,
+    style_weight,
+    skip_start=False,
+    skip_end=False,
+):
+    audio_list = []
+    # silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
+    with torch.no_grad():
+        for idx, piece in enumerate(slices):
+            skip_start = idx != 0
+            skip_end = idx != len(slices) - 1
+            audio = infer(
+                piece,
+                reference_audio=reference_audio,
+                emotion=emotion,
+                sdp_ratio=sdp_ratio,
+                noise_scale=noise_scale,
+                noise_scale_w=noise_scale_w,
+                length_scale=length_scale,
+                sid=speaker,
+                language=language,
+                hps=hps,
+                net_g=net_g,
+                device=device,
+                skip_start=skip_start,
+                skip_end=skip_end,
+                style_text=style_text,
+                style_weight=style_weight,
+            )
+            audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
+            audio_list.append(audio16bit)
+    return audio_list
+
+
+def generate_audio_multilang(
+    slices,
+    sdp_ratio,
+    noise_scale,
+    noise_scale_w,
+    length_scale,
+    speaker,
+    language,
+    reference_audio,
+    emotion,
+    skip_start=False,
+    skip_end=False,
+):
+    audio_list = []
+    # silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
+    with torch.no_grad():
+        for idx, piece in enumerate(slices):
+            skip_start = idx != 0
+            skip_end = idx != len(slices) - 1
+            audio = infer_multilang(
+                piece,
+                reference_audio=reference_audio,
+                emotion=emotion,
+                sdp_ratio=sdp_ratio,
+                noise_scale=noise_scale,
+                noise_scale_w=noise_scale_w,
+                length_scale=length_scale,
+                sid=speaker,
+                language=language[idx],
+                hps=hps,
+                net_g=net_g,
+                device=device,
+                skip_start=skip_start,
+                skip_end=skip_end,
+            )
+            audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
+            audio_list.append(audio16bit)
+    return audio_list
+
+
+def tts_split(
+    text: str,
+    speaker,
+    sdp_ratio,
+    noise_scale,
+    noise_scale_w,
+    length_scale,
+    language,
+    cut_by_sent,
+    interval_between_para,
+    interval_between_sent,
+    reference_audio,
+    emotion,
+    style_text,
+    style_weight,
+):
+    while text.find("\n\n") != -1:
+        text = text.replace("\n\n", "\n")
+    text = text.replace("|", "")
+    para_list = re_matching.cut_para(text)
+    para_list = [p for p in para_list if p != ""]
+    audio_list = []
+    for p in para_list:
+        if not cut_by_sent:
+            audio_list += process_text(
+                p,
+                speaker,
+                sdp_ratio,
+                noise_scale,
+                noise_scale_w,
+                length_scale,
+                language,
+                reference_audio,
+                emotion,
+                style_text,
+                style_weight,
+            )
+            silence = np.zeros((int)(44100 * interval_between_para), dtype=np.int16)
+            audio_list.append(silence)
+        else:
+            audio_list_sent = []
+            sent_list = re_matching.cut_sent(p)
+            sent_list = [s for s in sent_list if s != ""]
+            for s in sent_list:
+                audio_list_sent += process_text(
+                    s,
+                    speaker,
+                    sdp_ratio,
+                    noise_scale,
+                    noise_scale_w,
+                    length_scale,
+                    language,
+                    reference_audio,
+                    emotion,
+                    style_text,
+                    style_weight,
+                )
+                silence = np.zeros((int)(44100 * interval_between_sent))
+                audio_list_sent.append(silence)
+            if (interval_between_para - interval_between_sent) > 0:
+                silence = np.zeros(
+                    (int)(44100 * (interval_between_para - interval_between_sent))
+                )
+                audio_list_sent.append(silence)
+            audio16bit = gr.processing_utils.convert_to_16_bit_wav(
+                np.concatenate(audio_list_sent)
+            )  # normalize the volume over the complete sentence
+            audio_list.append(audio16bit)
+    audio_concat = np.concatenate(audio_list)
+    return ("Success", (hps.data.sampling_rate, audio_concat))
+
+
+def process_mix(slice):
+    _speaker = slice.pop()
+    _text, _lang = [], []
+    for lang, content in slice:
+        content = content.split("|")
+        content = [part for part in content if part != ""]
+        if len(content) == 0:
+            continue
+        if len(_text) == 0:
+            _text = [[part] for part in content]
+            _lang = [[lang] for part in content]
+        else:
+            _text[-1].append(content[0])
+            _lang[-1].append(lang)
+            if len(content) > 1:
+                _text += [[part] for part in content[1:]]
+                _lang += [[lang] for part in content[1:]]
+    return _text, _lang, _speaker
+
+
+def process_auto(text):
+    _text, _lang = [], []
+    for slice in text.split("|"):
+        if slice == "":
+            continue
+        temp_text, temp_lang = [], []
+        sentences_list = split_by_language(slice, target_languages=["zh", "ja", "en"])
+        for sentence, lang in sentences_list:
+            if sentence == "":
+                continue
+            temp_text.append(sentence)
+            temp_lang.append(lang.upper())
+        _text.append(temp_text)
+        _lang.append(temp_lang)
+    return _text, _lang
+
+
+def process_text(
+    text: str,
+    speaker,
+    sdp_ratio,
+    noise_scale,
+    noise_scale_w,
+    length_scale,
+    language,
+    reference_audio,
+    emotion,
+    style_text=None,
+    style_weight=0,
+):
+    audio_list = []
+    if language == "mix":
+        bool_valid, str_valid = re_matching.validate_text(text)
+        if not bool_valid:
+            return str_valid, (
+                hps.data.sampling_rate,
+                np.concatenate([np.zeros(hps.data.sampling_rate // 2)]),
+            )
+        for slice in re_matching.text_matching(text):
+            _text, _lang, _speaker = process_mix(slice)
+            if _speaker is None:
+                continue
+            print(f"Text: {_text}\nLang: {_lang}")
+            audio_list.extend(
+                generate_audio_multilang(
+                    _text,
+                    sdp_ratio,
+                    noise_scale,
+                    noise_scale_w,
+                    length_scale,
+                    _speaker,
+                    _lang,
+                    reference_audio,
+                    emotion,
+                )
+            )
+    elif language.lower() == "auto":
+        _text, _lang = process_auto(text)
+        print(f"Text: {_text}\nLang: {_lang}")
+        audio_list.extend(
+            generate_audio_multilang(
+                _text,
+                sdp_ratio,
+                noise_scale,
+                noise_scale_w,
+                length_scale,
+                speaker,
+                _lang,
+                reference_audio,
+                emotion,
+            )
+        )
+    else:
+        audio_list.extend(
+            generate_audio(
+                text.split("|"),
+                sdp_ratio,
+                noise_scale,
+                noise_scale_w,
+                length_scale,
+                speaker,
+                language,
+                reference_audio,
+                emotion,
+                style_text,
+                style_weight,
+            )
+        )
+    return audio_list
+
+
+def tts_fn(
+    text: str,
+    speaker,
+    sdp_ratio,
+    noise_scale,
+    noise_scale_w,
+    length_scale,
+    language,
+    reference_audio,
+    emotion,
+    prompt_mode,
+    style_text=None,
+    style_weight=0,
+):
+    if style_text == "":
+        style_text = None
+    if prompt_mode == "Audio prompt":
+        if reference_audio == None:
+            return ("Invalid audio prompt", None)
+        else:
+            reference_audio = load_audio(reference_audio)[1]
+    else:
+        reference_audio = None
+
+    audio_list = process_text(
+        text,
+        speaker,
+        sdp_ratio,
+        noise_scale,
+        noise_scale_w,
+        length_scale,
+        language,
+        reference_audio,
+        emotion,
+        style_text,
+        style_weight,
+    )
+
+    audio_concat = np.concatenate(audio_list)
+    return "Success", (hps.data.sampling_rate, audio_concat)
+
+
+def format_utils(text, speaker):
+    _text, _lang = process_auto(text)
+    res = f"[{speaker}]"
+    for lang_s, content_s in zip(_lang, _text):
+        for lang, content in zip(lang_s, content_s):
+            res += f"<{lang.lower()}>{content}"
+        res += "|"
+    return "mix", res[:-1]
+
+
+def load_audio(path):
+    audio, sr = librosa.load(path, 48000)
+    # audio = librosa.resample(audio, 44100, 48000)
+    return sr, audio
+
+
+def gr_util(item):
+    if item == "Text prompt":
+        return {"visible": True, "__type__": "update"}, {
+            "visible": False,
+            "__type__": "update",
+        }
+    else:
+        return {"visible": False, "__type__": "update"}, {
+            "visible": True,
+            "__type__": "update",
+        }
+
+
+if __name__ == "__main__":
+    if config.webui_config.debug:
+        logger.info("Enable DEBUG-LEVEL log")
+        logging.basicConfig(level=logging.DEBUG)
+    hps = utils.get_hparams_from_file(config.webui_config.config_path)
+    # default to the latest version if config.json does not specify one
+    version = hps.version if hasattr(hps, "version") else latest_version
380
+ net_g = get_net_g(
381
+ model_path=config.webui_config.model, version=version, device=device, hps=hps
382
+ )
383
+ speaker_ids = hps.data.spk2id
384
+ speakers = list(speaker_ids.keys())
385
+ languages = ["ZH", "JP", "EN", "mix", "auto"]
386
+ with gr.Blocks() as app:
387
+ with gr.Row():
388
+ with gr.Column():
389
+ text = gr.TextArea(
390
+ label="输入文本内容",
391
+ placeholder="""
392
+ 如果你选择语言为\'mix\',必须按照格式输入,否则报错:
393
+ 格式举例(zh是中文,jp是日语,不区分大小写;说话人举例:gongzi):
394
+ [说话人1]<zh>你好,こんにちは! <jp>こんにちは,世界。
395
+ [说话人2]<zh>你好吗?<jp>元気ですか?
396
+ [说话人3]<zh>谢谢。<jp>どういたしまして。
397
+ ...
398
+ 另外,所有的语言选项都可以用'|'分割长段实现分句生成。
399
+ """,
400
+ )
401
+ trans = gr.Button("中翻日", variant="primary")
402
+ slicer = gr.Button("快速切分", variant="primary")
403
+ formatter = gr.Button("检测语言,并整理为 MIX 格式", variant="primary")
404
+ speaker = gr.Dropdown(
405
+ choices=speakers, value=speakers[0], label="Speaker"
406
+ )
407
+ _ = gr.Markdown(
408
+ value="提示模式(Prompt mode):可选文字提示或音频提示,用于生成文字或音频指定风格的声音。\n",
409
+ visible=False,
410
+ )
411
+ prompt_mode = gr.Radio(
412
+ ["Text prompt", "Audio prompt"],
413
+ label="Prompt Mode",
414
+ value="Text prompt",
415
+ visible=False,
416
+ )
417
+ text_prompt = gr.Textbox(
418
+ label="Text prompt",
419
+ placeholder="用文字描述生成风格。如:Happy",
420
+ value="Happy",
421
+ visible=False,
422
+ )
423
+ audio_prompt = gr.Audio(
424
+ label="Audio prompt", type="filepath", visible=False
425
+ )
426
+ sdp_ratio = gr.Slider(
427
+ minimum=0, maximum=1, value=0.5, step=0.1, label="SDP Ratio"
428
+ )
429
+ noise_scale = gr.Slider(
430
+ minimum=0.1, maximum=2, value=0.6, step=0.1, label="Noise"
431
+ )
432
+ noise_scale_w = gr.Slider(
433
+ minimum=0.1, maximum=2, value=0.9, step=0.1, label="Noise_W"
434
+ )
435
+ length_scale = gr.Slider(
436
+ minimum=0.1, maximum=2, value=1.0, step=0.1, label="Length"
437
+ )
438
+ language = gr.Dropdown(
439
+ choices=languages, value=languages[0], label="Language"
440
+ )
441
+ btn = gr.Button("生成音频!", variant="primary")
442
+ with gr.Column():
443
+ with gr.Accordion("融合文本语义", open=False):
444
+ gr.Markdown(
445
+ value="使用辅助文本的语意来辅助生成对话(语言保持与主文本相同)\n\n"
446
+ "**注意**:不要使用**指令式文本**(如:开心),要使用**带有强烈情感的文本**(如:我好快乐!!!)\n\n"
447
+ "效果较不明确,留空即为不使用该功能"
448
+ )
449
+ style_text = gr.Textbox(label="辅助文本")
450
+ style_weight = gr.Slider(
451
+ minimum=0,
452
+ maximum=1,
453
+ value=0.7,
454
+ step=0.1,
455
+ label="Weight",
456
+ info="主文本和辅助文本的bert混合比率,0表示仅主文本,1表示仅辅助文本",
457
+ )
458
+ with gr.Row():
459
+ with gr.Column():
460
+ interval_between_sent = gr.Slider(
461
+ minimum=0,
462
+ maximum=5,
463
+ value=0.2,
464
+ step=0.1,
465
+ label="句间停顿(秒),勾选按句切分才生效",
466
+ )
467
+ interval_between_para = gr.Slider(
468
+ minimum=0,
469
+ maximum=10,
470
+ value=1,
471
+ step=0.1,
472
+ label="段间停顿(秒),需要大于句间停顿才有效",
473
+ )
474
+ opt_cut_by_sent = gr.Checkbox(
475
+ label="按句切分 在按段落切分的基础上再按句子切分文本"
476
+ )
477
+ slicer = gr.Button("切分生成", variant="primary")
478
+ text_output = gr.Textbox(label="状态信息")
479
+ audio_output = gr.Audio(label="输出音频")
480
+ # explain_image = gr.Image(
481
+ # label="参数解释信息",
482
+ # show_label=True,
483
+ # show_share_button=False,
484
+ # show_download_button=False,
485
+ # value=os.path.abspath("./img/参数说明.png"),
486
+ # )
487
+ btn.click(
488
+ tts_fn,
489
+ inputs=[
490
+ text,
491
+ speaker,
492
+ sdp_ratio,
493
+ noise_scale,
494
+ noise_scale_w,
495
+ length_scale,
496
+ language,
497
+ audio_prompt,
498
+ text_prompt,
499
+ prompt_mode,
500
+ style_text,
501
+ style_weight,
502
+ ],
503
+ outputs=[text_output, audio_output],
504
+ )
505
+
506
+ trans.click(
507
+ translate,
508
+ inputs=[text],
509
+ outputs=[text],
510
+ )
511
+ slicer.click(
512
+ tts_split,
513
+ inputs=[
514
+ text,
515
+ speaker,
516
+ sdp_ratio,
517
+ noise_scale,
518
+ noise_scale_w,
519
+ length_scale,
520
+ language,
521
+ opt_cut_by_sent,
522
+ interval_between_para,
523
+ interval_between_sent,
524
+ audio_prompt,
525
+ text_prompt,
526
+ style_text,
527
+ style_weight,
528
+ ],
529
+ outputs=[text_output, audio_output],
530
+ )
531
+
532
+ prompt_mode.change(
533
+ lambda x: gr_util(x),
534
+ inputs=[prompt_mode],
535
+ outputs=[text_prompt, audio_prompt],
536
+ )
537
+
538
+ audio_prompt.upload(
539
+ lambda x: load_audio(x),
540
+ inputs=[audio_prompt],
541
+ outputs=[audio_prompt],
542
+ )
543
+
544
+ formatter.click(
545
+ format_utils,
546
+ inputs=[text, speaker],
547
+ outputs=[language, text],
548
+ )
549
+
550
+ print("推理页面已开启!")
551
+ webbrowser.open(f"http://127.0.0.1:{config.webui_config.port}")
552
+ app.launch(share=config.webui_config.share, server_name="0.0.0.0", server_port=config.webui_config.port)
attentions.py ADDED
@@ -0,0 +1,464 @@
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ import commons
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class LayerNorm(nn.Module):
+     def __init__(self, channels, eps=1e-5):
+         super().__init__()
+         self.channels = channels
+         self.eps = eps
+
+         self.gamma = nn.Parameter(torch.ones(channels))
+         self.beta = nn.Parameter(torch.zeros(channels))
+
+     def forward(self, x):
+         x = x.transpose(1, -1)
+         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+         return x.transpose(1, -1)
+
+
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     acts = t_act * s_act
+     return acts
+
+
+ class Encoder(nn.Module):
+     def __init__(
+         self,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size=1,
+         p_dropout=0.0,
+         window_size=4,
+         isflow=True,
+         **kwargs
+     ):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         # if isflow:
+         #     cond_layer = torch.nn.Conv1d(256, 2 * hidden_channels * n_layers, 1)
+         #     self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
+         #     self.cond_layer = weight_norm(cond_layer, name='weight')
+         #     self.gin_channels = 256
+         self.cond_layer_idx = self.n_layers
+         if "gin_channels" in kwargs:
+             self.gin_channels = kwargs["gin_channels"]
+             if self.gin_channels != 0:
+                 self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
+                 # vits2 says 3rd block, so idx is 2 by default
+                 self.cond_layer_idx = (
+                     kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
+                 )
+                 logger.debug(
+                     "gin_channels: %s, cond_layer_idx: %s",
+                     self.gin_channels,
+                     self.cond_layer_idx,
+                 )
+                 assert (
+                     self.cond_layer_idx < self.n_layers
+                 ), "cond_layer_idx should be less than n_layers"
+         self.drop = nn.Dropout(p_dropout)
+         self.attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels,
+                     hidden_channels,
+                     n_heads,
+                     p_dropout=p_dropout,
+                     window_size=window_size,
+                 )
+             )
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(
+                 FFN(
+                     hidden_channels,
+                     hidden_channels,
+                     filter_channels,
+                     kernel_size,
+                     p_dropout=p_dropout,
+                 )
+             )
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask, g=None):
+         attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             if i == self.cond_layer_idx and g is not None:
+                 g = self.spk_emb_linear(g.transpose(1, 2))
+                 g = g.transpose(1, 2)
+                 x = x + g
+                 x = x * x_mask
+             y = self.attn_layers[i](x, x, attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+
+ class Decoder(nn.Module):
+     def __init__(
+         self,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size=1,
+         p_dropout=0.0,
+         proximal_bias=False,
+         proximal_init=True,
+         **kwargs
+     ):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+
+         self.drop = nn.Dropout(p_dropout)
+         self.self_attn_layers = nn.ModuleList()
+         self.norm_layers_0 = nn.ModuleList()
+         self.encdec_attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.self_attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels,
+                     hidden_channels,
+                     n_heads,
+                     p_dropout=p_dropout,
+                     proximal_bias=proximal_bias,
+                     proximal_init=proximal_init,
+                 )
+             )
+             self.norm_layers_0.append(LayerNorm(hidden_channels))
+             self.encdec_attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
+                 )
+             )
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(
+                 FFN(
+                     hidden_channels,
+                     hidden_channels,
+                     filter_channels,
+                     kernel_size,
+                     p_dropout=p_dropout,
+                     causal=True,
+                 )
+             )
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask, h, h_mask):
+         """
+         x: decoder input
+         h: encoder output
+         """
+         self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
+             device=x.device, dtype=x.dtype
+         )
+         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             y = self.self_attn_layers[i](x, x, self_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_0[i](x + y)
+
+             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(
+         self,
+         channels,
+         out_channels,
+         n_heads,
+         p_dropout=0.0,
+         window_size=None,
+         heads_share=True,
+         block_length=None,
+         proximal_bias=False,
+         proximal_init=False,
+     ):
+         super().__init__()
+         assert channels % n_heads == 0
+
+         self.channels = channels
+         self.out_channels = out_channels
+         self.n_heads = n_heads
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         self.heads_share = heads_share
+         self.block_length = block_length
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+         self.attn = None
+
+         self.k_channels = channels // n_heads
+         self.conv_q = nn.Conv1d(channels, channels, 1)
+         self.conv_k = nn.Conv1d(channels, channels, 1)
+         self.conv_v = nn.Conv1d(channels, channels, 1)
+         self.conv_o = nn.Conv1d(channels, out_channels, 1)
+         self.drop = nn.Dropout(p_dropout)
+
+         if window_size is not None:
+             n_heads_rel = 1 if heads_share else n_heads
+             rel_stddev = self.k_channels**-0.5
+             self.emb_rel_k = nn.Parameter(
+                 torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                 * rel_stddev
+             )
+             self.emb_rel_v = nn.Parameter(
+                 torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                 * rel_stddev
+             )
+
+         nn.init.xavier_uniform_(self.conv_q.weight)
+         nn.init.xavier_uniform_(self.conv_k.weight)
+         nn.init.xavier_uniform_(self.conv_v.weight)
+         if proximal_init:
+             with torch.no_grad():
+                 self.conv_k.weight.copy_(self.conv_q.weight)
+                 self.conv_k.bias.copy_(self.conv_q.bias)
+
+     def forward(self, x, c, attn_mask=None):
+         q = self.conv_q(x)
+         k = self.conv_k(c)
+         v = self.conv_v(c)
+
+         x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+         x = self.conv_o(x)
+         return x
+
+     def attention(self, query, key, value, mask=None):
+         # reshape [b, d, t] -> [b, n_h, t, d_k]
+         b, d, t_s, t_t = (*key.size(), query.size(2))
+         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+         if self.window_size is not None:
+             assert (
+                 t_s == t_t
+             ), "Relative attention is only available for self-attention."
+             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+             rel_logits = self._matmul_with_relative_keys(
+                 query / math.sqrt(self.k_channels), key_relative_embeddings
+             )
+             scores_local = self._relative_position_to_absolute_position(rel_logits)
+             scores = scores + scores_local
+         if self.proximal_bias:
+             assert t_s == t_t, "Proximal bias is only available for self-attention."
+             scores = scores + self._attention_bias_proximal(t_s).to(
+                 device=scores.device, dtype=scores.dtype
+             )
+         if mask is not None:
+             scores = scores.masked_fill(mask == 0, -1e4)
+             if self.block_length is not None:
+                 assert (
+                     t_s == t_t
+                 ), "Local attention is only available for self-attention."
+                 block_mask = (
+                     torch.ones_like(scores)
+                     .triu(-self.block_length)
+                     .tril(self.block_length)
+                 )
+                 scores = scores.masked_fill(block_mask == 0, -1e4)
+         p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+         p_attn = self.drop(p_attn)
+         output = torch.matmul(p_attn, value)
+         if self.window_size is not None:
+             relative_weights = self._absolute_position_to_relative_position(p_attn)
+             value_relative_embeddings = self._get_relative_embeddings(
+                 self.emb_rel_v, t_s
+             )
+             output = output + self._matmul_with_relative_values(
+                 relative_weights, value_relative_embeddings
+             )
+         output = (
+             output.transpose(2, 3).contiguous().view(b, d, t_t)
+         )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+         return output, p_attn
+
+     def _matmul_with_relative_values(self, x, y):
+         """
+         x: [b, h, l, m]
+         y: [h or 1, m, d]
+         ret: [b, h, l, d]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0))
+         return ret
+
+     def _matmul_with_relative_keys(self, x, y):
+         """
+         x: [b, h, l, d]
+         y: [h or 1, m, d]
+         ret: [b, h, l, m]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+         return ret
+
+     def _get_relative_embeddings(self, relative_embeddings, length):
+         # The embedding table covers 2 * window_size + 1 relative positions.
+         # Pad first before slice to avoid using cond ops.
+         pad_length = max(length - (self.window_size + 1), 0)
+         slice_start_position = max((self.window_size + 1) - length, 0)
+         slice_end_position = slice_start_position + 2 * length - 1
+         if pad_length > 0:
+             padded_relative_embeddings = F.pad(
+                 relative_embeddings,
+                 commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+             )
+         else:
+             padded_relative_embeddings = relative_embeddings
+         used_relative_embeddings = padded_relative_embeddings[
+             :, slice_start_position:slice_end_position
+         ]
+         return used_relative_embeddings
+
+     def _relative_position_to_absolute_position(self, x):
+         """
+         x: [b, h, l, 2*l-1]
+         ret: [b, h, l, l]
+         """
+         batch, heads, length, _ = x.size()
+         # Concat columns of pad to shift from relative to absolute indexing.
+         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+         # Concat extra elements so to add up to shape (len+1, 2*len-1).
+         x_flat = x.view([batch, heads, length * 2 * length])
+         x_flat = F.pad(
+             x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+         )
+
+         # Reshape and slice out the padded elements.
+         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+             :, :, :length, length - 1 :
+         ]
+         return x_final
+
+     def _absolute_position_to_relative_position(self, x):
+         """
+         x: [b, h, l, l]
+         ret: [b, h, l, 2*l-1]
+         """
+         batch, heads, length, _ = x.size()
+         # pad along column
+         x = F.pad(
+             x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+         )
+         x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+         # add 0's in the beginning that will skew the elements after reshape
+         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+         return x_final
+
+     def _attention_bias_proximal(self, length):
+         """Bias for self-attention to encourage attention to close positions.
+         Args:
+           length: an integer scalar.
+         Returns:
+           a Tensor with shape [1, 1, length, length]
+         """
+         r = torch.arange(length, dtype=torch.float32)
+         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+ class FFN(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         out_channels,
+         filter_channels,
+         kernel_size,
+         p_dropout=0.0,
+         activation=None,
+         causal=False,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.activation = activation
+         self.causal = causal
+
+         if causal:
+             self.padding = self._causal_padding
+         else:
+             self.padding = self._same_padding
+
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+         self.drop = nn.Dropout(p_dropout)
+
+     def forward(self, x, x_mask):
+         x = self.conv_1(self.padding(x * x_mask))
+         if self.activation == "gelu":
+             x = x * torch.sigmoid(1.702 * x)
+         else:
+             x = torch.relu(x)
+         x = self.drop(x)
+         x = self.conv_2(self.padding(x * x_mask))
+         return x * x_mask
+
+     def _causal_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = self.kernel_size - 1
+         pad_r = 0
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
+
+     def _same_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = (self.kernel_size - 1) // 2
+         pad_r = self.kernel_size // 2
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
bert/bert-base-japanese-v3/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/bert-base-japanese-v3/README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - cc100
+ - wikipedia
+ language:
+ - ja
+ widget:
+ - text: 東北大学で[MASK]の研究をしています。
+ ---
+
+ # BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
+
+ This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
+
+ This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in the [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by WordPiece subword tokenization.
+ Additionally, the model is trained with whole word masking enabled for the masked language modeling (MLM) objective.
+
+ The code for the pretraining is available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
+
+ ## Model architecture
+
+ The model architecture is the same as the original BERT base model: 12 layers, 768 dimensions of hidden states, and 12 attention heads.
+
+ ## Training Data
+
+ The model is trained on the Japanese portion of the [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
+ For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
+ The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
+
+ For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with the [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
+
+ ## Tokenization
+
+ The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
+ The vocabulary size is 32768.
+
+ We used the [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
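+
+ For illustration, a minimal loading sketch (not part of the upstream model card; it assumes the Hugging Face `transformers` package plus the `fugashi` and `unidic-lite` dependencies named above, and that this folder is used as the local model path):
+
+ ```python
+ from transformers import AutoTokenizer
+
+ # MeCab (unidic-lite) word tokenization followed by WordPiece, as described above
+ tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3")
+ print(tokenizer.tokenize("東北大学で自然言語処理の研究をしています。"))
+ ```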
+
+ ## Training
+
+ We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
+ For training of the MLM (masked language modeling) objective, we introduced whole word masking, in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once, as in the toy sketch below.
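+
+ A toy illustration of that masking scheme (a sketch, not the actual training code; the word segmentation shown is hypothetical):
+
+ ```python
+ import random
+
+ # each inner list holds the WordPiece subwords of one MeCab word
+ words = [["東北", "##大学"], ["で"], ["研究", "##者"], ["として"], ["働く"]]
+ masked = [["[MASK]"] * len(w) if random.random() < 0.15 else w for w in words]
+ print(sum(masked, []))  # all subwords of a chosen word are masked together
+ ```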
+
+ For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
+
+ ## Licenses
+
+ The pretrained models are distributed under the Apache License 2.0.
+
+ ## Acknowledgments
+
+ This model is trained with Cloud TPUs provided by the [TPU Research Cloud](https://sites.research.google/trc/about/) program.
bert/bert-base-japanese-v3/config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 32768
+ }
bert/bert-base-japanese-v3/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "model_max_length": 512,
+   "do_lower_case": false,
+   "word_tokenizer_type": "mecab",
+   "subword_tokenizer_type": "wordpiece",
+   "mecab_kwargs": {
+     "mecab_dic": "unidic_lite"
+   }
+ }
bert/bert-base-japanese-v3/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/bert-large-japanese-v2/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/bert-large-japanese-v2/README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - cc100
+ - wikipedia
+ language:
+ - ja
+ widget:
+ - text: 東北大学で[MASK]の研究をしています。
+ ---
+
+ # BERT large Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
+
+ This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
+
+ This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in the [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by WordPiece subword tokenization.
+ Additionally, the model is trained with whole word masking enabled for the masked language modeling (MLM) objective.
+
+ The code for the pretraining is available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
+
+ ## Model architecture
+
+ The model architecture is the same as the original BERT large model: 24 layers, 1024 dimensions of hidden states, and 16 attention heads.
+
+ ## Training Data
+
+ The model is trained on the Japanese portion of the [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
+ For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
+ The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
+
+ For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with the [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
+
+ ## Tokenization
+
+ The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
+ The vocabulary size is 32768.
+
+ We used the [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
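+
+ A minimal fill-mask sketch (not from the upstream card; it assumes `transformers`, `fugashi`, and `unidic-lite` are installed and that the model weights have been downloaded into this folder):
+
+ ```python
+ from transformers import pipeline
+
+ # uses the widget sentence from the metadata above
+ fill_mask = pipeline("fill-mask", model="./bert/bert-large-japanese-v2")
+ print(fill_mask("東北大学で[MASK]の研究をしています。")[0])
+ ```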
+
+ ## Training
+
+ We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
+ For training of the MLM (masked language modeling) objective, we introduced whole word masking, in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
+
+ For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
+
+ ## Licenses
+
+ The pretrained models are distributed under the Apache License 2.0.
+
+ ## Acknowledgments
+
+ This model is trained with Cloud TPUs provided by the [TPU Research Cloud](https://sites.research.google/trc/about/) program.
bert/bert-large-japanese-v2/config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 32768
+ }
bert/bert-large-japanese-v2/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "model_max_length": 512,
+   "do_lower_case": false,
+   "word_tokenizer_type": "mecab",
+   "subword_tokenizer_type": "wordpiece",
+   "mecab_kwargs": {
+     "mecab_dic": "unidic_lite"
+   }
+ }
bert/bert-large-japanese-v2/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/bert_models.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "deberta-v2-large-japanese-char-wwm": {
+     "repo_id": "ku-nlp/deberta-v2-large-japanese-char-wwm",
+     "files": ["pytorch_model.bin"]
+   },
+   "chinese-roberta-wwm-ext-large": {
+     "repo_id": "hfl/chinese-roberta-wwm-ext-large",
+     "files": ["pytorch_model.bin"]
+   },
+   "deberta-v3-large": {
+     "repo_id": "microsoft/deberta-v3-large",
+     "files": ["spm.model", "pytorch_model.bin"]
+   }
+ }
bert/chinese-roberta-wwm-ext-large/.gitattributes ADDED
@@ -0,0 +1,9 @@
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
bert/chinese-roberta-wwm-ext-large/README.md ADDED
@@ -0,0 +1,57 @@
+ ---
+ language:
+ - zh
+ tags:
+ - bert
+ license: "apache-2.0"
+ ---
+
+ # Please use 'Bert' related functions to load this model!
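+
+ In practice (a minimal sketch, not from the original card), that means loading the checkpoint with the `Bert*` classes rather than the RoBERTa ones, since the weights and vocabulary follow the BERT format:
+
+ ```python
+ from transformers import BertTokenizer, BertForMaskedLM
+
+ # BertTokenizer/BertForMaskedLM, not RobertaTokenizer/RobertaForMaskedLM
+ tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
+ model = BertForMaskedLM.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
+ ```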
+
+ ## Chinese BERT with Whole Word Masking
+ To further accelerate Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
+
+ **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
+ Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
+
+ This repository is developed based on: https://github.com/google-research/bert
+
+ You may also be interested in:
+ - Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
+ - Chinese MacBERT: https://github.com/ymcui/MacBERT
+ - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
+ - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
+ - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
+
+ More resources by HFL: https://github.com/ymcui/HFL-Anthology
+
+ ## Citation
+ If you find the technical report or resources useful, please cite the following technical report in your paper.
+ - Primary: https://arxiv.org/abs/2004.13922
+ ```
+ @inproceedings{cui-etal-2020-revisiting,
+   title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
+   author = "Cui, Yiming and
+     Che, Wanxiang and
+     Liu, Ting and
+     Qin, Bing and
+     Wang, Shijin and
+     Hu, Guoping",
+   booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
+   month = nov,
+   year = "2020",
+   address = "Online",
+   publisher = "Association for Computational Linguistics",
+   url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
+   pages = "657--668",
+ }
+ ```
+ - Secondary: https://arxiv.org/abs/1906.08101
+ ```
+ @article{chinese-bert-wwm,
+   title={Pre-Training with Whole Word Masking for Chinese BERT},
+   author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
+   journal={arXiv preprint arXiv:1906.08101},
+   year={2019}
+ }
+ ```
bert/chinese-roberta-wwm-ext-large/added_tokens.json ADDED
@@ -0,0 +1 @@
+ {}
bert/chinese-roberta-wwm-ext-large/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "type_vocab_size": 2,
+   "vocab_size": 21128
+ }
bert/chinese-roberta-wwm-ext-large/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
bert/chinese-roberta-wwm-ext-large/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert/chinese-roberta-wwm-ext-large/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"init_inputs": []}
bert/chinese-roberta-wwm-ext-large/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/deberta-v2-large-japanese-char-wwm/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/deberta-v2-large-japanese-char-wwm/README.md ADDED
@@ -0,0 +1,89 @@
+ ---
+ language: ja
+ license: cc-by-sa-4.0
+ library_name: transformers
+ tags:
+ - deberta
+ - deberta-v2
+ - fill-mask
+ - character
+ - wwm
+ datasets:
+ - wikipedia
+ - cc100
+ - oscar
+ metrics:
+ - accuracy
+ mask_token: "[MASK]"
+ widget:
+ - text: "京都大学で自然言語処理を[MASK][MASK]する。"
+ ---
+
+ # Model Card for Japanese character-level DeBERTa V2 large
+
+ ## Model description
+
+ This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.
+ This model is trained with character-level tokenization and whole word masking.
+
+ ## How to use
+
+ You can use this model for masked language modeling as follows:
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+ tokenizer = AutoTokenizer.from_pretrained('ku-nlp/deberta-v2-large-japanese-char-wwm')
+ model = AutoModelForMaskedLM.from_pretrained('ku-nlp/deberta-v2-large-japanese-char-wwm')
+
+ sentence = '京都大学で自然言語処理を[MASK][MASK]する。'
+ encoding = tokenizer(sentence, return_tensors='pt')
+ ...
+ ```
+
+ You can also fine-tune this model on downstream tasks.
+
+ ## Tokenization
+
+ There is no need to tokenize texts in advance; you can give raw texts directly to the tokenizer.
+ The texts are tokenized into character-level tokens by [sentencepiece](https://github.com/google/sentencepiece).
+
50
+ ## Training data
51
+
52
+ We used the following corpora for pre-training:
53
+
54
+ - Japanese Wikipedia (as of 20221020, 3.2GB, 27M sentences, 1.3M documents)
55
+ - Japanese portion of CC-100 (85GB, 619M sentences, 66M documents)
56
+ - Japanese portion of OSCAR (54GB, 326M sentences, 25M documents)
57
+
58
+ Note that we filtered out documents annotated with "header", "footer", or "noisy" tags in OSCAR.
59
+ Also note that Japanese Wikipedia was duplicated 10 times to make the total size of the corpus comparable to that of CC-100 and OSCAR. As a result, the total size of the training data is 171GB.
60
+
61
+ ## Training procedure
62
+
63
+ We first segmented texts in the corpora into words using [Juman++ 2.0.0-rc3](https://github.com/ku-nlp/jumanpp/releases/tag/v2.0.0-rc3) for whole word masking.
64
+ Then, we built a sentencepiece model with 22,012 tokens including all characters that appear in the training corpus.
65
+
66
+ We tokenized raw corpora into character-level subwords using the sentencepiece model and trained the Japanese DeBERTa model using [transformers](https://github.com/huggingface/transformers) library.
67
+ The training took 26 days using 16 NVIDIA A100-SXM4-40GB GPUs.
68
+
69
+ The following hyperparameters were used during pre-training:
70
+
71
+ - learning_rate: 1e-4
72
+ - per_device_train_batch_size: 26
73
+ - distributed_type: multi-GPU
74
+ - num_devices: 16
75
+ - gradient_accumulation_steps: 8
76
+ - total_train_batch_size: 3,328
77
+ - max_seq_length: 512
78
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
79
+ - lr_scheduler_type: linear schedule with warmup (lr = 0 at 300k steps)
80
+ - training_steps: 260,000
81
+ - warmup_steps: 10,000
82
+
83
+ The accuracy of the trained model on the masked language modeling task was 0.795.
84
+ The evaluation set consists of 5,000 randomly sampled documents from each of the training corpora.
85
+
86
+ ## Acknowledgments
87
+
88
+ This work was supported by Joint Usage/Research Center for Interdisciplinary Large-scale Information Infrastructures (JHPCN) through General Collaboration Project no. jh221004, "Developing a Platform for Constructing and Sharing of Large-Scale Japanese Language Models".
89
+ For training models, we used the mdx: a platform for the data-driven future.
bert/deberta-v2-large-japanese-char-wwm/config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "architectures": [
+     "DebertaV2ForMaskedLM"
+   ],
+   "attention_head_size": 64,
+   "attention_probs_dropout_prob": 0.1,
+   "conv_act": "gelu",
+   "conv_kernel_size": 3,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 1024,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float16",
+   "transformers_version": "4.25.1",
+   "type_vocab_size": 0,
+   "vocab_size": 22012
+ }
bert/deberta-v2-large-japanese-char-wwm/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
bert/deberta-v2-large-japanese-char-wwm/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "do_subword_tokenize": true,
+   "do_word_tokenize": true,
+   "jumanpp_kwargs": null,
+   "mask_token": "[MASK]",
+   "mecab_kwargs": null,
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": null,
+   "subword_tokenizer_type": "character",
+   "sudachi_kwargs": null,
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "unk_token": "[UNK]",
+   "word_tokenizer_type": "basic"
+ }
bert/deberta-v2-large-japanese-char-wwm/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/deberta-v2-large-japanese/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/deberta-v2-large-japanese/README.md ADDED
@@ -0,0 +1,111 @@
+ ---
+ language: ja
+ license: cc-by-sa-4.0
+ library_name: transformers
+ tags:
+ - deberta
+ - deberta-v2
+ - fill-mask
+ datasets:
+ - wikipedia
+ - cc100
+ - oscar
+ metrics:
+ - accuracy
+ mask_token: "[MASK]"
+ widget:
+ - text: "京都 大学 で 自然 言語 処理 を [MASK] する 。"
+ ---
+
+ # Model Card for Japanese DeBERTa V2 large
+
+ ## Model description
+
+ This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.
+
+ ## How to use
+
+ You can use this model for masked language modeling as follows:
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+
+ tokenizer = AutoTokenizer.from_pretrained('ku-nlp/deberta-v2-large-japanese')
+ model = AutoModelForMaskedLM.from_pretrained('ku-nlp/deberta-v2-large-japanese')
+
+ sentence = '京都 大学 で 自然 言語 処理 を [MASK] する 。'  # input should be segmented into words by Juman++ in advance
+ encoding = tokenizer(sentence, return_tensors='pt')
+ ...
+ ```
+
+ You can also fine-tune this model on downstream tasks.
+
+ ## Tokenization
+
+ The input text should be segmented into words by [Juman++](https://github.com/ku-nlp/jumanpp) in advance; [Juman++ 2.0.0-rc3](https://github.com/ku-nlp/jumanpp/releases/tag/v2.0.0-rc3) was used for pre-training. Each word is then tokenized into subwords by [sentencepiece](https://github.com/google/sentencepiece).
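+
+ A sketch of that pre-segmentation step (an illustration added here, assuming the `pyknp` Python binding and a local Juman++ installation; not part of the upstream card):
+
+ ```python
+ from pyknp import Juman
+
+ jumanpp = Juman()  # wraps a locally installed Juman++ binary
+ text = "京都大学で自然言語処理を研究する。"
+ # join the surface forms with spaces, ready for the tokenizer above
+ segmented = " ".join(m.midasi for m in jumanpp.analysis(text).mrph_list())
+ print(segmented)
+ ```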
+
+ ## Training data
+
+ We used the following corpora for pre-training:
+
+ - Japanese Wikipedia (as of 20221020, 3.2GB, 27M sentences, 1.3M documents)
+ - Japanese portion of CC-100 (85GB, 619M sentences, 66M documents)
+ - Japanese portion of OSCAR (54GB, 326M sentences, 25M documents)
+
+ Note that we filtered out documents annotated with "header", "footer", or "noisy" tags in OSCAR.
+ Also note that Japanese Wikipedia was duplicated 10 times to make the total size of the corpus comparable to that of CC-100 and OSCAR. As a result, the total size of the training data is 171GB.
+
+ ## Training procedure
+
+ We first segmented texts in the corpora into words using [Juman++](https://github.com/ku-nlp/jumanpp).
+ Then, we built a sentencepiece model with 32000 tokens including words ([JumanDIC](https://github.com/ku-nlp/JumanDIC)) and subwords induced by the unigram language model of [sentencepiece](https://github.com/google/sentencepiece).
+
+ We tokenized the segmented corpora into subwords using the sentencepiece model and trained the Japanese DeBERTa model using the [transformers](https://github.com/huggingface/transformers) library.
+ The training took 36 days using 8 NVIDIA A100-SXM4-40GB GPUs.
+
+ The following hyperparameters were used during pre-training:
+
+ - learning_rate: 1e-4
+ - per_device_train_batch_size: 18
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 2,304
+ - max_seq_length: 512
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
+ - lr_scheduler_type: linear schedule with warmup
+ - training_steps: 300,000
+ - warmup_steps: 10,000
+
+ The accuracy of the trained model on the masked language modeling task was 0.799.
+ The evaluation set consists of 5,000 randomly sampled documents from each of the training corpora.
+
+ ## Fine-tuning on NLU tasks
+
+ We fine-tuned the following models and evaluated them on the dev set of JGLUE.
+ We tuned the learning rate and number of training epochs for each model and task following [the JGLUE paper](https://www.jstage.jst.go.jp/article/jnlp/30/1/30_63/_pdf/-char/ja).
+
+ | Model | MARC-ja/acc | JSTS/pearson | JSTS/spearman | JNLI/acc | JSQuAD/EM | JSQuAD/F1 | JComQA/acc |
+ |-------------------------------|-------------|--------------|---------------|----------|-----------|-----------|------------|
+ | Waseda RoBERTa base | 0.965 | 0.913 | 0.876 | 0.905 | 0.853 | 0.916 | 0.853 |
+ | Waseda RoBERTa large (seq512) | 0.969 | 0.925 | 0.890 | 0.928 | 0.910 | 0.955 | 0.900 |
+ | LUKE Japanese base* | 0.965 | 0.916 | 0.877 | 0.912 | - | - | 0.842 |
+ | LUKE Japanese large* | 0.965 | 0.932 | 0.902 | 0.927 | - | - | 0.893 |
+ | DeBERTaV2 base | 0.970 | 0.922 | 0.886 | 0.922 | 0.899 | 0.951 | 0.873 |
+ | DeBERTaV2 large | 0.968 | 0.925 | 0.892 | 0.924 | 0.912 | 0.959 | 0.890 |
+
+ *The scores of LUKE are from [the official repository](https://github.com/studio-ousia/luke).
+
+ ## Acknowledgments
+
+ This work was supported by the Joint Usage/Research Center for Interdisciplinary Large-scale Information Infrastructures (JHPCN) through General Collaboration Project no. jh221004, "Developing a Platform for Constructing and Sharing of Large-Scale Japanese Language Models".
+ For training models, we used the mdx: a platform for the data-driven future.
bert/deberta-v2-large-japanese/config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "configs/deberta_v2_large.json",
+   "architectures": [
+     "DebertaV2ForMaskedLM"
+   ],
+   "attention_head_size": 64,
+   "attention_probs_dropout_prob": 0.1,
+   "conv_act": "gelu",
+   "conv_kernel_size": 3,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 1024,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.23.1",
+   "type_vocab_size": 0,
+   "vocab_size": 32000
+ }
bert/deberta-v2-large-japanese/special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
bert/deberta-v2-large-japanese/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert/deberta-v2-large-japanese/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "keep_accents": true,
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]"
+ }
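Together with `special_tokens_map.json`, this file tells transformers to construct a `DebertaV2Tokenizer` (case-sensitive, accents kept) with BERT-style special tokens. A minimal loading sketch, assuming the sentencepiece/tokenizer files sit alongside these configs:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./bert/deberta-v2-large-japanese")
enc = tok("こんにちは、世界。")
# The [CLS] ... [SEP] framing comes from the special-token maps above.
print(tok.convert_ids_to_tokens(enc["input_ids"]))
```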
bert/deberta-v3-large/.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/deberta-v3-large/README.md ADDED
@@ -0,0 +1,93 @@
+ ---
+ language: en
+ tags:
+ - deberta
+ - deberta-v3
+ - fill-mask
+ thumbnail: https://huggingface.co/front/thumbnails/microsoft.png
+ license: mit
+ ---
+
+ ## DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing
+
+ [DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and an enhanced mask decoder. With those two improvements, DeBERTa outperforms RoBERTa on a majority of NLU tasks with 80GB of training data.
+
+ In [DeBERTa V3](https://arxiv.org/abs/2111.09543), we further improved the efficiency of DeBERTa using ELECTRA-style pre-training with gradient-disentangled embedding sharing. Compared to DeBERTa, our V3 version significantly improves model performance on downstream tasks. You can find more technical details about the new model in our [paper](https://arxiv.org/abs/2111.09543).
+
+ Please check the [official repository](https://github.com/microsoft/DeBERTa) for more implementation details and updates.
+
+ The DeBERTa V3 large model comes with 24 layers and a hidden size of 1024. It has 304M backbone parameters and a vocabulary containing 128K tokens, which introduces 131M parameters in the embedding layer. This model was trained using the same 160GB data as DeBERTa V2.
+
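Only the configuration and tokenizer settings ship in this repo, so the quickest functional check is plain feature extraction with the upstream weights. A minimal sketch, assuming `microsoft/deberta-v3-large` (the same checkpoint the fine-tuning script below uses) can be downloaded:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-large")
model = AutoModel.from_pretrained("microsoft/deberta-v3-large").eval()

inputs = tokenizer(
    "DeBERTaV3 uses gradient-disentangled embedding sharing.", return_tensors="pt"
)
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # [1, seq_len, 1024], matching the hidden size above
```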
+ #### Fine-tuning on NLU tasks
+
+ We present the dev results on SQuAD 2.0 and MNLI tasks.
+
+ | Model                | Vocabulary(K) | Backbone #Params(M) | SQuAD 2.0(F1/EM) | MNLI-m/mm(ACC) |
+ |----------------------|---------------|---------------------|------------------|----------------|
+ | RoBERTa-large        | 50            | 304                 | 89.4/86.5        | 90.2           |
+ | XLNet-large          | 32            | -                   | 90.6/87.9        | 90.8           |
+ | DeBERTa-large        | 50            | -                   | 90.7/88.0        | 91.3           |
+ | **DeBERTa-v3-large** | 128           | 304                 | **91.5/89.0**    | **91.8/91.9**  |
+
+
+ #### Fine-tuning with HF transformers
+
+ ```bash
+ #!/bin/bash
+
+ cd transformers/examples/pytorch/text-classification/
+
+ pip install datasets
+ export TASK_NAME=mnli
+
+ output_dir="ds_results"
+
+ num_gpus=8
+
+ batch_size=8
+
+ python -m torch.distributed.launch --nproc_per_node=${num_gpus} \
+   run_glue.py \
+   --model_name_or_path microsoft/deberta-v3-large \
+   --task_name $TASK_NAME \
+   --do_train \
+   --do_eval \
+   --evaluation_strategy steps \
+   --max_seq_length 256 \
+   --warmup_steps 50 \
+   --per_device_train_batch_size ${batch_size} \
+   --learning_rate 6e-6 \
+   --num_train_epochs 2 \
+   --output_dir $output_dir \
+   --overwrite_output_dir \
+   --logging_steps 1000 \
+   --logging_dir $output_dir
+
+ ```
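Note that `torch.distributed.launch` is deprecated in current PyTorch releases. On a recent setup the launcher line can be swapped for its `torchrun` equivalent; the sketch below keeps a representative subset of the flags from the script above:

```bash
# Same script and flags; only the launcher changes.
torchrun --nproc_per_node=${num_gpus} \
  run_glue.py \
  --model_name_or_path microsoft/deberta-v3-large \
  --task_name $TASK_NAME \
  --do_train \
  --do_eval \
  --max_seq_length 256 \
  --per_device_train_batch_size ${batch_size} \
  --learning_rate 6e-6 \
  --num_train_epochs 2 \
  --output_dir $output_dir
```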
+
+ ### Citation
+
+ If you find DeBERTa useful for your work, please cite the following papers:
+
+ ```latex
+ @misc{he2021debertav3,
+     title={DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing},
+     author={Pengcheng He and Jianfeng Gao and Weizhu Chen},
+     year={2021},
+     eprint={2111.09543},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+
+ ```latex
+ @inproceedings{he2021deberta,
+     title={DEBERTA: DECODING-ENHANCED BERT WITH DISENTANGLED ATTENTION},
+     author={Pengcheng He and Xiaodong Liu and Jianfeng Gao and Weizhu Chen},
+     booktitle={International Conference on Learning Representations},
+     year={2021},
+     url={https://openreview.net/forum?id=XPZIaotutsD}
+ }
+ ```
bert/deberta-v3-large/config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "model_type": "deberta-v2",
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "max_position_embeddings": 512,
+   "relative_attention": true,
+   "position_buckets": 256,
+   "norm_rel_ebd": "layer_norm",
+   "share_att_key": true,
+   "pos_att_type": "p2c|c2p",
+   "layer_norm_eps": 1e-7,
+   "max_relative_positions": -1,
+   "position_biased_input": false,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
bert/deberta-v3-large/generator_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "model_type": "deberta-v2",
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "max_position_embeddings": 512,
+   "relative_attention": true,
+   "position_buckets": 256,
+   "norm_rel_ebd": "layer_norm",
+   "share_att_key": true,
+   "pos_att_type": "p2c|c2p",
+   "layer_norm_eps": 1e-7,
+   "max_relative_positions": -1,
+   "position_biased_input": false,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 12,
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
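This file differs from the `config.json` above only in depth: 12 hidden layers instead of 24. It is presumably the configuration of the ELECTRA-style MLM generator used during DeBERTaV3 pre-training, paired with the full-depth discriminator backbone. A small sketch to inspect both shapes directly from these files:

```python
from transformers import DebertaV2Config

disc_cfg = DebertaV2Config.from_json_file("./bert/deberta-v3-large/config.json")
gen_cfg = DebertaV2Config.from_json_file("./bert/deberta-v3-large/generator_config.json")
print(disc_cfg.num_hidden_layers, gen_cfg.num_hidden_layers)  # 24 vs. 12
```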
bert/deberta-v3-large/tokenizer_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "do_lower_case": false,
+   "vocab_type": "spm"
+ }
bert_gen.py ADDED
@@ -0,0 +1,81 @@
+ import torch
+ from multiprocessing import Pool
+ import commons
+ import utils
+ from tqdm import tqdm
+ from text import check_bert_models, cleaned_text_to_sequence, get_bert
+ import argparse
+ import torch.multiprocessing as mp
+ from config import config
+
+
+ def process_line(x):
+     line, add_blank = x
+     device = config.bert_gen_config.device
+     if config.bert_gen_config.use_multi_device:
+         # Spread workers across GPUs based on this process's pool rank.
+         rank = mp.current_process()._identity
+         rank = rank[0] if len(rank) > 0 else 0
+         if torch.cuda.is_available():
+             gpu_id = rank % torch.cuda.device_count()
+             device = torch.device(f"cuda:{gpu_id}")
+         else:
+             device = torch.device("cpu")
+     wav_path, _, language_str, text, phones, tone, word2ph = line.strip().split("|")
+     phone = phones.split(" ")
+     tone = [int(i) for i in tone.split(" ")]
+     word2ph = [int(i) for i in word2ph.split(" ")]
+     phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
+
+     if add_blank:
+         # A blank token is interspersed between symbols, so each word now
+         # covers twice as many phone positions (plus one leading blank).
+         phone = commons.intersperse(phone, 0)
+         tone = commons.intersperse(tone, 0)
+         language = commons.intersperse(language, 0)
+         for i in range(len(word2ph)):
+             word2ph[i] = word2ph[i] * 2
+         word2ph[0] += 1
+
+     bert_path = wav_path.replace(".WAV", ".wav").replace(".wav", ".bert.pt")
+
+     try:
+         # Reuse a cached feature if it exists and matches the phone length.
+         bert = torch.load(bert_path)
+         assert bert.shape[-1] == len(phone)
+     except Exception:
+         bert = get_bert(text, word2ph, language_str, device)
+         assert bert.shape[-1] == len(phone)
+         torch.save(bert, bert_path)
+
+
+ preprocess_text_config = config.preprocess_text_config
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "-c", "--config", type=str, default=config.bert_gen_config.config_path
+     )
+     parser.add_argument(
+         "--num_processes", type=int, default=config.bert_gen_config.num_processes
+     )
+     args, _ = parser.parse_known_args()
+     config_path = args.config
+     hps = utils.get_hparams_from_file(config_path)
+     check_bert_models()
+     lines = []
+     with open(hps.data.training_files, encoding="utf-8") as f:
+         lines.extend(f.readlines())
+
+     with open(hps.data.validation_files, encoding="utf-8") as f:
+         lines.extend(f.readlines())
+     add_blank = [hps.data.add_blank] * len(lines)
+
+     if len(lines) != 0:
+         num_processes = args.num_processes
+         with Pool(processes=num_processes) as pool:
+             for _ in tqdm(
+                 pool.imap_unordered(process_line, zip(lines, add_blank)),
+                 total=len(lines),
+             ):
+                 pass  # the loop only drains the pool; tqdm tracks progress
+
+     print(f"bert generation finished, {len(lines)} bert.pt files generated!")
clap_gen.py ADDED
@@ -0,0 +1,64 @@
+ import argparse
+ from multiprocessing import Pool, cpu_count
+
+ import torch
+ import torch.multiprocessing as mp
+ from tqdm import tqdm
+
+ import utils
+ from config import config
+ from clap_wrapper import get_clap_audio_feature
+ import librosa
+ import os
+
+ os.environ["OMP_NUM_THREADS"] = "1"
+ os.environ["MKL_NUM_THREADS"] = "1"
+
+
+ def process_line(line):
+     device = config.emo_gen_config.device
+     if config.emo_gen_config.use_multi_device:
+         rank = mp.current_process()._identity
+         rank = rank[0] if len(rank) > 0 else 0
+         if torch.cuda.is_available():
+             gpu_id = rank % torch.cuda.device_count()
+             device = torch.device(f"cuda:{gpu_id}")
+         else:
+             device = torch.device("cpu")
+     wav_path, _, language_str, text, phones, tone, word2ph = line.strip().split("|")
+
+     clap_path = wav_path.replace(".WAV", ".wav").replace(".wav", ".emo.pt")
+     if os.path.isfile(clap_path):
+         return
+
+     # librosa >= 0.10 requires the sampling rate as a keyword argument;
+     # loading with sr=48000 also resamples to CLAP's expected rate.
+     audio = librosa.load(wav_path, sr=48000)[0]
+
+     clap = get_clap_audio_feature(audio, device)
+     torch.save(clap, clap_path)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "-c", "--config", type=str, default=config.emo_gen_config.config_path
+     )
+     parser.add_argument(
+         "--num_processes", type=int, default=config.emo_gen_config.num_processes
+     )
+     args, _ = parser.parse_known_args()
+     config_path = args.config
+     hps = utils.get_hparams_from_file(config_path)
+     lines = []
+     with open(hps.data.training_files, encoding="utf-8") as f:
+         lines.extend(f.readlines())
+
+     with open(hps.data.validation_files, encoding="utf-8") as f:
+         lines.extend(f.readlines())
+     if len(lines) != 0:
+         num_processes = min(args.num_processes, cpu_count())
+         with Pool(processes=num_processes) as pool:
+             for _ in tqdm(pool.imap_unordered(process_line, lines), total=len(lines)):
+                 pass
+
+     print(f"clap generation finished, {len(lines)} emo.pt files generated!")
clap_wrapper.py ADDED
@@ -0,0 +1,49 @@
+ import sys
+
+ import torch
+ from transformers import ClapModel, ClapProcessor
+
+ from config import config
+
+ # One ClapModel instance is cached per device; the processor is shared.
+ models = dict()
+ processor = ClapProcessor.from_pretrained("./emotional/clap-htsat-fused")
+
+
+ def get_clap_audio_feature(audio_data, device=config.bert_gen_config.device):
+     if (
+         sys.platform == "darwin"
+         and torch.backends.mps.is_available()
+         and device == "cpu"
+     ):
+         device = "mps"
+     if not device:
+         device = "cuda"
+     if device not in models:
+         models[device] = ClapModel.from_pretrained("./emotional/clap-htsat-fused").to(
+             device
+         )
+     with torch.no_grad():
+         inputs = processor(
+             audios=audio_data, return_tensors="pt", sampling_rate=48000
+         ).to(device)
+         emb = models[device].get_audio_features(**inputs)
+     return emb.T
+
+
+ def get_clap_text_feature(text, device=config.bert_gen_config.device):
+     if (
+         sys.platform == "darwin"
+         and torch.backends.mps.is_available()
+         and device == "cpu"
+     ):
+         device = "mps"
+     if not device:
+         device = "cuda"
+     if device not in models:
+         models[device] = ClapModel.from_pretrained("./emotional/clap-htsat-fused").to(
+             device
+         )
+     with torch.no_grad():
+         inputs = processor(text=text, return_tensors="pt").to(device)
+         emb = models[device].get_text_features(**inputs)
+     return emb.T
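A minimal sketch of calling the two helpers directly, assuming the CLAP checkpoint has been placed under `./emotional/clap-htsat-fused` and a 48 kHz waveform is available (the file name and the 512-dim projection size are assumptions based on the clap-htsat-fused checkpoint):

```python
import librosa
from clap_wrapper import get_clap_audio_feature, get_clap_text_feature

audio, _ = librosa.load("sample.wav", sr=48000)           # example file
audio_emb = get_clap_audio_feature(audio, "cpu")          # expected shape: [512, 1]
text_emb = get_clap_text_feature("a happy voice", "cpu")  # expected shape: [512, 1]
print(audio_emb.shape, text_emb.shape)
```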
commons.py ADDED
@@ -0,0 +1,158 @@
+ import math
+ import torch
+ from torch.nn import functional as F
+
+
+ def init_weights(m, mean=0.0, std=0.01):
+     classname = m.__class__.__name__
+     if classname.find("Conv") != -1:
+         m.weight.data.normal_(mean, std)
+
+
+ def get_padding(kernel_size, dilation=1):
+     return int((kernel_size * dilation - dilation) / 2)
+
+
+ def convert_pad_shape(pad_shape):
+     layer = pad_shape[::-1]
+     pad_shape = [item for sublist in layer for item in sublist]
+     return pad_shape
+
+
+ def intersperse(lst, item):
+     result = [item] * (len(lst) * 2 + 1)
+     result[1::2] = lst
+     return result
+
+
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
+     """KL(P||Q)"""
+     kl = (logs_q - logs_p) - 0.5
+     kl += (
+         0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+     )
+     return kl
+
+
+ def rand_gumbel(shape):
+     """Sample from the Gumbel distribution, protect from overflows."""
+     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+     return -torch.log(-torch.log(uniform_samples))
+
+
+ def rand_gumbel_like(x):
+     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+     return g
+
+
+ def slice_segments(x, ids_str, segment_size=4):
+     gather_indices = ids_str.view(x.size(0), 1, 1).repeat(
+         1, x.size(1), 1
+     ) + torch.arange(segment_size, device=x.device)
+     return torch.gather(x, 2, gather_indices)
+
+
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
+     b, d, t = x.size()
+     if x_lengths is None:
+         x_lengths = t
+     ids_str_max = torch.clamp(x_lengths - segment_size + 1, min=0)
+     ids_str = (torch.rand([b], device=x.device) * ids_str_max).to(dtype=torch.long)
+     ret = slice_segments(x, ids_str, segment_size)
+     return ret, ids_str
+
+
+ def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+     position = torch.arange(length, dtype=torch.float)
+     num_timescales = channels // 2
+     log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
+         num_timescales - 1
+     )
+     inv_timescales = min_timescale * torch.exp(
+         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
+     )
+     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+     signal = F.pad(signal, [0, 0, 0, channels % 2])
+     signal = signal.view(1, channels, length)
+     return signal
+
+
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+ def subsequent_mask(length):
+     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+     return mask
+
+
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     acts = t_act * s_act
+     return acts
+
+
+ def shift_1d(x):
+     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+     return x
+
+
+ def sequence_mask(length, max_length=None):
+     if max_length is None:
+         max_length = length.max()
+     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+     return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+ def generate_path(duration, mask):
+     """
+     duration: [b, 1, t_x]
+     mask: [b, 1, t_y, t_x]
+     """
+
+     b, _, t_y, t_x = mask.shape
+     cum_duration = torch.cumsum(duration, -1)
+
+     cum_duration_flat = cum_duration.view(b * t_x)
+     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+     path = path.view(b, t_x, t_y)
+     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+     path = path.unsqueeze(1).transpose(2, 3) * mask
+     return path
+
+
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
+     if isinstance(parameters, torch.Tensor):
+         parameters = [parameters]
+     parameters = list(filter(lambda p: p.grad is not None, parameters))
+     norm_type = float(norm_type)
+     if clip_value is not None:
+         clip_value = float(clip_value)
+
+     total_norm = 0
+     for p in parameters:
+         param_norm = p.grad.data.norm(norm_type)
+         total_norm += param_norm.item() ** norm_type
+         if clip_value is not None:
+             p.grad.data.clamp_(min=-clip_value, max=clip_value)
+     total_norm = total_norm ** (1.0 / norm_type)
+     return total_norm
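A toy walk-through of the three helpers that define the text-to-audio alignment contract in this codebase (shapes are illustrative):

```python
import torch
import commons

# intersperse: insert blank token 0 between symbol ids.
print(commons.intersperse([3, 5, 7], 0))  # [0, 3, 0, 5, 0, 7, 0]

# sequence_mask: boolean mask [b, max_len] from per-item lengths.
print(commons.sequence_mask(torch.tensor([2, 4])))  # shape [2, 4]

# generate_path: expand per-symbol durations into a monotonic alignment.
duration = torch.tensor([[[2, 1, 3]]])  # [b=1, 1, t_x=3], durations sum to t_y=6
mask = torch.ones(1, 1, 6, 3)           # [b, 1, t_y, t_x]
print(commons.generate_path(duration, mask).squeeze())  # 6x3 0/1 alignment matrix
```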
compress_model.py ADDED
@@ -0,0 +1,89 @@
+ from collections import OrderedDict
+ from text.symbols import symbols
+ import torch
+
+ from tools.log import logger
+ import utils
+ from models import SynthesizerTrn
+ import os
+
+
+ def copyStateDict(state_dict):
+     # Strip a leading "module." prefix (added by DataParallel) from checkpoint keys.
+     if list(state_dict.keys())[0].startswith("module"):
+         start_idx = 1
+     else:
+         start_idx = 0
+     new_state_dict = OrderedDict()
+     for k, v in state_dict.items():
+         name = ".".join(k.split(".")[start_idx:])
+         new_state_dict[name] = v
+     return new_state_dict
+
+
+ def removeOptimizer(config: str, input_model: str, ishalf: bool, output_model: str):
+     hps = utils.get_hparams_from_file(config)
+
+     net_g = SynthesizerTrn(
+         len(symbols),
+         hps.data.filter_length // 2 + 1,
+         hps.train.segment_size // hps.data.hop_length,
+         n_speakers=hps.data.n_speakers,
+         **hps.model,
+     )
+
+     optim_g = torch.optim.AdamW(
+         net_g.parameters(),
+         hps.train.learning_rate,
+         betas=hps.train.betas,
+         eps=hps.train.eps,
+     )
+
+     state_dict_g = torch.load(input_model, map_location="cpu")
+     new_dict_g = copyStateDict(state_dict_g)
+     keys = []
+     for k, v in new_dict_g["model"].items():
+         if "enc_q" in k:
+             continue  # the posterior encoder is only needed during training
+         keys.append(k)
+
+     new_dict_g = (
+         {k: new_dict_g["model"][k].half() for k in keys}
+         if ishalf
+         else {k: new_dict_g["model"][k] for k in keys}
+     )
+
+     torch.save(
+         {
+             "model": new_dict_g,
+             "iteration": 0,
+             "optimizer": optim_g.state_dict(),
+             "learning_rate": 0.0001,
+         },
+         output_model,
+     )
+
+
+ if __name__ == "__main__":
+     import argparse
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("-c", "--config", type=str, default="configs/config.json")
+     parser.add_argument("-i", "--input", type=str)
+     parser.add_argument("-o", "--output", type=str, default=None)
+     parser.add_argument(
+         "-hf", "--half", action="store_true", default=False, help="Save as FP16"
+     )
+
+     args = parser.parse_args()
+
+     output = args.output
+
+     if output is None:
+         filename, ext = os.path.splitext(args.input)
+         half = "_half" if args.half else ""
+         output = filename + "_release" + half + ext
+
+     removeOptimizer(args.config, args.input, args.half, output)
+     logger.info(f"Model compressed successfully. Output model: {os.path.abspath(output)}")
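Example invocation (file names are illustrative):

```bash
# Strip optimizer state (and the training-only enc_q weights) from a checkpoint,
# saving FP16 weights; the output defaults to <input>_release_half.pth.
python compress_model.py -c configs/config.json -i logs/your_model/G_100000.pth -hf
```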
config.py ADDED
@@ -0,0 +1,248 @@
+ """
+ @Desc: reads the global configuration file
+ """
+ import argparse
+ import yaml
+ from typing import Any, Dict, List
+ import os
+ import shutil
+ import sys
+
+
+ class Resample_config:
+     """Resampling configuration"""
+
+     def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):
+         self.sampling_rate: int = sampling_rate  # target sampling rate
+         self.in_dir: str = in_dir  # directory of the audio files to process
+         self.out_dir: str = out_dir  # output directory for resampled audio
+
+     @classmethod
+     def from_dict(cls, dataset_path: str, data: Dict[str, Any]):
+         """Build an instance from a dict"""
+
+         # Paths are not validated here; that logic lives in resample.py.
+         data["in_dir"] = os.path.join(dataset_path, data["in_dir"])
+         data["out_dir"] = os.path.join(dataset_path, data["out_dir"])
+
+         return cls(**data)
+
+
+ class Preprocess_text_config:
+     """Text preprocessing configuration"""
+
+     def __init__(
+         self,
+         transcription_path: str,
+         cleaned_path: str,
+         train_path: str,
+         val_path: str,
+         config_path: str,
+         val_per_lang: int = 5,
+         max_val_total: int = 10000,
+         clean: bool = True,
+     ):
+         self.transcription_path: str = transcription_path  # raw transcription file; each line is {wav_path}|{speaker_name}|{language}|{text}
+         self.cleaned_path: str = cleaned_path  # cleaned text path; optional, generated next to the raw text if empty
+         self.train_path: str = train_path  # training list path; optional, generated next to the raw text if empty
+         self.val_path: str = val_path  # validation list path; optional, generated next to the raw text if empty
+         self.config_path: str = config_path  # config file path
+         self.val_per_lang: int = val_per_lang  # number of validation entries per speaker
+         self.max_val_total: int = max_val_total  # max validation entries; the excess is moved back into the training set
+         self.clean: bool = clean  # whether to clean the data
+
+     @classmethod
+     def from_dict(cls, dataset_path: str, data: Dict[str, Any]):
+         """Build an instance from a dict"""
+
+         data["transcription_path"] = os.path.join(
+             dataset_path, data["transcription_path"]
+         )
+         if data["cleaned_path"] == "" or data["cleaned_path"] is None:
+             data["cleaned_path"] = None
+         else:
+             data["cleaned_path"] = os.path.join(dataset_path, data["cleaned_path"])
+         data["train_path"] = os.path.join(dataset_path, data["train_path"])
+         data["val_path"] = os.path.join(dataset_path, data["val_path"])
+         data["config_path"] = os.path.join(dataset_path, data["config_path"])
+
+         return cls(**data)
+
+
+ class Bert_gen_config:
+     """bert_gen configuration"""
+
+     def __init__(
+         self,
+         config_path: str,
+         num_processes: int = 2,
+         device: str = "cuda",
+         use_multi_device: bool = False,
+     ):
+         self.config_path = config_path
+         self.num_processes = num_processes
+         self.device = device
+         self.use_multi_device = use_multi_device
+
+     @classmethod
+     def from_dict(cls, dataset_path: str, data: Dict[str, Any]):
+         data["config_path"] = os.path.join(dataset_path, data["config_path"])
+
+         return cls(**data)
+
+
+ class Emo_gen_config:
+     """emo_gen configuration"""
+
+     def __init__(
+         self,
+         config_path: str,
+         num_processes: int = 2,
+         device: str = "cuda",
+         use_multi_device: bool = False,
+     ):
+         self.config_path = config_path
+         self.num_processes = num_processes
+         self.device = device
+         self.use_multi_device = use_multi_device
+
+     @classmethod
+     def from_dict(cls, dataset_path: str, data: Dict[str, Any]):
+         data["config_path"] = os.path.join(dataset_path, data["config_path"])
+
+         return cls(**data)
+
+
+ class Train_ms_config:
+     """Training configuration"""
+
+     def __init__(
+         self,
+         config_path: str,
+         env: Dict[str, Any],
+         base: Dict[str, Any],
+         model: str,
+         num_workers: int,
+         spec_cache: bool,
+         keep_ckpts: int,
+     ):
+         self.env = env  # environment variables to load
+         self.base = base  # base-model configuration
+         self.model = model  # model output directory, relative to dataset_path rather than the project root
+         self.config_path = config_path  # config file path
+         self.num_workers = num_workers  # number of workers
+         self.spec_cache = spec_cache  # whether to enable the spec cache
+         self.keep_ckpts = keep_ckpts  # number of checkpoints to keep
+
+     @classmethod
+     def from_dict(cls, dataset_path: str, data: Dict[str, Any]):
+         # data["model"] = os.path.join(dataset_path, data["model"])
+         data["config_path"] = os.path.join(dataset_path, data["config_path"])
+
+         return cls(**data)
+
+
+ class Webui_config:
+     """webui configuration"""
+
+     def __init__(
+         self,
+         device: str,
+         model: str,
+         config_path: str,
+         language_identification_library: str,
+         port: int = 7860,
+         share: bool = False,
+         debug: bool = False,
+     ):
+         self.device: str = device
+         self.model: str = model  # model path
+         self.config_path: str = config_path  # config file path
+         self.port: int = port  # port number
+         self.share: bool = share  # whether to deploy publicly (expose to the public internet)
+         self.debug: bool = debug  # whether to enable debug mode
+         self.language_identification_library: str = (
+             language_identification_library  # language identification library
+         )
+
+     @classmethod
+     def from_dict(cls, dataset_path: str, data: Dict[str, Any]):
+         data["config_path"] = os.path.join(dataset_path, data["config_path"])
+         data["model"] = os.path.join(dataset_path, data["model"])
+         return cls(**data)
+
+
+ class Server_config:
+     def __init__(
+         self, models: List[Dict[str, Any]], port: int = 5000, device: str = "cuda"
+     ):
+         self.models: List[Dict[str, Any]] = models  # configurations of all models to load
+         self.port: int = port  # port number
+         self.device: str = device  # default device for the models
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]):
+         return cls(**data)
+
+
+ class Translate_config:
+     """Translation API configuration"""
+
+     def __init__(self, app_key: str, secret_key: str):
+         self.app_key = app_key
+         self.secret_key = secret_key
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]):
+         return cls(**data)
+
+
+ class Config:
+     def __init__(self, config_path: str):
+         if not os.path.isfile(config_path) and os.path.isfile("default_config.yml"):
+             shutil.copy(src="default_config.yml", dst=config_path)
+             print(
+                 f"A config file {config_path} has been generated from default_config.yml. "
+                 "Please edit it as described in its comments, then run again."
+             )
+             print(
+                 "Unless you have a special need, do not modify default_config.yml; keep it as a backup."
+             )
+             sys.exit(0)
+         with open(file=config_path, mode="r", encoding="utf-8") as file:
+             yaml_config: Dict[str, Any] = yaml.safe_load(file.read())
+             dataset_path: str = yaml_config["dataset_path"]
+             openi_token: str = yaml_config["openi_token"]
+             self.dataset_path: str = dataset_path
+             self.mirror: str = yaml_config["mirror"]
+             self.openi_token: str = openi_token
+             self.resample_config: Resample_config = Resample_config.from_dict(
+                 dataset_path, yaml_config["resample"]
+             )
+             self.preprocess_text_config: Preprocess_text_config = (
+                 Preprocess_text_config.from_dict(
+                     dataset_path, yaml_config["preprocess_text"]
+                 )
+             )
+             self.bert_gen_config: Bert_gen_config = Bert_gen_config.from_dict(
+                 dataset_path, yaml_config["bert_gen"]
+             )
+             self.emo_gen_config: Emo_gen_config = Emo_gen_config.from_dict(
+                 dataset_path, yaml_config["emo_gen"]
+             )
+             self.train_ms_config: Train_ms_config = Train_ms_config.from_dict(
+                 dataset_path, yaml_config["train_ms"]
+             )
+             self.webui_config: Webui_config = Webui_config.from_dict(
+                 dataset_path, yaml_config["webui"]
+             )
+             self.server_config: Server_config = Server_config.from_dict(
+                 yaml_config["server"]
+             )
+             self.translate_config: Translate_config = Translate_config.from_dict(
+                 yaml_config["translate"]
+             )
+
+
+ parser = argparse.ArgumentParser()
+ # Renamed to -y/--yml_config to avoid clashing with the older config.json option.
+ parser.add_argument("-y", "--yml_config", type=str, default="config.yml")
+ args, _ = parser.parse_known_args()
+ config = Config(args.yml_config)
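Because the module instantiates `Config` at import time, every script in the repo shares one configuration object. A small sketch of reading it:

```python
# Parses -y/--yml_config (default: config.yml) and loads the YAML once at import.
from config import config

print(config.dataset_path)
print(config.bert_gen_config.device)  # e.g. "cuda"
print(config.webui_config.port)       # default 7860
```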