aoxiang1221 committed
Commit 7613654 (1 parent: 5140b73)
This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. .gitignore +9 -0
  2. Dockerfile +40 -0
  3. Dockerfile_GPU +40 -0
  4. LICENSE +661 -0
  5. README.md +436 -12
  6. README_zh.md +433 -0
  7. api_test.py +429 -0
  8. app.py +544 -0
  9. bert_vits2/LICENSE +674 -0
  10. bert_vits2/README.md +5 -0
  11. bert_vits2/__init__.py +2 -0
  12. bert_vits2/attentions.py +352 -0
  13. bert_vits2/bert/bert-base-japanese-v3/README.md +53 -0
  14. bert_vits2/bert/bert-base-japanese-v3/config.json +19 -0
  15. bert_vits2/bert/bert-base-japanese-v3/vocab.txt +0 -0
  16. bert_vits2/bert/chinese-roberta-wwm-ext-large/.gitattributes +9 -0
  17. bert_vits2/bert/chinese-roberta-wwm-ext-large/README.md +57 -0
  18. bert_vits2/bert/chinese-roberta-wwm-ext-large/added_tokens.json +1 -0
  19. bert_vits2/bert/chinese-roberta-wwm-ext-large/config.json +28 -0
  20. bert_vits2/bert/chinese-roberta-wwm-ext-large/special_tokens_map.json +1 -0
  21. bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer.json +0 -0
  22. bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer_config.json +1 -0
  23. bert_vits2/bert/chinese-roberta-wwm-ext-large/vocab.txt +0 -0
  24. bert_vits2/bert_vits2.py +118 -0
  25. bert_vits2/commons.py +161 -0
  26. bert_vits2/models.py +686 -0
  27. bert_vits2/modules.py +459 -0
  28. bert_vits2/requirements.txt +15 -0
  29. bert_vits2/text/__init__.py +25 -0
  30. bert_vits2/text/chinese.py +196 -0
  31. bert_vits2/text/chinese_bert.py +70 -0
  32. bert_vits2/text/cleaner.py +30 -0
  33. bert_vits2/text/cmudict.rep +0 -0
  34. bert_vits2/text/cmudict_cache.pickle +3 -0
  35. bert_vits2/text/english.py +146 -0
  36. bert_vits2/text/english_bert_mock.py +5 -0
  37. bert_vits2/text/japanese.py +585 -0
  38. bert_vits2/text/japanese_bert.py +47 -0
  39. bert_vits2/text/opencpop-strict.txt +429 -0
  40. bert_vits2/text/symbols.py +198 -0
  41. bert_vits2/text/tone_sandhi.py +769 -0
  42. bert_vits2/transforms.py +192 -0
  43. bert_vits2/utils.py +67 -0
  44. config.py +107 -0
  45. docker-compose-gpu.yaml +26 -0
  46. docker-compose.yaml +18 -0
  47. gunicorn_config.py +19 -0
  48. logger.py +42 -0
  49. requirements.txt +50 -0
  50. static/css/bootstrap.min.css +0 -0
.gitignore ADDED
@@ -0,0 +1,9 @@
+ **/__pycache__
+ /Model/
+ /logs/
+ /cache/
+ /upload/
+ /vits/text/chinese_dialect_lexicons/
+ /bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin
+ /bert_vits2/bert/bert-base-japanese-v3/pytorch_model.bin
+ /vits/bert/prosody_model.pt
Dockerfile ADDED
@@ -0,0 +1,40 @@
+ FROM python:3.10.11-slim-bullseye
+
+ RUN mkdir -p /app
+ WORKDIR /app
+
+ ENV DEBIAN_FRONTEND=noninteractive
+
+ RUN apt-get update && \
+     apt-get install -yq build-essential espeak-ng cmake wget && \
+     apt-get clean && \
+     apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Install jemalloc
+ RUN wget https://github.com/jemalloc/jemalloc/releases/download/5.3.0/jemalloc-5.3.0.tar.bz2 && \
+     tar -xvf jemalloc-5.3.0.tar.bz2 && \
+     cd jemalloc-5.3.0 && \
+     ./configure && \
+     make && \
+     make install && \
+     cd .. && \
+     rm -rf jemalloc-5.3.0* && \
+     ldconfig
+
+ ENV LD_PRELOAD=/usr/local/lib/libjemalloc.so
+
+ RUN pip install torch --index-url https://download.pytorch.org/whl/cpu --no-cache-dir
+
+ COPY requirements.txt /app/
+ RUN pip install --upgrade pip && \
+     pip install pyopenjtalk==0.3.2 -i https://pypi.artrajz.cn/simple --no-cache-dir && \
+     pip install gunicorn --no-cache-dir && \
+     pip install -r requirements.txt --no-cache-dir && \
+     rm -rf /root/.cache/pip/*
+
+ COPY . /app
+
+ EXPOSE 23456
+
+ CMD ["gunicorn", "-c", "gunicorn_config.py", "app:app"]
Dockerfile_GPU ADDED
@@ -0,0 +1,40 @@
+ FROM cnstark/pytorch:2.0.1-py3.10.11-cuda11.8.0-ubuntu22.04
+
+ RUN mkdir -p /app
+ WORKDIR /app
+
+ ENV DEBIAN_FRONTEND=noninteractive
+
+ RUN apt-get update && \
+     apt-get install -yq build-essential espeak-ng cmake wget ca-certificates && \
+     update-ca-certificates && \
+     apt-get clean && \
+     apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Install jemalloc
+ RUN wget https://github.com/jemalloc/jemalloc/releases/download/5.3.0/jemalloc-5.3.0.tar.bz2 && \
+     tar -xvf jemalloc-5.3.0.tar.bz2 && \
+     cd jemalloc-5.3.0 && \
+     ./configure && \
+     make && \
+     make install && \
+     cd .. && \
+     rm -rf jemalloc-5.3.0* && \
+     ldconfig
+
+ ENV LD_PRELOAD=/usr/local/lib/libjemalloc.so
+
+ COPY requirements.txt /app/
+ RUN pip install --upgrade pip && \
+     pip install pyopenjtalk==0.3.2 fasttext -i https://pypi.artrajz.cn/simple --no-cache-dir && \
+     pip install gunicorn --no-cache-dir && \
+     pip install -r requirements.txt --no-cache-dir && \
+     rm -rf /root/.cache/pip/*
+
+ COPY . /app
+
+ EXPOSE 23456
+
+ CMD ["gunicorn", "-c", "gunicorn_config.py", "app:app"]
LICENSE ADDED
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+ software and other kinds of works, specifically designed to ensure
+ cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ our General Public Licenses are intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+ with two steps: (1) assert copyright on the software, and (2) offer
+ you this License which gives you legal permission to copy, distribute
+ and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+ improvements made in alternate versions of the program, if they
+ receive widespread use, become available for other developers to
+ incorporate. Many developers of free software are heartened and
+ encouraged by the resulting cooperation. However, in the case of
+ software used on network servers, this result may fail to come about.
+ The GNU General Public License permits making a modified version and
+ letting the public access it on a server without ever releasing its
+ source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ ensure that, in such cases, the modified source code becomes available
+ to the community. It requires the operator of a network server to
+ provide the source code of the modified version running there to the
+ users of that server. Therefore, public use of a modified version, on
+ a publicly accessible server, gives the public access to the source
+ code of the modified version.
+
+ An older license, called the Affero General Public License and
+ published by Affero, was designed to accomplish similar goals. This is
+ a different license, not a version of the Affero GPL, but Affero has
+ released a new version of the Affero GPL which permits relicensing under
+ this license.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+ Program, your modified version must prominently offer all users
+ interacting with it remotely through a computer network (if your version
+ supports such interaction) an opportunity to receive the Corresponding
+ Source of your version by providing access to the Corresponding Source
+ from a network server at no charge, through some standard or customary
+ means of facilitating copying of software. This Corresponding Source
+ shall include the Corresponding Source for any work covered by version 3
+ of the GNU General Public License that is incorporated pursuant to the
+ following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the work with which it is combined will remain governed by version
+ 3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU Affero General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU Affero General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU Affero General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU Affero General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+ network, you should also make sure that it provides a way for users to
+ get its source. For example, if your program is a web application, its
+ interface could display a "Source" link that leads users to an archive
+ of the code. There are many ways you could offer source, and different
+ solutions will be better for different programs; see section 13 for the
+ specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU AGPL, see
+ <https://www.gnu.org/licenses/>.
README.md CHANGED
@@ -1,12 +1,436 @@
- ---
- title: Vits Simple Api
- emoji: 📚
- colorFrom: yellow
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.46.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ <div class="title" align=center>
+     <h1>vits-simple-api</h1>
+     <div>Simply call the vits api</div>
+     <br/>
+     <br/>
+     <p>
+     <img src="https://img.shields.io/github/license/Artrajz/vits-simple-api">
+     <img src="https://img.shields.io/badge/python-3.10-green">
+     <a href="https://hub.docker.com/r/artrajz/vits-simple-api">
+         <img src="https://img.shields.io/docker/pulls/artrajz/vits-simple-api"></a>
+     </p>
+     <a href="https://github.com/Artrajz/vits-simple-api/blob/main/README.md">English</a>|<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README_zh.md">中文文档</a>
+     <br/>
+ </div>
+
+ # Features
+
+ - [x] VITS text-to-speech and voice conversion
+ - [x] HuBert-soft VITS
+ - [x] [vits_chinese](https://github.com/PlayVoice/vits_chinese)
+ - [x] [Bert-VITS2](https://github.com/Stardust-minus/Bert-VITS2)
+ - [x] W2V2 VITS / [emotional-vits](https://github.com/innnky/emotional-vits) dimensional emotion model
+ - [x] Support for loading multiple models
+ - [x] Automatic language recognition and processing; the language detection scope is set according to the model's cleaner, with support for a custom language range
+ - [x] Customizable default parameters
+ - [x] Batch processing of long text
+ - [x] GPU-accelerated inference
+ - [x] SSML (Speech Synthesis Markup Language) work in progress...
+
+ ## Demo
+
+ [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Artrajz/vits-simple-api)
+
+ Please note that different IDs may support different languages. [speakers](https://artrajz-vits-simple-api.hf.space/voice/speakers)
+
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=你好,こんにちは&id=164`
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=Difficult the first time, easy the second.&id=4`
+ - excited: `https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=111`
+ - whispered: `https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=2077`
+
+ https://user-images.githubusercontent.com/73542220/237995061-c1f25b4e-dd86-438a-9363-4bb1fe65b425.mov
+
+ # Deploy
+
+ ## Docker (recommended for Linux)
+
+ ### Docker image pull script
+
+ ```
+ bash -c "$(wget -O- https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/vits-simple-api-installer-latest.sh)"
+ ```
+
+ - The platforms currently supported by the Docker images are `linux/amd64` and `linux/arm64` (arm64 has a CPU version only).
+ - After a successful pull, a VITS model needs to be imported before use. Please follow the steps below to import a model.
+
+ ### Download VITS model
+
+ Put the model into `/usr/local/vits-simple-api/Model`
+
+ <details><summary>Folder structure</summary><pre><code>
+ │  hubert-soft-0d54a1f4.pt
+ │  model.onnx
+ │  model.yaml
+
+ ├─g
+ │    config.json
+ │    G_953000.pth
+
+ ├─louise
+ │    360_epochs.pth
+ │    config.json
+
+ ├─Nene_Nanami_Rong_Tang
+ │    1374_epochs.pth
+ │    config.json
+
+ ├─Zero_no_tsukaima
+ │    1158_epochs.pth
+ │    config.json
+
+ └─npy
+      25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
+      all_emotions.npy
+ </code></pre></details>
+
+ ### Modify model path
+
+ Modify in `/usr/local/vits-simple-api/config.py`
+
+ <details><summary>config.py</summary><pre><code>
+ # Fill in the model paths here
+ MODEL_LIST = [
+     # VITS
+     [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
+     [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
+     [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
+     # HuBert-VITS (requires HUBERT_SOFT_MODEL to be configured)
+     [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
+     # W2V2-VITS (requires DIMENSIONAL_EMOTION_NPY to be configured)
+     [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
+ ]
+ # hubert-vits: hubert soft model
+ HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
+ # w2v2-vits: dimensional emotion npy file(s)
+ # load a single npy: ABS_PATH + "/all_emotions.npy"
+ # load multiple npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
+ # load multiple npy from a folder: ABS_PATH + "/Model/npy"
+ DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
+ # w2v2-vits: both `model.onnx` and `model.yaml` must be in the same path
+ DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
+ </code></pre></details>
+
+ ### Startup
+
+ `docker compose up -d`
+
+ Or execute the pull script again.
+
+ ### Image update
+
+ Run the Docker image pull script again.
+
+ ## Virtual environment deployment
+
+ ### Clone
+
+ `git clone https://github.com/Artrajz/vits-simple-api.git`
+
+ ### Download Python dependencies
+
+ A Python virtual environment is recommended.
+
+ `pip install -r requirements.txt`
+
+ Fasttext may fail to install on Windows. You can install it with the following command, or download a wheel [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#fasttext).
+
+ ```
+ # python3.10 win_amd64
+ pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp310-cp310-win_amd64.whl
+ ```
+
+ ### Download VITS model
+
+ Put the model into `/path/to/vits-simple-api/Model`
+
+ <details><summary>Folder structure</summary><pre><code>
+ │  hubert-soft-0d54a1f4.pt
+ │  model.onnx
+ │  model.yaml
+
+ ├─g
+ │    config.json
+ │    G_953000.pth
+
+ ├─louise
+ │    360_epochs.pth
+ │    config.json
+
+ ├─Nene_Nanami_Rong_Tang
+ │    1374_epochs.pth
+ │    config.json
+
+ ├─Zero_no_tsukaima
+ │    1158_epochs.pth
+ │    config.json
+
+ └─npy
+      25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
+      all_emotions.npy
+ </code></pre></details>
+
+ ### Modify model path
+
+ Modify in `/path/to/vits-simple-api/config.py`
+
+ <details><summary>config.py</summary><pre><code>
+ # Fill in the model paths here
+ MODEL_LIST = [
+     # VITS
+     [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
+     [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
+     [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
+     # HuBert-VITS (requires HUBERT_SOFT_MODEL to be configured)
+     [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
+     # W2V2-VITS (requires DIMENSIONAL_EMOTION_NPY to be configured)
+     [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
+ ]
+ # hubert-vits: hubert soft model
+ HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
+ # w2v2-vits: dimensional emotion npy file(s)
+ # load a single npy: ABS_PATH + "/all_emotions.npy"
+ # load multiple npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
+ # load multiple npy from a folder: ABS_PATH + "/Model/npy"
+ DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
+ # w2v2-vits: both `model.onnx` and `model.yaml` must be in the same path
+ DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
+ </code></pre></details>
+
+ ### Startup
+
+ `python app.py`
+
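+ Once the server is running, a quick way to verify it is to query the documented speakers endpoint. A minimal sketch with the `requests` package (assumes the default port 23456):
+
+ ```python
+ import requests
+
+ # Returns the mapping table of speaker IDs to speaker names
+ res = requests.get("http://127.0.0.1:23456/voice/speakers")
+ print(res.json())
+ ```
+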
+ # GPU acceleration
+
+ ## Windows
+
+ ### Install CUDA
+
+ Check the highest CUDA version supported by your graphics card:
+
+ ```
+ nvidia-smi
+ ```
+
+ Taking CUDA 11.7 as an example, download it from the [official website](https://developer.nvidia.com/cuda-11-7-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exe_local).
+
+ ### Install the GPU version of PyTorch
+
+ 1.13.1+cu117 is recommended; other versions may have memory instability issues.
+
+ ```
+ pip install torch==1.13.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
+ ```
+
+ ## Linux
+
+ The installation process is similar, but I don't have the environment to test it.
+
+ # Dependency Installation Issues
+
+ Since pypi.org does not host a `pyopenjtalk` wheel, it usually has to be built from source, which can be troublesome. As an alternative, you can install a wheel I built:
+
+ ```
+ pip install pyopenjtalk -i https://pypi.artrajz.cn/simple
+ ```
+
+ # API
+
+ ## GET
+
+ #### speakers list
+
+ - GET http://127.0.0.1:23456/voice/speakers
+
+ Returns the mapping table of speaker IDs to speaker names.
+
+ #### voice vits
+
+ - GET http://127.0.0.1:23456/voice/vits?text=text
+
+ Default values are used for any parameters that are not specified.
+
+ - GET http://127.0.0.1:23456/voice/vits?text=[ZH]text[ZH][JA]text[JA]&lang=mix
+
+ When lang=mix, the text needs to be annotated with language tags.
+
+ - GET http://127.0.0.1:23456/voice/vits?text=text&id=142&format=wav&lang=zh&length=1.4
+
+ The text is "text", the speaker ID is 142, the audio format is wav, the text language is zh, and the speech length is 1.4; the other parameters use their defaults.
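+
+ The same endpoint can be called from code. A minimal sketch with `requests` (the parameter values are only examples; anything left out falls back to the defaults in `config.py`):
+
+ ```python
+ import requests
+
+ params = {
+     "text": "你好",   # text to synthesize
+     "id": 142,         # speaker ID
+     "format": "wav",   # audio format
+     "lang": "zh",      # text language
+     "length": 1.4,     # speech length; larger is slower
+ }
+ res = requests.get("http://127.0.0.1:23456/voice/vits", params=params)
+ with open("output.wav", "wb") as f:
+     f.write(res.content)
+ ```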
+
+ #### check
+
+ - GET http://127.0.0.1:23456/voice/check?id=0&model=vits
+
+ ## POST
+
+ - See `api_test.py`
+
+ ## API KEY
+
+ Set `API_KEY_ENABLED = True` in `config.py` to enable API key authentication; the key itself is configured via `API_KEY = "api-key"`.
+ After enabling it, add the `api_key` query parameter to GET requests and the `X-API-KEY` header to POST requests.
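+
+ A minimal sketch of both authentication styles (assumes the key in `config.py` is left as `"api-key"`):
+
+ ```python
+ import requests
+
+ base = "http://127.0.0.1:23456"
+
+ # GET: the key goes in the `api_key` query parameter
+ res = requests.get(f"{base}/voice/vits",
+                    params={"text": "你好", "id": 0, "api_key": "api-key"})
+
+ # POST: the key goes in the `X-API-KEY` header
+ # (see `api_test.py` for the expected request bodies)
+ headers = {"X-API-KEY": "api-key"}
+ ```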
+
284
+ # Parameter
285
+
286
+ ## VITS
287
+
288
+ | Name | Parameter | Is must | Default | Type | Instruction |
289
+ | ---------------------- | --------- | ------- | ---------------- | ----- | ------------------------------------------------------------ |
290
+ | Synthesized text | text | true | | str | Text needed for voice synthesis. |
291
+ | Speaker ID | id | false | From `config.py` | int | The speaker ID. |
292
+ | Audio format | format | false | From `config.py` | str | Support for wav,ogg,silk,mp3,flac |
293
+ | Text language | lang | false | From `config.py` | str | The language of the text to be synthesized. Available options include auto, zh, ja, and mix. When lang=mix, the text should be wrapped in [ZH] or [JA].The default mode is auto, which automatically detects the language of the text |
294
+ | Audio length | length | false | From `config.py` | float | Adjusts the length of the synthesized speech, which is equivalent to adjusting the speed of the speech. The larger the value, the slower the speed. |
295
+ | Noise | noise | false | From `config.py` | float | Sample noise, controlling the randomness of the synthesis. |
296
+ | SDP noise | noisew | false | From `config.py` | float | Stochastic Duration Predictor noise, controlling the length of phoneme pronunciation. |
297
+ | Segmentation threshold | max | false | From `config.py` | int | Divide the text into paragraphs based on punctuation marks, and combine them into one paragraph when the length exceeds max. If max<=0, the text will not be divided into paragraphs. |
298
+ | Streaming response | streaming | false | false | bool | Streams the synthesized speech for a faster initial response (see the sketch below). |
299
+
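+ For example, a streaming response can be consumed chunk by chunk (a sketch; streaming responses are always returned as MP3):
+
+ ```python
+ # Sketch: request streaming synthesis and write chunks as they arrive.
+ import requests
+
+ res = requests.get(
+     "http://127.0.0.1:23456/voice/vits",
+     params={"text": "你好", "id": 0, "streaming": True},
+     stream=True,
+ )
+ with open("output.mp3", "wb") as f:
+     for chunk in res.iter_content(chunk_size=1024):
+         f.write(chunk)
+ ```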
300
+ ## VITS voice conversion
301
+
302
+ | Name | Parameter | Required | Default | Type | Instruction |
303
+ | -------------- | ----------- | ------- | ------- | ---- | --------------------------------------------------------- |
304
+ | Uploaded Audio | upload | true | | file | The audio file to be uploaded. It should be in wav or ogg format. |
305
+ | Source Role ID | original_id | true | | int | The speaker ID of the uploaded audio. |
306
+ | Target Role ID | target_id | true | | int | The ID of the target role to convert the audio to. |
307
+
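+ A sketch of calling the conversion endpoint with a multipart upload (see `api_test.py` for the full client; the IDs below are placeholders):
+
+ ```python
+ # Sketch: convert an uploaded recording from speaker 1 to speaker 3.
+ import requests
+
+ with open("input.wav", "rb") as f:
+     res = requests.post(
+         "http://127.0.0.1:23456/voice/conversion",
+         files={"upload": ("input.wav", f, "audio/wav")},
+         data={"original_id": "1", "target_id": "3"},
+     )
+ with open("converted.wav", "wb") as out:
+     out.write(res.content)
+ ```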
308
+ ## HuBert-VITS
309
+
310
+ | Name | Parameter | Required | Default | Type | Instruction |
311
+ | ----------------- | --------- | ------- | ------- | ----- | ------------------------------------------------------------ |
312
+ | Uploaded Audio | upload | true | | file | The audio file to be uploaded. It should be in wav or ogg format. |
313
+ | Target speaker ID | id | true | | int | The target speaker ID. |
314
+ | Audio format | format | true | | str | wav, ogg, silk |
315
+ | Audio length | length | true | | float | Adjusts the length of the synthesized speech, which is equivalent to adjusting the speed of the speech. The larger the value, the slower the speed. |
316
+ | Noise | noise | true | | float | Sample noise, controlling the randomness of the synthesis. |
317
+ | SDP noise | noisew | true | | float | Stochastic Duration Predictor noise, controlling the length of phoneme pronunciation. |
318
+
319
+ ## W2V2-VITS
320
+
321
+ | Name | Parameter | Required | Default | Type | Instruction |
322
+ | ---------------------- | --------- | ------- | ---------------- | ----- | ------------------------------------------------------------ |
323
+ | Synthesized text | text | true | | str | Text needed for voice synthesis. |
324
+ | Speaker ID | id | false | From `config.py` | int | The speaker ID. |
325
+ | Audio format | format | false | From `config.py` | str | Supports wav, ogg, silk, mp3, and flac |
326
+ | Text language | lang | false | From `config.py` | str | The language of the text to be synthesized. Available options are auto, zh, ja, and mix. When lang=mix, the text should be wrapped in [ZH] or [JA] tags. The default mode is auto, which automatically detects the language of the text. |
327
+ | Audio length | length | false | From `config.py` | float | Adjusts the length of the synthesized speech, which is equivalent to adjusting the speed of the speech. The larger the value, the slower the speed. |
328
+ | Noise | noise | false | From `config.py` | float | Sample noise, controlling the randomness of the synthesis. |
329
+ | SDP noise | noisew | false | From `config.py` | float | Stochastic Duration Predictor noise, controlling the length of phoneme pronunciation. |
330
+ | Segmentation threshold | max | false | From `config.py` | int | Divide the text into paragraphs based on punctuation marks, and combine them into one paragraph when the length exceeds max. If max<=0, the text will not be divided into paragraphs. |
331
+ | Dimensional emotion | emotion | false | 0 | int | The range depends on the emotion reference file in npy format; for example, [innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)'s all_emotions.npy covers 0-5457 (see the sketch below). |
332
+
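+ For example, the emotion index can be varied per request (a sketch; valid indices depend on your npy reference file, and 111/2077 are placeholders taken from the demo URLs):
+
+ ```python
+ # Sketch: synthesize the same text with two different dimensional emotions.
+ import requests
+
+ for emotion in (111, 2077):
+     res = requests.get(
+         "http://127.0.0.1:23456/voice/w2v2-vits",
+         params={"text": "こんにちは", "id": 3, "emotion": emotion},
+     )
+     with open(f"output_{emotion}.wav", "wb") as f:
+         f.write(res.content)
+ ```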
333
+ ## Dimensional emotion
334
+
335
+ | Name | Parameter | Required | Default | Type | Instruction |
336
+ | -------------- | --------- | ------- | ------- | ---- | ------------------------------------------------------------ |
337
+ | Uploaded Audio | upload | true | | file | The audio file to be uploaded. The endpoint returns an npy file storing the dimensional emotion vector. |
338
+
339
+ ## Bert-VITS2
340
+
341
+ | Name | Parameter | Required | Default | Type | Instruction |
342
+ | ---------------------- | --------- | ------- | ---------------- | ----- | ------------------------------------------------------------ |
343
+ | Synthesized text | text | true | | str | Text needed for voice synthesis. |
344
+ | Speaker ID | id | false | From `config.py` | int | The speaker ID. |
345
+ | Audio format | format | false | From `config.py` | str | Supports wav, ogg, silk, mp3, and flac |
346
+ | Text language | lang | false | From `config.py` | str | "Auto" is a mode for automatic language detection and is also the default mode. However, it currently only supports detecting the language of an entire text passage and cannot distinguish languages on a per-sentence basis. The other available language options are "zh" and "ja". |
347
+ | Audio length | length | false | From `config.py` | float | Adjusts the length of the synthesized speech, which is equivalent to adjusting the speed of the speech. The larger the value, the slower the speed. |
348
+ | Noise | noise | false | From `config.py` | float | Sample noise, controlling the randomness of the synthesis. |
349
+ | SDP noise | noisew | false | From `config.py` | float | Stochastic Duration Predictor noise, controlling the length of phoneme pronunciation. |
350
+ | Segmentation threshold | max | false | From `config.py` | int | Divide the text into paragraphs based on punctuation marks, and combine them into one paragraph when the length exceeds max. If max<=0, the text will not be divided into paragraphs. |
351
+ | SDP/DP mix ratio | sdp_ratio | false | From `config.py` | float | The proportion of SDP used during synthesis; in theory, the higher the ratio, the larger the variance in the tone of the synthesized voice (see the sketch below). |
352
+
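+ For example (a sketch; the endpoint matches `voice_bert_vits2` in `api_test.py`):
+
+ ```python
+ # Sketch: Bert-VITS2 synthesis with an explicit SDP/DP mix ratio.
+ import requests
+
+ res = requests.get(
+     "http://127.0.0.1:23456/voice/bert-vits2",
+     params={"text": "你好", "id": 0, "lang": "zh", "sdp_ratio": 0.2},
+ )
+ with open("output.wav", "wb") as f:
+     f.write(res.content)
+ ```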
353
+ ## SSML (Speech Synthesis Markup Language)
354
+
355
+ Supported Elements and Attributes
356
+
357
+ `speak` Element
358
+
359
+ | Attribute | Instruction | Required |
360
+ | --------- | ------------------------------------------------------------ | ------- |
361
+ | id | Default value is retrieved from `config.py` | false |
362
+ | lang | Default value is retrieved from `config.py` | false |
363
+ | length | Default value is retrieved from `config.py` | false |
364
+ | noise | Default value is retrieved from `config.py` | false |
365
+ | noisew | Default value is retrieved from `config.py` | false |
366
+ | max | Splits text into segments based on punctuation marks. When the sum of segment lengths exceeds `max`, it is treated as one segment. `max<=0` means no segmentation. The default value is 0. | false |
367
+ | model | Default is `vits`. Options: `w2v2-vits`, `emotion-vits` | false |
368
+ | emotion | Only effective when using `w2v2-vits` or `emotion-vits`. The range depends on the npy emotion reference file. | false |
369
+
370
+ `voice` Element
371
+
372
+ Higher priority than `speak`.
373
+
374
+ | Attribute | Instruction | Required |
375
+ | --------- | ------------------------------------------------------------ | ------- |
376
+ | id | Default value is retrieved from `config.py` | false |
377
+ | lang | Default value is retrieved from `config.py` | false |
378
+ | length | Default value is retrieved from `config.py` | false |
379
+ | noise | Default value is retrieved from `config.py` | false |
380
+ | noisew | Default value is retrieved from `config.py` | false |
381
+ | max | Splits text into segments based on punctuation marks. When the sum of segment lengths exceeds `max`, it is treated as one segment. `max<=0` means no segmentation. The default value is 0. | false |
382
+ | model | Default is `vits`. Options: `w2v2-vits`, `emotion-vits` | false |
383
+ | emotion | Only effective when using `w2v2-vits` or `emotion-vits` | false |
384
+
385
+ `break` Element
386
+
387
+ | Attribute | Instruction | Required |
388
+ | --------- | ------------------------------------------------------------ | ------- |
389
+ | strength | x-weak, weak, medium (default), strong, x-strong | false |
390
+ | time | The absolute duration of a pause in seconds (such as `2s`) or milliseconds (such as `500ms`). Valid values range from 0 to 5000 milliseconds. If you set a value greater than the supported maximum, the service will use `5000ms`. If the `time` attribute is set, the `strength` attribute is ignored. | false |
391
+
392
+ | Strength | Relative Duration |
393
+ | :------- | :---------------- |
394
+ | x-weak | 250 ms |
395
+ | weak | 500 ms |
396
+ | medium | 750 ms |
397
+ | strong | 1000 ms |
398
+ | x-strong | 1250 ms |
399
+
400
+ Example
401
+
402
+ ```xml
403
+ <speak lang="zh" format="mp3" length="1.2">
404
+ <voice id="92" >这几天心里颇不宁静。</voice>
405
+ <voice id="125">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
406
+ <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
407
+ <voice id="98">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
408
+ <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
409
+ <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
410
+ <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
411
+ <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
412
+ <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
413
+ <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
414
+ <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
415
+ <voice id="127">路上只我一个人,背着手踱着。</voice>
416
+ <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
417
+ <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
418
+ <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
419
+ <voice id="131">白天里一定要做的事,一定要说的话,现在都可不理。</voice>
420
+ <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
421
+ </speak>
422
+ ```
423
+
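+ A sketch of posting an SSML document like the one above to the `/voice/ssml` endpoint (see `voice_ssml` in `api_test.py` for the multipart form variant):
+
+ ```python
+ # Sketch: send an SSML document as JSON and save the synthesized audio.
+ import requests
+
+ ssml = """<speak lang="zh" format="mp3" length="1.2">
+     <voice id="92">这几天心里颇不宁静。</voice>
+ </speak>"""
+
+ res = requests.post("http://127.0.0.1:23456/voice/ssml", json={"ssml": ssml})
+ with open("output.mp3", "wb") as f:
+     f.write(res.content)
+ ```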
424
+ # Communication
425
+
426
+ For learning and communication, there is currently only a Chinese [QQ group](https://qm.qq.com/cgi-bin/qm/qr?k=-1GknIe4uXrkmbDKBGKa1aAUteq40qs_&jump_from=webapi&authKey=x5YYt6Dggs1ZqWxvZqvj3fV8VUnxRyXm5S5Kzntc78+Nv3iXOIawplGip9LWuNR/).
427
+
428
+ # Acknowledgements
429
+
430
+ - vits:https://github.com/jaywalnut310/vits
431
+ - MoeGoe:https://github.com/CjangCjengh/MoeGoe
432
+ - emotional-vits:https://github.com/innnky/emotional-vits
433
+ - vits-uma-genshin-honkai:https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai
434
+ - vits_chinese:https://github.com/PlayVoice/vits_chinese
435
+ - Bert_VITS2:https://github.com/fishaudio/Bert-VITS2
436
+
README_zh.md ADDED
@@ -0,0 +1,433 @@
1
+ <div class="title" align=center>
2
+ <h1>vits-simple-api</h1>
3
+ <div>Simply call the vits api</div>
4
+ <br/>
5
+ <br/>
6
+ <p>
7
+ <img src="https://img.shields.io/github/license/Artrajz/vits-simple-api">
8
+ <img src="https://img.shields.io/badge/python-3.10-green">
9
+ <a href="https://hub.docker.com/r/artrajz/vits-simple-api">
10
+ <img src="https://img.shields.io/docker/pulls/artrajz/vits-simple-api"></a>
11
+ </p>
12
+ <a href="https://github.com/Artrajz/vits-simple-api/blob/main/README.md">English</a>|<a href="https://github.com/Artrajz/vits-simple-api/blob/main/README_zh.md">中文文档</a>
13
+ <br/>
14
+ </div>
15
+
16
+
17
+
18
+
19
+
20
+ # Feature
21
+
22
+ - [x] VITS语音合成,语音转换
23
+ - [x] HuBert-soft VITS模型
24
+ - [x] W2V2 VITS / [emotional-vits](https://github.com/innnky/emotional-vits)维度情感模型
25
+ - [x] [vits_chinese](https://github.com/PlayVoice/vits_chinese)
26
+ - [x] [Bert-VITS2](https://github.com/Stardust-minus/Bert-VITS2)
27
+ - [x] 加载多模型
28
+ - [x] 自动识别语言并处理,根据模型的cleaner设置语言类型识别的范围,支持自定义语言类型范围
29
+ - [x] 自定义默认参数
30
+ - [x] 长文本批处理
31
+ - [x] GPU加速推理
32
+ - [x] SSML语音合成标记语言(完善中...)
33
+
34
+
35
+ ## demo
36
+
37
+ [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Artrajz/vits-simple-api)
38
+
39
+ 注意不同的id支持的语言可能有所不同。[speakers](https://artrajz-vits-simple-api.hf.space/voice/speakers)
40
+
41
+
42
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=你好,こんにちは&id=164`
43
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=我觉得1%2B1≠3&id=164&lang=zh`(get中一些字符需要转义不然会被过滤掉)
44
+ - `https://artrajz-vits-simple-api.hf.space/voice/vits?text=Difficult the first time, easy the second.&id=4`
45
+ - 激动:`https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=111`
46
+ - 小声:`https://artrajz-vits-simple-api.hf.space/voice/w2v2-vits?text=こんにちは&id=3&emotion=2077`
47
+
48
+ https://user-images.githubusercontent.com/73542220/237995061-c1f25b4e-dd86-438a-9363-4bb1fe65b425.mov
49
+
50
+ # 部署
51
+
52
+ ## Docker部署(Linux推荐)
53
+
54
+ ### 镜像拉取脚本
55
+
56
+ ```
57
+ bash -c "$(wget -O- https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/vits-simple-api-installer-latest.sh)"
58
+ ```
59
+
60
+ - 目前docker镜像支持的平台`linux/amd64,linux/arm64`(arm64仅有CPU版本)
61
+ - 在拉取完成后,需要导入VITS模型才能使用,请根据以下步骤导入模型。
62
+
63
+ ### 下载VITS模型
64
+
65
+ 将模型放入`/usr/local/vits-simple-api/Model`
66
+
67
+ <details><summary>Folder structure</summary><pre><code>
68
+ │ hubert-soft-0d54a1f4.pt
69
+ │ model.onnx
70
+ │ model.yaml
71
+ ├─g
72
+ │ config.json
73
+ │ G_953000.pth
74
+
75
+ ├─louise
76
+ │ 360_epochs.pth
77
+ │ config.json
78
+
79
+ ├─Nene_Nanami_Rong_Tang
80
+ │ 1374_epochs.pth
81
+ │ config.json
82
+
83
+ ├─Zero_no_tsukaima
84
+ │ 1158_epochs.pth
85
+ │ config.json
86
+
87
+ └─npy
88
+ 25ecb3f6-f968-11ed-b094-e0d4e84af078.npy
89
+ all_emotions.npy
90
+ </code></pre></details>
91
+
92
+
93
+
94
+ ### 修改模型路径
95
+
96
+ Modify in `/usr/local/vits-simple-api/config.py`
97
+
98
+ <details><summary>config.py</summary><pre><code>
99
+ # 在此填写模型路径
100
+ MODEL_LIST = [
101
+ # VITS
102
+ [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
103
+ [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
104
+ [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
105
+ # HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
106
+ [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
107
+ # W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
108
+ [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
109
+ ]
110
+ # hubert-vits: hubert soft 编码器
111
+ HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
112
+ # w2v2-vits: Dimensional emotion npy file
113
+ # 加载单独的npy: ABS_PATH + "/all_emotions.npy"
114
+ # 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
115
+ # 从文件夹里加载npy: ABS_PATH + "/Model/npy"
116
+ DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
117
+ # w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
118
+ DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
119
+ </code></pre></details>
120
+
121
+
122
+
123
+ ### 启动
124
+
125
+ `docker compose up -d`
126
+
127
+ 或者重新执行拉取脚本
128
+
129
+ ### 镜像更新
130
+
131
+ 重新执行docker镜像拉取脚本即可
132
+
133
+ ## 虚拟环境部署
134
+
135
+ ### Clone
136
+
137
+ `git clone https://github.com/Artrajz/vits-simple-api.git`
138
+
139
+ ### 下载python依赖
140
+
141
+ 推荐使用python的虚拟环境
142
+
143
+ `pip install -r requirements.txt`
144
+
145
+ windows下可能安装不了fasttext,可以用以下命令安装,附[wheels下载地址](https://www.lfd.uci.edu/~gohlke/pythonlibs/#fasttext)
146
+
147
+ ```
148
+ # python3.10 win_amd64
149
+ pip install https://github.com/Artrajz/archived/raw/main/fasttext/fasttext-0.9.2-cp310-cp310-win_amd64.whl
150
+ ```
151
+
152
+ ### 下载VITS模型
153
+
154
+ 将模型放入 `/path/to/vits-simple-api/Model`
155
+
156
+ <details><summary>文件夹结构</summary><pre><code>
157
+ ├─g
158
+ │ config.json
159
+ │ G_953000.pth
160
+
161
+ ├─louise
162
+ │ 360_epochs.pth
163
+ │ config.json
164
+ │ hubert-soft-0d54a1f4.pt
165
+
166
+ ├─Nene_Nanami_Rong_Tang
167
+ │ 1374_epochs.pth
168
+ │ config.json
169
+
170
+ └─Zero_no_tsukaima
171
+ 1158_epochs.pth
172
+ config.json
173
+ </code></pre></details>
174
+
175
+ ### 修改模型路径
176
+
177
+ 在 `/path/to/vits-simple-api/config.py` 修改
178
+
179
+ <details><summary>config.py</summary><pre><code>
180
+ # 在此填写模型路径
181
+ MODEL_LIST = [
182
+ # VITS
183
+ [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
184
+ [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
185
+ [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
186
+ # HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
187
+ [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
188
+ # W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
189
+ [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
190
+ ]
191
+ # hubert-vits: hubert soft 编码器
192
+ HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
193
+ # w2v2-vits: Dimensional emotion npy file
194
+ # 加载单独的npy: ABS_PATH + "/all_emotions.npy"
195
+ # 加载多个npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
196
+ # 从文件夹里加载npy: ABS_PATH + "/Model/npy"
197
+ DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
198
+ # w2v2-vits: 需要在同一路径下有model.onnx和model.yaml
199
+ DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
200
+ </code></pre></details>
201
+
202
+
203
+
204
+ ### 启动
205
+
206
+ `python app.py`
207
+
208
+ # GPU 加速
209
+
210
+ ## windows
211
+
212
+ ### 安装CUDA
213
+
214
+ 查看显卡最高支持CUDA的版本
215
+
216
+ ```
217
+ nvidia-smi
218
+ ```
219
+
220
+ 以CUDA11.7为例,[官网](https://developer.nvidia.com/cuda-11-7-0-download-archive?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exe_local)
221
+
222
+ ### 安装GPU版pytorch
223
+
224
+ CUDA11.7对应的pytorch是用这个命令安装,推荐使用1.13.1+cu117,其他版本可能存在内存不稳定的问题。
225
+
226
+ ```
227
+ pip install torch==1.13.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
228
+ ```
229
+
230
+ ## Linux
231
+
232
+ 安装过程类似,但我没有相应的环境所以没办法测试
233
+
234
+ # 依赖安装问题
235
+
236
+ 由于pypi.org没有pyopenjtalk的whl文件,通常需要从源代码来安装,这一过程对于一些人来说可能比较麻烦,所以你也可以使用我构建的whl来安装。
237
+
238
+ ```
239
+ pip install pyopenjtalk -i https://pypi.artrajz.cn/simple
240
+ ```
241
+
242
+ # API
243
+
244
+ ## GET
245
+
246
+ #### speakers list
247
+
248
+ - GET http://127.0.0.1:23456/voice/speakers
249
+
250
+ 返回id对应角色的映射表
251
+
252
+ #### voice vits
253
+
254
+ - GET http://127.0.0.1:23456/voice/vits?text=text
255
+
256
+ 其他参数不指定时均为默认值
257
+
258
+ - GET http://127.0.0.1:23456/voice/vits?text=[ZH]text[ZH][JA]text[JA]&lang=mix
259
+
260
+ lang=mix时文本要标注
261
+
262
+ - GET http://127.0.0.1:23456/voice/vits?text=text&id=142&format=wav&lang=zh&length=1.4
263
+
264
+ 文本为text,角色id为142,音频格式为wav,文本语言为zh,语音长度为1.4,其余参数默认
265
+
266
+ #### check
267
+
268
+ - GET http://127.0.0.1:23456/voice/check?id=0&model=vits
269
+
270
+ ## POST
271
+
272
+ - 见`api_test.py`
273
+
274
+
275
+
276
+ ## API KEY
277
+
278
+ 在config.py中设置`API_KEY_ENABLED = True`以启用,api key填写:`API_KEY = "api-key"`。
279
+
280
+ 启用后,GET请求中使用需要增加参数api_key,POST请求中使用需要在header中添加参数`X-API-KEY`。
281
+
282
+ # Parameter
283
+
284
+ ## VITS语音合成
285
+
286
+ | Name | Parameter | Is must | Default | Type | Instruction |
287
+ | ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
288
+ | 合成文本 | text | true | | str | 需要合成语音的文本。 |
289
+ | 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
290
+ | 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
291
+ | 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
292
+ | 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢。 |
293
+ | 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
294
+ | sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
295
+ | 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
296
+ | 流式响应 | streaming | false | false | bool | 流式合成语音,更快的首包响应。 |
297
+
298
+ ## VITS 语音转换
299
+
300
+ | Name | Parameter | Is must | Default | Type | Instruction |
301
+ | ---------- | ----------- | ------- | ------- | ---- | ---------------------- |
302
+ | 上传音频 | upload | true | | file | wav or ogg |
303
+ | 源角色id | original_id | true | | int | 上传文件所使用的角色id |
304
+ | 目标角色id | target_id | true | | int | 要转换的目标角色id |
305
+
306
+ ## HuBert-VITS 语音转换
307
+
308
+ | Name | Parameter | Is must | Default | Type | Instruction |
309
+ | ------------- | --------- | ------- | ------- | ----- | ------------------------------------------------ |
310
+ | 上传音频 | upload | true | | file | 需要转换说话人的音频文件。 |
311
+ | 目标角色id | id | true | | int | 目标说话人id。 |
312
+ | 音频格式 | format | true | | str | wav,ogg,silk |
313
+ | 语音长度/语速 | length | true | | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
314
+ | 噪声 | noise | true | | float | 样本噪声,控制合成的随机性。 |
315
+ | sdp噪声 | noisew | true | | float | 随机时长预测器噪声,控制音素发音长度。 |
316
+
317
+ ## W2V2-VITS
318
+
319
+ | Name | Parameter | Is must | Default | Type | Instruction |
320
+ | ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
321
+ | 合成文本 | text | true | | str | 需要合成语音的文本。 |
322
+ | 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
323
+ | 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
324
+ | 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式。lang=mix时,文本应该用[ZH] 或 [JA] 包裹。方言无法自动识别。 |
325
+ | 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢 |
326
+ | 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
327
+ | sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
328
+ | 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
329
+ | 维度情感 | emotion | false | 0 | int | 范围取决于npy情感参考文件,如[innnky](https://huggingface.co/spaces/innnky/nene-emotion/tree/main)的all_emotions.npy模型范围是0-5457 |
330
+
331
+ ## Dimensional emotion
332
+
333
+ | Name | Parameter | Is must | Default | Type | Instruction |
334
+ | -------- | --------- | ------- | ------- | ---- | ----------------------------- |
335
+ | 上传音频 | upload | true | | file | 返回存储维度情感向量的npy文件 |
336
+
337
+ ## Bert-VITS2语音合成
338
+
339
+ | Name | Parameter | Is must | Default | Type | Instruction |
340
+ | ------------- | --------- | ------- | ------------------- | ----- | ------------------------------------------------------------ |
341
+ | 合成文本 | text | true | | str | 需要合成语音的文本。 |
342
+ | 角色id | id | false | 从`config.py`中获取 | int | 即说话人id。 |
343
+ | 音频格式 | format | false | 从`config.py`中获取 | str | 支持wav,ogg,silk,mp3,flac |
344
+ | 文本语言 | lang | false | 从`config.py`中获取 | str | auto为自动识别语言模式,也是默认模式,但目前只支持识别整段文本的语言,无法细分到每个句子。其余可选语言zh和ja。 |
345
+ | 语音长度/语速 | length | false | 从`config.py`中获取 | float | 调节语音长度,相当于调节语速,该数值越大语速越慢。 |
346
+ | 噪声 | noise | false | 从`config.py`中获取 | float | 样本噪声,控制合成的随机性。 |
347
+ | sdp噪声 | noisew | false | 从`config.py`中获取 | float | 随机时长预测器噪声,控制音素发音长度。 |
348
+ | 分段阈值 | max | false | 从`config.py`中获取 | int | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。 |
349
+ | SDP/DP混合比 | sdp_ratio | false | 从`config.py`中获取 | int | SDP在合成时的占比,理论上此比率越高,合成的语音语调方差越大。 |
350
+
351
+ ## SSML语音合成标记语言
352
+ 目前支持的元素与属性
353
+
354
+ `speak`元素
355
+
356
+ | Attribute | Description | Is must |
357
+ | --------- | ------------------------------------------------------------ | ------- |
358
+ | id | 默认值从`config.py`中读取 | false |
359
+ | lang | 默认值从`config.py`中读取 | false |
360
+ | length | 默认值从`config.py`中读取 | false |
361
+ | noise | 默认值从`config.py`中读取 | false |
362
+ | noisew | 默认值从`config.py`中读取 | false |
363
+ | max | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段,这里默认为0。 | false |
364
+ | model | 默认为vits,可选`w2v2-vits`,`emotion-vits` | false |
365
+ | emotion | 只有用`w2v2-vits`或`emotion-vits`时`emotion`才生效,范围取决于npy情感参考文件 | false |
366
+
367
+ `voice`元素
368
+
369
+ 优先级大于`speak`
370
+
371
+ | Attribute | Description | Is must |
372
+ | --------- | ------------------------------------------------------------ | ------- |
373
+ | id | 默认值从`config.py`中读取 | false |
374
+ | lang | 默认值从`config.py`中读取 | false |
375
+ | length | 默认值从`config.py`中读取 | false |
376
+ | noise | 默认值从`config.py`中读取 | false |
377
+ | noisew | 默认值从`config.py`中读取 | false |
378
+ | max | 按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段,这里默认为0。 | false |
379
+ | model | 默认为vits,可选`w2v2-vits`,`emotion-vits` | false |
380
+ | emotion | 只有用`w2v2-vits`或`emotion-vits`时`emotion`才会生效 | false |
381
+
382
+ `break`元素
383
+
384
+ | Attribute | Description | Is must |
385
+ | --------- | ------------------------------------------------------------ | ------- |
386
+ | strength | x-weak,weak,medium(默认值),strong,x-strong | false |
387
+ | time | 暂停的绝对持续时间,以秒为单位(例如 `2s`)或以毫秒为单位(例如 `500ms`)。 有效值的范围为 0 到 5000 毫秒。 如果设置的值大于支持的最大值,则服务将使用 `5000ms`。 如果设置了 `time` 属性,则会忽略 `strength` 属性。 | false |
388
+
389
+ | Strength | Relative Duration |
390
+ | :------- | :---------------- |
391
+ | x-weak | 250 毫秒 |
392
+ | weak | 500 毫秒 |
393
+ | medium | 750 毫秒 |
394
+ | strong | 1000 毫秒 |
395
+ | x-strong | 1250 毫秒 |
396
+
397
+ 示例
398
+
399
+ ```xml
400
+ <speak lang="zh" format="mp3" length="1.2">
401
+ <voice id="92" >这几天心里颇不宁静。</voice>
402
+ <voice id="125">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
403
+ <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
404
+ <voice id="98">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
405
+ <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
406
+ <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
407
+ <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
408
+ <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
409
+ <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
410
+ <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
411
+ <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
412
+ <voice id="127">路上只我一个人,背着手踱着。</voice>
413
+ <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
414
+ <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
415
+ <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
416
+ <voice id="131">白天里一定要做的事,一定要说的话,现在都可不理。</voice>
417
+ <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
418
+ </speak>
419
+ ```
420
+
421
+ # 交流平台
422
+
423
+ 现在只有 [Q群](https://qm.qq.com/cgi-bin/qm/qr?k=-1GknIe4uXrkmbDKBGKa1aAUteq40qs_&jump_from=webapi&authKey=x5YYt6Dggs1ZqWxvZqvj3fV8VUnxRyXm5S5Kzntc78+Nv3iXOIawplGip9LWuNR/)
424
+
425
+ # 鸣谢
426
+
427
+ - vits:https://github.com/jaywalnut310/vits
428
+ - MoeGoe:https://github.com/CjangCjengh/MoeGoe
429
+ - emotional-vits:https://github.com/innnky/emotional-vits
430
+ - vits-uma-genshin-honkai:https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai
431
+ - vits_chinese:https://github.com/PlayVoice/vits_chinese
432
+ - Bert_VITS2:https://github.com/fishaudio/Bert-VITS2
433
+
api_test.py ADDED
@@ -0,0 +1,429 @@
1
+ import json
2
+ import re
3
+ import requests
4
+ import os
5
+ import time
6
+ import random
7
+ import string
8
+ from requests_toolbelt.multipart.encoder import MultipartEncoder
9
+
10
+ absolute_path = os.path.dirname(__file__)
11
+ base_url = "http://127.0.0.1:23456"
12
+
13
+
14
+ # 映射表
15
+ def voice_speakers():
16
+ url = f"{base_url}/voice/speakers"
17
+
18
+ res = requests.post(url=url)
19
+ data = res.json()  # note: avoid shadowing the json module imported above
20
+ for i in data:
21
+ print(i)
22
+ for j in data[i]:
23
+ print(j)
24
+ return data
25
+
26
+
27
+ # 语音合成 voice vits
28
+ def voice_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, save_audio=True,
29
+ save_path=None):
30
+ fields = {
31
+ "text": text,
32
+ "id": str(id),
33
+ "format": format,
34
+ "lang": lang,
35
+ "length": str(length),
36
+ "noise": str(noise),
37
+ "noisew": str(noisew),
38
+ "max": str(max)
39
+ }
40
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
41
+
42
+ m = MultipartEncoder(fields=fields, boundary=boundary)
43
+ headers = {"Content-Type": m.content_type}
44
+ url = f"{base_url}/voice/vits"
45
+
46
+ res = requests.post(url=url, data=m, headers=headers)
47
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
48
+ if save_path is not None:
49
+ path = os.path.join(save_path, fname)
50
+ else:
51
+ path = os.path.join(absolute_path, fname)
52
+ if save_audio:
53
+ with open(path, "wb") as f:
54
+ f.write(res.content)
55
+ print(path)
56
+ return path
57
+ return None
58
+
59
+
60
+ # Non-chunked variant: requests the streaming endpoint but saves the whole response at once.
+ def voice_vits_streaming_simple(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50,
61
+ save_audio=True, save_path=None):
62
+ fields = {
63
+ "text": text,
64
+ "id": str(id),
65
+ "format": format,
66
+ "lang": lang,
67
+ "length": str(length),
68
+ "noise": str(noise),
69
+ "noisew": str(noisew),
70
+ "max": str(max),
71
+ "streaming": 'True'
72
+ }
73
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
74
+
75
+ m = MultipartEncoder(fields=fields, boundary=boundary)
76
+ headers = {"Content-Type": m.content_type}
77
+ url = f"{base_url}/voice"
78
+
79
+ res = requests.post(url=url, data=m, headers=headers)
80
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
81
+ if save_path is not None:
82
+ path = os.path.join(save_path, fname)
83
+ else:
84
+ path = os.path.join(absolute_path, fname)
85
+ if save_audio:
86
+ with open(path, "wb") as f:
87
+ f.write(res.content)
88
+ print(path)
89
+ return path
90
+ return None
91
+
92
+
93
+ def voice_vits_streaming(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50,
94
+ save_path=None):
95
+ fields = {
96
+ "text": text,
97
+ "id": str(id),
98
+ "format": format,
99
+ "lang": lang,
100
+ "length": str(length),
101
+ "noise": str(noise),
102
+ "noisew": str(noisew),
103
+ "max": str(max),
104
+ "streaming": 'True'
105
+ }
106
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
107
+
108
+ m = MultipartEncoder(fields=fields, boundary=boundary)
109
+ headers = {"Content-Type": m.content_type}
110
+ url = f"{base_url}/voice"
111
+
112
+ res = requests.post(url=url, data=m, headers=headers, stream=True)
113
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
114
+ if save_path is not None:
115
+ path = os.path.join(save_path, fname)
116
+ else:
117
+ path = os.path.join(absolute_path, fname)
118
+ audio = res.content
119
+
120
+ def get_file_size_from_bytes(byte_data):
121
+ file_size_offset = 4
122
+ file_size_length = 4
123
+
124
+ try:
125
+ file_size_bytes = byte_data[file_size_offset:file_size_offset + file_size_length]
126
+ file_size = int.from_bytes(file_size_bytes, byteorder='little')
127
+ return file_size + 8
128
+ except IndexError:
129
+ return None
130
+
131
+ audio = None
132
+ p = 0
133
+ audio_size = None
134
+ audios = []
135
+
136
+ for chunk in res.iter_content(chunk_size=1024):
137
+ if audio is None:
138
+ audio = chunk
139
+ else:
140
+ audio += chunk
141
+
142
+ p += len(chunk)
143
+ if audio_size is not None:
144
+ if p >= audio_size:
145
+ p = p - audio_size
146
+ audios.append(audio[:audio_size])
147
+ audio = audio[audio_size:]
148
+ audio_size = get_file_size_from_bytes(audio)
149
+ else:
150
+ audio_size = get_file_size_from_bytes(audio)
151
+ for i, audio in enumerate(audios):
152
+ with open(f"{path[:-4]}-{i}.wav", "wb") as f:
153
+ f.write(audio)
154
+
155
+ print(f"{path[:-4]}-{i}.wav")
156
+ return path
157
+
158
+
159
+ # 语音转换 hubert-vits
160
+ def voice_hubert_vits(upload_path, id, format="wav", length=1, noise=0.667, noisew=0.8, save_audio=True,
161
+ save_path=None):
162
+ upload_name = os.path.basename(upload_path)
163
+ upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
164
+
165
+ with open(upload_path, 'rb') as upload_file:
166
+ fields = {
167
+ "upload": (upload_name, upload_file, upload_type),
168
+ "id": str(id),
169
+ "format": format,
170
+ "length": str(length),
171
+ "noise": str(noise),
172
+ "noisew": str(noisew),
173
+ }
174
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
175
+
176
+ m = MultipartEncoder(fields=fields, boundary=boundary)
177
+ headers = {"Content-Type": m.content_type}
178
+ url = f"{base_url}/voice/hubert-vits"
179
+
180
+ res = requests.post(url=url, data=m, headers=headers)
181
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
182
+ if save_path is not None:
183
+ path = os.path.join(save_path, fname)
184
+ else:
185
+ path = os.path.join(absolute_path, fname)
186
+ if save_audio:
187
+ with open(path, "wb") as f:
188
+ f.write(res.content)
189
+ print(path)
190
+ return path
191
+ return None
192
+
193
+
194
+ # 维度情感模型 w2v2-vits
195
+ def voice_w2v2_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, emotion=0,
196
+ save_audio=True, save_path=None):
197
+ fields = {
198
+ "text": text,
199
+ "id": str(id),
200
+ "format": format,
201
+ "lang": lang,
202
+ "length": str(length),
203
+ "noise": str(noise),
204
+ "noisew": str(noisew),
205
+ "max": str(max),
206
+ "emotion": str(emotion)
207
+ }
208
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
209
+
210
+ m = MultipartEncoder(fields=fields, boundary=boundary)
211
+ headers = {"Content-Type": m.content_type}
212
+ url = f"{base_url}/voice/w2v2-vits"
213
+
214
+ res = requests.post(url=url, data=m, headers=headers)
215
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
216
+ if save_path is not None:
217
+ path = os.path.join(save_path, fname)
218
+ else:
219
+ path = os.path.join(absolute_path, fname)
220
+ if save_audio:
221
+ with open(path, "wb") as f:
222
+ f.write(res.content)
223
+ print(path)
224
+ return path
225
+ return None
226
+
227
+
228
+ # 语音转换 同VITS模型内角色之间的音色转换
229
+ def voice_conversion(upload_path, original_id, target_id, save_audio=True, save_path=None):
230
+ upload_name = os.path.basename(upload_path)
231
+ upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
232
+
233
+ with open(upload_path, 'rb') as upload_file:
234
+ fields = {
235
+ "upload": (upload_name, upload_file, upload_type),
236
+ "original_id": str(original_id),
237
+ "target_id": str(target_id),
238
+ }
239
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
240
+ m = MultipartEncoder(fields=fields, boundary=boundary)
241
+
242
+ headers = {"Content-Type": m.content_type}
243
+ url = f"{base_url}/voice/conversion"
244
+
245
+ res = requests.post(url=url, data=m, headers=headers)
246
+
247
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
248
+ if save_path is not None:
249
+ path = os.path.join(save_path, fname)
250
+ else:
251
+ path = os.path.join(absolute_path, fname)
252
+
253
+ if save_audio:
254
+ with open(path, "wb") as f:
255
+ f.write(res.content)
256
+ print(path)
257
+ return path
258
+ return None
259
+
260
+
261
+ def voice_ssml(ssml, save_audio=True, save_path=None):
262
+ fields = {
263
+ "ssml": ssml,
264
+ }
265
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
266
+
267
+ m = MultipartEncoder(fields=fields, boundary=boundary)
268
+ headers = {"Content-Type": m.content_type}
269
+ url = f"{base_url}/voice/ssml"
270
+
271
+ res = requests.post(url=url, data=m, headers=headers)
272
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
273
+ if save_path is not None:
274
+ path = os.path.join(save_path, fname)
275
+ else:
276
+ path = os.path.join(absolute_path, fname)
277
+
278
+ if save_audio:
279
+ with open(path, "wb") as f:
280
+ f.write(res.content)
281
+ print(path)
282
+ return path
283
+ return None
284
+
285
+
286
+ def voice_dimensional_emotion(upload_path, save_audio=True,
287
+ save_path=None):
288
+ upload_name = os.path.basename(upload_path)
289
+ upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
290
+
291
+ with open(upload_path, 'rb') as upload_file:
292
+ fields = {
293
+ "upload": (upload_name, upload_file, upload_type),
294
+ }
295
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
296
+
297
+ m = MultipartEncoder(fields=fields, boundary=boundary)
298
+ headers = {"Content-Type": m.content_type}
299
+ url = f"{base_url}/voice/dimension-emotion"
300
+
301
+ res = requests.post(url=url, data=m, headers=headers)
302
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
303
+ if save_path is not None:
304
+ path = os.path.join(save_path, fname)
305
+ else:
306
+ path = os.path.join(absolute_path, fname)
307
+ if save_audio:
308
+ with open(path, "wb") as f:
309
+ f.write(res.content)
310
+ print(path)
311
+ return path
312
+ return None
313
+
314
+
315
+ def vits_json(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50,
316
+ save_path=None):
317
+ fields = {
318
+ "text": text,
319
+ "id": str(id),
320
+ "format": format,
321
+ "lang": lang,
322
+ "length": str(length),
323
+ "noise": str(noise),
324
+ "noisew": str(noisew),
325
+ "max": str(max)
326
+ }
327
+ f = json.dumps(fields)
328
+ url = f"{base_url}/voice"
329
+ header = {"Content-Type": 'application/json'}
330
+ res = requests.post(url=url, data=f, headers=header)
331
+
332
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
333
+ if save_path is not None:
334
+ path = os.path.join(save_path, fname)
335
+ else:
336
+ path = os.path.join(absolute_path, fname)
337
+
338
+ with open(path, "wb") as f:
339
+ f.write(res.content)
340
+ print(path)
341
+ return path
342
+
343
+
344
+ # Bert_vits2
345
+ def voice_bert_vits2(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, sdp_ratio=0.2,
346
+ save_audio=True, save_path=None):
347
+ fields = {
348
+ "text": text,
349
+ "id": str(id),
350
+ "format": format,
351
+ "lang": lang,
352
+ "length": str(length),
353
+ "noise": str(noise),
354
+ "noisew": str(noisew),
355
+ "max": str(max),
356
+ "sdp_ratio": str(sdp_ratio)
357
+ }
358
+ boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
359
+
360
+ m = MultipartEncoder(fields=fields, boundary=boundary)
361
+ headers = {"Content-Type": m.content_type}
362
+ url = f"{base_url}/voice/bert-vits2"
363
+
364
+ res = requests.post(url=url, data=m, headers=headers)
365
+ fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
366
+ if save_path is not None:
367
+ path = os.path.join(save_path, fname)
368
+ else:
369
+ path = os.path.join(absolute_path, fname)
370
+ if save_audio:
371
+ with open(path, "wb") as f:
372
+ f.write(res.content)
373
+ print(path)
374
+ return path
375
+ return None
376
+
377
+
378
+ def test_interface(text):
379
+ error_num = 0
380
+ for i in range(100):
381
+ try:
382
+ time.sleep(1)
383
+ t1 = time.time()
384
+ voice_vits(text, format="wav", lang="zh", save_audio=False)
385
+ t2 = time.time()
386
+ print(f"{i}:len:{len(text)}耗时:{t2 - t1}")
387
+ except Exception as e:
388
+ error_num += 1
389
+ print(e)
390
+ print(f"error_num={error_num}")
391
+
392
+
393
+ if __name__ == '__main__':
394
+ text = "你好,こんにちは"
395
+
396
+ ssml = """
397
+ <speak lang="zh" format="mp3" length="1.2">
398
+ <voice id="92" >这几天心里颇不宁静。</voice>
399
+ <voice id="125">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
400
+ <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
401
+ <voice id="98">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
402
+ <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
403
+ <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
404
+ <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
405
+ <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
406
+ <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
407
+ <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
408
+ <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
409
+ <voice id="127">路上只我一个人,背着手踱着。</voice>
410
+ <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
411
+ <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
412
+ <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
413
+ <voice id="131">白天里一定要做的事,一定要说的话,现在都可不理。</voice>
414
+ <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
415
+ </speak>
416
+ """
417
+
418
+ from config import CACHE_PATH
419
+
420
+ path = voice_vits(text, save_path=CACHE_PATH)
421
+ voice_vits_streaming(text, save_path=CACHE_PATH)
422
+ voice_w2v2_vits(text, save_path=CACHE_PATH)
423
+ voice_conversion(path, 1, 3, save_path=CACHE_PATH)
424
+ voice_hubert_vits(path, 0, save_path=CACHE_PATH)
425
+ voice_dimensional_emotion(path, save_path=CACHE_PATH)
426
+ voice_ssml(ssml, save_path=CACHE_PATH)
427
+ voice_bert_vits2("你好", lang="zh", save_path=CACHE_PATH)
428
+ voice_bert_vits2("こんにちは", lang="ja", save_path=CACHE_PATH)
429
+ # os.system(path)
app.py ADDED
@@ -0,0 +1,544 @@
1
+ import os
2
+ import time
3
+ import uuid
4
+ from logger import logger
5
+ from flask import Flask, request, send_file, jsonify, make_response, render_template
6
+ from werkzeug.utils import secure_filename
7
+ from flask_apscheduler import APScheduler
8
+ from functools import wraps
9
+ from utils.data_utils import save_audio, clean_folder, check_is_none
10
+ from utils.load_model import load_model
11
+ from io import BytesIO
12
+
13
+ app = Flask(__name__)
14
+ app.config.from_pyfile("config.py")
15
+
16
+ scheduler = APScheduler()
17
+ scheduler.init_app(app)
18
+ if app.config.get("CLEAN_INTERVAL_SECONDS", 3600) > 0:
19
+ scheduler.start()
20
+
21
+ for path in (app.config['LOGS_PATH'], app.config['UPLOAD_FOLDER'], app.config['CACHE_PATH']):
22
+ try:
23
+ os.makedirs(path, exist_ok=True)
24
+ except Exception as e:
25
+ logger.error(f"Unable to create directory {path}: {str(e)}")
26
+
27
+ # load model
28
+ tts = load_model(app.config["MODEL_LIST"])
29
+
30
+
31
+ def require_api_key(func):
32
+ @wraps(func)
33
+ def check_api_key(*args, **kwargs):
34
+ if not app.config.get('API_KEY_ENABLED', False):
35
+ return func(*args, **kwargs)
36
+ else:
37
+ api_key = request.args.get('api_key') or request.headers.get('X-API-KEY')
38
+ if api_key and api_key == app.config['API_KEY']:
39
+ return func(*args, **kwargs)
40
+ else:
41
+ return make_response(jsonify({"status": "error", "message": "Invalid API Key"}), 401)
42
+
43
+ return check_api_key
44
+
45
+
46
+ @app.route('/', methods=["GET", "POST"])
47
+ def index():
48
+ kwargs = {
49
+ "speakers": tts.voice_speakers,
50
+ "speakers_count": tts.speakers_count,
51
+ "vits_speakers_count": tts.vits_speakers_count,
52
+ "w2v2_speakers_count": tts.w2v2_speakers_count,
53
+ "w2v2_emotion_count": tts.w2v2_emotion_count,
54
+ "bert_vits2_speakers_count": tts.bert_vits2_speakers_count
55
+ }
56
+ return render_template("index.html", **kwargs)
57
+
58
+
59
+ @app.route('/voice/speakers', methods=["GET", "POST"])
60
+ def voice_speakers_api():
61
+ return jsonify(tts.voice_speakers)
62
+
63
+
64
+ @app.route('/voice', methods=["GET", "POST"])
65
+ @app.route('/voice/vits', methods=["GET", "POST"])
66
+ @require_api_key
67
+ def voice_vits_api():
68
+ try:
69
+ if request.method == "GET":
70
+ request_data = request.args
71
+ elif request.method == "POST":
72
+ content_type = request.headers.get('Content-Type')
73
+ if content_type == 'application/json':
74
+ request_data = request.get_json()
75
+ else:
76
+ request_data = request.form
77
+
78
+ text = request_data.get("text", "")
79
+ id = int(request_data.get("id", app.config.get("ID", 0)))
80
+ format = request_data.get("format", app.config.get("FORMAT", "wav"))
81
+ lang = request_data.get("lang", app.config.get("LANG", "auto")).lower()
82
+ length = float(request_data.get("length", app.config.get("LENGTH", 1)))
83
+ noise = float(request_data.get("noise", app.config.get("NOISE", 0.667)))
84
+ noisew = float(request_data.get("noisew", app.config.get("NOISEW", 0.8)))
85
+ max = int(request_data.get("max", app.config.get("MAX", 50)))
86
+ use_streaming = request_data.get('streaming', False, type=bool)
87
+ except Exception as e:
88
+ logger.error(f"[VITS] {e}")
89
+ return make_response("parameter error", 400)
90
+
91
+ logger.info(f"[VITS] id:{id} format:{format} lang:{lang} length:{length} noise:{noise} noisew:{noisew}")
92
+ logger.info(f"[VITS] len:{len(text)} text:{text}")
93
+
94
+ if check_is_none(text):
95
+ logger.info(f"[VITS] text is empty")
96
+ return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)
97
+
98
+ if check_is_none(id):
99
+ logger.info(f"[VITS] speaker id is empty")
100
+ return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
101
+
102
+ if id < 0 or id >= tts.vits_speakers_count:
103
+ logger.info(f"[VITS] speaker id {id} does not exist")
104
+ return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
105
+
106
+ # 校验模型是否支持输入的语言
107
+ speaker_lang = tts.voice_speakers["VITS"][id].get('lang')
108
+ if lang not in ["auto", "mix"] and len(speaker_lang) != 1 and lang not in speaker_lang:
109
+ logger.info(f"[VITS] lang \"{lang}\" is not in {speaker_lang}")
110
+ return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)
111
+
112
+ # 如果配置文件中设置了LANGUAGE_AUTOMATIC_DETECT则强制将speaker_lang设置为LANGUAGE_AUTOMATIC_DETECT
113
+ if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
114
+ speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")
115
+
116
+ if use_streaming and format.upper() != "MP3":
117
+ format = "mp3"
118
+ logger.warning("Streaming response only supports MP3 format.")
119
+
120
+ fname = f"{str(uuid.uuid1())}.{format}"
121
+ file_type = f"audio/{format}"
122
+ task = {"text": text,
123
+ "id": id,
124
+ "format": format,
125
+ "length": length,
126
+ "noise": noise,
127
+ "noisew": noisew,
128
+ "max": max,
129
+ "lang": lang,
130
+ "speaker_lang": speaker_lang}
131
+
132
+ if use_streaming:
133
+ audio = tts.stream_vits_infer(task)
134
+ response = make_response(audio)
135
+ response.headers['Content-Disposition'] = f'attachment; filename={fname}'
136
+ response.headers['Content-Type'] = file_type
137
+ return response
138
+ else:
139
+ t1 = time.time()
140
+ audio = tts.vits_infer(task)
141
+ t2 = time.time()
142
+ logger.info(f"[VITS] finish in {(t2 - t1):.2f}s")
143
+
144
+ if app.config.get("SAVE_AUDIO", False):
145
+ logger.debug(f"[VITS] {fname}")
146
+ path = os.path.join(app.config.get('CACHE_PATH'), fname)
147
+ save_audio(audio.getvalue(), path)
148
+
149
+ return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
150
+
151
+
152
+ @app.route('/voice/hubert-vits', methods=["POST"])
153
+ @require_api_key
154
+ def voice_hubert_api():
155
+ if request.method == "POST":
156
+ try:
157
+ voice = request.files['upload']
158
+ id = int(request.form.get("id"))
159
+ format = request.form.get("format", app.config.get("LANG", "auto"))
160
+ length = float(request.form.get("length", app.config.get("LENGTH", 1)))
161
+ noise = float(request.form.get("noise", app.config.get("NOISE", 0.667)))
162
+ noisew = float(request.form.get("noisew", app.config.get("NOISEW", 0.8)))
163
+ use_streaming = request.form.get('streaming', False, type=bool)
164
+ except Exception as e:
165
+ logger.error(f"[hubert] {e}")
166
+ return make_response("parameter error", 400)
167
+
168
+ logger.info(f"[hubert] id:{id} format:{format} length:{length} noise:{noise} noisew:{noisew}")
169
+
170
+ fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
171
+ voice.save(os.path.join(app.config['UPLOAD_FOLDER'], fname))
172
+
173
+ if check_is_none(id):
174
+ logger.info(f"[hubert] speaker id is empty")
175
+ return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
176
+
177
+ if id < 0 or id >= tts.hubert_speakers_count:
178
+ logger.info(f"[hubert] speaker id {id} does not exist")
179
+ return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
180
+
181
+ file_type = f"audio/{format}"
182
+ task = {"id": id,
183
+ "format": format,
184
+ "length": length,
185
+ "noise": noise,
186
+ "noisew": noisew,
187
+ "audio_path": os.path.join(app.config['UPLOAD_FOLDER'], fname)}
188
+
189
+ t1 = time.time()
190
+ audio = tts.hubert_vits_infer(task)
191
+ t2 = time.time()
192
+ logger.info(f"[hubert] finish in {(t2 - t1):.2f}s")
193
+
194
+ if app.config.get("SAVE_AUDIO", False):
195
+ logger.debug(f"[hubert] {fname}")
196
+ path = os.path.join(app.config.get('CACHE_PATH'), fname)
197
+ save_audio(audio.getvalue(), path)
198
+
199
+ if use_streaming:
200
+ audio = tts.generate_audio_chunks(audio)
201
+ response = make_response(audio)
202
+ response.headers['Content-Disposition'] = f'attachment; filename={fname}'
203
+ response.headers['Content-Type'] = file_type
204
+ return response
205
+ else:
206
+ return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
207
+
208
+
209
+ @app.route('/voice/w2v2-vits', methods=["GET", "POST"])
210
+ @require_api_key
211
+ def voice_w2v2_api():
212
+ try:
213
+ if request.method == "GET":
214
+ request_data = request.args
215
+ elif request.method == "POST":
216
+ content_type = request.headers.get('Content-Type')
217
+ if content_type == 'application/json':
218
+ request_data = request.get_json()
219
+ else:
220
+ request_data = request.form
221
+
222
+ text = request_data.get("text", "")
223
+ id = int(request_data.get("id", app.config.get("ID", 0)))
224
+ format = request_data.get("format", app.config.get("FORMAT", "wav"))
225
+ lang = request_data.get("lang", app.config.get("LANG", "auto")).lower()
226
+ length = float(request_data.get("length", app.config.get("LENGTH", 1)))
227
+ noise = float(request_data.get("noise", app.config.get("NOISE", 0.667)))
228
+ noisew = float(request_data.get("noisew", app.config.get("NOISEW", 0.8)))
229
+ max = int(request_data.get("max", app.config.get("MAX", 50)))
230
+ emotion = int(request_data.get("emotion", app.config.get("EMOTION", 0)))
231
+ use_streaming = request_data.get('streaming', False, type=bool)
232
+ except Exception as e:
233
+ logger.error(f"[w2v2] {e}")
234
+ return make_response(f"parameter error", 400)
235
+
236
+ logger.info(f"[w2v2] id:{id} format:{format} lang:{lang} "
237
+ f"length:{length} noise:{noise} noisew:{noisew} emotion:{emotion}")
238
+ logger.info(f"[w2v2] len:{len(text)} text:{text}")
239
+
240
+ if check_is_none(text):
241
+ logger.info(f"[w2v2] text is empty")
242
+ return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)
243
+
244
+ if check_is_none(id):
245
+ logger.info(f"[w2v2] speaker id is empty")
246
+ return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
247
+
248
+ if id < 0 or id >= tts.w2v2_speakers_count:
249
+ logger.info(f"[w2v2] speaker id {id} does not exist")
250
+ return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
251
+
252
+ # 校验模型是否支持输入的语言
253
+ speaker_lang = tts.voice_speakers["W2V2-VITS"][id].get('lang')
254
+ if lang not in ["auto", "mix"] and len(speaker_lang) != 1 and lang not in speaker_lang:
255
+ logger.info(f"[w2v2] lang \"{lang}\" is not in {speaker_lang}")
256
+ return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)
257
+
258
+ # 如果配置文件中设置了LANGUAGE_AUTOMATIC_DETECT则强制将speaker_lang设置为LANGUAGE_AUTOMATIC_DETECT
259
+ if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
260
+ speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")
261
+
262
+ if use_streaming and format.upper() != "MP3":
263
+ format = "mp3"
264
+ logger.warning("Streaming response only supports MP3 format.")
265
+
266
+ fname = f"{str(uuid.uuid1())}.{format}"
267
+ file_type = f"audio/{format}"
268
+ task = {"text": text,
269
+ "id": id,
270
+ "format": format,
271
+ "length": length,
272
+ "noise": noise,
273
+ "noisew": noisew,
274
+ "max": max,
275
+ "lang": lang,
276
+ "emotion": emotion,
277
+ "speaker_lang": speaker_lang}
278
+
279
+ t1 = time.time()
280
+ audio = tts.w2v2_vits_infer(task)
281
+ t2 = time.time()
282
+ logger.info(f"[w2v2] finish in {(t2 - t1):.2f}s")
283
+
284
+ if app.config.get("SAVE_AUDIO", False):
285
+ logger.debug(f"[w2v2] {fname}")
286
+ path = os.path.join(app.config.get('CACHE_PATH'), fname)
287
+ save_audio(audio.getvalue(), path)
288
+
289
+ if use_streaming:
290
+ audio = tts.generate_audio_chunks(audio)
291
+ response = make_response(audio)
292
+ response.headers['Content-Disposition'] = f'attachment; filename={fname}'
293
+ response.headers['Content-Type'] = file_type
294
+ return response
295
+ else:
296
+ return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
297
+
298
+
299
+ @app.route('/voice/conversion', methods=["POST"])
300
+ @app.route('/voice/vits/conversion', methods=["POST"])
301
+ @require_api_key
302
+ def vits_voice_conversion_api():
303
+ if request.method == "POST":
304
+ try:
305
+ voice = request.files['upload']
306
+ original_id = int(request.form["original_id"])
307
+ target_id = int(request.form["target_id"])
308
+ format = request.form.get("format", voice.filename.split(".")[1])
309
+ use_streaming = request.form.get('streaming', False, type=bool)
310
+ except Exception as e:
311
+ logger.error(f"[vits_voice_convertsion] {e}")
312
+ return make_response("parameter error", 400)
313
+
314
+ logger.info(f"[vits_voice_convertsion] orginal_id:{original_id} target_id:{target_id}")
315
+ fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
316
+ audio_path = os.path.join(app.config['UPLOAD_FOLDER'], fname)
317
+ voice.save(audio_path)
318
+ file_type = f"audio/{format}"
319
+ task = {"audio_path": audio_path,
320
+ "original_id": original_id,
321
+ "target_id": target_id,
322
+ "format": format}
323
+
324
+ t1 = time.time()
325
+ audio = tts.vits_voice_conversion(task)
326
+ t2 = time.time()
327
+ logger.info(f"[Voice conversion] finish in {(t2 - t1):.2f}s")
328
+
329
+ if app.config.get("SAVE_AUDIO", False):
330
+ logger.debug(f"[Voice conversion] {fname}")
331
+ path = os.path.join(app.config.get('CACHE_PATH'), fname)
332
+ save_audio(audio.getvalue(), path)
333
+
334
+ if use_streaming:
335
+ audio = tts.generate_audio_chunks(audio)
336
+ response = make_response(audio)
337
+ response.headers['Content-Disposition'] = f'attachment; filename={fname}'
338
+ response.headers['Content-Type'] = file_type
339
+ return response
340
+ else:
341
+ return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
342
+
343
+
344
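A quick way to exercise the conversion endpoint from Python (a sketch; it assumes a local server on port 23456 and a WAV clip recorded at the sample rate the loaded VITS model expects):

    import requests

    with open("sample.wav", "rb") as f:
        r = requests.post("http://127.0.0.1:23456/voice/conversion",
                          files={"upload": ("sample.wav", f, "audio/wav")},
                          data={"original_id": 0, "target_id": 1})
    with open("converted.wav", "wb") as f:
        f.write(r.content)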
+@app.route('/voice/ssml', methods=["POST"])
+@require_api_key
+def ssml_api():
+    try:
+        content_type = request.headers.get('Content-Type')
+        if content_type == 'application/json':
+            request_data = request.get_json()
+        else:
+            request_data = request.form
+
+        ssml = request_data.get("ssml")
+    except Exception as e:
+        logger.info(f"[ssml] {e}")
+        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)
+
+    logger.debug(ssml)
+    voice_tasks, format = tts.parse_ssml(ssml)
+    fname = f"{str(uuid.uuid1())}.{format}"
+    file_type = f"audio/{format}"
+
+    t1 = time.time()
+    audio = tts.create_ssml_infer_task(voice_tasks, format)
+    t2 = time.time()
+    logger.info(f"[ssml] finish in {(t2 - t1):.2f}s")
+
+    if app.config.get("SAVE_AUDIO", False):
+        logger.debug(f"[ssml] {fname}")
+        path = os.path.join(app.config.get('CACHE_PATH'), fname)
+        save_audio(audio.getvalue(), path)
+
+    return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
+
+
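The handler accepts the SSML document as a form field or JSON key named ssml. A sketch of a JSON call (the speak/voice markup shown is illustrative only; the tags actually accepted are defined by tts.parse_ssml):

    import requests

    ssml = '<speak><voice id="0">你好</voice></speak>'  # illustrative markup
    r = requests.post("http://127.0.0.1:23456/voice/ssml", json={"ssml": ssml})
    with open("ssml_out.wav", "wb") as f:
        f.write(r.content)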
+@app.route('/voice/dimension-emotion', methods=["POST"])
+@require_api_key
+def dimensional_emotion():
+    if request.method == "POST":
+        try:
+            audio = request.files['upload']
+            use_streaming = request.form.get('streaming', False, type=bool)
+        except Exception as e:
+            logger.error(f"[dimensional_emotion] {e}")
+            return make_response("parameter error", 400)
+
+        content = BytesIO(audio.read())
+
+        file_type = "application/octet-stream; charset=ascii"
+        fname = os.path.splitext(audio.filename)[0] + ".npy"
+        emotion_npy = tts.get_dimensional_emotion_npy(content)
+        if use_streaming:
+            emotion_npy = tts.generate_audio_chunks(emotion_npy)
+            response = make_response(emotion_npy)
+            response.headers['Content-Disposition'] = f'attachment; filename={fname}'
+            response.headers['Content-Type'] = file_type
+            return response
+        else:
+            return send_file(path_or_file=emotion_npy, mimetype=file_type, download_name=fname)
+
+
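To precompute the dimensional-emotion embedding for a reference clip (sketch; assumes a local server), upload the audio and save the returned .npy, which can then be passed back as the emotion reference for W2V2-VITS synthesis:

    import requests

    with open("reference.wav", "rb") as f:
        r = requests.post("http://127.0.0.1:23456/voice/dimension-emotion",
                          files={"upload": ("reference.wav", f, "audio/wav")})
    with open("reference.npy", "wb") as f:
        f.write(r.content)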
+@app.route('/voice/bert-vits2', methods=["GET", "POST"])
+@require_api_key
+def voice_bert_vits2_api():
+    try:
+        if request.method == "GET":
+            request_data = request.args
+        elif request.method == "POST":
+            content_type = request.headers.get('Content-Type')
+            if content_type == 'application/json':
+                request_data = request.get_json()
+            else:
+                request_data = request.form
+
+        text = request_data.get("text", "")
+        id = int(request_data.get("id", app.config.get("ID", 0)))
+        format = request_data.get("format", app.config.get("FORMAT", "wav"))
+        lang = request_data.get("lang", "auto").lower()
+        length = float(request_data.get("length", app.config.get("LENGTH", 1)))
+        noise = float(request_data.get("noise", app.config.get("NOISE", 0.667)))
+        noisew = float(request_data.get("noisew", app.config.get("NOISEW", 0.8)))
+        sdp_ratio = float(request_data.get("sdp_ratio", app.config.get("SDP_RATIO", 0.2)))
+        max = int(request_data.get("max", app.config.get("MAX", 50)))
+    except Exception as e:
+        logger.error(f"[Bert-VITS2] {e}")
+        return make_response("parameter error", 400)
+
+    logger.info(
+        f"[Bert-VITS2] id:{id} format:{format} lang:{lang} length:{length} noise:{noise} noisew:{noisew} sdp_ratio:{sdp_ratio}")
+    logger.info(f"[Bert-VITS2] len:{len(text)} text:{text}")
+
+    if check_is_none(text):
+        logger.info("[Bert-VITS2] text is empty")
+        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)
+
+    if check_is_none(id):
+        logger.info("[Bert-VITS2] speaker id is empty")
+        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
+
+    if id < 0 or id >= tts.bert_vits2_speakers_count:
+        logger.info(f"[Bert-VITS2] speaker id {id} does not exist")
+        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
+
+    # Check whether the model supports the requested language
+    speaker_lang = tts.voice_speakers["BERT-VITS2"][id].get('lang')
+    if lang not in ["auto", "mix"] and len(speaker_lang) != 1 and lang not in speaker_lang:
+        logger.info(f"[Bert-VITS2] lang \"{lang}\" is not in {speaker_lang}")
+        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)
+
+    # If LANGUAGE_AUTOMATIC_DETECT is set in the config file, force speaker_lang to that value
+    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
+        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")
+
+    fname = f"{str(uuid.uuid1())}.{format}"
+    file_type = f"audio/{format}"
+    task = {"text": text,
+            "id": id,
+            "format": format,
+            "length": length,
+            "noise": noise,
+            "noisew": noisew,
+            "sdp_ratio": sdp_ratio,
+            "max": max,
+            "lang": lang,
+            "speaker_lang": speaker_lang}
+
+    t1 = time.time()
+    audio = tts.bert_vits2_infer(task)
+    t2 = time.time()
+    logger.info(f"[Bert-VITS2] finish in {(t2 - t1):.2f}s")
+
+    if app.config.get("SAVE_AUDIO", False):
+        logger.debug(f"[Bert-VITS2] {fname}")
+        path = os.path.join(app.config.get('CACHE_PATH'), fname)
+        save_audio(audio.getvalue(), path)
+
+    return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
+
+
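A matching client call for the Bert-VITS2 route (sketch; it assumes speaker 0 supports zh — sdp_ratio blends the stochastic and deterministic duration predictors, with higher values giving more varied pacing):

    import requests

    params = {"text": "你好,世界", "id": 0, "lang": "zh", "sdp_ratio": 0.2, "format": "wav"}
    r = requests.get("http://127.0.0.1:23456/voice/bert-vits2", params=params)
    with open("bert_vits2.wav", "wb") as f:
        f.write(r.content)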
+@app.route('/voice/check', methods=["GET", "POST"])
+def check():
+    try:
+        if request.method == "GET":
+            request_data = request.args
+        elif request.method == "POST":
+            content_type = request.headers.get('Content-Type')
+            if content_type == 'application/json':
+                request_data = request.get_json()
+            else:
+                request_data = request.form
+
+        model = request_data.get("model")
+        id = int(request_data.get("id"))
+    except Exception as e:
+        logger.info(f"[check] {e}")
+        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)
+
+    if check_is_none(model):
+        logger.info(f"[check] model {model} is empty")
+        return make_response(jsonify({"status": "error", "message": "model is empty"}), 400)
+
+    if model.upper() not in ("VITS", "HUBERT", "W2V2"):
+        res = make_response(jsonify({"status": "error", "message": f"model {model} does not exist"}))
+        res.status = 404
+        logger.info(f"[check] model {model} does not exist")
+        return res
+
+    if check_is_none(id):
+        logger.info("[check] speaker id is empty")
+        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
+
+    if model.upper() == "VITS":
+        speaker_list = tts.voice_speakers["VITS"]
+    elif model.upper() == "HUBERT":
+        speaker_list = tts.voice_speakers["HUBERT-VITS"]
+    elif model.upper() == "W2V2":
+        speaker_list = tts.voice_speakers["W2V2-VITS"]
+
+    if len(speaker_list) == 0:
+        logger.info(f"[check] {model} not loaded")
+        return make_response(jsonify({"status": "error", "message": f"{model} not loaded"}), 400)
+
+    if id < 0 or id >= len(speaker_list):
+        logger.info(f"[check] speaker id {id} does not exist")
+        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
+
+    name = str(speaker_list[id]["name"])
+    lang = speaker_list[id]["lang"]
+    logger.info(f"[check] check id:{id} name:{name} lang:{lang}")
+
+    return make_response(jsonify({"status": "success", "id": id, "name": name, "lang": lang}), 200)
+
+
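The check route makes a cheap preflight before synthesis (sketch):

    import requests

    r = requests.get("http://127.0.0.1:23456/voice/check", params={"model": "vits", "id": 0})
    print(r.json())  # e.g. {"status": "success", "id": 0, "name": "...", "lang": [...]}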
+# regular cleaning
+@scheduler.task('interval', id='clean_task', seconds=app.config.get("CLEAN_INTERVAL_SECONDS", 3600),
+                misfire_grace_time=900)
+def clean_task():
+    clean_folder(app.config["UPLOAD_FOLDER"])
+    clean_folder(app.config["CACHE_PATH"])
+
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=app.config.get("PORT", 23456), debug=app.config.get("DEBUG", False))  # listen on all interfaces
+    # app.run(host='127.0.0.1', port=app.config.get("PORT", 23456), debug=True)  # local run / debugging
bert_vits2/LICENSE ADDED
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+ gratis or for a fee, you must pass on to the recipients the same
+ freedoms that you received. You must make sure that they, too, receive
+ or can get the source code. And you must show them these terms so they
+ know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+ (1) assert copyright on the software, and (2) offer you this License
+ giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+ that there is no warranty for this free software. For both users' and
+ authors' sake, the GPL requires that modified versions be marked as
+ changed, so that their problems will not be attributed erroneously to
+ authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+ modified versions of the software inside them, although the manufacturer
+ can do so. This is fundamentally incompatible with the aim of
+ protecting users' freedom to change the software. The systematic
+ pattern of such abuse occurs in the area of products for individuals to
+ use, which is precisely where it is most unacceptable. Therefore, we
+ have designed this version of the GPL to prohibit the practice for those
+ products. If such problems arise substantially in other domains, we
+ stand ready to extend this provision to those domains in future versions
+ of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+ States should not allow patents to restrict development and use of
+ software on general-purpose computers, but in those that do, we wish to
+ avoid the special danger that patents applied to a free program could
+ make it effectively proprietary. To prevent this, the GPL assures that
+ patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU Affero General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the special requirements of the GNU Affero General Public License,
+ section 13, concerning interaction through a network will apply to the
+ combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU General Public License from time to time. Such new versions will
+ be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+ notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License. Of course, your program's commands
+ might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU GPL, see
+ <https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+ into proprietary programs. If your program is a subroutine library, you
+ may consider it more useful to permit linking proprietary applications with
+ the library. If this is what you want to do, use the GNU Lesser General
+ Public License instead of this License. But first, please read
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
bert_vits2/README.md ADDED
@@ -0,0 +1,5 @@
+ # Bert-VITS2
+
+ VITS2 Backbone with bert
+ ## Seasoned Travelers / Trailblazers / Captains / Doctors / sensei / Witchers / Miaomiaolu / V should read the code and learn how to train by themselves.
+ ### It is strictly forbidden to use this project for any purpose that violates the Constitution of the People's Republic of China, the Criminal Law of the People's Republic of China, the Law of the People's Republic of China on Penalties for Administration of Public Security, or the Civil Code of the People's Republic of China.
bert_vits2/__init__.py ADDED
@@ -0,0 +1,2 @@
+from bert_vits2.bert_vits2 import Bert_VITS2
+from bert_vits2 import text
bert_vits2/attentions.py ADDED
@@ -0,0 +1,352 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+from bert_vits2 import commons
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+    n_channels_int = n_channels[0]
+    in_act = input_a + input_b
+    t_act = torch.tanh(in_act[:, :n_channels_int, :])
+    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+    acts = t_act * s_act
+    return acts
+
+
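fused_add_tanh_sigmoid_multiply is the WaveNet-style gated activation: the summed pre-activations are split channel-wise into a tanh half and a sigmoid gate. A minimal check of the contract (note that n_channels is passed as a 1-element IntTensor so the function stays TorchScript-friendly):

    import torch

    a = torch.randn(2, 8, 16)  # [batch, 2 * n_channels, time]
    b = torch.randn(2, 8, 16)
    out = fused_add_tanh_sigmoid_multiply(a, b, torch.IntTensor([4]))
    ref = torch.tanh((a + b)[:, :4]) * torch.sigmoid((a + b)[:, 4:])
    assert torch.allclose(out, ref)  # out has shape [2, 4, 16]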
+class Encoder(nn.Module):
+    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4,
+                 isflow=True, **kwargs):
+        super().__init__()
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+        # if isflow:
+        #     cond_layer = torch.nn.Conv1d(256, 2 * hidden_channels * n_layers, 1)
+        #     self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
+        #     self.cond_layer = weight_norm(cond_layer, name='weight')
+        #     self.gin_channels = 256
+        self.cond_layer_idx = self.n_layers
+        if 'gin_channels' in kwargs:
+            self.gin_channels = kwargs['gin_channels']
+            if self.gin_channels != 0:
+                self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
+                # vits2 says 3rd block, so idx is 2 by default
+                self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2
+                # print(self.gin_channels, self.cond_layer_idx)
+                assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers'
+        self.drop = nn.Dropout(p_dropout)
+        self.attn_layers = nn.ModuleList()
+        self.norm_layers_1 = nn.ModuleList()
+        self.ffn_layers = nn.ModuleList()
+        self.norm_layers_2 = nn.ModuleList()
+        for i in range(self.n_layers):
+            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout,
+                                                       window_size=window_size))
+            self.norm_layers_1.append(LayerNorm(hidden_channels))
+            self.ffn_layers.append(
+                FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
+            self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+    def forward(self, x, x_mask, g=None):
+        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            if i == self.cond_layer_idx and g is not None:
+                g = self.spk_emb_linear(g.transpose(1, 2))
+                g = g.transpose(1, 2)
+                x = x + g
+                x = x * x_mask
+            y = self.attn_layers[i](x, x, attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
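As the comment above notes, VITS2 injects the speaker embedding g at the third transformer block (cond_layer_idx defaults to 2): g is projected from gin_channels to hidden_channels and added to the hidden states before that block's attention. A shape sketch with hypothetical sizes (run with the full module in scope, since Encoder also needs the FFN class defined later in this file):

    import torch

    enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6,
                  kernel_size=3, p_dropout=0.1, gin_channels=256)
    x = torch.randn(1, 192, 50)   # [batch, hidden_channels, tokens]
    x_mask = torch.ones(1, 1, 50)
    g = torch.randn(1, 256, 1)    # speaker embedding from the speaker lookup table
    y = enc(x, x_mask, g=g)
    print(y.shape)                # torch.Size([1, 192, 50])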
+class Decoder(nn.Module):
+    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.,
+                 proximal_bias=False, proximal_init=True, **kwargs):
+        super().__init__()
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+
+        self.drop = nn.Dropout(p_dropout)
+        self.self_attn_layers = nn.ModuleList()
+        self.norm_layers_0 = nn.ModuleList()
+        self.encdec_attn_layers = nn.ModuleList()
+        self.norm_layers_1 = nn.ModuleList()
+        self.ffn_layers = nn.ModuleList()
+        self.norm_layers_2 = nn.ModuleList()
+        for i in range(self.n_layers):
+            self.self_attn_layers.append(
+                MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout,
+                                   proximal_bias=proximal_bias, proximal_init=proximal_init))
+            self.norm_layers_0.append(LayerNorm(hidden_channels))
+            self.encdec_attn_layers.append(
+                MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
+            self.norm_layers_1.append(LayerNorm(hidden_channels))
+            self.ffn_layers.append(
+                FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+            self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+    def forward(self, x, x_mask, h, h_mask):
+        """
+        x: decoder input
+        h: encoder output
+        """
+        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            y = self.self_attn_layers[i](x, x, self_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_0[i](x + y)
+
+            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
+class MultiHeadAttention(nn.Module):
+    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True,
+                 block_length=None, proximal_bias=False, proximal_init=False):
+        super().__init__()
+        assert channels % n_heads == 0
+
+        self.channels = channels
+        self.out_channels = out_channels
+        self.n_heads = n_heads
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+        self.heads_share = heads_share
+        self.block_length = block_length
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+        self.attn = None
+
+        self.k_channels = channels // n_heads
+        self.conv_q = nn.Conv1d(channels, channels, 1)
+        self.conv_k = nn.Conv1d(channels, channels, 1)
+        self.conv_v = nn.Conv1d(channels, channels, 1)
+        self.conv_o = nn.Conv1d(channels, out_channels, 1)
+        self.drop = nn.Dropout(p_dropout)
+
+        if window_size is not None:
+            n_heads_rel = 1 if heads_share else n_heads
+            rel_stddev = self.k_channels ** -0.5
+            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+        nn.init.xavier_uniform_(self.conv_q.weight)
+        nn.init.xavier_uniform_(self.conv_k.weight)
+        nn.init.xavier_uniform_(self.conv_v.weight)
+        if proximal_init:
+            with torch.no_grad():
+                self.conv_k.weight.copy_(self.conv_q.weight)
+                self.conv_k.bias.copy_(self.conv_q.bias)
+
+    def forward(self, x, c, attn_mask=None):
+        q = self.conv_q(x)
+        k = self.conv_k(c)
+        v = self.conv_v(c)
+
+        x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+        x = self.conv_o(x)
+        return x
+
+    def attention(self, query, key, value, mask=None):
+        # reshape [b, d, t] -> [b, n_h, t, d_k]
+        b, d, t_s, t_t = (*key.size(), query.size(2))
+        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+        if self.window_size is not None:
+            assert t_s == t_t, "Relative attention is only available for self-attention."
+            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
+            scores_local = self._relative_position_to_absolute_position(rel_logits)
+            scores = scores + scores_local
+        if self.proximal_bias:
+            assert t_s == t_t, "Proximal bias is only available for self-attention."
+            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+        if mask is not None:
+            scores = scores.masked_fill(mask == 0, -1e4)
+            if self.block_length is not None:
+                assert t_s == t_t, "Local attention is only available for self-attention."
+                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
+                scores = scores.masked_fill(block_mask == 0, -1e4)
+        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+        p_attn = self.drop(p_attn)
+        output = torch.matmul(p_attn, value)
+        if self.window_size is not None:
+            relative_weights = self._absolute_position_to_relative_position(p_attn)
+            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
+        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+        return output, p_attn
+
+    def _matmul_with_relative_values(self, x, y):
+        """
+        x: [b, h, l, m]
+        y: [h or 1, m, d]
+        ret: [b, h, l, d]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0))
+        return ret
+
+    def _matmul_with_relative_keys(self, x, y):
+        """
+        x: [b, h, l, d]
+        y: [h or 1, m, d]
+        ret: [b, h, l, m]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+        return ret
+
+    def _get_relative_embeddings(self, relative_embeddings, length):
+        max_relative_position = 2 * self.window_size + 1
+        # Pad first before slice to avoid using cond ops.
+        pad_length = max(length - (self.window_size + 1), 0)
+        slice_start_position = max((self.window_size + 1) - length, 0)
+        slice_end_position = slice_start_position + 2 * length - 1
+        if pad_length > 0:
+            padded_relative_embeddings = F.pad(
+                relative_embeddings,
+                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
+        else:
+            padded_relative_embeddings = relative_embeddings
+        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
+        return used_relative_embeddings
+
+    def _relative_position_to_absolute_position(self, x):
+        """
+        x: [b, h, l, 2*l-1]
+        ret: [b, h, l, l]
+        """
+        batch, heads, length, _ = x.size()
+        # Concat columns of pad to shift from relative to absolute indexing.
+        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
270
+
271
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
272
+ x_flat = x.view([batch, heads, length * 2 * length])
273
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
274
+
275
+ # Reshape and slice out the padded elements.
276
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
277
+ return x_final
278
+
279
+ def _absolute_position_to_relative_position(self, x):
280
+ """
281
+ x: [b, h, l, l]
282
+ ret: [b, h, l, 2*l-1]
283
+ """
284
+ batch, heads, length, _ = x.size()
285
+ # padd along column
286
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
287
+ x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
288
+ # add 0's in the beginning that will skew the elements after reshape
289
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
290
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
291
+ return x_final
292
+
293
+ def _attention_bias_proximal(self, length):
294
+ """Bias for self-attention to encourage attention to close positions.
295
+ Args:
296
+ length: an integer scalar.
297
+ Returns:
298
+ a Tensor with shape [1, 1, length, length]
299
+ """
300
+ r = torch.arange(length, dtype=torch.float32)
301
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
302
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
303
+
304
+
305
+ class FFN(nn.Module):
306
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None,
307
+ causal=False):
308
+ super().__init__()
309
+ self.in_channels = in_channels
310
+ self.out_channels = out_channels
311
+ self.filter_channels = filter_channels
312
+ self.kernel_size = kernel_size
313
+ self.p_dropout = p_dropout
314
+ self.activation = activation
315
+ self.causal = causal
316
+
317
+ if causal:
318
+ self.padding = self._causal_padding
319
+ else:
320
+ self.padding = self._same_padding
321
+
322
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
323
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
324
+ self.drop = nn.Dropout(p_dropout)
325
+
326
+ def forward(self, x, x_mask):
327
+ x = self.conv_1(self.padding(x * x_mask))
328
+ if self.activation == "gelu":
329
+ x = x * torch.sigmoid(1.702 * x)
330
+ else:
331
+ x = torch.relu(x)
332
+ x = self.drop(x)
333
+ x = self.conv_2(self.padding(x * x_mask))
334
+ return x * x_mask
335
+
336
+ def _causal_padding(self, x):
337
+ if self.kernel_size == 1:
338
+ return x
339
+ pad_l = self.kernel_size - 1
340
+ pad_r = 0
341
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
342
+ x = F.pad(x, commons.convert_pad_shape(padding))
343
+ return x
344
+
345
+ def _same_padding(self, x):
346
+ if self.kernel_size == 1:
347
+ return x
348
+ pad_l = (self.kernel_size - 1) // 2
349
+ pad_r = self.kernel_size // 2
350
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
351
+ x = F.pad(x, commons.convert_pad_shape(padding))
352
+ return x
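The index gymnastics in `_relative_position_to_absolute_position` above is the standard pad-flatten-reshape-slice trick from relative-position attention; a minimal standalone sketch (plain PyTorch, toy sizes chosen purely for illustration) makes the shape flow visible:

```python
import torch
import torch.nn.functional as F

# Toy version of the pad -> flatten -> reshape -> slice trick used above:
# a [1, 1, l, 2*l-1] block of relative logits becomes [1, 1, l, l] absolute scores.
l = 3
x = torch.arange(l * (2 * l - 1), dtype=torch.float).view(1, 1, l, 2 * l - 1)

x = F.pad(x, [0, 1])                    # append one column: [1, 1, l, 2*l]
x_flat = x.view(1, 1, l * 2 * l)        # flatten the last two dims
x_flat = F.pad(x_flat, [0, l - 1])      # pad so the next view lines up
x_final = x_flat.view(1, 1, l + 1, 2 * l - 1)[:, :, :l, l - 1:]

print(x_final.shape)  # torch.Size([1, 1, 3, 3])
```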
bert_vits2/bert/bert-base-japanese-v3/README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - cc100
+ - wikipedia
+ language:
+ - ja
+ widget:
+ - text: 東北大学で[MASK]の研究をしています。
+ ---
+
+ # BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
+
+ This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
+
+ This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in the [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by WordPiece subword tokenization.
+ Additionally, the model is trained with whole word masking enabled for the masked language modeling (MLM) objective.
+
+ The code used for pretraining is available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
+
+ ## Model architecture
+
+ The model architecture is the same as the original BERT base model: 12 layers, 768 dimensions of hidden states, and 12 attention heads.
+
+ ## Training Data
+
+ The model is trained on the Japanese portion of the [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
+ For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
+ The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
+
+ For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with the [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
+
+ ## Tokenization
+
+ The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
+ The vocabulary size is 32768.
+
+ We used the [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
+
+ ## Training
+
+ We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
+ For training of the MLM (masked language modeling) objective, we introduced whole word masking, in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
+
+ For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
+
+ ## Licenses
+
+ The pretrained models are distributed under the Apache License 2.0.
+
+ ## Acknowledgments
+
+ This model is trained with Cloud TPUs provided by the [TPU Research Cloud](https://sites.research.google/trc/about/) program.
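The tokenization pipeline described above needs MeCab bindings at runtime; a minimal loading sketch (assuming the `transformers`, `fugashi`, and `unidic-lite` packages are installed, and pulling the model from the Hub since only the config and vocab are vendored in this directory):

```python
from transformers import BertJapaneseTokenizer

# Requires: pip install transformers fugashi unidic-lite
tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-v3")

# MeCab (Unidic) word segmentation followed by WordPiece subword splitting.
print(tokenizer.tokenize("東北大学で自然言語処理の研究をしています。"))
```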
bert_vits2/bert/bert-base-japanese-v3/config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 32768
+ }
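For reference, this config can be loaded directly with `transformers` to sanity-check the BERT-base geometry (a minimal sketch; assumes the `transformers` package and this repository layout):

```python
from transformers import BertConfig

config = BertConfig.from_json_file("bert_vits2/bert/bert-base-japanese-v3/config.json")
assert config.hidden_size == 768 and config.num_hidden_layers == 12
print(config.vocab_size)  # 32768
```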
bert_vits2/bert/bert-base-japanese-v3/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert_vits2/bert/chinese-roberta-wwm-ext-large/.gitattributes ADDED
@@ -0,0 +1,9 @@
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
bert_vits2/bert/chinese-roberta-wwm-ext-large/README.md ADDED
@@ -0,0 +1,57 @@
+ ---
+ language:
+ - zh
+ tags:
+ - bert
+ license: "apache-2.0"
+ ---
+
+ # Please use 'Bert' related functions to load this model!
+
+ ## Chinese BERT with Whole Word Masking
+ To further accelerate Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
+
+ **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
+ Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
+
+ This repository is developed based on: https://github.com/google-research/bert
+
+ You may also be interested in:
+ - Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
+ - Chinese MacBERT: https://github.com/ymcui/MacBERT
+ - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
+ - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
+ - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
+
+ More resources by HFL: https://github.com/ymcui/HFL-Anthology
+
+ ## Citation
+ If you find the technical reports or resources useful, please cite the following technical reports in your paper.
+ - Primary: https://arxiv.org/abs/2004.13922
+ ```
+ @inproceedings{cui-etal-2020-revisiting,
+   title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
+   author = "Cui, Yiming and
+     Che, Wanxiang and
+     Liu, Ting and
+     Qin, Bing and
+     Wang, Shijin and
+     Hu, Guoping",
+   booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
+   month = nov,
+   year = "2020",
+   address = "Online",
+   publisher = "Association for Computational Linguistics",
+   url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
+   pages = "657--668",
+ }
+ ```
+ - Secondary: https://arxiv.org/abs/1906.08101
+ ```
+ @article{chinese-bert-wwm,
+   title={Pre-Training with Whole Word Masking for Chinese BERT},
+   author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
+   journal={arXiv preprint arXiv:1906.08101},
+   year={2019}
+ }
+ ```
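Following the model card's own note to use `Bert`-prefixed classes, a minimal loading sketch (assuming `transformers` is installed; `hfl/chinese-roberta-wwm-ext-large` is the upstream Hub id this directory mirrors):

```python
import torch
from transformers import BertTokenizer, BertModel

# Load with Bert* classes, not Roberta* classes, per the note above.
tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
model = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext-large")

inputs = tokenizer("使用语言模型来预测下一个词的概率。", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # [1, seq_len, 1024]
```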
bert_vits2/bert/chinese-roberta-wwm-ext-large/added_tokens.json ADDED
@@ -0,0 +1 @@
+ {}
bert_vits2/bert/chinese-roberta-wwm-ext-large/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "type_vocab_size": 2,
+   "vocab_size": 21128
+ }
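Note the geometry difference between the two vendored encoders: this Chinese model is BERT-large (`hidden_size` 1024, 24 layers), while the Japanese model above is BERT-base (`hidden_size` 768, 12 layers). `TextEncoder` in `bert_vits2/models.py` below hard-codes matching projections (`bert_proj = nn.Conv1d(1024, ...)`, `ja_bert_proj = nn.Conv1d(768, ...)`), and `Bert_VITS2.get_text` zero-fills whichever feature stream is unused, so these widths must stay in sync with the checkpoints.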
bert_vits2/bert/chinese-roberta-wwm-ext-large/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert_vits2/bert/chinese-roberta-wwm-ext-large/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"init_inputs": []}
bert_vits2/bert/chinese-roberta-wwm-ext-large/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert_vits2/bert_vits2.py ADDED
@@ -0,0 +1,118 @@
+ import numpy as np
+ import torch
+
+ from bert_vits2 import commons
+ from bert_vits2 import utils as bert_vits2_utils
+ from bert_vits2.models import SynthesizerTrn
+ from bert_vits2.text import *
+ from bert_vits2.text.cleaner import clean_text
+ from bert_vits2.utils import process_legacy_versions
+ from utils import classify_language, get_hparams_from_file, lang_dict
+ from utils.sentence import sentence_split_and_markup, cut
+
+
+ class Bert_VITS2:
+     def __init__(self, model, config, device=torch.device("cpu"), **kwargs):
+         self.hps_ms = get_hparams_from_file(config) if isinstance(config, str) else config
+         self.n_speakers = getattr(self.hps_ms.data, 'n_speakers', 0)
+         self.speakers = [item[0] for item in
+                          sorted(list(getattr(self.hps_ms.data, 'spk2id', {'0': 0}).items()), key=lambda x: x[1])]
+         self.symbols = symbols
+
+         # Compatible with legacy versions
+         self.version = process_legacy_versions(self.hps_ms)
+
+         if self.version in ["1.0", "1.0.1"]:
+             self.symbols = symbols_legacy
+             self.hps_ms.model.n_layers_trans_flow = 3
+
+         if self.version in ["1.1"]:
+             self.hps_ms.model.n_layers_trans_flow = 6
+
+         self._symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
+
+         self.net_g = SynthesizerTrn(
+             len(self.symbols),
+             self.hps_ms.data.filter_length // 2 + 1,
+             self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
+             n_speakers=self.hps_ms.data.n_speakers,
+             symbols=self.symbols,
+             **self.hps_ms.model).to(device)
+         _ = self.net_g.eval()
+         self.device = device
+         self.load_model(model)
+
+     def load_model(self, model):
+         bert_vits2_utils.load_checkpoint(model, self.net_g, None, skip_optimizer=True, legacy_version=self.version)
+
+     def get_speakers(self):
+         return self.speakers
+
+     def get_text(self, text, language_str, hps):
+         norm_text, phone, tone, word2ph = clean_text(text, language_str)
+         phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str, self._symbol_to_id)
+
+         if hps.data.add_blank:
+             phone = commons.intersperse(phone, 0)
+             tone = commons.intersperse(tone, 0)
+             language = commons.intersperse(language, 0)
+             for i in range(len(word2ph)):
+                 word2ph[i] = word2ph[i] * 2
+             word2ph[0] += 1
+         bert = get_bert(norm_text, word2ph, language_str)
+         del word2ph
+         assert bert.shape[-1] == len(phone), phone
+
+         if language_str == "zh":
+             bert = bert
+             ja_bert = torch.zeros(768, len(phone))
+         elif language_str == "ja":
+             ja_bert = bert
+             bert = torch.zeros(1024, len(phone))
+         else:
+             bert = torch.zeros(1024, len(phone))
+             ja_bert = torch.zeros(768, len(phone))
+         assert bert.shape[-1] == len(
+             phone
+         ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
+         phone = torch.LongTensor(phone)
+         tone = torch.LongTensor(tone)
+         language = torch.LongTensor(language)
+         return bert, ja_bert, phone, tone, language
+
+     def infer(self, text, lang, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
+         bert, ja_bert, phones, tones, lang_ids = self.get_text(text, lang, self.hps_ms)
+         with torch.no_grad():
+             x_tst = phones.to(self.device).unsqueeze(0)
+             tones = tones.to(self.device).unsqueeze(0)
+             lang_ids = lang_ids.to(self.device).unsqueeze(0)
+             bert = bert.to(self.device).unsqueeze(0)
+             ja_bert = ja_bert.to(self.device).unsqueeze(0)
+             x_tst_lengths = torch.LongTensor([phones.size(0)]).to(self.device)
+             speakers = torch.LongTensor([int(sid)]).to(self.device)
+             audio = self.net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, ja_bert,
+                                      sdp_ratio=sdp_ratio, noise_scale=noise_scale,
+                                      noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
+
+         torch.cuda.empty_cache()
+         return audio
+
+     def get_audio(self, voice, auto_break=False):
+         text = voice.get("text", None)
+         lang = voice.get("lang", "auto")
+         sdp_ratio = voice.get("sdp_ratio", 0.2)
+         noise_scale = voice.get("noise", 0.5)
+         noise_scale_w = voice.get("noisew", 0.6)
+         length_scale = voice.get("length", 1)
+         sid = voice.get("id", 0)
+         max = voice.get("max", 50)
+         # sentence_list = sentence_split_and_markup(text, max, "ZH", ["zh"])
+         if lang == "auto":
+             lang = classify_language(text, target_languages=lang_dict["bert_vits2"])
+         sentence_list = cut(text, max)
+         audios = []
+         for sentence in sentence_list:
+             audio = self.infer(sentence, lang, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid)
+             audios.append(audio)
+         audio = np.concatenate(audios)
+         return audio
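A minimal end-to-end sketch of the class above (the checkpoint and config paths and the speaker id are hypothetical; assumes `scipy` is installed and that the loaded config exposes `data.sampling_rate`, as VITS-style configs typically do):

```python
import torch
from scipy.io import wavfile

from bert_vits2.bert_vits2 import Bert_VITS2

# Hypothetical paths: point these at a downloaded Bert-VITS2 checkpoint and its config.
model = Bert_VITS2("Model/bert_vits2/G_latest.pth", "Model/bert_vits2/config.json",
                   device=torch.device("cpu"))

audio = model.get_audio({"text": "你好,世界。", "lang": "zh", "id": 0})
wavfile.write("out.wav", model.hps_ms.data.sampling_rate, audio)
```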
bert_vits2/commons.py ADDED
@@ -0,0 +1,161 @@
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+
+ def init_weights(m, mean=0.0, std=0.01):
+     classname = m.__class__.__name__
+     if classname.find("Conv") != -1:
+         m.weight.data.normal_(mean, std)
+
+
+ def get_padding(kernel_size, dilation=1):
+     return int((kernel_size * dilation - dilation) / 2)
+
+
+ def convert_pad_shape(pad_shape):
+     l = pad_shape[::-1]
+     pad_shape = [item for sublist in l for item in sublist]
+     return pad_shape
+
+
+ def intersperse(lst, item):
+     result = [item] * (len(lst) * 2 + 1)
+     result[1::2] = lst
+     return result
+
+
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
+     """KL(P||Q)"""
+     kl = (logs_q - logs_p) - 0.5
+     kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2. * logs_q)
+     return kl
+
+
+ def rand_gumbel(shape):
+     """Sample from the Gumbel distribution, protect from overflows."""
+     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+     return -torch.log(-torch.log(uniform_samples))
+
+
+ def rand_gumbel_like(x):
+     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+     return g
+
+
+ def slice_segments(x, ids_str, segment_size=4):
+     ret = torch.zeros_like(x[:, :, :segment_size])
+     for i in range(x.size(0)):
+         idx_str = ids_str[i]
+         idx_end = idx_str + segment_size
+         ret[i] = x[i, :, idx_str:idx_end]
+     return ret
+
+
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
+     b, d, t = x.size()
+     if x_lengths is None:
+         x_lengths = t
+     ids_str_max = x_lengths - segment_size + 1
+     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+     ret = slice_segments(x, ids_str, segment_size)
+     return ret, ids_str
+
+
+ def get_timing_signal_1d(
+         length, channels, min_timescale=1.0, max_timescale=1.0e4):
+     position = torch.arange(length, dtype=torch.float)
+     num_timescales = channels // 2
+     log_timescale_increment = (
+             math.log(float(max_timescale) / float(min_timescale)) /
+             (num_timescales - 1))
+     inv_timescales = min_timescale * torch.exp(
+         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+     signal = F.pad(signal, [0, 0, 0, channels % 2])
+     signal = signal.view(1, channels, length)
+     return signal
+
+
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+ def subsequent_mask(length):
+     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+     return mask
+
+
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     acts = t_act * s_act
+     return acts
+
+
+ def convert_pad_shape(pad_shape):
+     l = pad_shape[::-1]
+     pad_shape = [item for sublist in l for item in sublist]
+     return pad_shape
+
+
+ def shift_1d(x):
+     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+     return x
+
+
+ def sequence_mask(length, max_length=None):
+     if max_length is None:
+         max_length = length.max()
+     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+     return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+ def generate_path(duration, mask):
+     """
+     duration: [b, 1, t_x]
+     mask: [b, 1, t_y, t_x]
+     """
+     device = duration.device
+
+     b, _, t_y, t_x = mask.shape
+     cum_duration = torch.cumsum(duration, -1)
+
+     cum_duration_flat = cum_duration.view(b * t_x)
+     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+     path = path.view(b, t_x, t_y)
+     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+     path = path.unsqueeze(1).transpose(2, 3) * mask
+     return path
+
+
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
+     if isinstance(parameters, torch.Tensor):
+         parameters = [parameters]
+     parameters = list(filter(lambda p: p.grad is not None, parameters))
+     norm_type = float(norm_type)
+     if clip_value is not None:
+         clip_value = float(clip_value)
+
+     total_norm = 0
+     for p in parameters:
+         param_norm = p.grad.data.norm(norm_type)
+         total_norm += param_norm.item() ** norm_type
+         if clip_value is not None:
+             p.grad.data.clamp_(min=-clip_value, max=clip_value)
+     total_norm = total_norm ** (1. / norm_type)
+     return total_norm
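A few of these helpers are easiest to understand from toy inputs; a quick sketch (plain PyTorch, sizes chosen purely for illustration):

```python
import torch

from bert_vits2 import commons

# intersperse: insert a blank token (0) between and around symbols,
# mirroring the add_blank handling in bert_vits2/bert_vits2.py.
print(commons.intersperse([1, 2, 3], 0))  # [0, 1, 0, 2, 0, 3, 0]

# sequence_mask: boolean mask [b, max_len] built from per-item lengths.
print(commons.sequence_mask(torch.tensor([2, 4])))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])

# generate_path: expand per-phone durations into a hard monotonic alignment.
duration = torch.tensor([[[2., 1., 3.]]])   # [b=1, 1, t_x=3]
mask = torch.ones(1, 1, 6, 3)               # [b, 1, t_y=6, t_x=3]
path = commons.generate_path(duration, mask)
print(path[0, 0].long())  # each of the 6 output frames attends to exactly one phone
```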
bert_vits2/models.py ADDED
@@ -0,0 +1,686 @@
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ from bert_vits2 import commons
+ from bert_vits2 import modules
+ from bert_vits2 import attentions
+
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+
+ from bert_vits2.commons import init_weights, get_padding
+ from bert_vits2.text import num_tones, num_languages
+
+
+ class DurationDiscriminator(nn.Module):  # vits2
+     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+         super().__init__()
+
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.gin_channels = gin_channels
+
+         self.drop = nn.Dropout(p_dropout)
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+         self.norm_1 = modules.LayerNorm(filter_channels)
+         self.conv_2 = nn.Conv1d(
+             filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
+         )
+         self.norm_2 = modules.LayerNorm(filter_channels)
+         self.dur_proj = nn.Conv1d(1, filter_channels, 1)
+
+         self.pre_out_conv_1 = nn.Conv1d(2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+         self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
+         self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+         self.pre_out_norm_2 = modules.LayerNorm(filter_channels)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+         self.output_layer = nn.Sequential(
+             nn.Linear(filter_channels, 1),
+             nn.Sigmoid()
+         )
+
+     def forward_probability(self, x, x_mask, dur, g=None):
+         dur = self.dur_proj(dur)
+         x = torch.cat([x, dur], dim=1)
+         x = self.pre_out_conv_1(x * x_mask)
+         x = torch.relu(x)
+         x = self.pre_out_norm_1(x)
+         x = self.drop(x)
+         x = self.pre_out_conv_2(x * x_mask)
+         x = torch.relu(x)
+         x = self.pre_out_norm_2(x)
+         x = self.drop(x)
+         x = x * x_mask
+         x = x.transpose(1, 2)
+         output_prob = self.output_layer(x)
+         return output_prob
+
+     def forward(self, x, x_mask, dur_r, dur_hat, g=None):
+         x = torch.detach(x)
+         if g is not None:
+             g = torch.detach(g)
+             x = x + self.cond(g)
+         x = self.conv_1(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_1(x)
+         x = self.drop(x)
+         x = self.conv_2(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_2(x)
+         x = self.drop(x)
+
+         output_probs = []
+         for dur in [dur_r, dur_hat]:
+             output_prob = self.forward_probability(x, x_mask, dur, g)
+             output_probs.append(output_prob)
+
+         return output_probs
+
+
+ class TransformerCouplingBlock(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  filter_channels,
+                  n_heads,
+                  n_layers,
+                  kernel_size,
+                  p_dropout,
+                  n_flows=4,
+                  gin_channels=0,
+                  share_parameter=False
+                  ):
+
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.flows = nn.ModuleList()
+
+         self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout,
+                                  isflow=True, gin_channels=self.gin_channels) if share_parameter else None
+
+         for i in range(n_flows):
+             self.flows.append(
+                 modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout,
+                                                  filter_channels, mean_only=True, wn_sharing_parameter=self.wn,
+                                                  gin_channels=self.gin_channels))
+             self.flows.append(modules.Flip())
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         if not reverse:
+             for flow in self.flows:
+                 x, _ = flow(x, x_mask, g=g, reverse=reverse)
+         else:
+             for flow in reversed(self.flows):
+                 x = flow(x, x_mask, g=g, reverse=reverse)
+         return x
+
+
+ class StochasticDurationPredictor(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+         super().__init__()
+         filter_channels = in_channels  # NOTE: this override should be removed in a future version.
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.log_flow = modules.Log()
+         self.flows = nn.ModuleList()
+         self.flows.append(modules.ElementwiseAffine(2))
+         for i in range(n_flows):
+             self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+             self.flows.append(modules.Flip())
+
+         self.post_pre = nn.Conv1d(1, filter_channels, 1)
+         self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+         self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+         self.post_flows = nn.ModuleList()
+         self.post_flows.append(modules.ElementwiseAffine(2))
+         for i in range(4):
+             self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+             self.post_flows.append(modules.Flip())
+
+         self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+         self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+         self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+     def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+         x = torch.detach(x)
+         x = self.pre(x)
+         if g is not None:
+             g = torch.detach(g)
+             x = x + self.cond(g)
+         x = self.convs(x, x_mask)
+         x = self.proj(x) * x_mask
+
+         if not reverse:
+             flows = self.flows
+             assert w is not None
+
+             logdet_tot_q = 0
+             h_w = self.post_pre(w)
+             h_w = self.post_convs(h_w, x_mask)
+             h_w = self.post_proj(h_w) * x_mask
+             e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+             z_q = e_q
+             for flow in self.post_flows:
+                 z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+                 logdet_tot_q += logdet_q
+             z_u, z1 = torch.split(z_q, [1, 1], 1)
+             u = torch.sigmoid(z_u) * x_mask
+             z0 = (w - u) * x_mask
+             logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
+             logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
+
+             logdet_tot = 0
+             z0, logdet = self.log_flow(z0, x_mask)
+             logdet_tot += logdet
+             z = torch.cat([z0, z1], 1)
+             for flow in flows:
+                 z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+                 logdet_tot = logdet_tot + logdet
+             nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
+             return nll + logq  # [b]
+         else:
+             flows = list(reversed(self.flows))
+             flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
+             z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+             for flow in flows:
+                 z = flow(z, x_mask, g=x, reverse=reverse)
+             z0, z1 = torch.split(z, [1, 1], 1)
+             logw = z0
+             return logw
+
+
+ class DurationPredictor(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+         super().__init__()
+
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.gin_channels = gin_channels
+
+         self.drop = nn.Dropout(p_dropout)
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+         self.norm_1 = modules.LayerNorm(filter_channels)
+         self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+         self.norm_2 = modules.LayerNorm(filter_channels)
+         self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+     def forward(self, x, x_mask, g=None):
+         x = torch.detach(x)
+         if g is not None:
+             g = torch.detach(g)
+             x = x + self.cond(g)
+         x = self.conv_1(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_1(x)
+         x = self.drop(x)
+         x = self.conv_2(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_2(x)
+         x = self.drop(x)
+         x = self.proj(x * x_mask)
+         return x * x_mask
+
+
+ class TextEncoder(nn.Module):
+     def __init__(self,
+                  n_vocab,
+                  out_channels,
+                  hidden_channels,
+                  filter_channels,
+                  n_heads,
+                  n_layers,
+                  kernel_size,
+                  p_dropout,
+                  gin_channels=0,
+                  symbols=None):
+         super().__init__()
+         self.n_vocab = n_vocab
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.gin_channels = gin_channels
+         self.emb = nn.Embedding(len(symbols), hidden_channels)
+         nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
+         self.tone_emb = nn.Embedding(num_tones, hidden_channels)
+         nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5)
+         self.language_emb = nn.Embedding(num_languages, hidden_channels)
+         nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5)
+         self.bert_proj = nn.Conv1d(1024, hidden_channels, 1)
+         self.ja_bert_proj = nn.Conv1d(768, hidden_channels, 1)
+
+         self.encoder = attentions.Encoder(
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             p_dropout,
+             gin_channels=self.gin_channels)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths, tone, language, bert, ja_bert, g=None):
+         bert_emb = self.bert_proj(bert).transpose(1, 2)
+         ja_bert_emb = self.ja_bert_proj(ja_bert).transpose(1, 2)
+         x = (self.emb(x) + self.tone_emb(tone) + self.language_emb(language) + bert_emb + ja_bert_emb) * math.sqrt(
+             self.hidden_channels)  # [b, t, h]
+         x = torch.transpose(x, 1, -1)  # [b, h, t]
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+
+         x = self.encoder(x * x_mask, x_mask, g=g)
+         stats = self.proj(x) * x_mask
+
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         return x, m, logs, x_mask
+
+
+ class ResidualCouplingBlock(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  n_flows=4,
+                  gin_channels=0):
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.flows = nn.ModuleList()
+         for i in range(n_flows):
+             self.flows.append(
+                 modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
+                                               gin_channels=gin_channels, mean_only=True))
+             self.flows.append(modules.Flip())
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         if not reverse:
+             for flow in self.flows:
+                 x, _ = flow(x, x_mask, g=g, reverse=reverse)
+         else:
+             for flow in reversed(self.flows):
+                 x = flow(x, x_mask, g=g, reverse=reverse)
+         return x
+
+
+ class PosteriorEncoder(nn.Module):
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  gin_channels=0):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+
+         self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+         self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths, g=None):
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+         x = self.pre(x) * x_mask
+         x = self.enc(x, x_mask, g=g)
+         stats = self.proj(x) * x_mask
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+         return z, m, logs, x_mask
+
+
+ class Generator(torch.nn.Module):
+     def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
+                  upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
+         super(Generator, self).__init__()
+         self.num_kernels = len(resblock_kernel_sizes)
+         self.num_upsamples = len(upsample_rates)
+         self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+         resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
+
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+             self.ups.append(weight_norm(
+                 ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
+                                 k, u, padding=(k - u) // 2)))
+
+         self.resblocks = nn.ModuleList()
+         for i in range(len(self.ups)):
+             ch = upsample_initial_channel // (2 ** (i + 1))
+             for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
+                 self.resblocks.append(resblock(ch, k, d))
+
+         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+         self.ups.apply(init_weights)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+     def forward(self, x, g=None):
+         x = self.conv_pre(x)
+         if g is not None:
+             x = x + self.cond(g)
+
+         for i in range(self.num_upsamples):
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             x = self.ups[i](x)
+             xs = None
+             for j in range(self.num_kernels):
+                 if xs is None:
+                     xs = self.resblocks[i * self.num_kernels + j](x)
+                 else:
+                     xs += self.resblocks[i * self.num_kernels + j](x)
+             x = xs / self.num_kernels
+         x = F.leaky_relu(x)
+         x = self.conv_post(x)
+         x = torch.tanh(x)
+
+         return x
+
+     def remove_weight_norm(self):
+         print('Removing weight norm...')
+         for l in self.ups:
+             remove_weight_norm(l)
+         for l in self.resblocks:
+             l.remove_weight_norm()
+
+
+ class DiscriminatorP(torch.nn.Module):
+     def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+         super(DiscriminatorP, self).__init__()
+         self.period = period
+         self.use_spectral_norm = use_spectral_norm
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList([
+             norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+             norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
+         ])
+         self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+     def forward(self, x):
+         fmap = []
+
+         # 1d to 2d
+         b, c, t = x.shape
+         if t % self.period != 0:  # pad first
+             n_pad = self.period - (t % self.period)
+             x = F.pad(x, (0, n_pad), "reflect")
+             t = t + n_pad
+         x = x.view(b, c, t // self.period, self.period)
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
+
+
+ class DiscriminatorS(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(DiscriminatorS, self).__init__()
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList([
+             norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+             norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+             norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+             norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+             norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+             norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+         ])
+         self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+     def forward(self, x):
+         fmap = []
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
+
+
+ class MultiPeriodDiscriminator(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(MultiPeriodDiscriminator, self).__init__()
+         periods = [2, 3, 5, 7, 11]
+
+         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+         discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
+         self.discriminators = nn.ModuleList(discs)
+
+     def forward(self, y, y_hat):
+         y_d_rs = []
+         y_d_gs = []
+         fmap_rs = []
+         fmap_gs = []
+         for i, d in enumerate(self.discriminators):
+             y_d_r, fmap_r = d(y)
+             y_d_g, fmap_g = d(y_hat)
+             y_d_rs.append(y_d_r)
+             y_d_gs.append(y_d_g)
+             fmap_rs.append(fmap_r)
+             fmap_gs.append(fmap_g)
+
+         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+ class ReferenceEncoder(nn.Module):
+     '''
+     inputs --- [N, Ty/r, n_mels*r] mels
+     outputs --- [N, ref_enc_gru_size]
+     '''
+
+     def __init__(self, spec_channels, gin_channels=0):
+         super().__init__()
+         self.spec_channels = spec_channels
+         ref_enc_filters = [32, 32, 64, 64, 128, 128]
+         K = len(ref_enc_filters)
+         filters = [1] + ref_enc_filters
+         convs = [weight_norm(nn.Conv2d(in_channels=filters[i],
+                                        out_channels=filters[i + 1],
+                                        kernel_size=(3, 3),
+                                        stride=(2, 2),
+                                        padding=(1, 1))) for i in range(K)]
+         self.convs = nn.ModuleList(convs)
+         # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)])
+
+         out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K)
+         self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels,
+                           hidden_size=256 // 2,
+                           batch_first=True)
+         self.proj = nn.Linear(128, gin_channels)
+
+     def forward(self, inputs, mask=None):
+         N = inputs.size(0)
+         out = inputs.view(N, 1, -1, self.spec_channels)  # [N, 1, Ty, n_freqs]
+         for conv in self.convs:
+             out = conv(out)
+             # out = wn(out)
+             out = F.relu(out)  # [N, 128, Ty//2^K, n_mels//2^K]
+
+         out = out.transpose(1, 2)  # [N, Ty//2^K, 128, n_mels//2^K]
+         T = out.size(1)
+         N = out.size(0)
+         out = out.contiguous().view(N, T, -1)  # [N, Ty//2^K, 128*n_mels//2^K]
+
+         self.gru.flatten_parameters()
+         memory, out = self.gru(out)  # out --- [1, N, 128]
+
+         return self.proj(out.squeeze(0))
+
+     def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
+         for i in range(n_convs):
+             L = (L - kernel_size + 2 * pad) // stride + 1
+         return L
+
+
+ class SynthesizerTrn(nn.Module):
+     """
+     Synthesizer for Training
+     """
+
+     def __init__(self,
+                  n_vocab,
+                  spec_channels,
+                  segment_size,
+                  inter_channels,
+                  hidden_channels,
+                  filter_channels,
+                  n_heads,
+                  n_layers,
+                  kernel_size,
+                  p_dropout,
+                  resblock,
+                  resblock_kernel_sizes,
+                  resblock_dilation_sizes,
+                  upsample_rates,
+                  upsample_initial_channel,
+                  upsample_kernel_sizes,
+                  n_speakers=256,
+                  gin_channels=256,
+                  use_sdp=True,
+                  n_flow_layer=4,
+                  n_layers_trans_flow=6,
+                  flow_share_parameter=False,
+                  use_transformer_flow=True,
+                  **kwargs):
+
+         super().__init__()
+         self.n_vocab = n_vocab
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.n_speakers = n_speakers
+         self.gin_channels = gin_channels
+         self.n_layers_trans_flow = n_layers_trans_flow
+         self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True)
+         self.use_sdp = use_sdp
+         self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False)
+         self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01)
+         self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6)
+         self.current_mas_noise_scale = self.mas_noise_scale_initial
+         if self.use_spk_conditioned_encoder and gin_channels > 0:
+             self.enc_gin_channels = gin_channels
+         symbols = kwargs.get("symbols")
+         self.enc_p = TextEncoder(n_vocab,
+                                  inter_channels,
+                                  hidden_channels,
+                                  filter_channels,
+                                  n_heads,
+                                  n_layers,
+                                  kernel_size,
+                                  p_dropout,
+                                  gin_channels=self.enc_gin_channels,
+                                  symbols=symbols,
+                                  )
+         self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
+                              upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+         self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
+                                       gin_channels=gin_channels)
+         if use_transformer_flow:
+             self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads,
+                                                  n_layers_trans_flow, 5, p_dropout, n_flow_layer,
+                                                  gin_channels=gin_channels, share_parameter=flow_share_parameter)
+         else:
+             self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer,
+                                               gin_channels=gin_channels)
+         self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
+         self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
+
+         if self.n_speakers > 0:
+             self.emb_g = nn.Embedding(self.n_speakers, gin_channels)
+         else:
+             self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)
+
+     def infer(self, x, x_lengths, sid, tone, language, bert, ja_bert, noise_scale=.667, length_scale=1,
+               noise_scale_w=0.8, max_len=None, sdp_ratio=0, y=None):
+         # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)
+         # g = self.gst(y)
+         if self.n_speakers > 0:
+             g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+         else:
+             g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)
+         x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert, ja_bert, g=g)
+         logw = (self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * sdp_ratio
+                 + self.dp(x, x_mask, g=g) * (1 - sdp_ratio))
+         w = torch.exp(logw) * x_mask * length_scale
+         w_ceil = torch.ceil(w)
+         y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
+         y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
+         attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+         attn = commons.generate_path(w_ceil, attn_mask)
+
+         m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+         logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+
+         z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+         z = self.flow(z_p, y_mask, g=g, reverse=True)
+         o = self.dec((z * y_mask)[:, :, :max_len], g=g)
+         return o, attn, y_mask, (z, z_p, m_p, logs_p)
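In `SynthesizerTrn.infer` above, `sdp_ratio` linearly blends the log-durations of the stochastic (`sdp`) and deterministic (`dp`) predictors before they are exponentiated, masked, and scaled; a toy sketch of just that arithmetic (dummy tensors, no model weights):

```python
import torch

# Dummy log-durations for 4 phones from the two duration predictors.
logw_sdp = torch.tensor([[[0.1, 0.5, 0.2, 0.9]]])  # stochastic predictor output
logw_dp = torch.tensor([[[0.3, 0.4, 0.2, 1.0]]])   # deterministic predictor output
x_mask = torch.ones(1, 1, 4)
sdp_ratio, length_scale = 0.2, 1.0

logw = logw_sdp * sdp_ratio + logw_dp * (1 - sdp_ratio)
w_ceil = torch.ceil(torch.exp(logw) * x_mask * length_scale)  # frames per phone
y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
print(w_ceil, y_lengths)  # total decoder frame count
```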
bert_vits2/modules.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import copy
+ import math
+ import numpy as np
+ import scipy
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm
+
+ from bert_vits2 import commons
+ from bert_vits2.commons import init_weights, get_padding
+ from bert_vits2.transforms import piecewise_rational_quadratic_transform
+ from bert_vits2.attentions import Encoder
+
+ LRELU_SLOPE = 0.1
+
+
+ class LayerNorm(nn.Module):
+     def __init__(self, channels, eps=1e-5):
+         super().__init__()
+         self.channels = channels
+         self.eps = eps
+
+         self.gamma = nn.Parameter(torch.ones(channels))
+         self.beta = nn.Parameter(torch.zeros(channels))
+
+     def forward(self, x):
+         x = x.transpose(1, -1)
+         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+         return x.transpose(1, -1)
+
+
+ class ConvReluNorm(nn.Module):
+     def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+         super().__init__()
+         self.in_channels = in_channels
+         self.hidden_channels = hidden_channels
+         self.out_channels = out_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.p_dropout = p_dropout
+         assert n_layers > 1, "Number of layers should be larger than 1."
+
+         self.conv_layers = nn.ModuleList()
+         self.norm_layers = nn.ModuleList()
+         self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+         self.norm_layers.append(LayerNorm(hidden_channels))
+         self.relu_drop = nn.Sequential(
+             nn.ReLU(),
+             nn.Dropout(p_dropout))
+         for _ in range(n_layers - 1):
+             self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+             self.norm_layers.append(LayerNorm(hidden_channels))
+         self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(self, x, x_mask):
+         x_org = x
+         for i in range(self.n_layers):
+             x = self.conv_layers[i](x * x_mask)
+             x = self.norm_layers[i](x)
+             x = self.relu_drop(x)
+         x = x_org + self.proj(x)
+         return x * x_mask
+
+
+ class DDSConv(nn.Module):
+     """
+     Dilated and Depth-Separable Convolution
+     """
+
+     def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+         super().__init__()
+         self.channels = channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.p_dropout = p_dropout
+
+         self.drop = nn.Dropout(p_dropout)
+         self.convs_sep = nn.ModuleList()
+         self.convs_1x1 = nn.ModuleList()
+         self.norms_1 = nn.ModuleList()
+         self.norms_2 = nn.ModuleList()
+         for i in range(n_layers):
+             dilation = kernel_size ** i
+             padding = (kernel_size * dilation - dilation) // 2
+             self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+                                             groups=channels, dilation=dilation, padding=padding
+                                             ))
+             self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+             self.norms_1.append(LayerNorm(channels))
+             self.norms_2.append(LayerNorm(channels))
+
+     def forward(self, x, x_mask, g=None):
+         if g is not None:
+             x = x + g
+         for i in range(self.n_layers):
+             y = self.convs_sep[i](x * x_mask)
+             y = self.norms_1[i](y)
+             y = F.gelu(y)
+             y = self.convs_1x1[i](y)
+             y = self.norms_2[i](y)
+             y = F.gelu(y)
+             y = self.drop(y)
+             x = x + y
+         return x * x_mask
+
+
+ class WN(torch.nn.Module):
+     def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+         super(WN, self).__init__()
+         assert (kernel_size % 2 == 1)
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+         self.p_dropout = p_dropout
+
+         self.in_layers = torch.nn.ModuleList()
+         self.res_skip_layers = torch.nn.ModuleList()
+         self.drop = nn.Dropout(p_dropout)
+
+         if gin_channels != 0:
+             cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
+             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+         for i in range(n_layers):
+             dilation = dilation_rate ** i
+             padding = int((kernel_size * dilation - dilation) / 2)
+             in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
+                                        dilation=dilation, padding=padding)
+             in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+             self.in_layers.append(in_layer)
+
+             # the last layer needs no residual half, only the skip half
+             if i < n_layers - 1:
+                 res_skip_channels = 2 * hidden_channels
+             else:
+                 res_skip_channels = hidden_channels
+
+             res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+             res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+             self.res_skip_layers.append(res_skip_layer)
+
+     def forward(self, x, x_mask, g=None, **kwargs):
+         output = torch.zeros_like(x)
+         n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+         if g is not None:
+             g = self.cond_layer(g)
+
+         for i in range(self.n_layers):
+             x_in = self.in_layers[i](x)
+             if g is not None:
+                 cond_offset = i * 2 * self.hidden_channels
+                 g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
+             else:
+                 g_l = torch.zeros_like(x_in)
+
+             acts = commons.fused_add_tanh_sigmoid_multiply(
+                 x_in,
+                 g_l,
+                 n_channels_tensor)
+             acts = self.drop(acts)
+
+             res_skip_acts = self.res_skip_layers[i](acts)
+             if i < self.n_layers - 1:
+                 res_acts = res_skip_acts[:, :self.hidden_channels, :]
+                 x = (x + res_acts) * x_mask
+                 output = output + res_skip_acts[:, self.hidden_channels:, :]
+             else:
+                 output = output + res_skip_acts
+         return output * x_mask
+
+     def remove_weight_norm(self):
+         if self.gin_channels != 0:
+             torch.nn.utils.remove_weight_norm(self.cond_layer)
+         for l in self.in_layers:
+             torch.nn.utils.remove_weight_norm(l)
+         for l in self.res_skip_layers:
+             torch.nn.utils.remove_weight_norm(l)
+
+
+ class ResBlock1(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+         super(ResBlock1, self).__init__()
+         self.convs1 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+                                padding=get_padding(kernel_size, dilation[2])))
+         ])
+         self.convs1.apply(init_weights)
+
+         self.convs2 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1)))
+         ])
+         self.convs2.apply(init_weights)
+
+     def forward(self, x, x_mask=None):
+         for c1, c2 in zip(self.convs1, self.convs2):
+             xt = F.leaky_relu(x, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c1(xt)
+             xt = F.leaky_relu(xt, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c2(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs1:
+             remove_weight_norm(l)
+         for l in self.convs2:
+             remove_weight_norm(l)
+
+
+ class ResBlock2(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+         super(ResBlock2, self).__init__()
+         self.convs = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1])))
+         ])
+         self.convs.apply(init_weights)
+
+     def forward(self, x, x_mask=None):
+         for c in self.convs:
+             xt = F.leaky_relu(x, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs:
+             remove_weight_norm(l)
+
+
+ class Log(nn.Module):
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+             logdet = torch.sum(-y, [1, 2])
+             return y, logdet
+         else:
+             x = torch.exp(x) * x_mask
+             return x
+
+
+ class Flip(nn.Module):
+     def forward(self, x, *args, reverse=False, **kwargs):
+         x = torch.flip(x, [1])
+         if not reverse:
+             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+             return x, logdet
+         else:
+             return x
+
+
+ class ElementwiseAffine(nn.Module):
+     def __init__(self, channels):
+         super().__init__()
+         self.channels = channels
+         self.m = nn.Parameter(torch.zeros(channels, 1))
+         self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = self.m + torch.exp(self.logs) * x
+             y = y * x_mask
+             logdet = torch.sum(self.logs * x_mask, [1, 2])
+             return y, logdet
+         else:
+             x = (x - self.m) * torch.exp(-self.logs) * x_mask
+             return x
+
+
+ class ResidualCouplingLayer(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  p_dropout=0,
+                  gin_channels=0,
+                  mean_only=False):
+         assert channels % 2 == 0, "channels should be divisible by 2"
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.half_channels = channels // 2
+         self.mean_only = mean_only
+
+         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+         self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout,
+                       gin_channels=gin_channels)
+         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+         self.post.weight.data.zero_()
+         self.post.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0) * x_mask
+         h = self.enc(h, x_mask, g=g)
+         stats = self.post(h) * x_mask
+         if not self.mean_only:
+             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+         else:
+             m = stats
+             logs = torch.zeros_like(m)
+
+         if not reverse:
+             x1 = m + x1 * torch.exp(logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             logdet = torch.sum(logs, [1, 2])
+             return x, logdet
+         else:
+             x1 = (x1 - m) * torch.exp(-logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             return x
+
+
+ class ConvFlow(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
+         super().__init__()
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.num_bins = num_bins
+         self.tail_bound = tail_bound
+         self.half_channels = in_channels // 2
+
+         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
+         self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0)
+         h = self.convs(h, x_mask, g=g)
+         h = self.proj(h) * x_mask
+
+         b, c, t = x0.shape
+         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]
+
+         unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_derivatives = h[..., 2 * self.num_bins:]
+
+         x1, logabsdet = piecewise_rational_quadratic_transform(x1,
+                                                                unnormalized_widths,
+                                                                unnormalized_heights,
+                                                                unnormalized_derivatives,
+                                                                inverse=reverse,
+                                                                tails='linear',
+                                                                tail_bound=self.tail_bound
+                                                                )
+
+         x = torch.cat([x0, x1], 1) * x_mask
+         logdet = torch.sum(logabsdet * x_mask, [1, 2])
+         if not reverse:
+             return x, logdet
+         else:
+             return x
+
+
+ class TransformerCouplingLayer(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  kernel_size,
+                  n_layers,
+                  n_heads,
+                  p_dropout=0,
+                  filter_channels=0,
+                  mean_only=False,
+                  wn_sharing_parameter=None,
+                  gin_channels=0
+                  ):
+         assert channels % 2 == 0, "channels should be divisible by 2"
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.half_channels = channels // 2
+         self.mean_only = mean_only
+
+         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+         self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow=True,
+                            gin_channels=gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter
+         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+         self.post.weight.data.zero_()
+         self.post.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0) * x_mask
+         h = self.enc(h, x_mask, g=g)
+         stats = self.post(h) * x_mask
+         if not self.mean_only:
+             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+         else:
+             m = stats
+             logs = torch.zeros_like(m)
+
+         if not reverse:
+             x1 = m + x1 * torch.exp(logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             logdet = torch.sum(logs, [1, 2])
+             return x, logdet
+         else:
+             x1 = (x1 - m) * torch.exp(-logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             return x
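Note that the unreachable copy of ConvFlow's spline code that followed `return x` in `TransformerCouplingLayer.forward` (it referenced undefined `unnormalized_widths` and friends) has been dropped. The coupling layers are invertible by construction: a forward pass followed by a reverse pass recovers the input up to floating-point error. A minimal round-trip sketch (shapes are illustrative, assuming the package imports above resolve):

    import torch
    from bert_vits2.modules import ResidualCouplingLayer

    layer = ResidualCouplingLayer(channels=4, hidden_channels=8, kernel_size=5,
                                  dilation_rate=1, n_layers=2, mean_only=True)
    x = torch.randn(1, 4, 10)      # [batch, channels, frames]
    x_mask = torch.ones(1, 1, 10)  # all frames valid
    y, logdet = layer(x, x_mask, reverse=False)  # forward returns (y, logdet)
    x_rec = layer(y, x_mask, reverse=True)       # reverse returns the tensor only
    assert torch.allclose(x, x_rec, atol=1e-5)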
bert_vits2/requirements.txt ADDED
@@ -0,0 +1,15 @@
+ Cython
+ librosa==0.9.1
+ matplotlib==3.3.1
+ numpy
+ phonemizer
+ scipy
+ tensorboard
+ torch
+ torchvision
+ Unidecode
+ amfm_decompy
+ jieba
+ transformers
+ pypinyin
+ cn2an
bert_vits2/text/__init__.py ADDED
@@ -0,0 +1,25 @@
+ from bert_vits2.text.symbols import *
+ from .chinese_bert import get_bert_feature as zh_bert
+ from .english_bert_mock import get_bert_feature as en_bert
+ from .japanese_bert import get_bert_feature as ja_bert
+
+
+ def cleaned_text_to_sequence(cleaned_text, tones, language, _symbol_to_id):
+     """Convert cleaned phoneme symbols to sequences of integer IDs.
+     Args:
+         cleaned_text: list of phoneme symbols
+         tones: per-phoneme tone indices, before the language offset
+         language: language code, one of "zh", "ja", "en"
+         _symbol_to_id: mapping from symbol string to integer ID
+     Returns:
+         Lists of phone IDs, offset tone IDs and language IDs
+     """
+     phones = [_symbol_to_id[symbol] for symbol in cleaned_text]
+     tone_start = language_tone_start_map[language]
+     tones = [i + tone_start for i in tones]
+     lang_id = language_id_map[language]
+     lang_ids = [lang_id for i in phones]
+     return phones, tones, lang_ids
+
+
+ def get_bert(norm_text, word2ph, language):
+     lang_bert_func_map = {"zh": zh_bert, "en": en_bert, "ja": ja_bert}
+     bert = lang_bert_func_map[language](norm_text, word2ph)
+     return bert
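A sketch of how these helpers chain together. The `_symbol_to_id` table is built here from the default `symbols` list in `bert_vits2.text.symbols` (models trained on the legacy set would build it from `symbols_legacy`), and the phone/tone lists are illustrative stand-ins for real `g2p` output:

    from bert_vits2.text import cleaned_text_to_sequence
    from bert_vits2.text.symbols import symbols

    _symbol_to_id = {s: i for i, s in enumerate(symbols)}

    phones = ['_', 'n', 'i', 'h', 'ao', '_']  # padded phoneme symbols
    tones = [0, 2, 2, 3, 3, 0]                # per-phoneme tones before offsetting
    phone_ids, tone_ids, lang_ids = cleaned_text_to_sequence(phones, tones, 'zh', _symbol_to_id)
    # for "zh" the tone offset is 0 and every entry of lang_ids is 0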
bert_vits2/text/chinese.py ADDED
@@ -0,0 +1,196 @@
+ import os
+ import re
+
+ import cn2an
+ from pypinyin import lazy_pinyin, Style
+
+ from bert_vits2.text.symbols import punctuation
+ from bert_vits2.text.tone_sandhi import ToneSandhi
+
+ current_file_path = os.path.dirname(__file__)
+ pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
+                         open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
+
+ import jieba.posseg as psg
+ from jieba import lcut
+
+ lcut("预加载")  # warm up jieba so the first real request does not pay the dictionary-loading cost
+
+ rep_map = {
+     ':': ',',
+     ';': ',',
+     ',': ',',
+     '。': '.',
+     '!': '!',
+     '?': '?',
+     '\n': '.',
+     "·": ",",
+     '、': ",",
+     '...': '…',
+     '$': '.',
+     '“': "'",
+     '”': "'",
+     '‘': "'",
+     '’': "'",
+     '(': "'",
+     ')': "'",
+     '(': "'",
+     ')': "'",
+     '《': "'",
+     '》': "'",
+     '【': "'",
+     '】': "'",
+     '[': "'",
+     ']': "'",
+     '—': "-",
+     '~': "-",
+     '~': "-",
+     '「': "'",
+     '」': "'",
+ }
+
+ tone_modifier = ToneSandhi()
+
+
+ def replace_punctuation(text):
+     text = text.replace("嗯", "恩").replace("呣", "母")
+     pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
+
+     replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
+
+     replaced_text = re.sub(r'[^\u4e00-\u9fa5' + "".join(punctuation) + r']+', '', replaced_text)
+
+     return replaced_text
+
+
+ def g2p(text):
+     pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
+     sentences = [i for i in re.split(pattern, text) if i.strip() != '']
+     phones, tones, word2ph = _g2p(sentences)
+     assert sum(word2ph) == len(phones)
+     assert len(word2ph) == len(text)  # occasionally fails on unusual input; wrap in try/except if needed
+     phones = ['_'] + phones + ["_"]
+     tones = [0] + tones + [0]
+     word2ph = [1] + word2ph + [1]
+     return phones, tones, word2ph
+
+
+ def _get_initials_finals(word):
+     initials = []
+     finals = []
+     orig_initials = lazy_pinyin(
+         word, neutral_tone_with_five=True, style=Style.INITIALS)
+     orig_finals = lazy_pinyin(
+         word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
+     for c, v in zip(orig_initials, orig_finals):
+         initials.append(c)
+         finals.append(v)
+     return initials, finals
+
+
+ def _g2p(segments):
+     phones_list = []
+     tones_list = []
+     word2ph = []
+     for seg in segments:
+         # strip ASCII letters: English words are not handled by the Chinese g2p
+         seg = re.sub('[a-zA-Z]+', '', seg)
+         seg_cut = psg.lcut(seg)
+         initials = []
+         finals = []
+         seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
+         for word, pos in seg_cut:
+             if pos == 'eng':
+                 continue
+             sub_initials, sub_finals = _get_initials_finals(word)
+             sub_finals = tone_modifier.modified_tone(word, pos,
+                                                      sub_finals)
+             initials.append(sub_initials)
+             finals.append(sub_finals)
+
+             # assert len(sub_initials) == len(sub_finals) == len(word)
+         initials = sum(initials, [])
+         finals = sum(finals, [])
+         for c, v in zip(initials, finals):
+             raw_pinyin = c + v
+             # NOTE: post-process pypinyin output; i, ii and iii are distinguished here
+             if c == v:
+                 assert c in punctuation
+                 phone = [c]
+                 tone = '0'
+                 word2ph.append(1)
+             else:
+                 v_without_tone = v[:-1]
+                 tone = v[-1]
+
+                 pinyin = c + v_without_tone
+                 assert tone in '12345'
+
+                 if c:
+                     # syllable with an initial: normalize a few contracted finals
+                     v_rep_map = {
+                         "uei": 'ui',
+                         'iou': 'iu',
+                         'uen': 'un',
+                     }
+                     if v_without_tone in v_rep_map.keys():
+                         pinyin = c + v_rep_map[v_without_tone]
+                 else:
+                     # zero-initial syllable: rewrite to the standalone spelling
+                     pinyin_rep_map = {
+                         'ing': 'ying',
+                         'i': 'yi',
+                         'in': 'yin',
+                         'u': 'wu',
+                     }
+                     if pinyin in pinyin_rep_map.keys():
+                         pinyin = pinyin_rep_map[pinyin]
+                     else:
+                         single_rep_map = {
+                             'v': 'yu',
+                             'e': 'e',
+                             'i': 'y',
+                             'u': 'w',
+                         }
+                         if pinyin[0] in single_rep_map.keys():
+                             pinyin = single_rep_map[pinyin[0]] + pinyin[1:]
+
+                 assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
+                 phone = pinyin_to_symbol_map[pinyin].split(' ')
+                 word2ph.append(len(phone))
+
+             phones_list += phone
+             tones_list += [int(tone)] * len(phone)
+     return phones_list, tones_list, word2ph
+
+
+ def text_normalize(text):
+     numbers = re.findall(r'\d+(?:\.?\d+)?', text)
+     for number in numbers:
+         text = text.replace(number, cn2an.an2cn(number), 1)
+     text = replace_punctuation(text)
+     return text
+
+
+ def get_bert_feature(text, word2ph):
+     from bert_vits2.text import chinese_bert
+     return chinese_bert.get_bert_feature(text, word2ph)
+
+
+ if __name__ == '__main__':
+     text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
+     text = text_normalize(text)
+     print(text)
+     phones, tones, word2ph = g2p(text)
+     bert = get_bert_feature(text, word2ph)
+
+     print(phones, tones, word2ph, bert.shape)
bert_vits2/text/chinese_bert.py ADDED
@@ -0,0 +1,70 @@
+ import os
+
+ import config
+ import torch
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+ from logger import logger
+ from utils.download import download_and_verify
+ from config import DEVICE as device
+
+ URLS = [
+     "https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/resolve/main/pytorch_model.bin",
+ ]
+ TARGET_PATH = os.path.join(config.ABS_PATH, "bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin")
+ EXPECTED_MD5 = None
+
+ if not os.path.exists(TARGET_PATH):
+     success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5)
+
+ try:
+     logger.info("Loading chinese-roberta-wwm-ext-large...")
+     tokenizer = AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large")
+     model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large").to(
+         device)
+     logger.info("Loading finished.")
+ except Exception as e:
+     logger.error(e)
+     logger.error("Please download pytorch_model.bin from hfl/chinese-roberta-wwm-ext-large.")
+
+
+ def get_bert_feature(text, word2ph, device=config.DEVICE):
+     with torch.no_grad():
+         inputs = tokenizer(text, return_tensors='pt')
+         for i in inputs:
+             inputs[i] = inputs[i].to(device)
+         res = model(**inputs, output_hidden_states=True)
+         # keep the third-to-last hidden layer as the token-level feature
+         res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu()
+
+     # one feature per token: [CLS] + one per character + [SEP]
+     assert len(word2ph) == len(text) + 2
+     word2phone = word2ph
+     phone_level_feature = []
+     for i in range(len(word2phone)):
+         repeat_feature = res[i].repeat(word2phone[i], 1)
+         phone_level_feature.append(repeat_feature)
+
+     phone_level_feature = torch.cat(phone_level_feature, dim=0)
+
+     return phone_level_feature.T
+
+
+ if __name__ == '__main__':
+     import torch
+
+     word_level_feature = torch.rand(38, 1024)  # 38 tokens, one 1024-dim feature each
+     word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2,
+                   2, 2, 2, 1]
+
+     # total number of frames after repeating each token's feature
+     total_frames = sum(word2phone)
+     print(word_level_feature.shape)
+     print(word2phone)
+     phone_level_feature = []
+     for i in range(len(word2phone)):
+         print(word_level_feature[i].shape)
+
+         # repeat each token's feature word2phone[i] times
+         repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
+         phone_level_feature.append(repeat_feature)
+
+     phone_level_feature = torch.cat(phone_level_feature, dim=0)
+     print(phone_level_feature.shape)  # torch.Size([65, 1024]), i.e. sum(word2phone) rows
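For reference, `hidden_states[-3:-2]` is a one-element slice, so the `torch.cat` keeps the third-to-last encoder layer's 1024-dim features unchanged; the slice-plus-cat form only matters if several layers are concatenated later. The alignment contract, sketched under the assumption that the checkpoint above loaded and that this tokenizer emits one token per Chinese character plus [CLS] and [SEP]:

    text = "你好"
    word2ph = [1, 2, 2, 1]  # [CLS], two phones per character, [SEP]
    feat = get_bert_feature(text, word2ph)
    assert feat.shape == (1024, sum(word2ph))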
bert_vits2/text/cleaner.py ADDED
@@ -0,0 +1,30 @@
+ from bert_vits2.text import chinese, japanese, cleaned_text_to_sequence
+ from bert_vits2.text.symbols import symbols
+
+ language_module_map = {
+     'zh': chinese,
+     'ja': japanese
+ }
+
+ # default symbol table; models trained on the legacy symbol set should supply their own mapping
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
+
+
+ def clean_text(text, language):
+     language_module = language_module_map[language]
+     norm_text = language_module.text_normalize(text)
+     phones, tones, word2ph = language_module.g2p(norm_text)
+     return norm_text, phones, tones, word2ph
+
+
+ def clean_text_bert(text, language):
+     language_module = language_module_map[language]
+     norm_text = language_module.text_normalize(text)
+     phones, tones, word2ph = language_module.g2p(norm_text)
+     bert = language_module.get_bert_feature(norm_text, word2ph)
+     return phones, tones, bert
+
+
+ def text_to_sequence(text, language):
+     norm_text, phones, tones, word2ph = clean_text(text, language)
+     return cleaned_text_to_sequence(phones, tones, language, _symbol_to_id)
+
+
+ if __name__ == '__main__':
+     pass
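(The original `text_to_sequence` called `cleaned_text_to_sequence` without the required `_symbol_to_id` argument; the default table above is an editorial fix for that.) A usage sketch of the cleaning pipeline; the exact phones depend on the lexicon and tone sandhi, so the outputs are described rather than shown:

    from bert_vits2.text.cleaner import clean_text

    norm_text, phones, tones, word2ph = clean_text("你好,世界!", "zh")
    # norm_text: numbers spelled out, punctuation mapped into the shared set
    # phones/tones: per-phoneme lists, padded with '_' / tone 0 at both ends
    # word2ph: phones per character, with len(word2ph) == len(norm_text) + 2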
bert_vits2/text/cmudict.rep ADDED
The diff for this file is too large to render. See raw diff
 
bert_vits2/text/cmudict_cache.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9b21b20325471934ba92f2e4a5976989e7d920caa32e7a286eacb027d197949
+ size 6212655
bert_vits2/text/english.py ADDED
@@ -0,0 +1,146 @@
+ import pickle
+ import os
+ import re
+
+ from g2p_en import G2p
+
+ from bert_vits2.text import symbols
+
+ current_file_path = os.path.dirname(__file__)
+ CMU_DICT_PATH = os.path.join(current_file_path, 'cmudict.rep')
+ CACHE_PATH = os.path.join(current_file_path, 'cmudict_cache.pickle')
+ _g2p = G2p()
+
+ arpa = {'AH0', 'S', 'AH1', 'EY2', 'AE2', 'EH0', 'OW2', 'UH0', 'NG', 'B', 'G', 'AY0', 'M', 'AA0', 'F', 'AO0', 'ER2',
+         'UH1', 'IY1', 'AH2', 'DH', 'IY0', 'EY1', 'IH0', 'K', 'N', 'W', 'IY2', 'T', 'AA1', 'ER1', 'EH2', 'OY0', 'UH2',
+         'UW1', 'Z', 'AW2', 'AW1', 'V', 'UW2', 'AA2', 'ER', 'AW0', 'UW0', 'R', 'OW1', 'EH1', 'ZH', 'AE0', 'IH2', 'IH',
+         'Y', 'JH', 'P', 'AY1', 'EY0', 'OY2', 'TH', 'HH', 'D', 'ER0', 'CH', 'AO1', 'AE1', 'AO2', 'OY1', 'AY2', 'IH1',
+         'OW0', 'L', 'SH'}
+
+
+ def post_replace_ph(ph):
+     rep_map = {
+         ':': ',',
+         ';': ',',
+         ',': ',',
+         '。': '.',
+         '!': '!',
+         '?': '?',
+         '\n': '.',
+         "·": ",",
+         '、': ",",
+         '...': '…',
+         'v': "V"
+     }
+     if ph in rep_map.keys():
+         ph = rep_map[ph]
+     if ph in symbols:
+         return ph
+     # anything outside the symbol set maps to UNK
+     return 'UNK'
+
+
+ def read_dict():
+     g2p_dict = {}
+     start_line = 49  # entries begin after the cmudict.rep license header
+     with open(CMU_DICT_PATH) as f:
+         line = f.readline()
+         line_index = 1
+         while line:
+             if line_index >= start_line:
+                 line = line.strip()
+                 word_split = line.split('  ')
+                 word = word_split[0]
+
+                 syllable_split = word_split[1].split(' - ')
+                 g2p_dict[word] = []
+                 for syllable in syllable_split:
+                     phone_split = syllable.split(' ')
+                     g2p_dict[word].append(phone_split)
+
+             line_index = line_index + 1
+             line = f.readline()
+
+     return g2p_dict
+
+
+ def cache_dict(g2p_dict, file_path):
+     with open(file_path, 'wb') as pickle_file:
+         pickle.dump(g2p_dict, pickle_file)
+
+
+ def get_dict():
+     if os.path.exists(CACHE_PATH):
+         with open(CACHE_PATH, 'rb') as pickle_file:
+             g2p_dict = pickle.load(pickle_file)
+     else:
+         g2p_dict = read_dict()
+         cache_dict(g2p_dict, CACHE_PATH)
+
+     return g2p_dict
+
+
+ eng_dict = get_dict()
+
+
+ def refine_ph(phn):
+     tone = 0
+     if re.search(r'\d$', phn):
+         tone = int(phn[-1]) + 1
+         phn = phn[:-1]
+     return phn.lower(), tone
+
+
+ def refine_syllables(syllables):
+     tones = []
+     phonemes = []
+     for phn_list in syllables:
+         for i in range(len(phn_list)):
+             phn = phn_list[i]
+             phn, tone = refine_ph(phn)
+             phonemes.append(phn)
+             tones.append(tone)
+     return phonemes, tones
+
+
+ def text_normalize(text):
+     # English text is currently passed through unchanged
+     return text
+
+
+ def g2p(text):
+     phones = []
+     tones = []
+     words = re.split(r"([,;.\-\?\!\s+])", text)
+     for w in words:
+         if w.upper() in eng_dict:
+             phns, tns = refine_syllables(eng_dict[w.upper()])
+             phones += phns
+             tones += tns
+         else:
+             phone_list = list(filter(lambda p: p != " ", _g2p(w)))
+             for ph in phone_list:
+                 if ph in arpa:
+                     ph, tn = refine_ph(ph)
+                     phones.append(ph)
+                     tones.append(tn)
+                 else:
+                     phones.append(ph)
+                     tones.append(0)
+
+     word2ph = [1 for i in phones]
+
+     phones = [post_replace_ph(i) for i in phones]
+     return phones, tones, word2ph
+
+
+ if __name__ == "__main__":
+     # print(get_dict())
+     # print(eng_word_to_phoneme("hello"))
+     print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
+     # all_phones = set()
+     # for k, syllables in eng_dict.items():
+     #     for group in syllables:
+     #         for ph in group:
+     #             all_phones.add(ph)
+     # print(all_phones)
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
+ def get_bert_feature(norm_text, word2ph):
5
+ return torch.zeros(1024, sum(word2ph))
bert_vits2/text/japanese.py ADDED
@@ -0,0 +1,585 @@
+ # Convert Japanese text to phonemes which is
+ # compatible with Julius https://github.com/julius-speech/segmentation-kit
+ import re
+ import unicodedata
+
+ from transformers import AutoTokenizer
+
+ from bert_vits2.text.symbols import *
+ from bert_vits2.text.japanese_bert import tokenizer
+
+ try:
+     import MeCab
+ except ImportError as e:
+     raise ImportError("Japanese requires mecab-python3 and unidic-lite.") from e
+ from num2words import num2words
+
+ _CONVRULES = [
+     # Conversion of 2 letters
+     "アァ/ a a", "イィ/ i i", "イェ/ i e", "イャ/ y a", "ウゥ/ u:", "エェ/ e e",
+     "オォ/ o:", "カァ/ k a:", "キィ/ k i:", "クゥ/ k u:", "クャ/ ky a", "クュ/ ky u",
+     "クョ/ ky o", "ケェ/ k e:", "コォ/ k o:", "ガァ/ g a:", "ギィ/ g i:", "グゥ/ g u:",
+     "グャ/ gy a", "グュ/ gy u", "グョ/ gy o", "ゲェ/ g e:", "ゴォ/ g o:", "サァ/ s a:",
+     "シィ/ sh i:", "スゥ/ s u:", "スャ/ sh a", "スュ/ sh u", "スョ/ sh o", "セェ/ s e:",
+     "ソォ/ s o:", "ザァ/ z a:", "ジィ/ j i:", "ズゥ/ z u:", "ズャ/ zy a", "ズュ/ zy u",
+     "ズョ/ zy o", "ゼェ/ z e:", "ゾォ/ z o:", "タァ/ t a:", "チィ/ ch i:", "ツァ/ ts a",
+     "ツィ/ ts i", "ツゥ/ ts u:", "ツャ/ ch a", "ツュ/ ch u", "ツョ/ ch o", "ツェ/ ts e",
+     "ツォ/ ts o", "テェ/ t e:", "トォ/ t o:", "ダァ/ d a:", "ヂィ/ j i:", "ヅゥ/ d u:",
+     "ヅャ/ zy a", "ヅュ/ zy u", "ヅョ/ zy o", "デェ/ d e:", "ドォ/ d o:", "ナァ/ n a:",
+     "ニィ/ n i:", "ヌゥ/ n u:", "ヌャ/ ny a", "ヌュ/ ny u", "ヌョ/ ny o", "ネェ/ n e:",
+     "ノォ/ n o:", "ハァ/ h a:", "ヒィ/ h i:", "フゥ/ f u:", "フャ/ hy a", "フュ/ hy u",
+     "フョ/ hy o", "ヘェ/ h e:", "ホォ/ h o:", "バァ/ b a:", "ビィ/ b i:", "ブゥ/ b u:",
+     "フャ/ hy a", "ブュ/ by u", "フョ/ hy o", "ベェ/ b e:", "ボォ/ b o:", "パァ/ p a:",
+     "ピィ/ p i:", "プゥ/ p u:", "プャ/ py a", "プュ/ py u", "プョ/ py o", "ペェ/ p e:",
+     "ポォ/ p o:", "マァ/ m a:", "ミィ/ m i:", "ムゥ/ m u:", "ムャ/ my a", "ムュ/ my u",
+     "ムョ/ my o", "メェ/ m e:", "モォ/ m o:", "ヤァ/ y a:", "ユゥ/ y u:", "ユャ/ y a:",
+     "ユュ/ y u:", "ユョ/ y o:", "ヨォ/ y o:", "ラァ/ r a:", "リィ/ r i:", "ルゥ/ r u:",
+     "ルャ/ ry a", "ルュ/ ry u", "ルョ/ ry o", "レェ/ r e:", "ロォ/ r o:", "ワァ/ w a:",
+     "ヲォ/ o:", "ディ/ d i", "デェ/ d e:", "デャ/ dy a", "デュ/ dy u", "デョ/ dy o",
+     "ティ/ t i", "テェ/ t e:", "テャ/ ty a", "テュ/ ty u", "テョ/ ty o", "スィ/ s i",
+     "ズァ/ z u a", "ズィ/ z i", "ズゥ/ z u", "ズャ/ zy a", "ズュ/ zy u", "ズョ/ zy o",
+     "ズェ/ z e", "ズォ/ z o", "キャ/ ky a", "キュ/ ky u", "キョ/ ky o", "シャ/ sh a",
+     "シュ/ sh u", "シェ/ sh e", "ショ/ sh o", "チャ/ ch a", "チュ/ ch u", "チェ/ ch e",
+     "チョ/ ch o", "トゥ/ t u", "トャ/ ty a", "トュ/ ty u", "トョ/ ty o", "ドァ/ d o a",
+     "ドゥ/ d u", "ドャ/ dy a", "ドュ/ dy u", "ドョ/ dy o", "ドォ/ d o:", "ニャ/ ny a",
+     "ニュ/ ny u", "ニョ/ ny o", "ヒャ/ hy a", "ヒュ/ hy u", "ヒョ/ hy o", "ミャ/ my a",
+     "ミュ/ my u", "ミョ/ my o", "リャ/ ry a", "リュ/ ry u", "リョ/ ry o", "ギャ/ gy a",
+     "ギュ/ gy u", "ギョ/ gy o", "ヂェ/ j e", "ヂャ/ j a", "ヂュ/ j u", "ヂョ/ j o",
+     "ジェ/ j e", "ジャ/ j a", "ジュ/ j u", "ジョ/ j o", "ビャ/ by a", "ビュ/ by u",
+     "ビョ/ by o", "ピャ/ py a", "ピュ/ py u", "ピョ/ py o", "ウァ/ u a", "ウィ/ w i",
+     "ウェ/ w e", "ウォ/ w o", "ファ/ f a", "フィ/ f i", "フゥ/ f u", "フャ/ hy a",
+     "フュ/ hy u", "フョ/ hy o", "フェ/ f e", "フォ/ f o", "ヴァ/ b a", "ヴィ/ b i",
+     "ヴェ/ b e", "ヴォ/ b o", "ヴュ/ by u",
+     # Conversion of 1 letter
+     "ア/ a", "イ/ i", "ウ/ u", "エ/ e", "オ/ o", "カ/ k a", "キ/ k i", "ク/ k u",
+     "ケ/ k e", "コ/ k o", "サ/ s a", "シ/ sh i", "ス/ s u", "セ/ s e", "ソ/ s o",
+     "タ/ t a", "チ/ ch i", "ツ/ ts u", "テ/ t e", "ト/ t o", "ナ/ n a", "ニ/ n i",
+     "ヌ/ n u", "ネ/ n e", "ノ/ n o", "ハ/ h a", "ヒ/ h i", "フ/ f u", "ヘ/ h e",
+     "ホ/ h o", "マ/ m a", "ミ/ m i", "ム/ m u", "メ/ m e", "モ/ m o", "ラ/ r a",
+     "リ/ r i", "ル/ r u", "レ/ r e", "ロ/ r o", "ガ/ g a", "ギ/ g i", "グ/ g u",
+     "ゲ/ g e", "ゴ/ g o", "ザ/ z a", "ジ/ j i", "ズ/ z u", "ゼ/ z e", "ゾ/ z o",
+     "ダ/ d a", "ヂ/ j i", "ヅ/ z u", "デ/ d e", "ド/ d o", "バ/ b a", "ビ/ b i",
+     "ブ/ b u", "ベ/ b e", "ボ/ b o", "パ/ p a", "ピ/ p i", "プ/ p u", "ペ/ p e",
+     "ポ/ p o", "ヤ/ y a", "ユ/ y u", "ヨ/ y o", "ワ/ w a", "ヰ/ i", "ヱ/ e",
+     "ヲ/ o", "ン/ N", "ッ/ q", "ヴ/ b u", "ー/:",
+     # Try converting broken text
+     "ァ/ a", "ィ/ i", "ゥ/ u", "ェ/ e", "ォ/ o", "ヮ/ w a", "ォ/ o",
+     # Symbols
+     "、/ ,", "。/ .", "!/ !", "?/ ?", "・/ ,",
+ ]
+
+ _COLON_RX = re.compile(":+")
+ _REJECT_RX = re.compile("[^ a-zA-Z:,.?]")
+
+
+ def _makerulemap():
+     l = [tuple(x.split("/")) for x in _CONVRULES]
+     return tuple({k: v for k, v in l if len(k) == i} for i in (1, 2))
+
+
+ _RULEMAP1, _RULEMAP2 = _makerulemap()
+
+
+ def kata2phoneme(text: str) -> list:
+     """Convert katakana text to a list of phonemes."""
+     text = text.strip()
+     res = []
+     while text:
+         if len(text) >= 2:
+             x = _RULEMAP2.get(text[:2])
+             if x is not None:
+                 text = text[2:]
+                 res += x.split(" ")[1:]
+                 continue
+         x = _RULEMAP1.get(text[0])
+         if x is not None:
+             text = text[1:]
+             res += x.split(" ")[1:]
+             continue
+         res.append(text[0])
+         text = text[1:]
+     # res = _COLON_RX.sub(":", res)
+     return res
+
+
+ _KATAKANA = "".join(chr(ch) for ch in range(ord("ァ"), ord("ン") + 1))
+ _HIRAGANA = "".join(chr(ch) for ch in range(ord("ぁ"), ord("ん") + 1))
+ _HIRA2KATATRANS = str.maketrans(_HIRAGANA, _KATAKANA)
+
+
+ def hira2kata(text: str) -> str:
+     text = text.translate(_HIRA2KATATRANS)
+     return text.replace("う゛", "ヴ")
+
+
+ _SYMBOL_TOKENS = set(list("・、。?!"))
+ _NO_YOMI_TOKENS = set(list("「」『』―()[][]"))
+ _TAGGER = MeCab.Tagger()
+
+
+ def text2kata(text: str) -> str:
+     parsed = _TAGGER.parse(text)
+     res = []
+     for line in parsed.split("\n"):
+         if line == "EOS":
+             break
+         parts = line.split("\t")
+
+         word, yomi = parts[0], parts[1]
+         if yomi:
+             res.append(yomi)
+         else:
+             if word in _SYMBOL_TOKENS:
+                 res.append(word)
+             elif word in ("っ", "ッ"):
+                 res.append("ッ")
+             elif word in _NO_YOMI_TOKENS:
+                 pass
+             else:
+                 res.append(word)
+     return hira2kata("".join(res))
+
+
+ _ALPHASYMBOL_YOMI = {
+     "#": "シャープ", "%": "パーセント", "&": "アンド", "+": "プラス", "-": "マイナス",
+     ":": "コロン", ";": "セミコロン", "<": "小なり", "=": "イコール", ">": "大なり",
+     "@": "アット",
+     "a": "エー", "b": "ビー", "c": "シー", "d": "ディー", "e": "イー", "f": "エフ",
+     "g": "ジー", "h": "エイチ", "i": "アイ", "j": "ジェー", "k": "ケー", "l": "エル",
+     "m": "エム", "n": "エヌ", "o": "オー", "p": "ピー", "q": "キュー", "r": "アール",
+     "s": "エス", "t": "ティー", "u": "ユー", "v": "ブイ", "w": "ダブリュー",
+     "x": "エックス", "y": "ワイ", "z": "ゼット",
+     "α": "アルファ", "β": "ベータ", "γ": "ガンマ", "δ": "デルタ", "ε": "イプシロン",
+     "ζ": "ゼータ", "η": "イータ", "θ": "シータ", "ι": "イオタ", "κ": "カッパ",
+     "λ": "ラムダ", "μ": "ミュー", "ν": "ニュー", "ξ": "クサイ", "ο": "オミクロン",
+     "π": "パイ", "ρ": "ロー", "σ": "シグマ", "τ": "タウ", "υ": "ウプシロン",
+     "φ": "ファイ", "χ": "カイ", "ψ": "プサイ", "ω": "オメガ",
+ }
+
+ _NUMBER_WITH_SEPARATOR_RX = re.compile("[0-9]{1,3}(,[0-9]{3})+")
+ _CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"}
+ _CURRENCY_RX = re.compile(r"([$¥£€])([0-9.]*[0-9])")
+ _NUMBER_RX = re.compile(r"[0-9]+(\.[0-9]+)?")
+
+
+ def japanese_convert_numbers_to_words(text: str) -> str:
+     res = _NUMBER_WITH_SEPARATOR_RX.sub(lambda m: m[0].replace(",", ""), text)
+     res = _CURRENCY_RX.sub(lambda m: m[2] + _CURRENCY_MAP.get(m[1], m[1]), res)
+     res = _NUMBER_RX.sub(lambda m: num2words(m[0], lang="ja"), res)
+     return res
+
+
+ def japanese_convert_alpha_symbols_to_words(text: str) -> str:
+     return "".join([_ALPHASYMBOL_YOMI.get(ch, ch) for ch in text.lower()])
+
+
+ def japanese_text_to_phonemes(text: str) -> list:
+     """Convert Japanese text to a list of phonemes."""
+     res = unicodedata.normalize("NFKC", text)
+     res = japanese_convert_numbers_to_words(res)
+     # res = japanese_convert_alpha_symbols_to_words(res)
+     res = text2kata(res)
+     res = kata2phoneme(res)
+     return res
+
+
+ def is_japanese_character(char):
+     # Unicode ranges covering the Japanese writing system
+     japanese_ranges = [
+         (0x3040, 0x309F),  # Hiragana
+         (0x30A0, 0x30FF),  # Katakana
+         (0x4E00, 0x9FFF),  # Kanji (CJK Unified Ideographs)
+         (0x3400, 0x4DBF),  # CJK Extension A
+         (0x20000, 0x2A6DF),  # CJK Extension B
+         # further CJK extension ranges can be added if needed
+     ]
+
+     # integer code point of the character
+     char_code = ord(char)
+
+     # check whether the character falls inside any Japanese range
+     for start, end in japanese_ranges:
+         if start <= char_code <= end:
+             return True
+
+     return False
+
+
+ rep_map = {
+     ":": ",",
+     ";": ",",
+     ",": ",",
+     "。": ".",
+     "!": "!",
+     "?": "?",
+     "\n": ".",
+     "·": ",",
+     "、": ",",
+     "...": "…",
+ }
+
+
+ def replace_punctuation(text):
+     pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
+
+     replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
+
+     replaced_text = re.sub(
+         r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF"
+         + "".join(punctuation)
+         + r"]+",
+         "",
+         replaced_text,
+     )
+
+     return replaced_text
+
+
+ def text_normalize(text):
+     res = unicodedata.normalize("NFKC", text)
+     res = japanese_convert_numbers_to_words(res)
+     # res = "".join([i for i in res if is_japanese_character(i)])
+     res = replace_punctuation(res)
+     return res
+
+
+ def distribute_phone(n_phone, n_word):
+     # spread n_phone phonemes over n_word tokens as evenly as possible,
+     # giving earlier tokens the extra phones
+     phones_per_word = [0] * n_word
+     for task in range(n_phone):
+         min_tasks = min(phones_per_word)
+         min_index = phones_per_word.index(min_tasks)
+         phones_per_word[min_index] += 1
+     return phones_per_word
+
+
+ def g2p(norm_text):
+     tokenized = tokenizer.tokenize(norm_text)
+     phs = []
+     ph_groups = []
+     for t in tokenized:
+         if not t.startswith("#"):
+             ph_groups.append([t])
+         else:
+             ph_groups[-1].append(t.replace("#", ""))
+     word2ph = []
+     for group in ph_groups:
+         phonemes = kata2phoneme(text2kata("".join(group)))
+         # phonemes = [i for i in phonemes if i in symbols]
+         for i in phonemes:
+             assert i in symbols, (i, group, norm_text, tokenized)
+         phone_len = len(phonemes)
+         word_len = len(group)
+
+         phones_per_token = distribute_phone(phone_len, word_len)
+         word2ph += phones_per_token
+
+         phs += phonemes
+     phones = ["_"] + phs + ["_"]
+     tones = [0 for i in phones]
+     word2ph = [1] + word2ph + [1]
+     return phones, tones, word2ph
+
+
+ if __name__ == "__main__":
+     from config import ABS_PATH
+
+     tokenizer = AutoTokenizer.from_pretrained(ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3")
+     text = "hello,こんにちは、世界!……"
+     from bert_vits2.text.japanese_bert import get_bert_feature
+
+     text = text_normalize(text)
+     print(text)
+     phones, tones, word2ph = g2p(text)
+     bert = get_bert_feature(text, word2ph)
+
+     print(phones, tones, word2ph, bert.shape)
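`distribute_phone` is what keeps `word2ph` consistent when one subword token expands to several phonemes: it spreads the phonemes over the tokens as evenly as possible, giving earlier tokens the extras. A quick check of that behavior:

    from bert_vits2.text.japanese import distribute_phone

    print(distribute_phone(7, 3))  # [3, 2, 2]
    print(distribute_phone(4, 4))  # [1, 1, 1, 1]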
bert_vits2/text/japanese_bert.py ADDED
@@ -0,0 +1,47 @@
+ import os
+
+ import torch
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+
+ import config
+ from logger import logger
+ from utils.download import download_and_verify
+ from config import DEVICE as device
+
+ URLS = [
+     "https://huggingface.co/cl-tohoku/bert-base-japanese-v3/resolve/main/pytorch_model.bin",
+ ]
+ TARGET_PATH = os.path.join(config.ABS_PATH, "bert_vits2/bert/bert-base-japanese-v3/pytorch_model.bin")
+ EXPECTED_MD5 = None
+
+ if not os.path.exists(TARGET_PATH):
+     success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5)
+
+ try:
+     logger.info("Loading bert-base-japanese-v3...")
+     tokenizer = AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3")
+     model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3").to(
+         device)
+     logger.info("Loading finished.")
+ except Exception as e:
+     logger.error(e)
+     logger.error("Please download pytorch_model.bin from cl-tohoku/bert-base-japanese-v3.")
+
+
+ def get_bert_feature(text, word2ph, device=config.DEVICE):
+     with torch.no_grad():
+         inputs = tokenizer(text, return_tensors="pt")
+         for i in inputs:
+             inputs[i] = inputs[i].to(device)
+         res = model(**inputs, output_hidden_states=True)
+         # keep the third-to-last hidden layer as the token-level feature
+         res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
+     # one feature per subword token, so word2ph must match the tokenization
+     assert inputs["input_ids"].shape[-1] == len(word2ph)
+     word2phone = word2ph
+     phone_level_feature = []
+     for i in range(len(word2phone)):
+         repeat_feature = res[i].repeat(word2phone[i], 1)
+         phone_level_feature.append(repeat_feature)
+
+     phone_level_feature = torch.cat(phone_level_feature, dim=0)
+
+     return phone_level_feature.T
bert_vits2/text/opencpop-strict.txt ADDED
@@ -0,0 +1,429 @@
+ a AA a
+ ai AA ai
+ an AA an
+ ang AA ang
+ ao AA ao
+ ba b a
+ bai b ai
+ ban b an
+ bang b ang
+ bao b ao
+ bei b ei
+ ben b en
+ beng b eng
+ bi b i
+ bian b ian
+ biao b iao
+ bie b ie
+ bin b in
+ bing b ing
+ bo b o
+ bu b u
+ ca c a
+ cai c ai
+ can c an
+ cang c ang
+ cao c ao
+ ce c e
+ cei c ei
+ cen c en
+ ceng c eng
+ cha ch a
+ chai ch ai
+ chan ch an
+ chang ch ang
+ chao ch ao
+ che ch e
+ chen ch en
+ cheng ch eng
+ chi ch ir
+ chong ch ong
+ chou ch ou
+ chu ch u
+ chua ch ua
+ chuai ch uai
+ chuan ch uan
+ chuang ch uang
+ chui ch ui
+ chun ch un
+ chuo ch uo
+ ci c i0
+ cong c ong
+ cou c ou
+ cu c u
+ cuan c uan
+ cui c ui
+ cun c un
+ cuo c uo
+ da d a
+ dai d ai
+ dan d an
+ dang d ang
+ dao d ao
+ de d e
+ dei d ei
+ den d en
+ deng d eng
+ di d i
+ dia d ia
+ dian d ian
+ diao d iao
+ die d ie
+ ding d ing
+ diu d iu
+ dong d ong
+ dou d ou
+ du d u
+ duan d uan
+ dui d ui
+ dun d un
+ duo d uo
+ e EE e
+ ei EE ei
+ en EE en
+ eng EE eng
+ er EE er
+ fa f a
+ fan f an
+ fang f ang
+ fei f ei
+ fen f en
+ feng f eng
+ fo f o
+ fou f ou
+ fu f u
+ ga g a
+ gai g ai
+ gan g an
+ gang g ang
+ gao g ao
+ ge g e
+ gei g ei
+ gen g en
+ geng g eng
+ gong g ong
+ gou g ou
+ gu g u
+ gua g ua
+ guai g uai
+ guan g uan
+ guang g uang
+ gui g ui
+ gun g un
+ guo g uo
+ ha h a
+ hai h ai
+ han h an
+ hang h ang
+ hao h ao
+ he h e
+ hei h ei
+ hen h en
+ heng h eng
+ hong h ong
+ hou h ou
+ hu h u
+ hua h ua
+ huai h uai
+ huan h uan
+ huang h uang
+ hui h ui
+ hun h un
+ huo h uo
+ ji j i
+ jia j ia
+ jian j ian
+ jiang j iang
+ jiao j iao
+ jie j ie
+ jin j in
+ jing j ing
+ jiong j iong
+ jiu j iu
+ ju j v
+ jv j v
+ juan j van
+ jvan j van
+ jue j ve
+ jve j ve
+ jun j vn
+ jvn j vn
+ ka k a
+ kai k ai
+ kan k an
+ kang k ang
+ kao k ao
+ ke k e
+ kei k ei
+ ken k en
+ keng k eng
+ kong k ong
+ kou k ou
+ ku k u
+ kua k ua
+ kuai k uai
+ kuan k uan
+ kuang k uang
+ kui k ui
+ kun k un
+ kuo k uo
+ la l a
+ lai l ai
+ lan l an
+ lang l ang
+ lao l ao
+ le l e
+ lei l ei
+ leng l eng
+ li l i
+ lia l ia
+ lian l ian
+ liang l iang
+ liao l iao
+ lie l ie
+ lin l in
+ ling l ing
+ liu l iu
+ lo l o
+ long l ong
+ lou l ou
+ lu l u
+ luan l uan
+ lun l un
+ luo l uo
+ lv l v
+ lve l ve
+ ma m a
+ mai m ai
+ man m an
+ mang m ang
+ mao m ao
+ me m e
+ mei m ei
+ men m en
+ meng m eng
+ mi m i
+ mian m ian
+ miao m iao
+ mie m ie
+ min m in
+ ming m ing
+ miu m iu
+ mo m o
+ mou m ou
+ mu m u
+ na n a
+ nai n ai
+ nan n an
+ nang n ang
+ nao n ao
+ ne n e
+ nei n ei
+ nen n en
+ neng n eng
+ ni n i
+ nian n ian
+ niang n iang
+ niao n iao
+ nie n ie
+ nin n in
+ ning n ing
+ niu n iu
+ nong n ong
+ nou n ou
+ nu n u
+ nuan n uan
+ nun n un
+ nuo n uo
+ nv n v
+ nve n ve
+ o OO o
+ ou OO ou
+ pa p a
+ pai p ai
+ pan p an
+ pang p ang
+ pao p ao
+ pei p ei
+ pen p en
+ peng p eng
+ pi p i
+ pian p ian
+ piao p iao
+ pie p ie
+ pin p in
+ ping p ing
+ po p o
+ pou p ou
+ pu p u
+ qi q i
+ qia q ia
+ qian q ian
+ qiang q iang
+ qiao q iao
+ qie q ie
+ qin q in
+ qing q ing
+ qiong q iong
+ qiu q iu
+ qu q v
+ qv q v
+ quan q van
+ qvan q van
+ que q ve
+ qve q ve
+ qun q vn
+ qvn q vn
+ ran r an
+ rang r ang
+ rao r ao
+ re r e
+ ren r en
+ reng r eng
+ ri r ir
+ rong r ong
+ rou r ou
+ ru r u
+ rua r ua
+ ruan r uan
+ rui r ui
+ run r un
+ ruo r uo
+ sa s a
+ sai s ai
+ san s an
+ sang s ang
+ sao s ao
+ se s e
+ sen s en
+ seng s eng
+ sha sh a
+ shai sh ai
+ shan sh an
+ shang sh ang
+ shao sh ao
+ she sh e
+ shei sh ei
+ shen sh en
+ sheng sh eng
+ shi sh ir
+ shou sh ou
+ shu sh u
+ shua sh ua
+ shuai sh uai
+ shuan sh uan
+ shuang sh uang
+ shui sh ui
+ shun sh un
+ shuo sh uo
+ si s i0
+ song s ong
+ sou s ou
+ su s u
+ suan s uan
+ sui s ui
+ sun s un
+ suo s uo
+ ta t a
+ tai t ai
+ tan t an
+ tang t ang
+ tao t ao
+ te t e
+ tei t ei
+ teng t eng
+ ti t i
+ tian t ian
+ tiao t iao
+ tie t ie
+ ting t ing
+ tong t ong
+ tou t ou
+ tu t u
+ tuan t uan
+ tui t ui
+ tun t un
+ tuo t uo
+ wa w a
+ wai w ai
+ wan w an
+ wang w ang
+ wei w ei
+ wen w en
+ weng w eng
+ wo w o
+ wu w u
+ xi x i
+ xia x ia
+ xian x ian
+ xiang x iang
+ xiao x iao
+ xie x ie
+ xin x in
+ xing x ing
+ xiong x iong
+ xiu x iu
+ xu x v
+ xv x v
+ xuan x van
+ xvan x van
+ xue x ve
+ xve x ve
+ xun x vn
+ xvn x vn
+ ya y a
+ yan y En
+ yang y ang
+ yao y ao
+ ye y E
+ yi y i
+ yin y in
+ ying y ing
+ yo y o
+ yong y ong
+ you y ou
+ yu y v
+ yv y v
+ yuan y van
+ yvan y van
+ yue y ve
+ yve y ve
+ yun y vn
+ yvn y vn
+ za z a
+ zai z ai
+ zan z an
+ zang z ang
+ zao z ao
+ ze z e
+ zei z ei
+ zen z en
+ zeng z eng
+ zha zh a
+ zhai zh ai
+ zhan zh an
+ zhang zh ang
+ zhao zh ao
+ zhe zh e
+ zhei zh ei
+ zhen zh en
+ zheng zh eng
+ zhi zh ir
+ zhong zh ong
+ zhou zh ou
+ zhu zh u
+ zhua zh ua
+ zhuai zh uai
+ zhuan zh uan
+ zhuang zh uang
+ zhui zh ui
+ zhun zh un
+ zhuo zh uo
+ zi z i0
+ zong z ong
+ zou z ou
+ zu z u
+ zuan z uan
+ zui z ui
+ zun z un
+ zuo z uo
bert_vits2/text/symbols.py ADDED
@@ -0,0 +1,198 @@
+ punctuation = ["!", "?", "…", ",", ".", "'", "-"]
+ pu_symbols = punctuation + ["SP", "UNK"]
+ pad = "_"
+
+ # Chinese
+ zh_symbols = [
+     "E", "En", "a", "ai", "an", "ang", "ao", "b", "c", "ch", "d", "e", "ei", "en",
+     "eng", "er", "f", "g", "h", "i", "i0", "ia", "ian", "iang", "iao", "ie", "in",
+     "ing", "iong", "ir", "iu", "j", "k", "l", "m", "n", "o", "ong", "ou", "p", "q",
+     "r", "s", "sh", "t", "u", "ua", "uai", "uan", "uang", "ui", "un", "uo", "v",
+     "van", "ve", "vn", "w", "x", "y", "z", "zh", "AA", "EE", "OO",
+ ]
+ num_zh_tones = 6
+
+ # Japanese
+ ja_symbols_legacy = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j',
+                      'k', 'ky',
+                      'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z']
+ ja_symbols = [
+     "N", "a", "a:", "b", "by", "ch", "d", "dy", "e", "e:", "f", "g", "gy", "h",
+     "hy", "i", "i:", "j", "k", "ky", "m", "my", "n", "ny", "o", "o:", "p", "py",
+     "q", "r", "ry", "s", "sh", "t", "ts", "ty", "u", "u:", "w", "y", "z", "zy",
+ ]
+ num_ja_tones = 1
+
+ # English
+ en_symbols = [
+     "aa", "ae", "ah", "ao", "aw", "ay", "b", "ch", "d", "dh", "eh", "er", "ey",
+     "f", "g", "hh", "ih", "iy", "jh", "k", "l", "m", "n", "ng", "ow", "oy", "p",
+     "r", "s", "sh", "t", "th", "uh", "uw", "V", "w", "y", "z", "zh",
+ ]
+ num_en_tones = 4
+
+ normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols))
+ symbols = [pad] + normal_symbols + pu_symbols
+ sil_phonemes_ids = [symbols.index(i) for i in pu_symbols]
+
+ # legacy
+ normal_symbols_legacy = sorted(set(zh_symbols + ja_symbols_legacy + en_symbols))
+ symbols_legacy = [pad] + normal_symbols_legacy + pu_symbols
+ sil_phonemes_ids_legacy = [symbols_legacy.index(i) for i in pu_symbols]
+
+ # combine all tones
+ num_tones = num_zh_tones + num_ja_tones + num_en_tones
+
+ # language maps
+ language_id_map = {"zh": 0, "ja": 1, "en": 2}
+ num_languages = len(language_id_map.keys())
+
+ language_tone_start_map = {
+     "zh": 0,
+     "ja": num_zh_tones,
+     "en": num_zh_tones + num_ja_tones,
+ }
+
+ if __name__ == "__main__":
+     zh = set(zh_symbols)
+     en = set(en_symbols)
+     ja = set(ja_symbols)
+     print(zh)
+     print(en)
+     print(ja)
+     print(sorted(zh & en))
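Tone indices from the three languages are packed into one ID space by the offsets in `language_tone_start_map`, so for instance Japanese tone 0 becomes global tone 6. A quick arithmetic check against the constants above:

    from bert_vits2.text.symbols import (language_tone_start_map, num_zh_tones,
                                         num_ja_tones, num_en_tones, num_tones)

    assert language_tone_start_map["ja"] == num_zh_tones                 # 6
    assert language_tone_start_map["en"] == num_zh_tones + num_ja_tones  # 7
    assert num_tones == num_zh_tones + num_ja_tones + num_en_tones       # 11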
bert_vits2/text/tone_sandhi.py ADDED
@@ -0,0 +1,769 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import List
+ from typing import Tuple
+
+ import jieba
+ from pypinyin import lazy_pinyin
+ from pypinyin import Style
+
+
+ class ToneSandhi:
+     def __init__(self):
+         self.must_neural_tone_words = {
+             "麻烦", "麻利", "鸳鸯", "高粱", "骨头", "骆驼", "马虎", "首饰", "馒头", "馄饨",
+             "风筝", "难为", "队伍", "阔气", "闺女", "门道", "锄头", "铺盖", "铃铛", "铁匠",
+             "钥匙", "里脊", "里头", "部分", "那么", "道士", "造化", "迷糊", "连累", "这么",
+             "这个", "运气", "过去", "软和", "转悠", "踏实", "跳蚤", "跟头", "趔趄", "财主",
+             "豆腐", "讲究", "记性", "记号", "认识", "规矩", "见识", "裁缝", "补丁", "衣裳",
+             "衣服", "衙门", "街坊", "行李", "行当", "蛤蟆", "蘑菇", "薄荷", "葫芦", "葡萄",
+             "萝卜", "荸荠", "苗条", "苗头", "苍蝇", "芝麻", "舒服", "舒坦", "舌头", "自在",
+             "膏药", "脾气", "脑袋", "脊梁", "能耐", "胳膊", "胭脂", "胡萝", "胡琴", "胡同",
+             "聪明", "耽误", "耽搁", "耷拉", "耳朵", "老爷", "老实", "老婆", "老头", "老太",
+             "翻腾", "罗嗦", "罐头", "编辑", "结实", "红火", "累赘", "糨糊", "糊涂", "精神",
+             "粮食", "簸箕", "篱笆", "算计", "算盘", "答应", "笤帚", "笑语", "笑话", "窟窿",
+             "窝囊", "窗户", "稳当", "稀罕", "称呼", "秧歌", "秀气", "秀才", "福气", "祖宗",
+             "砚台", "码头", "石榴", "石头", "石匠", "知识", "眼睛", "眯缝", "眨巴", "眉毛",
+             "相声", "盘算", "白净", "痢疾", "痛快", "疟疾", "疙瘩", "疏忽", "畜生", "生意",
+             "甘蔗", "琵琶", "琢磨", "琉璃", "玻璃", "玫瑰", "玄乎", "狐狸", "状元", "特务",
+             "牲口", "牙碜", "牌楼", "爽快", "爱人", "热闹", "烧饼", "烟筒", "烂糊", "点心",
+             "炊帚", "灯笼", "火候", "漂亮", "滑溜", "溜达", "温和", "清楚", "消息", "浪头",
+             "活泼", "比方", "正经", "欺负", "模糊", "槟榔", "棺材", "棒槌", "棉花", "核桃",
+             "栅栏", "柴火", "架势", "枕头", "枇杷", "机灵", "本事", "木头", "木匠", "朋友",
+             "月饼", "月亮", "暖和", "明白", "时候", "新鲜", "故事", "收拾", "收成", "提防",
+             "挖苦", "挑剔", "指甲", "指头", "拾掇", "拳头", "拨弄", "招牌", "招呼", "抬举",
+             "护士", "折腾", "扫帚", "打量", "打算", "打点", "打扮", "打听", "打发", "扎实",
+             "扁担", "戒指", "懒得", "意识", "意思", "情形", "悟性", "怪物", "思量", "怎么",
+             "念头", "念叨", "快活", "忙活", "志气", "心思", "得罪", "张罗", "弟兄", "开通",
+             "应酬", "庄稼", "干事", "帮手", "帐篷", "希罕", "师父", "师傅", "巴结", "巴掌",
+             "差事", "工夫", "岁数", "屁股", "尾巴", "少爷", "小气", "小伙", "将就", "对头",
+             "对付", "寡妇", "家伙", "客气", "实在", "官司", "学问", "学生", "字号", "嫁妆",
+             "媳妇", "媒人", "婆家", "娘家", "委屈", "姑娘", "姐夫", "妯娌", "妥当", "妖精",
+             "奴才", "女婿", "头发", "太阳", "大爷", "大方", "大意", "大夫", "多少", "多么",
+             "外甥", "壮实", "地道", "地方", "在乎", "困难", "嘴巴", "嘱咐", "嘟囔", "嘀咕",
+             "喜欢", "喇嘛", "喇叭", "商量", "唾沫", "哑巴", "哈欠", "哆嗦", "咳嗽", "和尚",
+             "告诉", "告示", "含糊", "吓唬", "后头", "名字", "名堂", "合同", "吆喝", "叫唤",
+             "口袋", "厚道", "厉害", "千斤", "包袱", "包涵", "匀称", "勤快", "动静", "动弹",
+             "功夫", "力气", "前头", "刺猬", "刺激", "别扭", "利落", "利索", "利害", "分析",
+             "出息", "凑合", "凉快", "冷战", "冤枉", "冒失", "养活", "关系", "先生", "兄弟",
+             "便宜", "使唤", "佩服", "作坊", "体面", "位置", "似的", "伙计", "休息", "什么",
+             "人家", "亲戚", "亲家", "交情", "云彩", "事情", "买卖", "主意", "丫头", "丧气",
+             "两口", "东西", "东家", "世故", "不由", "不在", "下水", "下巴", "上头", "上司",
+             "丈夫", "丈人", "一辈", "那个", "菩萨", "父亲", "母亲", "咕噜", "邋遢", "费用",
+             "冤家", "甜头", "介绍", "荒唐", "大人", "泥鳅", "幸福", "熟悉", "计划", "扑腾",
+             "蜡烛", "姥爷", "照顾", "喉咙", "吉他", "弄堂", "蚂蚱", "凤凰", "拖沓", "寒碜",
+             "糟蹋", "倒腾", "报复", "逻辑", "盘缠", "喽啰", "牢骚", "咖喱", "扫把", "惦记",
+         }
+         self.must_not_neural_tone_words = {
+             "男子", "女子", "分子", "原子", "量子", "莲子",
+             "石子", "瓜子", "电子", "人人", "虎虎",
+         }
+         self.punc = ":,;。?!“”‘’':,;.?!"
+
+     # the meaning of jieba pos tags: https://blog.csdn.net/weixin_44174352/article/details/113731041
+     # e.g.
+     # word: "家里"
+     # pos: "s"
+     # finals: ['ia1', 'i3']
+     def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:
+         # reduplicated words for n., v. and a., e.g. 奶奶, 试试, 旺旺
+         for j, item in enumerate(word):
+             if (
+                 j - 1 >= 0
+                 and item == word[j - 1]
+                 and pos[0] in {"n", "v", "a"}
+                 and word not in self.must_not_neural_tone_words
+             ):
+                 finals[j] = finals[j][:-1] + "5"
+         ge_idx = word.find("个")
+         if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
+             finals[-1] = finals[-1][:-1] + "5"
+         elif len(word) >= 1 and word[-1] in "的地得":
+             finals[-1] = finals[-1][:-1] + "5"
+         # e.g. 走了, 看着, 去过
+         # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
+         #     finals[-1] = finals[-1][:-1] + "5"
+         elif (
+             len(word) > 1
+             and word[-1] in "们子"
+             and pos in {"r", "n"}
+             and word not in self.must_not_neural_tone_words
+         ):
+             finals[-1] = finals[-1][:-1] + "5"
+         # e.g. 桌上, 地下, 家里
+         elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
+             finals[-1] = finals[-1][:-1] + "5"
+         # e.g. 上来, 下去
+         elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
+             finals[-1] = finals[-1][:-1] + "5"
+         # 个 used as a measure word
+         elif (
+             ge_idx >= 1
+             and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in "几有两半多各整每做是")
+         ) or word == "个":
+             finals[ge_idx] = finals[ge_idx][:-1] + "5"
+         else:
+             if (
+                 word in self.must_neural_tone_words
+                 or word[-2:] in self.must_neural_tone_words
+             ):
+                 finals[-1] = finals[-1][:-1] + "5"
+
+         word_list = self._split_word(word)
+         finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
+         for i, word in enumerate(word_list):
+             # conventional neutral tone in Chinese
+             if (
+                 word in self.must_neural_tone_words
+                 or word[-2:] in self.must_neural_tone_words
+             ):
+                 finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
+         finals = sum(finals_list, [])
+         return finals
+
+     def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:
+         # e.g. 看不懂
+         if len(word) == 3 and word[1] == "不":
+             finals[1] = finals[1][:-1] + "5"
+         else:
+             for i, char in enumerate(word):
+                 # "不" before tone 4 should be bu2, e.g. 不怕
+                 if char == "不" and i + 1 < len(word) and finals[i + 1][-1] == "4":
+                     finals[i] = finals[i][:-1] + "2"
+         return finals
+
+     def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
+         # "一" in number sequences, e.g. 一零零, 二一零
+         if word.find("一") != -1 and all(
+             [item.isnumeric() for item in word if item != "一"]
+         ):
+             return finals
+         # "一" between reduplicated words should be yi5, e.g. 看一看
+         elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
+             finals[1] = finals[1][:-1] + "5"
+         # when "一" is an ordinal word, it should be yi1
+         elif word.startswith("第一"):
+             finals[1] = finals[1][:-1] + "1"
+         else:
+             for i, char in enumerate(word):
+                 if char == "一" and i + 1 < len(word):
+                     # "一" before tone 4 should be yi2, e.g. 一段
+                     if finals[i + 1][-1] == "4":
+                         finals[i] = finals[i][:-1] + "2"
+                     # "一" before a non-tone-4 syllable should be yi4, e.g. 一天
+                     else:
+                         # if "一" is followed by punctuation, it keeps tone 1
+                         if word[i + 1] not in self.punc:
+                             finals[i] = finals[i][:-1] + "4"
+         return finals
+
+     def _split_word(self, word: str) -> List[str]:
+         word_list = jieba.cut_for_search(word)
+         word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
+         first_subword = word_list[0]
+         first_begin_idx = word.find(first_subword)
+         if first_begin_idx == 0:
+             second_subword = word[len(first_subword) :]
+             new_word_list = [first_subword, second_subword]
+         else:
+             second_subword = word[: -len(first_subword)]
+             new_word_list = [second_subword, first_subword]
+         return new_word_list
+
+     def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
+         if len(word) == 2 and self._all_tone_three(finals):
+             finals[0] = finals[0][:-1] + "2"
+         elif len(word) == 3:
+             word_list = self._split_word(word)
+             if self._all_tone_three(finals):
+                 # disyllabic + monosyllabic, e.g. 蒙古/包
+                 if len(word_list[0]) == 2:
+                     finals[0] = finals[0][:-1] + "2"
+                     finals[1] = finals[1][:-1] + "2"
+                 # monosyllabic + disyllabic, e.g. 纸/老虎
+                 elif len(word_list[0]) == 1:
+                     finals[1] = finals[1][:-1] + "2"
+             else:
+                 finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
+                 if len(finals_list) == 2:
+                     for i, sub in enumerate(finals_list):
+                         # e.g. 所有/人
+                         if self._all_tone_three(sub) and len(sub) == 2:
+                             finals_list[i][0] = finals_list[i][0][:-1] + "2"
+                         # e.g. 好/喜欢
+                         elif (
+                             i == 1
+                             and not self._all_tone_three(sub)
+                             and finals_list[i][0][-1] == "3"
+                             and finals_list[0][-1][-1] == "3"
+                         ):
+                             finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
+                     finals = sum(finals_list, [])
+         # split an idiom into two two-character words
+         elif len(word) == 4:
+             finals_list = [finals[:2], finals[2:]]
+             finals = []
+             for sub in finals_list:
+                 if self._all_tone_three(sub):
+                     sub[0] = sub[0][:-1] + "2"
+                 finals += sub
+
+         return finals
+
+     def _all_tone_three(self, finals: List[str]) -> bool:
+         return all(x[-1] == "3" for x in finals)
+
+     # merge "不" and the word behind it
+     # if not merged, "不" sometimes appears alone in jieba's output, which may cause sandhi errors
+     def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+         new_seg = []
+         last_word = ""
+         for word, pos in seg:
+             if last_word == "不":
+                 word = last_word + word
+             if word != "不":
+                 new_seg.append((word, pos))
+             last_word = word[:]
+         if last_word == "不":
+             new_seg.append((last_word, "d"))
+             last_word = ""
+         return new_seg
+
+     # function 1: merge "一" with the reduplicated words on its left and right, e.g. "听","一","听" -> "听一听"
+     # function 2: merge a single "一" and the word behind it
+     # if not merged, "一" sometimes appears alone in jieba's output, which may cause sandhi errors
+     # e.g.
+     # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]
+     # output seg: [['听一听', 'v']]
+     def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+         new_seg = []
+         # function 1
+         for i, (word, pos) in enumerate(seg):
+             if (
+                 i - 1 >= 0
+                 and word == "一"
+                 and i + 1 < len(seg)
+                 and seg[i - 1][0] == seg[i + 1][0]
+                 and seg[i - 1][1] == "v"
+             ):
+                 new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
+             else:
+                 if (
+                     i - 2 >= 0
+                     and seg[i - 1][0] == "一"
+                     and seg[i - 2][0] == word
+                     and pos == "v"
+                 ):
+                     continue
+                 else:
+                     new_seg.append([word, pos])
+         seg = new_seg
+         new_seg = []
+         # function 2
+         for i, (word, pos) in enumerate(seg):
+             if new_seg and new_seg[-1][0] == "一":
+                 new_seg[-1][0] = new_seg[-1][0] + word
+             else:
+                 new_seg.append([word, pos])
+         return new_seg
+
+     # the first and the second word are both all_tone_three
+     def _merge_continuous_three_tones(
+         self, seg: List[Tuple[str, str]]
+     ) -> List[Tuple[str, str]]:
+         new_seg = []
+         sub_finals_list = [
+             lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
+             for (word, pos) in seg
+         ]
+         assert len(sub_finals_list) == len(seg)
+         merge_last = [False] * len(seg)
+         for i, (word, pos) in enumerate(seg):
+             if (
+                 i - 1 >= 0
+                 and self._all_tone_three(sub_finals_list[i - 1])
+                 and self._all_tone_three(sub_finals_list[i])
+                 and not merge_last[i - 1]
+             ):
+                 # if the last word is a reduplication, don't merge, because reduplications need _neural_sandhi
+                 if (
+                     not self._is_reduplication(seg[i - 1][0])
+                     and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
+                 ):
+                     new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
+                     merge_last[i] = True
+                 else:
+                     new_seg.append([word, pos])
+             else:
+                 new_seg.append([word, pos])
+
+         return new_seg
+
+     def _is_reduplication(self, word: str) -> bool:
+         return len(word) == 2 and word[0] == word[1]
+
+     # the last char of the first word and the first char of the second word are both tone three
+     def _merge_continuous_three_tones_2(
+         self, seg: List[Tuple[str, str]]
+     ) -> List[Tuple[str, str]]:
+         new_seg = []
+         sub_finals_list = [
+             lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
+             for (word, pos) in seg
+         ]
+         assert len(sub_finals_list) == len(seg)
+         merge_last = [False] * len(seg)
+         for i, (word, pos) in enumerate(seg):
+             if (
+                 i - 1 >= 0
+                 and sub_finals_list[i - 1][-1][-1] == "3"
+                 and sub_finals_list[i][0][-1] == "3"
+                 and not merge_last[i - 1]
+             ):
+                 # if the last word is a reduplication, don't merge, because reduplications need _neural_sandhi
+                 if (
+                     not self._is_reduplication(seg[i - 1][0])
+                     and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
+                 ):
+                     new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
+                     merge_last[i] = True
+                 else:
+                     new_seg.append([word, pos])
+             else:
+                 new_seg.append([word, pos])
+         return new_seg
+
+     def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+         new_seg = []
+         for i, (word, pos) in enumerate(seg):
+             if i - 1 >= 0 and word == "儿" and seg[i - 1][0] != "#":
+                 new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
+             else:
+                 new_seg.append([word, pos])
+         return new_seg
+
+     def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+         new_seg = []
+         for i, (word, pos) in enumerate(seg):
+             if new_seg and word == new_seg[-1][0]:
+                 new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
+             else:
+                 new_seg.append([word, pos])
+         return new_seg
+
+     def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+         seg = self._merge_bu(seg)
+         try:
+             seg = self._merge_yi(seg)
+         except Exception:
+             print("_merge_yi failed")
+         seg = self._merge_reduplication(seg)
+         seg = self._merge_continuous_three_tones(seg)
+         seg = self._merge_continuous_three_tones_2(seg)
+         seg = self._merge_er(seg)
+         return seg
+
+     def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:
+         finals = self._bu_sandhi(word, finals)
+         finals = self._yi_sandhi(word, finals)
+         finals = self._neural_sandhi(word, pos, finals)
+         finals = self._three_sandhi(word, finals)
+         return finals
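pre_merge_for_modify and modified_tone are the two public entry points: the first normalizes jieba's segmentation before pinyin lookup, the second applies the 不/一/neutral-tone/third-tone rules to a word's finals. A rough sketch of how a frontend such as chinese.py can drive the class (the sample sentence is arbitrary):

import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from bert_vits2.text.tone_sandhi import ToneSandhi

sandhi = ToneSandhi()
seg = psg.lcut("我们一起去买葡萄吧")          # [(word, pos), ...] pairs from jieba
seg = sandhi.pre_merge_for_modify(seg)       # merge 不/一/reduplications/儿 before sandhi
for word, pos in seg:
    finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
    finals = sandhi.modified_tone(word, pos, finals)
    print(word, finals)  # e.g. 葡萄 -> ['u2', 'ao5'] (second syllable reduced to the neutral tone)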
bert_vits2/transforms.py ADDED
@@ -0,0 +1,192 @@
+ import torch
+ from torch.nn import functional as F
+
+ import numpy as np
+
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
+ DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+ def piecewise_rational_quadratic_transform(inputs,
+                                            unnormalized_widths,
+                                            unnormalized_heights,
+                                            unnormalized_derivatives,
+                                            inverse=False,
+                                            tails=None,
+                                            tail_bound=1.,
+                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
+     if tails is None:
+         spline_fn = rational_quadratic_spline
+         spline_kwargs = {}
+     else:
+         spline_fn = unconstrained_rational_quadratic_spline
+         spline_kwargs = {
+             'tails': tails,
+             'tail_bound': tail_bound
+         }
+
+     outputs, logabsdet = spline_fn(
+         inputs=inputs,
+         unnormalized_widths=unnormalized_widths,
+         unnormalized_heights=unnormalized_heights,
+         unnormalized_derivatives=unnormalized_derivatives,
+         inverse=inverse,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative,
+         **spline_kwargs
+     )
+     return outputs, logabsdet
+
+
+ def searchsorted(bin_locations, inputs, eps=1e-6):
+     bin_locations[..., -1] += eps
+     return torch.sum(
+         inputs[..., None] >= bin_locations,
+         dim=-1
+     ) - 1
+
+
+ def unconstrained_rational_quadratic_spline(inputs,
+                                             unnormalized_widths,
+                                             unnormalized_heights,
+                                             unnormalized_derivatives,
+                                             inverse=False,
+                                             tails='linear',
+                                             tail_bound=1.,
+                                             min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+                                             min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+                                             min_derivative=DEFAULT_MIN_DERIVATIVE):
+     inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+     outside_interval_mask = ~inside_interval_mask
+
+     outputs = torch.zeros_like(inputs)
+     logabsdet = torch.zeros_like(inputs)
+
+     if tails == 'linear':
+         unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+         constant = np.log(np.exp(1 - min_derivative) - 1)
+         unnormalized_derivatives[..., 0] = constant
+         unnormalized_derivatives[..., -1] = constant
+
+         outputs[outside_interval_mask] = inputs[outside_interval_mask]
+         logabsdet[outside_interval_mask] = 0
+     else:
+         raise RuntimeError('{} tails are not implemented.'.format(tails))
+
+     outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
+         inputs=inputs[inside_interval_mask],
+         unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+         unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+         unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+         inverse=inverse,
+         left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative
+     )
+
+     return outputs, logabsdet
+
+
+ def rational_quadratic_spline(inputs,
+                               unnormalized_widths,
+                               unnormalized_heights,
+                               unnormalized_derivatives,
+                               inverse=False,
+                               left=0., right=1., bottom=0., top=1.,
+                               min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+                               min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+                               min_derivative=DEFAULT_MIN_DERIVATIVE):
+     if torch.min(inputs) < left or torch.max(inputs) > right:
+         raise ValueError('Input to a transform is not within its domain')
+
+     num_bins = unnormalized_widths.shape[-1]
+
+     if min_bin_width * num_bins > 1.0:
+         raise ValueError('Minimal bin width too large for the number of bins')
+     if min_bin_height * num_bins > 1.0:
+         raise ValueError('Minimal bin height too large for the number of bins')
+
+     widths = F.softmax(unnormalized_widths, dim=-1)
+     widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+     cumwidths = torch.cumsum(widths, dim=-1)
+     cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
+     cumwidths = (right - left) * cumwidths + left
+     cumwidths[..., 0] = left
+     cumwidths[..., -1] = right
+     widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+     derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+     heights = F.softmax(unnormalized_heights, dim=-1)
+     heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+     cumheights = torch.cumsum(heights, dim=-1)
+     cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
+     cumheights = (top - bottom) * cumheights + bottom
+     cumheights[..., 0] = bottom
+     cumheights[..., -1] = top
+     heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+     if inverse:
+         bin_idx = searchsorted(cumheights, inputs)[..., None]
+     else:
+         bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+     input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+     input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+     input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+     delta = heights / widths
+     input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+     input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+     input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+     input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+     if inverse:
+         a = (((inputs - input_cumheights) * (input_derivatives
+                                              + input_derivatives_plus_one
+                                              - 2 * input_delta)
+               + input_heights * (input_delta - input_derivatives)))
+         b = (input_heights * input_derivatives
+              - (inputs - input_cumheights) * (input_derivatives
+                                               + input_derivatives_plus_one
+                                               - 2 * input_delta))
+         c = - input_delta * (inputs - input_cumheights)
+
+         discriminant = b.pow(2) - 4 * a * c
+         assert (discriminant >= 0).all()
+
+         root = (2 * c) / (-b - torch.sqrt(discriminant))
+         outputs = root * input_bin_widths + input_cumwidths
+
+         theta_one_minus_theta = root * (1 - root)
+         denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+                                      * theta_one_minus_theta)
+         derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
+                                                      + 2 * input_delta * theta_one_minus_theta
+                                                      + input_derivatives * (1 - root).pow(2))
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, -logabsdet
+     else:
+         theta = (inputs - input_cumwidths) / input_bin_widths
+         theta_one_minus_theta = theta * (1 - theta)
+
+         numerator = input_heights * (input_delta * theta.pow(2)
+                                      + input_derivatives * theta_one_minus_theta)
+         denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+                                      * theta_one_minus_theta)
+         outputs = input_cumheights + numerator / denominator
+
+         derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
+                                                      + 2 * input_delta * theta_one_minus_theta
+                                                      + input_derivatives * (1 - theta).pow(2))
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, logabsdet
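This file is the rational-quadratic spline transform used by VITS-style normalizing flows: monotone piecewise splines inside the tail bound, identity with linear tails outside. A quick numerical self-check, not part of the commit, that the forward and inverse passes agree; shapes follow the convention above (the last dimension of the width/height tensors is the bin count, and with tails='linear' the derivatives tensor carries num_bins - 1 interior values and is padded internally):

import torch
from bert_vits2.transforms import piecewise_rational_quadratic_transform

torch.manual_seed(0)
num_bins = 8
x = torch.rand(4, 10) * 2 - 1            # inputs inside the (-1, 1) tail bound
w = torch.randn(4, 10, num_bins)         # unnormalized bin widths
h = torch.randn(4, 10, num_bins)         # unnormalized bin heights
d = torch.randn(4, 10, num_bins - 1)     # unnormalized interior derivatives
y, logdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails='linear', tail_bound=1.0)
x_rec, inv_logdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails='linear', tail_bound=1.0)
print(torch.allclose(x, x_rec, atol=1e-4))             # True: the spline is invertible
print(torch.allclose(logdet, -inv_logdet, atol=1e-4))  # forward/inverse log-dets cancel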
bert_vits2/utils.py ADDED
@@ -0,0 +1,67 @@
+ import os
+ import sys
+ import logging
+ import torch
+
+ MATPLOTLIB_FLAG = False
+
+ logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+ logger = logging
+
+
+ def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False, legacy_version=None):
+     assert os.path.isfile(checkpoint_path)
+     checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
+     iteration = checkpoint_dict['iteration']
+     learning_rate = checkpoint_dict['learning_rate']
+     if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
+         optimizer.load_state_dict(checkpoint_dict['optimizer'])
+     elif optimizer is not None and not skip_optimizer:
+         # resume path when the checkpoint lacks optimizer state: rebuild the param groups.
+         # (the upstream code guards this branch with "optimizer is None", which would crash
+         # on optimizer.state_dict(); it is only meaningful when an optimizer is passed in)
+         new_opt_dict = optimizer.state_dict()
+         new_opt_dict_params = new_opt_dict['param_groups'][0]['params']
+         new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups']
+         new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params
+         optimizer.load_state_dict(new_opt_dict)
+     saved_state_dict = checkpoint_dict['model']
+     if hasattr(model, 'module'):
+         state_dict = model.module.state_dict()
+     else:
+         state_dict = model.state_dict()
+     new_state_dict = {}
+     for k, v in state_dict.items():
+         try:
+             new_state_dict[k] = saved_state_dict[k]
+             assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
+         except (KeyError, AssertionError):
+             # for upgrading from an old model version
+             if "ja_bert_proj" in k:
+                 v = torch.zeros_like(v)
+                 if legacy_version is None:
+                     logger.error(f"{k} is not in the checkpoint")
+                     logger.warning(
+                         f"If you are using an older version of the model, you should add the parameter "
+                         f"\"legacy_version\" to the \"data\" section of the model's config.json. "
+                         f"For example: \"legacy_version\": \"1.0.1\"")
+             else:
+                 logger.error(f"{k} is not in the checkpoint")
+             new_state_dict[k] = v
+     if hasattr(model, 'module'):
+         model.module.load_state_dict(new_state_dict, strict=False)
+     else:
+         model.load_state_dict(new_state_dict, strict=False)
+     logger.info("Loaded checkpoint '{}' (iteration {})".format(
+         checkpoint_path, iteration))
+     return model, optimizer, learning_rate, iteration
+
+
+ def process_legacy_versions(hps):
+     legacy_version = getattr(hps.data, "legacy", getattr(hps.data, "legacy_version", None))
+     if legacy_version:
+         prefix = legacy_version[0].lower()
+         if prefix == "v":
+             legacy_version = legacy_version[1:]
+     return legacy_version
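For inference the API constructs the network first and then restores only the generator weights. A rough sketch of that call pattern — the model object, hps, and the checkpoint path are placeholders constructed elsewhere, not part of this file:

from bert_vits2 import utils

# net_g: a SynthesizerTrn built from the model's config.json; hps: its parsed hyperparameters
legacy_version = utils.process_legacy_versions(hps)   # normalizes "v1.0.1" -> "1.0.1"
utils.load_checkpoint("Model/bert_vits2/G_9000.pth", net_g, None,
                      skip_optimizer=True, legacy_version=legacy_version)
net_g.eval()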
config.py ADDED
@@ -0,0 +1,107 @@
+ import os
+ import sys
+
+ import torch
+
+ JSON_AS_ASCII = False
+
+ # Maximum request body size: 5 MB
+ MAX_CONTENT_LENGTH = 5242880
+
+ # Flask debug mode
+ DEBUG = False
+
+ # Server port
+ PORT = 23456
+
+ # Absolute path of vits-simple-api
+ ABS_PATH = os.path.dirname(os.path.realpath(__file__))
+
+ # Upload path
+ UPLOAD_FOLDER = ABS_PATH + "/upload"
+
+ # Cache path
+ CACHE_PATH = ABS_PATH + "/cache"
+
+ # Logs path
+ LOGS_PATH = ABS_PATH + "/logs"
+
+ # Set the number of backup log files to keep.
+ LOGS_BACKUPCOUNT = 30
+
+ # If CLEAN_INTERVAL_SECONDS <= 0, the cleaning task will not be executed.
+ CLEAN_INTERVAL_SECONDS = 3600
+
+ # Save audio to CACHE_PATH
+ SAVE_AUDIO = False
+
+ # zh ja ko en... If it is empty, it will be read based on the text_cleaners specified in the config.json.
+ LANGUAGE_AUTOMATIC_DETECT = []
+
+ # Set to True to enable API key authentication
+ API_KEY_ENABLED = False
+
+ # API_KEY is required for authentication
+ API_KEY = "api-key"
+
+ # Logging level: DEBUG/INFO/WARNING/ERROR/CRITICAL
+ LOGGING_LEVEL = "DEBUG"
+
+ # Language identification library. Options: fastlid, langid
+ LANGUAGE_IDENTIFICATION_LIBRARY = "langid"
+
+ # To use the english_cleaner, you need to install espeak and provide the path of libespeak-ng.dll here.
+ # If ESPEAK_LIBRARY is left empty, it will be read from the environment variable.
+ # For Windows: "C:/Program Files/eSpeak NG/libespeak-ng.dll"
+ ESPEAK_LIBRARY = ""
+
+ # Fill in the model paths here
+ MODEL_LIST = [
+     # VITS
+     # [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
+     # [ABS_PATH + "/Model/Zero_no_tsukaima/1158_epochs.pth", ABS_PATH + "/Model/Zero_no_tsukaima/config.json"],
+     # [ABS_PATH + "/Model/g/G_953000.pth", ABS_PATH + "/Model/g/config.json"],
+     # [ABS_PATH + "/Model/vits_chinese/vits_bert_model.pth", ABS_PATH + "/Model/vits_chinese/bert_vits.json"],
+     # HuBert-VITS (needs HUBERT_SOFT_MODEL configured)
+     # [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
+     # W2V2-VITS (needs DIMENSIONAL_EMOTION_NPY configured)
+     # [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
+     # Bert-VITS2
+     # [ABS_PATH + "/Model/bert_vits2/G_9000.pth", ABS_PATH + "/Model/bert_vits2/config.json"],
+ ]
+
+ # hubert-vits: HuBERT soft model
+ HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
+
+ # w2v2-vits: dimensional emotion npy file
+ # load a single npy: ABS_PATH + "/all_emotions.npy"
+ # load multiple npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
+ # load multiple npy from a folder: ABS_PATH + "/Model/npy"
+ DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
+
+ # w2v2-vits: needs both `model.onnx` and `model.yaml` files in the same path.
+ # DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
+
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ """
+ Default parameters
+ """
+
+ ID = 0
+
+ FORMAT = "wav"
+
+ LANG = "AUTO"
+
+ LENGTH = 1
+
+ NOISE = 0.33
+
+ NOISEW = 0.4
+
+ # Segmentation threshold for long text. Text will not be processed in batches if MAX <= 0.
+ MAX = 50
+
+ # Bert_VITS2
+ SDP_RATIO = 0.2
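To serve a model, uncomment or add an entry in MODEL_LIST and restart the server; each entry pairs a checkpoint with its config. For example, enabling the commented VITS and Bert-VITS2 sample entries above would look like this (the paths come from those comments and must exist on disk):

MODEL_LIST = [
    [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
    [ABS_PATH + "/Model/bert_vits2/G_9000.pth", ABS_PATH + "/Model/bert_vits2/config.json"],
]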
docker-compose-gpu.yaml ADDED
@@ -0,0 +1,26 @@
+ version: '3.8'
+
+ services:
+   vits:
+     image: artrajz/vits-simple-api:latest-gpu
+     restart: always
+     ports:
+       - 23456:23456
+     environment:
+       LANG: 'C.UTF-8'
+       #TZ: Asia/Shanghai # timezone
+     volumes:
+       - ./Model:/app/Model # mount the model folder
+       - ./config.py:/app/config.py # mount the config file
+       - ./logs:/app/logs # logging logs
+       - ./gunicorn_config.py:/app/gunicorn_config.py # gunicorn configuration
+       - ./vits/bert:/app/vits/bert # vits_chinese
+       - ./bert_vits2/bert/chinese-roberta-wwm-ext-large:/app/bert_vits2/bert/chinese-roberta-wwm-ext-large # Bert-vits2
+       - ./pyopenjtalk/open_jtalk_dic_utf_8-1.11:/usr/local/lib/python3.10/site-packages/pyopenjtalk/open_jtalk_dic_utf_8-1.11 # pyopenjtalk
+     deploy:
+       resources:
+         reservations:
+           devices:
+             - driver: nvidia
+               #device_ids: ['0', '3']
+               capabilities: [gpu]
docker-compose.yaml ADDED
@@ -0,0 +1,18 @@
+ version: '3.4'
+ services:
+   vits:
+     image: artrajz/vits-simple-api:latest
+     restart: always
+     ports:
+       - 23456:23456
+     environment:
+       LANG: 'C.UTF-8'
+       TZ: Asia/Shanghai # timezone
+     volumes:
+       - ./Model:/app/Model # mount the model folder
+       - ./config.py:/app/config.py # mount the config file
+       - ./logs:/app/logs # logging logs
+       - ./gunicorn_config.py:/app/gunicorn_config.py # gunicorn configuration
+       - ./vits/bert:/app/vits/bert # vits_chinese
+       - ./bert_vits2/bert/chinese-roberta-wwm-ext-large:/app/bert_vits2/bert/chinese-roberta-wwm-ext-large # Bert-vits2
+       - ./pyopenjtalk/open_jtalk_dic_utf_8-1.11:/usr/local/lib/python3.10/site-packages/pyopenjtalk/open_jtalk_dic_utf_8-1.11 # pyopenjtalk
gunicorn_config.py ADDED
@@ -0,0 +1,19 @@
+ import gc
+ import multiprocessing
+
+ bind = "0.0.0.0:23456"
+ # workers = multiprocessing.cpu_count()
+ workers = 1
+ preload_app = True
+
+ # disable GC in the master as early as possible
+ gc.disable()
+
+ def when_ready(server):
+     # freeze objects after preloading the app
+     gc.freeze()
+     print("Objects frozen in perm gen: ", gc.get_freeze_count())
+
+ def post_fork(server, worker):
+     # re-enable GC in the worker
+     gc.enable()
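With preload_app enabled, the master process imports the app (and its model weights) once, and forked workers inherit those memory pages. Disabling GC in the master and calling gc.freeze() keeps the collector from touching reference bits in the shared pages, so they stay copy-on-write instead of being duplicated per worker. A standalone illustration of the two calls used above, independent of gunicorn:

import gc

gc.disable()                    # stop the cyclic collector while the long-lived objects are built
preloaded = [object() for _ in range(100_000)]
gc.freeze()                     # move every surviving object into the permanent generation
print(gc.get_freeze_count())    # frozen objects are skipped by future collections
gc.enable()                     # a forked child re-enables GC for its own allocations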
logger.py ADDED
@@ -0,0 +1,42 @@
+ import os
+ import sys
+ import logging
+ import logzero
+ import config
+ from logging.handlers import TimedRotatingFileHandler
+
+ logzero.loglevel(logging.WARNING)
+ logger = logging.getLogger("vits-simple-api")
+ level = getattr(config, "LOGGING_LEVEL", "DEBUG")
+ level_dict = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR,
+               'CRITICAL': logging.CRITICAL}
+ logging.basicConfig(level=level_dict[level])
+ logging.getLogger('numba').setLevel(logging.WARNING)
+ logging.getLogger("langid.langid").setLevel(logging.INFO)
+ logging.getLogger("apscheduler.scheduler").setLevel(logging.INFO)
+
+ os.makedirs(config.LOGS_PATH, exist_ok=True)
+ log_file = os.path.join(config.LOGS_PATH, 'latest.log')
+ backup_count = getattr(config, "LOGS_BACKUPCOUNT", 30)
+ handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=backup_count, encoding='utf-8')
+ handler.suffix = "%Y-%m-%d.log"
+ formatter = logging.Formatter('%(levelname)s:%(name)s %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ logging.getLogger("werkzeug").addHandler(handler)
+ logging.getLogger("apscheduler.scheduler").addHandler(handler)
+
+
+ # Custom function to handle uncaught exceptions
+ def handle_exception(exc_type, exc_value, exc_traceback):
+     # If it's a keyboard interrupt, don't handle it, just return
+     if issubclass(exc_type, KeyboardInterrupt):
+         sys.__excepthook__(exc_type, exc_value, exc_traceback)
+         return
+
+     logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
+
+
+ # Set the global exception handler in Python
+ sys.excepthook = handle_exception
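Other modules pick up the configured logger simply by importing it; a minimal sketch (the messages are placeholders):

from logger import logger

logger.info("speech synthesis finished")   # goes to stdout and logs/latest.log
logger.debug("loaded %s speakers", 42)     # emitted only when LOGGING_LEVEL is DEBUG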
requirements.txt ADDED
@@ -0,0 +1,50 @@
+ # Core scientific and computation libraries
+ numpy==1.23.3
+ scipy
+
+ # Audio processing
+ librosa
+ soundfile==0.12.1
+ numba
+ graiax-silkcoder[libsndfile]
+
+ # Natural Language Processing and Text Conversion
+ unidecode
+ pyopenjtalk==0.3.2
+ jamo
+ pypinyin
+ jieba
+ cn2an
+ inflect
+ eng_to_ipa
+ ko_pron
+ indic_transliteration
+ num_thai
+ opencc
+ fasttext
+ fastlid
+ langid
+ phonemizer==3.2.1
+ transformers
+ num2words
+ mecab-python3
+ unidic-lite
+
+ # Machine Learning and Deep Learning
+ torch
+ audonnx
+
+ # Web and API services
+ flask==2.2.3
+ flask_apscheduler
+ pydantic==2.3.0
+ werkzeug==2.3.6
+
+ # Compression and Decompression
+ py7zr
+
+ # Other utilities and dependencies
+ MarkupSafe==2.1.2
+ six==1.16.0
+ protobuf
+ tqdm
static/css/bootstrap.min.css ADDED
The diff for this file is too large to render. See raw diff