ulysses115 committed on
Commit 6b89d0b · 1 Parent(s): f560fb5

First model version

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. Dockerfile +29 -0
  2. Dockerfile.back +35 -0
  3. Dockerfile.latestbackup +42 -0
  4. LICENSE.md +661 -0
  5. README.md +7 -9
  6. app.py +106 -0
  7. app.py.back2 +123 -0
  8. app.pyback +123 -0
  9. batch.py +43 -0
  10. checkpoints/0102_xiaoma_pe/config.yaml +172 -0
  11. checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt +3 -0
  12. checkpoints/0109_hifigan_bigpopcs_hop128/config.yaml +241 -0
  13. checkpoints/0109_hifigan_bigpopcs_hop128/model_ckpt_steps_1512000.ckpt +3 -0
  14. checkpoints/Unnamed/config.yaml +445 -0
  15. checkpoints/Unnamed/config_nsf.yaml +445 -0
  16. checkpoints/Unnamed/lightning_logs/lastest/hparams.yaml +1 -0
  17. checkpoints/Unnamed/model_ckpt_steps_192000.ckpt +3 -0
  18. checkpoints/hubert/hubert_soft.pt +3 -0
  19. checkpoints/nsf_hifigan/NOTICE.txt +74 -0
  20. checkpoints/nsf_hifigan/NOTICE.zh-CN.txt +72 -0
  21. checkpoints/nsf_hifigan/config.json +38 -0
  22. ckpt.jpg +0 -0
  23. config.yaml +349 -0
  24. doc/train_and_inference.markdown +210 -0
  25. flask_api.py +54 -0
  26. infer.py +98 -0
  27. infer_tools/__init__.py +0 -0
  28. infer_tools/__pycache__/__init__.cpython-38.pyc +0 -0
  29. infer_tools/__pycache__/infer_tool.cpython-38.pyc +0 -0
  30. infer_tools/__pycache__/slicer.cpython-38.pyc +0 -0
  31. infer_tools/f0_temp.json +0 -0
  32. infer_tools/infer_tool.py +342 -0
  33. infer_tools/new_chunks_temp.json +1 -0
  34. infer_tools/slicer.py +158 -0
  35. inference.ipynb +0 -0
  36. models/genshin/__init__.py +0 -0
  37. models/genshin/config.yaml +445 -0
  38. models/genshin/raiden.ckpt +3 -0
  39. modules/commons/__pycache__/common_layers.cpython-38.pyc +0 -0
  40. modules/commons/__pycache__/espnet_positional_embedding.cpython-38.pyc +0 -0
  41. modules/commons/__pycache__/ssim.cpython-38.pyc +0 -0
  42. modules/commons/common_layers.py +671 -0
  43. modules/commons/espnet_positional_embedding.py +113 -0
  44. modules/commons/ssim.py +391 -0
  45. modules/fastspeech/__pycache__/fs2.cpython-38.pyc +0 -0
  46. modules/fastspeech/__pycache__/pe.cpython-38.pyc +0 -0
  47. modules/fastspeech/__pycache__/tts_modules.cpython-38.pyc +0 -0
  48. modules/fastspeech/fs2.py +255 -0
  49. modules/fastspeech/pe.py +149 -0
  50. modules/fastspeech/tts_modules.py +364 -0
Dockerfile ADDED
@@ -0,0 +1,29 @@
+ FROM misakiminato/cuda-python:cu12.0.0-py3.8.16-devel-ubuntu18.04
+ WORKDIR /app
+
+ COPY ./requirements.txt /app/requirements.txt
+ COPY ./packages.txt /app/packages.txt
+ RUN pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
+ RUN apt-get update && xargs -r -a /app/packages.txt apt-get install -y && rm -rf /var/lib/apt/lists/*
+ RUN pip3 install --no-cache-dir -r /app/requirements.txt
+
+
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+
+ # Switch to the "user" user
+ USER user
+
+ # Set home to the user's home directory
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+ COPY --chown=user . $HOME/app
+
+
+ EXPOSE 8501
+ CMD streamlit run app.py --server.maxUploadSize 1024 --server.enableWebsocketCompression=false --server.enableXsrfProtection=false
Dockerfile.back ADDED
@@ -0,0 +1,35 @@
+ FROM nvidia/cuda:12.0.0-base-ubuntu20.04
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV PYTHONUNBUFFERED=1
+
+ WORKDIR /app
+ # Install python, git & ffmpeg
+ RUN apt-get update && apt-get install --no-install-recommends -y \
+     build-essential \
+     python3.8=3.8.10* \
+     python3-pip \
+     git \
+     ffmpeg \
+     && apt-get clean && rm -rf /var/lib/apt/lists/*
+ COPY ./requirements.txt /app/requirements.txt
+ COPY ./packages.txt /app/packages.txt
+ RUN pip install --upgrade pip
+ RUN pip install pyproject-toml
+ RUN pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
+ RUN apt-get update && xargs -r -a /app/packages.txt apt-get install -y && rm -rf /var/lib/apt/lists/*
+ RUN pip3 install --no-cache-dir -r /app/requirements.txt
+ RUN pip3 install --no-cache-dir numba==0.56.3
+ RUN pip install --no-binary :all: pyworld
+
+ WORKDIR /app
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+ COPY --chown=user . $HOME/app
+
+ EXPOSE 8501
+ CMD nvidia-smi -l
+ CMD streamlit run app.py --server.maxUploadSize 1024 --server.enableWebsocketCompression=false --server.enableXsrfProtection=false
Dockerfile.latestbackup ADDED
@@ -0,0 +1,42 @@
+ FROM nvidia/cuda:12.0.0-base-ubuntu20.04
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV PYTHONUNBUFFERED=1
+ ENV PYTHON_INCLUDE /usr/include/python3.8
+ ENV PYTHON_LIB /usr/lib/x86_64-linux-gnu/libpython3.8.so
+
+ WORKDIR /app
+ # Install python, git & ffmpeg
+ RUN apt-get update && apt-get install --no-install-recommends -y \
+     build-essential \
+     python3.9 \
+     python3-pip \
+     git \
+     ffmpeg \
+     && apt-get clean && rm -rf /var/lib/apt/lists/*
+ COPY ./requirements.txt /app/requirements.txt
+ COPY ./packages.txt /app/packages.txt
+ RUN pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
+ RUN apt-get update && xargs -r -a /app/packages.txt apt-get install -y && rm -rf /var/lib/apt/lists/*
+ RUN apt-get update && apt-get install -y python3-dev
+ RUN apt-get update && apt-get install -y build-essential
+ RUN which python3
+ RUN which python3-config
+ RUN pip3 install --no-cache-dir -r /app/requirements.txt
+ RUN apt-get update && apt-get install -y build-essential
+ RUN pip3 install --no-cache-dir numba==0.56.3
+ RUN pip install --upgrade pip setuptools wheel
+ RUN pip3 install --no-binary :all: pyworld
+ RUN pip install soundfile
+
+ WORKDIR /app
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+ COPY --chown=user . $HOME/app
+
+ EXPOSE 8501
+ CMD nvidia-smi -l
+ CMD streamlit run app.py --server.maxUploadSize 1024 --server.enableWebsocketCompression=false --server.enableXsrfProtection=false
LICENSE.md ADDED
@@ -0,0 +1,661 @@
+                     GNU AFFERO GENERAL PUBLIC LICENSE
+                        Version 3, 19 November 2007
+
+  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+  Everyone is permitted to copy and distribute verbatim copies
+  of this license document, but changing it is not allowed.
+
+                             Preamble
+
+   The GNU Affero General Public License is a free, copyleft license for
+ software and other kinds of works, specifically designed to ensure
+ cooperation with the community in the case of network server software.
+
+   The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ our General Public Licenses are intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users.
+
+   When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+   Developers that use our General Public Licenses protect your rights
+ with two steps: (1) assert copyright on the software, and (2) offer
+ you this License which gives you legal permission to copy, distribute
+ and/or modify the software.
+
+   A secondary benefit of defending all users' freedom is that
+ improvements made in alternate versions of the program, if they
+ receive widespread use, become available for other developers to
+ incorporate. Many developers of free software are heartened and
+ encouraged by the resulting cooperation. However, in the case of
+ software used on network servers, this result may fail to come about.
+ The GNU General Public License permits making a modified version and
+ letting the public access it on a server without ever releasing its
+ source code to the public.
+
+   The GNU Affero General Public License is designed specifically to
+ ensure that, in such cases, the modified source code becomes available
+ to the community. It requires the operator of a network server to
+ provide the source code of the modified version running there to the
+ users of that server. Therefore, public use of a modified version, on
+ a publicly accessible server, gives the public access to the source
+ code of the modified version.
+
+   An older license, called the Affero General Public License and
+ published by Affero, was designed to accomplish similar goals. This is
+ a different license, not a version of the Affero GPL, but Affero has
+ released a new version of the Affero GPL which permits relicensing under
+ this license.
+
+   The precise terms and conditions for copying, distribution and
+ modification follow.
+
+                        TERMS AND CONDITIONS
+
+   0. Definitions.
+
+   "This License" refers to version 3 of the GNU Affero General Public License.
+
+   "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+   "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+   To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+   A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+   To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+   To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+   An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+   1. Source Code.
+
+   The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+   A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+   The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+   The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+   The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+   The Corresponding Source for a work in source code form is that
+ same work.
+
+   2. Basic Permissions.
+
+   All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+   You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+   Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+   3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+   No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+   When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+   4. Conveying Verbatim Copies.
+
+   You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+   You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+   5. Conveying Modified Source Versions.
+
+   You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+     a) The work must carry prominent notices stating that you modified
+     it, and giving a relevant date.
+
+     b) The work must carry prominent notices stating that it is
+     released under this License and any conditions added under section
+     7. This requirement modifies the requirement in section 4 to
+     "keep intact all notices".
+
+     c) You must license the entire work, as a whole, under this
+     License to anyone who comes into possession of a copy. This
+     License will therefore apply, along with any applicable section 7
+     additional terms, to the whole of the work, and all its parts,
+     regardless of how they are packaged. This License gives no
+     permission to license the work in any other way, but it does not
+     invalidate such permission if you have separately received it.
+
+     d) If the work has interactive user interfaces, each must display
+     Appropriate Legal Notices; however, if the Program has interactive
+     interfaces that do not display Appropriate Legal Notices, your
+     work need not make them do so.
+
+   A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+   6. Conveying Non-Source Forms.
+
+   You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+     a) Convey the object code in, or embodied in, a physical product
+     (including a physical distribution medium), accompanied by the
+     Corresponding Source fixed on a durable physical medium
+     customarily used for software interchange.
+
+     b) Convey the object code in, or embodied in, a physical product
+     (including a physical distribution medium), accompanied by a
+     written offer, valid for at least three years and valid for as
+     long as you offer spare parts or customer support for that product
+     model, to give anyone who possesses the object code either (1) a
+     copy of the Corresponding Source for all the software in the
+     product that is covered by this License, on a durable physical
+     medium customarily used for software interchange, for a price no
+     more than your reasonable cost of physically performing this
+     conveying of source, or (2) access to copy the
+     Corresponding Source from a network server at no charge.
+
+     c) Convey individual copies of the object code with a copy of the
+     written offer to provide the Corresponding Source. This
+     alternative is allowed only occasionally and noncommercially, and
+     only if you received the object code with such an offer, in accord
+     with subsection 6b.
+
+     d) Convey the object code by offering access from a designated
+     place (gratis or for a charge), and offer equivalent access to the
+     Corresponding Source in the same way through the same place at no
+     further charge. You need not require recipients to copy the
+     Corresponding Source along with the object code. If the place to
+     copy the object code is a network server, the Corresponding Source
+     may be on a different server (operated by you or a third party)
+     that supports equivalent copying facilities, provided you maintain
+     clear directions next to the object code saying where to find the
+     Corresponding Source. Regardless of what server hosts the
+     Corresponding Source, you remain obligated to ensure that it is
+     available for as long as needed to satisfy these requirements.
+
+     e) Convey the object code using peer-to-peer transmission, provided
+     you inform other peers where the object code and Corresponding
+     Source of the work are being offered to the general public at no
+     charge under subsection 6d.
+
+   A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+   A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+   "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+   If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+   The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+   Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+   7. Additional Terms.
+
+   "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+   When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+   Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+     a) Disclaiming warranty or limiting liability differently from the
+     terms of sections 15 and 16 of this License; or
+
+     b) Requiring preservation of specified reasonable legal notices or
+     author attributions in that material or in the Appropriate Legal
+     Notices displayed by works containing it; or
+
+     c) Prohibiting misrepresentation of the origin of that material, or
+     requiring that modified versions of such material be marked in
+     reasonable ways as different from the original version; or
+
+     d) Limiting the use for publicity purposes of names of licensors or
+     authors of the material; or
+
+     e) Declining to grant rights under trademark law for use of some
+     trade names, trademarks, or service marks; or
+
+     f) Requiring indemnification of licensors and authors of that
+     material by anyone who conveys the material (or modified versions of
+     it) with contractual assumptions of liability to the recipient, for
+     any liability that these contractual assumptions directly impose on
+     those licensors and authors.
+
+   All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+   If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+   Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+   8. Termination.
+
+   You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+   However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+   Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+   Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+   9. Acceptance Not Required for Having Copies.
+
+   You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+   10. Automatic Licensing of Downstream Recipients.
+
+   Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+   An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+   You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+   11. Patents.
+
+   A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+   A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+   Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+   In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+   If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+   If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+   A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+   Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+   12. No Surrender of Others' Freedom.
+
+   If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+   13. Remote Network Interaction; Use with the GNU General Public License.
+
+   Notwithstanding any other provision of this License, if you modify the
+ Program, your modified version must prominently offer all users
+ interacting with it remotely through a computer network (if your version
+ supports such interaction) an opportunity to receive the Corresponding
+ Source of your version by providing access to the Corresponding Source
+ from a network server at no charge, through some standard or customary
+ means of facilitating copying of software. This Corresponding Source
+ shall include the Corresponding Source for any work covered by version 3
+ of the GNU General Public License that is incorporated pursuant to the
+ following paragraph.
+
+   Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the work with which it is combined will remain governed by version
+ 3 of the GNU General Public License.
+
+   14. Revised Versions of this License.
+
+   The Free Software Foundation may publish revised and/or new versions of
+ the GNU Affero General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+   Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU Affero General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU Affero General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+   If the Program specifies that a proxy can decide which future
+ versions of the GNU Affero General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+   Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+   15. Disclaimer of Warranty.
+
+   THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+   16. Limitation of Liability.
+
+   IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+   17. Interpretation of Sections 15 and 16.
+
+   If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+                      END OF TERMS AND CONDITIONS
+
+             How to Apply These Terms to Your New Programs
+
+   If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+   To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+     <one line to give the program's name and a brief idea of what it does.>
+     Copyright (C) <year> <name of author>
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU Affero General Public License as published
+     by the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+     GNU Affero General Public License for more details.
+
+     You should have received a copy of the GNU Affero General Public License
+     along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+   If your software can interact with users remotely through a computer
+ network, you should also make sure that it provides a way for users to
+ get its source. For example, if your program is a web application, its
+ interface could display a "Source" link that leads users to an archive
+ of the code. There are many ways you could offer source, and different
+ solutions will be better for different programs; see section 13 for the
+ specific requirements.
+
+   You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU AGPL, see
+ <https://www.gnu.org/licenses/>.
README.md CHANGED
@@ -1,12 +1,10 @@
  ---
- title: Diffsvc Test
- emoji: 🔥
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 3.16.2
- app_file: app.py
+ title: DiffSVC Inference
+ emoji: 🎙
+ colorFrom: red
+ colorTo: orange
+ sdk: docker
+ app_port: 8501
  pinned: false
+ duplicated_from: DIFF-SVCModel/Inference
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,106 @@
+ import gradio as gr
+ import pandas as pd
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import json
+ import os
+ import tempfile
+ import shutil
+ import requests
+ from pathlib import Path
+ global ckpt_temp_file
+ global audio_temp_file
+ global config_temp_file
+ ###################################################
+ from utils.hparams import hparams
+ from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import IPython.display as ipd
+ import utils
+ import librosa
+ import torchcrepe
+ from infer import *
+ import logging
+ from infer_tools.infer_tool import *
+ import io
+ 
+ spk_dict = {
+     "雷电将军": {"model_name": './models/genshin/raiden.ckpt', "config_name": './models/genshin/config.yaml'}
+ }
+ 
+ project_name = "Unnamed"
+ model_path = spk_dict['雷电将军']['model_name']
+ config_path = spk_dict['雷电将军']['config_name']
+ hubert_gpu = False
+ svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
+ 
+ def vc_fn(sid, audio_record, audio_upload, tran, pndm_speedup=20):
+     print(sid)
+     if audio_upload is not None:
+         audio_path = audio_upload
+     elif audio_record is not None:
+         audio_path = audio_record
+     else:
+         return "You need to upload a wav file or use the built-in recorder!", None
+ 
+     tran = int(tran)
+     pndm_speedup = int(pndm_speedup)  # diffusion speed-up factor, taken from the UI control
+     print('model loaded')
+     # demoaudio, sr = librosa.load(audio_path)
+     key = tran  # pitch shift, positive or negative (in semitones)
+     wav_gen = 'queeeeee.wav'
+ 
+     # Run the conversion
+     f0_tst, f0_pred, audio = run_clip(svc_model, file_path=audio_path, key=key, acc=pndm_speedup, use_crepe=True, use_pe=True, thre=0.05,
+                                       use_gt_mel=False, add_noise_step=500, project_name=project_name, out_path=wav_gen)
+ 
+     return "Success", (hparams['audio_sample_rate'], audio)
+ 
+ 
+ app = gr.Blocks()
+ with app:
+     with gr.Tabs():
+         with gr.TabItem("Basic"):
+             gr.Markdown(value="""
+                 This model is sovits_f0 (including the AI猫雷2.0 voice). It supports **unaccompanied** wav/mp3 (mono) input of **60s or less**, or the **built-in** browser recorder (choose one).
+ 
+                 Conversion quality depends on how closely the source audio's tone and rhythm match the target voice, and on whether the pitch stays within the target voice's range.
+ 
+                 The 猫雷 voice is weak in the low register; when converting male vocals, raising the pitch by **6-10 semitones** is recommended.
+ 
+                 The model's [GitHub repository](https://github.com/innnky/so-vits-svc); to build and train your own model, see this [GitHub repository](https://github.com/IceKyrin/sovits_guide).
+                 """)
+             speaker_id = gr.Dropdown(label="Voice", choices=['雷电将军'], value="雷电将军")
+             record_input = gr.Audio(source="microphone", label="Record your voice", type="filepath", elem_id="audio_inputs")
+             upload_input = gr.Audio(source="upload", label="Upload audio (under 60 seconds)", type="filepath",
+                                     elem_id="audio_inputs")
+             vc_transform = gr.Number(label="Pitch shift (integer, positive or negative, in semitones; +12 is one octave up)", value=0)
+             vc_speedup = gr.Number(label="Speed-up factor", value=20)
+             vc_submit = gr.Button("Convert", variant="primary")
+             out_audio = gr.Audio(label="Output Audio")
+             gr.Markdown(value="""
+                 The output message reports the mean pitch deviation in semitones, a measure of how off-pitch the converted audio is (typically under 0.5 semitones on average).
+                 """)
+             out_message = gr.Textbox(label="Output")
+             gr.Markdown(value="""The f0 curves visualize pitch deviation: blue is the input pitch, orange is the synthesized pitch.
+                 If you see **only orange**, the blue curve is covered and the conversion is good.
+                 """)
+             # f0_image = gr.Image(label="f0 curves")
+             vc_submit.click(vc_fn, [speaker_id, record_input, upload_input, vc_transform, vc_speedup],
+                             [out_message, out_audio])
+         with gr.TabItem("Instructions"):
+             gr.Markdown(value="""
+                 0. Guide collection: https://github.com/IceKyrin/sovits_guide/blob/main/README.md
+                 1. Only sovits_f0 (sovits 2.0) models are supported
+                 2. Download hubert-soft-0d54a1f4.pt yourself, rename it to hubert.pt and place it in the pth folder (already done here)
+                    https://github.com/bshall/hubert/releases/tag/v0.1
+                 3. Place the sovits 2.0 model in the pth folder
+                 4. The model's companion xxx.json must contain a speaker entry (the character list)
+                 5. Use unaccompanied audio or the built-in recorder; do not feed it unusual formats
+                 6. For demonstration and exchange only; no responsibility is taken for user behavior
+                 """)
+ 
+ app.launch()
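
For reference, the handler above can also be driven without the Gradio UI. The sketch below is illustrative only: it reuses the Svc constructor and the run_clip keyword arguments exactly as app.py wires them, while "input.wav" and "output.wav" are placeholder names, not files from this commit.

# Minimal offline-inference sketch mirroring the calls in app.py above.
from infer import run_clip
from infer_tools.infer_tool import Svc

# Same checkpoint and config the Space loads at startup.
svc_model = Svc("Unnamed",                       # project_name
                "./models/genshin/config.yaml",  # config_path
                False,                           # hubert_gpu
                "./models/genshin/raiden.ckpt")  # model_path

f0_tst, f0_pred, audio = run_clip(
    svc_model, file_path="input.wav",  # placeholder input path
    key=0,                             # pitch shift in semitones
    acc=20,                            # diffusion speed-up factor
    use_crepe=True, use_pe=True, thre=0.05,
    use_gt_mel=False, add_noise_step=500,
    project_name="Unnamed",
    out_path="output.wav")             # rendered audio is written here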
app.py.back2 ADDED
@@ -0,0 +1,123 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import json
+ import os
+ import tempfile
+ import shutil
+ import requests
+ from pathlib import Path
+ temp_dir = os.path.expanduser("~/app")
+ global ckpt_temp_file
+ global audio_temp_file
+ global config_temp_file
+ ###################################################
+ from utils.hparams import hparams
+ from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import IPython.display as ipd
+ import utils
+ import librosa
+ import torchcrepe
+ from infer import *
+ import logging
+ from infer_tools.infer_tool import *
+ import io
+ 
+ clip_completed = False
+ def render_audio(ckpt_temp_file, config_temp_file, audio_temp_file, title):
+     logging.getLogger('numba').setLevel(logging.WARNING)
+     title = int(title)
+     project_name = "Unnamed"
+     model_path = ckpt_temp_file
+     config_path = config_temp_file
+     hubert_gpu = True
+     svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
+     print('model loaded')
+     wav_fn = audio_temp_file
+     demoaudio, sr = librosa.load(wav_fn)
+     key = title  # pitch shift, positive or negative (in semitones)
+     # speed-up factor
+     pndm_speedup = 20
+     wav_gen = 'queeeeee.wav'  # change the extension to save other formats, e.g. flac for lossless compression
+ 
+     # Show the spinner and run the run_clip function inside the 'with' block
+     with st.spinner("Rendering Audio..."):
+         f0_tst, f0_pred, audio = run_clip(svc_model, file_path=wav_fn, key=key, acc=pndm_speedup, use_crepe=True, use_pe=True, thre=0.05,
+                                           use_gt_mel=False, add_noise_step=500, project_name=project_name, out_path=wav_gen)
+         clip_completed = True
+         if clip_completed:
+             # If the 'run_clip' function has completed, use the st.audio function to show an audio player for the file stored in the 'wav_gen' variable
+             st.audio(wav_gen)
+ 
+ #######################################################
+ st.set_page_config(
+     page_title="DiffSVC Render",
+     page_icon="🧊",
+     initial_sidebar_state="expanded",
+ )
+ ############
+ st.title('DIFF-SVC Render')
+ 
+ ### CKPT LOADER
+ with tempfile.TemporaryDirectory(dir=os.path.expanduser("~/app")) as temp_dir:
+     ckpt = st.file_uploader("Choose your CKPT", type='ckpt')
+     # Check if user uploaded a CKPT file
+     if ckpt is not None:
+         # TEMP FUNCTION
+         with tempfile.NamedTemporaryFile(mode="wb", suffix='.ckpt', delete=False) as temp:
+             # Get the file contents as bytes
+             bytes_data = ckpt.getvalue()
+             # Write the bytes to the temporary file
+             temp.write(bytes_data)
+             ckpt_temp_file = temp.name
+             # Print the temporary file name
+             print(temp.name)
+ 
+     # Display the file path
+     if "ckpt_temp_file" in locals():
+         st.success("File saved to: {}".format(ckpt_temp_file))
+ 
+     # File uploader
+     config = st.file_uploader("Choose your config", type='yaml')
+ 
+     # Check if user uploaded a config file
+     if config is not None:
+         # TEMP FUNCTION
+         with tempfile.NamedTemporaryFile(mode="wb", suffix='.yaml', delete=False) as temp:
+             # Get the file contents as bytes
+             bytes_data = config.getvalue()
+             # Write the bytes to the temporary file
+             temp.write(bytes_data)
+             config_temp_file = temp.name
+             # Print the temporary file name
+             print(temp.name)
+ 
+     # Display the file path
+     if "config_temp_file" in locals():
+         st.success("File saved to: {}".format(config_temp_file))
+ 
+     audio = st.file_uploader("Choose your audio", type=["wav", "mp3"])
+ 
+     # Check if user uploaded an audio file
+     if audio is not None:
+         # TEMP FUNCTION
+         with tempfile.NamedTemporaryFile(mode="wb", suffix='.wav', delete=False) as temp:
+             # Get the file contents as bytes
+             bytes_data = audio.getvalue()
+             # Write the bytes to the temporary file
+             temp.write(bytes_data)
+             audio_temp_file = temp.name
+             # Print the temporary file name
+             print(temp.name)
+ 
+     # Display the file path
+     if "audio_temp_file" in locals():
+         st.success("File saved to: {}".format(audio_temp_file))
+     # Add a text input for the key with a default value of 0
+     title = st.text_input("Key", value="0")
+     # Add a button to start the rendering process
+     if st.button("Render audio"):
+         render_audio(ckpt_temp_file, config_temp_file, audio_temp_file, title)
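
The three uploader blocks above repeat the same save-to-temp-file sequence. A consolidated helper might read as follows; this is a hypothetical refactoring sketch, not code from this commit, using only the Streamlit and tempfile calls already seen above.

import tempfile
import streamlit as st

def save_upload(label, file_type, suffix):
    # Hypothetical helper (not in this commit): offer an uploader and
    # persist any upload to a temp file, returning its path or None.
    uploaded = st.file_uploader(label, type=file_type)
    if uploaded is None:
        return None
    with tempfile.NamedTemporaryFile(mode="wb", suffix=suffix, delete=False) as temp:
        temp.write(uploaded.getvalue())
    st.success("File saved to: {}".format(temp.name))
    return temp.name

ckpt_temp_file = save_upload("Choose your CKPT", "ckpt", ".ckpt")
config_temp_file = save_upload("Choose your config", "yaml", ".yaml")
audio_temp_file = save_upload("Choose your audio", ["wav", "mp3"], ".wav")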
app.pyback ADDED
@@ -0,0 +1,123 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import json
+ import os
+ import tempfile
+ import shutil
+ import requests
+ from pathlib import Path
+ temp_dir = os.path.expanduser("/~app")
+ global ckpt_temp_file
+ global audio_temp_file
+ global config_temp_file
+ ###################################################
+ from utils.hparams import hparams
+ from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import IPython.display as ipd
+ import utils
+ import librosa
+ import torchcrepe
+ from infer import *
+ import logging
+ from infer_tools.infer_tool import *
+ import io
+ 
+ clip_completed = False
+ def render_audio(ckpt_temp_file, config_temp_file, audio_temp_file, title):
+     logging.getLogger('numba').setLevel(logging.WARNING)
+     title = int(title)
+     project_name = "Unnamed"
+     model_path = ckpt_temp_file
+     config_path = config_temp_file
+     hubert_gpu = True
+     svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
+     print('model loaded')
+     wav_fn = audio_temp_file
+     demoaudio, sr = librosa.load(wav_fn)
+     key = title  # pitch shift, positive or negative (in semitones)
+     # speed-up factor
+     pndm_speedup = 20
+     wav_gen = 'queeeeee.wav'  # change the extension to save other formats, e.g. flac for lossless compression
+ 
+     # Show the spinner and run the run_clip function inside the 'with' block
+     with st.spinner("Rendering Audio..."):
+         f0_tst, f0_pred, audio = run_clip(svc_model, file_path=wav_fn, key=key, acc=pndm_speedup, use_crepe=True, use_pe=True, thre=0.05,
+                                           use_gt_mel=False, add_noise_step=500, project_name=project_name, out_path=wav_gen)
+         clip_completed = True
+         if clip_completed:
+             # If the 'run_clip' function has completed, use the st.audio function to show an audio player for the file stored in the 'wav_gen' variable
+             st.audio(wav_gen)
+ 
+ #######################################################
+ st.set_page_config(
+     page_title="DiffSVC Render",
+     page_icon="🧊",
+     initial_sidebar_state="expanded",
+ )
+ ############
+ st.title('DIFF-SVC Render')
+ 
+ ### CKPT LOADER
+ with tempfile.TemporaryDirectory(dir=os.path.expanduser("/~app")) as temp_dir:
+     ckpt = st.file_uploader("Choose your CKPT", type='ckpt')
+     # Check if user uploaded a CKPT file
+     if ckpt is not None:
+         # TEMP FUNCTION
+         with tempfile.NamedTemporaryFile(mode="wb", suffix='.ckpt', delete=False, dir=temp_dir) as temp:
+             # Get the file contents as bytes
+             bytes_data = ckpt.getvalue()
+             # Write the bytes to the temporary file
+             temp.write(bytes_data)
+             ckpt_temp_file = temp.name
+             # Print the temporary file name
+             print(temp.name)
+ 
+     # Display the file path
+     if "ckpt_temp_file" in locals():
+         st.success("File saved to: {}".format(ckpt_temp_file))
+ 
+     # File uploader
+     config = st.file_uploader("Choose your config", type='yaml')
+ 
+     # Check if user uploaded a config file
+     if config is not None:
+         # TEMP FUNCTION
+         # Binary mode is required here, since getvalue() returns bytes
+         with tempfile.NamedTemporaryFile(mode="wb", suffix='.yaml', delete=False, dir=temp_dir) as temp:
+             # Get the file contents as bytes
+             bytes_data = config.getvalue()
+             # Write the bytes to the temporary file
+             temp.write(bytes_data)
+             config_temp_file = temp.name
+             # Print the temporary file name
+             print(temp.name)
+ 
+     # Display the file path
+     if "config_temp_file" in locals():
+         st.success("File saved to: {}".format(config_temp_file))
+ 
+     audio = st.file_uploader("Choose your audio", type=["wav", "mp3"])
+ 
+     # Check if user uploaded an audio file
+     if audio is not None:
+         # TEMP FUNCTION
+         with tempfile.NamedTemporaryFile(mode="wb", suffix='.wav', delete=False, dir=temp_dir) as temp:
+             # Get the file contents as bytes
+             bytes_data = audio.getvalue()
+             # Write the bytes to the temporary file
+             temp.write(bytes_data)
+             audio_temp_file = temp.name
+             # Print the temporary file name
+             print(temp.name)
+ 
+     # Display the file path
+     if "audio_temp_file" in locals():
+         st.success("File saved to: {}".format(audio_temp_file))
+     # Add a text input for the key with a default value of 0
+     title = st.text_input("Key", value="0")
+     # Add a button to start the rendering process
+     if st.button("Render audio"):
+         render_audio(ckpt_temp_file, config_temp_file, audio_temp_file, title)
batch.py ADDED
@@ -0,0 +1,43 @@
+ import soundfile
+ 
+ from infer_tools import infer_tool
+ from infer_tools.infer_tool import Svc
+ 
+ 
+ def run_clip(svc_model, key, acc, use_pe, use_crepe, thre, use_gt_mel, add_noise_step, project_name='', f_name=None,
+              file_path=None, out_path=None):
+     raw_audio_path = f_name
+     infer_tool.format_wav(raw_audio_path)
+     _f0_tst, _f0_pred, _audio = svc_model.infer(raw_audio_path, key=key, acc=acc, singer=True, use_pe=use_pe,
+                                                 use_crepe=use_crepe,
+                                                 thre=thre, use_gt_mel=use_gt_mel, add_noise_step=add_noise_step)
+     out_path = f'./singer_data/{f_name.split("/")[-1]}'
+     soundfile.write(out_path, _audio, 44100, 'PCM_16')
+ 
+ 
+ if __name__ == '__main__':
+     # project folder name, the one used during training
+     project_name = "firefox"
+     model_path = f'./checkpoints/{project_name}/clean_model_ckpt_steps_100000.ckpt'
+     config_path = f'./checkpoints/{project_name}/config.yaml'
+ 
+     # multiple wav/ogg files are supported; put them, with extensions, in the ./batch folder
+     file_names = infer_tool.get_end_file("./batch", "wav")
+     trans = [-6]  # pitch shift in semitones (positive or negative), one entry per file above; if too short, the list is padded with the first value
+     # speedup factor
+     accelerate = 50
+     hubert_gpu = True
+     cut_time = 30
+ 
+     # do not modify anything below
+     infer_tool.mkdir(["./batch", "./singer_data"])
+     infer_tool.fill_a_to_b(trans, file_names)
+ 
+     model = Svc(project_name, config_path, hubert_gpu, model_path)
+     count = 0
+     for f_name, tran in zip(file_names, trans):
+         print(f_name)
+         run_clip(model, key=tran, acc=accelerate, use_crepe=False, thre=0.05, use_pe=False, use_gt_mel=False,
+                  add_noise_step=500, f_name=f_name, project_name=project_name)
+         count += 1
+         print(f"process:{round(count * 100 / len(file_names), 2)}%")
checkpoints/0102_xiaoma_pe/config.yaml ADDED
@@ -0,0 +1,172 @@
1
+ accumulate_grad_batches: 1
2
+ audio_num_mel_bins: 80
3
+ audio_sample_rate: 24000
4
+ base_config:
5
+ - configs/tts/lj/fs2.yaml
6
+ binarization_args:
7
+ shuffle: false
8
+ with_align: true
9
+ with_f0: true
10
+ with_f0cwt: true
11
+ with_spk_embed: true
12
+ with_txt: true
13
+ with_wav: false
14
+ binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer
15
+ binary_data_dir: data/binary/xiaoma1022_24k_128hop
16
+ check_val_every_n_epoch: 10
17
+ clip_grad_norm: 1
18
+ cwt_add_f0_loss: false
19
+ cwt_hidden_size: 128
20
+ cwt_layers: 2
21
+ cwt_loss: l1
22
+ cwt_std_scale: 0.8
23
+ debug: false
24
+ dec_ffn_kernel_size: 9
25
+ dec_layers: 4
26
+ decoder_type: fft
27
+ dict_dir: ''
28
+ dropout: 0.1
29
+ ds_workers: 4
30
+ dur_enc_hidden_stride_kernel:
31
+ - 0,2,3
32
+ - 0,2,3
33
+ - 0,1,3
34
+ dur_loss: mse
35
+ dur_predictor_kernel: 3
36
+ dur_predictor_layers: 2
37
+ enc_ffn_kernel_size: 9
38
+ enc_layers: 4
39
+ encoder_K: 8
40
+ encoder_type: fft
41
+ endless_ds: true
42
+ ffn_act: gelu
43
+ ffn_padding: SAME
44
+ fft_size: 512
45
+ fmax: 12000
46
+ fmin: 30
47
+ gen_dir_name: ''
48
+ hidden_size: 256
49
+ hop_size: 128
50
+ infer: false
51
+ lambda_commit: 0.25
52
+ lambda_energy: 0.1
53
+ lambda_f0: 1.0
54
+ lambda_ph_dur: 1.0
55
+ lambda_sent_dur: 1.0
56
+ lambda_uv: 1.0
57
+ lambda_word_dur: 1.0
58
+ load_ckpt: ''
59
+ log_interval: 100
60
+ loud_norm: false
61
+ lr: 2.0
62
+ max_epochs: 1000
63
+ max_eval_sentences: 1
64
+ max_eval_tokens: 60000
65
+ max_frames: 5000
66
+ max_input_tokens: 1550
67
+ max_sentences: 100000
68
+ max_tokens: 20000
69
+ max_updates: 60000
70
+ mel_loss: l1
71
+ mel_vmax: 1.5
72
+ mel_vmin: -6
73
+ min_level_db: -120
74
+ norm_type: gn
75
+ num_ckpt_keep: 3
76
+ num_heads: 2
77
+ num_sanity_val_steps: 5
78
+ num_spk: 1
79
+ num_test_samples: 20
80
+ num_valid_plots: 10
81
+ optimizer_adam_beta1: 0.9
82
+ optimizer_adam_beta2: 0.98
83
+ out_wav_norm: false
84
+ pitch_ar: false
85
+ pitch_enc_hidden_stride_kernel:
86
+ - 0,2,5
87
+ - 0,2,5
88
+ - 0,2,5
89
+ pitch_extractor_conv_layers: 2
90
+ pitch_loss: l1
91
+ pitch_norm: log
92
+ pitch_type: frame
93
+ pre_align_args:
94
+ allow_no_txt: false
95
+ denoise: false
96
+ forced_align: mfa
97
+ txt_processor: en
98
+ use_sox: false
99
+ use_tone: true
100
+ pre_align_cls: data_gen.tts.lj.pre_align.LJPreAlign
101
+ predictor_dropout: 0.5
102
+ predictor_grad: 0.1
103
+ predictor_hidden: -1
104
+ predictor_kernel: 5
105
+ predictor_layers: 2
106
+ prenet_dropout: 0.5
107
+ prenet_hidden_size: 256
108
+ pretrain_fs_ckpt: ''
109
+ processed_data_dir: data/processed/ljspeech
110
+ profile_infer: false
111
+ raw_data_dir: data/raw/LJSpeech-1.1
112
+ ref_norm_layer: bn
113
+ reset_phone_dict: true
114
+ save_best: false
115
+ save_ckpt: true
116
+ save_codes:
117
+ - configs
118
+ - modules
119
+ - tasks
120
+ - utils
121
+ - usr
122
+ save_f0: false
123
+ save_gt: false
124
+ seed: 1234
125
+ sort_by_len: true
126
+ stop_token_weight: 5.0
127
+ task_cls: tasks.tts.pe.PitchExtractionTask
128
+ test_ids:
129
+ - 68
130
+ - 70
131
+ - 74
132
+ - 87
133
+ - 110
134
+ - 172
135
+ - 190
136
+ - 215
137
+ - 231
138
+ - 294
139
+ - 316
140
+ - 324
141
+ - 402
142
+ - 422
143
+ - 485
144
+ - 500
145
+ - 505
146
+ - 508
147
+ - 509
148
+ - 519
149
+ test_input_dir: ''
150
+ test_num: 523
151
+ test_set_name: test
152
+ train_set_name: train
153
+ use_denoise: false
154
+ use_energy_embed: false
155
+ use_gt_dur: false
156
+ use_gt_f0: false
157
+ use_pitch_embed: true
158
+ use_pos_embed: true
159
+ use_spk_embed: false
160
+ use_spk_id: false
161
+ use_split_spk_id: false
162
+ use_uv: true
163
+ use_var_enc: false
164
+ val_check_interval: 2000
165
+ valid_num: 348
166
+ valid_set_name: valid
167
+ vocoder: pwg
168
+ vocoder_ckpt: ''
169
+ warmup_updates: 2000
170
+ weight_decay: 0
171
+ win_size: 512
172
+ work_dir: checkpoints/0102_xiaoma_pe
checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1863f12324e43783089ab933edeeb969106b851e30d71019ebbaa9b82099d82a
3
+ size 39141959
checkpoints/0109_hifigan_bigpopcs_hop128/config.yaml ADDED
@@ -0,0 +1,241 @@
1
+ accumulate_grad_batches: 1
2
+ adam_b1: 0.8
3
+ adam_b2: 0.99
4
+ amp: false
5
+ audio_num_mel_bins: 80
6
+ audio_sample_rate: 24000
7
+ aux_context_window: 0
8
+ #base_config:
9
+ #- egs/egs_bases/singing/pwg.yaml
10
+ #- egs/egs_bases/tts/vocoder/hifigan.yaml
11
+ binarization_args:
12
+ reset_phone_dict: true
13
+ reset_word_dict: true
14
+ shuffle: false
15
+ trim_eos_bos: false
16
+ trim_sil: false
17
+ with_align: false
18
+ with_f0: true
19
+ with_f0cwt: false
20
+ with_linear: false
21
+ with_spk_embed: false
22
+ with_spk_id: true
23
+ with_txt: false
24
+ with_wav: true
25
+ with_word: false
26
+ binarizer_cls: data_gen.tts.singing.binarize.SingingBinarizer
27
+ binary_data_dir: data/binary/big_popcs_24k_hop128
28
+ check_val_every_n_epoch: 10
29
+ clip_grad_norm: 1
30
+ clip_grad_value: 0
31
+ datasets: []
32
+ debug: false
33
+ dec_ffn_kernel_size: 9
34
+ dec_layers: 4
35
+ dict_dir: ''
36
+ disc_start_steps: 40000
37
+ discriminator_grad_norm: 1
38
+ discriminator_optimizer_params:
39
+ eps: 1.0e-06
40
+ lr: 0.0002
41
+ weight_decay: 0.0
42
+ discriminator_params:
43
+ bias: true
44
+ conv_channels: 64
45
+ in_channels: 1
46
+ kernel_size: 3
47
+ layers: 10
48
+ nonlinear_activation: LeakyReLU
49
+ nonlinear_activation_params:
50
+ negative_slope: 0.2
51
+ out_channels: 1
52
+ use_weight_norm: true
53
+ discriminator_scheduler_params:
54
+ gamma: 0.999
55
+ step_size: 600
56
+ dropout: 0.1
57
+ ds_workers: 1
58
+ enc_ffn_kernel_size: 9
59
+ enc_layers: 4
60
+ endless_ds: true
61
+ ffn_act: gelu
62
+ ffn_padding: SAME
63
+ fft_size: 512
64
+ fmax: 12000
65
+ fmin: 30
66
+ frames_multiple: 1
67
+ gen_dir_name: ''
68
+ generator_grad_norm: 10
69
+ generator_optimizer_params:
70
+ eps: 1.0e-06
71
+ lr: 0.0002
72
+ weight_decay: 0.0
73
+ generator_params:
74
+ aux_channels: 80
75
+ dropout: 0.0
76
+ gate_channels: 128
77
+ in_channels: 1
78
+ kernel_size: 3
79
+ layers: 30
80
+ out_channels: 1
81
+ residual_channels: 64
82
+ skip_channels: 64
83
+ stacks: 3
84
+ upsample_net: ConvInUpsampleNetwork
85
+ upsample_params:
86
+ upsample_scales:
87
+ - 2
88
+ - 4
89
+ - 4
90
+ - 4
91
+ use_nsf: false
92
+ use_pitch_embed: true
93
+ use_weight_norm: true
94
+ generator_scheduler_params:
95
+ gamma: 0.999
96
+ step_size: 600
97
+ griffin_lim_iters: 60
98
+ hidden_size: 256
99
+ hop_size: 128
100
+ infer: false
101
+ lambda_adv: 1.0
102
+ lambda_cdisc: 4.0
103
+ lambda_energy: 0.0
104
+ lambda_f0: 0.0
105
+ lambda_mel: 5.0
106
+ lambda_mel_adv: 1.0
107
+ lambda_ph_dur: 0.0
108
+ lambda_sent_dur: 0.0
109
+ lambda_uv: 0.0
110
+ lambda_word_dur: 0.0
111
+ load_ckpt: ''
112
+ loud_norm: false
113
+ lr: 2.0
114
+ max_epochs: 1000
115
+ max_frames: 2400
116
+ max_input_tokens: 1550
117
+ max_samples: 8192
118
+ max_sentences: 20
119
+ max_tokens: 24000
120
+ max_updates: 3000000
121
+ max_valid_sentences: 1
122
+ max_valid_tokens: 60000
123
+ mel_loss: ssim:0.5|l1:0.5
124
+ mel_vmax: 1.5
125
+ mel_vmin: -6
126
+ min_frames: 0
127
+ min_level_db: -120
128
+ num_ckpt_keep: 3
129
+ num_heads: 2
130
+ num_mels: 80
131
+ num_sanity_val_steps: 5
132
+ num_spk: 100
133
+ num_test_samples: 0
134
+ num_valid_plots: 10
135
+ optimizer_adam_beta1: 0.9
136
+ optimizer_adam_beta2: 0.98
137
+ out_wav_norm: false
138
+ pitch_extractor: parselmouth
139
+ pitch_type: frame
140
+ pre_align_args:
141
+ allow_no_txt: false
142
+ denoise: false
143
+ sox_resample: true
144
+ sox_to_wav: false
145
+ trim_sil: false
146
+ txt_processor: zh
147
+ use_tone: false
148
+ pre_align_cls: data_gen.tts.singing.pre_align.SingingPreAlign
149
+ predictor_grad: 0.0
150
+ print_nan_grads: false
151
+ processed_data_dir: ''
152
+ profile_infer: false
153
+ raw_data_dir: ''
154
+ ref_level_db: 20
155
+ rename_tmux: true
156
+ rerun_gen: true
157
+ resblock: '1'
158
+ resblock_dilation_sizes:
159
+ - - 1
160
+ - 3
161
+ - 5
162
+ - - 1
163
+ - 3
164
+ - 5
165
+ - - 1
166
+ - 3
167
+ - 5
168
+ resblock_kernel_sizes:
169
+ - 3
170
+ - 7
171
+ - 11
172
+ resume_from_checkpoint: 0
173
+ save_best: true
174
+ save_codes: []
175
+ save_f0: true
176
+ save_gt: true
177
+ scheduler: rsqrt
178
+ seed: 1234
179
+ sort_by_len: true
180
+ stft_loss_params:
181
+ fft_sizes:
182
+ - 1024
183
+ - 2048
184
+ - 512
185
+ hop_sizes:
186
+ - 120
187
+ - 240
188
+ - 50
189
+ win_lengths:
190
+ - 600
191
+ - 1200
192
+ - 240
193
+ window: hann_window
194
+ task_cls: tasks.vocoder.hifigan.HifiGanTask
195
+ tb_log_interval: 100
196
+ test_ids: []
197
+ test_input_dir: ''
198
+ test_num: 50
199
+ test_prefixes: []
200
+ test_set_name: test
201
+ train_set_name: train
202
+ train_sets: ''
203
+ upsample_initial_channel: 512
204
+ upsample_kernel_sizes:
205
+ - 16
206
+ - 16
207
+ - 4
208
+ - 4
209
+ upsample_rates:
210
+ - 8
211
+ - 4
212
+ - 2
213
+ - 2
214
+ use_cdisc: false
215
+ use_cond_disc: false
216
+ use_fm_loss: false
217
+ use_gt_dur: true
218
+ use_gt_f0: true
219
+ use_mel_loss: true
220
+ use_ms_stft: false
221
+ use_pitch_embed: true
222
+ use_ref_enc: true
223
+ use_spec_disc: false
224
+ use_spk_embed: false
225
+ use_spk_id: false
226
+ use_split_spk_id: false
227
+ val_check_interval: 2000
228
+ valid_infer_interval: 10000
229
+ valid_monitor_key: val_loss
230
+ valid_monitor_mode: min
231
+ valid_set_name: valid
232
+ vocoder: pwg
233
+ vocoder_ckpt: ''
234
+ vocoder_denoise_c: 0.0
235
+ warmup_updates: 8000
236
+ weight_decay: 0
237
+ win_length: null
238
+ win_size: 512
239
+ window: hann
240
+ word_size: 3000
241
+ work_dir: checkpoints/0109_hifigan_bigpopcs_hop128
checkpoints/0109_hifigan_bigpopcs_hop128/model_ckpt_steps_1512000.ckpt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1cb68f3ce0c46ba0a8b6d49718f1fffdf5bd7bcab769a986fd2fd129835cc1d1
3
+ size 55827436
checkpoints/Unnamed/config.yaml ADDED
@@ -0,0 +1,445 @@
1
+ K_step: 1000
2
+ accumulate_grad_batches: 1
3
+ audio_num_mel_bins: 128
4
+ audio_sample_rate: 44100
5
+ binarization_args:
6
+ shuffle: false
7
+ with_align: true
8
+ with_f0: true
9
+ with_hubert: true
10
+ with_spk_embed: false
11
+ with_wav: false
12
+ binarizer_cls: preprocessing.SVCpre.SVCBinarizer
13
+ binary_data_dir: data/binary/Unnamed
14
+ check_val_every_n_epoch: 10
15
+ choose_test_manually: false
16
+ clip_grad_norm: 1
17
+ config_path: training/config_nsf.yaml
18
+ content_cond_steps: []
19
+ cwt_add_f0_loss: false
20
+ cwt_hidden_size: 128
21
+ cwt_layers: 2
22
+ cwt_loss: l1
23
+ cwt_std_scale: 0.8
24
+ datasets:
25
+ - opencpop
26
+ debug: false
27
+ dec_ffn_kernel_size: 9
28
+ dec_layers: 4
29
+ decay_steps: 60000
30
+ decoder_type: fft
31
+ dict_dir: ''
32
+ diff_decoder_type: wavenet
33
+ diff_loss_type: l2
34
+ dilation_cycle_length: 4
35
+ dropout: 0.1
36
+ ds_workers: 4
37
+ dur_enc_hidden_stride_kernel:
38
+ - 0,2,3
39
+ - 0,2,3
40
+ - 0,1,3
41
+ dur_loss: mse
42
+ dur_predictor_kernel: 3
43
+ dur_predictor_layers: 5
44
+ enc_ffn_kernel_size: 9
45
+ enc_layers: 4
46
+ encoder_K: 8
47
+ encoder_type: fft
48
+ endless_ds: false
49
+ f0_bin: 256
50
+ f0_max: 1100.0
51
+ f0_min: 40.0
52
+ ffn_act: gelu
53
+ ffn_padding: SAME
54
+ fft_size: 2048
55
+ fmax: 16000
56
+ fmin: 40
57
+ fs2_ckpt: ''
58
+ gaussian_start: true
59
+ gen_dir_name: ''
60
+ gen_tgt_spk_id: -1
61
+ hidden_size: 256
62
+ hop_size: 512
63
+ hubert_gpu: true
64
+ hubert_path: checkpoints/hubert/hubert_soft.pt
65
+ infer: false
66
+ keep_bins: 128
67
+ lambda_commit: 0.25
68
+ lambda_energy: 0.0
69
+ lambda_f0: 1.0
70
+ lambda_ph_dur: 0.3
71
+ lambda_sent_dur: 1.0
72
+ lambda_uv: 1.0
73
+ lambda_word_dur: 1.0
74
+ load_ckpt: ''
75
+ log_interval: 100
76
+ loud_norm: false
77
+ lr: 0.0008
78
+ max_beta: 0.02
79
+ max_epochs: 3000
80
+ max_eval_sentences: 1
81
+ max_eval_tokens: 60000
82
+ max_frames: 42000
83
+ max_input_tokens: 60000
84
+ max_sentences: 14
85
+ max_tokens: 128000
86
+ max_updates: 1000000
87
+ mel_loss: ssim:0.5|l1:0.5
88
+ mel_vmax: 1.5
89
+ mel_vmin: -6.0
90
+ min_level_db: -120
91
+ no_fs2: true
92
+ norm_type: gn
93
+ num_ckpt_keep: 10
94
+ num_heads: 2
95
+ num_sanity_val_steps: 1
96
+ num_spk: 1
97
+ num_test_samples: 0
98
+ num_valid_plots: 10
99
+ optimizer_adam_beta1: 0.9
100
+ optimizer_adam_beta2: 0.98
101
+ out_wav_norm: false
102
+ pe_ckpt: checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt
103
+ pe_enable: false
104
+ perform_enhance: true
105
+ pitch_ar: false
106
+ pitch_enc_hidden_stride_kernel:
107
+ - 0,2,5
108
+ - 0,2,5
109
+ - 0,2,5
110
+ pitch_extractor: parselmouth
111
+ pitch_loss: l2
112
+ pitch_norm: log
113
+ pitch_type: frame
114
+ pndm_speedup: 10
115
+ pre_align_args:
116
+ allow_no_txt: false
117
+ denoise: false
118
+ forced_align: mfa
119
+ txt_processor: zh_g2pM
120
+ use_sox: true
121
+ use_tone: false
122
+ pre_align_cls: data_gen.singing.pre_align.SingingPreAlign
123
+ predictor_dropout: 0.5
124
+ predictor_grad: 0.1
125
+ predictor_hidden: -1
126
+ predictor_kernel: 5
127
+ predictor_layers: 5
128
+ prenet_dropout: 0.5
129
+ prenet_hidden_size: 256
130
+ pretrain_fs_ckpt: ''
131
+ processed_data_dir: xxx
132
+ profile_infer: false
133
+ raw_data_dir: data/raw/Unnamed
134
+ ref_norm_layer: bn
135
+ rel_pos: true
136
+ reset_phone_dict: true
137
+ residual_channels: 384
138
+ residual_layers: 20
139
+ save_best: true
140
+ save_ckpt: true
141
+ save_codes:
142
+ - configs
143
+ - modules
144
+ - src
145
+ - utils
146
+ save_f0: true
147
+ save_gt: false
148
+ schedule_type: linear
149
+ seed: 1234
150
+ sort_by_len: true
151
+ speaker_id: Unnamed
152
+ spec_max:
153
+ - 0.47615352272987366
154
+ - 0.6125704050064087
155
+ - 0.7518845796585083
156
+ - 0.900716245174408
157
+ - 0.8935521841049194
158
+ - 0.9057011604309082
159
+ - 0.9648348689079285
160
+ - 0.9044283032417297
161
+ - 0.9109272360801697
162
+ - 0.9744535088539124
163
+ - 0.9476388692855835
164
+ - 0.9883336424827576
165
+ - 1.0821290016174316
166
+ - 1.046391248703003
167
+ - 0.9829667806625366
168
+ - 1.0163493156433105
169
+ - 0.9825412631034851
170
+ - 1.0021960735321045
171
+ - 1.052114725112915
172
+ - 1.128888726234436
173
+ - 1.186057209968567
174
+ - 1.112004280090332
175
+ - 1.1282787322998047
176
+ - 1.051572322845459
177
+ - 1.1104764938354492
178
+ - 1.176831603050232
179
+ - 1.13348388671875
180
+ - 1.1075258255004883
181
+ - 1.1696264743804932
182
+ - 1.0231049060821533
183
+ - 0.9303848743438721
184
+ - 1.1257890462875366
185
+ - 1.1610286235809326
186
+ - 1.0335885286331177
187
+ - 1.0645352602005005
188
+ - 1.0619306564331055
189
+ - 1.1310148239135742
190
+ - 1.1191954612731934
191
+ - 1.1307402849197388
192
+ - 1.2094721794128418
193
+ - 1.2683185338974
194
+ - 1.1212272644042969
195
+ - 1.1781182289123535
196
+ - 1.1501952409744263
197
+ - 0.9884514808654785
198
+ - 0.9226155281066895
199
+ - 0.9469702839851379
200
+ - 1.023751139640808
201
+ - 1.1348609924316406
202
+ - 1.087107539176941
203
+ - 0.9899962544441223
204
+ - 1.061837077140808
205
+ - 1.0341650247573853
206
+ - 0.9019684195518494
207
+ - 0.7986546158790588
208
+ - 0.7983465194702148
209
+ - 0.7755436301231384
210
+ - 0.701917290687561
211
+ - 0.7639197707176208
212
+ - 0.7503461837768555
213
+ - 0.6701087951660156
214
+ - 0.5326520800590515
215
+ - 0.6320568323135376
216
+ - 0.4748716950416565
217
+ - 0.41016310453414917
218
+ - 0.4754445552825928
219
+ - 0.4267503023147583
220
+ - 0.391481876373291
221
+ - 0.3118276298046112
222
+ - 0.3193877339363098
223
+ - 0.3111794888973236
224
+ - 0.3342774212360382
225
+ - 0.1353837102651596
226
+ - 0.16596835851669312
227
+ - 0.1730986088514328
228
+ - 0.2325316220521927
229
+ - 0.17107760906219482
230
+ - 0.10877621918916702
231
+ - 0.2612082064151764
232
+ - 0.11200784891843796
233
+ - 0.14075303077697754
234
+ - 0.07312829792499542
235
+ - -0.011712555773556232
236
+ - 0.1741427332162857
237
+ - 0.19782507419586182
238
+ - 0.03305494412779808
239
+ - 0.004054426681250334
240
+ - 0.1011907309293747
241
+ - 0.1317272037267685
242
+ - 0.014256341382861137
243
+ - 0.019952761009335518
244
+ - -0.1253873109817505
245
+ - -0.14854255318641663
246
+ - -0.14063480496406555
247
+ - -0.1331133395433426
248
+ - -0.28339776396751404
249
+ - -0.38559386134147644
250
+ - -0.2798943519592285
251
+ - -0.19351321458816528
252
+ - -0.23238061368465424
253
+ - -0.2850213944911957
254
+ - -0.20320385694503784
255
+ - -0.24087588489055634
256
+ - -0.15823237597942352
257
+ - -0.13949760794639587
258
+ - -0.19627133011817932
259
+ - -0.1920071393251419
260
+ - -0.19384469091892242
261
+ - -0.22403620183467865
262
+ - -0.18197931349277496
263
+ - -0.28423866629600525
264
+ - -0.26859334111213684
265
+ - -0.3213472068309784
266
+ - -0.3303631842136383
267
+ - -0.3835512697696686
268
+ - -0.3256210386753082
269
+ - -0.3938714265823364
270
+ - -0.4373253881931305
271
+ - -0.4146285951137543
272
+ - -0.4861420691013336
273
+ - -0.4018196761608124
274
+ - -0.46770456433296204
275
+ - -0.4100344479084015
276
+ - -0.5364681482315063
277
+ - -0.5802102088928223
278
+ - -0.5856970548629761
279
+ - -0.47378262877464294
280
+ - -0.36258620023727417
281
+ spec_min:
282
+ - -4.999994277954102
283
+ - -4.999994277954102
284
+ - -4.999994277954102
285
+ - -4.999994277954102
286
+ - -4.999994277954102
287
+ - -4.999994277954102
288
+ - -4.999994277954102
289
+ - -4.999994277954102
290
+ - -4.999994277954102
291
+ - -4.999994277954102
292
+ - -4.999994277954102
293
+ - -4.999994277954102
294
+ - -4.999994277954102
295
+ - -4.999994277954102
296
+ - -4.999994277954102
297
+ - -4.999994277954102
298
+ - -4.999994277954102
299
+ - -4.999994277954102
300
+ - -4.999994277954102
301
+ - -4.999994277954102
302
+ - -4.999994277954102
303
+ - -4.999994277954102
304
+ - -4.999994277954102
305
+ - -4.999994277954102
306
+ - -4.999994277954102
307
+ - -4.999994277954102
308
+ - -4.999994277954102
309
+ - -4.999994277954102
310
+ - -4.999994277954102
311
+ - -4.999994277954102
312
+ - -4.999994277954102
313
+ - -4.999994277954102
314
+ - -4.999994277954102
315
+ - -4.999994277954102
316
+ - -4.999994277954102
317
+ - -4.999994277954102
318
+ - -4.999994277954102
319
+ - -4.999994277954102
320
+ - -4.999994277954102
321
+ - -4.999994277954102
322
+ - -4.999994277954102
323
+ - -4.999994277954102
324
+ - -4.999994277954102
325
+ - -4.999994277954102
326
+ - -4.999994277954102
327
+ - -4.999994277954102
328
+ - -4.999994277954102
329
+ - -4.999994277954102
330
+ - -4.999994277954102
331
+ - -4.999994277954102
332
+ - -4.999994277954102
333
+ - -4.999994277954102
334
+ - -4.999994277954102
335
+ - -4.999994277954102
336
+ - -4.999994277954102
337
+ - -4.999994277954102
338
+ - -4.999994277954102
339
+ - -4.999994277954102
340
+ - -4.999994277954102
341
+ - -4.999994277954102
342
+ - -4.999994277954102
343
+ - -4.999994277954102
344
+ - -4.999994277954102
345
+ - -4.999994277954102
346
+ - -4.999994277954102
347
+ - -4.999994277954102
348
+ - -4.999994277954102
349
+ - -4.999994277954102
350
+ - -4.999994277954102
351
+ - -4.999994277954102
352
+ - -4.999994277954102
353
+ - -4.999994277954102
354
+ - -4.999994277954102
355
+ - -4.999994277954102
356
+ - -4.999994277954102
357
+ - -4.999994277954102
358
+ - -4.999994277954102
359
+ - -4.999994277954102
360
+ - -4.999994277954102
361
+ - -4.999994277954102
362
+ - -4.999994277954102
363
+ - -4.999994277954102
364
+ - -4.999994277954102
365
+ - -4.999994277954102
366
+ - -4.999994277954102
367
+ - -4.999994277954102
368
+ - -4.999994277954102
369
+ - -4.999994277954102
370
+ - -4.999994277954102
371
+ - -4.999994277954102
372
+ - -4.999994277954102
373
+ - -4.999994277954102
374
+ - -4.999994277954102
375
+ - -4.999994277954102
376
+ - -4.999994277954102
377
+ - -4.999994277954102
378
+ - -4.999994277954102
379
+ - -4.999994277954102
380
+ - -4.999994277954102
381
+ - -4.999994277954102
382
+ - -4.999994277954102
383
+ - -4.999994277954102
384
+ - -4.999994277954102
385
+ - -4.999994277954102
386
+ - -4.999994277954102
387
+ - -4.999994277954102
388
+ - -4.999994277954102
389
+ - -4.999994277954102
390
+ - -4.999994277954102
391
+ - -4.999994277954102
392
+ - -4.999994277954102
393
+ - -4.999994277954102
394
+ - -4.999994277954102
395
+ - -4.999994277954102
396
+ - -4.999994277954102
397
+ - -4.999994277954102
398
+ - -4.999994277954102
399
+ - -4.999994277954102
400
+ - -4.999994277954102
401
+ - -4.999994277954102
402
+ - -4.999994277954102
403
+ - -4.999994277954102
404
+ - -4.999994277954102
405
+ - -4.999994277954102
406
+ - -4.999994277954102
407
+ - -4.999994277954102
408
+ - -4.999994277954102
409
+ - -4.999994277954102
410
+ spk_cond_steps: []
411
+ stop_token_weight: 5.0
412
+ task_cls: training.task.SVC_task.SVCTask
413
+ test_ids: []
414
+ test_input_dir: ''
415
+ test_num: 0
416
+ test_prefixes:
417
+ - test
418
+ test_set_name: test
419
+ timesteps: 1000
420
+ train_set_name: train
421
+ use_crepe: false
422
+ use_denoise: false
423
+ use_energy_embed: false
424
+ use_gt_dur: false
425
+ use_gt_f0: false
426
+ use_midi: false
427
+ use_nsf: true
428
+ use_pitch_embed: true
429
+ use_pos_embed: true
430
+ use_spk_embed: false
431
+ use_spk_id: false
432
+ use_split_spk_id: false
433
+ use_uv: false
434
+ use_var_enc: false
435
+ use_vec: false
436
+ val_check_interval: 1000
437
+ valid_num: 0
438
+ valid_set_name: valid
439
+ vocoder: network.vocoders.nsf_hifigan.NsfHifiGAN
440
+ vocoder_ckpt: checkpoints/nsf_hifigan/model
441
+ warmup_updates: 2000
442
+ wav2spec_eps: 1e-6
443
+ weight_decay: 0
444
+ win_size: 2048
445
+ work_dir: checkpoints/Unnamed
checkpoints/Unnamed/config_nsf.yaml ADDED
@@ -0,0 +1,445 @@
1
+ K_step: 1000
2
+ accumulate_grad_batches: 1
3
+ audio_num_mel_bins: 128
4
+ audio_sample_rate: 44100
5
+ binarization_args:
6
+ shuffle: false
7
+ with_align: true
8
+ with_f0: true
9
+ with_hubert: true
10
+ with_spk_embed: false
11
+ with_wav: false
12
+ binarizer_cls: preprocessing.SVCpre.SVCBinarizer
13
+ binary_data_dir: data/binary/Unnamed
14
+ check_val_every_n_epoch: 10
15
+ choose_test_manually: false
16
+ clip_grad_norm: 1
17
+ config_path: training/config_nsf.yaml
18
+ content_cond_steps: []
19
+ cwt_add_f0_loss: false
20
+ cwt_hidden_size: 128
21
+ cwt_layers: 2
22
+ cwt_loss: l1
23
+ cwt_std_scale: 0.8
24
+ datasets:
25
+ - opencpop
26
+ debug: false
27
+ dec_ffn_kernel_size: 9
28
+ dec_layers: 4
29
+ decay_steps: 20000
30
+ decoder_type: fft
31
+ dict_dir: ''
32
+ diff_decoder_type: wavenet
33
+ diff_loss_type: l2
34
+ dilation_cycle_length: 4
35
+ dropout: 0.1
36
+ ds_workers: 4
37
+ dur_enc_hidden_stride_kernel:
38
+ - 0,2,3
39
+ - 0,2,3
40
+ - 0,1,3
41
+ dur_loss: mse
42
+ dur_predictor_kernel: 3
43
+ dur_predictor_layers: 5
44
+ enc_ffn_kernel_size: 9
45
+ enc_layers: 4
46
+ encoder_K: 8
47
+ encoder_type: fft
48
+ endless_ds: false
49
+ f0_bin: 256
50
+ f0_max: 1100.0
51
+ f0_min: 40.0
52
+ ffn_act: gelu
53
+ ffn_padding: SAME
54
+ fft_size: 2048
55
+ fmax: 16000
56
+ fmin: 40
57
+ fs2_ckpt: ''
58
+ gaussian_start: true
59
+ gen_dir_name: ''
60
+ gen_tgt_spk_id: -1
61
+ hidden_size: 256
62
+ hop_size: 512
63
+ hubert_gpu: true
64
+ hubert_path: checkpoints/hubert/hubert_soft.pt
65
+ infer: false
66
+ keep_bins: 128
67
+ lambda_commit: 0.25
68
+ lambda_energy: 0.0
69
+ lambda_f0: 1.0
70
+ lambda_ph_dur: 0.3
71
+ lambda_sent_dur: 1.0
72
+ lambda_uv: 1.0
73
+ lambda_word_dur: 1.0
74
+ load_ckpt: pretrain/nehito_ckpt_steps_1000000.ckpt
75
+ log_interval: 100
76
+ loud_norm: false
77
+ lr: 5.0e-05
78
+ max_beta: 0.02
79
+ max_epochs: 3000
80
+ max_eval_sentences: 1
81
+ max_eval_tokens: 60000
82
+ max_frames: 42000
83
+ max_input_tokens: 60000
84
+ max_sentences: 12
85
+ max_tokens: 128000
86
+ max_updates: 1000000
87
+ mel_loss: ssim:0.5|l1:0.5
88
+ mel_vmax: 1.5
89
+ mel_vmin: -6.0
90
+ min_level_db: -120
91
+ no_fs2: true
92
+ norm_type: gn
93
+ num_ckpt_keep: 10
94
+ num_heads: 2
95
+ num_sanity_val_steps: 1
96
+ num_spk: 1
97
+ num_test_samples: 0
98
+ num_valid_plots: 10
99
+ optimizer_adam_beta1: 0.9
100
+ optimizer_adam_beta2: 0.98
101
+ out_wav_norm: false
102
+ pe_ckpt: checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt
103
+ pe_enable: false
104
+ perform_enhance: true
105
+ pitch_ar: false
106
+ pitch_enc_hidden_stride_kernel:
107
+ - 0,2,5
108
+ - 0,2,5
109
+ - 0,2,5
110
+ pitch_extractor: parselmouth
111
+ pitch_loss: l2
112
+ pitch_norm: log
113
+ pitch_type: frame
114
+ pndm_speedup: 10
115
+ pre_align_args:
116
+ allow_no_txt: false
117
+ denoise: false
118
+ forced_align: mfa
119
+ txt_processor: zh_g2pM
120
+ use_sox: true
121
+ use_tone: false
122
+ pre_align_cls: data_gen.singing.pre_align.SingingPreAlign
123
+ predictor_dropout: 0.5
124
+ predictor_grad: 0.1
125
+ predictor_hidden: -1
126
+ predictor_kernel: 5
127
+ predictor_layers: 5
128
+ prenet_dropout: 0.5
129
+ prenet_hidden_size: 256
130
+ pretrain_fs_ckpt: ''
131
+ processed_data_dir: xxx
132
+ profile_infer: false
133
+ raw_data_dir: data/raw/Unnamed
134
+ ref_norm_layer: bn
135
+ rel_pos: true
136
+ reset_phone_dict: true
137
+ residual_channels: 384
138
+ residual_layers: 20
139
+ save_best: false
140
+ save_ckpt: true
141
+ save_codes:
142
+ - configs
143
+ - modules
144
+ - src
145
+ - utils
146
+ save_f0: true
147
+ save_gt: false
148
+ schedule_type: linear
149
+ seed: 1234
150
+ sort_by_len: true
151
+ speaker_id: Unnamed
152
+ spec_max:
153
+ - -0.4884430170059204
154
+ - 0.004534448496997356
155
+ - 0.5684943795204163
156
+ - 0.6527385115623474
157
+ - 0.659079372882843
158
+ - 0.7416915893554688
159
+ - 0.844637930393219
160
+ - 0.806076169013977
161
+ - 0.7238750457763672
162
+ - 0.9744535088539124
163
+ - 0.9476388692855835
164
+ - 0.9883336424827576
165
+ - 1.0821290016174316
166
+ - 1.046391248703003
167
+ - 0.9829667806625366
168
+ - 1.0163493156433105
169
+ - 0.9825412631034851
170
+ - 0.9834834337234497
171
+ - 0.9811502695083618
172
+ - 1.128888726234436
173
+ - 1.186057209968567
174
+ - 1.112004280090332
175
+ - 1.1282787322998047
176
+ - 1.051572322845459
177
+ - 1.0510444641113281
178
+ - 1.0110565423965454
179
+ - 0.9236567616462708
180
+ - 0.8036720156669617
181
+ - 0.8383486270904541
182
+ - 0.7735869288444519
183
+ - 0.9303848743438721
184
+ - 1.1257890462875366
185
+ - 1.1610286235809326
186
+ - 1.0335885286331177
187
+ - 1.0645352602005005
188
+ - 1.0619306564331055
189
+ - 1.1310148239135742
190
+ - 1.1191954612731934
191
+ - 1.1307402849197388
192
+ - 0.8837698698043823
193
+ - 1.1153966188430786
194
+ - 1.1045044660568237
195
+ - 1.0479614734649658
196
+ - 0.9491603374481201
197
+ - 0.9858523011207581
198
+ - 0.9226155281066895
199
+ - 0.9469702839851379
200
+ - 0.8791896104812622
201
+ - 0.997624933719635
202
+ - 0.9068642854690552
203
+ - 0.9575618505477905
204
+ - 0.8551340699195862
205
+ - 0.8397778272628784
206
+ - 0.8908605575561523
207
+ - 0.7986546158790588
208
+ - 0.7983465194702148
209
+ - 0.6965265274047852
210
+ - 0.640673041343689
211
+ - 0.6690735220909119
212
+ - 0.5631484985351562
213
+ - 0.48587048053741455
214
+ - 0.5326520800590515
215
+ - 0.4286036193370819
216
+ - 0.35252484679222107
217
+ - 0.3290073573589325
218
+ - 0.4754445552825928
219
+ - 0.3632410168647766
220
+ - 0.391481876373291
221
+ - 0.20288512110710144
222
+ - 0.18305960297584534
223
+ - 0.1539602279663086
224
+ - 0.03451670706272125
225
+ - -0.16881510615348816
226
+ - -0.02030198462307453
227
+ - 0.10024689882993698
228
+ - -0.023952053859829903
229
+ - 0.05635542422533035
230
+ - 0.10877621918916702
231
+ - 0.006155031267553568
232
+ - 0.07318088412284851
233
+ - 0.14075303077697754
234
+ - 0.057870157063007355
235
+ - -0.0520513579249382
236
+ - 0.1741427332162857
237
+ - -0.11464552581310272
238
+ - 0.03305494412779808
239
+ - -0.06897418200969696
240
+ - -0.12598733603954315
241
+ - -0.09894973039627075
242
+ - -0.2817802429199219
243
+ - -0.0825519785284996
244
+ - -0.3040400445461273
245
+ - -0.4998124837875366
246
+ - -0.36957985162734985
247
+ - -0.5409602522850037
248
+ - -0.49879470467567444
249
+ - -0.713716983795166
250
+ - -0.6545754671096802
251
+ - -0.6425778865814209
252
+ - -0.6178902387619019
253
+ - -0.47356730699539185
254
+ - -0.6165243983268738
255
+ - -0.5841533541679382
256
+ - -0.5759448409080505
257
+ - -0.5498068332672119
258
+ - -0.4661938548088074
259
+ - -0.5811225771903992
260
+ - -0.614664614200592
261
+ - -0.3902229070663452
262
+ - -0.7037366032600403
263
+ - -0.7260795831680298
264
+ - -0.7540019750595093
265
+ - -0.8360528945922852
266
+ - -0.8374698758125305
267
+ - -0.8328713178634644
268
+ - -0.9081047177314758
269
+ - -0.9679695963859558
270
+ - -0.9587443470954895
271
+ - -1.0706337690353394
272
+ - -0.9818469285964966
273
+ - -0.8360191583633423
274
+ - -0.9938981533050537
275
+ - -1.0823708772659302
276
+ - -1.0617167949676514
277
+ - -1.1093820333480835
278
+ - -1.1300138235092163
279
+ - -1.2141350507736206
280
+ - -1.3147293329238892
281
+ spec_min:
282
+ - -4.473258972167969
283
+ - -4.244492530822754
284
+ - -4.390527725219727
285
+ - -4.209497928619385
286
+ - -4.446024417877197
287
+ - -4.3960185050964355
288
+ - -4.164802551269531
289
+ - -4.5063300132751465
290
+ - -4.608232021331787
291
+ - -4.251623630523682
292
+ - -4.4799604415893555
293
+ - -4.733210563659668
294
+ - -4.411860466003418
295
+ - -4.609100818634033
296
+ - -4.726972579956055
297
+ - -4.428761959075928
298
+ - -4.487612247467041
299
+ - -4.525552749633789
300
+ - -4.480506896972656
301
+ - -4.589383125305176
302
+ - -4.608384132385254
303
+ - -4.385376453399658
304
+ - -4.816161632537842
305
+ - -4.8706955909729
306
+ - -4.848956108093262
307
+ - -4.431278705596924
308
+ - -4.999994277954102
309
+ - -4.818373203277588
310
+ - -4.527368068695068
311
+ - -4.872085094451904
312
+ - -4.894851207733154
313
+ - -4.511948585510254
314
+ - -4.534575939178467
315
+ - -4.57792854309082
316
+ - -4.444681644439697
317
+ - -4.628803253173828
318
+ - -4.74341344833374
319
+ - -4.85427713394165
320
+ - -4.723776817321777
321
+ - -4.7166008949279785
322
+ - -4.749168395996094
323
+ - -4.67240047454834
324
+ - -4.590690612792969
325
+ - -4.576009750366211
326
+ - -4.542308330535889
327
+ - -4.890907287597656
328
+ - -4.613001823425293
329
+ - -4.494126796722412
330
+ - -4.474257946014404
331
+ - -4.574635028839111
332
+ - -4.4817585945129395
333
+ - -4.651009559631348
334
+ - -4.478254795074463
335
+ - -4.523812770843506
336
+ - -4.546536922454834
337
+ - -4.535660266876221
338
+ - -4.470296859741211
339
+ - -4.577486991882324
340
+ - -4.541748046875
341
+ - -4.428532123565674
342
+ - -4.461862564086914
343
+ - -4.489077091217041
344
+ - -4.515830039978027
345
+ - -4.395663738250732
346
+ - -4.439975738525391
347
+ - -4.4290876388549805
348
+ - -4.397741794586182
349
+ - -4.478252410888672
350
+ - -4.399686336517334
351
+ - -4.45617151260376
352
+ - -4.434477806091309
353
+ - -4.442898750305176
354
+ - -4.5840277671813965
355
+ - -4.537542819976807
356
+ - -4.492046356201172
357
+ - -4.534677505493164
358
+ - -4.477104187011719
359
+ - -4.511618614196777
360
+ - -4.387601375579834
361
+ - -4.499236106872559
362
+ - -4.3717169761657715
363
+ - -4.4242024421691895
364
+ - -4.4055657386779785
365
+ - -4.429355144500732
366
+ - -4.4636993408203125
367
+ - -4.508528232574463
368
+ - -4.515079498291016
369
+ - -4.426190376281738
370
+ - -4.433525085449219
371
+ - -4.4200215339660645
372
+ - -4.421280860900879
373
+ - -4.400143623352051
374
+ - -4.419166088104248
375
+ - -4.429825305938721
376
+ - -4.436781406402588
377
+ - -4.51550817489624
378
+ - -4.518474578857422
379
+ - -4.495880603790283
380
+ - -4.483924865722656
381
+ - -4.409562587738037
382
+ - -4.3811845779418945
383
+ - -4.411908149719238
384
+ - -4.427165985107422
385
+ - -4.396549701690674
386
+ - -4.340637683868408
387
+ - -4.405435085296631
388
+ - -4.367630481719971
389
+ - -4.419083595275879
390
+ - -4.389026165008545
391
+ - -4.371067047119141
392
+ - -4.370710372924805
393
+ - -4.3755269050598145
394
+ - -4.39500093460083
395
+ - -4.451773166656494
396
+ - -4.365351676940918
397
+ - -4.348028182983398
398
+ - -4.408270359039307
399
+ - -4.390385627746582
400
+ - -4.347931861877441
401
+ - -4.378237247467041
402
+ - -4.426717758178711
403
+ - -4.364233493804932
404
+ - -4.371546745300293
405
+ - -4.402477264404297
406
+ - -4.430750846862793
407
+ - -4.404538154602051
408
+ - -4.384459018707275
409
+ - -4.401677131652832
410
+ spk_cond_steps: []
411
+ stop_token_weight: 5.0
412
+ task_cls: training.task.SVC_task.SVCTask
413
+ test_ids: []
414
+ test_input_dir: ''
415
+ test_num: 0
416
+ test_prefixes:
417
+ - test
418
+ test_set_name: test
419
+ timesteps: 1000
420
+ train_set_name: train
421
+ use_crepe: false
422
+ use_denoise: false
423
+ use_energy_embed: false
424
+ use_gt_dur: false
425
+ use_gt_f0: false
426
+ use_midi: false
427
+ use_nsf: true
428
+ use_pitch_embed: true
429
+ use_pos_embed: true
430
+ use_spk_embed: false
431
+ use_spk_id: false
432
+ use_split_spk_id: false
433
+ use_uv: false
434
+ use_var_enc: false
435
+ use_vec: false
436
+ val_check_interval: 1000
437
+ valid_num: 0
438
+ valid_set_name: valid
439
+ vocoder: network.vocoders.nsf_hifigan.NsfHifiGAN
440
+ vocoder_ckpt: checkpoints/nsf_hifigan/model
441
+ warmup_updates: 2000
442
+ wav2spec_eps: 1e-6
443
+ weight_decay: 0
444
+ win_size: 2048
445
+ work_dir: checkpoints/HokoHifi
checkpoints/Unnamed/lightning_logs/lastest/hparams.yaml ADDED
@@ -0,0 +1 @@
1
+ {}
checkpoints/Unnamed/model_ckpt_steps_192000.ckpt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c441462923580893a6170dd00126084be0a20b387b1c4fb1860755acd36c881b
3
+ size 391390823
checkpoints/hubert/hubert_soft.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e82e7d079df05fe3aa535f6f7d42d309bdae1d2a53324e2b2386c56721f4f649
3
+ size 378435957
checkpoints/nsf_hifigan/NOTICE.txt ADDED
@@ -0,0 +1,74 @@
1
+ --- DiffSinger Community Vocoder ---
2
+
3
+ ARCHITECTURE: NSF-HiFiGAN
4
+ RELEASE DATE: 2022-12-11
5
+
6
+ HYPER PARAMETERS:
7
+ - 44100 sample rate
8
+ - 128 mel bins
9
+ - 512 hop size
10
+ - 2048 window size
11
+ - fmin at 40Hz
12
+ - fmax at 16000Hz
13
+
14
+
15
+ NOTICE:
16
+
17
+ All model weights in the [DiffSinger Community Vocoder Project](https://openvpi.github.io/vocoders/), including
18
+ model weights in this directory, are provided by the [OpenVPI Team](https://github.com/openvpi/), under the
19
+ [Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/) license.
20
+
21
+
22
+ ACKNOWLEDGEMENTS:
23
+
24
+ Training data of this vocoder is provided and permitted by the following organizations, societies and individuals:
25
+
26
+ 孙飒 https://www.qfssr.cn
27
+ 赤松_Akamatsu https://www.zhibin.club
28
+ 乐威 https://www.zhibin.club
29
+ 伯添 https://space.bilibili.com/24087011
30
+ 雲宇光 https://space.bilibili.com/660675050
31
+ 橙子言 https://space.bilibili.com/318486464
32
+ 人衣大人 https://space.bilibili.com/2270344
33
+ 玖蝶 https://space.bilibili.com/676771003
34
+ Yuuko
35
+ 白夜零BYL https://space.bilibili.com/1605040503
36
+ 嗷天 https://space.bilibili.com/5675252
37
+ 洛泠羽 https://space.bilibili.com/347373318
38
+ 灰条纹的灰猫君 https://space.bilibili.com/2083633
39
+ 幽寂 https://space.bilibili.com/478860
40
+ 恶魔王女 https://space.bilibili.com/2475098
41
+ AlexYHX 芮晴
42
+ 绮萱 https://y.qq.com/n/ryqq/singer/003HjD6H4aZn1K
43
+ 诗芸 https://y.qq.com/n/ryqq/singer/0005NInj142zm0
44
+ 汐蕾 https://y.qq.com/n/ryqq/singer/0023cWMH1Bq1PJ
45
+ 1262917464
46
+ 炜阳
47
+ 叶卡yolka
48
+ 幸の夏 https://space.bilibili.com/1017297686
49
+ 暮色未量 https://space.bilibili.com/272904686
50
+ 晓寞sama https://space.bilibili.com/3463394
51
+ 没头绪的节操君
52
+ 串串BunC https://space.bilibili.com/95817834
53
+ 落雨 https://space.bilibili.com/1292427
54
+ 长尾巴的翎艾 https://space.bilibili.com/1638666
55
+ 声闻计划 https://space.bilibili.com/392812269
56
+ 唐家大小姐 http://5sing.kugou.com/palmusic/default.html
57
+ 不伊子
58
+
59
+ Training machines are provided by:
60
+
61
+ 花儿不哭 https://space.bilibili.com/5760446
62
+
63
+
64
+ TERMS OF REDISTRIBUTIONS:
65
+
66
+ 1. Do not sell this vocoder, or charge any fees from redistributing it, as prohibited by
67
+ the license.
68
+ 2. Include a copy of the CC BY-NC-SA 4.0 license, or a link referring to it.
69
+ 3. Include a copy of this notice, or any other notices informing that this vocoder is
70
+ provided by the OpenVPI Team, that this vocoder is licensed under CC BY-NC-SA 4.0, and
71
+ with a complete acknowledgement list as shown above.
72
+ 4. If you fine-tuned or modified the weights, leave a notice about what has been changed.
73
+ 5. (Optional) Leave a link to the official release page of the vocoder, and tell users
74
+ that other versions and future updates of this vocoder can be obtained from the website.
checkpoints/nsf_hifigan/NOTICE.zh-CN.txt ADDED
@@ -0,0 +1,72 @@
1
+ --- DiffSinger 社区声码器 ---
2
+
3
+ 架构:NSF-HiFiGAN
4
+ 发布日期:2022-12-11
5
+
6
+ 超参数:
7
+ - 44100 sample rate
8
+ - 128 mel bins
9
+ - 512 hop size
10
+ - 2048 window size
11
+ - fmin at 40Hz
12
+ - fmax at 16000Hz
13
+
14
+
15
+ 注意事项:
16
+
17
+ [DiffSinger 社区声码器企划](https://openvpi.github.io/vocoders/) 中的所有模型权重,
18
+ 包括此目录下的模型权重,均由 [OpenVPI Team](https://github.com/openvpi/) 提供,并基于
19
+ [Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/)
20
+ 进行许可。
21
+
22
+
23
+ 致谢:
24
+
25
+ 此声码器的训练数据由以下组织、社团和个人提供并许可:
26
+
27
+ 孙飒 https://www.qfssr.cn
28
+ 赤松_Akamatsu https://www.zhibin.club
29
+ 乐威 https://www.zhibin.club
30
+ 伯添 https://space.bilibili.com/24087011
31
+ 雲宇光 https://space.bilibili.com/660675050
32
+ 橙子言 https://space.bilibili.com/318486464
33
+ 人衣大人 https://space.bilibili.com/2270344
34
+ 玖蝶 https://space.bilibili.com/676771003
35
+ Yuuko
36
+ 白夜零BYL https://space.bilibili.com/1605040503
37
+ 嗷天 https://space.bilibili.com/5675252
38
+ 洛泠羽 https://space.bilibili.com/347373318
39
+ 灰条纹的灰猫君 https://space.bilibili.com/2083633
40
+ 幽寂 https://space.bilibili.com/478860
41
+ 恶魔王女 https://space.bilibili.com/2475098
42
+ AlexYHX 芮晴
43
+ 绮萱 https://y.qq.com/n/ryqq/singer/003HjD6H4aZn1K
44
+ 诗芸 https://y.qq.com/n/ryqq/singer/0005NInj142zm0
45
+ 汐蕾 https://y.qq.com/n/ryqq/singer/0023cWMH1Bq1PJ
46
+ 1262917464
47
+ 炜阳
48
+ 叶卡yolka
49
+ 幸の夏 https://space.bilibili.com/1017297686
50
+ 暮色未量 https://space.bilibili.com/272904686
51
+ 晓寞sama https://space.bilibili.com/3463394
52
+ 没头绪的节操君
53
+ 串串BunC https://space.bilibili.com/95817834
54
+ 落雨 https://space.bilibili.com/1292427
55
+ 长尾巴的翎艾 https://space.bilibili.com/1638666
56
+ 声闻计划 https://space.bilibili.com/392812269
57
+ 唐家大小姐 http://5sing.kugou.com/palmusic/default.html
58
+ 不伊子
59
+
60
+ 训练算力的提供者如下:
61
+
62
+ 花儿不哭 https://space.bilibili.com/5760446
63
+
64
+
65
+ 二次分发条款:
66
+
67
+ 1. 请勿售卖此声码器或从其二次分发过程中收取任何费用,因为此类行为受到许可证的禁止。
68
+ 2. 请在二次分发文件中包含一份 CC BY-NC-SA 4.0 许可证的副本或指向该许可证的链接。
69
+ 3. 请在二次分发文件中包含这份声明,或以其他形式声明此声码器由 OpenVPI Team 提供并基于 CC BY-NC-SA 4.0 许可,
70
+ 并附带上述完整的致谢名单。
71
+ 4. 如果您微调或修改了权重,请留下一份关于其受到了何种修改的说明。
72
+ 5.(可选)留下一份指向此声码器的官方发布页面的链接,并告知使用者可从该网站获取此声码器的其他版本和未来的更新。
checkpoints/nsf_hifigan/config.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 4,
4
+ "batch_size": 10,
5
+ "learning_rate": 0.0002,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.999,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [ 8, 8, 2, 2, 2],
12
+ "upsample_kernel_sizes": [16,16, 4, 4, 4],
13
+ "upsample_initial_channel": 512,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+ "discriminator_periods": [3, 5, 7, 11, 17, 23, 37],
17
+
18
+ "segment_size": 16384,
19
+ "num_mels": 128,
20
+ "num_freq": 1025,
21
+ "n_fft" : 2048,
22
+ "hop_size": 512,
23
+ "win_size": 2048,
24
+
25
+ "sampling_rate": 44100,
26
+
27
+ "fmin": 40,
28
+ "fmax": 16000,
29
+ "fmax_for_loss": null,
30
+
31
+ "num_workers": 16,
32
+
33
+ "dist_config": {
34
+ "dist_backend": "nccl",
35
+ "dist_url": "tcp://localhost:54321",
36
+ "world_size": 1
37
+ }
38
+ }
ckpt.jpg ADDED
config.yaml ADDED
@@ -0,0 +1,349 @@
1
+ K_step: 1000
2
+ accumulate_grad_batches: 1
3
+ audio_num_mel_bins: 80
4
+ audio_sample_rate: 24000
5
+ binarization_args:
6
+ shuffle: false
7
+ with_align: true
8
+ with_f0: true
9
+ with_hubert: true
10
+ with_spk_embed: false
11
+ with_wav: false
12
+ binarizer_cls: preprocessing.SVCpre.SVCBinarizer
13
+ binary_data_dir: data/binary/atri
14
+ check_val_every_n_epoch: 10
15
+ choose_test_manually: false
16
+ clip_grad_norm: 1
17
+ config_path: training/config.yaml
18
+ content_cond_steps: []
19
+ cwt_add_f0_loss: false
20
+ cwt_hidden_size: 128
21
+ cwt_layers: 2
22
+ cwt_loss: l1
23
+ cwt_std_scale: 0.8
24
+ datasets:
25
+ - opencpop
26
+ debug: false
27
+ dec_ffn_kernel_size: 9
28
+ dec_layers: 4
29
+ decay_steps: 30000
30
+ decoder_type: fft
31
+ dict_dir: ''
32
+ diff_decoder_type: wavenet
33
+ diff_loss_type: l2
34
+ dilation_cycle_length: 4
35
+ dropout: 0.1
36
+ ds_workers: 4
37
+ dur_enc_hidden_stride_kernel:
38
+ - 0,2,3
39
+ - 0,2,3
40
+ - 0,1,3
41
+ dur_loss: mse
42
+ dur_predictor_kernel: 3
43
+ dur_predictor_layers: 5
44
+ enc_ffn_kernel_size: 9
45
+ enc_layers: 4
46
+ encoder_K: 8
47
+ encoder_type: fft
48
+ endless_ds: False
49
+ f0_bin: 256
50
+ f0_max: 1100.0
51
+ f0_min: 50.0
52
+ ffn_act: gelu
53
+ ffn_padding: SAME
54
+ fft_size: 512
55
+ fmax: 12000
56
+ fmin: 30
57
+ fs2_ckpt: ''
58
+ gaussian_start: true
59
+ gen_dir_name: ''
60
+ gen_tgt_spk_id: -1
61
+ hidden_size: 256
62
+ hop_size: 128
63
+ hubert_gpu: true
64
+ hubert_path: checkpoints/hubert/hubert_soft.pt
65
+ infer: false
66
+ keep_bins: 80
67
+ lambda_commit: 0.25
68
+ lambda_energy: 0.0
69
+ lambda_f0: 1.0
70
+ lambda_ph_dur: 0.3
71
+ lambda_sent_dur: 1.0
72
+ lambda_uv: 1.0
73
+ lambda_word_dur: 1.0
74
+ load_ckpt: ''
75
+ log_interval: 100
76
+ loud_norm: false
77
+ lr: 5.0e-05
78
+ max_beta: 0.02
79
+ max_epochs: 3000
80
+ max_eval_sentences: 1
81
+ max_eval_tokens: 60000
82
+ max_frames: 42000
83
+ max_input_tokens: 60000
84
+ max_sentences: 24
85
+ max_tokens: 128000
86
+ max_updates: 1000000
87
+ mel_loss: ssim:0.5|l1:0.5
88
+ mel_vmax: 1.5
89
+ mel_vmin: -6.0
90
+ min_level_db: -120
91
+ norm_type: gn
92
+ num_ckpt_keep: 10
93
+ num_heads: 2
94
+ num_sanity_val_steps: 1
95
+ num_spk: 1
96
+ num_test_samples: 0
97
+ num_valid_plots: 10
98
+ optimizer_adam_beta1: 0.9
99
+ optimizer_adam_beta2: 0.98
100
+ out_wav_norm: false
101
+ pe_ckpt: checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt
102
+ pe_enable: false
103
+ perform_enhance: true
104
+ pitch_ar: false
105
+ pitch_enc_hidden_stride_kernel:
106
+ - 0,2,5
107
+ - 0,2,5
108
+ - 0,2,5
109
+ pitch_extractor: parselmouth
110
+ pitch_loss: l2
111
+ pitch_norm: log
112
+ pitch_type: frame
113
+ pndm_speedup: 10
114
+ pre_align_args:
115
+ allow_no_txt: false
116
+ denoise: false
117
+ forced_align: mfa
118
+ txt_processor: zh_g2pM
119
+ use_sox: true
120
+ use_tone: false
121
+ pre_align_cls: data_gen.singing.pre_align.SingingPreAlign
122
+ predictor_dropout: 0.5
123
+ predictor_grad: 0.1
124
+ predictor_hidden: -1
125
+ predictor_kernel: 5
126
+ predictor_layers: 5
127
+ prenet_dropout: 0.5
128
+ prenet_hidden_size: 256
129
+ pretrain_fs_ckpt: pretrain/nyaru/model_ckpt_steps_60000.ckpt
130
+ processed_data_dir: xxx
131
+ profile_infer: false
132
+ raw_data_dir: data/raw/atri
133
+ ref_norm_layer: bn
134
+ rel_pos: true
135
+ reset_phone_dict: true
136
+ residual_channels: 256
137
+ residual_layers: 20
138
+ save_best: false
139
+ save_ckpt: true
140
+ save_codes:
141
+ - configs
142
+ - modules
143
+ - src
144
+ - utils
145
+ save_f0: true
146
+ save_gt: false
147
+ schedule_type: linear
148
+ seed: 1234
149
+ sort_by_len: true
150
+ speaker_id: atri
151
+ spec_max:
152
+ - 0.2987259328365326
153
+ - 0.29721200466156006
154
+ - 0.23978209495544434
155
+ - 0.208412766456604
156
+ - 0.25777050852775574
157
+ - 0.2514476478099823
158
+ - 0.1129382848739624
159
+ - 0.03415697440505028
160
+ - 0.09860049188137054
161
+ - 0.10637332499027252
162
+ - 0.13287633657455444
163
+ - 0.19744250178337097
164
+ - 0.10040587931871414
165
+ - 0.13735432922840118
166
+ - 0.15107455849647522
167
+ - 0.17196381092071533
168
+ - 0.08298977464437485
169
+ - 0.0632769986987114
170
+ - 0.02723858878016472
171
+ - -0.001819317927584052
172
+ - -0.029565516859292984
173
+ - -0.023574354127049446
174
+ - -0.01633293740451336
175
+ - 0.07143621146678925
176
+ - 0.021580500528216362
177
+ - 0.07257916033267975
178
+ - -0.024349519982933998
179
+ - -0.06165708228945732
180
+ - -0.10486568510532379
181
+ - -0.1363687664270401
182
+ - -0.13333871960639954
183
+ - -0.13955898582935333
184
+ - -0.16613495349884033
185
+ - -0.17636367678642273
186
+ - -0.2786925733089447
187
+ - -0.22967253625392914
188
+ - -0.31897130608558655
189
+ - -0.18007366359233856
190
+ - -0.29366692900657654
191
+ - -0.2871025800704956
192
+ - -0.36748355627059937
193
+ - -0.46071451902389526
194
+ - -0.5464922189712524
195
+ - -0.5719417333602905
196
+ - -0.6020897626876831
197
+ - -0.6239874958992004
198
+ - -0.5653440952301025
199
+ - -0.6508013606071472
200
+ - -0.628247857093811
201
+ - -0.6809687614440918
202
+ - -0.569259762763977
203
+ - -0.5423558354377747
204
+ - -0.5811785459518433
205
+ - -0.5359002351760864
206
+ - -0.6565515398979187
207
+ - -0.7143737077713013
208
+ - -0.8502675890922546
209
+ - -0.7979224920272827
210
+ - -0.7110578417778015
211
+ - -0.763409435749054
212
+ - -0.7984790802001953
213
+ - -0.6927220821380615
214
+ - -0.658117413520813
215
+ - -0.7486468553543091
216
+ - -0.5949879884719849
217
+ - -0.7494576573371887
218
+ - -0.7400822639465332
219
+ - -0.6822793483734131
220
+ - -0.7773582339286804
221
+ - -0.661201536655426
222
+ - -0.791329026222229
223
+ - -0.8982341885566711
224
+ - -0.8736728429794312
225
+ - -0.7701027393341064
226
+ - -0.8490535616874695
227
+ - -0.7479292154312134
228
+ - -0.9320166110992432
229
+ - -1.2862414121627808
230
+ - -2.8936190605163574
231
+ - -2.924229860305786
232
+ spec_min:
233
+ - -6.0
234
+ - -6.0
235
+ - -6.0
236
+ - -6.0
237
+ - -6.0
238
+ - -6.0
239
+ - -6.0
240
+ - -6.0
241
+ - -6.0
242
+ - -6.0
243
+ - -6.0
244
+ - -6.0
245
+ - -6.0
246
+ - -6.0
247
+ - -6.0
248
+ - -6.0
249
+ - -6.0
250
+ - -6.0
251
+ - -6.0
252
+ - -6.0
253
+ - -6.0
254
+ - -6.0
255
+ - -6.0
256
+ - -6.0
257
+ - -6.0
258
+ - -6.0
259
+ - -6.0
260
+ - -6.0
261
+ - -6.0
262
+ - -6.0
263
+ - -6.0
264
+ - -6.0
265
+ - -6.0
266
+ - -6.0
267
+ - -6.0
268
+ - -6.0
269
+ - -6.0
270
+ - -6.0
271
+ - -6.0
272
+ - -6.0
273
+ - -6.0
274
+ - -6.0
275
+ - -6.0
276
+ - -6.0
277
+ - -6.0
278
+ - -6.0
279
+ - -6.0
280
+ - -6.0
281
+ - -6.0
282
+ - -6.0
283
+ - -6.0
284
+ - -6.0
285
+ - -6.0
286
+ - -6.0
287
+ - -6.0
288
+ - -6.0
289
+ - -6.0
290
+ - -6.0
291
+ - -6.0
292
+ - -6.0
293
+ - -6.0
294
+ - -6.0
295
+ - -6.0
296
+ - -6.0
297
+ - -6.0
298
+ - -6.0
299
+ - -6.0
300
+ - -5.999454021453857
301
+ - -5.8822431564331055
302
+ - -5.892064571380615
303
+ - -5.882402420043945
304
+ - -5.786972522735596
305
+ - -5.746835231781006
306
+ - -5.8594512939453125
307
+ - -5.7389445304870605
308
+ - -5.718059539794922
309
+ - -5.779720306396484
310
+ - -5.801984786987305
311
+ - -6.0
312
+ - -6.0
313
+ spk_cond_steps: []
314
+ stop_token_weight: 5.0
315
+ task_cls: training.task.SVC_task.SVCTask
316
+ test_ids: []
317
+ test_input_dir: ''
318
+ test_num: 0
319
+ test_prefixes:
320
+ - test
321
+ test_set_name: test
322
+ timesteps: 1000
323
+ train_set_name: train
324
+ use_crepe: true
325
+ use_denoise: false
326
+ use_energy_embed: false
327
+ use_gt_dur: false
328
+ use_gt_f0: false
329
+ use_midi: false
330
+ use_nsf: true
331
+ use_pitch_embed: true
332
+ use_pos_embed: true
333
+ use_spk_embed: false
334
+ use_spk_id: false
335
+ use_split_spk_id: false
336
+ use_uv: false
337
+ use_var_enc: false
338
+ use_vec: false
339
+ val_check_interval: 2000
340
+ valid_num: 0
341
+ valid_set_name: valid
342
+ vocoder: network.vocoders.hifigan.HifiGAN
343
+ vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128
344
+ warmup_updates: 2000
345
+ wav2spec_eps: 1e-6
346
+ weight_decay: 0
347
+ win_size: 512
348
+ work_dir: checkpoints/atri
349
+ no_fs2: false
doc/train_and_inference.markdown ADDED
@@ -0,0 +1,210 @@
1
+ # Diff-SVC (train/inference by yourself)
2
+ ## 0. Environment setup
3
+ >Note: the requirements files have been updated and now come in 3 versions; pick whichever suits you.\
4
+ 1. requirements.txt is the original full environment this repo was tested with, torch 1.12.1+cu113; you can pip install it directly, or delete the pytorch-related entries (torch/torchvision) first and use your own torch environment
5
+ ```
6
+ pip install -r requirements.txt
7
+ ```
8
+ >2. (Recommended) requirements_short.txt is a hand-curated version of the environment above, without torch itself; you can also simply run
9
+ ```
10
+ pip install -r requirements_short.txt
11
+ ```
12
+ >3. The repo root also holds requirements.png, a dependency list compiled by @三千 that was verified on one brand of cloud server. Its torch version is no longer compatible with the current code, but the other package versions are a useful reference. Many thanks.
13
+
14
+ ## 1. Inference
15
+ >Use inference.ipynb in the repo root for inference, or use infer.py as adapted by @小狼.\
16
+ In the first block, edit the following parameters:
17
+ ```
18
+ config_path='location of config.yaml from the checkpoints archive'
19
+ e.g. './checkpoints/nyaru/config.yaml'
20
+ config and checkpoints correspond one-to-one; do not use any other config
21
+
22
+ project_name='name of this project'
23
+ e.g. 'nyaru'
24
+
25
+ model_path='full path of the ckpt file'
26
+ e.g. './checkpoints/nyaru/model_ckpt_steps_112000.ckpt'
27
+
28
+ hubert_gpu=True
29
+ whether to run hubert (one module of the model) on the GPU at inference time; the rest of the model is unaffected
30
+ the current version has cut hubert's GPU memory use substantially; full inference fits on a 1060 6G card, so there is no longer any need to turn it off.
31
+ long audio is now sliced automatically (in both the ipynb and infer.py): audio over 30s is split at silent points; thanks to @小狼 for the code
32
+
33
+ ```
34
+ ### Tunable parameters:
35
+ ```
36
+ wav_fn='xxx.wav' #path of the input audio, defaults to the project root
37
+
38
+ use_crepe=True
39
+ #crepe is an F0 algorithm with good quality but low speed; set it to False to use parselmouth, slightly worse than crepe but faster
40
+
41
+ thre=0.05
42
+ #crepe's noise-filtering threshold; raise it a little if the source audio is clean, keep or lower it if the audio is noisy; it has no effect once the option above is set to False
43
+
44
+ pndm_speedup=20
45
+ #speedup factor of the accelerated inference algorithm; the default is 1000 steps, so 10 here means synthesizing with only 100 steps. 20 is a middle-of-the-road value; it can go up to 50x (20-step synthesis) without obvious quality loss, beyond which the loss may become noticeable. Note: if use_gt_mel below is enabled, keep this value smaller than add_noise_step, and preferably a divisor of it
46
+
47
+ key=0
48
+ #transposition parameter, default 0 (not 1!!); the source audio is raised by key semitones before synthesis, e.g. for male-to-female conversion use 8 or 12 (12 is a full octave up)
49
+
50
+ use_pe=True
51
+ #F0 extraction algorithm used when synthesizing audio from the mel spectrogram; set it to False to use the source audio's F0\
52
+ True and False give slightly different results; True is usually a bit better, though not always, and it has almost no effect on synthesis speed\
53
+ (it can be chosen freely no matter what key is set to; they do not interact)\
54
+ not supported at 44.1kHz and disabled there automatically; leaving it on simply does nothing, without raising an error
55
+
56
+ use_gt_mel=False
57
+ #similar to the image-to-image feature of AI image generation: if enabled, the output audio is a blend of the input voice and the target speaker's voice, with the ratio set by the next parameter
58
+ Warning!!!: if you set this to True, make sure key is 0; transposition is not supported
59
+
60
+ add_noise_step=500
61
+ #works together with the previous parameter and controls the ratio of the two voices: 1 is entirely the source voice, 1000 entirely the target voice; a roughly even blend sits around 300 (the scale is not linear). If this is set very low, you can lower the pndm speedup factor to raise synthesis quality
62
+
63
+ wav_gen='yyy.wav' #path of the output audio, defaults to the project root; change the extension to change the saved file type
64
+ ```
65
+ If you use infer.py instead, the edits are similar: modify the part under __name__=='__main__', then run from the repo root\
66
+ python infer.py\
67
+ With this method, put the source audio into raw and look for the results in results (a small programmatic sketch follows below)
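For reference, here is a minimal sketch of driving the infer.py flow above programmatically. It is an illustration, not repo code: the project name, checkpoint step and file names are placeholders, and `out_path` is passed explicitly so the call does not depend on the script's module-level globals.
```
# Sketch only: placeholders below must be replaced with your own project/files.
from infer_tools import infer_tool
from infer_tools.infer_tool import Svc
from infer import run_clip

project_name = "nyaru"  # placeholder
model = Svc(project_name,
            f"./checkpoints/{project_name}/config.yaml",
            hubert_gpu=True,
            model_path=f"./checkpoints/{project_name}/model_ckpt_steps_112000.ckpt")

infer_tool.mkdir(["./raw", "./results"])  # the input wav must sit in ./raw
run_clip(model, key=0, acc=20, use_pe=True, use_crepe=True, thre=0.05,
         use_gt_mel=False, add_noise_step=500,
         f_name="xxx.wav", project_name=project_name,
         out_path="./results/xxx_out.wav")
```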
68
+ ## 2. Data preprocessing and training
69
+ ### 2.1 Preparing the data
70
+ >wav and ogg audio data are currently supported; a sample rate above 24kHz is best, and sample rate and channel layout are handled automatically. The sample rate must not be below 16kHz (it usually isn't)\
71
+ The audio should be sliced into short clips, ideally 5-15s each; there is no hard length requirement, but avoid clips that are much longer or shorter. The audio must be the target speaker's dry vocals only, with no background music or other voices, and preferably without heavy reverb. If you run vocal separation or similar processing, keep the processed audio quality as high as possible.\
72
+ Only single-speaker training is supported for now; aim for a total of 3h or more. No extra annotations are needed: just put the audio files under the raw_data_dir described below. The directory structure inside is up to you; the program finds the files on its own. (A small sanity-check sketch follows below.)
73
+
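Before moving on, you may want to sanity-check the clips. The helper below is a small sketch, not part of the repo; it assumes soundfile from requirements.txt and a hypothetical raw_data_dir of data/raw/nyaru.
```
# Sketch: flag clips whose length or sample rate falls outside the guidelines.
import pathlib
import soundfile as sf

raw_dir = pathlib.Path("data/raw/nyaru")  # placeholder raw_data_dir
for f in sorted(list(raw_dir.rglob("*.wav")) + list(raw_dir.rglob("*.ogg"))):
    info = sf.info(str(f))
    dur = info.frames / info.samplerate
    if not 5 <= dur <= 15 or info.samplerate < 16000:
        print(f"check {f.name}: {dur:.1f}s @ {info.samplerate}Hz")
```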
74
+ ### 2.2 Editing the hyperparameter config
75
+ >First make a backup of config.yaml (this file targets the 24kHz vocoder; for the 44.1kHz vocoder use config_nsf.yaml), then edit it\
76
+ The parameters you are likely to need are listed below (using nyaru as the project name):
77
+ ```
78
+ K_step: 1000
79
+ #total number of diffusion steps; changing it is not recommended
80
+
81
+ binary_data_dir: data/binary/nyaru
82
+ where the preprocessed data is stored: change the suffix to your project name
83
+
84
+ config_path: training/config.yaml
85
+ the path of this yaml file itself; preprocessing writes data into it, so this must be the full path where this yaml file will actually live
86
+
87
+ choose_test_manually: false
88
+ manual test-set selection, off by default; 5 audio files are drawn at random as the test set.
89
+ If set to true, fill test_prefixes: with filename prefixes of the test data; files whose names start with those prefixes are used as the test set
90
+ This is a list and may hold several prefixes, e.g.:
91
+ test_prefixes:
92
+ - test
93
+ - aaaa
94
+ - 5012
95
+ - speaker1024
96
+ Important: the test set must *not* be empty; to avoid unexpected side effects, it is best not to pick the test set manually
97
+
98
+ endless_ds: false
99
+ if your dataset is very small and each epoch is very short, turn this on to count 1000 normal epochs as one epoch
100
+
101
+ hubert_path: checkpoints/hubert/hubert.pt
102
+ where the hubert model is stored; make sure this path is correct. It is normally right as-is after unpacking the checkpoints archive and needs no change; inference now uses the torch version
103
+ hubert_gpu: true
104
+ whether to run hubert (one module of the model) on the GPU during preprocessing; with it off, hubert runs on the CPU and takes much longer. Whether hubert uses the GPU at inference time after training is controlled separately in inference and is unaffected by this. Since hubert moved to the torch version, preprocessing fits on a 1060 6G GPU, and directly inferring audio under 1 minute stays within its memory, so there is normally no need to turn it off.
105
+
106
+ lr: 0.0008
107
+ #initial learning rate: this value corresponds to a batch size of 88; with a smaller batch size you can lower it somewhat
108
+
109
+ decay_steps: 20000
110
+ the learning rate halves every 20000 steps; if your batch size is fairly small, increase this value
111
+
112
+ #for a batch size around 30-40, lr=0.0004 and decay_steps=40000 are recommended
113
+
114
+ max_frames: 42000
115
+ max_input_tokens: 6000
116
+ max_sentences: 88
117
+ max_tokens: 128000
118
+ #the batch size is computed dynamically from these parameters; if you are not sure what they mean, change only max_sentences, the hard cap on the batch size, to avoid running out of GPU memory (see the sketch after this config block)
119
+
120
+ pe_ckpt: checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt
121
+ #path of the pe model; make sure this file exists. See the inference section for what it does
122
+
123
+ raw_data_dir: data/raw/nyaru
124
+ #where the raw data lives before preprocessing; put your raw wav data in this directory. The internal file structure does not matter; it is traversed automatically
125
+
126
+ residual_channels: 384
127
+ residual_layers: 20
128
+ #a pair of parameters controlling the size of the core network: larger means more parameters and slower training, but not necessarily better results. With a somewhat larger dataset you can raise the first to 512. Experiment if you like, but leave them alone if you are unsure.
129
+
130
+ speaker_id: nyaru
131
+ #name of the speaker being trained; only single-speaker is supported for now, fill it in here (decorative only; this parameter has no practical effect)
132
+
133
+ use_crepe: true
134
+ #use crepe to extract F0 during data preprocessing; keep it on for quality, turn it off for speed
135
+
136
+ val_check_interval: 2000
137
+ #run inference on the test set and save a ckpt every 2000 steps
138
+
139
+ vocoder_ckpt: checkpoints/0109_hifigan_bigpopcs_hop128
140
+ #at 24kHz this is the directory of the matching vocoder; at 44.1kHz it is the matching vocoder's file name; take care not to mix them up
141
+
142
+ work_dir: checkpoints/nyaru
143
+ #change the suffix to your project name (you can also delete it or leave it entirely empty to auto-generate, but do not fill in something arbitrary)
144
+ no_fs2: true
145
+ #slims down the network encoder, shrinking the model and speeding up training, with no direct evidence so far that it hurts network performance. On by default
146
+
147
+ ```
148
+ >If you do not know what a parameter does, do not change it, even if its name makes you think you do.
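To make the interplay of the four max_* limits above concrete, here is an illustration of the idea only — an assumption about how such caps typically combine, not the repo's actual batch-sampler code; `effective_batch` and its arguments are hypothetical names.
```
# Illustration: the effective batch is whatever the tightest cap allows.
def effective_batch(n_items: int, tokens_per_item: int, frames_per_item: int,
                    max_sentences: int = 88, max_tokens: int = 128000,
                    max_frames: int = 42000) -> int:
    by_tokens = max_tokens // max(tokens_per_item, 1)
    by_frames = max_frames // max(frames_per_item, 1)
    return min(n_items, max_sentences, by_tokens, by_frames)

# Long clips (many tokens/frames per item) shrink the batch below max_sentences:
print(effective_batch(n_items=200, tokens_per_item=2000, frames_per_item=600))  # 64
```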
149
+
150
+ ### 2.3 Data preprocessing
151
+ Run the following commands from the diff-svc directory:\
152
+ #windows
153
+ ```
154
+ set PYTHONPATH=.
155
+ set CUDA_VISIBLE_DEVICES=0
156
+ python preprocessing/binarize.py --config training/config.yaml
157
+ ```
158
+ #linux
159
+ ```
160
+ export PYTHONPATH=.
161
+ CUDA_VISIBLE_DEVICES=0 python preprocessing/binarize.py --config training/config.yaml
162
+ ```
163
+ For preprocessing, @小狼 has prepared code that can process hubert and the other features in separate passes; if normal processing runs out of GPU memory, you can first run python ./network/hubert/hubert_model.py
164
+ and then run the normal command; hubert features prepared in advance are recognized automatically
165
+ ### 2.4 Training
166
+ #windows
167
+ ```
168
+ set CUDA_VISIBLE_DEVICES=0
169
+ python run.py --config training/config.yaml --exp_name nyaru --reset
170
+ ```
171
+ #linux
172
+ ```
173
+ CUDA_VISIBLE_DEVICES=0 python run.py --config training/config.yaml --exp_name nyaru --reset
174
+ ```
175
+ >Change exp_name to your project name and adjust the config path, making sure it is the same config file used for preprocessing\
176
+ *Important*: after training finishes, if preprocessing was not done locally, download not only the matching ckpt file but also the config file, and use that downloaded config for inference; do not use the copy you previously uploaded, because preprocessing writes content into the config. The config used at inference must be the same one used for preprocessing.
177
+
178
+
179
+ ### 2.5 Problems you may run into:
180
+ >2.5.1 'Upsample' object has no attribute 'recompute_scale_factor'\
181
+ This issue was observed with the torch build for cuda 11.3. If you hit it, locate torch.nn.modules.upsampling.py among your python dependencies by a suitable means (e.g. your IDE's go-to-definition); in a conda environment it is at your conda directory\envs\<environment>\Lib\site-packages\torch\nn\modules\upsampling.py. Change its lines 153-154 from
182
+ ```
183
+ return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners,recompute_scale_factor=self.recompute_scale_factor)
184
+ ```
185
+ >to
186
+ ```
187
+ return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners)
188
+ # recompute_scale_factor=self.recompute_scale_factor)
189
+ ```
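As an alternative to editing site-packages by hand, the missing attribute can be patched in at runtime before training starts. This workaround is a suggestion of ours, not part of the repo; it mirrors the edit above by defaulting a missing `recompute_scale_factor` to None.
```
# Suggested workaround sketch: patch old Upsample modules at runtime instead
# of editing torch/nn/modules/upsampling.py in site-packages.
import torch.nn.modules.upsampling as upsampling

_orig_forward = upsampling.Upsample.forward

def _patched_forward(self, x):
    if not hasattr(self, "recompute_scale_factor"):
        self.recompute_scale_factor = None  # same effect as the manual edit
    return _orig_forward(self, x)

upsampling.Upsample.forward = _patched_forward
```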
190
+ >2.5.2 no module named 'utils'\
191
+ Set this up in your runtime environment (e.g. a colab notebook) as follows:
192
+ ```
193
+ import os
194
+ os.environ['PYTHONPATH']='.'
195
+ !CUDA_VISIBLE_DEVICES=0 python preprocessing/binarize.py --config training/config.yaml
196
+ ```
197
+ Make sure to run this from the root of the project folder
198
+ >2.5.3 cannot load library 'libsndfile.so'\
199
+ An error you may hit on linux; run the following command
200
+ ```
201
+ apt-get install libsndfile1 -y
202
+ ```
203
+ >2.5.4 cannot load import 'consume_prefix_in_state_dict_if_present'\
204
+ Your torch version is too old; switch to a newer torch
205
+
206
+ >2.5.5 Data preprocessing is very slow\
207
+ Check whether use_crepe is enabled in the config; turning it off speeds things up considerably.\
208
+ Check whether hubert_gpu is enabled in the config.
209
+
210
+ For any other problems, join the QQ channel or the discord channel and ask there.
flask_api.py ADDED
@@ -0,0 +1,54 @@
1
+ import io
2
+ import logging
3
+
4
+ import librosa
5
+ import soundfile
6
+ from flask import Flask, request, send_file
7
+ from flask_cors import CORS
8
+
9
+ from infer_tools.infer_tool import Svc
10
+ from utils.hparams import hparams
11
+
12
+ app = Flask(__name__)
13
+
14
+ CORS(app)
15
+
16
+ logging.getLogger('numba').setLevel(logging.WARNING)
17
+
18
+
19
+ @app.route("/voiceChangeModel", methods=["POST"])
20
+ def voice_change_model():
21
+ request_form = request.form
22
+ wave_file = request.files.get("sample", None)
23
+ # pitch-shift amount
24
+ f_pitch_change = float(request_form.get("fPitchChange", 0))
25
+ # sample rate required by the DAW
26
+ daw_sample = int(float(request_form.get("sampleRate", 0)))
27
+ speaker_id = int(float(request_form.get("sSpeakId", 0)))
28
+ # get the wav file from the HTTP request and wrap it
29
+ input_wav_path = io.BytesIO(wave_file.read())
30
+ # model inference
31
+ _f0_tst, _f0_pred, _audio = model.infer(input_wav_path, key=f_pitch_change, acc=accelerate, use_pe=False,
32
+ use_crepe=False)
33
+ tar_audio = librosa.resample(_audio, hparams["audio_sample_rate"], daw_sample)
34
+ # return the audio
35
+ out_wav_path = io.BytesIO()
36
+ soundfile.write(out_wav_path, tar_audio, daw_sample, format="wav")
37
+ out_wav_path.seek(0)
38
+ return send_file(out_wav_path, download_name="temp.wav", as_attachment=True)
39
+
40
+
41
+ if __name__ == '__main__':
42
+ # project folder name, the one used during training
43
+ project_name = "firefox"
44
+ model_path = f'./checkpoints/{project_name}/model_ckpt_steps_188000.ckpt'
45
+ config_path = f'./checkpoints/{project_name}/config.yaml'
46
+
47
+ # speedup factor
48
+ accelerate = 50
49
+ hubert_gpu = True
50
+
51
+ model = Svc(project_name, config_path, hubert_gpu, model_path)
52
+
53
+ # this matches the VST plugin; changing it is not recommended
54
+ app.run(port=6842, host="0.0.0.0", debug=False, threaded=False)
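For reference, a minimal client sketch for the endpoint above. It is an illustration, not part of the repo: it assumes the requests package, a server running locally on the port set above, and a placeholder input file sample.wav.
```
# Sketch: post a wav to the running flask server and save the converted audio.
import requests

with open("sample.wav", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:6842/voiceChangeModel",
        files={"sample": ("sample.wav", f, "audio/wav")},
        data={"fPitchChange": 0, "sampleRate": 44100, "sSpeakId": 0},
    )
resp.raise_for_status()
with open("out.wav", "wb") as out:
    out.write(resp.content)
```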
infer.py ADDED
@@ -0,0 +1,98 @@
1
+ import io
2
+ import time
3
+ from pathlib import Path
4
+
5
+ import librosa
6
+ import numpy as np
7
+ import soundfile
8
+
9
+ from infer_tools import infer_tool
10
+ from infer_tools import slicer
11
+ from infer_tools.infer_tool import Svc
12
+ from utils.hparams import hparams
13
+
14
+ chunks_dict = infer_tool.read_temp("./infer_tools/new_chunks_temp.json")
15
+
16
+
17
+ def run_clip(svc_model, key, acc, use_pe, use_crepe, thre, use_gt_mel, add_noise_step, project_name='', f_name=None,
18
+ file_path=None, out_path=None, slice_db=-40,**kwargs):
19
+ print(f'code version:2022-12-04')
20
+ use_pe = use_pe if hparams['audio_sample_rate'] == 24000 else False
21
+ if file_path is None:
22
+ raw_audio_path = f"./raw/{f_name}"
23
+ clean_name = f_name[:-4]
24
+ else:
25
+ raw_audio_path = file_path
26
+ clean_name = str(Path(file_path).name)[:-4]
27
+ infer_tool.format_wav(raw_audio_path)
28
+ wav_path = Path(raw_audio_path).with_suffix('.wav')
29
+ global chunks_dict
30
+ audio, sr = librosa.load(wav_path, mono=True,sr=None)
31
+ wav_hash = infer_tool.get_md5(audio)
32
+ if wav_hash in chunks_dict.keys():
33
+ print("load chunks from temp")
34
+ chunks = chunks_dict[wav_hash]["chunks"]
35
+ else:
36
+ chunks = slicer.cut(wav_path, db_thresh=slice_db)
37
+ chunks_dict[wav_hash] = {"chunks": chunks, "time": int(time.time())}
38
+ infer_tool.write_temp("./infer_tools/new_chunks_temp.json", chunks_dict)
39
+ audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
40
+
41
+ count = 0
42
+ f0_tst = []
43
+ f0_pred = []
44
+ audio = []
45
+ for (slice_tag, data) in audio_data:
46
+ print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
47
+ length = int(np.ceil(len(data) / audio_sr * hparams['audio_sample_rate']))
48
+ raw_path = io.BytesIO()
49
+ soundfile.write(raw_path, data, audio_sr, format="wav")
50
+ if hparams['debug']:
51
+ print(np.mean(data), np.var(data))
52
+ raw_path.seek(0)
53
+ if slice_tag:
54
+ print('jump empty segment')
55
+ _f0_tst, _f0_pred, _audio = (
56
+ np.zeros(int(np.ceil(length / hparams['hop_size']))), np.zeros(int(np.ceil(length / hparams['hop_size']))),
57
+ np.zeros(length))
58
+ else:
59
+ _f0_tst, _f0_pred, _audio = svc_model.infer(raw_path, key=key, acc=acc, use_pe=use_pe, use_crepe=use_crepe,
60
+ thre=thre, use_gt_mel=use_gt_mel, add_noise_step=add_noise_step)
61
+ fix_audio = np.zeros(length)
62
+ fix_audio[:] = np.mean(_audio)
63
+ fix_audio[:len(_audio)] = _audio[0 if len(_audio)<len(fix_audio) else len(_audio)-len(fix_audio):]
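+ # the three lines above pad the segment to `length` samples with the mean value, then copy the generated audio in (keeping only its tail if it came out longer)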
64
+ f0_tst.extend(_f0_tst)
65
+ f0_pred.extend(_f0_pred)
66
+ audio.extend(list(fix_audio))
67
+ count += 1
68
+ if out_path is None:
69
+ out_path = f'./results/{clean_name}_{key}key_{project_name}_{hparams["residual_channels"]}_{hparams["residual_layers"]}_{int(step / 1000)}k_{accelerate}x.{kwargs["format"]}'
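+ # note: `step` and `accelerate` here are module-level globals set under __main__ below; pass out_path explicitly to avoid relying on them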
70
+ soundfile.write(out_path, audio, hparams["audio_sample_rate"], 'PCM_16',format=out_path.split('.')[-1])
71
+ return np.array(f0_tst), np.array(f0_pred), audio
72
+
73
+
74
+ if __name__ == '__main__':
75
+ # project folder name, the one used during training
76
+ project_name = "yilanqiu"
77
+ model_path = f'./checkpoints/{project_name}/model_ckpt_steps_246000.ckpt'
78
+ config_path = f'./checkpoints/{project_name}/config.yaml'
79
+
80
+ # multiple wav/ogg files are supported; put them in the raw folder, with extensions
81
+ file_names = ["青花瓷.wav"]
82
+ trans = [0] # pitch shift in semitones, positive or negative; one entry per file above, padded with the first value when shorter
83
+ # speedup factor
84
+ accelerate = 20
85
+ hubert_gpu = True
86
+ format='flac'
87
+ step = int(model_path.split("_")[-1].split(".")[0])
88
+
89
+ # do not change anything below
90
+ infer_tool.mkdir(["./raw", "./results"])
91
+ infer_tool.fill_a_to_b(trans, file_names)
92
+
93
+ model = Svc(project_name, config_path, hubert_gpu, model_path)
94
+ for f_name, tran in zip(file_names, trans):
95
+ if "." not in f_name:
96
+ f_name += ".wav"
97
+ run_clip(model, key=tran, acc=accelerate, use_crepe=True, thre=0.05, use_pe=True, use_gt_mel=False,
98
+ add_noise_step=500, f_name=f_name, project_name=project_name, format=format)
infer_tools/__init__.py ADDED
File without changes
infer_tools/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (163 Bytes). View file
 
infer_tools/__pycache__/infer_tool.cpython-38.pyc ADDED
Binary file (12 kB). View file
 
infer_tools/__pycache__/slicer.cpython-38.pyc ADDED
Binary file (4.72 kB). View file
 
infer_tools/f0_temp.json ADDED
The diff for this file is too large to render. See raw diff
 
infer_tools/infer_tool.py ADDED
@@ -0,0 +1,342 @@
1
+ import hashlib
2
+ import json
3
+ import os
4
+ import time
5
+ from io import BytesIO
6
+ from pathlib import Path
7
+
8
+ import librosa
9
+ import numpy as np
10
+ import soundfile
11
+ import torch
12
+
13
+ import utils
14
+ from modules.fastspeech.pe import PitchExtractor
15
+ from network.diff.candidate_decoder import FFT
16
+ from network.diff.diffusion import GaussianDiffusion
17
+ from network.diff.net import DiffNet
18
+ from network.vocoders.base_vocoder import VOCODERS, get_vocoder_cls
19
+ from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe, get_pitch_world
20
+ from preprocessing.hubertinfer import Hubertencoder
21
+ from utils.hparams import hparams, set_hparams
22
+ from utils.pitch_utils import denorm_f0, norm_interp_f0
23
+
24
+ if os.path.exists("chunks_temp.json"):
25
+ os.remove("chunks_temp.json")
26
+
27
+ def read_temp(file_name):
28
+ if not os.path.exists(file_name):
29
+ with open(file_name, "w") as f:
30
+ f.write(json.dumps({"info": "temp_dict"}))
31
+ return {}
32
+ else:
33
+ try:
34
+ with open(file_name, "r") as f:
35
+ data = f.read()
36
+ data_dict = json.loads(data)
37
+ if os.path.getsize(file_name) > 50 * 1024 * 1024:
38
+ f_name = file_name.split("/")[-1]
39
+ print(f"clean {f_name}")
40
+ for wav_hash in list(data_dict.keys()):
41
+ if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600:
42
+ del data_dict[wav_hash]
43
+ except Exception as e:
44
+ print(e)
45
+ print(f"{file_name} error, rebuilding the file automatically")
46
+ data_dict = {"info": "temp_dict"}
47
+ return data_dict
48
+
49
+
50
+ f0_dict = read_temp("./infer_tools/f0_temp.json")
51
+
52
+
53
+ def write_temp(file_name, data):
54
+ with open(file_name, "w") as f:
55
+ f.write(json.dumps(data))
56
+
57
+
58
+ def timeit(func):
59
+ def run(*args, **kwargs):
60
+ t = time.time()
61
+ res = func(*args, **kwargs)
62
+ print('executing \'%s\' cost %.3fs' % (func.__name__, time.time() - t))
63
+ return res
64
+
65
+ return run
66
+
67
+
68
+ def format_wav(audio_path):
69
+ if Path(audio_path).suffix=='.wav':
70
+ return
71
+ raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True,sr=None)
72
+ soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate)
73
+
74
+
75
+ def fill_a_to_b(a, b):
76
+ if len(a) < len(b):
77
+ for _ in range(0, len(b) - len(a)):
78
+ a.append(a[0])
79
+
80
+
81
+ def get_end_file(dir_path, end):
82
+ file_lists = []
83
+ for root, dirs, files in os.walk(dir_path):
84
+ files = [f for f in files if f[0] != '.']
85
+ dirs[:] = [d for d in dirs if d[0] != '.']
86
+ for f_file in files:
87
+ if f_file.endswith(end):
88
+ file_lists.append(os.path.join(root, f_file).replace("\\", "/"))
89
+ return file_lists
90
+
91
+
92
+ def mkdir(paths: list):
93
+ for path in paths:
94
+ if not os.path.exists(path):
95
+ os.mkdir(path)
96
+
97
+
98
+ def get_md5(content):
99
+ return hashlib.new("md5", content).hexdigest()
100
+
101
+
102
+ class Svc:
103
+ def __init__(self, project_name, config_name, hubert_gpu, model_path):
104
+ self.project_name = project_name
105
+ self.DIFF_DECODERS = {
106
+ 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),
107
+ 'fft': lambda hp: FFT(
108
+ hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
109
+ }
110
+
111
+ self.model_path = model_path
112
+ self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
113
+
114
+ self._ = set_hparams(config=config_name, exp_name=self.project_name, infer=True,
115
+ reset=True,
116
+ hparams_str='',
117
+ print_hparams=False)
118
+
119
+ self.mel_bins = hparams['audio_num_mel_bins']
120
+ self.model = GaussianDiffusion(
121
+ phone_encoder=Hubertencoder(hparams['hubert_path']),
122
+ out_dims=self.mel_bins, denoise_fn=self.DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
123
+ timesteps=hparams['timesteps'],
124
+ K_step=hparams['K_step'],
125
+ loss_type=hparams['diff_loss_type'],
126
+ spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
127
+ )
128
+ self.load_ckpt()
129
+ self.model.to(self.dev)
130
+ hparams['hubert_gpu'] = hubert_gpu
131
+ self.hubert = Hubertencoder(hparams['hubert_path'])
132
+ self.pe = PitchExtractor().to(self.dev)
133
+ utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
134
+ self.pe.eval()
135
+ self.vocoder = get_vocoder_cls(hparams)()
136
+
137
+ def load_ckpt(self, model_name='model', force=True, strict=True):
138
+ utils.load_ckpt(self.model, self.model_path, model_name, force, strict)
139
+
140
+ def infer(self, in_path, key, acc, use_pe=True, use_crepe=True, thre=0.05, singer=False, **kwargs):
141
+ batch = self.pre(in_path, acc, use_crepe, thre)
142
+ spk_embed = batch.get('spk_embed') if not hparams['use_spk_id'] else batch.get('spk_ids')
143
+ hubert = batch['hubert']
144
+ ref_mels = batch["mels"]
145
+ energy=batch['energy']
146
+ mel2ph = batch['mel2ph']
147
+ batch['f0'] = batch['f0'] + (key / 12)
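+ # f0 is stored as log2(Hz) here, so adding key/12 transposes by key semitones; out-of-range values are zeroed on the next line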
148
+ batch['f0'][batch['f0']>np.log2(hparams['f0_max'])]=0
149
+ f0 = batch['f0']
150
+ uv = batch['uv']
151
+ @timeit
152
+ def diff_infer():
153
+ outputs = self.model(
154
+ hubert.to(self.dev), spk_embed=spk_embed, mel2ph=mel2ph.to(self.dev), f0=f0.to(self.dev), uv=uv.to(self.dev),energy=energy.to(self.dev),
155
+ ref_mels=ref_mels.to(self.dev),
156
+ infer=True, **kwargs)
157
+ return outputs
158
+ outputs=diff_infer()
159
+ batch['outputs'] = self.model.out2mel(outputs['mel_out'])
160
+ batch['mel2ph_pred'] = outputs['mel2ph']
161
+ batch['f0_gt'] = denorm_f0(batch['f0'], batch['uv'], hparams)
162
+ if use_pe:
163
+ batch['f0_pred'] = self.pe(outputs['mel_out'])['f0_denorm_pred'].detach()
164
+ else:
165
+ batch['f0_pred'] = outputs.get('f0_denorm')
166
+ return self.after_infer(batch, singer, in_path)
167
+
168
+ @timeit
169
+ def after_infer(self, prediction, singer, in_path):
170
+ for k, v in prediction.items():
171
+ if type(v) is torch.Tensor:
172
+ prediction[k] = v.cpu().numpy()
173
+
174
+ # remove paddings
175
+ mel_gt = prediction["mels"]
176
+ mel_gt_mask = np.abs(mel_gt).sum(-1) > 0
177
+
178
+ mel_pred = prediction["outputs"]
179
+ mel_pred_mask = np.abs(mel_pred).sum(-1) > 0
180
+ mel_pred = mel_pred[mel_pred_mask]
181
+ mel_pred = np.clip(mel_pred, hparams['mel_vmin'], hparams['mel_vmax'])
182
+
183
+ f0_gt = prediction.get("f0_gt")
184
+ f0_pred = prediction.get("f0_pred")
185
+ if f0_pred is not None:
186
+ f0_gt = f0_gt[mel_gt_mask]
187
+ if len(f0_pred) > len(mel_pred_mask):
188
+ f0_pred = f0_pred[:len(mel_pred_mask)]
189
+ f0_pred = f0_pred[mel_pred_mask]
190
+ torch.cuda.is_available() and torch.cuda.empty_cache()
191
+
192
+ if singer:
193
+ data_path = in_path.replace("batch", "singer_data")
194
+ mel_path = data_path[:-4] + "_mel.npy"
195
+ f0_path = data_path[:-4] + "_f0.npy"
196
+ np.save(mel_path, mel_pred)
197
+ np.save(f0_path, f0_pred)
198
+ wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred)
199
+ return f0_gt, f0_pred, wav_pred
200
+
201
+ def temporary_dict2processed_input(self, item_name, temp_dict, use_crepe=True, thre=0.05):
202
+ '''
203
+ process data in temporary_dicts
204
+ '''
205
+
206
+ binarization_args = hparams['binarization_args']
207
+
208
+ @timeit
209
+ def get_pitch(wav, mel):
210
+ # get ground truth f0 by self.get_pitch_algorithm
211
+ global f0_dict
212
+ if use_crepe:
213
+ md5 = get_md5(wav)
214
+ if f"{md5}_gt" in f0_dict.keys():
215
+ print("load temp crepe f0")
216
+ gt_f0 = np.array(f0_dict[f"{md5}_gt"]["f0"])
217
+ coarse_f0 = np.array(f0_dict[f"{md5}_coarse"]["f0"])
218
+ else:
219
+ torch.cuda.is_available() and torch.cuda.empty_cache()
220
+ gt_f0, coarse_f0 = get_pitch_crepe(wav, mel, hparams, thre)
221
+ f0_dict[f"{md5}_gt"] = {"f0": gt_f0.tolist(), "time": int(time.time())}
222
+ f0_dict[f"{md5}_coarse"] = {"f0": coarse_f0.tolist(), "time": int(time.time())}
223
+ write_temp("./infer_tools/f0_temp.json", f0_dict)
224
+ else:
225
+ md5 = get_md5(wav)
226
+ if f"{md5}_gt_harvest" in f0_dict.keys():
227
+ print("load temp harvest f0")
228
+ gt_f0 = np.array(f0_dict[f"{md5}_gt_harvest"]["f0"])
229
+ coarse_f0 = np.array(f0_dict[f"{md5}_coarse_harvest"]["f0"])
230
+ else:
231
+ gt_f0, coarse_f0 = get_pitch_world(wav, mel, hparams)
232
+ f0_dict[f"{md5}_gt_harvest"] = {"f0": gt_f0.tolist(), "time": int(time.time())}
233
+ f0_dict[f"{md5}_coarse_harvest"] = {"f0": coarse_f0.tolist(), "time": int(time.time())}
234
+ write_temp("./infer_tools/f0_temp.json", f0_dict)
235
+ processed_input['f0'] = gt_f0
236
+ processed_input['pitch'] = coarse_f0
237
+
238
+ def get_align(mel, phone_encoded):
239
+ mel2ph = np.zeros([mel.shape[0]], int)
240
+ start_frame = 0
241
+ ph_durs = mel.shape[0] / phone_encoded.shape[0]
242
+ if hparams['debug']:
243
+ print(mel.shape, phone_encoded.shape, mel.shape[0] / phone_encoded.shape[0])
244
+ for i_ph in range(phone_encoded.shape[0]):
245
+ end_frame = int(i_ph * ph_durs + ph_durs + 0.5)
246
+ mel2ph[start_frame:end_frame + 1] = i_ph + 1
247
+ start_frame = end_frame + 1
248
+
249
+ processed_input['mel2ph'] = mel2ph
250
+
251
+ if hparams['vocoder'] in VOCODERS:
252
+ wav, mel = VOCODERS[hparams['vocoder']].wav2spec(temp_dict['wav_fn'])
253
+ else:
254
+ wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(temp_dict['wav_fn'])
255
+ processed_input = {
256
+ 'item_name': item_name, 'mel': mel,
257
+ 'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0]
258
+ }
259
+ processed_input = {**temp_dict, **processed_input} # merge two dicts
260
+
261
+ if binarization_args['with_f0']:
262
+ get_pitch(wav, mel)
263
+ if binarization_args['with_hubert']:
264
+ st = time.time()
265
+ hubert_encoded = processed_input['hubert'] = self.hubert.encode(temp_dict['wav_fn'])
266
+ et = time.time()
267
+ dev = 'cuda' if hparams['hubert_gpu'] and torch.cuda.is_available() else 'cpu'
268
+ print(f'hubert (on {dev}) time used {et - st}')
269
+
270
+ if binarization_args['with_align']:
271
+ get_align(mel, hubert_encoded)
272
+ return processed_input
273
+
274
+ def pre(self, wav_fn, accelerate, use_crepe=True, thre=0.05):
275
+ if isinstance(wav_fn, BytesIO):
276
+ item_name = self.project_name
277
+ else:
278
+ song_info = wav_fn.split('/')
279
+ item_name = song_info[-1].split('.')[-2]
280
+ temp_dict = {'wav_fn': wav_fn, 'spk_id': self.project_name}
281
+
282
+ temp_dict = self.temporary_dict2processed_input(item_name, temp_dict, use_crepe, thre)
283
+ hparams['pndm_speedup'] = accelerate
284
+ batch = processed_input2batch([getitem(temp_dict)])
285
+ return batch
286
+
287
+
288
+ def getitem(item):
289
+ max_frames = hparams['max_frames']
290
+ spec = torch.Tensor(item['mel'])[:max_frames]
291
+ energy = (spec.exp() ** 2).sum(-1).sqrt()
292
+ mel2ph = torch.LongTensor(item['mel2ph'])[:max_frames] if 'mel2ph' in item else None
293
+ f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams)
294
+ hubert = torch.Tensor(item['hubert'][:hparams['max_input_tokens']])
295
+ pitch = torch.LongTensor(item.get("pitch"))[:max_frames]
296
+ sample = {
297
+ "item_name": item['item_name'],
298
+ "hubert": hubert,
299
+ "mel": spec,
300
+ "pitch": pitch,
301
+ "energy": energy,
302
+ "f0": f0,
303
+ "uv": uv,
304
+ "mel2ph": mel2ph,
305
+ "mel_nonpadding": spec.abs().sum(-1) > 0,
306
+ }
307
+ return sample
308
+
309
+
310
+ def processed_input2batch(samples):
311
+ '''
312
+ Args:
313
+ samples: one batch of processed_input
314
+ NOTE:
315
+ the batch size is controlled by hparams['max_sentences']
316
+ '''
317
+ if len(samples) == 0:
318
+ return {}
319
+ item_names = [s['item_name'] for s in samples]
320
+ hubert = utils.collate_2d([s['hubert'] for s in samples], 0.0)
321
+ f0 = utils.collate_1d([s['f0'] for s in samples], 0.0)
322
+ pitch = utils.collate_1d([s['pitch'] for s in samples])
323
+ uv = utils.collate_1d([s['uv'] for s in samples])
324
+ energy = utils.collate_1d([s['energy'] for s in samples], 0.0)
325
+ mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \
326
+ if samples[0]['mel2ph'] is not None else None
327
+ mels = utils.collate_2d([s['mel'] for s in samples], 0.0)
328
+ mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples])
329
+
330
+ batch = {
331
+ 'item_name': item_names,
332
+ 'nsamples': len(samples),
333
+ 'hubert': hubert,
334
+ 'mels': mels,
335
+ 'mel_lengths': mel_lengths,
336
+ 'mel2ph': mel2ph,
337
+ 'energy': energy,
338
+ 'pitch': pitch,
339
+ 'f0': f0,
340
+ 'uv': uv,
341
+ }
342
+ return batch
infer_tools/new_chunks_temp.json ADDED
@@ -0,0 +1 @@
1
+ {"info": "temp_dict", "accd68639783a1819e41702c4c1bf2e7": {"chunks": {"0": {"slice": false, "split_time": "0,607727"}}, "time": 1672781849}, "28b718f4ef116ca8c4d2279dfc0bd161": {"chunks": {"0": {"slice": false, "split_time": "0,607727"}}, "time": 1672758446}, "3c68f6ef87cdbea1be9b66e78bcd1c62": {"chunks": {"0": {"slice": true, "split_time": "0,20115"}, "1": {"slice": false, "split_time": "20115,152363"}, "2": {"slice": true, "split_time": "152363,163441"}, "3": {"slice": false, "split_time": "163441,347184"}, "4": {"slice": true, "split_time": "347184,351976"}, "5": {"slice": false, "split_time": "351976,438356"}, "6": {"slice": true, "split_time": "438356,499095"}}, "time": 1673478071}, "dd17f428601bccf6dd3c82c6d6daaaba": {"chunks": {"0": {"slice": true, "split_time": "0,24155"}, "1": {"slice": false, "split_time": "24155,323983"}, "2": {"slice": true, "split_time": "323983,352800"}}, "time": 1672758814}, "7d915edda42b3c65471bb0f86ba2a57c": {"chunks": {"0": {"slice": false, "split_time": "0,1417197"}, "1": {"slice": true, "split_time": "1417197,1426665"}, "2": {"slice": false, "split_time": "1426665,1736746"}, "3": {"slice": true, "split_time": "1736746,1743374"}, "4": {"slice": false, "split_time": "1743374,2042438"}, "5": {"slice": true, "split_time": "2042438,2050710"}, "6": {"slice": false, "split_time": "2050710,2508864"}, "7": {"slice": true, "split_time": "2508864,2515696"}, "8": {"slice": false, "split_time": "2515696,2682383"}}, "time": 1672772091}, "3bad51a34c1a940a31de387147e10c0a": {"chunks": {"0": {"slice": false, "split_time": "0,318420"}, "1": {"slice": true, "split_time": "318420,325186"}, "2": {"slice": false, "split_time": "325186,1581611"}, "3": {"slice": true, "split_time": "1581611,1594730"}}, "time": 1672772297}, "3ffa2635bed65fb96d8476803560fd1a": {"chunks": {"0": {"slice": true, "split_time": "0,668952"}, "1": {"slice": false, "split_time": "668952,894598"}, "2": {"slice": true, "split_time": "894598,917516"}, "3": {"slice": false, "split_time": "917516,1140044"}, "4": {"slice": true, "split_time": "1140044,1159951"}, "5": {"slice": false, "split_time": "1159951,1725379"}, "6": {"slice": true, "split_time": "1725379,1756813"}, "7": {"slice": false, "split_time": "1756813,3302878"}, "8": {"slice": true, "split_time": "3302878,3361575"}, "9": {"slice": false, "split_time": "3361575,3582626"}, "10": {"slice": true, "split_time": "3582626,3609059"}, "11": {"slice": false, "split_time": "3609059,3844622"}, "12": {"slice": true, "split_time": "3844622,3861910"}, "13": {"slice": false, "split_time": "3861910,4440673"}, "14": {"slice": true, "split_time": "4440673,4468405"}, "15": {"slice": false, "split_time": "4468405,5108832"}, "16": {"slice": true, "split_time": "5108832,5129497"}, "17": {"slice": false, "split_time": "5129497,6675968"}, "18": {"slice": true, "split_time": "6675968,9217857"}}, "time": 1673261015}, "5fba9ddc6c7223c2907d9ff3169e23f1": {"chunks": {"0": {"slice": true, "split_time": "0,53579"}, "1": {"slice": false, "split_time": "53579,332754"}, "2": {"slice": true, "split_time": "332754,414277"}, "3": {"slice": false, "split_time": "414277,781044"}, "4": {"slice": true, "split_time": "781044,816837"}, "5": {"slice": false, "split_time": "816837,1549835"}, "6": {"slice": true, "split_time": "1549835,1557319"}, "7": {"slice": false, "split_time": "1557319,4476357"}, "8": {"slice": true, "split_time": "4476357,4503731"}, "9": {"slice": false, "split_time": "4503731,5209666"}, "10": {"slice": true, "split_time": "5209666,5213405"}, "11": {"slice": false, 
"split_time": "5213405,6021387"}, "12": {"slice": true, "split_time": "6021387,6063481"}, "13": {"slice": false, "split_time": "6063481,6491872"}, "14": {"slice": true, "split_time": "6491872,6602234"}, "15": {"slice": false, "split_time": "6602234,7311975"}, "16": {"slice": true, "split_time": "7311975,7328295"}, "17": {"slice": false, "split_time": "7328295,8137067"}, "18": {"slice": true, "split_time": "8137067,8142127"}, "19": {"slice": false, "split_time": "8142127,10821823"}, "20": {"slice": true, "split_time": "10821823,11066085"}, "21": {"slice": false, "split_time": "11066085,11148757"}, "22": {"slice": true, "split_time": "11148757,11459584"}}, "time": 1673446113}, "a8b37529b910527ef9b414d4fa485973": {"chunks": {"0": {"slice": true, "split_time": "0,74804"}, "1": {"slice": false, "split_time": "74804,545959"}, "2": {"slice": true, "split_time": "545959,550194"}, "3": {"slice": false, "split_time": "550194,1040392"}, "4": {"slice": true, "split_time": "1040392,1045223"}, "5": {"slice": false, "split_time": "1045223,2552370"}, "6": {"slice": true, "split_time": "2552370,2825551"}, "7": {"slice": false, "split_time": "2825551,3280352"}, "8": {"slice": true, "split_time": "3280352,3284153"}, "9": {"slice": false, "split_time": "3284153,4005079"}, "10": {"slice": true, "split_time": "4005079,4049271"}, "11": {"slice": false, "split_time": "4049271,4279046"}, "12": {"slice": true, "split_time": "4279046,4323261"}, "13": {"slice": false, "split_time": "4323261,5505905"}, "14": {"slice": true, "split_time": "5505905,5535670"}, "15": {"slice": false, "split_time": "5535670,5770626"}, "16": {"slice": true, "split_time": "5770626,5829727"}, "17": {"slice": false, "split_time": "5829727,7771861"}, "18": {"slice": true, "split_time": "7771861,8040098"}, "19": {"slice": false, "split_time": "8040098,8536352"}, "20": {"slice": true, "split_time": "8536352,9186304"}}, "time": 1673040810}, "7e44079101dd10bc262ee28c4b881f25": {"chunks": {"0": {"slice": true, "split_time": "0,135941"}, "1": {"slice": false, "split_time": "135941,787855"}, "2": {"slice": true, "split_time": "787855,999653"}, "3": {"slice": false, "split_time": "999653,2981684"}, "4": {"slice": true, "split_time": "2981684,3116588"}, "5": {"slice": false, "split_time": "3116588,3339320"}, "6": {"slice": true, "split_time": "3339320,3382142"}, "7": {"slice": false, "split_time": "3382142,5345853"}, "8": {"slice": true, "split_time": "5345853,5365617"}, "9": {"slice": false, "split_time": "5365617,6391431"}, "10": {"slice": true, "split_time": "6391431,6527569"}, "11": {"slice": false, "split_time": "6527569,7791684"}, "12": {"slice": true, "split_time": "7791684,7794073"}, "13": {"slice": false, "split_time": "7794073,8749857"}, "14": {"slice": true, "split_time": "8749857,9232506"}, "15": {"slice": false, "split_time": "9232506,11714376"}, "16": {"slice": true, "split_time": "11714376,11785553"}, "17": {"slice": false, "split_time": "11785553,12282894"}, "18": {"slice": true, "split_time": "12282894,12861440"}}, "time": 1673260417}, "89ccd0cc779c17deb9dfd54e700a0bcd": {"chunks": {"0": {"slice": true, "split_time": "0,15387"}, "1": {"slice": false, "split_time": "15387,348933"}, "2": {"slice": true, "split_time": "348933,352886"}, "3": {"slice": false, "split_time": "352886,675429"}, "4": {"slice": true, "split_time": "675429,678891"}, "5": {"slice": false, "split_time": "678891,1048502"}, "6": {"slice": true, "split_time": "1048502,1056469"}, "7": {"slice": false, "split_time": "1056469,1337989"}, "8": {"slice": true, "split_time": 
"1337989,1381376"}}, "time": 1673351834}, "46fa6dbb81bfe1252ebdefa0884d0e6d": {"chunks": {"0": {"slice": true, "split_time": "0,185799"}, "1": {"slice": false, "split_time": "185799,502227"}, "2": {"slice": true, "split_time": "502227,848473"}, "3": {"slice": false, "split_time": "848473,2347749"}, "4": {"slice": true, "split_time": "2347749,2348783"}, "5": {"slice": false, "split_time": "2348783,3429977"}, "6": {"slice": true, "split_time": "3429977,3890042"}, "7": {"slice": false, "split_time": "3890042,5375055"}, "8": {"slice": true, "split_time": "5375055,5402621"}, "9": {"slice": false, "split_time": "5402621,6462260"}, "10": {"slice": true, "split_time": "6462260,6923811"}, "11": {"slice": false, "split_time": "6923811,8507888"}, "12": {"slice": true, "split_time": "8507888,9252348"}, "13": {"slice": false, "split_time": "9252348,10599478"}, "14": {"slice": true, "split_time": "10599478,10628316"}, "15": {"slice": false, "split_time": "10628316,10692608"}}, "time": 1673446761}, "2b0ffc08fb6fb0f29df83ae8cc0c0eb8": {"chunks": {"0": {"slice": true, "split_time": "0,1208342"}, "1": {"slice": false, "split_time": "1208342,1917473"}, "2": {"slice": true, "split_time": "1917473,1984255"}, "3": {"slice": false, "split_time": "1984255,4413108"}, "4": {"slice": true, "split_time": "4413108,4882745"}, "5": {"slice": false, "split_time": "4882745,8072787"}, "6": {"slice": true, "split_time": "8072787,9419776"}}, "time": 1673388059}, "57440513a8cfffbc7c1ffb790b9723f2": {"chunks": {"0": {"slice": true, "split_time": "0,1199064"}, "1": {"slice": false, "split_time": "1199064,1431015"}, "2": {"slice": true, "split_time": "1431015,1452361"}, "3": {"slice": false, "split_time": "1452361,1898308"}, "4": {"slice": true, "split_time": "1898308,1904596"}, "5": {"slice": false, "split_time": "1904596,2468360"}, "6": {"slice": true, "split_time": "2468360,2479240"}, "7": {"slice": false, "split_time": "2479240,3593434"}, "8": {"slice": true, "split_time": "3593434,3629186"}, "9": {"slice": false, "split_time": "3629186,4074575"}, "10": {"slice": true, "split_time": "4074575,4078966"}, "11": {"slice": false, "split_time": "4078966,4307991"}, "12": {"slice": true, "split_time": "4307991,4316241"}, "13": {"slice": false, "split_time": "4316241,5986540"}, "14": {"slice": true, "split_time": "5986540,5990713"}, "15": {"slice": false, "split_time": "5990713,6243455"}, "16": {"slice": true, "split_time": "6243455,6491661"}, "17": {"slice": false, "split_time": "6491661,6719744"}, "18": {"slice": true, "split_time": "6719744,6739748"}, "19": {"slice": false, "split_time": "6739748,7021570"}, "20": {"slice": true, "split_time": "7021570,7069439"}, "21": {"slice": false, "split_time": "7069439,7363743"}, "22": {"slice": true, "split_time": "7363743,7376200"}, "23": {"slice": false, "split_time": "7376200,9402054"}, "24": {"slice": true, "split_time": "9402054,9403936"}, "25": {"slice": false, "split_time": "9403936,9697617"}, "26": {"slice": true, "split_time": "9697617,9878400"}}, "time": 1673468608}, "1d7d07c16652e4b7300bdaf5d38128b9": {"chunks": {"0": {"slice": true, "split_time": "0,261052"}, "1": {"slice": false, "split_time": "261052,993078"}, "2": {"slice": true, "split_time": "993078,1091294"}, "3": {"slice": false, "split_time": "1091294,2303042"}, "4": {"slice": true, "split_time": "2303042,2304533"}, "5": {"slice": false, "split_time": "2304533,3543442"}, "6": {"slice": true, "split_time": "3543442,3656782"}, "7": {"slice": false, "split_time": "3656782,4337660"}, "8": {"slice": true, "split_time": 
"4337660,4406921"}, "9": {"slice": false, "split_time": "4406921,6132779"}, "10": {"slice": true, "split_time": "6132779,6147466"}, "11": {"slice": false, "split_time": "6147466,6855506"}, "12": {"slice": true, "split_time": "6855506,7413094"}}, "time": 1673466029}, "dc6af7b6086c4a6f7f6ee206024988f3": {"chunks": {"0": {"slice": true, "split_time": "0,1492137"}, "1": {"slice": false, "split_time": "1492137,2132538"}, "2": {"slice": true, "split_time": "2132538,2140949"}, "3": {"slice": false, "split_time": "2140949,2362096"}, "4": {"slice": true, "split_time": "2362096,2377313"}, "5": {"slice": false, "split_time": "2377313,3190602"}, "6": {"slice": true, "split_time": "3190602,3236563"}, "7": {"slice": false, "split_time": "3236563,3795750"}, "8": {"slice": true, "split_time": "3795750,3798646"}, "9": {"slice": false, "split_time": "3798646,4037700"}, "10": {"slice": true, "split_time": "4037700,4057671"}, "11": {"slice": false, "split_time": "4057671,4828341"}, "12": {"slice": true, "split_time": "4828341,4839270"}, "13": {"slice": false, "split_time": "4839270,5069319"}, "14": {"slice": true, "split_time": "5069319,5070998"}, "15": {"slice": false, "split_time": "5070998,5722290"}, "16": {"slice": true, "split_time": "5722290,5778275"}, "17": {"slice": false, "split_time": "5778275,6346775"}, "18": {"slice": true, "split_time": "6346775,6354400"}, "19": {"slice": false, "split_time": "6354400,6965771"}, "20": {"slice": true, "split_time": "6965771,7361821"}, "21": {"slice": false, "split_time": "7361821,7649206"}, "22": {"slice": true, "split_time": "7649206,7681433"}, "23": {"slice": false, "split_time": "7681433,7921525"}, "24": {"slice": true, "split_time": "7921525,7934007"}, "25": {"slice": false, "split_time": "7934007,8183308"}, "26": {"slice": true, "split_time": "8183308,8238306"}, "27": {"slice": false, "split_time": "8238306,10906572"}, "28": {"slice": true, "split_time": "10906572,10942779"}}, "time": 1673469959}, "010900becfb5574ab3a333f941577c2c": {"chunks": {"0": {"slice": true, "split_time": "0,1083101"}, "1": {"slice": false, "split_time": "1083101,1421801"}, "2": {"slice": true, "split_time": "1421801,1453133"}, "3": {"slice": false, "split_time": "1453133,1794206"}, "4": {"slice": true, "split_time": "1794206,1800891"}, "5": {"slice": false, "split_time": "1800891,2115485"}, "6": {"slice": true, "split_time": "2115485,2152253"}, "7": {"slice": false, "split_time": "2152253,2957806"}, "8": {"slice": true, "split_time": "2957806,2967552"}, "9": {"slice": false, "split_time": "2967552,3967440"}, "10": {"slice": true, "split_time": "3967440,4012384"}, "11": {"slice": false, "split_time": "4012384,4335874"}, "12": {"slice": true, "split_time": "4335874,4363031"}, "13": {"slice": false, "split_time": "4363031,4694987"}, "14": {"slice": true, "split_time": "4694987,4715486"}, "15": {"slice": false, "split_time": "4715486,5030719"}, "16": {"slice": true, "split_time": "5030719,5065596"}, "17": {"slice": false, "split_time": "5065596,6874746"}, "18": {"slice": true, "split_time": "6874746,7885915"}, "19": {"slice": false, "split_time": "7885915,9628060"}, "20": {"slice": true, "split_time": "9628060,10032061"}}, "time": 1673470410}, "5bc32d122ced9dd6d8cf0186fff6466a": {"chunks": {"0": {"slice": true, "split_time": "0,814909"}, "1": {"slice": false, "split_time": "814909,3487605"}, "2": {"slice": true, "split_time": "3487605,3496613"}, "3": {"slice": false, "split_time": "3496613,6186104"}, "4": {"slice": true, "split_time": "6186104,6826505"}, "5": {"slice": false, 
"split_time": "6826505,9979380"}, "6": {"slice": true, "split_time": "9979380,13711090"}}, "time": 1673474828}}
infer_tools/slicer.py ADDED
@@ -0,0 +1,158 @@
1
+ import time
2
+
3
+ import numpy as np
4
+ import torch
5
+ import torchaudio
6
+ from scipy.ndimage import maximum_filter1d, uniform_filter1d
7
+
8
+
9
+ def timeit(func):
10
+ def run(*args, **kwargs):
11
+ t = time.time()
12
+ res = func(*args, **kwargs)
13
+ print('executing \'%s\' cost %.3fs' % (func.__name__, time.time() - t))
14
+ return res
15
+
16
+ return run
17
+
18
+
19
+ # @timeit
20
+ def _window_maximum(arr, win_sz):
21
+ return maximum_filter1d(arr, size=win_sz)[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1]
22
+
23
+
24
+ # @timeit
25
+ def _window_rms(arr, win_sz):
26
+ filtered = np.sqrt(uniform_filter1d(np.power(arr, 2), win_sz) - np.power(uniform_filter1d(arr, win_sz), 2))
27
+ return filtered[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1]
28
+
29
+
30
+ def level2db(levels, eps=1e-12):
31
+ return 20 * np.log10(np.clip(levels, a_min=eps, a_max=1))
32
+
33
+
34
+ def _apply_slice(audio, begin, end):
35
+ if len(audio.shape) > 1:
36
+ return audio[:, begin: end]
37
+ else:
38
+ return audio[begin: end]
39
+
40
+
41
+ class Slicer:
42
+ def __init__(self,
43
+ sr: int,
44
+ db_threshold: float = -40,
45
+ min_length: int = 5000,
46
+ win_l: int = 300,
47
+ win_s: int = 20,
48
+ max_silence_kept: int = 500):
49
+ self.db_threshold = db_threshold
50
+ self.min_samples = round(sr * min_length / 1000)
51
+ self.win_ln = round(sr * win_l / 1000)
52
+ self.win_sn = round(sr * win_s / 1000)
53
+ self.max_silence = round(sr * max_silence_kept / 1000)
54
+ if not self.min_samples >= self.win_ln >= self.win_sn:
55
+ raise ValueError('The following condition must be satisfied: min_length >= win_l >= win_s')
56
+ if not self.max_silence >= self.win_sn:
57
+ raise ValueError('The following condition must be satisfied: max_silence_kept >= win_s')
58
+
59
+ @timeit
60
+ def slice(self, audio):
61
+ samples = audio
62
+ if samples.shape[0] <= self.min_samples:
63
+ return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}}
64
+ # get absolute amplitudes
65
+ abs_amp = np.abs(samples - np.mean(samples))
66
+ # calculate local maximum with large window
67
+ win_max_db = level2db(_window_maximum(abs_amp, win_sz=self.win_ln))
68
+ sil_tags = []
69
+ left = right = 0
70
+ while right < win_max_db.shape[0]:
71
+ if win_max_db[right] < self.db_threshold:
72
+ right += 1
73
+ elif left == right:
74
+ left += 1
75
+ right += 1
76
+ else:
77
+ if left == 0:
78
+ split_loc_l = left
79
+ else:
80
+ sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2)
81
+ rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn))
82
+ split_win_l = left + np.argmin(rms_db_left)
83
+ split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn])
84
+ if len(sil_tags) != 0 and split_loc_l - sil_tags[-1][1] < self.min_samples and right < win_max_db.shape[
85
+ 0] - 1:
86
+ right += 1
87
+ left = right
88
+ continue
89
+ if right == win_max_db.shape[0] - 1:
90
+ split_loc_r = right + self.win_ln
91
+ else:
92
+ sil_right_n = min(self.max_silence, (right + self.win_ln - left) // 2)
93
+ rms_db_right = level2db(_window_rms(samples[right + self.win_ln - sil_right_n: right + self.win_ln],
94
+ win_sz=self.win_sn))
95
+ split_win_r = right + self.win_ln - sil_right_n + np.argmin(rms_db_right)
96
+ split_loc_r = split_win_r + np.argmin(abs_amp[split_win_r: split_win_r + self.win_sn])
97
+ sil_tags.append((split_loc_l, split_loc_r))
98
+ right += 1
99
+ left = right
100
+ if left != right:
101
+ sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2)
102
+ rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn))
103
+ split_win_l = left + np.argmin(rms_db_left)
104
+ split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn])
105
+ sil_tags.append((split_loc_l, samples.shape[0]))
106
+ if len(sil_tags) == 0:
107
+ return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}}
108
+ else:
109
+ chunks = []
110
+ # the first silence does not start at sample 0, so prepend the leading voiced segment
111
+ if sil_tags[0][0]:
112
+ chunks.append({"slice": False, "split_time": f"0,{sil_tags[0][0]}"})
113
+ for i in range(0, len(sil_tags)):
114
+ # mark the voiced segment before this silence (skipped for the first one)
115
+ if i:
116
+ chunks.append({"slice": False, "split_time": f"{sil_tags[i - 1][1]},{sil_tags[i][0]}"})
117
+ # mark every silent segment
118
+ chunks.append({"slice": True, "split_time": f"{sil_tags[i][0]},{sil_tags[i][1]}"})
119
+ # the last silence does not reach the end, so append the trailing voiced segment
120
+ if sil_tags[-1][1] != len(audio):
121
+ chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1]},{len(audio)}"})
122
+ chunk_dict = {}
123
+ for i in range(len(chunks)):
124
+ chunk_dict[str(i)] = chunks[i]
125
+ return chunk_dict
126
+
127
+
128
+ def cut(audio_path, db_thresh=-30, min_len=5000, win_l=300, win_s=20, max_sil_kept=500):
129
+ audio, sr = torchaudio.load(audio_path)
130
+ if len(audio.shape) == 2 and audio.shape[1] >= 2:
131
+ audio = torch.mean(audio, dim=0).unsqueeze(0)
132
+ audio = audio.cpu().numpy()[0]
133
+
134
+ slicer = Slicer(
135
+ sr=sr,
136
+ db_threshold=db_thresh,
137
+ min_length=min_len,
138
+ win_l=win_l,
139
+ win_s=win_s,
140
+ max_silence_kept=max_sil_kept
141
+ )
142
+ chunks = slicer.slice(audio)
143
+ return chunks
144
+
145
+
146
+ def chunks2audio(audio_path, chunks):
147
+ chunks = dict(chunks)
148
+ audio, sr = torchaudio.load(audio_path)
149
+ if len(audio.shape) == 2 and audio.shape[1] >= 2:
150
+ audio = torch.mean(audio, dim=0).unsqueeze(0)
151
+ audio = audio.cpu().numpy()[0]
152
+ result = []
153
+ for k, v in chunks.items():
154
+ tag = v["split_time"].split(",")
155
+ result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
156
+ return result, sr
157
+
158
+
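A minimal usage sketch for the helpers above (illustrative; vocal.wav is a placeholder path):
```
# Slice a vocal track at silences, then iterate over the resulting segments.
from infer_tools import slicer

chunks = slicer.cut("vocal.wav", db_thresh=-40)
segments, sr = slicer.chunks2audio("vocal.wav", chunks)
for is_silence, data in segments:
    kind = "silence" if is_silence else "voiced"
    print(f"{kind}: {len(data) / sr:.2f}s")
```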
inference.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
models/genshin/__init__.py ADDED
File without changes
models/genshin/config.yaml ADDED
@@ -0,0 +1,445 @@
1
+ K_step: 1000
2
+ accumulate_grad_batches: 1
3
+ audio_num_mel_bins: 128
4
+ audio_sample_rate: 44100
5
+ binarization_args:
6
+ shuffle: false
7
+ with_align: true
8
+ with_f0: true
9
+ with_hubert: true
10
+ with_spk_embed: false
11
+ with_wav: false
12
+ binarizer_cls: preprocessing.SVCpre.SVCBinarizer
13
+ binary_data_dir: data/binary/raiden
14
+ check_val_every_n_epoch: 10
15
+ choose_test_manually: false
16
+ clip_grad_norm: 1
17
+ config_path: training/config_nsf.yaml
18
+ content_cond_steps: []
19
+ cwt_add_f0_loss: false
20
+ cwt_hidden_size: 128
21
+ cwt_layers: 2
22
+ cwt_loss: l1
23
+ cwt_std_scale: 0.8
24
+ datasets:
25
+ - opencpop
26
+ debug: false
27
+ dec_ffn_kernel_size: 9
28
+ dec_layers: 4
29
+ decay_steps: 50000
30
+ decoder_type: fft
31
+ dict_dir: ''
32
+ diff_decoder_type: wavenet
33
+ diff_loss_type: l2
34
+ dilation_cycle_length: 4
35
+ dropout: 0.1
36
+ ds_workers: 4
37
+ dur_enc_hidden_stride_kernel:
38
+ - 0,2,3
39
+ - 0,2,3
40
+ - 0,1,3
41
+ dur_loss: mse
42
+ dur_predictor_kernel: 3
43
+ dur_predictor_layers: 5
44
+ enc_ffn_kernel_size: 9
45
+ enc_layers: 4
46
+ encoder_K: 8
47
+ encoder_type: fft
48
+ endless_ds: false
49
+ f0_bin: 256
50
+ f0_max: 1100.0
51
+ f0_min: 40.0
52
+ ffn_act: gelu
53
+ ffn_padding: SAME
54
+ fft_size: 2048
55
+ fmax: 16000
56
+ fmin: 40
57
+ fs2_ckpt: ''
58
+ gaussian_start: true
59
+ gen_dir_name: ''
60
+ gen_tgt_spk_id: -1
61
+ hidden_size: 256
62
+ hop_size: 512
63
+ hubert_gpu: true
64
+ hubert_path: checkpoints/hubert/hubert_soft.pt
65
+ infer: false
66
+ keep_bins: 128
67
+ lambda_commit: 0.25
68
+ lambda_energy: 0.0
69
+ lambda_f0: 1.0
70
+ lambda_ph_dur: 0.3
71
+ lambda_sent_dur: 1.0
72
+ lambda_uv: 1.0
73
+ lambda_word_dur: 1.0
74
+ load_ckpt: ''
75
+ log_interval: 100
76
+ loud_norm: false
77
+ lr: 0.0012
78
+ max_beta: 0.02
79
+ max_epochs: 3000
80
+ max_eval_sentences: 1
81
+ max_eval_tokens: 60000
82
+ max_frames: 42000
83
+ max_input_tokens: 60000
84
+ max_sentences: 48
85
+ max_tokens: 128000
86
+ max_updates: 260000
87
+ mel_loss: ssim:0.5|l1:0.5
88
+ mel_vmax: 1.5
89
+ mel_vmin: -6.0
90
+ min_level_db: -120
91
+ no_fs2: true
92
+ norm_type: gn
93
+ num_ckpt_keep: 10
94
+ num_heads: 2
95
+ num_sanity_val_steps: 1
96
+ num_spk: 1
97
+ num_test_samples: 0
98
+ num_valid_plots: 10
99
+ optimizer_adam_beta1: 0.9
100
+ optimizer_adam_beta2: 0.98
101
+ out_wav_norm: false
102
+ pe_ckpt: checkpoints/0102_xiaoma_pe/model_ckpt_steps_60000.ckpt
103
+ pe_enable: false
104
+ perform_enhance: true
105
+ pitch_ar: false
106
+ pitch_enc_hidden_stride_kernel:
107
+ - 0,2,5
108
+ - 0,2,5
109
+ - 0,2,5
110
+ pitch_extractor: parselmouth
111
+ pitch_loss: l2
112
+ pitch_norm: log
113
+ pitch_type: frame
114
+ pndm_speedup: 10
115
+ pre_align_args:
116
+ allow_no_txt: false
117
+ denoise: false
118
+ forced_align: mfa
119
+ txt_processor: zh_g2pM
120
+ use_sox: true
121
+ use_tone: false
122
+ pre_align_cls: data_gen.singing.pre_align.SingingPreAlign
123
+ predictor_dropout: 0.5
124
+ predictor_grad: 0.1
125
+ predictor_hidden: -1
126
+ predictor_kernel: 5
127
+ predictor_layers: 5
128
+ prenet_dropout: 0.5
129
+ prenet_hidden_size: 256
130
+ pretrain_fs_ckpt: ''
131
+ processed_data_dir: xxx
132
+ profile_infer: false
133
+ raw_data_dir: data/raw/raiden
134
+ ref_norm_layer: bn
135
+ rel_pos: true
136
+ reset_phone_dict: true
137
+ residual_channels: 384
138
+ residual_layers: 20
139
+ save_best: false
140
+ save_ckpt: true
141
+ save_codes:
142
+ - configs
143
+ - modules
144
+ - src
145
+ - utils
146
+ save_f0: true
147
+ save_gt: false
148
+ schedule_type: linear
149
+ seed: 1234
150
+ sort_by_len: true
151
+ speaker_id: raiden
152
+ spec_max:
153
+ - -0.4759584963321686
154
+ - -0.04242899641394615
155
+ - 0.2820039689540863
156
+ - 0.6635098457336426
157
+ - 0.7846556901931763
158
+ - 0.9242268800735474
159
+ - 1.0596446990966797
160
+ - 0.9890199303627014
161
+ - 0.8979427218437195
162
+ - 0.8635445237159729
163
+ - 0.8591453433036804
164
+ - 0.6987467408180237
165
+ - 0.717823326587677
+ - 0.7350517511367798
+ - 0.6464754939079285
+ - 0.6164345145225525
+ - 0.4986744523048401
+ - 0.3543139100074768
+ - 0.2876613438129425
+ - 0.3467520773410797
+ - 0.27083638310432434
+ - 0.4002445936203003
+ - 0.4222544729709625
+ - 0.4776916205883026
+ - 0.5299767255783081
+ - 0.6194124817848206
+ - 0.5802494287490845
+ - 0.6222044229507446
+ - 0.6124054193496704
+ - 0.6688933968544006
+ - 0.7368689179420471
+ - 0.7275264859199524
+ - 0.7448640465736389
+ - 0.5364857912063599
+ - 0.5867365002632141
+ - 0.48127084970474243
+ - 0.48270556330680847
+ - 0.45863038301467896
+ - 0.3647041916847229
+ - 0.3207940459251404
+ - 0.3352258801460266
+ - 0.2846287190914154
+ - 0.2674693465232849
+ - 0.3205840587615967
+ - 0.36074087023735046
+ - 0.40593528747558594
+ - 0.266417920589447
+ - 0.22159256041049957
+ - 0.19403372704982758
+ - 0.29326388239860535
+ - 0.29472747445106506
+ - 0.3801038861274719
+ - 0.3864395320415497
+ - 0.285959392786026
+ - 0.22213149070739746
+ - 0.19549456238746643
+ - 0.22238962352275848
+ - 0.15776650607585907
+ - 0.23960433900356293
+ - 0.3050341308116913
+ - 0.258531779050827
+ - 0.19573383033275604
+ - 0.2259710431098938
+ - 0.2110864669084549
+ - 0.24603094160556793
+ - 0.05981471762061119
+ - 0.17803697288036346
+ - 0.17807669937610626
+ - 0.18952223658561707
+ - 0.053435735404491425
+ - 0.1157914251089096
+ - 0.026514273136854172
+ - 0.16326436400413513
+ - 0.22839383780956268
+ - 0.08631942421197891
+ - 0.23315998911857605
+ - 0.162082701921463
+ - 0.1533375382423401
+ - 0.12668564915657043
+ - 0.08644244074821472
+ - 0.02113831788301468
+ - 0.3463006615638733
+ - 0.22695019841194153
+ - 0.14443305134773254
+ - 0.20211298763751984
+ - 0.07295431941747665
+ - -0.007622874341905117
+ - -0.02703588828444481
+ - -0.06394484639167786
+ - -0.09371187537908554
+ - -0.005024001933634281
+ - -0.013857427053153515
+ - -0.1372852921485901
+ - -0.10361140221357346
+ - -0.12665916979312897
+ - -0.20724378526210785
+ - -0.2142055779695511
+ - 0.19261693954467773
+ - 0.08877495676279068
+ - -0.21178629994392395
+ - -0.18947778642177582
+ - -0.2520659863948822
+ - -0.22880172729492188
+ - -0.14105628430843353
+ - -0.05572707951068878
+ - -0.26297450065612793
+ - -0.412873774766922
+ - -0.35086822509765625
+ - -0.36454305052757263
+ - -0.4511951208114624
+ - -0.3738810122013092
+ - -0.4491288959980011
+ - -0.5304299592971802
+ - -0.4495029151439667
+ - -0.4343602657318115
+ - -0.34189438819885254
+ - -0.5748119950294495
+ - -0.6650391817092896
+ - -0.6537092924118042
+ - -0.8302493691444397
+ - -0.7430973649024963
+ - -0.9170511364936829
+ - -1.0624077320098877
+ - -1.242630958557129
+ - -1.163043737411499
+ - -1.0178996324539185
+ - -1.4756207466125488
+ - -1.5275251865386963
+ spec_min:
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.999994277954102
+ - -4.9844536781311035
+ - -4.999994277954102
+ - -4.999994277954102
+ spk_cond_steps: []
+ stop_token_weight: 5.0
+ task_cls: training.task.SVC_task.SVCTask
+ test_ids: []
+ test_input_dir: ''
+ test_num: 0
+ test_prefixes:
+ - test
+ test_set_name: test
+ timesteps: 1000
+ train_set_name: train
+ use_crepe: true
+ use_denoise: false
+ use_energy_embed: false
+ use_gt_dur: false
+ use_gt_f0: false
+ use_midi: false
+ use_nsf: true
+ use_pitch_embed: true
+ use_pos_embed: true
+ use_spk_embed: false
+ use_spk_id: false
+ use_split_spk_id: false
+ use_uv: false
+ use_var_enc: false
+ use_vec: false
+ val_check_interval: 2000
+ valid_num: 0
+ valid_set_name: valid
+ vocoder: network.vocoders.nsf_hifigan.NsfHifiGAN
+ vocoder_ckpt: checkpoints/nsf_hifigan/model
+ warmup_updates: 2000
+ wav2spec_eps: 1e-6
+ weight_decay: 0
+ win_size: 2048
+ work_dir: checkpoints/raiden
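For context, per-bin spec_min/spec_max statistics like the ones above are conventionally used to squash log-mel spectrograms into [-1, 1] before the diffusion decoder and to invert the mapping afterwards. A minimal sketch of that convention, under the assumption that the bounds are loaded from this YAML (the helper names here are illustrative, not taken from this repo):

import torch

def norm_spec(spec: torch.Tensor, spec_min: torch.Tensor, spec_max: torch.Tensor) -> torch.Tensor:
    # map each mel bin from [spec_min, spec_max] onto [-1, 1]
    return (spec - spec_min) / (spec_max - spec_min) * 2 - 1

def denorm_spec(x: torch.Tensor, spec_min: torch.Tensor, spec_max: torch.Tensor) -> torch.Tensor:
    # inverse of norm_spec: back to the original log-mel range
    return (x + 1) / 2 * (spec_max - spec_min) + spec_min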
models/genshin/raiden.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1da7f6c45f8651131386c946f1bd90010b1fd568792d8d70f7025ea3b9a9dda2
+ size 134965595
modules/commons/__pycache__/common_layers.cpython-38.pyc ADDED
Binary file (18.8 kB)
modules/commons/__pycache__/espnet_positional_embedding.cpython-38.pyc ADDED
Binary file (4.44 kB)
modules/commons/__pycache__/ssim.cpython-38.pyc ADDED
Binary file (2.69 kB)
modules/commons/common_layers.py ADDED
@@ -0,0 +1,671 @@
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import Parameter
+ import torch.onnx.operators
+ import torch.nn.functional as F
+ import utils
+
+
+ class Reshape(nn.Module):
+     def __init__(self, *args):
+         super(Reshape, self).__init__()
+         self.shape = args
+
+     def forward(self, x):
+         return x.view(self.shape)
+
+
+ class Permute(nn.Module):
+     def __init__(self, *args):
+         super(Permute, self).__init__()
+         self.args = args
+
+     def forward(self, x):
+         return x.permute(self.args)
+
+
+ class LinearNorm(torch.nn.Module):
+     def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
+         super(LinearNorm, self).__init__()
+         self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
+
+         torch.nn.init.xavier_uniform_(
+             self.linear_layer.weight,
+             gain=torch.nn.init.calculate_gain(w_init_gain))
+
+     def forward(self, x):
+         return self.linear_layer(x)
+
+
+ class ConvNorm(torch.nn.Module):
+     def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
+                  padding=None, dilation=1, bias=True, w_init_gain='linear'):
+         super(ConvNorm, self).__init__()
+         if padding is None:
+             assert (kernel_size % 2 == 1)
+             padding = int(dilation * (kernel_size - 1) / 2)
+
+         self.conv = torch.nn.Conv1d(in_channels, out_channels,
+                                     kernel_size=kernel_size, stride=stride,
+                                     padding=padding, dilation=dilation,
+                                     bias=bias)
+
+         torch.nn.init.xavier_uniform_(
+             self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
+
+     def forward(self, signal):
+         conv_signal = self.conv(signal)
+         return conv_signal
+
+
+ def Embedding(num_embeddings, embedding_dim, padding_idx=None):
+     m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
+     nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
+     if padding_idx is not None:
+         nn.init.constant_(m.weight[padding_idx], 0)
+     return m
+
+
+ def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
+     if not export and torch.cuda.is_available():
+         try:
+             from apex.normalization import FusedLayerNorm
+             return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
+         except ImportError:
+             pass
+     return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
+
+
+ def Linear(in_features, out_features, bias=True):
+     m = nn.Linear(in_features, out_features, bias)
+     nn.init.xavier_uniform_(m.weight)
+     if bias:
+         nn.init.constant_(m.bias, 0.)
+     return m
+
+
+ class SinusoidalPositionalEmbedding(nn.Module):
+     """This module produces sinusoidal positional embeddings of any length.
+
+     Padding symbols are ignored.
+     """
+
+     def __init__(self, embedding_dim, padding_idx, init_size=1024):
+         super().__init__()
+         self.embedding_dim = embedding_dim
+         self.padding_idx = padding_idx
+         self.weights = SinusoidalPositionalEmbedding.get_embedding(
+             init_size,
+             embedding_dim,
+             padding_idx,
+         )
+         self.register_buffer('_float_tensor', torch.FloatTensor(1))
+
+     @staticmethod
+     def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
+         """Build sinusoidal embeddings.
+
+         This matches the implementation in tensor2tensor, but differs slightly
+         from the description in Section 3.5 of "Attention Is All You Need".
+         """
+         half_dim = embedding_dim // 2
+         emb = math.log(10000) / (half_dim - 1)
+         emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
+         emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
+         emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+         if embedding_dim % 2 == 1:
+             # zero pad
+             emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+         if padding_idx is not None:
+             emb[padding_idx, :] = 0
+         return emb
+
+     def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
+         """Input is expected to be of size [bsz x seqlen]."""
+         bsz, seq_len = input.shape[:2]
+         max_pos = self.padding_idx + 1 + seq_len
+         if self.weights is None or max_pos > self.weights.size(0):
+             # recompute/expand embeddings if needed
+             self.weights = SinusoidalPositionalEmbedding.get_embedding(
+                 max_pos,
+                 self.embedding_dim,
+                 self.padding_idx,
+             )
+         self.weights = self.weights.to(self._float_tensor)
+
+         if incremental_state is not None:
+             # positions is the same for every token when decoding a single step
+             pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
+             return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
+
+         positions = utils.make_positions(input, self.padding_idx) if positions is None else positions
+         return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
+
+     def max_positions(self):
+         """Maximum number of supported positions."""
+         return int(1e5)  # an arbitrary large number
+
+
+ class ConvTBC(nn.Module):
+     def __init__(self, in_channels, out_channels, kernel_size, padding=0):
+         super(ConvTBC, self).__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.kernel_size = kernel_size
+         self.padding = padding
+
+         self.weight = torch.nn.Parameter(torch.Tensor(
+             self.kernel_size, in_channels, out_channels))
+         self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
+
+     def forward(self, input):
+         return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding)
+
+
+ class MultiheadAttention(nn.Module):
+     def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
+                  add_bias_kv=False, add_zero_attn=False, self_attention=False,
+                  encoder_decoder_attention=False):
+         super().__init__()
+         self.embed_dim = embed_dim
+         self.kdim = kdim if kdim is not None else embed_dim
+         self.vdim = vdim if vdim is not None else embed_dim
+         self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
+
+         self.num_heads = num_heads
+         self.dropout = dropout
+         self.head_dim = embed_dim // num_heads
+         assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
+         self.scaling = self.head_dim ** -0.5
+
+         self.self_attention = self_attention
+         self.encoder_decoder_attention = encoder_decoder_attention
+
+         assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
+                                                              'value to be of the same size'
+
+         if self.qkv_same_dim:
+             self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
+         else:
+             self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
+             self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
+             self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
+
+         if bias:
+             self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
+         else:
+             self.register_parameter('in_proj_bias', None)
+
+         self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+         if add_bias_kv:
+             self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
+             self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
+         else:
+             self.bias_k = self.bias_v = None
+
+         self.add_zero_attn = add_zero_attn
+
+         self.reset_parameters()
+
+         self.enable_torch_version = False
+         if hasattr(F, "multi_head_attention_forward"):
+             self.enable_torch_version = True
+         else:
+             self.enable_torch_version = False
+         self.last_attn_probs = None
+
+     def reset_parameters(self):
+         if self.qkv_same_dim:
+             nn.init.xavier_uniform_(self.in_proj_weight)
+         else:
+             nn.init.xavier_uniform_(self.k_proj_weight)
+             nn.init.xavier_uniform_(self.v_proj_weight)
+             nn.init.xavier_uniform_(self.q_proj_weight)
+
+         nn.init.xavier_uniform_(self.out_proj.weight)
+         if self.in_proj_bias is not None:
+             nn.init.constant_(self.in_proj_bias, 0.)
+             nn.init.constant_(self.out_proj.bias, 0.)
+         if self.bias_k is not None:
+             nn.init.xavier_normal_(self.bias_k)
+         if self.bias_v is not None:
+             nn.init.xavier_normal_(self.bias_v)
+
+     def forward(
+             self,
+             query, key, value,
+             key_padding_mask=None,
+             incremental_state=None,
+             need_weights=True,
+             static_kv=False,
+             attn_mask=None,
+             before_softmax=False,
+             need_head_weights=False,
+             enc_dec_attn_constraint_mask=None,
+             reset_attn_weight=None
+     ):
+         """Input shape: Time x Batch x Channel
+
+         Args:
+             key_padding_mask (ByteTensor, optional): mask to exclude
+                 keys that are pads, of shape `(batch, src_len)`, where
+                 padding elements are indicated by 1s.
+             need_weights (bool, optional): return the attention weights,
+                 averaged over heads (default: False).
+             attn_mask (ByteTensor, optional): typically used to
+                 implement causal attention, where the mask prevents the
+                 attention from looking forward in time (default: None).
+             before_softmax (bool, optional): return the raw attention
+                 weights and values before the attention softmax.
+             need_head_weights (bool, optional): return the attention
+                 weights for each head. Implies *need_weights*. Default:
+                 return the average attention weights over all heads.
+         """
+         if need_head_weights:
+             need_weights = True
+
+         tgt_len, bsz, embed_dim = query.size()
+         assert embed_dim == self.embed_dim
+         assert list(query.size()) == [tgt_len, bsz, embed_dim]
+
+         if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
+             if self.qkv_same_dim:
+                 return F.multi_head_attention_forward(query, key, value,
+                                                       self.embed_dim, self.num_heads,
+                                                       self.in_proj_weight,
+                                                       self.in_proj_bias, self.bias_k, self.bias_v,
+                                                       self.add_zero_attn, self.dropout,
+                                                       self.out_proj.weight, self.out_proj.bias,
+                                                       self.training, key_padding_mask, need_weights,
+                                                       attn_mask)
+             else:
+                 return F.multi_head_attention_forward(query, key, value,
+                                                       self.embed_dim, self.num_heads,
+                                                       torch.empty([0]),
+                                                       self.in_proj_bias, self.bias_k, self.bias_v,
+                                                       self.add_zero_attn, self.dropout,
+                                                       self.out_proj.weight, self.out_proj.bias,
+                                                       self.training, key_padding_mask, need_weights,
+                                                       attn_mask, use_separate_proj_weight=True,
+                                                       q_proj_weight=self.q_proj_weight,
+                                                       k_proj_weight=self.k_proj_weight,
+                                                       v_proj_weight=self.v_proj_weight)
+
+         if incremental_state is not None:
+             print('Not implemented error.')
+             exit()
+         else:
+             saved_state = None
+
+         if self.self_attention:
+             # self-attention
+             q, k, v = self.in_proj_qkv(query)
+         elif self.encoder_decoder_attention:
+             # encoder-decoder attention
+             q = self.in_proj_q(query)
+             if key is None:
+                 assert value is None
+                 k = v = None
+             else:
+                 k = self.in_proj_k(key)
+                 v = self.in_proj_v(key)
+
+         else:
+             q = self.in_proj_q(query)
+             k = self.in_proj_k(key)
+             v = self.in_proj_v(value)
+         q *= self.scaling
+
+         if self.bias_k is not None:
+             assert self.bias_v is not None
+             k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
+             v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
+             if attn_mask is not None:
+                 attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
+             if key_padding_mask is not None:
+                 key_padding_mask = torch.cat(
+                     [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
+
+         q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
+         if k is not None:
+             k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
+         if v is not None:
+             v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
+
+         if saved_state is not None:
+             print('Not implemented error.')
+             exit()
+
+         src_len = k.size(1)
+
+         # This is part of a workaround to get around fork/join parallelism
+         # not supporting Optional types.
+         if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
+             key_padding_mask = None
+
+         if key_padding_mask is not None:
+             assert key_padding_mask.size(0) == bsz
+             assert key_padding_mask.size(1) == src_len
+
+         if self.add_zero_attn:
+             src_len += 1
+             k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
+             v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
+             if attn_mask is not None:
+                 attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
+             if key_padding_mask is not None:
+                 key_padding_mask = torch.cat(
+                     [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
+
+         attn_weights = torch.bmm(q, k.transpose(1, 2))
+         attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
+
+         assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
+
+         if attn_mask is not None:
+             if len(attn_mask.shape) == 2:
+                 attn_mask = attn_mask.unsqueeze(0)
+             elif len(attn_mask.shape) == 3:
+                 attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
+                     bsz * self.num_heads, tgt_len, src_len)
+             attn_weights = attn_weights + attn_mask
+
+         if enc_dec_attn_constraint_mask is not None:  # bs x head x L_kv
+             attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+             attn_weights = attn_weights.masked_fill(
+                 enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
+                 -1e9,
+             )
+             attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+         if key_padding_mask is not None:
+             # don't attend to padding symbols
+             attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+             attn_weights = attn_weights.masked_fill(
+                 key_padding_mask.unsqueeze(1).unsqueeze(2),
+                 -1e9,
+             )
+             attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+         attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+
+         if before_softmax:
+             return attn_weights, v
+
+         attn_weights_float = utils.softmax(attn_weights, dim=-1)
+         attn_weights = attn_weights_float.type_as(attn_weights)
+         attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
+
+         if reset_attn_weight is not None:
+             if reset_attn_weight:
+                 self.last_attn_probs = attn_probs.detach()
+             else:
+                 assert self.last_attn_probs is not None
+                 attn_probs = self.last_attn_probs
+         attn = torch.bmm(attn_probs, v)
+         assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
+         attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
+         attn = self.out_proj(attn)
+
+         if need_weights:
+             attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
+             if not need_head_weights:
+                 # average attention weights over heads
+                 attn_weights = attn_weights.mean(dim=0)
+         else:
+             attn_weights = None
+
+         return attn, (attn_weights, attn_logits)
+
+     def in_proj_qkv(self, query):
+         return self._in_proj(query).chunk(3, dim=-1)
+
+     def in_proj_q(self, query):
+         if self.qkv_same_dim:
+             return self._in_proj(query, end=self.embed_dim)
+         else:
+             bias = self.in_proj_bias
+             if bias is not None:
+                 bias = bias[:self.embed_dim]
+             return F.linear(query, self.q_proj_weight, bias)
+
+     def in_proj_k(self, key):
+         if self.qkv_same_dim:
+             return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
+         else:
+             weight = self.k_proj_weight
+             bias = self.in_proj_bias
+             if bias is not None:
+                 bias = bias[self.embed_dim:2 * self.embed_dim]
+             return F.linear(key, weight, bias)
+
+     def in_proj_v(self, value):
+         if self.qkv_same_dim:
+             return self._in_proj(value, start=2 * self.embed_dim)
+         else:
+             weight = self.v_proj_weight
+             bias = self.in_proj_bias
+             if bias is not None:
+                 bias = bias[2 * self.embed_dim:]
+             return F.linear(value, weight, bias)
+
+     def _in_proj(self, input, start=0, end=None):
+         weight = self.in_proj_weight
+         bias = self.in_proj_bias
+         weight = weight[start:end, :]
+         if bias is not None:
+             bias = bias[start:end]
+         return F.linear(input, weight, bias)
+
+     def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
+         return attn_weights
+
+
+ class Swish(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, i):
+         result = i * torch.sigmoid(i)
+         ctx.save_for_backward(i)
+         return result
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         i = ctx.saved_variables[0]
+         sigmoid_i = torch.sigmoid(i)
+         return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
+
+
+ class CustomSwish(nn.Module):
+     def forward(self, input_tensor):
+         return Swish.apply(input_tensor)
+
+
+ class Mish(nn.Module):
+     def forward(self, x):
+         return x * torch.tanh(F.softplus(x))
+
+
+ class TransformerFFNLayer(nn.Module):
+     def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
+         super().__init__()
+         self.kernel_size = kernel_size
+         self.dropout = dropout
+         self.act = act
+         if padding == 'SAME':
+             self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
+         elif padding == 'LEFT':
+             self.ffn_1 = nn.Sequential(
+                 nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
+                 nn.Conv1d(hidden_size, filter_size, kernel_size)
+             )
+         self.ffn_2 = Linear(filter_size, hidden_size)
+         if self.act == 'swish':
+             self.swish_fn = CustomSwish()
+
+     def forward(self, x, incremental_state=None):
+         # x: T x B x C
+         if incremental_state is not None:
+             assert incremental_state is None, 'Nar-generation does not allow this.'
+             exit(1)
+
+         x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
+         x = x * self.kernel_size ** -0.5
+
+         if incremental_state is not None:
+             x = x[-1:]
+         if self.act == 'gelu':
+             x = F.gelu(x)
+         if self.act == 'relu':
+             x = F.relu(x)
+         if self.act == 'swish':
+             x = self.swish_fn(x)
+         x = F.dropout(x, self.dropout, training=self.training)
+         x = self.ffn_2(x)
+         return x
+
+
+ class BatchNorm1dTBC(nn.Module):
+     def __init__(self, c):
+         super(BatchNorm1dTBC, self).__init__()
+         self.bn = nn.BatchNorm1d(c)
+
+     def forward(self, x):
+         """
+
+         :param x: [T, B, C]
+         :return: [T, B, C]
+         """
+         x = x.permute(1, 2, 0)  # [B, C, T]
+         x = self.bn(x)  # [B, C, T]
+         x = x.permute(2, 0, 1)  # [T, B, C]
+         return x
+
+
+ class EncSALayer(nn.Module):
+     def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
+                  relu_dropout=0.1, kernel_size=9, padding='SAME', norm='ln', act='gelu'):
+         super().__init__()
+         self.c = c
+         self.dropout = dropout
+         self.num_heads = num_heads
+         if num_heads > 0:
+             if norm == 'ln':
+                 self.layer_norm1 = LayerNorm(c)
+             elif norm == 'bn':
+                 self.layer_norm1 = BatchNorm1dTBC(c)
+             self.self_attn = MultiheadAttention(
+                 self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
+             )
+         if norm == 'ln':
+             self.layer_norm2 = LayerNorm(c)
+         elif norm == 'bn':
+             self.layer_norm2 = BatchNorm1dTBC(c)
+         self.ffn = TransformerFFNLayer(
+             c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)
+
+     def forward(self, x, encoder_padding_mask=None, **kwargs):
+         layer_norm_training = kwargs.get('layer_norm_training', None)
+         if layer_norm_training is not None:
+             self.layer_norm1.training = layer_norm_training
+             self.layer_norm2.training = layer_norm_training
+         if self.num_heads > 0:
+             residual = x
+             x = self.layer_norm1(x)
+             x, _, = self.self_attn(
+                 query=x,
+                 key=x,
+                 value=x,
+                 key_padding_mask=encoder_padding_mask
+             )
+             x = F.dropout(x, self.dropout, training=self.training)
+             x = residual + x
+             x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
+
+         residual = x
+         x = self.layer_norm2(x)
+         x = self.ffn(x)
+         x = F.dropout(x, self.dropout, training=self.training)
+         x = residual + x
+         x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
+         return x
+
+
+ class DecSALayer(nn.Module):
+     def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, kernel_size=9, act='gelu'):
+         super().__init__()
+         self.c = c
+         self.dropout = dropout
+         self.layer_norm1 = LayerNorm(c)
+         self.self_attn = MultiheadAttention(
+             c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
+         )
+         self.layer_norm2 = LayerNorm(c)
+         self.encoder_attn = MultiheadAttention(
+             c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
+         )
+         self.layer_norm3 = LayerNorm(c)
+         self.ffn = TransformerFFNLayer(
+             c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
+
+     def forward(
+             self,
+             x,
+             encoder_out=None,
+             encoder_padding_mask=None,
+             incremental_state=None,
+             self_attn_mask=None,
+             self_attn_padding_mask=None,
+             attn_out=None,
+             reset_attn_weight=None,
+             **kwargs,
+     ):
+         layer_norm_training = kwargs.get('layer_norm_training', None)
+         if layer_norm_training is not None:
+             self.layer_norm1.training = layer_norm_training
+             self.layer_norm2.training = layer_norm_training
+             self.layer_norm3.training = layer_norm_training
+         residual = x
+         x = self.layer_norm1(x)
+         x, _ = self.self_attn(
+             query=x,
+             key=x,
+             value=x,
+             key_padding_mask=self_attn_padding_mask,
+             incremental_state=incremental_state,
+             attn_mask=self_attn_mask
+         )
+         x = F.dropout(x, self.dropout, training=self.training)
+         x = residual + x
+
+         residual = x
+         x = self.layer_norm2(x)
+         if encoder_out is not None:
+             x, attn = self.encoder_attn(
+                 query=x,
+                 key=encoder_out,
+                 value=encoder_out,
+                 key_padding_mask=encoder_padding_mask,
+                 incremental_state=incremental_state,
+                 static_kv=True,
+                 enc_dec_attn_constraint_mask=None,  # utils.get_incremental_state(self, incremental_state, 'enc_dec_attn_constraint_mask'),
+                 reset_attn_weight=reset_attn_weight
+             )
+             attn_logits = attn[1]
+         else:
+             assert attn_out is not None
+             x = self.encoder_attn.in_proj_v(attn_out.transpose(0, 1))
+             attn_logits = None
+         x = F.dropout(x, self.dropout, training=self.training)
+         x = residual + x
+
+         residual = x
+         x = self.layer_norm3(x)
+         x = self.ffn(x, incremental_state=incremental_state)
+         x = F.dropout(x, self.dropout, training=self.training)
+         x = residual + x
+         # if len(attn_logits.size()) > 3:
+         #     indices = attn_logits.softmax(-1).max(-1).values.sum(-1).argmax(-1)
+         #     attn_logits = attn_logits.gather(1,
+         #         indices[:, None, None, None].repeat(1, 1, attn_logits.size(-2), attn_logits.size(-1))).squeeze(1)
+         return x, attn_logits
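A minimal usage sketch for the encoder block defined above, assuming this file is importable as modules.commons.common_layers and that the `utils` module it imports is on the path (shapes follow the module's own [T, B, C] convention):

import torch
from modules.commons.common_layers import EncSALayer

layer = EncSALayer(c=256, num_heads=2, dropout=0.1)
x = torch.randn(100, 4, 256)                 # [T, B, C]
pad = torch.zeros(4, 100, dtype=torch.bool)  # [B, T]; True marks padded frames
y = layer(x, encoder_padding_mask=pad)       # pre-norm self-attention + conv FFN, both residual
print(y.shape)                               # torch.Size([100, 4, 256])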
modules/commons/espnet_positional_embedding.py ADDED
@@ -0,0 +1,113 @@
+ import math
+ import torch
+
+
+ class PositionalEncoding(torch.nn.Module):
+     """Positional encoding.
+     Args:
+         d_model (int): Embedding dimension.
+         dropout_rate (float): Dropout rate.
+         max_len (int): Maximum input length.
+         reverse (bool): Whether to reverse the input position.
+     """
+
+     def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
+         """Construct a PositionalEncoding object."""
+         super(PositionalEncoding, self).__init__()
+         self.d_model = d_model
+         self.reverse = reverse
+         self.xscale = math.sqrt(self.d_model)
+         self.dropout = torch.nn.Dropout(p=dropout_rate)
+         self.pe = None
+         self.extend_pe(torch.tensor(0.0).expand(1, max_len))
+
+     def extend_pe(self, x):
+         """Reset the positional encodings."""
+         if self.pe is not None:
+             if self.pe.size(1) >= x.size(1):
+                 if self.pe.dtype != x.dtype or self.pe.device != x.device:
+                     self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+                 return
+         pe = torch.zeros(x.size(1), self.d_model)
+         if self.reverse:
+             position = torch.arange(
+                 x.size(1) - 1, -1, -1.0, dtype=torch.float32
+             ).unsqueeze(1)
+         else:
+             position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+         div_term = torch.exp(
+             torch.arange(0, self.d_model, 2, dtype=torch.float32)
+             * -(math.log(10000.0) / self.d_model)
+         )
+         pe[:, 0::2] = torch.sin(position * div_term)
+         pe[:, 1::2] = torch.cos(position * div_term)
+         pe = pe.unsqueeze(0)
+         self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+     def forward(self, x: torch.Tensor):
+         """Add positional encoding.
+         Args:
+             x (torch.Tensor): Input tensor (batch, time, `*`).
+         Returns:
+             torch.Tensor: Encoded tensor (batch, time, `*`).
+         """
+         self.extend_pe(x)
+         x = x * self.xscale + self.pe[:, : x.size(1)]
+         return self.dropout(x)
+
+
+ class ScaledPositionalEncoding(PositionalEncoding):
+     """Scaled positional encoding module.
+     See Sec. 3.2 https://arxiv.org/abs/1809.08895
+     Args:
+         d_model (int): Embedding dimension.
+         dropout_rate (float): Dropout rate.
+         max_len (int): Maximum input length.
+     """
+
+     def __init__(self, d_model, dropout_rate, max_len=5000):
+         """Initialize class."""
+         super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
+         self.alpha = torch.nn.Parameter(torch.tensor(1.0))
+
+     def reset_parameters(self):
+         """Reset parameters."""
+         self.alpha.data = torch.tensor(1.0)
+
+     def forward(self, x):
+         """Add positional encoding.
+         Args:
+             x (torch.Tensor): Input tensor (batch, time, `*`).
+         Returns:
+             torch.Tensor: Encoded tensor (batch, time, `*`).
+         """
+         self.extend_pe(x)
+         x = x + self.alpha * self.pe[:, : x.size(1)]
+         return self.dropout(x)
+
+
+ class RelPositionalEncoding(PositionalEncoding):
+     """Relative positional encoding module.
+     See : Appendix B in https://arxiv.org/abs/1901.02860
+     Args:
+         d_model (int): Embedding dimension.
+         dropout_rate (float): Dropout rate.
+         max_len (int): Maximum input length.
+     """
+
+     def __init__(self, d_model, dropout_rate, max_len=5000):
+         """Initialize class."""
+         super().__init__(d_model, dropout_rate, max_len, reverse=True)
+
+     def forward(self, x):
+         """Compute positional encoding.
+         Args:
+             x (torch.Tensor): Input tensor (batch, time, `*`).
+         Returns:
+             torch.Tensor: Encoded tensor (batch, time, `*`).
+             torch.Tensor: Positional embedding tensor (1, time, `*`).
+         """
+         self.extend_pe(x)
+         x = x * self.xscale
+         pos_emb = self.pe[:, : x.size(1)]
+         return self.dropout(x) + self.dropout(pos_emb)
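A small usage sketch for the absolute-position variant above (import path assumed to match this repo's layout):

import torch
from modules.commons.espnet_positional_embedding import PositionalEncoding

pe = PositionalEncoding(d_model=256, dropout_rate=0.1)
x = torch.randn(2, 100, 256)   # (batch, time, d_model)
y = pe(x)                      # x * sqrt(d_model) + PE, then dropout
print(y.shape)                 # torch.Size([2, 100, 256])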
modules/commons/ssim.py ADDED
@@ -0,0 +1,391 @@
+ # '''
+ # https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py
+ # '''
+ #
+ # import torch
+ # import torch.jit
+ # import torch.nn.functional as F
+ #
+ #
+ # @torch.jit.script
+ # def create_window(window_size: int, sigma: float, channel: int):
+ #     '''
+ #     Create 1-D gauss kernel
+ #     :param window_size: the size of gauss kernel
+ #     :param sigma: sigma of normal distribution
+ #     :param channel: input channel
+ #     :return: 1D kernel
+ #     '''
+ #     coords = torch.arange(window_size, dtype=torch.float)
+ #     coords -= window_size // 2
+ #
+ #     g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
+ #     g /= g.sum()
+ #
+ #     g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1)
+ #     return g
+ #
+ #
+ # @torch.jit.script
+ # def _gaussian_filter(x, window_1d, use_padding: bool):
+ #     '''
+ #     Blur input with 1-D kernel
+ #     :param x: batch of tensors to be blured
+ #     :param window_1d: 1-D gauss kernel
+ #     :param use_padding: padding image before conv
+ #     :return: blured tensors
+ #     '''
+ #     C = x.shape[1]
+ #     padding = 0
+ #     if use_padding:
+ #         window_size = window_1d.shape[3]
+ #         padding = window_size // 2
+ #     out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C)
+ #     out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C)
+ #     return out
+ #
+ #
+ # @torch.jit.script
+ # def ssim(X, Y, window, data_range: float, use_padding: bool = False):
+ #     '''
+ #     Calculate ssim index for X and Y
+ #     :param X: images [B, C, H, N_bins]
+ #     :param Y: images [B, C, H, N_bins]
+ #     :param window: 1-D gauss kernel
+ #     :param data_range: value range of input images. (usually 1.0 or 255)
+ #     :param use_padding: padding image before conv
+ #     :return:
+ #     '''
+ #
+ #     K1 = 0.01
+ #     K2 = 0.03
+ #     compensation = 1.0
+ #
+ #     C1 = (K1 * data_range) ** 2
+ #     C2 = (K2 * data_range) ** 2
+ #
+ #     mu1 = _gaussian_filter(X, window, use_padding)
+ #     mu2 = _gaussian_filter(Y, window, use_padding)
+ #     sigma1_sq = _gaussian_filter(X * X, window, use_padding)
+ #     sigma2_sq = _gaussian_filter(Y * Y, window, use_padding)
+ #     sigma12 = _gaussian_filter(X * Y, window, use_padding)
+ #
+ #     mu1_sq = mu1.pow(2)
+ #     mu2_sq = mu2.pow(2)
+ #     mu1_mu2 = mu1 * mu2
+ #
+ #     sigma1_sq = compensation * (sigma1_sq - mu1_sq)
+ #     sigma2_sq = compensation * (sigma2_sq - mu2_sq)
+ #     sigma12 = compensation * (sigma12 - mu1_mu2)
+ #
+ #     cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
+ #     # Fixed the issue that negative values of cs_map caused ms_ssim to output NaN.
+ #     cs_map = cs_map.clamp_min(0.)
+ #     ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
+ #
+ #     ssim_val = ssim_map.mean(dim=(1, 2, 3))  # reduce along CHW
+ #     cs = cs_map.mean(dim=(1, 2, 3))
+ #
+ #     return ssim_val, cs
+ #
+ #
+ # @torch.jit.script
+ # def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8):
+ #     '''
+ #     interface of ms-ssim
+ #     :param X: a batch of images, (N,C,H,W)
+ #     :param Y: a batch of images, (N,C,H,W)
+ #     :param window: 1-D gauss kernel
+ #     :param data_range: value range of input images. (usually 1.0 or 255)
+ #     :param weights: weights for different levels
+ #     :param use_padding: padding image before conv
+ #     :param eps: use for avoid grad nan.
+ #     :return:
+ #     '''
+ #     levels = weights.shape[0]
+ #     cs_vals = []
+ #     ssim_vals = []
+ #     for _ in range(levels):
+ #         ssim_val, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding)
+ #         # Used to fix an issue: when c = a ** b and a is 0, c.backward() makes a.grad inf.
+ #         ssim_val = ssim_val.clamp_min(eps)
+ #         cs = cs.clamp_min(eps)
+ #         cs_vals.append(cs)
+ #
+ #         ssim_vals.append(ssim_val)
+ #         padding = (X.shape[2] % 2, X.shape[3] % 2)
+ #         X = F.avg_pool2d(X, kernel_size=2, stride=2, padding=padding)
+ #         Y = F.avg_pool2d(Y, kernel_size=2, stride=2, padding=padding)
+ #
+ #     cs_vals = torch.stack(cs_vals, dim=0)
+ #     ms_ssim_val = torch.prod((cs_vals[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_vals[-1] ** weights[-1]), dim=0)
+ #     return ms_ssim_val
+ #
+ #
+ # class SSIM(torch.jit.ScriptModule):
+ #     __constants__ = ['data_range', 'use_padding']
+ #
+ #     def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False):
+ #         '''
+ #         :param window_size: the size of gauss kernel
+ #         :param window_sigma: sigma of normal distribution
+ #         :param data_range: value range of input images. (usually 1.0 or 255)
+ #         :param channel: input channels (default: 3)
+ #         :param use_padding: padding image before conv
+ #         '''
+ #         super().__init__()
+ #         assert window_size % 2 == 1, 'Window size must be odd.'
+ #         window = create_window(window_size, window_sigma, channel)
+ #         self.register_buffer('window', window)
+ #         self.data_range = data_range
+ #         self.use_padding = use_padding
+ #
+ #     @torch.jit.script_method
+ #     def forward(self, X, Y):
+ #         r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding)
+ #         return r[0]
+ #
+ #
+ # class MS_SSIM(torch.jit.ScriptModule):
+ #     __constants__ = ['data_range', 'use_padding', 'eps']
+ #
+ #     def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None,
+ #                  levels=None, eps=1e-8):
+ #         '''
+ #         class for ms-ssim
+ #         :param window_size: the size of gauss kernel
+ #         :param window_sigma: sigma of normal distribution
+ #         :param data_range: value range of input images. (usually 1.0 or 255)
+ #         :param channel: input channels
+ #         :param use_padding: padding image before conv
+ #         :param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
+ #         :param levels: number of downsampling
+ #         :param eps: used to fix an issue: when c = a ** b and a is 0, c.backward() makes a.grad inf.
+ #         '''
+ #         super().__init__()
+ #         assert window_size % 2 == 1, 'Window size must be odd.'
+ #         self.data_range = data_range
+ #         self.use_padding = use_padding
+ #         self.eps = eps
+ #
+ #         window = create_window(window_size, window_sigma, channel)
+ #         self.register_buffer('window', window)
+ #
+ #         if weights is None:
+ #             weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+ #         weights = torch.tensor(weights, dtype=torch.float)
+ #
+ #         if levels is not None:
+ #             weights = weights[:levels]
+ #             weights = weights / weights.sum()
+ #
+ #         self.register_buffer('weights', weights)
+ #
+ #     @torch.jit.script_method
+ #     def forward(self, X, Y):
+ #         return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights,
+ #                        use_padding=self.use_padding, eps=self.eps)
+ #
+ #
+ # if __name__ == '__main__':
+ #     print('Simple Test')
+ #     im = torch.randint(0, 255, (5, 3, 256, 256), dtype=torch.float, device='cuda')
+ #     img1 = im / 255
+ #     img2 = img1 * 0.5
+ #
+ #     losser = SSIM(data_range=1.).cuda()
+ #     loss = losser(img1, img2).mean()
+ #
+ #     losser2 = MS_SSIM(data_range=1.).cuda()
+ #     loss2 = losser2(img1, img2).mean()
+ #
+ #     print(loss.item())
+ #     print(loss2.item())
+ #
+ # if __name__ == '__main__':
+ #     print('Training Test')
+ #     import cv2
+ #     import torch.optim
+ #     import numpy as np
+ #     import imageio
+ #     import time
+ #
+ #     out_test_video = False
+ #     # Better not to write the GIF directly -- it gets very large; write an MKV first and convert it to GIF with ffmpeg.
+ #     video_use_gif = False
+ #
+ #     im = cv2.imread('test_img1.jpg', 1)
+ #     t_im = torch.from_numpy(im).cuda().permute(2, 0, 1).float()[None] / 255.
+ #
+ #     if out_test_video:
+ #         if video_use_gif:
+ #             fps = 0.5
+ #             out_wh = (im.shape[1] // 2, im.shape[0] // 2)
+ #             suffix = '.gif'
+ #         else:
+ #             fps = 5
+ #             out_wh = (im.shape[1], im.shape[0])
+ #             suffix = '.mkv'
+ #         video_last_time = time.perf_counter()
+ #         video = imageio.get_writer('ssim_test' + suffix, fps=fps)
+ #
+ #     # Test SSIM
+ #     print('Training SSIM')
+ #     rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255.
+ #     rand_im.requires_grad = True
+ #     optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8)
+ #     losser = SSIM(data_range=1., channel=t_im.shape[1]).cuda()
+ #     ssim_score = 0
+ #     while ssim_score < 0.999:
+ #         optim.zero_grad()
+ #         loss = losser(rand_im, t_im)
+ #         (-loss).sum().backward()
+ #         ssim_score = loss.item()
+ #         optim.step()
+ #         r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0]
+ #         r_im = cv2.putText(r_im, 'ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
+ #
+ #         if out_test_video:
+ #             if time.perf_counter() - video_last_time > 1. / fps:
+ #                 video_last_time = time.perf_counter()
+ #                 out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB)
+ #                 out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA)
+ #                 if isinstance(out_frame, cv2.UMat):
+ #                     out_frame = out_frame.get()
+ #                 video.append_data(out_frame)
+ #
+ #         cv2.imshow('ssim', r_im)
+ #         cv2.setWindowTitle('ssim', 'ssim %f' % ssim_score)
+ #         cv2.waitKey(1)
+ #
+ #     if out_test_video:
+ #         video.close()
+ #
+ #     # Test MS-SSIM
+ #     if out_test_video:
+ #         if video_use_gif:
+ #             fps = 0.5
+ #             out_wh = (im.shape[1] // 2, im.shape[0] // 2)
+ #             suffix = '.gif'
+ #         else:
+ #             fps = 5
+ #             out_wh = (im.shape[1], im.shape[0])
+ #             suffix = '.mkv'
+ #         video_last_time = time.perf_counter()
+ #         video = imageio.get_writer('ms_ssim_test' + suffix, fps=fps)
+ #
+ #     print('Training MS_SSIM')
+ #     rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255.
+ #     rand_im.requires_grad = True
+ #     optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8)
+ #     losser = MS_SSIM(data_range=1., channel=t_im.shape[1]).cuda()
+ #     ssim_score = 0
+ #     while ssim_score < 0.999:
+ #         optim.zero_grad()
+ #         loss = losser(rand_im, t_im)
+ #         (-loss).sum().backward()
+ #         ssim_score = loss.item()
+ #         optim.step()
+ #         r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0]
+ #         r_im = cv2.putText(r_im, 'ms_ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
+ #
+ #         if out_test_video:
+ #             if time.perf_counter() - video_last_time > 1. / fps:
+ #                 video_last_time = time.perf_counter()
+ #                 out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB)
+ #                 out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA)
+ #                 if isinstance(out_frame, cv2.UMat):
+ #                     out_frame = out_frame.get()
+ #                 video.append_data(out_frame)
+ #
+ #         cv2.imshow('ms_ssim', r_im)
+ #         cv2.setWindowTitle('ms_ssim', 'ms_ssim %f' % ssim_score)
+ #         cv2.waitKey(1)
+ #
+ #     if out_test_video:
+ #         video.close()
+
+ """
+ Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim
+ """
+
+ import torch
+ import torch.nn.functional as F
+ from torch.autograd import Variable
+ import numpy as np
+ from math import exp
+
+
+ def gaussian(window_size, sigma):
+     gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
+     return gauss / gauss.sum()
+
+
+ def create_window(window_size, channel):
+     _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+     _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
+     window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
+     return window
+
+
+ def _ssim(img1, img2, window, window_size, channel, size_average=True):
+     mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
+     mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
+
+     mu1_sq = mu1.pow(2)
+     mu2_sq = mu2.pow(2)
+     mu1_mu2 = mu1 * mu2
+
+     sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
+     sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
+     sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
+
+     C1 = 0.01 ** 2
+     C2 = 0.03 ** 2
+
+     ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
+
+     if size_average:
+         return ssim_map.mean()
+     else:
+         return ssim_map.mean(1)
+
+
+ class SSIM(torch.nn.Module):
+     def __init__(self, window_size=11, size_average=True):
+         super(SSIM, self).__init__()
+         self.window_size = window_size
+         self.size_average = size_average
+         self.channel = 1
+         self.window = create_window(window_size, self.channel)
+
+     def forward(self, img1, img2):
+         (_, channel, _, _) = img1.size()
+
+         if channel == self.channel and self.window.data.type() == img1.data.type():
+             window = self.window
+         else:
+             window = create_window(self.window_size, channel)
+
+             if img1.is_cuda:
+                 window = window.cuda(img1.get_device())
+             window = window.type_as(img1)
+
+             self.window = window
+             self.channel = channel
+
+         return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
+
+
+ window = None
+
+
+ def ssim(img1, img2, window_size=11, size_average=True):
+     (_, channel, _, _) = img1.size()
+     global window
+     if window is None:
+         window = create_window(window_size, channel)
+         if img1.is_cuda:
+             window = window.cuda(img1.get_device())
+         window = window.type_as(img1)
+     return _ssim(img1, img2, window, window_size, channel, size_average)
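A quick sanity check for the functional form above, assuming inputs scaled to [0, 1] (which matches the fixed C1/C2 constants):

import torch
from modules.commons.ssim import ssim

a = torch.rand(4, 1, 80, 100)   # e.g. mel spectrograms as (B, C, H, W)
print(ssim(a, a.clone()).item())           # ~1.0 for identical inputs
print(ssim(a, torch.rand_like(a)).item())  # lower for unrelated inputs

Note that the functional ssim caches its Gaussian window in a module-level global sized for the first call's channel count, so mixing channel counts across calls requires the SSIM class instead.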
modules/fastspeech/__pycache__/fs2.cpython-38.pyc ADDED
Binary file (5.87 kB)
modules/fastspeech/__pycache__/pe.cpython-38.pyc ADDED
Binary file (5.05 kB)
modules/fastspeech/__pycache__/tts_modules.cpython-38.pyc ADDED
Binary file (13.6 kB)
modules/fastspeech/fs2.py ADDED
@@ -0,0 +1,255 @@
+ from modules.commons.common_layers import *
+ from modules.commons.common_layers import Embedding
+ from modules.fastspeech.tts_modules import FastspeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, \
+     EnergyPredictor, FastspeechEncoder
+ from utils.cwt import cwt2f0
+ from utils.hparams import hparams
+ from utils.pitch_utils import f0_to_coarse, denorm_f0, norm_f0
+
+ FS_ENCODERS = {
+     'fft': lambda hp: FastspeechEncoder(
+         hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'],
+         num_heads=hp['num_heads']),
+ }
+
+ FS_DECODERS = {
+     'fft': lambda hp: FastspeechDecoder(
+         hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
+ }
+
+
+ class FastSpeech2(nn.Module):
+     def __init__(self, dictionary, out_dims=None):
+         super().__init__()
+         # self.dictionary = dictionary
+         self.padding_idx = 0
+         if not hparams.get('no_fs2', False):  # equivalent to `not hparams['no_fs2'] if 'no_fs2' in hparams.keys() else True`
+             self.enc_layers = hparams['enc_layers']
+             self.dec_layers = hparams['dec_layers']
+             self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams)
+             self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
+         self.hidden_size = hparams['hidden_size']
+         # self.encoder_embed_tokens = self.build_embedding(self.dictionary, self.hidden_size)
+         self.out_dims = out_dims
+         if out_dims is None:
+             self.out_dims = hparams['audio_num_mel_bins']
+         self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
+         # =========not used===========
+         # if hparams['use_spk_id']:
+         #     self.spk_embed_proj = Embedding(hparams['num_spk'] + 1, self.hidden_size)
+         #     if hparams['use_split_spk_id']:
+         #         self.spk_embed_f0 = Embedding(hparams['num_spk'] + 1, self.hidden_size)
+         #         self.spk_embed_dur = Embedding(hparams['num_spk'] + 1, self.hidden_size)
+         # elif hparams['use_spk_embed']:
+         #     self.spk_embed_proj = Linear(256, self.hidden_size, bias=True)
+         predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
+         # self.dur_predictor = DurationPredictor(
+         #     self.hidden_size,
+         #     n_chans=predictor_hidden,
+         #     n_layers=hparams['dur_predictor_layers'],
+         #     dropout_rate=hparams['predictor_dropout'], padding=hparams['ffn_padding'],
+         #     kernel_size=hparams['dur_predictor_kernel'])
+         # self.length_regulator = LengthRegulator()
+         if hparams['use_pitch_embed']:
+             self.pitch_embed = Embedding(300, self.hidden_size, self.padding_idx)
+             if hparams['pitch_type'] == 'cwt':
+                 h = hparams['cwt_hidden_size']
+                 cwt_out_dims = 10
+                 if hparams['use_uv']:
+                     cwt_out_dims = cwt_out_dims + 1
+                 self.cwt_predictor = nn.Sequential(
+                     nn.Linear(self.hidden_size, h),
+                     PitchPredictor(
+                         h,
+                         n_chans=predictor_hidden,
+                         n_layers=hparams['predictor_layers'],
+                         dropout_rate=hparams['predictor_dropout'], odim=cwt_out_dims,
+                         padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel']))
+                 self.cwt_stats_layers = nn.Sequential(
+                     nn.Linear(self.hidden_size, h), nn.ReLU(),
+                     nn.Linear(h, h), nn.ReLU(), nn.Linear(h, 2)
+                 )
+             else:
+                 self.pitch_predictor = PitchPredictor(
+                     self.hidden_size,
+                     n_chans=predictor_hidden,
+                     n_layers=hparams['predictor_layers'],
+                     dropout_rate=hparams['predictor_dropout'],
+                     odim=2 if hparams['pitch_type'] == 'frame' else 1,
+                     padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
+         if hparams['use_energy_embed']:
+             self.energy_embed = Embedding(256, self.hidden_size, self.padding_idx)
+             # self.energy_predictor = EnergyPredictor(
+             #     self.hidden_size,
+             #     n_chans=predictor_hidden,
+             #     n_layers=hparams['predictor_layers'],
+             #     dropout_rate=hparams['predictor_dropout'], odim=1,
+             #     padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
+
+     # def build_embedding(self, dictionary, embed_dim):
+     #     num_embeddings = len(dictionary)
+     #     emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
+     #     return emb
+
+     def forward(self, hubert, mel2ph=None, spk_embed=None,
+                 ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=True,
+                 spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs):
+         ret = {}
+         if not hparams.get('no_fs2', False):
+             encoder_out = self.encoder(hubert)  # [B, T, C]
+         else:
+             encoder_out = hubert
+         src_nonpadding = (hubert != 0).any(-1)[:, :, None]
+
+         # add ref style embed
+         # Not implemented
+         # variance encoder
+         var_embed = 0
+
+         # encoder_out_dur denotes encoder outputs for duration predictor
+         # in speech adaptation, duration predictor use old speaker embedding
+         if hparams['use_spk_embed']:
+             spk_embed_dur = spk_embed_f0 = spk_embed = self.spk_embed_proj(spk_embed)[:, None, :]
+         elif hparams['use_spk_id']:
+             spk_embed_id = spk_embed
+             if spk_embed_dur_id is None:
+                 spk_embed_dur_id = spk_embed_id
+             if spk_embed_f0_id is None:
+                 spk_embed_f0_id = spk_embed_id
+             spk_embed = self.spk_embed_proj(spk_embed_id)[:, None, :]
+             spk_embed_dur = spk_embed_f0 = spk_embed
+             if hparams['use_split_spk_id']:
+                 spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :]
+                 spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :]
+         else:
+             spk_embed_dur = spk_embed_f0 = spk_embed = 0
+
+         # add dur
+         # dur_inp = (encoder_out + var_embed + spk_embed_dur) * src_nonpadding
+
+         # mel2ph = self.add_dur(dur_inp, mel2ph, hubert, ret)
+         ret['mel2ph'] = mel2ph
+
+         decoder_inp = F.pad(encoder_out, [0, 0, 1, 0])
+
+         mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]])
+         decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_)  # [B, T, H]
+
+         tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
+
+         # add pitch and energy embed
+         pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding
+         if hparams['use_pitch_embed']:
+             pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding
+             decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph)
+         if hparams['use_energy_embed']:
+             decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret)
+
+         ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding
+         if not hparams.get('no_fs2', False):
+             if skip_decoder:
+                 return ret
+             ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
+
+         return ret
+
+     def add_dur(self, dur_input, mel2ph, hubert, ret):
+         src_padding = (hubert == 0).all(-1)
+         dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
+         if mel2ph is None:
+             dur, xs = self.dur_predictor.inference(dur_input, src_padding)
+             ret['dur'] = xs
+             ret['dur_choice'] = dur
+             mel2ph = self.length_regulator(dur, src_padding).detach()
+         else:
+             ret['dur'] = self.dur_predictor(dur_input, src_padding)
+         ret['mel2ph'] = mel2ph
+         return mel2ph
+
+     def run_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs):
+         x = decoder_inp  # [B, T, H]
+         x = self.decoder(x)
+         x = self.mel_out(x)
+         return x * tgt_nonpadding
+
+     def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
+         f0 = cwt2f0(cwt_spec, mean, std, hparams['cwt_scales'])
+         f0 = torch.cat(
+             [f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1)
+         f0_norm = norm_f0(f0, None, hparams)
+         return f0_norm
+
+     def out2mel(self, out):
+         return out
+
+     def add_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
+         # if hparams['pitch_type'] == 'ph':
+         #     pitch_pred_inp = encoder_out.detach() + hparams['predictor_grad'] * (encoder_out - encoder_out.detach())
+         #     pitch_padding = (encoder_out.sum().abs() == 0)
+         #     ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp)
+         #     if f0 is None:
+         #         f0 = pitch_pred[:, :, 0]
+         #     ret['f0_denorm'] = f0_denorm = denorm_f0(f0, None, hparams, pitch_padding=pitch_padding)
+         #     pitch = f0_to_coarse(f0_denorm)  # start from 0 [B, T_txt]
+         #     pitch = F.pad(pitch, [1, 0])
+         #     pitch = torch.gather(pitch, 1, mel2ph)  # [B, T_mel]
+         #     pitch_embedding = pitch_embed(pitch)
+         #     return pitch_embedding
+
+         decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
+
+         pitch_padding = (mel2ph == 0)
+
+         # if hparams['pitch_type'] == 'cwt':
+         #     # NOTE: this part of script is *isolated* from other scripts, which means
+         #     # it may not be compatible with the current version.
+         #     pass
+         #     # pitch_padding = None
+         #     # ret['cwt'] = cwt_out = self.cwt_predictor(decoder_inp)
+         #     # stats_out = self.cwt_stats_layers(encoder_out[:, 0, :])  # [B, 2]
+         #     # mean = ret['f0_mean'] = stats_out[:, 0]
+         #     # std = ret['f0_std'] = stats_out[:, 1]
+         #     # cwt_spec = cwt_out[:, :, :10]
+         #     # if f0 is None:
+         #     #     std = std * hparams['cwt_std_scale']
+         #     #     f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
+         #     # if hparams['use_uv']:
+         #     #     assert cwt_out.shape[-1] == 11
+         #     #     uv = cwt_out[:, :, -1] > 0
+         # elif hparams['pitch_ar']:
+         #     ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp, f0 if is_training else None)
+         #     if f0 is None:
+         #         f0 = pitch_pred[:, :, 0]
+         # else:
+         # ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp)
+         # if f0 is None:
+         #     f0 = pitch_pred[:, :, 0]
+         # if hparams['use_uv'] and uv is None:
+         #     uv = pitch_pred[:, :, 1] > 0
+         ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv, hparams, pitch_padding=pitch_padding)
+         if pitch_padding is not None:
+             f0[pitch_padding] = 0
+
+         pitch = f0_to_coarse(f0_denorm, hparams)  # start from 0
+         ret['pitch_pred'] = pitch.unsqueeze(-1)
+         # print(ret['pitch_pred'].shape)
+         # print(pitch.shape)
+         pitch_embedding = self.pitch_embed(pitch)
238
+ return pitch_embedding
239
+
240
+ def add_energy(self,decoder_inp, energy, ret):
241
+ decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
242
+ ret['energy_pred'] = energy#energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
243
+ # if energy is None:
244
+ # energy = energy_pred
245
+ energy = torch.clamp(energy * 256 // 4, max=255).long() # energy_to_coarse
246
+ energy_embedding = self.energy_embed(energy)
247
+ return energy_embedding
248
+
249
+ @staticmethod
250
+ def mel_norm(x):
251
+ return (x + 5.5) / (6.3 / 2) - 1
252
+
253
+ @staticmethod
254
+ def mel_denorm(x):
255
+ return (x + 1) * (6.3 / 2) - 5.5
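Note: the `add_dur`/`add_pitch`/`add_energy` helpers all rely on the same gradient-scaling identity, `x.detach() + g * (x - x.detach())`: the forward value is exactly `x`, but the gradient flowing back into the encoder is multiplied by `hparams['predictor_grad']`, so the predictors train without dominating the encoder. (`mel_norm` and `mel_denorm` are exact inverses, mapping mel values in [-5.5, 0.8] onto [-1, 1].) A minimal standalone sketch of the trick, not part of the committed file:

    import torch

    def scale_grad(x: torch.Tensor, g: float) -> torch.Tensor:
        # Forward returns x unchanged; backward scales dL/dx by g,
        # mirroring the pattern in add_dur / add_pitch / add_energy above.
        return x.detach() + g * (x - x.detach())

    x = torch.randn(4, requires_grad=True)
    scale_grad(x, 0.1).sum().backward()
    print(x.grad)  # tensor([0.1000, 0.1000, 0.1000, 0.1000])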
modules/fastspeech/pe.py ADDED
@@ -0,0 +1,149 @@
+ from modules.commons.common_layers import *
+ from utils.hparams import hparams
+ from modules.fastspeech.tts_modules import PitchPredictor
+ from utils.pitch_utils import denorm_f0
+ 
+ 
+ class Prenet(nn.Module):
+     def __init__(self, in_dim=80, out_dim=256, kernel=5, n_layers=3, strides=None):
+         super(Prenet, self).__init__()
+         padding = kernel // 2
+         self.layers = []
+         self.strides = strides if strides is not None else [1] * n_layers
+         for l in range(n_layers):
+             self.layers.append(nn.Sequential(
+                 nn.Conv1d(in_dim, out_dim, kernel_size=kernel, padding=padding, stride=self.strides[l]),
+                 nn.ReLU(),
+                 nn.BatchNorm1d(out_dim)
+             ))
+             in_dim = out_dim
+         self.layers = nn.ModuleList(self.layers)
+         self.out_proj = nn.Linear(out_dim, out_dim)
+ 
+     def forward(self, x):
+         """
+         :param x: [B, T, 80]
+         :return: [L, B, T, H], [B, T, H]
+         """
+         # padding_mask = x.abs().sum(-1).eq(0).data  # [B, T]
+         padding_mask = x.abs().sum(-1).eq(0).detach()
+         nonpadding_mask_TB = 1 - padding_mask.float()[:, None, :]  # [B, 1, T]
+         x = x.transpose(1, 2)
+         hiddens = []
+         for i, l in enumerate(self.layers):
+             nonpadding_mask_TB = nonpadding_mask_TB[:, :, ::self.strides[i]]
+             x = l(x) * nonpadding_mask_TB
+             hiddens.append(x)
+         hiddens = torch.stack(hiddens, 0)  # [L, B, H, T]
+         hiddens = hiddens.transpose(2, 3)  # [L, B, T, H]
+         x = self.out_proj(x.transpose(1, 2))  # [B, T, H]
+         x = x * nonpadding_mask_TB.transpose(1, 2)
+         return hiddens, x
+ 
+ 
+ class ConvBlock(nn.Module):
+     def __init__(self, idim=80, n_chans=256, kernel_size=3, stride=1, norm='gn', dropout=0):
+         super().__init__()
+         self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride)
+         self.norm = norm
+         if self.norm == 'bn':
+             self.norm = nn.BatchNorm1d(n_chans)
+         elif self.norm == 'in':
+             self.norm = nn.InstanceNorm1d(n_chans, affine=True)
+         elif self.norm == 'gn':
+             self.norm = nn.GroupNorm(n_chans // 16, n_chans)
+         elif self.norm == 'ln':
+             self.norm = LayerNorm(n_chans // 16, n_chans)
+         elif self.norm == 'wn':
+             self.conv = torch.nn.utils.weight_norm(self.conv.conv)
+         self.dropout = nn.Dropout(dropout)
+         self.relu = nn.ReLU()
+ 
+     def forward(self, x):
+         """
+         :param x: [B, C, T]
+         :return: [B, C, T]
+         """
+         x = self.conv(x)
+         if not isinstance(self.norm, str):
+             if self.norm == 'none':
+                 pass
+             elif self.norm == 'ln':
+                 x = self.norm(x.transpose(1, 2)).transpose(1, 2)
+             else:
+                 x = self.norm(x)
+         x = self.relu(x)
+         x = self.dropout(x)
+         return x
+ 
+ 
+ class ConvStacks(nn.Module):
+     def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn',
+                  dropout=0, strides=None, res=True):
+         super().__init__()
+         self.conv = torch.nn.ModuleList()
+         self.kernel_size = kernel_size
+         self.res = res
+         self.in_proj = Linear(idim, n_chans)
+         if strides is None:
+             strides = [1] * n_layers
+         else:
+             assert len(strides) == n_layers
+         for idx in range(n_layers):
+             self.conv.append(ConvBlock(
+                 n_chans, n_chans, kernel_size, stride=strides[idx], norm=norm, dropout=dropout))
+         self.out_proj = Linear(n_chans, odim)
+ 
+     def forward(self, x, return_hiddens=False):
+         """
+         :param x: [B, T, H]
+         :return: [B, T, H]
+         """
+         x = self.in_proj(x)
+         x = x.transpose(1, -1)  # (B, idim, Tmax)
+         hiddens = []
+         for f in self.conv:
+             x_ = f(x)
+             x = x + x_ if self.res else x_  # (B, C, Tmax)
+             hiddens.append(x)
+         x = x.transpose(1, -1)
+         x = self.out_proj(x)  # (B, Tmax, H)
+         if return_hiddens:
+             hiddens = torch.stack(hiddens, 1)  # [B, L, C, T]
+             return x, hiddens
+         return x
+ 
+ 
+ class PitchExtractor(nn.Module):
+     def __init__(self, n_mel_bins=80, conv_layers=2):
+         super().__init__()
+         self.hidden_size = hparams['hidden_size']
+         self.predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
+         self.conv_layers = conv_layers
+ 
+         self.mel_prenet = Prenet(n_mel_bins, self.hidden_size, strides=[1, 1, 1])
+         if self.conv_layers > 0:
+             self.mel_encoder = ConvStacks(
+                 idim=self.hidden_size, n_chans=self.hidden_size, odim=self.hidden_size, n_layers=self.conv_layers)
+         self.pitch_predictor = PitchPredictor(
+             self.hidden_size, n_chans=self.predictor_hidden,
+             n_layers=5, dropout_rate=0.1, odim=2,
+             padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
+ 
+     def forward(self, mel_input=None):
+         ret = {}
+         mel_hidden = self.mel_prenet(mel_input)[1]
+         if self.conv_layers > 0:
+             mel_hidden = self.mel_encoder(mel_hidden)
+ 
+         ret['pitch_pred'] = pitch_pred = self.pitch_predictor(mel_hidden)
+ 
+         pitch_padding = mel_input.abs().sum(-1) == 0
+         use_uv = hparams['pitch_type'] == 'frame'  # and hparams['use_uv']
+         ret['f0_denorm_pred'] = denorm_f0(
+             pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None,
+             hparams, pitch_padding=pitch_padding)
+         return ret
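`PitchExtractor` is the standalone frame-level f0 estimator (the `0102_xiaoma_pe` checkpoint in this repo targets it): a Prenet plus ConvStacks encodes the mel spectrogram, and the PitchPredictor emits a (normalized f0, uv) pair per frame, which `denorm_f0` converts back to Hz. A rough usage sketch, assuming placeholder hparams; the real values, and the exact set of keys `denorm_f0` needs, come from the checkpoint's config.yaml:

    import torch
    from utils.hparams import hparams
    from modules.fastspeech.pe import PitchExtractor

    # Placeholder values for illustration only; load the real ones from config.yaml.
    hparams.update({'hidden_size': 256, 'predictor_hidden': -1,
                    'ffn_padding': 'SAME', 'predictor_kernel': 5,
                    'pitch_type': 'frame', 'pitch_norm': 'log'})

    pe = PitchExtractor(n_mel_bins=80, conv_layers=2).eval()
    mel = torch.randn(1, 100, 80)           # [B, T, n_mel_bins]
    with torch.no_grad():
        ret = pe(mel)
    print(ret['pitch_pred'].shape)       # [1, 100, 2]: normalized f0 + uv logit
    print(ret['f0_denorm_pred'].shape)   # [1, 100]: f0 in Hz, zeroed where padded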
modules/fastspeech/tts_modules.py ADDED
@@ -0,0 +1,364 @@
+ import logging
+ import math
+ 
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ 
+ from modules.commons.espnet_positional_embedding import RelPositionalEncoding
+ from modules.commons.common_layers import SinusoidalPositionalEmbedding, Linear, EncSALayer, DecSALayer, BatchNorm1dTBC
+ from utils.hparams import hparams
+ 
+ DEFAULT_MAX_SOURCE_POSITIONS = 2000
+ DEFAULT_MAX_TARGET_POSITIONS = 2000
+ 
+ 
+ class TransformerEncoderLayer(nn.Module):
+     def __init__(self, hidden_size, dropout, kernel_size=None, num_heads=2, norm='ln'):
+         super().__init__()
+         self.hidden_size = hidden_size
+         self.dropout = dropout
+         self.num_heads = num_heads
+         self.op = EncSALayer(
+             hidden_size, num_heads, dropout=dropout,
+             attention_dropout=0.0, relu_dropout=dropout,
+             kernel_size=kernel_size
+             if kernel_size is not None else hparams['enc_ffn_kernel_size'],
+             padding=hparams['ffn_padding'],
+             norm=norm, act=hparams['ffn_act'])
+ 
+     def forward(self, x, **kwargs):
+         return self.op(x, **kwargs)
+ 
+ 
+ ######################
+ # fastspeech modules
+ ######################
+ class LayerNorm(torch.nn.LayerNorm):
+     """Layer normalization module.
+     :param int nout: output dim size
+     :param int dim: dimension to be normalized
+     """
+ 
+     def __init__(self, nout, dim=-1):
+         """Construct a LayerNorm object."""
+         super(LayerNorm, self).__init__(nout, eps=1e-12)
+         self.dim = dim
+ 
+     def forward(self, x):
+         """Apply layer normalization.
+         :param torch.Tensor x: input tensor
+         :return: layer normalized tensor
+         :rtype torch.Tensor
+         """
+         if self.dim == -1:
+             return super(LayerNorm, self).forward(x)
+         return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
+ 
+ 
+ class DurationPredictor(torch.nn.Module):
+     """Duration predictor module.
+     This is the duration predictor described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
+     It predicts the duration of each frame in the log domain from the hidden embeddings of the encoder.
+     .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
+         https://arxiv.org/pdf/1905.09263.pdf
+     Note:
+         The calculation domain of the outputs differs between `forward` and `inference`: `forward`
+         returns values in the log domain, while `inference` returns them in the linear domain.
+     """
+ 
+     def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding='SAME'):
+         """Initialize duration predictor module.
+         Args:
+             idim (int): Input dimension.
+             n_layers (int, optional): Number of convolutional layers.
+             n_chans (int, optional): Number of channels of convolutional layers.
+             kernel_size (int, optional): Kernel size of convolutional layers.
+             dropout_rate (float, optional): Dropout rate.
+             offset (float, optional): Offset value to avoid nan in log domain.
+         """
+         super(DurationPredictor, self).__init__()
+         self.offset = offset
+         self.conv = torch.nn.ModuleList()
+         self.kernel_size = kernel_size
+         self.padding = padding
+         for idx in range(n_layers):
+             in_chans = idim if idx == 0 else n_chans
+             self.conv += [torch.nn.Sequential(
+                 torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
+                                        if padding == 'SAME'
+                                        else (kernel_size - 1, 0), 0),
+                 torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
+                 torch.nn.ReLU(),
+                 LayerNorm(n_chans, dim=1),
+                 torch.nn.Dropout(dropout_rate)
+             )]
+         if hparams['dur_loss'] in ['mse', 'huber']:
+             odims = 1
+         elif hparams['dur_loss'] == 'mog':
+             odims = 15
+         elif hparams['dur_loss'] == 'crf':
+             odims = 32
+             from torchcrf import CRF
+             self.crf = CRF(odims, batch_first=True)
+         self.linear = torch.nn.Linear(n_chans, odims)
+ 
+     def _forward(self, xs, x_masks=None, is_inference=False):
+         xs = xs.transpose(1, -1)  # (B, idim, Tmax)
+         for f in self.conv:
+             xs = f(xs)  # (B, C, Tmax)
+             if x_masks is not None:
+                 xs = xs * (1 - x_masks.float())[:, None, :]
+ 
+         xs = self.linear(xs.transpose(1, -1))  # [B, T, C]
+         xs = xs * (1 - x_masks.float())[:, :, None]  # (B, T, C)
+         if is_inference:
+             return self.out2dur(xs), xs
+         else:
+             if hparams['dur_loss'] in ['mse']:
+                 xs = xs.squeeze(-1)  # (B, Tmax)
+         return xs
+ 
+     def out2dur(self, xs):
+         if hparams['dur_loss'] in ['mse']:
+             # NOTE: calculate in log domain
+             xs = xs.squeeze(-1)  # (B, Tmax)
+             dur = torch.clamp(torch.round(xs.exp() - self.offset), min=0).long()  # avoid negative value
+         elif hparams['dur_loss'] == 'mog':
+             raise NotImplementedError
+         elif hparams['dur_loss'] == 'crf':
+             dur = torch.LongTensor(self.crf.decode(xs)).cuda()
+         return dur
+ 
+     def forward(self, xs, x_masks=None):
+         """Calculate forward propagation.
+         Args:
+             xs (Tensor): Batch of input sequences (B, Tmax, idim).
+             x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
+         Returns:
+             Tensor: Batch of predicted durations in log domain (B, Tmax).
+         """
+         return self._forward(xs, x_masks, False)
+ 
+     def inference(self, xs, x_masks=None):
+         """Inference duration.
+         Args:
+             xs (Tensor): Batch of input sequences (B, Tmax, idim).
+             x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
+         Returns:
+             LongTensor: Batch of predicted durations in linear domain (B, Tmax).
+         """
+         return self._forward(xs, x_masks, True)
+ 
+ 
+ class LengthRegulator(torch.nn.Module):
+     def __init__(self, pad_value=0.0):
+         super(LengthRegulator, self).__init__()
+         self.pad_value = pad_value
+ 
+     def forward(self, dur, dur_padding=None, alpha=1.0):
+         """
+         Example (no batch dim version):
+             1. dur = [2, 2, 3]
+             2. token_idx = [[1], [2], [3]], dur_cumsum = [2, 4, 7], dur_cumsum_prev = [0, 2, 4]
+             3. token_mask = [[1, 1, 0, 0, 0, 0, 0],
+                              [0, 0, 1, 1, 0, 0, 0],
+                              [0, 0, 0, 0, 1, 1, 1]]
+             4. token_idx * token_mask = [[1, 1, 0, 0, 0, 0, 0],
+                                          [0, 0, 2, 2, 0, 0, 0],
+                                          [0, 0, 0, 0, 3, 3, 3]]
+             5. (token_idx * token_mask).sum(0) = [1, 1, 2, 2, 3, 3, 3]
+ 
+         :param dur: Batch of durations of each frame (B, T_txt)
+         :param dur_padding: Batch of padding of each frame (B, T_txt)
+         :param alpha: duration rescale coefficient
+         :return: mel2ph (B, T_speech)
+         """
+         assert alpha > 0
+         dur = torch.round(dur.float() * alpha).long()
+         if dur_padding is not None:
+             dur = dur * (1 - dur_padding.long())
+         token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device)
+         dur_cumsum = torch.cumsum(dur, 1)
+         dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0)
+ 
+         pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device)
+         token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None])
+         mel2ph = (token_idx * token_mask.long()).sum(1)
+         return mel2ph
+ 
+ 
+ class PitchPredictor(torch.nn.Module):
+     def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,
+                  dropout_rate=0.1, padding='SAME'):
+         """Initialize pitch predictor module.
+         Args:
+             idim (int): Input dimension.
+             n_layers (int, optional): Number of convolutional layers.
+             n_chans (int, optional): Number of channels of convolutional layers.
+             kernel_size (int, optional): Kernel size of convolutional layers.
+             dropout_rate (float, optional): Dropout rate.
+         """
+         super(PitchPredictor, self).__init__()
+         self.conv = torch.nn.ModuleList()
+         self.kernel_size = kernel_size
+         self.padding = padding
+         for idx in range(n_layers):
+             in_chans = idim if idx == 0 else n_chans
+             self.conv += [torch.nn.Sequential(
+                 torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
+                                        if padding == 'SAME'
+                                        else (kernel_size - 1, 0), 0),
+                 torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
+                 torch.nn.ReLU(),
+                 LayerNorm(n_chans, dim=1),
+                 torch.nn.Dropout(dropout_rate)
+             )]
+         self.linear = torch.nn.Linear(n_chans, odim)
+         self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
+         self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
+ 
+     def forward(self, xs):
+         """
+         :param xs: [B, T, H]
+         :return: [B, T, H]
+         """
+         positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])
+         xs = xs + positions
+         xs = xs.transpose(1, -1)  # (B, idim, Tmax)
+         for f in self.conv:
+             xs = f(xs)  # (B, C, Tmax)
+         # NOTE: calculate in log domain
+         xs = self.linear(xs.transpose(1, -1))  # (B, Tmax, H)
+         return xs
+ 
+ 
+ class EnergyPredictor(PitchPredictor):
+     pass
+ 
+ 
+ def mel2ph_to_dur(mel2ph, T_txt, max_dur=None):
+     B, _ = mel2ph.shape
+     dur = mel2ph.new_zeros(B, T_txt + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph))
+     dur = dur[:, 1:]
+     if max_dur is not None:
+         dur = dur.clamp(max=max_dur)
+     return dur
+ 
+ 
+ class FFTBlocks(nn.Module):
+     def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=None, num_heads=2,
+                  use_pos_embed=True, use_last_norm=True, norm='ln', use_pos_embed_alpha=True):
+         super().__init__()
+         self.num_layers = num_layers
+         embed_dim = self.hidden_size = hidden_size
+         self.dropout = dropout if dropout is not None else hparams['dropout']
+         self.use_pos_embed = use_pos_embed
+         self.use_last_norm = use_last_norm
+         if use_pos_embed:
+             self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
+             self.padding_idx = 0
+             self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
+             self.embed_positions = SinusoidalPositionalEmbedding(
+                 embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
+             )
+ 
+         self.layers = nn.ModuleList([])
+         self.layers.extend([
+             TransformerEncoderLayer(self.hidden_size, self.dropout,
+                                     kernel_size=ffn_kernel_size, num_heads=num_heads)
+             for _ in range(self.num_layers)
+         ])
+         if self.use_last_norm:
+             if norm == 'ln':
+                 self.layer_norm = nn.LayerNorm(embed_dim)
+             elif norm == 'bn':
+                 self.layer_norm = BatchNorm1dTBC(embed_dim)
+         else:
+             self.layer_norm = None
+ 
+     def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False):
+         """
+         :param x: [B, T, C]
+         :param padding_mask: [B, T]
+         :return: [B, T, C] or [L, B, T, C]
+         """
+         # padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
+         padding_mask = x.abs().sum(-1).eq(0).detach() if padding_mask is None else padding_mask
+         nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None]  # [T, B, 1]
+         if self.use_pos_embed:
+             positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
+             x = x + positions
+             x = F.dropout(x, p=self.dropout, training=self.training)
+         # B x T x C -> T x B x C
+         x = x.transpose(0, 1) * nonpadding_mask_TB
+         hiddens = []
+         for layer in self.layers:
+             x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
+             hiddens.append(x)
+         if self.use_last_norm:
+             x = self.layer_norm(x) * nonpadding_mask_TB
+         if return_hiddens:
+             x = torch.stack(hiddens, 0)  # [L, T, B, C]
+             x = x.transpose(1, 2)  # [L, B, T, C]
+         else:
+             x = x.transpose(0, 1)  # [B, T, C]
+         return x
+ 
+ 
+ class FastspeechEncoder(FFTBlocks):
+     """
+     Compared to FFTBlocks:
+     - input is [B, T, H], not [B, T, C]
+     - supports "relative" positional encoding
+     """
+     def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=2):
+         hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
+         kernel_size = hparams['enc_ffn_kernel_size'] if kernel_size is None else kernel_size
+         num_layers = hparams['dec_layers'] if num_layers is None else num_layers
+         super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads,
+                          use_pos_embed=False)  # use_pos_embed_alpha for compatibility
+         # self.embed_tokens = embed_tokens
+         self.embed_scale = math.sqrt(hidden_size)
+         self.padding_idx = 0
+         if hparams.get('rel_pos') is not None and hparams['rel_pos']:
+             self.embed_positions = RelPositionalEncoding(hidden_size, dropout_rate=0.0)
+         else:
+             self.embed_positions = SinusoidalPositionalEmbedding(
+                 hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
+             )
+ 
+     def forward(self, hubert):
+         """
+         :param hubert: [B, T, H]
+         :return: {
+             'encoder_out': [T x B x C]
+         }
+         """
+         # encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
+         encoder_padding_mask = (hubert == 0).all(-1)
+         x = self.forward_embedding(hubert)  # [B, T, H]
+         x = super(FastspeechEncoder, self).forward(x, encoder_padding_mask)
+         return x
+ 
+     def forward_embedding(self, hubert):
+         # embed tokens and positions
+         x = self.embed_scale * hubert
+         if hparams['use_pos_embed']:
+             positions = self.embed_positions(hubert)
+             x = x + positions
+         x = F.dropout(x, p=self.dropout, training=self.training)
+         return x
+ 
+ 
+ class FastspeechDecoder(FFTBlocks):
+     def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=None):
+         num_heads = hparams['num_heads'] if num_heads is None else num_heads
+         hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
+         kernel_size = hparams['dec_ffn_kernel_size'] if kernel_size is None else kernel_size
+         num_layers = hparams['dec_layers'] if num_layers is None else num_layers
+         super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads)
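`LengthRegulator.forward` implements exactly the expansion shown in its docstring, and `mel2ph_to_dur` is its inverse (a scatter_add that counts how many frames map to each token). A quick standalone check, illustrative only:

    import torch
    from modules.fastspeech.tts_modules import LengthRegulator, mel2ph_to_dur

    lr = LengthRegulator()
    dur = torch.tensor([[2, 2, 3]])   # token 1 -> 2 frames, token 2 -> 2, token 3 -> 3
    mel2ph = lr(dur)
    print(mel2ph)                     # tensor([[1, 1, 2, 2, 3, 3, 3]])
    print(mel2ph_to_dur(mel2ph, 3))   # tensor([[2, 2, 3]]): round-trips the durations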