diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..d8d31d66fd02e4127f492474571249c6f6715961 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,35 +1,37 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+
+*.flac filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..ee374ad74bf50f27fbbaec5823ab4998c827ad8a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+poetry.lock
+work/*
+__pycache__
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..13f6376abe4edf18975a5fb8ec7eac48deeb2820
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Project Beatrice
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8f32518b153625b9e3a9e79f40237c9cddc01d96
--- /dev/null
+++ b/README.md
@@ -0,0 +1,213 @@
+---
+license: mit
+pipeline_tag: audio-to-audio
+tags:
+- audio
+- speech
+- voice-conversion
+datasets:
+- reazon-research/reazonspeech
+- dns-challenge
+- libritts-r
+---
+
+# Beatrice Trainer
+
+A toolkit for training models for [Beatrice 2](https://prj-beatrice.com), a completely free voice conversion VST featuring ultra-low latency, a light compute load, and a small footprint.
+
+Beatrice 2 is being developed with the following goals:
+
+* Let you sing comfortably while listening to your own converted voice
+* Accurately carry the intonation of the input voice over to the converted voice, enabling subtle expression
+* High naturalness and clarity of the converted voice
+* A diverse range of target speakers
+* Around 50 ms of latency when converting with the official VST, as measured with external recording equipment
+* A load light enough to reach RTF < 0.25 when run single-threaded on the developer's laptop (Intel Core i7-1165G7)
+* A size of 30 MB or less in the minimal configuration
+* Runs in the official VST and in [VC Client](https://github.com/w-okada/voice-changer)
+* And more (secret)
+
+## Prerequisites
+
+Beatrice does not require a GPU if you only convert voices with an existing trained model.
+However, a GPU is required to create new models efficiently.
+
+With the default settings, the training script consumes around 9 GB of VRAM.
+Using a GeForce RTX 4090, training completes in about an hour.
+
+Even if you do not have a GPU at hand, you can train on Google Colab using the following repository:
+
+* [w-okada/beatrice-trainer-colab](https://github.com/w-okada/beatrice-trainer-colab)
+
+## Getting Started
+
+### 1. Download This Repo
+
+Download this repository using Git or a similar tool.
+
+```sh
+git lfs install
+git clone https://huggingface.co/fierce-cats/beatrice-trainer
+cd beatrice-trainer
+```
+
+### 2. Environment Setup
+
+Install the dependencies using Poetry or a similar tool.
+
+```sh
+poetry install
+poetry shell
+# Alternatively, you can use pip to install dependencies directly:
+# pip3 install -e .
+```
+
+If everything is installed correctly, `python3 beatrice_trainer -h` prints a help message like the following.
+
+```
+usage: beatrice_trainer [-h] [-d DATA_DIR] [-o OUT_DIR] [-r] [-c CONFIG]
+
+options:
+  -h, --help            show this help message and exit
+  -d DATA_DIR, --data_dir DATA_DIR
+                        directory containing the training data
+  -o OUT_DIR, --out_dir OUT_DIR
+                        output directory
+  -r, --resume          resume training
+  -c CONFIG, --config CONFIG
+                        path to the config file
+```
+
+### 3. Prepare Your Training Data
+
+Arrange your training data as shown below.
+
+```
+your_training_data_dir
++---alice
+|   +---alices_wonderful_speech.wav
+|   +---alices_excellent_speech.flac // FLAC, MP3, and some other formats are also okay.
+|   `---...
++---bob
+|   +---bobs_fantastic_speech.wav
+|   +---bobs_speeches
+|   |   `---bobs_awesome_speech.wav // Audio files in nested directories will also be used.
+|   `---...
+`---...
+```
+
+You must create one directory per speaker directly under the training data directory.
+The directory structure under each speaker and the names of the audio files are up to you.
+
+Note that a speaker directory is still required when the training data contains only a single speaker.
+
+```
+your_training_data_dir_with_only_one_speaker
++---charlies_brilliant_speech.wav // Wrong.
+`---...
+```
+
+```
+your_training_data_dir_with_only_one_speaker
+`---charlie
+    +---charlies_brilliant_speech.wav // Correct!
+    `---...
+```
+
+### 4. Train Your Model
+
+Start training by specifying the directory containing your training data and an output directory.
+
+```sh
+python3 beatrice_trainer -d <data_dir> -o <out_dir>
+```
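+
+For example, with the directory layout shown above, an invocation might look like this (the paths are placeholders; substitute your own):
+
+```sh
+python3 beatrice_trainer -d ./your_training_data_dir -o ./work/out
+```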
+
+You can monitor training progress with TensorBoard.
+
+```sh
+tensorboard --logdir <out_dir>
+```
+
+### 5. After Training
+
+When training completes successfully, a directory named `paraphernalia_(data_dir_name)_(step)` will have been created in the output directory.
+Load this directory into the [official VST](https://prj-beatrice.com) or [VC Client](https://github.com/w-okada/voice-changer) to perform streaming (real-time) conversion.
+
+## Detailed Usage
+
+### Training
+
+To use hyperparameters or pretrained models other than the defaults, copy `assets/default_config.json` (the config file holding the default values) to another location, edit the values, and pass the file with `-c`.
+Note that editing `assets/default_config.json` in place will break things.
+
+You can also omit the command-line arguments by adding `data_dir` and `out_dir` keys to the config file, specifying the training data directory and the output directory as absolute paths or as paths relative to the repository root.
+
+```sh
+python3 beatrice_trainer -c <config>
+```
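+
+A minimal sketch of this workflow (the file name `work/my_config.json` is only an example):
+
+```sh
+cp assets/default_config.json work/my_config.json
+# Edit work/my_config.json, optionally adding "data_dir" and "out_dir" keys,
+# then start training with it:
+python3 beatrice_trainer -c work/my_config.json
+```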
+
+If training is interrupted for some reason, and a `checkpoint_latest.pt` has been written to the output directory, you can resume from the last saved checkpoint by re-running the command used for that training run with the `-r` option added.
+
+```sh
+python3 beatrice_trainer -d <data_dir> -o <out_dir> -r
+```
+
+### Output Files
+
+Running the training script produces the following files and directories in the output directory.
+
+* `paraphernalia_(data_dir_name)_(step)`
+  * A directory containing all the files needed for streaming conversion.
+  * Directories for intermediate steps may also be written; you can delete every one except the step count you need.
+  * Nothing outside this directory is used for streaming conversion, so the other outputs can be deleted if you do not need them.
+* `checkpoint_(data_dir_name)_(step)`
+  * A checkpoint for resuming training from that step.
+  * Rename it to `checkpoint_latest.pt` and run the training script with the `-r` option to resume training from that step count (see the sketch after this list).
+* `checkpoint_latest.pt`
+  * A copy of the most recent `checkpoint_(data_dir_name)_(step)`.
+* `config.json`
+  * The config used for the training run.
+* `events.out.tfevents.*`
+  * Data displayed by TensorBoard.
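+
+For example, rolling back to an earlier step might look like this (the step count and checkpoint name below are hypothetical):
+
+```sh
+cp <out_dir>/checkpoint_your_training_data_dir_00010000.pt <out_dir>/checkpoint_latest.pt
+python3 beatrice_trainer -d <data_dir> -o <out_dir> -r
+```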
+
+### Customize Paraphernalia
+
+You can change how the model is shown in the VST and VC Client by editing the `beatrice_paraphernalia_*.toml` file inside the paraphernalia directory produced by the training script.
+
+Do not change `model.version`; it indicates the format version of the generated model.
+
+If a `description` is too long, the full text may not be displayed.
+Even if it displays correctly now, future changes to the VST or VC Client specifications may prevent it from being displayed, so keep it comfortably within the character and line limits.
+
+The image set as `portrait` must be a square PNG.
+
+## Distribution of Trained Models
+
+You are welcome to distribute models created with this repository.
+
+Distributed models may be introduced on social media accounts and websites run by Project Beatrice and its affiliates.
+Please be aware that the image set as `portrait` may be shown on such occasions.
+
+## Resource
+
+This repository contains various data used for training and other purposes.
+See [assets/README.md](https://huggingface.co/fierce-cats/beatrice-trainer/blob/main/assets/README.md) for details.
+
+## Reference
+
+* [wav2vec 2.0](https://arxiv.org/abs/2006.11477) ([Official implementation](https://github.com/facebookresearch/fairseq), [MIT License](https://github.com/facebookresearch/fairseq/blob/main/LICENSE))
+* [EnCodec](https://arxiv.org/abs/2210.13438) ([Official implementation](https://github.com/facebookresearch/encodec), [MIT License](https://github.com/facebookresearch/encodec/blob/main/LICENSE))
+* [HiFi-GAN](https://arxiv.org/abs/2010.05646) ([Official implementation](https://github.com/jik876/hifi-gan), [MIT License](https://github.com/jik876/hifi-gan/blob/master/LICENSE))
+* [Vocos](https://arxiv.org/abs/2306.00814) ([Official implementation](https://github.com/gemelo-ai/vocos), [MIT License](https://github.com/gemelo-ai/vocos/blob/main/LICENSE))
+* [BigVSAN](https://arxiv.org/abs/2309.02836) ([Official implementation](https://github.com/sony/bigvsan), [MIT License](https://github.com/sony/bigvsan/blob/main/LICENSE))
+* [UnivNet](https://arxiv.org/abs/2106.07889) ([Unofficial implementation](https://github.com/maum-ai/univnet), [BSD 3-Clause License](https://github.com/maum-ai/univnet/blob/master/LICENSE))
+* [Soft-VC](https://arxiv.org/abs/2111.02392)
+* [StreamVC](https://arxiv.org/abs/2401.03078)
+* [EVA-GAN](https://arxiv.org/abs/2402.00892)
+* [Subramani et al., 2024](https://arxiv.org/abs/2309.14507)
+* [Agrawal et al., 2024](https://arxiv.org/abs/2401.10460)
+
+## License
+
+The source code and trained models in this repository are released under the MIT License.
+See [LICENSE](https://huggingface.co/fierce-cats/beatrice-trainer/blob/main/LICENSE) for details.
diff --git a/assets/README.md b/assets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..01889231df7acf176c86629390ed32a763e0a8ed
--- /dev/null
+++ b/assets/README.md
@@ -0,0 +1,48 @@
+# Assets
+
+## IR
+
+Impulse response data generated with the [Room Impulse Response Generator](https://github.com/audiolabs/rir-generator).
+
+## Noise
+
+A downsampled subset of the noise data provided by the [DNS-Challenge](https://github.com/microsoft/DNS-Challenge), including the following:
+
+* Audioset: https://research.google.com/audioset/index.html; License: https://creativecommons.org/licenses/by/4.0/
+* Freesound: https://freesound.org/ Only files with CC0 licenses were selected; License: https://creativecommons.org/publicdomain/zero/1.0/
+* Demand: https://zenodo.org/record/1227121#.XRKKxYhKiUk; License: https://creativecommons.org/licenses/by-sa/3.0/deed.en_CA
+
+## Pretrained
+
+Pretrained Beatrice models, trained on data from [ReazonSpeech](https://huggingface.co/datasets/reazon-research/reazonspeech), the [DNS-Challenge](https://github.com/microsoft/DNS-Challenge), and [LibriTTS-R](https://www.openslr.org/141/).
+
+## Test
+
+A downsampled subset of [Common Voice](https://commonvoice.mozilla.org); the original data is licensed under CC0.
+The sentences being read are excerpts from works published on [Aozora Bunko](https://www.aozora.gr.jp) whose copyright terms have expired.
+
+* common_voice_ja_38833628
+  * "「やっぱりお化けや幽霊じゃないんだ。ああして歩いているところをみると、人間にちがいない」"
+  * Edogawa Ranpo, 『少年探偵団』 (The Boy Detectives Club), https://www.aozora.gr.jp/cards/001779/files/56669_58756.html
+* common_voice_ja_38843402
+  * "「こりゃきっと仲間によくないことがあったにちがいない。」と小悪魔は考えました。"
+  * Tolstoy, 『イワンの馬鹿』 (Ivan the Fool), translated by Kikuchi Kan, https://www.aozora.gr.jp/cards/000361/files/42941_15672.html
+* common_voice_ja_38852485
+  * "すると、ブランコ乗りは突然泣き始めた。すっかり驚いた興行主は飛び上がり、いったいどうしたのか、とたずねた。"
+  * Franz Kafka, 『最初の苦悩』 (First Sorrow), translated by Harada Yoshito, https://www.aozora.gr.jp/cards/001235/files/49861_41921.html
+* common_voice_ja_38853932
+  * "王もこのやり方は喜んでいません。それにもう一つ、これには困ることがあるのです。"
+  * Jonathan Swift, 『ガリバー旅行記』 (Gulliver's Travels), translated by Hara Tamiki, https://www.aozora.gr.jp/cards/000912/files/4673_9768.html
+* common_voice_ja_38864552
+  * "ヘンゼルは屋根が、とてもおいしかったので、大きなやつを、一枚、そっくりめくってもって来ました。"
+  * The Brothers Grimm, 『ヘンゼルとグレーテル』 (Hansel and Gretel), translated by Kusuyama Masao, https://www.aozora.gr.jp/cards/001091/files/42315_15931.html
+* common_voice_ja_38878413
+  * "私があまりあけすけに、陛下に申し上げたので、それが、皇帝のお気にさわったらしいのです。陛下は議会で、私の考えを、それとなく非難されました。"
+  * Jonathan Swift, 『ガリバー旅行記』 (Gulliver's Travels), translated by Hara Tamiki, https://www.aozora.gr.jp/cards/000912/files/4673_9768.html
+* common_voice_ja_38898180
+  * "「君となら話すかい?」と、Kはきいた。「わたしもだめよ」と、フリーダがいう。「あなたもだめよ、わたしもだめよ。まったくできないことなのよ」"
+  * Franz Kafka, 『城』 (The Castle), translated by Harada Yoshito, https://www.aozora.gr.jp/cards/001235/files/49862_45839.html
+* common_voice_ja_38925334
+  * "それで海の中へ落ちたことがはじめてわかりました。箱は私の身体や家具などの重みで、水の中に浸りながら浮いています。"
+  * Jonathan Swift, 『ガリバー旅行記』 (Gulliver's Travels), translated by Hara Tamiki, https://www.aozora.gr.jp/cards/000912/files/4673_9768.html
diff --git a/assets/default_config.json b/assets/default_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1046864fd3f71f0c0d4c46f72d6915d82d23c295
--- /dev/null
+++ b/assets/default_config.json
@@ -0,0 +1,30 @@
+{
+    "learning_rate": 1e-4,
+    "min_learning_rate": 5e-6,
+    "adam_betas": [
+        0.8,
+        0.99
+    ],
+    "adam_eps": 1e-6,
+    "batch_size": 8,
+    "grad_weight_mel": 1.0,
+    "grad_weight_adv": 1.0,
+    "grad_weight_fm": 1.0,
+    "grad_balancer_ema_decay": 0.995,
+    "use_amp": true,
+    "num_workers": 16,
+    "n_steps": 20000,
+    "warmup_steps": 10000,
+    "in_sample_rate": 16000,
+    "out_sample_rate": 24000,
+    "wav_length": 96000,
+    "segment_length": 100,
+    "phone_extractor_file": "assets/pretrained/003b_checkpoint_03000000.pt",
+    "pitch_estimator_file": "assets/pretrained/008_1_checkpoint_00300000.pt",
+    "in_ir_wav_dir": "assets/ir",
+    "in_noise_wav_dir": "assets/noise",
+    "in_test_wav_dir": "assets/test",
+    "pretrained_file": "assets/pretrained/040c_checkpoint_libritts_r_200_02300000.pt",
+    "hidden_channels": 256,
+    "san": false
+}
\ No newline at end of file
diff --git a/assets/images/noimage.png b/assets/images/noimage.png
new file mode 100644
index 0000000000000000000000000000000000000000..c6fb8a4a82b77c25a2068ab9ffca2676c04b6144
Binary files /dev/null and b/assets/images/noimage.png differ
diff --git a/assets/ir/0000.flac b/assets/ir/0000.flac
new file mode 100644
index 0000000000000000000000000000000000000000..a2f9944966349b82b390ec39d8ff149867344506
--- /dev/null
+++ b/assets/ir/0000.flac
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c9dd1f837c88a3e1dc32f49f74dab1039d9f2366ed7ea55ab3abb28b01428f8
+size 3668
diff --git a/assets/ir/0001.flac b/assets/ir/0001.flac
new file mode 100644
index 0000000000000000000000000000000000000000..fbe8186542651d4fcb0afe7afd5de676d6921375
--- /dev/null
+++ b/assets/ir/0001.flac
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e5d79560eb0b9b90b6470adfeb215f372fb76fa404ae17835bab40d6b545d86
+size 7843
diff --git a/assets/ir/0002.flac b/assets/ir/0002.flac
new file mode 100644
index 0000000000000000000000000000000000000000..e52b26933a960d4c1a456fb83f5273d565bf2993
--- /dev/null
+++ b/assets/ir/0002.flac
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00f099a151076c1acff6489863bc477571541aa8c2e6e6bb284f55c297dd6f8c
+size 4476
diff --git a/assets/ir/0003.flac b/assets/ir/0003.flac
new file mode 100644
index 0000000000000000000000000000000000000000..ce95e8778455d1e4066f8a9edb009070690e3dda
--- /dev/null
+++ b/assets/ir/0003.flac
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20439e06eefa457e98c09b1b529374c765e79786985ca1ddbcdabbcef37e9f2b
+size 2711
diff --git a/assets/ir/0004.flac b/assets/ir/0004.flac
new file mode 100644
index 0000000000000000000000000000000000000000..20e15fa1517f14014cfd9f11fc7a2d5aefc79bfc
--- /dev/null
+++ b/assets/ir/0004.flac
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25b234ee64ced7f03dee7f10fe45582d868e9666d97f67bf1788699b425e00bc
+size 3372
diff --git a/assets/ir/0005.flac b/assets/ir/0005.flac
new file mode 100644
index 0000000000000000000000000000000000000000..185678fe30443da0a4b735248cf5cfa515b85b7b
--- /dev/null
+++ 
b/assets/ir/0005.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bfe35cbf402ccb263bae70f89da276401fc234723b4072c67b84a3e4028358f +size 4108 diff --git a/assets/ir/0006.flac b/assets/ir/0006.flac new file mode 100644 index 0000000000000000000000000000000000000000..28a235603e07e3c3805e59a5ae3be945f2363338 --- /dev/null +++ b/assets/ir/0006.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:183c522278a1b6952bb278efcb51dab69a24c5f7380c3cf096c4fb88491a3472 +size 7044 diff --git a/assets/ir/0007.flac b/assets/ir/0007.flac new file mode 100644 index 0000000000000000000000000000000000000000..20163bab9019432b5ec27e8f732dd982476ff01e --- /dev/null +++ b/assets/ir/0007.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1587ae55ed42ed9749f90698ac51ac248406fb6c92bd5e39394cd4c3874ab3ec +size 3069 diff --git a/assets/ir/0008.flac b/assets/ir/0008.flac new file mode 100644 index 0000000000000000000000000000000000000000..5200220403e055c2f2f9deef2aabf9b2c1c7e825 --- /dev/null +++ b/assets/ir/0008.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c283e5389aed32d287f48d580909ebe603478778343007ec996f6566206a36ba +size 3036 diff --git a/assets/ir/0009.flac b/assets/ir/0009.flac new file mode 100644 index 0000000000000000000000000000000000000000..393b96f55439c4033081dc15f110fb841841c4e3 --- /dev/null +++ b/assets/ir/0009.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e56d9fae89de7d9a1958bd0256433723c3d5bfec5568bddcbdd4cd70772c947 +size 4746 diff --git a/assets/ir/0010.flac b/assets/ir/0010.flac new file mode 100644 index 0000000000000000000000000000000000000000..e30f7065455bf1f15aba4a1e80e1b94423242af0 --- /dev/null +++ b/assets/ir/0010.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df69cebb1bc991337fd5d3531aae35df6cdc04c6e45b5463e1c0225aa1e0c340 +size 11285 diff --git a/assets/ir/0011.flac b/assets/ir/0011.flac new file mode 100644 index 0000000000000000000000000000000000000000..a81f72213c772a02da93e83211a3671e4d2b3997 --- /dev/null +++ b/assets/ir/0011.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ed46dba2fd4b22104dee643dfc1cc9bdf255875b790e4cb4437070381b82972 +size 2628 diff --git a/assets/ir/0012.flac b/assets/ir/0012.flac new file mode 100644 index 0000000000000000000000000000000000000000..78e80e65e32bb69ffe84e58861746c7a8fc6da9d --- /dev/null +++ b/assets/ir/0012.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d91a1786b5da5c16a25a0f1e68831c3127b2a667600a39815a7ca3f88f65648 +size 3392 diff --git a/assets/ir/0013.flac b/assets/ir/0013.flac new file mode 100644 index 0000000000000000000000000000000000000000..e7a123c1f80bdd9cc12efbf91bcb8fac72610786 --- /dev/null +++ b/assets/ir/0013.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:635637d26a0bc70925aeaaf54c52dccbaf1c3658c3e88376e22c42fe5ef4468f +size 3288 diff --git a/assets/ir/0014.flac b/assets/ir/0014.flac new file mode 100644 index 0000000000000000000000000000000000000000..a9f7588eb0a0f1df70a33e172f3bc1c23e08f0b2 --- /dev/null +++ b/assets/ir/0014.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af9fb26de76379895c36053ce2a1bdcdedd0a50da753202ffa198f30c05dbc98 +size 2770 diff --git a/assets/ir/0015.flac b/assets/ir/0015.flac new file mode 100644 index 0000000000000000000000000000000000000000..036fa05457d01f5e8fdd9d10d068a1ba9680830a --- /dev/null +++ b/assets/ir/0015.flac 
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e5a0afd0b9b32e2ebd851e94a60329dc7cdfa21e9d9a41cb264d9b4f24ff933 +size 7679 diff --git a/assets/ir/0016.flac b/assets/ir/0016.flac new file mode 100644 index 0000000000000000000000000000000000000000..0b71d217d6423ad6d1803affb6bd2dd53daccca0 --- /dev/null +++ b/assets/ir/0016.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23bda2cd5c7923bae3f1c99b805347e42273fccb4be7e6a673def499f471075a +size 4794 diff --git a/assets/ir/0017.flac b/assets/ir/0017.flac new file mode 100644 index 0000000000000000000000000000000000000000..f7f2f29ab4e0da1571c051f62c097ba46f9b8849 --- /dev/null +++ b/assets/ir/0017.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ef3550340702b7279188870ec9371bce91201618f3a4f8a8170b5b641d4d9c6 +size 3912 diff --git a/assets/ir/0018.flac b/assets/ir/0018.flac new file mode 100644 index 0000000000000000000000000000000000000000..87dc47b48130c2938af62fdfe290de6a9598cd13 --- /dev/null +++ b/assets/ir/0018.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96c709599477652af1361886ebe3f76bf99e37defbf5dd7fd3e2126a7cd97d55 +size 2480 diff --git a/assets/ir/0019.flac b/assets/ir/0019.flac new file mode 100644 index 0000000000000000000000000000000000000000..4da542a02b002be08f3f0e817656a08c72b49635 --- /dev/null +++ b/assets/ir/0019.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae539a878baad97a6b9608f21b9b4e0d96b412a2a4eab7af124ddc3a81e870fc +size 2800 diff --git a/assets/ir/0020.flac b/assets/ir/0020.flac new file mode 100644 index 0000000000000000000000000000000000000000..dc012b43d1d0621255fe7fdfc8fe814891913900 --- /dev/null +++ b/assets/ir/0020.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bf77608cedfcadcd89345617b4b4db33be6ac7f280c7870e40a8a0aed7aec68 +size 3088 diff --git a/assets/ir/0021.flac b/assets/ir/0021.flac new file mode 100644 index 0000000000000000000000000000000000000000..add04befc57f34fc08c8fd36568325493d7f6ee6 --- /dev/null +++ b/assets/ir/0021.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e3e580c9b7785f4da4601b8ea22e21c7f1feff8851148acdbff08a4e30a0f6e +size 4357 diff --git a/assets/ir/0022.flac b/assets/ir/0022.flac new file mode 100644 index 0000000000000000000000000000000000000000..d9b23cb2ff587096117e51f5d13d549725abc04e --- /dev/null +++ b/assets/ir/0022.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6686247918b80a73bd7d80679efba615c98b8b65bbe24be05ceb4c326901973e +size 5709 diff --git a/assets/ir/0023.flac b/assets/ir/0023.flac new file mode 100644 index 0000000000000000000000000000000000000000..bbd73a1b70ed040a6549988597efe414b55dbfe5 --- /dev/null +++ b/assets/ir/0023.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27a0d588c0d7c7207a600ed8d24bf49820e836c9ad1af6a0244468b38244419f +size 2864 diff --git a/assets/ir/0024.flac b/assets/ir/0024.flac new file mode 100644 index 0000000000000000000000000000000000000000..a548b02e63449affdcd2312f22a1a5b872a99a84 --- /dev/null +++ b/assets/ir/0024.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f27f99fc0438879c80c0a2f6b764cf13b0813c2744eca27e02a74935a5dbfef +size 4052 diff --git a/assets/ir/0025.flac b/assets/ir/0025.flac new file mode 100644 index 0000000000000000000000000000000000000000..a7c2bc55734a2139164798bd533d504acf7f6ded --- /dev/null +++ b/assets/ir/0025.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a50c9e1af08a44fc577383e6f98012172999ab614bb586e7789b0f8d458d8c68 +size 2706 diff --git a/assets/ir/0026.flac b/assets/ir/0026.flac new file mode 100644 index 0000000000000000000000000000000000000000..8a77572a3141940cb02e1e0bbbe6fb71f6154686 --- /dev/null +++ b/assets/ir/0026.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7af13e7667d0931c0a0e54563ca599b49b13d48ed32c6438a76da7eb2937c51e +size 12479 diff --git a/assets/ir/0027.flac b/assets/ir/0027.flac new file mode 100644 index 0000000000000000000000000000000000000000..606d4808120fe36cc15c0a027bbb78af3bd32ec9 --- /dev/null +++ b/assets/ir/0027.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da8d7d888af53004d6a871b1b157a3a5f12a4005fc77eafaa9a057624c0d7c22 +size 2815 diff --git a/assets/ir/0028.flac b/assets/ir/0028.flac new file mode 100644 index 0000000000000000000000000000000000000000..f24c63243ac17d36a2ee92966882b53fa2d2def3 --- /dev/null +++ b/assets/ir/0028.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcbe4fd2cfaf6e93216d6aee97171b2c9f5648b936cb98f6ad52216c67168951 +size 2085 diff --git a/assets/ir/0029.flac b/assets/ir/0029.flac new file mode 100644 index 0000000000000000000000000000000000000000..5ee4e2aad3db644469c42742f104c58d268a3595 --- /dev/null +++ b/assets/ir/0029.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b099b578317a1528005ae4ab473223a359dc67a4f4c4b3a6625279fd0b8d82c +size 10083 diff --git a/assets/ir/0030.flac b/assets/ir/0030.flac new file mode 100644 index 0000000000000000000000000000000000000000..e72f1e51954b3fd7bb4e561352f671457d0a97c7 --- /dev/null +++ b/assets/ir/0030.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d994f9a18cc94b7c9efa8e7a744643e2e42eaea929fea910368b0d220283db9e +size 2978 diff --git a/assets/ir/0031.flac b/assets/ir/0031.flac new file mode 100644 index 0000000000000000000000000000000000000000..7d339f59f475817666e1bd3912b66de7a2eb4f9e --- /dev/null +++ b/assets/ir/0031.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45c3535ce8e46a03385f13351511840e9f22ce55ab86f9620c1709dab29b2168 +size 2971 diff --git a/assets/ir/0032.flac b/assets/ir/0032.flac new file mode 100644 index 0000000000000000000000000000000000000000..751ef6d33900a00d32af12d54e1032c647217bd4 --- /dev/null +++ b/assets/ir/0032.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0640b771dcee43101971df6172e3605feb9dcfce48699bbac35d6f0335e0c33d +size 2662 diff --git a/assets/ir/0033.flac b/assets/ir/0033.flac new file mode 100644 index 0000000000000000000000000000000000000000..71f1c298cd6eeef81d08fff7be204c95c2a21f69 --- /dev/null +++ b/assets/ir/0033.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40d45e02b26597c7dcd8944eb5fdb0f0c70145d3ad4305b587f5fa06235f8a28 +size 4492 diff --git a/assets/ir/0034.flac b/assets/ir/0034.flac new file mode 100644 index 0000000000000000000000000000000000000000..30fdaadbdd1d419344405ce9ab53b9a5a0259c0f --- /dev/null +++ b/assets/ir/0034.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21dce4598a36aa12259a6d08c6c033178086d01c77bd8d07a7c2b1c034b3bd96 +size 3326 diff --git a/assets/ir/0035.flac b/assets/ir/0035.flac new file mode 100644 index 0000000000000000000000000000000000000000..ff909137d1771e3b6bf4d31f6b79e254464e032e --- /dev/null +++ b/assets/ir/0035.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:583e24f01c4928ef4bc5ba5297ac0645285387f7043f54b775ce21115d5a5976 +size 5702 diff --git a/assets/ir/0036.flac b/assets/ir/0036.flac new file mode 100644 index 0000000000000000000000000000000000000000..1ba91e16d63ec7891edf95fb75da54eda74883f2 --- /dev/null +++ b/assets/ir/0036.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dab1ae16f4006cf738bfb59f0e1185d03e13141614e7eeff50a552814c2c462 +size 5103 diff --git a/assets/ir/0037.flac b/assets/ir/0037.flac new file mode 100644 index 0000000000000000000000000000000000000000..e08c1f3e6437eb09a0d758977c5d8dc9982e34d6 --- /dev/null +++ b/assets/ir/0037.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d6a7df6d7d4cfd303b30c426b3dd9a9bdc4e6cbcb625db7452524e166dab551 +size 2530 diff --git a/assets/ir/0038.flac b/assets/ir/0038.flac new file mode 100644 index 0000000000000000000000000000000000000000..d79502b7ec6f8e7f2dc8c38f3ca48934ff33f6f4 --- /dev/null +++ b/assets/ir/0038.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3d9d103ab096839963af7f90ba65387dc67b8dd0930b62b36bce79f55862a98 +size 4089 diff --git a/assets/ir/0039.flac b/assets/ir/0039.flac new file mode 100644 index 0000000000000000000000000000000000000000..6e604b0692950922292f18a7d13c8150890a5fed --- /dev/null +++ b/assets/ir/0039.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6d64619b32900ea17a6b50f32d0eac87d4229976a56be0452678feb2ad66ec7 +size 2290 diff --git a/assets/ir/0040.flac b/assets/ir/0040.flac new file mode 100644 index 0000000000000000000000000000000000000000..b829a312f88187ba6b49d3cf10e3e7d6f432855e --- /dev/null +++ b/assets/ir/0040.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77edb2136375420291f5b73616fd39413746fcfc9ccbc89799274f12e18cb761 +size 2721 diff --git a/assets/ir/0041.flac b/assets/ir/0041.flac new file mode 100644 index 0000000000000000000000000000000000000000..90a75170dee47a42f5d11ca90102c60f9f8a9203 --- /dev/null +++ b/assets/ir/0041.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cedea219ff594828d948a761fc51d00a28223f602f4dbffe0c72312741c9e0a1 +size 5320 diff --git a/assets/ir/0042.flac b/assets/ir/0042.flac new file mode 100644 index 0000000000000000000000000000000000000000..bf638a7f76395b5bdabc0fce99501d82154e2f1f --- /dev/null +++ b/assets/ir/0042.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9be898ed4dbc868d14d57170f0c1982bae0b760d5246d2ab8ff1e2056f3a0bbc +size 7898 diff --git a/assets/ir/0043.flac b/assets/ir/0043.flac new file mode 100644 index 0000000000000000000000000000000000000000..d51fbe20c05480fba2758b36dde6eed312248f63 --- /dev/null +++ b/assets/ir/0043.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73cc203fe80a99e0185f84da902b440d706db18a6152630ff63cba078468a831 +size 4720 diff --git a/assets/ir/0044.flac b/assets/ir/0044.flac new file mode 100644 index 0000000000000000000000000000000000000000..1b75352bde1e678600bbf3f48cfdb87730f2984a --- /dev/null +++ b/assets/ir/0044.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea326ebf055c420a25cb113bab2a3fc50f6134e097798142fed17f2985dcc42d +size 2904 diff --git a/assets/ir/0045.flac b/assets/ir/0045.flac new file mode 100644 index 0000000000000000000000000000000000000000..c0e0307468b61a39a25d5d860eb425712a5da75c --- /dev/null +++ b/assets/ir/0045.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7a6527546a28aaf9152d1560424964304358b4bf5659253a07715f2dcf987817 +size 8064 diff --git a/assets/ir/0046.flac b/assets/ir/0046.flac new file mode 100644 index 0000000000000000000000000000000000000000..36f73a474cc3f98966c02cf8d8a8ea00818a1179 --- /dev/null +++ b/assets/ir/0046.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:902f538091455af545f252e704bbbf4f51eec5df222dc479dc12cc34142ba7f9 +size 9751 diff --git a/assets/ir/0047.flac b/assets/ir/0047.flac new file mode 100644 index 0000000000000000000000000000000000000000..8821193279f1af4d02a8379bbb8e25ea07f94829 --- /dev/null +++ b/assets/ir/0047.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82170e45b6ba0bf2cc56022ca4ebd38a92a7508e2bdb6dd242b4e0b5f17eafbe +size 3483 diff --git a/assets/ir/0048.flac b/assets/ir/0048.flac new file mode 100644 index 0000000000000000000000000000000000000000..d17025de2302493f43e6b4c356b58a9e5110d570 --- /dev/null +++ b/assets/ir/0048.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fce841d6c8bc1f8acd1777f5f5c5449119eeaccea3dffbc35193bea670bd7f36 +size 3396 diff --git a/assets/ir/0049.flac b/assets/ir/0049.flac new file mode 100644 index 0000000000000000000000000000000000000000..a88aa137c8fe9677b5c7d3467204f6a0103a7d4f --- /dev/null +++ b/assets/ir/0049.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d7ed804ab4b750a13e336c7c8c01f39801d4286f0c526cdfa82408addbeab3a +size 3431 diff --git a/assets/ir/0050.flac b/assets/ir/0050.flac new file mode 100644 index 0000000000000000000000000000000000000000..4b271c130ab349295c53c7b06049fb7dd0f60abc --- /dev/null +++ b/assets/ir/0050.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:429a297be6ba5fb13a3abc8b880eabfb74cc72ea32506bf3deba05a061d631cb +size 8545 diff --git a/assets/ir/0051.flac b/assets/ir/0051.flac new file mode 100644 index 0000000000000000000000000000000000000000..f9384900a06b71e8a016aac9df5d7fe7ea1ab361 --- /dev/null +++ b/assets/ir/0051.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b191362edc899b83253e8d84135a0d4c58325a4c5ab0427d22dbb703dd458617 +size 4055 diff --git a/assets/ir/0052.flac b/assets/ir/0052.flac new file mode 100644 index 0000000000000000000000000000000000000000..2635ee99f8251408d9ccfa86e828b69a3c7045ac --- /dev/null +++ b/assets/ir/0052.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5da9a1e08c800fbccda72254e58c4dcf720b8024f525a84cd1bb56ff8f789e07 +size 9155 diff --git a/assets/ir/0053.flac b/assets/ir/0053.flac new file mode 100644 index 0000000000000000000000000000000000000000..e350189c98ce9657399e4bfec2efb2f06d4f63ca --- /dev/null +++ b/assets/ir/0053.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c7813f61f7a1cd99b9ae8f1ac4edb990d78ffc655a475c0ca94ba4d9735eeae +size 8195 diff --git a/assets/ir/0054.flac b/assets/ir/0054.flac new file mode 100644 index 0000000000000000000000000000000000000000..ed24dd540118fd4b82cca4551d3d639880c7656c --- /dev/null +++ b/assets/ir/0054.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06275c9770b1745f0b1ab9fb49643cde6c973ca80ba1f3fa7091a2a52eba1909 +size 2581 diff --git a/assets/ir/0055.flac b/assets/ir/0055.flac new file mode 100644 index 0000000000000000000000000000000000000000..bf14764ea5f42633fe7d8580cfd33acd713a1ffa --- /dev/null +++ b/assets/ir/0055.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2fcd14bb52c7fbdd14a01b1fb02373e5df244a3f125dcbae4fa6fbcfb7c569b2 +size 3384 diff --git a/assets/ir/0056.flac b/assets/ir/0056.flac new file mode 100644 index 0000000000000000000000000000000000000000..380bc29bd26b4cb2208d4215d67ac4bfd6cd2392 --- /dev/null +++ b/assets/ir/0056.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df52ae566ed75224fcdb753ef5e908f0ec4d2028fccb769cd86a73cf1406b5c9 +size 3343 diff --git a/assets/ir/0057.flac b/assets/ir/0057.flac new file mode 100644 index 0000000000000000000000000000000000000000..b9fde5785e7904dfb7357af26eabb71506a18532 --- /dev/null +++ b/assets/ir/0057.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c9c72d3f544e9389d2679a11329c75ba415c48a434da54385975c8834ad66ff +size 4679 diff --git a/assets/ir/0058.flac b/assets/ir/0058.flac new file mode 100644 index 0000000000000000000000000000000000000000..ca4c451c8aa4b5f4b920dcb7a5056e0d97b94b20 --- /dev/null +++ b/assets/ir/0058.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:460bc9d88f5bb9d9c39afb355c9a0a0da3480c7bb0f6c417df01c6c99440df3c +size 7624 diff --git a/assets/ir/0059.flac b/assets/ir/0059.flac new file mode 100644 index 0000000000000000000000000000000000000000..fbdea0c748a95fbf5d52236a3875d554d88040bf --- /dev/null +++ b/assets/ir/0059.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d5b82e82136fe19dd715cefb7e1d5de97a540899e938896d226477501b40e33 +size 7211 diff --git a/assets/ir/0060.flac b/assets/ir/0060.flac new file mode 100644 index 0000000000000000000000000000000000000000..a6a85b4ff73653671383e7a4f7a6780af44304a8 --- /dev/null +++ b/assets/ir/0060.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac66d6722a861e9bb82efe7bd1c3145a8fc5f3d125a3d038bfd1d7809bf3942c +size 2943 diff --git a/assets/ir/0061.flac b/assets/ir/0061.flac new file mode 100644 index 0000000000000000000000000000000000000000..212feff7b118010dfc6b8983fff54ec4928f5cbe --- /dev/null +++ b/assets/ir/0061.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e49a622d6c767045cbed3130d43f87abfa33155879315706b6333577886f009c +size 8166 diff --git a/assets/ir/0062.flac b/assets/ir/0062.flac new file mode 100644 index 0000000000000000000000000000000000000000..0fd2b73a73bfdde73c4bff1a3dc76e0ac3af5f4a --- /dev/null +++ b/assets/ir/0062.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc9cde58788dd386f0351bf47ee0d365cb34787701c2abe2f508aec22aba9e12 +size 2868 diff --git a/assets/ir/0063.flac b/assets/ir/0063.flac new file mode 100644 index 0000000000000000000000000000000000000000..9a98c253c37a1d26dbfd93d12899f4682af3f691 --- /dev/null +++ b/assets/ir/0063.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d3a28fd958f91c86e71cf3c48b30357e11d5f0f789e455777422d266dca86c +size 2805 diff --git a/assets/ir/0064.flac b/assets/ir/0064.flac new file mode 100644 index 0000000000000000000000000000000000000000..ff5b92e76fd9940828b470ce3fbc215526c2b591 --- /dev/null +++ b/assets/ir/0064.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a258c10560e4e21979f0a0c4e263af8800010248acf08c26196efcf22c2c8502 +size 4350 diff --git a/assets/ir/0065.flac b/assets/ir/0065.flac new file mode 100644 index 0000000000000000000000000000000000000000..6f39770f6a7c586803dd77e499c421698d2ab433 --- /dev/null +++ b/assets/ir/0065.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:778dae55aa81d62669363e29dfb4ea7e30a1661cb7754fd505dfedbe73cf77df +size 4194 diff --git a/assets/ir/0066.flac b/assets/ir/0066.flac new file mode 100644 index 0000000000000000000000000000000000000000..5d21770fde1fbc7eb01f18cf4bec5da92479e281 --- /dev/null +++ b/assets/ir/0066.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c19843b7a048e206ccfa9211c07b2d300ea7c402aa0ef5872b97e69a18bf302a +size 5229 diff --git a/assets/ir/0067.flac b/assets/ir/0067.flac new file mode 100644 index 0000000000000000000000000000000000000000..fdc9c47ec36e87cac1b645e2762390f1d112f826 --- /dev/null +++ b/assets/ir/0067.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcab3994584aaccc706383875c8b614b94ff5774dd4ce2db912430b8cca8d019 +size 3927 diff --git a/assets/ir/0068.flac b/assets/ir/0068.flac new file mode 100644 index 0000000000000000000000000000000000000000..80e5b1dc8fd6f2f9151cad3e3492d3cd116ee789 --- /dev/null +++ b/assets/ir/0068.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5b7dbe930a00e7b1363e00e1b8d21efb8ef0f2763c8b4e404d6d29d2c82db53 +size 5928 diff --git a/assets/ir/0069.flac b/assets/ir/0069.flac new file mode 100644 index 0000000000000000000000000000000000000000..4e20765ce93dc4adc624eedcc7f96017a105c15f --- /dev/null +++ b/assets/ir/0069.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0dfcf36e2ae2ae4742006da3bce9a8eec1a7589021d34983a575eb6aa08eacd7 +size 3419 diff --git a/assets/ir/0070.flac b/assets/ir/0070.flac new file mode 100644 index 0000000000000000000000000000000000000000..81075e1dc0acaa582f3cd2b423f886cdfa35b01b --- /dev/null +++ b/assets/ir/0070.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40862d86de2bb17a4ee31824c5f254ed2db407617b084bb39d7b2f23baf06cb7 +size 2765 diff --git a/assets/ir/0071.flac b/assets/ir/0071.flac new file mode 100644 index 0000000000000000000000000000000000000000..e1fd62f482aebc4bd20b3b2d3538cfc6bc0451e2 --- /dev/null +++ b/assets/ir/0071.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f72242e9871f6f93539df84c466ebaac3c6875cf795ab0c489e1ce4b75ccd754 +size 2195 diff --git a/assets/ir/0072.flac b/assets/ir/0072.flac new file mode 100644 index 0000000000000000000000000000000000000000..5f71fa6ad5ce087ab7c1b1fe5a7bc05edf32bd7b --- /dev/null +++ b/assets/ir/0072.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5217550ffe24332f1e23c839cd711ca1f60b5ef0f9beeb8f5cdcf6d7ab4942a +size 3404 diff --git a/assets/ir/0073.flac b/assets/ir/0073.flac new file mode 100644 index 0000000000000000000000000000000000000000..9f5d7badbae59d22ed331b9f3663a66b0f49da1a --- /dev/null +++ b/assets/ir/0073.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5066345a4a77ca5e2d7a659dfa8c40c52be4416797c148b36a47ef69e261c95a +size 3137 diff --git a/assets/ir/0074.flac b/assets/ir/0074.flac new file mode 100644 index 0000000000000000000000000000000000000000..104b3125b14f624176132ac63a28e6cf3816b761 --- /dev/null +++ b/assets/ir/0074.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e32a4be5092cc381b73259b2a0b24a7cb441a1c82faa7e3076faaa0e751556a1 +size 3555 diff --git a/assets/ir/0075.flac b/assets/ir/0075.flac new file mode 100644 index 0000000000000000000000000000000000000000..9301fe02889922e5954502d5f5edce40aa59c44a --- /dev/null +++ b/assets/ir/0075.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f4775e09c0d779dff2454be5c3b584c760f48d6d001dbfa58b2311936c3691c4 +size 2990 diff --git a/assets/ir/0076.flac b/assets/ir/0076.flac new file mode 100644 index 0000000000000000000000000000000000000000..ca6aa84b2f0cbe6962ee5c845bba22498ae59805 --- /dev/null +++ b/assets/ir/0076.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cb2ce695dcde4971d86b23a74f012acb4f583366c735977025a333a3b27b202 +size 8228 diff --git a/assets/ir/0077.flac b/assets/ir/0077.flac new file mode 100644 index 0000000000000000000000000000000000000000..21ed0b73c85297aa0916853db81b6fa1d8a418a8 --- /dev/null +++ b/assets/ir/0077.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:449006c5a5d397264f3cad52af10ae5ca732e4fbb3c3dc53d73209a0816434fb +size 3814 diff --git a/assets/ir/0078.flac b/assets/ir/0078.flac new file mode 100644 index 0000000000000000000000000000000000000000..a0ff1b9d200f5fa35e0569fd4b939a2c3d832091 --- /dev/null +++ b/assets/ir/0078.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cde5f349cffb01b0de2fa242f8d3be5d4185d4fc5e568bc317eab10d57013027 +size 3442 diff --git a/assets/ir/0079.flac b/assets/ir/0079.flac new file mode 100644 index 0000000000000000000000000000000000000000..3a842c7daf79c45f30d0d638cddb85fa55da354a --- /dev/null +++ b/assets/ir/0079.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66750706359f3df45ea095cff01b368150d7b6eb761f95289fc7ceff4e265c47 +size 5054 diff --git a/assets/ir/0080.flac b/assets/ir/0080.flac new file mode 100644 index 0000000000000000000000000000000000000000..17f92aca98842d2057a4cc5ce3f2f2f257640347 --- /dev/null +++ b/assets/ir/0080.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d57639ad7cc860ea0f17b95259910a94a2869b5ef46d149d2b05e64b526d4e7d +size 2443 diff --git a/assets/ir/0081.flac b/assets/ir/0081.flac new file mode 100644 index 0000000000000000000000000000000000000000..1028ec3040bfc963d1c8f0363a7f610ab8c68706 --- /dev/null +++ b/assets/ir/0081.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92af6367758458fc023da4dd712ae01371b56ba5521aaae4b28d72cd8ad76e14 +size 3149 diff --git a/assets/ir/0082.flac b/assets/ir/0082.flac new file mode 100644 index 0000000000000000000000000000000000000000..aedd0cb201a19fdf4e7a5b3c99b440d0fbaf7609 --- /dev/null +++ b/assets/ir/0082.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fadff8c6d848d67a433863ea69f5d099b7042679fb3d27925efc0106535d27f7 +size 2893 diff --git a/assets/ir/0083.flac b/assets/ir/0083.flac new file mode 100644 index 0000000000000000000000000000000000000000..dc9e126ed122501304f2f9b5cbddc23cee416532 --- /dev/null +++ b/assets/ir/0083.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df5d55b0c0fe5079cc7fbbaa39e43a2ff61525a4a08b5bd654ae70934fa4676d +size 7336 diff --git a/assets/ir/0084.flac b/assets/ir/0084.flac new file mode 100644 index 0000000000000000000000000000000000000000..6bb67de9bcb0acc6523f9e2ed6ca737b7b929147 --- /dev/null +++ b/assets/ir/0084.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21cd67c6a359f066ae5a4f5a852e6995108b01ae944b7017c458eb12b13d6dab +size 2703 diff --git a/assets/ir/0085.flac b/assets/ir/0085.flac new file mode 100644 index 0000000000000000000000000000000000000000..77b4a3584bcbbe8f01da6323c534a4b6ce4d8a4c --- /dev/null +++ b/assets/ir/0085.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:be5b3b74725544d6785edfb5a13de7e87749ec7cb7e0d9a3630b8bfe5e4fbd04 +size 5306 diff --git a/assets/ir/0086.flac b/assets/ir/0086.flac new file mode 100644 index 0000000000000000000000000000000000000000..c4310e06817d8a36cd3b106fc93c55a13bb1fb10 --- /dev/null +++ b/assets/ir/0086.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e94b147d896089dafa7e37d990dbacb62aca74f2c01fcf176a0576b7e37159d5 +size 3174 diff --git a/assets/ir/0087.flac b/assets/ir/0087.flac new file mode 100644 index 0000000000000000000000000000000000000000..edb0fc09fcffc5ca7d0a84121e031133505b86e9 --- /dev/null +++ b/assets/ir/0087.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:684c8986e0c0f563527cb0b3ba4513b325cf800b188beadd1ad2396236a65ae7 +size 4467 diff --git a/assets/ir/0088.flac b/assets/ir/0088.flac new file mode 100644 index 0000000000000000000000000000000000000000..57de4770baf3f42864988ebda2950825ac748e72 --- /dev/null +++ b/assets/ir/0088.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4be832e054417d311d1a608148c005754ff7c442a854a985a345ca855a3529c3 +size 2664 diff --git a/assets/ir/0089.flac b/assets/ir/0089.flac new file mode 100644 index 0000000000000000000000000000000000000000..59975edfeb4ffd19762cd02ea632505290a11d41 --- /dev/null +++ b/assets/ir/0089.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68a32080d551e0a5578baad67e354bcb10c7250f987099cd6d48187d6e24b712 +size 3903 diff --git a/assets/ir/0090.flac b/assets/ir/0090.flac new file mode 100644 index 0000000000000000000000000000000000000000..fb5c8d6fb7d307d8912e6844402efe54d877ab77 --- /dev/null +++ b/assets/ir/0090.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:124836585d661da10951cacc3237665f9f2500a60c9380103d315e86b16d2cbb +size 4094 diff --git a/assets/ir/0091.flac b/assets/ir/0091.flac new file mode 100644 index 0000000000000000000000000000000000000000..fdeb9a6ea523d74e996a4e0583944ad2d22efc24 --- /dev/null +++ b/assets/ir/0091.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60c636c32504ee3e57672861d1f1b01553f0af8fc37892b25807e927151bb22f +size 4526 diff --git a/assets/ir/0092.flac b/assets/ir/0092.flac new file mode 100644 index 0000000000000000000000000000000000000000..2f479ff861b7aea1fae657e607c7cbc9e39c6a53 --- /dev/null +++ b/assets/ir/0092.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:487e5419bf58dda6e09cae12a6fc205f9967c94703371114c5ecd8e9f096da30 +size 4164 diff --git a/assets/ir/0093.flac b/assets/ir/0093.flac new file mode 100644 index 0000000000000000000000000000000000000000..5509e979c6e3a4ab38daf70b207c7026d72c0c78 --- /dev/null +++ b/assets/ir/0093.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff88f34ee43b1fcafbf762d75ba4273365787aabed384e6593d8330a4ec43cb3 +size 5734 diff --git a/assets/ir/0094.flac b/assets/ir/0094.flac new file mode 100644 index 0000000000000000000000000000000000000000..62ebc07b9279d4cbb10b659926f6f3f04a124f37 --- /dev/null +++ b/assets/ir/0094.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e3a4bc6e03dae8c024c96f18d53d91d1fec81c278cbffff07c2934469b0be4d +size 3698 diff --git a/assets/ir/0095.flac b/assets/ir/0095.flac new file mode 100644 index 0000000000000000000000000000000000000000..3eb522341d57d8165238ed09962d29cd28caf3ce --- /dev/null +++ b/assets/ir/0095.flac @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ee247dfd309a16ea2371dcbeae3639c43acf0ad22ac1c6d38310979c6d449748 +size 2097 diff --git a/assets/ir/0096.flac b/assets/ir/0096.flac new file mode 100644 index 0000000000000000000000000000000000000000..d382a37de4a955d07eb0bc6945a5c235d039c8c9 --- /dev/null +++ b/assets/ir/0096.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1acdfab041546fe6d9e4d2b91e9d592137a761bf8ae18aa8e58c0817566ab7c +size 3437 diff --git a/assets/ir/0097.flac b/assets/ir/0097.flac new file mode 100644 index 0000000000000000000000000000000000000000..a525e46bc87678744393de9b15e3ca7aa278cff5 --- /dev/null +++ b/assets/ir/0097.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb2a8148c6c7ce173910492669c6322a058cabb217a51fdd769eb0a63513bb84 +size 9658 diff --git a/assets/ir/0098.flac b/assets/ir/0098.flac new file mode 100644 index 0000000000000000000000000000000000000000..fb0bc5ba5216ac6cc3b40a7d3d1b88dad2b59b8e --- /dev/null +++ b/assets/ir/0098.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bf5d4e0b275340dec6d3fe65b59f91b4db7273d6a59a896356f1081d3f8ea0f +size 7188 diff --git a/assets/ir/0099.flac b/assets/ir/0099.flac new file mode 100644 index 0000000000000000000000000000000000000000..a7a002ce7db32a57f189f6a323c462fa19773f4d --- /dev/null +++ b/assets/ir/0099.flac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f0e137b55296c913430ea1760dd020686d06a59e4b307fdcebf7fcec714bdaa +size 3907 diff --git a/assets/noise/01.wav b/assets/noise/01.wav new file mode 100644 index 0000000000000000000000000000000000000000..15eb2a515e30db6baa22d871d2f61de2170da72a Binary files /dev/null and b/assets/noise/01.wav differ diff --git a/assets/noise/02.wav b/assets/noise/02.wav new file mode 100644 index 0000000000000000000000000000000000000000..e0a3a582bfdcf40f927172d5c83247c803486c77 Binary files /dev/null and b/assets/noise/02.wav differ diff --git a/assets/noise/03.wav b/assets/noise/03.wav new file mode 100644 index 0000000000000000000000000000000000000000..5315d174754bda86f686159389615236f09b2e5e Binary files /dev/null and b/assets/noise/03.wav differ diff --git a/assets/noise/04.wav b/assets/noise/04.wav new file mode 100644 index 0000000000000000000000000000000000000000..f3cb456e2a71f52749986eb03d067e8219f862bf Binary files /dev/null and b/assets/noise/04.wav differ diff --git a/assets/noise/05.wav b/assets/noise/05.wav new file mode 100644 index 0000000000000000000000000000000000000000..68f509c4701a019eb76149f1a53c2cf64cdfa457 Binary files /dev/null and b/assets/noise/05.wav differ diff --git a/assets/noise/06.wav b/assets/noise/06.wav new file mode 100644 index 0000000000000000000000000000000000000000..31a0c14b24469f5fc8aedb03d3848b91f0021779 Binary files /dev/null and b/assets/noise/06.wav differ diff --git a/assets/noise/07.wav b/assets/noise/07.wav new file mode 100644 index 0000000000000000000000000000000000000000..95ac73b7ca6b00495223fa15f4c5fefcde69f603 Binary files /dev/null and b/assets/noise/07.wav differ diff --git a/assets/noise/08.wav b/assets/noise/08.wav new file mode 100644 index 0000000000000000000000000000000000000000..3b9d5fa475aa19603472678330816d2f22dd7214 Binary files /dev/null and b/assets/noise/08.wav differ diff --git a/assets/noise/09.wav b/assets/noise/09.wav new file mode 100644 index 0000000000000000000000000000000000000000..d2356c1c6a573ebcd7b83a45bb534293e93f0768 Binary files /dev/null and b/assets/noise/09.wav differ diff --git 
a/assets/noise/10.wav b/assets/noise/10.wav new file mode 100644 index 0000000000000000000000000000000000000000..2bd4c992a0296c571005f22c75aa0e2f88a84b59 Binary files /dev/null and b/assets/noise/10.wav differ diff --git a/assets/noise/100.wav b/assets/noise/100.wav new file mode 100644 index 0000000000000000000000000000000000000000..02753af764bdb204c75b926952931481e9f92eb9 Binary files /dev/null and b/assets/noise/100.wav differ diff --git a/assets/noise/101.wav b/assets/noise/101.wav new file mode 100644 index 0000000000000000000000000000000000000000..875fe0509d6604e3c8a407f77b3b960cb8074af9 Binary files /dev/null and b/assets/noise/101.wav differ diff --git a/assets/noise/102.wav b/assets/noise/102.wav new file mode 100644 index 0000000000000000000000000000000000000000..3daa37b5da91599e04c168531699f59160bab258 Binary files /dev/null and b/assets/noise/102.wav differ diff --git a/assets/noise/103.wav b/assets/noise/103.wav new file mode 100644 index 0000000000000000000000000000000000000000..dd4bbb5150c22d4467f3ff3ef6479318b20b553f Binary files /dev/null and b/assets/noise/103.wav differ diff --git a/assets/noise/104.wav b/assets/noise/104.wav new file mode 100644 index 0000000000000000000000000000000000000000..013a1de6028081bb18997cd6b72e8b2b9f141a6c Binary files /dev/null and b/assets/noise/104.wav differ diff --git a/assets/noise/105.wav b/assets/noise/105.wav new file mode 100644 index 0000000000000000000000000000000000000000..5d0391a01d8a2b927ff4c5be3422f62cac9576cf Binary files /dev/null and b/assets/noise/105.wav differ diff --git a/assets/noise/106.wav b/assets/noise/106.wav new file mode 100644 index 0000000000000000000000000000000000000000..c2920a602ebd3bb431882405d8e24c6e43d78705 Binary files /dev/null and b/assets/noise/106.wav differ diff --git a/assets/noise/107.wav b/assets/noise/107.wav new file mode 100644 index 0000000000000000000000000000000000000000..c16edf528bd84c82ef802de7ba7588002d397fd5 Binary files /dev/null and b/assets/noise/107.wav differ diff --git a/assets/noise/108.wav b/assets/noise/108.wav new file mode 100644 index 0000000000000000000000000000000000000000..19f4f7be40b4d2af1a0ce4faec92fc1d05cd2f8d Binary files /dev/null and b/assets/noise/108.wav differ diff --git a/assets/noise/109.wav b/assets/noise/109.wav new file mode 100644 index 0000000000000000000000000000000000000000..866db1163c5b46033b860ba6dc786f9f2874cef9 Binary files /dev/null and b/assets/noise/109.wav differ diff --git a/assets/noise/11.wav b/assets/noise/11.wav new file mode 100644 index 0000000000000000000000000000000000000000..084cb2f7337ab2efb645135ecddb1283cc5eeaf8 Binary files /dev/null and b/assets/noise/11.wav differ diff --git a/assets/noise/110.wav b/assets/noise/110.wav new file mode 100644 index 0000000000000000000000000000000000000000..cdb844892a1e916079a228620b8873798e3a9608 Binary files /dev/null and b/assets/noise/110.wav differ diff --git a/assets/noise/111.wav b/assets/noise/111.wav new file mode 100644 index 0000000000000000000000000000000000000000..77f04da838922da194d55d2b31183d3afabbd9af Binary files /dev/null and b/assets/noise/111.wav differ diff --git a/assets/noise/112.wav b/assets/noise/112.wav new file mode 100644 index 0000000000000000000000000000000000000000..443ebcb139966eea8c18bc59dc184ba45e7fa2e7 Binary files /dev/null and b/assets/noise/112.wav differ diff --git a/assets/noise/113.wav b/assets/noise/113.wav new file mode 100644 index 0000000000000000000000000000000000000000..6af9e80a306c22a28b551e51b7c08956f3f94760 Binary files /dev/null and 
b/assets/noise/113.wav differ diff --git a/assets/noise/114.wav b/assets/noise/114.wav new file mode 100644 index 0000000000000000000000000000000000000000..45efeeefacf8341a8e20d05a3ea423b194f872dc Binary files /dev/null and b/assets/noise/114.wav differ diff --git a/assets/noise/115.wav b/assets/noise/115.wav new file mode 100644 index 0000000000000000000000000000000000000000..b1430d936efb6483dee002df8f62f574e9ec097c Binary files /dev/null and b/assets/noise/115.wav differ diff --git a/assets/noise/116.wav b/assets/noise/116.wav new file mode 100644 index 0000000000000000000000000000000000000000..ae18f6a6ed4ce3b24d06b01cb2c7b17b99b526bb Binary files /dev/null and b/assets/noise/116.wav differ diff --git a/assets/noise/117.wav b/assets/noise/117.wav new file mode 100644 index 0000000000000000000000000000000000000000..6d5def145737adefe1a112623d6f41cb4c18fc81 Binary files /dev/null and b/assets/noise/117.wav differ diff --git a/assets/noise/118.wav b/assets/noise/118.wav new file mode 100644 index 0000000000000000000000000000000000000000..0e223420743e5ee2f9dfcb0ae4c5a63614de03ab Binary files /dev/null and b/assets/noise/118.wav differ diff --git a/assets/noise/119.wav b/assets/noise/119.wav new file mode 100644 index 0000000000000000000000000000000000000000..7aa4afec24d3320cb875f37ade2f915f849e6511 Binary files /dev/null and b/assets/noise/119.wav differ diff --git a/assets/noise/12.wav b/assets/noise/12.wav new file mode 100644 index 0000000000000000000000000000000000000000..b446071cf42ee5593c13cdfdca0df965535eda44 Binary files /dev/null and b/assets/noise/12.wav differ diff --git a/assets/noise/120.wav b/assets/noise/120.wav new file mode 100644 index 0000000000000000000000000000000000000000..74004a89789e6513490c99dfbdff533424818f48 Binary files /dev/null and b/assets/noise/120.wav differ diff --git a/assets/noise/121.wav b/assets/noise/121.wav new file mode 100644 index 0000000000000000000000000000000000000000..d7baba7d98f8035580a3d0dde964b20ce2a483bb Binary files /dev/null and b/assets/noise/121.wav differ diff --git a/assets/noise/122.wav b/assets/noise/122.wav new file mode 100644 index 0000000000000000000000000000000000000000..ecf5b52f80f2adcc5e1676cb01c0bd8dc6212331 Binary files /dev/null and b/assets/noise/122.wav differ diff --git a/assets/noise/123.wav b/assets/noise/123.wav new file mode 100644 index 0000000000000000000000000000000000000000..f61cfd5e3ec9ebc5e4c6460ac2485e2062c92091 Binary files /dev/null and b/assets/noise/123.wav differ diff --git a/assets/noise/124.wav b/assets/noise/124.wav new file mode 100644 index 0000000000000000000000000000000000000000..dceef38dda6336d90c98dc665c8200c24c5547c8 Binary files /dev/null and b/assets/noise/124.wav differ diff --git a/assets/noise/125.wav b/assets/noise/125.wav new file mode 100644 index 0000000000000000000000000000000000000000..a755dae247393e670d09465fc8b7446fc14f5c7f Binary files /dev/null and b/assets/noise/125.wav differ diff --git a/assets/noise/126.wav b/assets/noise/126.wav new file mode 100644 index 0000000000000000000000000000000000000000..e8dca37c644add373fc20e67cb7ef08ce7510f5b Binary files /dev/null and b/assets/noise/126.wav differ diff --git a/assets/noise/127.wav b/assets/noise/127.wav new file mode 100644 index 0000000000000000000000000000000000000000..58036d62acaae01c692972f5ddebe4b131a80aee Binary files /dev/null and b/assets/noise/127.wav differ diff --git a/assets/noise/128.wav b/assets/noise/128.wav new file mode 100644 index 0000000000000000000000000000000000000000..1da5f03af070fe2b579a039f16e14cf90442b8c8 
Binary files /dev/null and b/assets/noise/128.wav differ diff --git a/assets/noise/129.wav b/assets/noise/129.wav new file mode 100644 index 0000000000000000000000000000000000000000..1b42daaf33d9713d97ebea064094f9f96cf432c9 Binary files /dev/null and b/assets/noise/129.wav differ diff --git a/assets/noise/13.wav b/assets/noise/13.wav new file mode 100644 index 0000000000000000000000000000000000000000..45527bb5ab8afb7b10ef2bf15279c9bd00687d5d Binary files /dev/null and b/assets/noise/13.wav differ diff --git a/assets/noise/130.wav b/assets/noise/130.wav new file mode 100644 index 0000000000000000000000000000000000000000..605e53a885b2fa6de2fca062a744b8ef960a6e45 Binary files /dev/null and b/assets/noise/130.wav differ diff --git a/assets/noise/131.wav b/assets/noise/131.wav new file mode 100644 index 0000000000000000000000000000000000000000..23e4ba10aed1a2647ecf9bf6d39aea80bfccdb96 Binary files /dev/null and b/assets/noise/131.wav differ diff --git a/assets/noise/132.wav b/assets/noise/132.wav new file mode 100644 index 0000000000000000000000000000000000000000..f50b553b5a18eeec1c0b17ddf3e38f699d0ffc3c Binary files /dev/null and b/assets/noise/132.wav differ diff --git a/assets/noise/133.wav b/assets/noise/133.wav new file mode 100644 index 0000000000000000000000000000000000000000..67459b4284cdb855473e31bbfa323866c94eb8bd Binary files /dev/null and b/assets/noise/133.wav differ diff --git a/assets/noise/134.wav b/assets/noise/134.wav new file mode 100644 index 0000000000000000000000000000000000000000..59da930f16e9b03a43dc3b3038867dfd62df746f Binary files /dev/null and b/assets/noise/134.wav differ diff --git a/assets/noise/135.wav b/assets/noise/135.wav new file mode 100644 index 0000000000000000000000000000000000000000..afb6585ff2e176a44bd88f69d0b442c6a4d00201 Binary files /dev/null and b/assets/noise/135.wav differ diff --git a/assets/noise/136.wav b/assets/noise/136.wav new file mode 100644 index 0000000000000000000000000000000000000000..57881d9371412375c108d07576fa356b4e46cd45 Binary files /dev/null and b/assets/noise/136.wav differ diff --git a/assets/noise/137.wav b/assets/noise/137.wav new file mode 100644 index 0000000000000000000000000000000000000000..6937ae3252293d44fcfd93f8759b14bcff2f0076 Binary files /dev/null and b/assets/noise/137.wav differ diff --git a/assets/noise/138.wav b/assets/noise/138.wav new file mode 100644 index 0000000000000000000000000000000000000000..da10ef92615e90c927dd77e5fb959d0316f4c080 Binary files /dev/null and b/assets/noise/138.wav differ diff --git a/assets/noise/139.wav b/assets/noise/139.wav new file mode 100644 index 0000000000000000000000000000000000000000..333c771a516f2dad52c41746b85d92d58f48da3f Binary files /dev/null and b/assets/noise/139.wav differ diff --git a/assets/noise/14.wav b/assets/noise/14.wav new file mode 100644 index 0000000000000000000000000000000000000000..2904d2310f06caceee1d9c7a87b74ce7d0894116 Binary files /dev/null and b/assets/noise/14.wav differ diff --git a/assets/noise/140.wav b/assets/noise/140.wav new file mode 100644 index 0000000000000000000000000000000000000000..d191015ab3292743d0c87f8ec40879c0ea4cfe60 Binary files /dev/null and b/assets/noise/140.wav differ diff --git a/assets/noise/141.wav b/assets/noise/141.wav new file mode 100644 index 0000000000000000000000000000000000000000..da76d1a78de35384f4cbb17953dd5c89b504f359 Binary files /dev/null and b/assets/noise/141.wav differ diff --git a/assets/noise/142.wav b/assets/noise/142.wav new file mode 100644 index 
0000000000000000000000000000000000000000..fc6bb0a41714b3facfacb5ece3df9ae4bf8dfe1a Binary files /dev/null and b/assets/noise/142.wav differ diff --git a/assets/noise/143.wav b/assets/noise/143.wav new file mode 100644 index 0000000000000000000000000000000000000000..b77d93d75776244fb4e3e0741b13df966b94616c Binary files /dev/null and b/assets/noise/143.wav differ diff --git a/assets/noise/144.wav b/assets/noise/144.wav new file mode 100644 index 0000000000000000000000000000000000000000..fad16d00cb7111362fa3a56aa1238a097a77359a Binary files /dev/null and b/assets/noise/144.wav differ diff --git a/assets/noise/145.wav b/assets/noise/145.wav new file mode 100644 index 0000000000000000000000000000000000000000..594cf4a2ed9f9606a6b2baa3dc77e9cf745a7fc0 Binary files /dev/null and b/assets/noise/145.wav differ diff --git a/assets/noise/146.wav b/assets/noise/146.wav new file mode 100644 index 0000000000000000000000000000000000000000..35d4181d8a2e5fcf3418edb6b6364d7410207ffc Binary files /dev/null and b/assets/noise/146.wav differ diff --git a/assets/noise/147.wav b/assets/noise/147.wav new file mode 100644 index 0000000000000000000000000000000000000000..53f5eaa8e2435c0b36f04a99daf2ac74259eb8d8 Binary files /dev/null and b/assets/noise/147.wav differ diff --git a/assets/noise/148.wav b/assets/noise/148.wav new file mode 100644 index 0000000000000000000000000000000000000000..7bc6e6f9a0f121fcab0c0a7c0c554691e2befacf Binary files /dev/null and b/assets/noise/148.wav differ diff --git a/assets/noise/149.wav b/assets/noise/149.wav new file mode 100644 index 0000000000000000000000000000000000000000..6a69b112b1f36703fd50815384f41dda45cd4b00 Binary files /dev/null and b/assets/noise/149.wav differ diff --git a/assets/noise/15.wav b/assets/noise/15.wav new file mode 100644 index 0000000000000000000000000000000000000000..630ca2992227a6db779b0f6afc77d06307ac77cb Binary files /dev/null and b/assets/noise/15.wav differ diff --git a/assets/noise/150.wav b/assets/noise/150.wav new file mode 100644 index 0000000000000000000000000000000000000000..f62a3976e5602e1fd163125b133b566f5001af5b Binary files /dev/null and b/assets/noise/150.wav differ diff --git a/assets/noise/151.wav b/assets/noise/151.wav new file mode 100644 index 0000000000000000000000000000000000000000..078f3c44d8b28d255028020f18a76c2573f6c157 Binary files /dev/null and b/assets/noise/151.wav differ diff --git a/assets/noise/152.wav b/assets/noise/152.wav new file mode 100644 index 0000000000000000000000000000000000000000..c74a58d24cb476f63135f0c071026639a3fc01a0 Binary files /dev/null and b/assets/noise/152.wav differ diff --git a/assets/noise/153.wav b/assets/noise/153.wav new file mode 100644 index 0000000000000000000000000000000000000000..bb47bddeaa94f9b2ca906bea8f7e0a120614c42b Binary files /dev/null and b/assets/noise/153.wav differ diff --git a/assets/noise/154.wav b/assets/noise/154.wav new file mode 100644 index 0000000000000000000000000000000000000000..d6d843abdda16aab98226b0dc6850d47b9fe95b4 Binary files /dev/null and b/assets/noise/154.wav differ diff --git a/assets/noise/155.wav b/assets/noise/155.wav new file mode 100644 index 0000000000000000000000000000000000000000..5d214ec8dd85bcb25bddb2a3b907426462d1e909 Binary files /dev/null and b/assets/noise/155.wav differ diff --git a/assets/noise/156.wav b/assets/noise/156.wav new file mode 100644 index 0000000000000000000000000000000000000000..4785ccbd0394b47caf9d0859499f9507a6060dba Binary files /dev/null and b/assets/noise/156.wav differ diff --git a/assets/noise/157.wav b/assets/noise/157.wav 
new file mode 100644 index 0000000000000000000000000000000000000000..8a225475d70f24fa90175ba0e8b8d09f33e3d798 Binary files /dev/null and b/assets/noise/157.wav differ diff --git a/assets/noise/158.wav b/assets/noise/158.wav new file mode 100644 index 0000000000000000000000000000000000000000..1af71594e6fbeb6e45404d2cd316a03ab9b006bf Binary files /dev/null and b/assets/noise/158.wav differ diff --git a/assets/noise/159.wav b/assets/noise/159.wav new file mode 100644 index 0000000000000000000000000000000000000000..7d416c8a3c7ec6f6f1a3d58134a5e5380f7193f8 Binary files /dev/null and b/assets/noise/159.wav differ diff --git a/assets/noise/16.wav b/assets/noise/16.wav new file mode 100644 index 0000000000000000000000000000000000000000..5ed2acf3ab2720205dfc31deb1d767771171d6b1 Binary files /dev/null and b/assets/noise/16.wav differ diff --git a/assets/noise/160.wav b/assets/noise/160.wav new file mode 100644 index 0000000000000000000000000000000000000000..8992bedd8b7688e0b141753f88dc233f2ad770cb Binary files /dev/null and b/assets/noise/160.wav differ diff --git a/assets/noise/161.wav b/assets/noise/161.wav new file mode 100644 index 0000000000000000000000000000000000000000..eb5b4b1d6c2ebe4c65d9b2cb4e13781007ab7308 Binary files /dev/null and b/assets/noise/161.wav differ diff --git a/assets/noise/162.wav b/assets/noise/162.wav new file mode 100644 index 0000000000000000000000000000000000000000..dd5254854ac09f9bad738df12f2221bfbb0a6e50 Binary files /dev/null and b/assets/noise/162.wav differ diff --git a/assets/noise/163.wav b/assets/noise/163.wav new file mode 100644 index 0000000000000000000000000000000000000000..d2be651116617a46547337f60d7de6142333bd24 Binary files /dev/null and b/assets/noise/163.wav differ diff --git a/assets/noise/164.wav b/assets/noise/164.wav new file mode 100644 index 0000000000000000000000000000000000000000..994cec31df1066709dbdf8c128e18f7ec721261b Binary files /dev/null and b/assets/noise/164.wav differ diff --git a/assets/noise/165.wav b/assets/noise/165.wav new file mode 100644 index 0000000000000000000000000000000000000000..c9a92c52a7d8aba49d582828eeedc011a997b974 Binary files /dev/null and b/assets/noise/165.wav differ diff --git a/assets/noise/166.wav b/assets/noise/166.wav new file mode 100644 index 0000000000000000000000000000000000000000..f33668efe380a8053aab7ef1057014090f41b0d0 Binary files /dev/null and b/assets/noise/166.wav differ diff --git a/assets/noise/167.wav b/assets/noise/167.wav new file mode 100644 index 0000000000000000000000000000000000000000..1ea96299054f3255d5a05ce343e1038c01f7a59f Binary files /dev/null and b/assets/noise/167.wav differ diff --git a/assets/noise/168.wav b/assets/noise/168.wav new file mode 100644 index 0000000000000000000000000000000000000000..9264b1df7d32b4883310098640651bd152d3ea2d Binary files /dev/null and b/assets/noise/168.wav differ diff --git a/assets/noise/169.wav b/assets/noise/169.wav new file mode 100644 index 0000000000000000000000000000000000000000..08aa3e22f03ffabd42c782632e4245928af40159 Binary files /dev/null and b/assets/noise/169.wav differ diff --git a/assets/noise/17.wav b/assets/noise/17.wav new file mode 100644 index 0000000000000000000000000000000000000000..eac6a68d5a201902c0205d014324cbb4d9954075 Binary files /dev/null and b/assets/noise/17.wav differ diff --git a/assets/noise/170.wav b/assets/noise/170.wav new file mode 100644 index 0000000000000000000000000000000000000000..a5b3b94f8aa34e4ba42ea021ec99e3713aa88001 Binary files /dev/null and b/assets/noise/170.wav differ diff --git a/assets/noise/171.wav 
b/assets/noise/171.wav new file mode 100644 index 0000000000000000000000000000000000000000..7803b248e6c49b43d329b1d2cc1503d19bb6e072 Binary files /dev/null and b/assets/noise/171.wav differ diff --git a/assets/noise/172.wav b/assets/noise/172.wav new file mode 100644 index 0000000000000000000000000000000000000000..d85cb4642db7c4a29cee26642057cddf0b09ced3 Binary files /dev/null and b/assets/noise/172.wav differ diff --git a/assets/noise/173.wav b/assets/noise/173.wav new file mode 100644 index 0000000000000000000000000000000000000000..4ad7cea2ddc89a0b1701936a8517c9565f0899b9 Binary files /dev/null and b/assets/noise/173.wav differ diff --git a/assets/noise/174.wav b/assets/noise/174.wav new file mode 100644 index 0000000000000000000000000000000000000000..6b239ee303660c49bd23df82650c56b295204707 Binary files /dev/null and b/assets/noise/174.wav differ diff --git a/assets/noise/175.wav b/assets/noise/175.wav new file mode 100644 index 0000000000000000000000000000000000000000..b7d6253d57c8320a523a59c86b05a11b346e390a Binary files /dev/null and b/assets/noise/175.wav differ diff --git a/assets/noise/176.wav b/assets/noise/176.wav new file mode 100644 index 0000000000000000000000000000000000000000..8163bf0b216b312daa681fd9b325393c8676622d Binary files /dev/null and b/assets/noise/176.wav differ diff --git a/assets/noise/177.wav b/assets/noise/177.wav new file mode 100644 index 0000000000000000000000000000000000000000..21f9240233a094b1e8797585c17a37afb51ec58d Binary files /dev/null and b/assets/noise/177.wav differ diff --git a/assets/noise/178.wav b/assets/noise/178.wav new file mode 100644 index 0000000000000000000000000000000000000000..2626cf3a1fa99a3dc23b80599e6980dbe51edbcc Binary files /dev/null and b/assets/noise/178.wav differ diff --git a/assets/noise/179.wav b/assets/noise/179.wav new file mode 100644 index 0000000000000000000000000000000000000000..bf6b0a2e8342ccfd9d8b3878a576d547df6c4b50 Binary files /dev/null and b/assets/noise/179.wav differ diff --git a/assets/noise/18.wav b/assets/noise/18.wav new file mode 100644 index 0000000000000000000000000000000000000000..7c20fd43564ae569f8ec72cba96dcd88d0ad231a Binary files /dev/null and b/assets/noise/18.wav differ diff --git a/assets/noise/180.wav b/assets/noise/180.wav new file mode 100644 index 0000000000000000000000000000000000000000..c8f2fc5206e65b25c38e29a79c792ff73cbcb045 Binary files /dev/null and b/assets/noise/180.wav differ diff --git a/assets/noise/181.wav b/assets/noise/181.wav new file mode 100644 index 0000000000000000000000000000000000000000..8b860fcd66afd86e25618e298d689cedaed54cd3 Binary files /dev/null and b/assets/noise/181.wav differ diff --git a/assets/pretrained/003b_checkpoint_03000000.pt b/assets/pretrained/003b_checkpoint_03000000.pt new file mode 100644 index 0000000000000000000000000000000000000000..409e347bdf3acced95b1a86318c453508f261a77 --- /dev/null +++ b/assets/pretrained/003b_checkpoint_03000000.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48b250b90b482d7510e7f2c1148ccb186160a3f9a1b6289d3c53779cb217cf64 +size 26504680 diff --git a/assets/pretrained/008_1_checkpoint_00300000.pt b/assets/pretrained/008_1_checkpoint_00300000.pt new file mode 100644 index 0000000000000000000000000000000000000000..8ad61f1acc7098c7ad69c91651e7d6558864b2c6 --- /dev/null +++ b/assets/pretrained/008_1_checkpoint_00300000.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32174239b2fa3411544a8d6015f970fd5de65b7b512864f6980cbfe6f47043a6 +size 6907000 diff --git 
a/assets/pretrained/040c_checkpoint_libritts_r_200_02300000.pt b/assets/pretrained/040c_checkpoint_libritts_r_200_02300000.pt new file mode 100644 index 0000000000000000000000000000000000000000..9067d4dda262f5ad7e98d99ff54af0420f32c831 --- /dev/null +++ b/assets/pretrained/040c_checkpoint_libritts_r_200_02300000.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9a698c3e87a3a8ad81f676834f6b9e4e7f8e69ab5633b2aa5a638ec45aac42d +size 183806696 diff --git a/assets/test/common_voice_ja_38833628_16k.wav b/assets/test/common_voice_ja_38833628_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..cf35daa5fefa113d2e45e96ed0d4f1e967fca813 Binary files /dev/null and b/assets/test/common_voice_ja_38833628_16k.wav differ diff --git a/assets/test/common_voice_ja_38843402_16k.wav b/assets/test/common_voice_ja_38843402_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..6190ff251663a4ca455a6be6c67bbf88842168e0 Binary files /dev/null and b/assets/test/common_voice_ja_38843402_16k.wav differ diff --git a/assets/test/common_voice_ja_38852485_16k.wav b/assets/test/common_voice_ja_38852485_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..598804671a1b90afb05bdd670e0d83c531d9b0ca Binary files /dev/null and b/assets/test/common_voice_ja_38852485_16k.wav differ diff --git a/assets/test/common_voice_ja_38853932_16k.wav b/assets/test/common_voice_ja_38853932_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..d17e376a9582a6126733c17e607fffb445a20df1 Binary files /dev/null and b/assets/test/common_voice_ja_38853932_16k.wav differ diff --git a/assets/test/common_voice_ja_38864552_16k.wav b/assets/test/common_voice_ja_38864552_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..f28604914ff276138e8ce97affed8853a5c841ee Binary files /dev/null and b/assets/test/common_voice_ja_38864552_16k.wav differ diff --git a/assets/test/common_voice_ja_38878413_16k.wav b/assets/test/common_voice_ja_38878413_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..55e7225ead00766e3e61d9b5ad6690df7d4c8aa6 Binary files /dev/null and b/assets/test/common_voice_ja_38878413_16k.wav differ diff --git a/assets/test/common_voice_ja_38898180_16k.wav b/assets/test/common_voice_ja_38898180_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..a91c25dd476b17e3ca16ab0b78b3616311144724 Binary files /dev/null and b/assets/test/common_voice_ja_38898180_16k.wav differ diff --git a/assets/test/common_voice_ja_38925334_16k.wav b/assets/test/common_voice_ja_38925334_16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..11e6d0cb5cb7b4d777027cd13112235eb9407848 Binary files /dev/null and b/assets/test/common_voice_ja_38925334_16k.wav differ diff --git a/beatrice_trainer/__init__.py b/beatrice_trainer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79079ab7abcc6b005780047d1580727377856806 --- /dev/null +++ b/beatrice_trainer/__init__.py @@ -0,0 +1 @@ +from .__main__ import * diff --git a/beatrice_trainer/__main__.py b/beatrice_trainer/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..06c562999e9899977e8cfe23f9f93b356ef653a6 --- /dev/null +++ b/beatrice_trainer/__main__.py @@ -0,0 +1,2931 @@ +# %% [markdown] +# ## Settings + +# %% +import argparse +import gc +import json +import math +import os +import shutil +import warnings +from collections import defaultdict +from copy import 
deepcopy
+from fractions import Fraction
+from functools import partial
+from pathlib import Path
+from pprint import pprint
+from random import Random
+from typing import BinaryIO, Literal, Optional, Union
+
+import numpy as np
+import pyworld
+import torch
+import torch.nn as nn
+import torchaudio
+from torch.nn import functional as F
+from torch.nn.utils import remove_weight_norm, weight_norm
+from torch.utils.tensorboard import SummaryWriter
+from tqdm.auto import tqdm
+
+assert "soundfile" in torchaudio.list_audio_backends()
+
+
+# Not the version of this module
+PARAPHERNALIA_VERSION = "2.0.0-alpha.2"
+
+
+def is_notebook() -> bool:
+    return "get_ipython" in globals()
+
+
+def repo_root() -> Path:
+    d = Path.cwd() / "dummy" if is_notebook() else Path(__file__)
+    assert d.is_absolute(), d
+    for d in d.parents:
+        if (d / ".git").is_dir():
+            return d
+    raise RuntimeError("Repository root is not found.")
+
+
+# Hyperparameters
+# Things that change from one training run to the next, such as the training
+# data and the output directory, are not included here.
+dict_default_hparams = {
+    # train
+    "learning_rate": 1e-4,
+    "min_learning_rate": 5e-6,
+    "adam_betas": [0.8, 0.99],
+    "adam_eps": 1e-6,
+    "batch_size": 8,
+    "grad_weight_mel": 1.0,  # grad_weights with the same ratios should mean the same thing
+    "grad_weight_adv": 1.0,
+    "grad_weight_fm": 1.0,
+    "grad_balancer_ema_decay": 0.995,
+    "use_amp": True,
+    "num_workers": 16,
+    "n_steps": 20000,
+    "warmup_steps": 10000,
+    "in_sample_rate": 16000,  # must not be changed
+    "out_sample_rate": 24000,  # must not be changed
+    "wav_length": 4 * 24000,  # 4s
+    "segment_length": 100,  # 1s
+    # data
+    "phone_extractor_file": "assets/pretrained/003b_checkpoint_03000000.pt",
+    "pitch_estimator_file": "assets/pretrained/008_1_checkpoint_00300000.pt",
+    "in_ir_wav_dir": "assets/ir",
+    "in_noise_wav_dir": "assets/noise",
+    "in_test_wav_dir": "assets/test",
+    "pretrained_file": "assets/pretrained/040c_checkpoint_libritts_r_200_02300000.pt",  # None is also allowed
+    # model
+    "hidden_channels": 256,  # must not be changed when fine-tuning; changing it requires support on the inference side
+    "san": False,  # must not be changed when fine-tuning
+}
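For reference, a config file passed with `-c` only needs the keys it overrides: `prepare_training_configs` below fills in missing keys from `dict_default_hparams` and warns about unknown ones. A minimal sketch (the file name and override values here are hypothetical):

```python
# Hypothetical minimal config: override a few defaults, leave the rest alone.
# "data_dir" / "out_dir" may be given here instead of the -d / -o arguments
# (command-line arguments take precedence; see prepare_training_configs below).
import json

my_config = {
    "batch_size": 4,        # e.g. to fit in less VRAM
    "n_steps": 40000,
    "data_dir": "my_training_data_dir",
    "out_dir": "my_output_dir",
}
with open("my_config.json", "w", encoding="utf-8") as f:
    json.dump(my_config, f, indent=2)
# then: python3 beatrice_trainer -c my_config.json
```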
+if __name__ == "__main__":
+    # Check that the default settings inside this script are in sync with
+    # assets/default_config.json
+    default_config_file = repo_root() / "assets/default_config.json"
+    if default_config_file.is_file():
+        with open(default_config_file, encoding="utf-8") as f:
+            default_config: dict = json.load(f)
+        for key, value in dict_default_hparams.items():
+            if key not in default_config:
+                warnings.warn(f"{key} not found in default_config.json.")
+            else:
+                if value != default_config[key]:
+                    warnings.warn(
+                        f"{key} differs between default_config.json ({default_config[key]}) and internal default hparams ({value})."
+                    )
+                del default_config[key]
+        for key in default_config:
+            warnings.warn(f"{key} found in default_config.json is unknown.")
+    else:
+        warnings.warn("default_config.json not found.")
+
+
+def prepare_training_configs_for_experiment() -> tuple[dict, Path, Path, bool]:
+    import ipynbname
+    from IPython import get_ipython
+
+    h = deepcopy(dict_default_hparams)
+    in_wav_dataset_dir = repo_root() / "../../data/processed/libritts_r_200"
+    try:
+        notebook_name = ipynbname.name()
+    except FileNotFoundError:
+        notebook_name = Path(get_ipython().user_ns["__vsc_ipynb_file__"]).name
+    out_dir = repo_root() / "notebooks" / notebook_name.split(".")[0].split("_")[0]
+    resume = False
+    return h, in_wav_dataset_dir, out_dir, resume
+
+
+def prepare_training_configs() -> tuple[dict, Path, Path, bool]:
+    # data_dir and out_dir can be specified either in the config file or as
+    # command-line arguments, and the command-line arguments take precedence.
+    # When file paths are given as relative paths, paths in the config file
+    # are interpreted relative to the repository root, while command-line
+    # arguments are interpreted relative to the current directory.
+
+    parser = argparse.ArgumentParser()
+    # fmt: off
+    parser.add_argument("-d", "--data_dir", type=Path, help="directory containing the training data")
+    parser.add_argument("-o", "--out_dir", type=Path, help="output directory")
+    parser.add_argument("-r", "--resume", action="store_true", help="resume training")
+    parser.add_argument("-c", "--config", type=Path, help="path to the config file")
+    # fmt: on
+    args = parser.parse_args()
+
+    # config
+    if args.config is None:
+        h = deepcopy(dict_default_hparams)
+    else:
+        with open(args.config, encoding="utf-8") as f:
+            h = json.load(f)
+        for key in dict_default_hparams.keys():
+            if key not in h:
+                h[key] = dict_default_hparams[key]
+                warnings.warn(
+                    f"{key} is not specified in the config file. Using the default value."
+                )
+    # data_dir
+    if args.data_dir is not None:
+        in_wav_dataset_dir = args.data_dir
+    elif "data_dir" in h:
+        in_wav_dataset_dir = repo_root() / Path(h["data_dir"])
+        del h["data_dir"]
+    else:
+        raise ValueError(
+            "data_dir must be specified. "
+            "For example `python3 beatrice_trainer -d my_training_data_dir -o my_output_dir`."
+        )
+    # out_dir
+    if args.out_dir is not None:
+        out_dir = args.out_dir
+    elif "out_dir" in h:
+        out_dir = repo_root() / Path(h["out_dir"])
+        del h["out_dir"]
+    else:
+        raise ValueError(
+            "out_dir must be specified. "
+            "For example `python3 beatrice_trainer -d my_training_data_dir -o my_output_dir`."
+ ) + for key in list(h.keys()): + if key not in dict_default_hparams: + warnings.warn(f"`{key}` specified in the config file will be ignored.") + del h[key] + # resume + resume = args.resume + return h, in_wav_dataset_dir, out_dir, resume + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.__dict__ = self + + +# %% [markdown] +# ## Phone Extractor + + +# %% +def dump_params(params: torch.Tensor, f: BinaryIO): + if params is None: + return + if params.dtype == torch.bfloat16: + f.write( + params.detach() + .clone() + .float() + .view(torch.short) + .numpy() + .ravel()[1::2] + .tobytes() + ) + else: + f.write(params.detach().numpy().ravel().tobytes()) + f.flush() + + +def dump_layer(layer: nn.Module, f: BinaryIO): + dump = partial(dump_params, f=f) + if hasattr(layer, "dump"): + layer.dump(f) + elif isinstance(layer, (nn.Linear, nn.Conv1d, nn.LayerNorm)): + dump(layer.weight) + dump(layer.bias) + elif isinstance(layer, nn.ConvTranspose1d): + dump(layer.weight.transpose(0, 1)) + dump(layer.bias) + elif isinstance(layer, nn.GRU): + dump(layer.weight_ih_l0) + dump(layer.bias_ih_l0) + dump(layer.weight_hh_l0) + dump(layer.bias_hh_l0) + for i in range(1, 99999): + if not hasattr(layer, f"weight_ih_l{i}"): + break + dump(getattr(layer, f"weight_ih_l{i}")) + dump(getattr(layer, f"bias_ih_l{i}")) + dump(getattr(layer, f"weight_hh_l{i}")) + dump(getattr(layer, f"bias_hh_l{i}")) + elif isinstance(layer, nn.Embedding): + dump(layer.weight) + elif isinstance(layer, nn.ModuleList): + for l in layer: + dump_layer(l, f) + else: + assert False, layer + + +class CausalConv1d(nn.Conv1d): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + groups: int = 1, + bias: bool = True, + delay: int = 0, + ): + padding = (kernel_size - 1) * dilation - delay + self.trim = (kernel_size - 1) * dilation - 2 * delay + if self.trim < 0: + raise ValueError + super().__init__( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + result = super().forward(input) + if self.trim == 0: + return result + else: + return result[:, :, : -self.trim] + + +class ConvNeXtBlock(nn.Module): + def __init__( + self, + channels: int, + intermediate_channels: int, + layer_scale_init_value: float, + kernel_size: int = 7, + use_weight_norm: bool = False, + ): + super().__init__() + self.use_weight_norm = use_weight_norm + self.dwconv = CausalConv1d( + channels, channels, kernel_size=kernel_size, groups=channels + ) + self.norm = nn.LayerNorm(channels) + self.pwconv1 = nn.Linear(channels, intermediate_channels) + self.pwconv2 = nn.Linear(intermediate_channels, channels) + self.gamma = nn.Parameter(torch.full((channels,), layer_scale_init_value)) + if use_weight_norm: + self.norm = nn.Identity() + self.dwconv = weight_norm(self.dwconv) + self.pwconv1 = weight_norm(self.pwconv1) + self.pwconv2 = weight_norm(self.pwconv2) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + identity = x + x = self.dwconv(x) + x = x.transpose(1, 2) + x = self.norm(x) + x = self.pwconv1(x) + x = F.gelu(x, approximate="tanh") + x = self.pwconv2(x) + x *= self.gamma + x = x.transpose(1, 2) + x += identity + return x + + def remove_weight_norm(self): + if self.use_weight_norm: + remove_weight_norm(self.dwconv) + remove_weight_norm(self.pwconv1) + remove_weight_norm(self.pwconv2) + + 
def merge_weights(self): + if not self.use_weight_norm: + self.pwconv1.bias.data += ( + self.norm.bias.data[None, :] * self.pwconv1.weight.data + ).sum(1) + self.pwconv1.weight.data *= self.norm.weight.data[None, :] + self.norm.bias.data[:] = 0.0 + self.norm.weight.data[:] = 1.0 + self.pwconv2.weight.data *= self.gamma.data[:, None] + self.pwconv2.bias.data *= self.gamma.data + self.gamma.data[:] = 1.0 + + def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]): + if isinstance(f, (str, bytes, os.PathLike)): + with open(f, "wb") as f: + self.dump(f) + return + if not hasattr(f, "write"): + raise TypeError + + dump_layer(self.dwconv, f) + dump_layer(self.pwconv1, f) + dump_layer(self.pwconv2, f) + + +class ConvNeXtStack(nn.Module): + def __init__( + self, + in_channels: int, + channels: int, + intermediate_channels: int, + n_blocks: int, + delay: int, + embed_kernel_size: int, + kernel_size: int, + use_weight_norm: bool = False, + ): + super().__init__() + assert delay * 2 + 1 <= embed_kernel_size + self.use_weight_norm = use_weight_norm + self.embed = CausalConv1d(in_channels, channels, embed_kernel_size, delay=delay) + self.norm = nn.LayerNorm(channels) + self.convnext = nn.ModuleList( + [ + ConvNeXtBlock( + channels=channels, + intermediate_channels=intermediate_channels, + layer_scale_init_value=1.0 / n_blocks, + kernel_size=kernel_size, + use_weight_norm=use_weight_norm, + ) + for _ in range(n_blocks) + ] + ) + self.final_layer_norm = nn.LayerNorm(channels) + if use_weight_norm: + self.embed = weight_norm(self.embed) + self.norm = nn.Identity() + self.final_layer_norm = nn.Identity() + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv1d, nn.Linear)): + nn.init.trunc_normal_(m.weight, std=0.02) + nn.init.constant_(m.bias, 0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.embed(x) + x = self.norm(x.transpose(1, 2)).transpose(1, 2) + for conv_block in self.convnext: + x = conv_block(x) + x = self.final_layer_norm(x.transpose(1, 2)).transpose(1, 2) + return x + + def remove_weight_norm(self): + if self.use_weight_norm: + remove_weight_norm(self.embed) + for conv_block in self.convnext: + conv_block.remove_weight_norm() + + def merge_weights(self): + for conv_block in self.convnext: + conv_block.merge_weights() + + def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]): + if isinstance(f, (str, bytes, os.PathLike)): + with open(f, "wb") as f: + self.dump(f) + return + if not hasattr(f, "write"): + raise TypeError + + dump_layer(self.embed, f) + if not self.use_weight_norm: + dump_layer(self.norm, f) + dump_layer(self.convnext, f) + if not self.use_weight_norm: + dump_layer(self.final_layer_norm, f) + + +class FeatureExtractor(nn.Module): + def __init__(self, hidden_channels: int): + super().__init__() + # fmt: off + self.conv0 = weight_norm(nn.Conv1d(1, hidden_channels // 8, 10, 5, bias=False)) + self.conv1 = weight_norm(nn.Conv1d(hidden_channels // 8, hidden_channels // 4, 3, 2, bias=False)) + self.conv2 = weight_norm(nn.Conv1d(hidden_channels // 4, hidden_channels // 2, 3, 2, bias=False)) + self.conv3 = weight_norm(nn.Conv1d(hidden_channels // 2, hidden_channels, 3, 2, bias=False)) + self.conv4 = weight_norm(nn.Conv1d(hidden_channels, hidden_channels, 3, 2, bias=False)) + self.conv5 = weight_norm(nn.Conv1d(hidden_channels, hidden_channels, 2, 2, bias=False)) + # fmt: on + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # x: [batch_size, 1, wav_length] + wav_length = x.size(2) + if wav_length % 160 != 0: + 
warnings.warn("wav_length % 160 != 0") + x = F.pad(x, (40, 40)) + x = F.gelu(self.conv0(x), approximate="tanh") + x = F.gelu(self.conv1(x), approximate="tanh") + x = F.gelu(self.conv2(x), approximate="tanh") + x = F.gelu(self.conv3(x), approximate="tanh") + x = F.gelu(self.conv4(x), approximate="tanh") + x = F.gelu(self.conv5(x), approximate="tanh") + # [batch_size, hidden_channels, wav_length / 160] + return x + + def remove_weight_norm(self): + remove_weight_norm(self.conv0) + remove_weight_norm(self.conv1) + remove_weight_norm(self.conv2) + remove_weight_norm(self.conv3) + remove_weight_norm(self.conv4) + remove_weight_norm(self.conv5) + + def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]): + if isinstance(f, (str, bytes, os.PathLike)): + with open(f, "wb") as f: + self.dump(f) + return + if not hasattr(f, "write"): + raise TypeError + + dump_layer(self.conv0, f) + dump_layer(self.conv1, f) + dump_layer(self.conv2, f) + dump_layer(self.conv3, f) + dump_layer(self.conv4, f) + dump_layer(self.conv5, f) + + +class FeatureProjection(nn.Module): + def __init__(self, in_channels: int, out_channels: int): + super().__init__() + self.norm = nn.LayerNorm(in_channels) + self.projection = nn.Conv1d(in_channels, out_channels, 1) + self.dropout = nn.Dropout(0.1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # [batch_size, channels, length] + x = self.norm(x.transpose(1, 2)).transpose(1, 2) + x = self.projection(x) + x = self.dropout(x) + return x + + def merge_weights(self): + self.projection.bias.data += ( + (self.norm.bias.data[None, :, None] * self.projection.weight.data) + .sum(1) + .squeeze(1) + ) + self.projection.weight.data *= self.norm.weight.data[None, :, None] + self.norm.bias.data[:] = 0.0 + self.norm.weight.data[:] = 1.0 + + def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]): + if isinstance(f, (str, bytes, os.PathLike)): + with open(f, "wb") as f: + self.dump(f) + return + if not hasattr(f, "write"): + raise TypeError + + dump_layer(self.projection, f) + + +class PhoneExtractor(nn.Module): + def __init__( + self, + phone_channels: int = 256, + hidden_channels: int = 256, + backbone_embed_kernel_size: int = 7, + kernel_size: int = 17, + n_blocks: int = 8, + ): + super().__init__() + self.feature_extractor = FeatureExtractor(hidden_channels) + self.feature_projection = FeatureProjection(hidden_channels, hidden_channels) + self.n_speaker_encoder_layers = 3 + self.speaker_encoder = nn.GRU( + hidden_channels, + hidden_channels, + self.n_speaker_encoder_layers, + batch_first=True, + ) + for i in range(self.n_speaker_encoder_layers): + for input_char in "ih": + self.speaker_encoder = weight_norm( + self.speaker_encoder, f"weight_{input_char}h_l{i}" + ) + self.backbone = ConvNeXtStack( + in_channels=hidden_channels, + channels=hidden_channels, + intermediate_channels=hidden_channels * 3, + n_blocks=n_blocks, + delay=0, + embed_kernel_size=backbone_embed_kernel_size, + kernel_size=kernel_size, + ) + self.head = weight_norm(nn.Conv1d(hidden_channels, phone_channels, 1)) + + def forward( + self, x: torch.Tensor, return_stats: bool = True + ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, float]]]: + # x: [batch_size, 1, wav_length] + + stats = {} + + # [batch_size, 1, wav_length] -> [batch_size, feature_extractor_hidden_channels, length] + x = self.feature_extractor(x) + if return_stats: + stats["feature_norm"] = x.detach().norm(dim=1).mean() + # [batch_size, feature_extractor_hidden_channels, length] -> [batch_size, hidden_channels, length] + x = 
self.feature_projection(x)
+        # [batch_size, hidden_channels, length] -> [batch_size, length, hidden_channels]
+        g, _ = self.speaker_encoder(x.transpose(1, 2))
+        if self.training:
+            batch_size, length, _ = g.size()
+            shuffle_sizes_for_each_data = torch.randint(
+                0, 50, (batch_size,), device=g.device
+            )
+            max_indices = torch.arange(length, device=g.device)[None, :, None]
+            min_indices = (
+                max_indices - shuffle_sizes_for_each_data[:, None, None]
+            ).clamp_(min=0)
+            with torch.cuda.amp.autocast(False):
+                indices = (
+                    torch.rand(g.size(), device=g.device)
+                    * (max_indices - min_indices + 1)
+                ).long() + min_indices
+            assert indices.min() >= 0, indices.min()
+            assert indices.max() < length, (indices.max(), length)
+            g = g.gather(1, indices)
+
+        # [batch_size, length, hidden_channels] -> [batch_size, hidden_channels, length]
+        g = g.transpose(1, 2).contiguous()
+        # [batch_size, hidden_channels, length]
+        x = self.backbone(x + g)
+        # [batch_size, hidden_channels, length] -> [batch_size, phone_channels, length]
+        phone = self.head(F.gelu(x, approximate="tanh"))
+
+        results = [phone]
+        if return_stats:
+            stats["code_norm"] = phone.detach().norm(dim=1).mean().item()
+            results.append(stats)
+
+        if len(results) == 1:
+            return results[0]
+        return tuple(results)
+
+    @torch.inference_mode()
+    def units(self, x: torch.Tensor) -> torch.Tensor:
+        # x: [batch_size, 1, wav_length]
+
+        # [batch_size, 1, wav_length] -> [batch_size, phone_channels, length]
+        phone = self.forward(x, return_stats=False)
+        # [batch_size, phone_channels, length] -> [batch_size, length, phone_channels]
+        phone = phone.transpose(1, 2)
+        # [batch_size, length, phone_channels]
+        return phone
+
+    def remove_weight_norm(self):
+        self.feature_extractor.remove_weight_norm()
+        for i in range(self.n_speaker_encoder_layers):
+            for input_char in "ih":
+                remove_weight_norm(self.speaker_encoder, f"weight_{input_char}h_l{i}")
+        remove_weight_norm(self.head)
+
+    def merge_weights(self):
+        self.feature_projection.merge_weights()
+        self.backbone.merge_weights()
+
+    def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]):
+        if isinstance(f, (str, bytes, os.PathLike)):
+            with open(f, "wb") as f:
+                self.dump(f)
+            return
+        if not hasattr(f, "write"):
+            raise TypeError
+
+        dump_layer(self.feature_extractor, f)
+        dump_layer(self.feature_projection, f)
+        dump_layer(self.speaker_encoder, f)
+        dump_layer(self.backbone, f)
+        dump_layer(self.head, f)
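As a shape-level sketch of the 160-sample (10 ms) hop noted in the comments above (untrained weights and random input here, so the values themselves are meaningless):

```python
# Sketch: PhoneExtractor.units() on 1 s of 16 kHz audio.
import torch

extractor = PhoneExtractor()
wav = torch.randn(1, 1, 16000)  # [batch_size, 1, wav_length]
units = extractor.units(wav)    # [batch_size, wav_length // 160, phone_channels]
print(units.shape)              # torch.Size([1, 100, 256])
```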
+# %% [markdown]
+# ## Pitch Estimator
+
+
+# %%
+def extract_pitch_features(
+    y: torch.Tensor,  # [..., wav_length]
+    hop_length: int = 160,  # 10ms
+    win_length: int = 560,  # 35ms
+    max_corr_period: int = 256,  # 16ms, 62.5Hz (16000 / 256)
+    corr_win_length: int = 304,  # 19ms
+    instfreq_features_cutoff_bin: int = 64,  # 1828Hz (16000 * 64 / 560)
+) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+    assert max_corr_period + corr_win_length == win_length
+
+    # Pad
+    padding_length = (win_length - hop_length) // 2
+    y = F.pad(y, (padding_length, padding_length))
+
+    # Split into frames
+    # [..., win_length, n_frames]
+    y_frames = y.unfold(-1, win_length, hop_length).transpose_(-2, -1)
+
+    # Complex spectrogram
+    # Complex[..., (win_length // 2 + 1), n_frames]
+    spec: torch.Tensor = torch.fft.rfft(y_frames, n=win_length, dim=-2)
+
+    # Complex[..., instfreq_features_cutoff_bin, n_frames]
+    spec = spec[..., :instfreq_features_cutoff_bin, :]
+
+    # Log power spectrogram
+    log_power_spec = spec.abs().add_(1e-5).log10_()
+
+    # Time difference of the instantaneous phase
+    # The value at time 0 is 0
+    delta_spec = spec[..., :, 1:] * spec[..., :, :-1].conj()
+    delta_spec /= delta_spec.abs().add_(1e-5)
+    delta_spec = torch.cat(
+        [torch.zeros_like(delta_spec[..., :, :1]), delta_spec], dim=-1
+    )
+
+    # [..., instfreq_features_cutoff_bin * 3, n_frames]
+    instfreq_features = torch.cat(
+        [log_power_spec, delta_spec.real, delta_spec.imag], dim=-2
+    )
+
+    # Autocorrelation
+    # If time allows, trying the LPC residual instead would be worthwhile
+    # The original plan was to multiply this by 2.0 / corr_win_length before use,
+    # but that value is proportional to the square of the amplitude, and no good
+    # way came to mind to standardize its variance for input to the NN, so it
+    # was dropped
+    flipped_y_frames = y_frames.flip((-2,))
+    a = torch.fft.rfft(flipped_y_frames, n=win_length, dim=-2)
+    b = torch.fft.rfft(y_frames[..., -corr_win_length:, :], n=win_length, dim=-2)
+    # [..., max_corr_period, n_frames]
+    corr = torch.fft.irfft(a * b, n=win_length, dim=-2)[..., corr_win_length:, :]
+
+    # Energy terms
+    energy = flipped_y_frames.square_().cumsum_(-2)
+    energy0 = energy[..., corr_win_length - 1 : corr_win_length, :]
+    energy = energy[..., corr_win_length:, :] - energy[..., :-corr_win_length, :]
+
+    # Difference function
+    corr_diff = (energy0 + energy).sub_(corr.mul_(2.0))
+    assert corr_diff.min() >= -1e-3, corr_diff.min()
+    corr_diff.clamp_(min=0.0)  # guard against numerical error
+
+    # Standardize
+    corr_diff *= 2.0 / corr_win_length
+    corr_diff.sqrt_()
+
+    # Energy for input to the conversion model
+    energy = (
+        y_frames.mul_(
+            torch.signal.windows.cosine(win_length, device=y.device)[..., None]
+        )
+        .square_()
+        .sum(-2, keepdim=True)
+    )
+
+    energy.clamp_(min=1e-3).log10_()  # >= -3; roughly 2.15 for a sine wave of amplitude 1
+    energy *= 0.5  # >= -1.5; roughly 1.07 for a sine wave of amplitude 1; a difference of 1 is a 20 dB difference in amplitude
+
+    return (
+        instfreq_features,  # [..., instfreq_features_cutoff_bin * 3, n_frames]
+        corr_diff,  # [..., max_corr_period, n_frames]
+        energy,  # [..., 1, n_frames]
+    )
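A quick sketch of the returned shapes with the default arguments (one 100-frame feature map per second of 16 kHz input; random audio, illustrative only):

```python
# Sketch: feature shapes from extract_pitch_features for 1 s of audio.
import torch

wav = torch.randn(2, 16000)                      # [batch_size, wav_length]
instfreq, corr_diff, energy = extract_pitch_features(wav)
print(instfreq.shape)   # torch.Size([2, 192, 100])  64 cutoff bins x 3 feature planes
print(corr_diff.shape)  # torch.Size([2, 256, 100])  one value per candidate period
print(energy.shape)     # torch.Size([2, 1, 100])
```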
+class PitchEstimator(nn.Module):
+    def __init__(
+        self,
+        input_instfreq_channels: int = 192,
+        input_corr_channels: int = 256,
+        pitch_channels: int = 384,
+        channels: int = 192,
+        intermediate_channels: int = 192 * 3,
+        n_blocks: int = 6,
+        delay: int = 1,  # 10ms; 22.5ms in total when combined with the feature extraction
+        embed_kernel_size: int = 3,
+        kernel_size: int = 33,
+        bins_per_octave: int = 96,
+    ):
+        super().__init__()
+        self.bins_per_octave = bins_per_octave
+
+        self.instfreq_embed_0 = nn.Conv1d(input_instfreq_channels, channels, 1)
+        self.instfreq_embed_1 = nn.Conv1d(channels, channels, 1)
+        self.corr_embed_0 = nn.Conv1d(input_corr_channels, channels, 1)
+        self.corr_embed_1 = nn.Conv1d(channels, channels, 1)
+        self.backbone = ConvNeXtStack(
+            channels,
+            channels,
+            intermediate_channels,
+            n_blocks,
+            delay,
+            embed_kernel_size,
+            kernel_size,
+        )
+        self.head = nn.Conv1d(channels, pitch_channels, 1)
+
+    def forward(self, wav: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+        # wav: [batch_size, 1, wav_length]
+
+        # [batch_size, input_instfreq_channels, length],
+        # [batch_size, input_corr_channels, length]
+        with torch.cuda.amp.autocast(False):
+            instfreq_features, corr_diff, energy = extract_pitch_features(
+                wav.squeeze(1),
+                hop_length=160,
+                win_length=560,
+                max_corr_period=256,
+                corr_win_length=304,
+                instfreq_features_cutoff_bin=64,
+            )
+        instfreq_features = F.gelu(
+            self.instfreq_embed_0(instfreq_features), approximate="tanh"
+        )
+        instfreq_features = self.instfreq_embed_1(instfreq_features)
+        corr_diff = F.gelu(self.corr_embed_0(corr_diff), approximate="tanh")
+        corr_diff = self.corr_embed_1(corr_diff)
+        # [batch_size, channels, length]
+        x = instfreq_features + corr_diff  # an activation function was forgotten here
+        x = self.backbone(x)
+        # [batch_size, pitch_channels, length]
+        x = self.head(x)
+        return x, energy
+
+    def sample_pitch(
+        self, pitch: torch.Tensor, band_width: int = 48, return_features: bool = False
+    ) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
+        # pitch: [batch_size, pitch_channels, length]
+        # The returned pitch values never include 0
+        batch_size, pitch_channels, length = pitch.size()
+        pitch = pitch.softmax(1)
+        if return_features:
+            unvoiced_proba = pitch[:, :1, :].clone()
+        pitch[:, 0, :] = -100.0
+        pitch = (
+            pitch.transpose(1, 2)
+            .contiguous()
+            .view(batch_size * length, 1, pitch_channels)
+        )
+        band_pitch = F.conv1d(
+            pitch,
+            torch.ones((1, 1, 1), device=pitch.device).expand(1, 1, band_width),
+        )
+        # [batch_size * length, 1, pitch_channels - band_width + 1] -> Long[batch_size * length, 1]
+        quantized_band_pitch = band_pitch.argmax(2)
+        if return_features:
+            # [batch_size * length, 1]
+            band_proba = band_pitch.gather(2, quantized_band_pitch[:, :, None])
+            # [batch_size * length, 1]
+            half_pitch_band_proba = band_pitch.gather(
+                2,
+                (quantized_band_pitch - self.bins_per_octave).clamp_(min=1)[:, :, None],
+            )
+            half_pitch_band_proba[quantized_band_pitch <= self.bins_per_octave] = 0.0
+            half_pitch_proba = (half_pitch_band_proba / (band_proba + 1e-6)).view(
+                batch_size, 1, length
+            )
+            # [batch_size * length, 1]
+            double_pitch_band_proba = band_pitch.gather(
+                2,
+                (quantized_band_pitch + self.bins_per_octave).clamp_(
+                    max=pitch_channels - band_width
+                )[:, :, None],
+            )
+            double_pitch_band_proba[
+                quantized_band_pitch
+                > pitch_channels - band_width - self.bins_per_octave
+            ] = 0.0
+            double_pitch_proba = (double_pitch_band_proba / (band_proba + 1e-6)).view(
+                batch_size, 1, length
+            )
+        # Long[1, pitch_channels]
+        mask = torch.arange(pitch_channels, device=pitch.device)[None, :]
+        # bool[batch_size * length, pitch_channels]
+        mask = (quantized_band_pitch <= mask) & (
+            mask < quantized_band_pitch + band_width
+        )
+        # Long[batch_size, length]
+        quantized_pitch = (pitch.squeeze(1) * mask).argmax(1).view(batch_size, length)
+
+        if return_features:
+            features = torch.cat(
+                [unvoiced_proba, half_pitch_proba, double_pitch_proba], dim=1
+            )
+            # Long[batch_size, length], [batch_size, 3, length]
+            return quantized_pitch, features
+        else:
+            return quantized_pitch
+
+    def merge_weights(self):
+        self.backbone.merge_weights()
+
+    def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]):
+        if isinstance(f, (str, bytes, os.PathLike)):
+            with open(f, "wb") as f:
+                self.dump(f)
+            return
+        if not hasattr(f, "write"):
+            raise TypeError
+
+        dump_layer(self.instfreq_embed_0, f)
+        dump_layer(self.instfreq_embed_1, f)
+        dump_layer(self.corr_embed_0, f)
+        dump_layer(self.corr_embed_1, f)
+        dump_layer(self.backbone, f)
+        dump_layer(self.head, f)
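A sketch of how these outputs are typically consumed; the bin-to-Hz mapping mirrors the `55.0 * 2.0 ** (bin / bins_per_octave)` conversion used by `ConverterNetwork` further below (untrained weights, so the sampled bins are arbitrary):

```python
# Sketch: logits -> sampled pitch bins -> Hz. Bin 0 (unvoiced) is never returned.
import torch

estimator = PitchEstimator()
wav = torch.randn(1, 1, 16000)
pitch_logits, energy = estimator(wav)             # [1, 384, 100], [1, 1, 100]
quantized = estimator.sample_pitch(pitch_logits)  # Long[1, 100]
f0_hz = 55.0 * 2.0 ** (quantized.float() / estimator.bins_per_octave)
```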
+# %% [markdown]
+# ## Vocoder
+
+
+# %%
+def overlap_add(
+    ir: torch.Tensor,
+    pitch: torch.Tensor,
+    hop_length: int = 240,
+    delay: int = 0,
+) -> torch.Tensor:
+    # print("ir, pitch: ", ir.dtype, pitch.dtype)
+    batch_size, ir_length, length = ir.size()
+    assert pitch.size() == (batch_size, length * hop_length)
+    assert 0 <= delay < ir_length, (delay, ir_length)
+    # Phase is represented in [0, 1)
+    normalized_freq = pitch / 24000.0
+    # Set the initial phase randomly
+    normalized_freq[:, 0] = torch.rand(batch_size, device=pitch.device)
+    with torch.cuda.amp.autocast(enabled=False):
+        phase = (normalized_freq.double().cumsum_(1) % 1.0).float()
+    # Find the positions to overlap at
+    # [n_pitchmarks], [n_pitchmarks]
+    indices0, indices1 = torch.nonzero(phase[:, :-1] > phase[:, 1:], as_tuple=True)
+    # Find the fractional part (the phase delay) of each overlap position
+    numer = 1.0 - phase[indices0, indices1]
+    # [n_pitchmarks]
+    fractional_part = numer / (numer + phase[indices0, indices1 + 1])
+    # Compute the values to overlap
+    # [n_pitchmarks, ir_length]
+    values = ir[indices0, :, indices1 // hop_length]
+    # Delay the phase
+    # Assumes values is in the time domain
+    # Complex[n_pitchmarks, ir_length / 2 + 1]
+    values = torch.fft.rfft(values, n=ir_length, dim=1)
+    # Amount of phase delay
+    # [n_pitchmarks, ir_length / 2 + 1]
+    delay_phase = (
+        torch.arange(ir_length // 2 + 1, device=pitch.device, dtype=torch.float32)[
+            None, :
+        ]
+        / -ir_length
+        * fractional_part[:, None]
+    )
+    # Complex[n_pitchmarks, ir_length / 2 + 1]
+    delay_phase = torch.polar(torch.ones_like(delay_phase), delay_phase * math.tau)
+    # values *= delay_phase
+    values = values * delay_phase
+    # [n_pitchmarks, ir_length]
+    values = torch.fft.irfft(values, n=ir_length, dim=1)
+
+    # Scatter the values to be added out to individual samples
+    # [n_pitchmarks * ir_length]
+    values = values.ravel()
+    # Long[n_pitchmarks * ir_length]
+    indices0 = indices0[:, None].expand(-1, ir_length).ravel()
+    # Long[n_pitchmarks * ir_length]
+    indices1 = (
+        indices1[:, None] + torch.arange(ir_length, device=pitch.device)
+    ).ravel()
+
+    # Overlap-add
+    overlap_added_signal = torch.zeros(
+        (batch_size, length * hop_length + ir_length), device=pitch.device
+    )
+    # print("overlap_added_signal, values: ", overlap_added_signal.dtype, values.dtype)
+    overlap_added_signal.index_put_((indices0, indices1), values, accumulate=True)
+    overlap_added_signal = overlap_added_signal[:, delay : -ir_length + delay]
+
+    # Convolving a train of overlapped sincs with the ir might have needed fewer FFTs
+    return overlap_added_signal
+
+
+def generate_noise(aperiodicity: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+    # aperiodicity: [batch_size, hop_length, length]
+    batch_size, hop_length, length = aperiodicity.size()
+    excitation = torch.rand(
+        batch_size, (length + 1) * hop_length, device=aperiodicity.device
+    )
+    excitation -= 0.5
+    n_fft = 2 * hop_length
+    # Analyze with a rectangular window
+    # Complex[batch_size, hop_length + 1, length]
+    noise = torch.stft(
+        excitation,
+        n_fft=n_fft,
+        hop_length=hop_length,
+        window=torch.ones(n_fft, device=excitation.device),
+        center=False,
+        return_complex=True,
+    )
+    assert noise.size(2) == aperiodicity.size(2), (
+        noise.size(),
+        aperiodicity.size(),
+    )
+    noise[:, 0, :] = 0.0
+    noise[:, 1:, :] *= aperiodicity
+    # Synthesize with a Hann window
+    # Note that torch.istft cannot be used here, since it applies the optimal synthesis window
+    # [batch_size, 2 * hop_length, length]
+    noise = torch.fft.irfft(noise, n=2 * hop_length, dim=1)
+    noise *= torch.hann_window(2 * hop_length, device=noise.device)[None, :, None]
+    # [batch_size, (length + 1) * hop_length]
+    noise = F.fold(
+        noise,
+        (1, (length + 1) * hop_length),
+        (1, 2 * hop_length),
+        stride=(1, hop_length),
+    ).squeeze_((1, 2))
+    noise = noise[:, hop_length // 2 : -hop_length // 2]
+    excitation = excitation[:, hop_length // 2 : -hop_length // 2]
+    return noise, excitation  # [batch_size, length * hop_length]
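A shape sketch with the vocoder's hop length of 240 (the random aperiodicity values here are arbitrary; this only illustrates that one hop of audio is produced per frame):

```python
# Sketch: generate_noise produces hop_length samples of audio per frame.
import torch

aperiodicity = torch.rand(1, 240, 100)           # [batch_size, hop_length, length]
noise, excitation = generate_noise(aperiodicity)
print(noise.shape, excitation.shape)             # [1, 24000] each (100 x 240)
```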
+class GradientEqualizerFunction(torch.autograd.Function):
+    """Compensates for the gradient getting larger as the norm gets smaller"""
+
+    @staticmethod
+    def forward(ctx, x: torch.Tensor) -> torch.Tensor:
+        # x: [batch_size, 1, length]
+        rms = x.square().mean(dim=2, keepdim=True).sqrt_()
+        ctx.save_for_backward(rms)
+        return x
+
+    @staticmethod
+    def backward(ctx, dx: torch.Tensor) -> torch.Tensor:
+        # dx: [batch_size, 1, length]
+        (rms,) = ctx.saved_tensors
+        dx = dx * (math.sqrt(2.0) * rms + 0.1)
+        return dx
+
+
+class PseudoDDSPVocoder(nn.Module):
+    def __init__(
+        self,
+        channels: int,
+        hop_length: int = 240,
+        n_pre_blocks: int = 4,
+    ):
+        super().__init__()
+        self.hop_length = hop_length
+
+        self.prenet = ConvNeXtStack(
+            in_channels=channels,
+            channels=channels,
+            intermediate_channels=channels * 3,
+            n_blocks=n_pre_blocks,
+            delay=2,  # 20 ms delay
+            embed_kernel_size=7,
+            kernel_size=33,
+        )
+        self.ir_generator = ConvNeXtStack(
+            in_channels=channels,
+            channels=channels,
+            intermediate_channels=channels * 3,
+            n_blocks=2,
+            delay=0,
+            embed_kernel_size=3,
+            kernel_size=33,
+            use_weight_norm=True,
+        )
+        self.ir_generator_post = weight_norm(nn.Conv1d(channels, 512, 1, bias=False))
+        self.aperiodicity_generator = ConvNeXtStack(
+            in_channels=channels,
+            channels=channels,
+            intermediate_channels=channels * 3,
+            n_blocks=2,
+            delay=0,
+            embed_kernel_size=3,
+            kernel_size=33,
+            use_weight_norm=True,
+        )
+        self.aperiodicity_generator_post = weight_norm(
+            nn.Conv1d(channels, hop_length, 1, bias=False)
+        )
+
+    def forward(
+        self, x: torch.Tensor, pitch: torch.Tensor
+    ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
+        # x: [batch_size, channels, length]
+        # pitch: [batch_size, length]
+
+        x = self.prenet(x)
+        ir = self.ir_generator(x)
+        ir = F.elu(ir, inplace=True)
+        # [batch_size, 512, length]
+        ir = self.ir_generator_post(ir)
+
+        # Nearest-neighbor interpolation
+        # [batch_size, length * hop_length]
+        pitch = torch.repeat_interleave(pitch, self.hop_length, dim=1)
+
+        # [batch_size, length * hop_length]
+        periodic_signal = overlap_add(ir, pitch, self.hop_length, delay=120)
+
+        aperiodicity = self.aperiodicity_generator(x)
+        aperiodicity = F.elu(aperiodicity, inplace=True)
+        # [batch_size, hop_length, length]
+        aperiodicity = self.aperiodicity_generator_post(aperiodicity)
+        # [batch_size, length * hop_length], [batch_size, length * hop_length]
+        aperiodic_signal, noise_excitation = generate_noise(aperiodicity)
+
+        # [batch_size, 1, length * hop_length]
+        y_g_hat = (periodic_signal + aperiodic_signal)[:, None, :]
+
+        y_g_hat = GradientEqualizerFunction.apply(y_g_hat)
+
+        return y_g_hat, {
+            "periodic_signal": periodic_signal.detach(),
+            "aperiodic_signal": aperiodic_signal.detach(),
+            "noise_excitation": noise_excitation.detach(),
+        }
+
+    def remove_weight_norm(self):
+        self.prenet.remove_weight_norm()
+        self.ir_generator.remove_weight_norm()
+        remove_weight_norm(self.ir_generator_post)
+        self.aperiodicity_generator.remove_weight_norm()
+        remove_weight_norm(self.aperiodicity_generator_post)
+
+    def merge_weights(self):
+        self.prenet.merge_weights()
+        self.ir_generator.merge_weights()
+        self.aperiodicity_generator.merge_weights()
+
+    def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]):
+        if isinstance(f, (str, bytes, os.PathLike)):
+            with open(f, "wb") as f:
+                self.dump(f)
+            return
+        if not hasattr(f, "write"):
+            raise TypeError
+
+        dump_layer(self.prenet, f)
+        dump_layer(self.ir_generator, f)
+        dump_layer(self.ir_generator_post, f)
+        dump_layer(self.aperiodicity_generator, f)
+        dump_layer(self.aperiodicity_generator_post, f)
+
+
+def slice_segments(
+    x: torch.Tensor, start_indices: torch.Tensor, segment_length: int
+) -> torch.Tensor:
+    batch_size, channels, _ = x.size()
+    # [batch_size, 1, segment_size]
+    indices = start_indices[:, None, None] + torch.arange(
+        segment_length, device=start_indices.device
+    )
+    # [batch_size, channels, segment_size]
+    indices = indices.expand(batch_size, channels, segment_length)
+    return x.gather(2, indices)
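A toy sketch of the segment slicing used during training (random values and arbitrary start indices):

```python
# Sketch: cut per-item segments of equal length out of a batched feature map.
import torch

x = torch.randn(2, 256, 400)                 # [batch_size, channels, length]
starts = torch.tensor([0, 37])               # Long[batch_size]
print(slice_segments(x, starts, 100).shape)  # torch.Size([2, 256, 100])
```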
+class ConverterNetwork(nn.Module):
+    def __init__(
+        self,
+        phone_extractor: PhoneExtractor,
+        pitch_estimator: PitchEstimator,
+        n_speakers: int,
+        hidden_channels: int,
+    ):
+        super().__init__()
+        self.frozen_modules = {
+            "phone_extractor": phone_extractor.eval().requires_grad_(False),
+            "pitch_estimator": pitch_estimator.eval().requires_grad_(False),
+        }
+        self.embed_phone = nn.Conv1d(256, hidden_channels, 1)
+        self.embed_quantized_pitch = nn.Embedding(384, hidden_channels)
+        phase = (
+            torch.arange(384, dtype=torch.float)[:, None]
+            * (
+                torch.arange(0, hidden_channels, 2, dtype=torch.float)
+                * (-math.log(10000.0) / hidden_channels)
+            ).exp_()
+        )
+        self.embed_quantized_pitch.weight.data[:, 0::2] = phase.sin()
+        self.embed_quantized_pitch.weight.data[:, 1::2] = phase.cos_()
+        self.embed_quantized_pitch.weight.requires_grad_(False)
+        self.embed_pitch_features = nn.Conv1d(4, hidden_channels, 1)
+        self.embed_speaker = nn.Embedding(n_speakers, hidden_channels)
+        self.embed_formant_shift = nn.Embedding(9, hidden_channels)
+        self.vocoder = PseudoDDSPVocoder(
+            channels=hidden_channels,
+            hop_length=240,
+            n_pre_blocks=4,
+        )
+        self.melspectrogram = torchaudio.transforms.MelSpectrogram(
+            sample_rate=24000,
+            n_fft=1024,
+            win_length=720,
+            hop_length=128,
+            n_mels=80,
+            power=2,  # may be a source of instability
+            norm="slaney",
+            mel_scale="slaney",
+        )
+
+    def _get_resampler(
+        self, orig_freq, new_freq, device, cache={}
+    ) -> torchaudio.transforms.Resample:
+        key = orig_freq, new_freq
+        if key in cache:
+            return cache[key]
+        resampler = torchaudio.transforms.Resample(orig_freq, new_freq).to(device)
+        cache[key] = resampler
+        return resampler
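The training-time augmentation in `forward` below resamples each utterance by a small rational factor to shift its pitch; a sketch of how a shift in pitch bins becomes a resampling ratio (the shift value here is hypothetical):

```python
# Sketch: approximate 2 ** (shift / 96) by a fraction with denominator <= 30,
# as the augmentation below does before resampling the 16 kHz input.
from fractions import Fraction

shift_bins = 30                       # 30 / 96 of an octave upward
ratio = 2.0 ** (shift_bins / 96)
frac = Fraction.from_float(ratio).limit_denominator(30)
print(frac.numerator, frac.denominator, float(frac))  # 36 29 1.2413...
```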
+    def forward(
+        self,
+        x: torch.Tensor,
+        target_speaker_id: torch.Tensor,
+        formant_shift_semitone: torch.Tensor,
+        pitch_shift_semitone: Optional[torch.Tensor] = None,
+        slice_start_indices: Optional[torch.Tensor] = None,
+        slice_segment_length: Optional[int] = None,
+        return_stats: bool = False,
+    ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, float]]]:
+        # x: [batch_size, 1, wav_length]
+        # target_speaker_id: Long[batch_size]
+        # formant_shift_semitone: [batch_size]
+        # pitch_shift_semitone: [batch_size]
+        # slice_start_indices: [batch_size]
+
+        batch_size, _, _ = x.size()
+
+        with torch.inference_mode():
+            phone_extractor: PhoneExtractor = self.frozen_modules["phone_extractor"]
+            pitch_estimator: PitchEstimator = self.frozen_modules["pitch_estimator"]
+            # [batch_size, 1, wav_length] -> [batch_size, phone_channels, length]
+            phone = phone_extractor.units(x).transpose(1, 2)
+            # [batch_size, 1, wav_length] -> [batch_size, pitch_channels, length], [batch_size, 1, length]
+            pitch, energy = pitch_estimator(x)
+            # augmentation
+            if self.training:
+                # [batch_size, pitch_channels - 1]
+                weights = pitch.softmax(1)[:, 1:, :].mean(2)
+                # [batch_size]
+                mean_pitch = (
+                    weights * torch.arange(1, 384, device=weights.device)
+                ).sum(1) / weights.sum(1)
+                mean_pitch = mean_pitch.round_().long()
+                target_pitch = torch.randint_like(mean_pitch, 64, 257)
+                shift = target_pitch - mean_pitch
+                shift_ratio = (
+                    2.0 ** (shift.float() / pitch_estimator.bins_per_octave)
+                ).tolist()
+                shift = []
+                interval_length = 100  # 1s
+                interval_zeros = torch.zeros(
+                    (1, 1, interval_length * 160), device=x.device
+                )
+                concatenated_shifted_x = []
+                offsets = [0]
+                for i in range(batch_size):
+                    shift_ratio_i = shift_ratio[i]
+                    shift_ratio_fraction_i = Fraction.from_float(
+                        shift_ratio_i
+                    ).limit_denominator(30)
+                    shift_numer_i = shift_ratio_fraction_i.numerator
+                    shift_denom_i = shift_ratio_fraction_i.denominator
+                    shift_ratio_i = shift_numer_i / shift_denom_i
+                    shift_i = int(
+                        round(
+                            math.log2(shift_ratio_i) * pitch_estimator.bins_per_octave
+                        )
+                    )
+                    shift.append(shift_i)
+                    shift_ratio[i] = shift_ratio_i
+                    # [1, 1, wav_length / shift_ratio]
+                    with torch.cuda.amp.autocast(False):
+                        shifted_x_i = self._get_resampler(
+                            shift_numer_i, shift_denom_i, x.device
+                        )(x[i])[None]
+                    if shifted_x_i.size(2) % 160 != 0:
+                        shifted_x_i = F.pad(
+                            shifted_x_i,
+                            (0, 160 - shifted_x_i.size(2) % 160),
+                            mode="reflect",
+                        )
+                    assert shifted_x_i.size(2) % 160 == 0
+                    offsets.append(
+                        offsets[-1] + interval_length + shifted_x_i.size(2) // 160
+                    )
+                    concatenated_shifted_x.extend([interval_zeros, shifted_x_i])
+                if offsets[-1] % 256 != 0:
+                    # Inputs of the same length seem to benefit from some kind of
+                    # caching and run faster, so pad to a multiple of 256 to reduce
+                    # the number of distinct length patterns
+                    concatenated_shifted_x.append(
+                        torch.zeros(
+                            (1, 1, (256 - offsets[-1] % 256) * 160), device=x.device
+                        )
+                    )
+                # [batch_size, 1, sum(wav_length) + batch_size * 16000]
+                concatenated_shifted_x = torch.cat(concatenated_shifted_x, dim=2)
+                assert concatenated_shifted_x.size(2) % (256 * 160) == 0
+                # [1, pitch_channels, length / shift_ratio], [1, 1, length / shift_ratio]
+                concatenated_pitch, concatenated_energy = pitch_estimator(
+                    concatenated_shifted_x
+                )
+                for i in range(batch_size):
+                    shift_i = shift[i]
+                    shift_ratio_i = shift_ratio[i]
+                    left = offsets[i] + interval_length
+                    right = offsets[i + 1]
+                    pitch_i = concatenated_pitch[:, :, left:right]
+                    energy_i = concatenated_energy[:, :, left:right]
+                    pitch_i = F.interpolate(
+                        pitch_i,
+                        scale_factor=shift_ratio_i,
+                        mode="linear",
+                        align_corners=False,
+                    )
+                    energy_i = F.interpolate(
+                        energy_i,
+                        scale_factor=shift_ratio_i,
+                        mode="linear",
+                        align_corners=False,
+                    )
+                    assert pitch_i.size(2) == energy_i.size(2)
+                    assert abs(pitch_i.size(2) - pitch.size(2)) <= 10
+                    length = min(pitch_i.size(2), pitch.size(2))
+
+                    if shift_i > 0:
+                        pitch[i : i + 1, :1, :length] = pitch_i[:, :1, :length]
+                        pitch[i : i + 1, 1:-shift_i, :length] = pitch_i[
+                            :, 1 + shift_i :, :length
+                        ]
+                        pitch[i : i + 1, -shift_i:, :length] = -10.0
+                    elif shift_i < 0:
+                        pitch[i : i + 1, :1, :length] = pitch_i[:, :1, :length]
+                        pitch[i : i + 1, 1 : 1 - shift_i, :length] = -10.0
+                        pitch[i : i + 1, 1 - shift_i :, :length] = pitch_i[
+                            :, 1:shift_i, :length
+                        ]
+                    energy[i : i + 1, :, :length] = energy_i[:, :, :length]
+
+            # [batch_size, pitch_channels, length] -> Long[batch_size, length], [batch_size, 3, length]
+            quantized_pitch, pitch_features = pitch_estimator.sample_pitch(
+                pitch, return_features=True
+            )
+            if pitch_shift_semitone is not None:
+                quantized_pitch = torch.where(
+                    quantized_pitch == 0,
+                    quantized_pitch,
+                    (
+                        quantized_pitch
+                        + (
+                            pitch_shift_semitone[:, None]
+                            * (pitch_estimator.bins_per_octave / 12)
+                        )
+                        .round_()
+                        .long()
+                    ).clamp_(1, 383),
+                )
+            pitch = 55.0 * 2.0 ** (
+                quantized_pitch.float() / pitch_estimator.bins_per_octave
+            )
+            # While phone looks ahead by 2.5 ms, energy looks ahead by 12.5 ms
+            # and pitch_features by 22.5 ms, so shift them to align with phone
+            energy = F.pad(energy[:, :, :-1], (1, 0), mode="reflect")
+            quantized_pitch = F.pad(quantized_pitch[:, :-2], (2, 0), mode="reflect")
+            pitch_features = F.pad(pitch_features[:, :, :-2], (2, 0), mode="reflect")
+            # [batch_size, 1, length], [batch_size, 3, length] -> [batch_size, 4, length]
+            pitch_features = torch.cat([energy, pitch_features], dim=1)
+            formant_shift_indices = (
+                ((formant_shift_semitone + 2.0) * 2.0).round_().long()
+            )
+
+            phone = phone.clone()
+            quantized_pitch = quantized_pitch.clone()
+            pitch_features = pitch_features.clone()
+            formant_shift_indices = formant_shift_indices.clone()
+            pitch = pitch.clone()
+
+        # [batch_size, hidden_channels, length]
+        x = (
+            self.embed_phone(phone)
+            + self.embed_quantized_pitch(quantized_pitch).transpose(1, 2)
+            + self.embed_pitch_features(pitch_features)
+            + (
+                self.embed_speaker(target_speaker_id)[:, :, None]
+                + self.embed_formant_shift(formant_shift_indices)[:, :, None]
+            )
+        )
+        if slice_start_indices is not None:
+            assert slice_segment_length is not None
+            # [batch_size, hidden_channels, length] -> [batch_size, hidden_channels, segment_length]
+            x = slice_segments(x, slice_start_indices, slice_segment_length)
+        x = F.silu(x, inplace=True)
+        # [batch_size, hidden_channels, segment_length] -> [batch_size, 1, segment_length * 240]
+        y_g_hat, stats = self.vocoder(x, pitch)
+        if return_stats:
+            return y_g_hat, stats
+        else:
+            return y_g_hat
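One detail worth noting from `forward` above: the formant-shift condition is looked up from a 9-entry embedding, with semitone values on a half-semitone grid mapped to indices by `((s + 2.0) * 2.0).round()`. A worked example:

```python
# Sketch: formant_shift_semitone in {-2.0, -1.5, ..., +2.0} -> indices 0..8.
for s in [-2.0, -1.5, 0.0, 1.5, 2.0]:
    print(s, int(round((s + 2.0) * 2.0)))  # -> 0, 1, 4, 7, 8
```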
self.embed_formant_shift(formant_shift_indices)[:, :, None] + ) + ) + if slice_start_indices is not None: + assert slice_segment_length is not None + # [batch_size, hidden_channels, length] -> [batch_size, hidden_channels, segment_length] + x = slice_segments(x, slice_start_indices, slice_segment_length) + x = F.silu(x, inplace=True) + # [batch_size, hidden_channels, segment_length] -> [batch_size, 1, segment_length * 240] + y_g_hat, stats = self.vocoder(x, pitch) + if return_stats: + return y_g_hat, stats + else: + return y_g_hat + + def _normalize_melsp(self, x): + return x.log().mul(0.5).clamp_(min=math.log(1e-5)) + + def forward_and_compute_loss( + self, + noisy_wavs_16k: torch.Tensor, + target_speaker_id: torch.Tensor, + formant_shift_semitone: torch.Tensor, + slice_start_indices: torch.Tensor, + slice_segment_length: int, + y_all: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + # noisy_wavs_16k: [batch_size, 1, wav_length] + # target_speaker_id: Long[batch_size] + # formant_shift_semitone: [batch_size] + # slice_start_indices: [batch_size] + # slice_segment_length: int + # y_all: [batch_size, 1, wav_length] + + # [batch_size, 1, wav_length] -> [batch_size, 1, wav_length * 240] + y_hat_all, stats = self( + noisy_wavs_16k, + target_speaker_id, + formant_shift_semitone, + return_stats=True, + ) + + with torch.cuda.amp.autocast(False): + melsp_periodic_signal = self.melspectrogram( + stats["periodic_signal"].float() + ) + melsp_aperiodic_signal = self.melspectrogram( + stats["aperiodic_signal"].float() + ) + melsp_noise_excitation = self.melspectrogram( + stats["noise_excitation"].float() + ) + # [1, n_mels, 1] + # 1/6 ... [-0.5, 0.5] の一様乱数の平均パワー + # 3/8 ... ハン窓をかけた時のパワー減衰 + # 0.5 ... 謎 + reference_melsp = self.melspectrogram.mel_scale( + torch.full( + (1, self.melspectrogram.n_fft // 2 + 1, 1), + (1 / 6) * (3 / 8) * 0.5 * self.melspectrogram.win_length, + device=noisy_wavs_16k.device, + ) + ) + aperiodic_ratio = melsp_aperiodic_signal / ( + melsp_periodic_signal + melsp_aperiodic_signal + 1e-5 + ) + compensation_ratio = reference_melsp / (melsp_noise_excitation + 1e-5) + + melsp_y_hat = self.melspectrogram(y_hat_all.float().squeeze(1)) + melsp_y_hat = melsp_y_hat * ( + (1.0 - aperiodic_ratio) + aperiodic_ratio * compensation_ratio + ) + + y_hat_mel = self._normalize_melsp(melsp_y_hat) + # [batch_size, 1, wav_length] -> [batch_size, 1, wav_length * 240] + y_hat = slice_segments( + y_hat_all, slice_start_indices * 240, slice_segment_length * 240 + ) + + y_mel = self._normalize_melsp(self.melspectrogram(y_all.squeeze(1))) + # [batch_size, 1, wav_length] -> [batch_size, 1, wav_length * 240] + y = slice_segments( + y_all, slice_start_indices * 240, slice_segment_length * 240 + ) + + loss_mel = F.l1_loss(y_hat_mel, y_mel) + + return y, y_hat, y_hat_all, loss_mel + + def remove_weight_norm(self): + self.vocoder.remove_weight_norm() + + def merge_weights(self): + self.vocoder.merge_weights() + + def dump(self, f: Union[BinaryIO, str, bytes, os.PathLike]): + if isinstance(f, (str, bytes, os.PathLike)): + with open(f, "wb") as f: + self.dump(f) + return + if not hasattr(f, "write"): + raise TypeError + + dump_layer(self.embed_phone, f) + dump_layer(self.embed_quantized_pitch, f) + dump_layer(self.embed_pitch_features, f) + dump_layer(self.vocoder, f) + + +# Discriminator + + +def _normalize(tensor: torch.Tensor, dim: int) -> torch.Tensor: + denom = tensor.norm(p=2.0, dim=dim, keepdim=True).clamp_min(1e-6) + return tensor / denom + + +class 
SANConv2d(nn.Conv2d): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + padding: int = 0, + dilation: int = 1, + bias: bool = True, + padding_mode="zeros", + device=None, + dtype=None, + ): + super().__init__( + in_channels, + out_channels, + kernel_size, + stride, + padding=padding, + dilation=dilation, + groups=1, + bias=bias, + padding_mode=padding_mode, + device=device, + dtype=dtype, + ) + scale = self.weight.norm(p=2.0, dim=[1, 2, 3], keepdim=True).clamp_min(1e-6) + self.weight = nn.parameter.Parameter(self.weight / scale.expand_as(self.weight)) + self.scale = nn.parameter.Parameter(scale.view(out_channels)) + if bias: + self.bias = nn.parameter.Parameter( + torch.zeros(in_channels, device=device, dtype=dtype) + ) + else: + self.register_parameter("bias", None) + + def forward( + self, input: torch.Tensor, flg_san_train: bool = False + ) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]: + if self.bias is not None: + input = input + self.bias.view(self.in_channels, 1, 1) + normalized_weight = self._get_normalized_weight() + scale = self.scale.view(self.out_channels, 1, 1) + if flg_san_train: + out_fun = F.conv2d( + input, + normalized_weight.detach(), + None, + self.stride, + self.padding, + self.dilation, + self.groups, + ) + out_dir = F.conv2d( + input.detach(), + normalized_weight, + None, + self.stride, + self.padding, + self.dilation, + self.groups, + ) + out = out_fun * scale, out_dir * scale.detach() + else: + out = F.conv2d( + input, + normalized_weight, + None, + self.stride, + self.padding, + self.dilation, + self.groups, + ) + out = out * scale + return out + + @torch.no_grad() + def normalize_weight(self): + self.weight.data = self._get_normalized_weight() + + def _get_normalized_weight(self) -> torch.Tensor: + return _normalize(self.weight, dim=[1, 2, 3]) + + +def get_padding(kernel_size: int, dilation: int = 1) -> int: + return (kernel_size * dilation - dilation) // 2 + + +class DiscriminatorP(nn.Module): + def __init__( + self, period: int, kernel_size: int = 5, stride: int = 3, san: bool = False + ): + super().__init__() + self.period = period + self.san = san + # fmt: off + self.convs = nn.ModuleList([ + weight_norm(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), (get_padding(kernel_size, 1), 0))), + weight_norm(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), (get_padding(kernel_size, 1), 0))), + weight_norm(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), (get_padding(kernel_size, 1), 0))), + weight_norm(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), (get_padding(kernel_size, 1), 0))), + weight_norm(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, (get_padding(kernel_size, 1), 0))), + ]) + # fmt: on + if san: + self.conv_post = SANConv2d(1024, 1, (3, 1), 1, (1, 0)) + else: + self.conv_post = weight_norm(nn.Conv2d(1024, 1, (3, 1), 1, (1, 0))) + + def forward( + self, x: torch.Tensor, flg_san_train: bool = False + ) -> tuple[ + Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]], list[torch.Tensor] + ]: + fmap = [] + + b, c, t = x.shape + if t % self.period != 0: + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.silu(x, inplace=True) + fmap.append(x) + if self.san: + x = self.conv_post(x, flg_san_train=flg_san_train) + else: + x = self.conv_post(x) + if flg_san_train: + x_fun, x_dir = x + fmap.append(x_fun) + x_fun = torch.flatten(x_fun, 1, -1) + x_dir = 
torch.flatten(x_dir, 1, -1) + x = x_fun, x_dir + else: + fmap.append(x) + x = torch.flatten(x, 1, -1) + return x, fmap + + +class DiscriminatorR(nn.Module): + def __init__(self, resolution: int, san: bool = False): + super().__init__() + self.resolution = resolution + self.san = san + assert len(self.resolution) == 3 + self.convs = nn.ModuleList( + [ + weight_norm(nn.Conv2d(1, 32, (3, 9), padding=(1, 4))), + weight_norm(nn.Conv2d(32, 32, (3, 9), (1, 2), (1, 4))), + weight_norm(nn.Conv2d(32, 32, (3, 9), (1, 2), (1, 4))), + weight_norm(nn.Conv2d(32, 32, (3, 9), (1, 2), (1, 4))), + weight_norm(nn.Conv2d(32, 32, (3, 3), padding=(1, 1))), + ] + ) + if san: + self.conv_post = SANConv2d(32, 1, (3, 3), padding=(1, 1)) + else: + self.conv_post = weight_norm(nn.Conv2d(32, 1, (3, 3), padding=(1, 1))) + + def forward( + self, x: torch.Tensor, flg_san_train: bool = False + ) -> tuple[ + Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]], list[torch.Tensor] + ]: + fmap = [] + + x = self._spectrogram(x) + x.unsqueeze_(1) + for l in self.convs: + x = l(x) + x = F.silu(x, inplace=True) + fmap.append(x) + if self.san: + x = self.conv_post(x, flg_san_train=flg_san_train) + else: + x = self.conv_post(x) + if flg_san_train: + x_fun, x_dir = x + fmap.append(x_fun) + x_fun = torch.flatten(x_fun, 1, -1) + x_dir = torch.flatten(x_dir, 1, -1) + x = x_fun, x_dir + else: + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + def _spectrogram(self, x: torch.Tensor) -> torch.Tensor: + n_fft, hop_length, win_length = self.resolution + x = F.pad( + x, ((n_fft - hop_length) // 2, (n_fft - hop_length) // 2), mode="reflect" + ) + x.squeeze_(1) + with torch.cuda.amp.autocast(False): + mag = torch.stft( + x.float(), + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=torch.ones(win_length, device=x.device), + center=False, + return_complex=True, + ).abs() + + return mag + + +class MultiPeriodDiscriminator(nn.Module): + def __init__(self, san: bool = False): + super().__init__() + resolutions = [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]] + periods = [2, 3, 5, 7, 11] + self.discriminators = nn.ModuleList( + [DiscriminatorR(r, san=san) for r in resolutions] + + [DiscriminatorP(p, san=san) for p in periods] + ) + self.discriminator_names = [f"R_{n}_{h}_{w}" for n, h, w in resolutions] + [ + f"P_{p}" for p in periods + ] + self.san = san + + def forward( + self, y: torch.Tensor, y_hat: torch.Tensor, flg_san_train: bool = False + ) -> tuple[ + list[Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]], + list[Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]], + list[list[torch.Tensor]], + list[list[torch.Tensor]], + ]: + batch_size = y.size(0) + concatenated_y_y_hat = torch.cat([y, y_hat]) + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for d in self.discriminators: + if flg_san_train: + (y_d_fun, y_d_dir), fmap = d( + concatenated_y_y_hat, flg_san_train=flg_san_train + ) + y_d_r_fun, y_d_g_fun = torch.split(y_d_fun, batch_size) + y_d_r_dir, y_d_g_dir = torch.split(y_d_dir, batch_size) + y_d_r = y_d_r_fun, y_d_r_dir + y_d_g = y_d_g_fun, y_d_g_dir + else: + y_d, fmap = d(concatenated_y_y_hat, flg_san_train=flg_san_train) + y_d_r, y_d_g = torch.split(y_d, batch_size) + fmap_r = [] + fmap_g = [] + for fm in fmap: + fm_r, fm_g = torch.split(fm, batch_size) + fmap_r.append(fm_r) + fmap_g.append(fm_g) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + def 
forward_and_compute_discriminator_loss(
+        self, y: torch.Tensor, y_hat: torch.Tensor
+    ) -> tuple[torch.Tensor, dict[str, float]]:
+        y_d_rs, y_d_gs, _, _ = self(y, y_hat, flg_san_train=self.san)
+        loss = 0.0
+        stats = {}
+        assert len(y_d_gs) == len(y_d_rs) == len(self.discriminators)
+        for dr, dg, name in zip(y_d_rs, y_d_gs, self.discriminator_names):
+            if self.san:
+                dr_fun, dr_dir = map(lambda x: x.float(), dr)
+                dg_fun, dg_dir = map(lambda x: x.float(), dg)
+                r_loss_fun = F.softplus(1.0 - dr_fun).square().mean()
+                g_loss_fun = F.softplus(dg_fun).square().mean()
+                r_loss_dir = F.softplus(1.0 - dr_dir).square().mean()
+                g_loss_dir = -F.softplus(1.0 - dg_dir).square().mean()
+                r_loss = r_loss_fun + r_loss_dir
+                g_loss = g_loss_fun + g_loss_dir
+            else:
+                dr = dr.float()
+                dg = dg.float()
+                r_loss = (1.0 - dr).square().mean()
+                g_loss = dg.square().mean()
+            stats[f"{name}_dr_loss"] = r_loss.item()
+            stats[f"{name}_dg_loss"] = g_loss.item()
+            loss += r_loss + g_loss
+        return loss, stats
+
+    def forward_and_compute_generator_loss(
+        self, y: torch.Tensor, y_hat: torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor, dict[str, float]]:
+        _, y_d_gs, fmap_rs, fmap_gs = self(y, y_hat, flg_san_train=False)
+        stats = {}
+        # adversarial loss
+        adv_loss = 0.0
+        for dg, name in zip(y_d_gs, self.discriminator_names):
+            dg = dg.float()
+            if self.san:
+                g_loss = F.softplus(1.0 - dg).square().mean()
+            else:
+                g_loss = (1.0 - dg).square().mean()
+            stats[f"{name}_gg_loss"] = g_loss.item()
+            adv_loss += g_loss
+        # feature matching loss
+        fm_loss = 0.0
+        for fr, fg in zip(fmap_rs, fmap_gs):
+            for r, g in zip(fr, fg):
+                fm_loss += (r.detach() - g).abs().mean()
+        return adv_loss, fm_loss, stats
+
+
+# %% [markdown]
+# ## Utilities
+
+
+# %%
+class GradBalancer:
+    """Adapted from https://github.com/facebookresearch/encodec/blob/main/encodec/balancer.py"""
+
+    def __init__(
+        self,
+        weights: dict[str, float],
+        rescale_grads: bool = True,
+        total_norm: float = 1.0,
+        ema_decay: float = 0.999,
+        per_batch_item: bool = True,
+    ):
+        self.weights = weights
+        self.per_batch_item = per_batch_item
+        self.total_norm = total_norm
+        self.ema_decay = ema_decay
+        self.rescale_grads = rescale_grads
+
+        self.ema_total: dict[str, float] = defaultdict(float)
+        self.ema_fix: dict[str, float] = defaultdict(float)
+
+    def backward(
+        self,
+        losses: dict[str, torch.Tensor],
+        input: torch.Tensor,
+        scaler: Optional[torch.cuda.amp.GradScaler] = None,
+        skip_update_ema: bool = False,
+    ) -> dict[str, float]:
+        stats = {}
+        if skip_update_ema:
+            assert len(losses) == len(self.ema_total)
+            ema_norms = {k: tot / self.ema_fix[k] for k, tot in self.ema_total.items()}
+        else:
+            # Compute d loss / d input and its norm for each loss.
+            norms = {}
+            grads = {}
+            for name, loss in losses.items():
+                if scaler is not None:
+                    loss = scaler.scale(loss)
+                (grad,) = torch.autograd.grad(loss, [input], retain_graph=True)
+
+                if not grad.isfinite().all():
+                    input.backward(grad)
+                    return {}
+                grad = grad.detach() / (1.0 if scaler is None else scaler.get_scale())
+                if self.per_batch_item:
+                    dims = tuple(range(1, grad.dim()))
+                    ema_norm = grad.norm(dim=dims).mean()
+                else:
+                    ema_norm = grad.norm()
+                norms[name] = float(ema_norm)
+                grads[name] = grad
+
+            # Update the exponential moving averages of the norms.
+            for key, value in norms.items():
+                self.ema_total[key] = self.ema_total[key] * self.ema_decay + value
+                self.ema_fix[key] = self.ema_fix[key] * self.ema_decay + 1.0
+            ema_norms = {k: tot / self.ema_fix[k] for k, tot in self.ema_total.items()}
+
+        # Logging.
+        total_ema_norm = sum(ema_norms.values())
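+        # grad_norm_value_* below is the smoothed gradient norm each loss
+        # produces at `input`; grad_norm_ratio_* is its share of the total.
+        # With rescale_grads=True these shares are forced toward the
+        # configured `weights`, so raw loss magnitudes no longer decide
+        # how strongly each term drives the generator.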
for k, ema_norm in ema_norms.items(): + stats[f"grad_norm_value_{k}"] = ema_norm + stats[f"grad_norm_ratio_{k}"] = ema_norm / (total_ema_norm + 1e-12) + + # loss の係数の比率を計算する + if self.rescale_grads: + total_weights = sum([self.weights[k] for k in ema_norms]) + ratios = {k: w / total_weights for k, w in self.weights.items()} + + # 勾配を修正する + loss = 0.0 + for name, ema_norm in ema_norms.items(): + if self.rescale_grads: + scale = ratios[name] * self.total_norm / (ema_norm + 1e-12) + else: + scale = self.weights[name] + loss += (losses if skip_update_ema else grads)[name] * scale + if scaler is not None: + loss = scaler.scale(loss) + if skip_update_ema: + loss.backward() + else: + input.backward(loss) + return stats + + def state_dict(self): + return { + "ema_total": self.ema_total, + "ema_fix": self.ema_fix, + } + + def load_state_dict(self, state_dict): + self.ema_total = state_dict["ema_total"] + self.ema_fix = state_dict["ema_fix"] + + +class QualityTester(nn.Module): + def __init__(self): + super().__init__() + self.utmos = torch.hub.load( + "tarepan/SpeechMOS:v1.0.0", "utmos22_strong", trust_repo=True + ).eval() + + @torch.inference_mode() + def compute_mos(self, wav: torch.Tensor) -> dict[str, list[float]]: + res = {"utmos": self.utmos(wav, sr=16000).tolist()} + return res + + def test( + self, converted_wav: torch.Tensor, source_wav: torch.Tensor + ) -> dict[str, list[float]]: + # [batch_size, wav_length] + res = {} + res.update(self.compute_mos(converted_wav)) + return res + + def test_many( + self, converted_wavs: list[torch.Tensor], source_wavs: list[torch.Tensor] + ) -> tuple[dict[str, float], dict[str, list[float]]]: + # list[batch_size, wav_length] + results = defaultdict(list) + assert len(converted_wavs) == len(source_wavs) + for converted_wav, source_wav in zip(converted_wavs, source_wavs): + res = self.test(converted_wav, source_wav) + for metric_name, value in res.items(): + results[metric_name].extend(value) + return { + metric_name: sum(values) / len(values) + for metric_name, values in results.items() + }, results + + +def compute_grad_norm( + model: nn.Module, return_stats: bool = False +) -> Union[float, dict[str, float]]: + total_norm = 0.0 + stats = {} + for name, p in model.named_parameters(): + if p.grad is None: + continue + param_norm = p.grad.data.norm().item() + if not math.isfinite(param_norm): + param_norm = p.grad.data.float().norm().item() + total_norm += param_norm * param_norm + if return_stats: + stats[f"grad_norm_{name}"] = param_norm + total_norm = math.sqrt(total_norm) + if return_stats: + return total_norm, stats + else: + return total_norm + + +def compute_mean_f0( + files: list[Path], method: Literal["dio", "harvest"] = "dio" +) -> float: + sum_log_f0 = 0.0 + n_frames = 0 + for file in files: + wav, sr = torchaudio.load(file, backend="soundfile") + if method == "dio": + f0, _ = pyworld.dio(wav.ravel().numpy().astype(np.float64), sr) + elif method == "harvest": + f0, _ = pyworld.harvest(wav.ravel().numpy().astype(np.float64), sr) + else: + raise ValueError(f"Invalid method: {method}") + f0 = f0[f0 > 0] + sum_log_f0 += float(np.log(f0).sum()) + n_frames += len(f0) + if n_frames == 0: + return math.nan + mean_log_f0 = sum_log_f0 / n_frames + return math.exp(mean_log_f0) + + +# %% [markdown] +# ## Dataset + + +# %% +def get_resampler( + sr_before: int, sr_after: int, device="cpu", cache={} +) -> torchaudio.transforms.Resample: + if not isinstance(device, str): + device = str(device) + if (sr_before, sr_after, device) not in cache: + cache[(sr_before, 
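+            # Memoize one Resample instance per (orig_freq, new_freq, device)
+            # triple; building the resampling kernels is far more expensive
+            # than a dict lookup, and the mutable default-argument dict
+            # persists across calls.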
sr_after, device)] = torchaudio.transforms.Resample( + sr_before, sr_after + ).to(device) + return cache[(sr_before, sr_after, device)] + + +def convolve(signal: torch.Tensor, ir: torch.Tensor) -> torch.Tensor: + n = 1 << (signal.size(-1) + ir.size(-1) - 2).bit_length() + res = torch.fft.irfft(torch.fft.rfft(signal, n=n) * torch.fft.rfft(ir, n=n), n=n) + return res[..., : signal.size(-1)] + + +def random_filter(audio: torch.Tensor) -> torch.Tensor: + assert audio.ndim == 2 + ab = torch.rand(audio.size(0), 6) * 0.75 - 0.375 + a, b = ab[:, :3], ab[:, 3:] + a[:, 0] = 1.0 + b[:, 0] = 1.0 + audio = torchaudio.functional.lfilter(audio, a, b, clamp=False) + return audio + + +def get_noise( + n_samples: int, sample_rate: float, files: list[Union[str, bytes, os.PathLike]] +) -> torch.Tensor: + resample_augmentation_candidates = [0.9, 0.95, 1.0, 1.05, 1.1] + wavs = [] + current_length = 0 + while current_length < n_samples: + idx_files = torch.randint(0, len(files), ()) + file = files[idx_files] + wav, sr = torchaudio.load(file, backend="soundfile") + assert wav.size(0) == 1 + augmented_sample_rate = int( + round( + sample_rate + * resample_augmentation_candidates[ + torch.randint(0, len(resample_augmentation_candidates), ()) + ] + ) + ) + resampler = get_resampler(sr, augmented_sample_rate) + wav = resampler(wav) + wav = random_filter(wav) + wav *= 0.99 / (wav.abs().max() + 1e-5) + wavs.append(wav) + current_length += wav.size(1) + start = torch.randint(0, current_length - n_samples + 1, ()) + wav = torch.cat(wavs, dim=1)[:, start : start + n_samples] + assert wav.size() == (1, n_samples), wav.size() + return wav + + +def get_butterworth_lpf( + cutoff_freq: int, sample_rate: int, cache={} +) -> tuple[torch.Tensor, torch.Tensor]: + if (cutoff_freq, sample_rate) not in cache: + q = math.sqrt(0.5) + omega = math.tau * cutoff_freq / sample_rate + cos_omega = math.cos(omega) + alpha = math.sin(omega) / (2.0 * q) + b1 = (1.0 - cos_omega) / (1.0 + alpha) + b0 = b1 * 0.5 + a1 = -2.0 * cos_omega / (1.0 + alpha) + a2 = (1.0 - alpha) / (1.0 + alpha) + cache[(cutoff_freq, sample_rate)] = torch.tensor([b0, b1, b0]), torch.tensor( + [1.0, a1, a2] + ) + return cache[(cutoff_freq, sample_rate)] + + +def augment_audio( + clean: torch.Tensor, + sample_rate: int, + noise_files: list[Union[str, bytes, os.PathLike]], + ir_files: list[Union[str, bytes, os.PathLike]], +) -> torch.Tensor: + # [1, wav_length] + assert clean.size(0) == 1 + n_samples = clean.size(1) + + snr_candidates = [-20, -25, -30, -35, -40, -45] + + original_clean_rms = clean.square().mean().sqrt_() + + # noise を取得して clean と concat する + noise = get_noise(n_samples, sample_rate, noise_files) + signals = torch.cat([clean, noise]) + + # clean, noise に異なるランダムフィルタをかける + signals = random_filter(signals) + + # clean, noise にリバーブをかける + if torch.rand(()) < 0.5: + ir_file = ir_files[torch.randint(0, len(ir_files), ())] + ir, sr = torchaudio.load(ir_file, backend="soundfile") + assert ir.size() == (2, sr), ir.size() + assert sr == sample_rate, (sr, sample_rate) + signals = convolve(signals, ir) + + # clean, noise に同じ LPF をかける + if torch.rand(()) < 0.2: + if signals.abs().max() > 0.8: + signals /= signals.abs().max() * 1.25 + cutoff_freq_candidates = [2000, 3000, 4000, 6000] + cutoff_freq = cutoff_freq_candidates[ + torch.randint(0, len(cutoff_freq_candidates), ()) + ] + b, a = get_butterworth_lpf(cutoff_freq, sample_rate) + signals = torchaudio.functional.lfilter(signals, a, b, clamp=False) + + # clean の音量を合わせる + clean, noise = signals + clean_rms = 
clean.square().mean().sqrt_() + clean *= original_clean_rms / clean_rms + + # clean, noise の音量をピークを重視して取る + clean_level = clean.square().square_().mean().sqrt_().sqrt_() + noise_level = noise.square().square_().mean().sqrt_().sqrt_() + # SNR + snr = snr_candidates[torch.randint(0, len(snr_candidates), ())] + # noisy を生成 + noisy = clean + noise * (10.0 ** (snr / 20.0) * clean_level / (noise_level + 1e-5)) + return noisy + + +class WavDataset(torch.utils.data.Dataset): + def __init__( + self, + audio_files: list[tuple[Path, int]], + in_sample_rate: int = 16000, + out_sample_rate: int = 24000, + wav_length: int = 4 * 24000, # 4s + segment_length: int = 100, # 1s + noise_files: Optional[list[Union[str, bytes, os.PathLike]]] = None, + ir_files: Optional[list[Union[str, bytes, os.PathLike]]] = None, + ): + self.audio_files = audio_files + self.in_sample_rate = in_sample_rate + self.out_sample_rate = out_sample_rate + self.wav_length = wav_length + self.segment_length = segment_length + self.noise_files = noise_files + self.ir_files = ir_files + + if (noise_files is None) is not (ir_files is None): + raise ValueError("noise_files and ir_files must be both None or not None") + + self.in_hop_length = in_sample_rate // 100 + self.out_hop_length = out_sample_rate // 100 # 10ms 刻み + + def __getitem__(self, index: int) -> tuple[torch.Tensor, torch.Tensor, int, int]: + file, speaker_id = self.audio_files[index] + clean_wav, sample_rate = torchaudio.load(file, backend="soundfile") + + formant_shift_candidates = [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0] + formant_shift = formant_shift_candidates[ + torch.randint(0, len(formant_shift_candidates), ()).item() + ] + + resampler_fraction = Fraction( + sample_rate / self.out_sample_rate * 2.0 ** (formant_shift / 12.0) + ).limit_denominator(300) + clean_wav = get_resampler( + resampler_fraction.numerator, resampler_fraction.denominator + )(clean_wav) + + assert clean_wav.size(0) == 1 + assert clean_wav.size(1) != 0 + + clean_wav = F.pad(clean_wav, (self.wav_length, self.wav_length)) + + if self.noise_files is None: + assert False + noisy_wav_16k = get_resampler(self.out_sample_rate, self.in_sample_rate)( + clean_wav + ) + else: + clean_wav_16k = get_resampler(self.out_sample_rate, self.in_sample_rate)( + clean_wav + ) + noisy_wav_16k = augment_audio( + clean_wav_16k, self.in_sample_rate, self.noise_files, self.ir_files + ) + + clean_wav = clean_wav.squeeze_(0) + noisy_wav_16k = noisy_wav_16k.squeeze_(0) + + # 音量をランダマイズする + amplitude = torch.rand(()).item() * 0.899 + 0.1 + factor = amplitude / clean_wav.abs().max() + clean_wav *= factor + noisy_wav_16k *= factor + while noisy_wav_16k.abs().max() >= 1.0: + clean_wav *= 0.5 + noisy_wav_16k *= 0.5 + + return clean_wav, noisy_wav_16k, speaker_id, formant_shift + + def __len__(self) -> int: + return len(self.audio_files) + + def collate( + self, batch: list[tuple[torch.Tensor, torch.Tensor, int, int]] + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + assert self.wav_length % self.out_hop_length == 0 + length = self.wav_length // self.out_hop_length + clean_wavs = [] + noisy_wavs = [] + slice_starts = [] + speaker_ids = [] + formant_shifts = [] + for clean_wav, noisy_wav, speaker_id, formant_shift in batch: + # 発声部分をランダムに 1 箇所選ぶ + (voiced,) = clean_wav.nonzero(as_tuple=True) + assert voiced.numel() != 0 + center = voiced[torch.randint(0, voiced.numel(), ()).item()].item() + # 発声部分が中央にくるように、スライス区間を選ぶ + slice_start = center - self.segment_length * self.out_hop_length // 2 + 
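+            # __getitem__ zero-padded clean_wav by wav_length on both sides,
+            # so any nonzero (voiced) sample lies at least wav_length from the
+            # edges and the assert below cannot fire.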
assert slice_start >= 0 + # スライス区間が含まれるように、ランダムに wav_length の長さを切り出す + r = torch.randint(0, length - self.segment_length + 1, ()).item() + offset = slice_start - r * self.out_hop_length + clean_wavs.append(clean_wav[offset : offset + self.wav_length]) + offset_in_sample_rate = int( + round(offset * self.in_sample_rate / self.out_sample_rate) + ) + noisy_wavs.append( + noisy_wav[ + offset_in_sample_rate : offset_in_sample_rate + + length * self.in_hop_length + ] + ) + slice_start = r + slice_starts.append(slice_start) + speaker_ids.append(speaker_id) + formant_shifts.append(formant_shift) + clean_wavs = torch.stack(clean_wavs) + noisy_wavs = torch.stack(noisy_wavs) + slice_starts = torch.tensor(slice_starts) + speaker_ids = torch.tensor(speaker_ids) + formant_shifts = torch.tensor(formant_shifts) + return ( + clean_wavs, # [batch_size, wav_length] + noisy_wavs, # [batch_size, wav_length] + slice_starts, # Long[batch_size] + speaker_ids, # Long[batch_size] + formant_shifts, # Long[batch_size] + ) + + +# %% [markdown] +# ## Train + +# %% +AUDIO_FILE_SUFFIXES = { + ".wav", + ".aif", + ".aiff", + ".fla", + ".flac", + ".oga", + ".ogg", + ".opus", + ".mp3", +} + + +def prepare_training(): + # 各種準備をする + # 副作用として、出力ディレクトリと TensorBoard のログファイルなどが生成される + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"device={device}") + + torch.backends.cudnn.benchmark = True + torch.backends.cuda.matmul.allow_tf32 = True + + (h, in_wav_dataset_dir, out_dir, resume) = ( + prepare_training_configs_for_experiment + if is_notebook() + else prepare_training_configs + )() + + print("config:") + pprint(h) + print() + h = AttrDict(h) + + if not in_wav_dataset_dir.is_dir(): + raise ValueError(f"{in_wav_dataset_dir} is not found.") + if resume: + latest_checkpoint_file = out_dir / "checkpoint_latest.pt" + if not latest_checkpoint_file.is_file(): + raise ValueError(f"{latest_checkpoint_file} is not found.") + else: + if out_dir.is_dir(): + if (out_dir / "checkpoint_latest.pt").is_file(): + raise ValueError( + f"{out_dir / 'checkpoint_latest.pt'} already exists. " + "Please specify a different output directory, or use --resume option." + ) + for file in out_dir.iterdir(): + if file.suffix == ".pt": + raise ValueError( + f"{out_dir} already contains model files. " + "Please specify a different output directory." 
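+                        # Refuse to overwrite artifacts from a previous run;
+                        # a directory that already holds checkpoints can only
+                        # be reused together with --resume.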
+                    )
+        else:
+            out_dir.mkdir(parents=True)
+
+    in_ir_wav_dir = repo_root() / h.in_ir_wav_dir
+    in_noise_wav_dir = repo_root() / h.in_noise_wav_dir
+    in_test_wav_dir = repo_root() / h.in_test_wav_dir
+
+    assert in_wav_dataset_dir.is_dir(), in_wav_dataset_dir
+    assert out_dir.is_dir(), out_dir
+    assert in_ir_wav_dir.is_dir(), in_ir_wav_dir
+    assert in_noise_wav_dir.is_dir(), in_noise_wav_dir
+    assert in_test_wav_dir.is_dir(), in_test_wav_dir
+
+    # Recursively collect *.wav and *.flac files.
+    noise_files = sorted(
+        list(in_noise_wav_dir.rglob("*.wav")) + list(in_noise_wav_dir.rglob("*.flac"))
+    )
+    if len(noise_files) == 0:
+        raise ValueError(f"No audio data found in {in_noise_wav_dir}.")
+    ir_files = sorted(
+        list(in_ir_wav_dir.rglob("*.wav")) + list(in_ir_wav_dir.rglob("*.flac"))
+    )
+    if len(ir_files) == 0:
+        raise ValueError(f"No audio data found in {in_ir_wav_dir}.")
+
+    # TODO: silence removal, etc.
+
+    def get_training_filelist(in_wav_dataset_dir: Path):
+        min_data_per_speaker = 1
+        speakers: list[str] = []
+        training_filelist: list[tuple[Path, int]] = []
+        speaker_audio_files: list[list[Path]] = []
+        for speaker_dir in sorted(in_wav_dataset_dir.iterdir()):
+            if not speaker_dir.is_dir():
+                continue
+            candidates = []
+            for wav_file in sorted(speaker_dir.rglob("*")):
+                if (
+                    not wav_file.is_file()
+                    or wav_file.suffix.lower() not in AUDIO_FILE_SUFFIXES
+                ):
+                    continue
+                candidates.append(wav_file)
+            if len(candidates) >= min_data_per_speaker:
+                speaker_id = len(speakers)
+                speakers.append(speaker_dir.name)
+                training_filelist.extend([(file, speaker_id) for file in candidates])
+                speaker_audio_files.append(candidates)
+        return speakers, training_filelist, speaker_audio_files
+
+    speakers, training_filelist, speaker_audio_files = get_training_filelist(
+        in_wav_dataset_dir
+    )
+    n_speakers = len(speakers)
+    if n_speakers == 0:
+        raise ValueError(f"No speaker data found in {in_wav_dataset_dir}.")
+    print(f"{n_speakers=}")
+    for i, speaker in enumerate(speakers):
+        print(f" {i:{len(str(n_speakers - 1))}d}: {speaker}")
+    print()
+    print(f"{len(training_filelist)=}")
+
+    def get_test_filelist(
+        in_test_wav_dir: Path, n_speakers: int
+    ) -> list[tuple[Path, list[int]]]:
+        max_n_test_files = 1000
+        test_filelist = []
+        rng = Random(42)
+
+        def get_target_id_generator():
+            if n_speakers > 8:
+                while True:
+                    order = list(range(n_speakers))
+                    rng.shuffle(order)
+                    yield from order
+            else:
+                while True:
+                    yield from range(n_speakers)
+
+        target_id_generator = get_target_id_generator()
+        for file in sorted(in_test_wav_dir.iterdir())[:max_n_test_files]:
+            if file.suffix.lower() not in AUDIO_FILE_SUFFIXES:
+                continue
+            target_ids = [next(target_id_generator) for _ in range(min(8, n_speakers))]
+            test_filelist.append((file, target_ids))
+        return test_filelist
+
+    test_filelist = get_test_filelist(in_test_wav_dir, n_speakers)
+    if len(test_filelist) == 0:
+        warnings.warn(f"No audio data found in {in_test_wav_dir}.")
+    print(f"{len(test_filelist)=}")
+    for file, target_ids in test_filelist[:12]:
+        print(f" {file}, {target_ids}")
+    if len(test_filelist) > 12:
+        print(" ...")
+    print()
+
+    # Data
+
+    training_dataset = WavDataset(
+        training_filelist,
+        in_sample_rate=h.in_sample_rate,
+        out_sample_rate=h.out_sample_rate,
+        wav_length=h.wav_length,
+        segment_length=h.segment_length,
+        noise_files=noise_files,
+        ir_files=ir_files,
+    )
+    training_loader = torch.utils.data.DataLoader(
+        training_dataset,
+        num_workers=min(h.num_workers, os.cpu_count()),
+        collate_fn=training_dataset.collate,
+        shuffle=True,
+        sampler=None,
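+        # drop_last=True keeps every batch the same shape, which should also
+        # let torch.backends.cudnn.benchmark reuse its autotuned kernels;
+        # shuffling is handled by the loader itself, so no explicit sampler
+        # is needed.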
batch_size=h.batch_size, + pin_memory=True, + drop_last=True, + ) + + print("Computing mean F0s of target speakers...", end="") + speaker_f0s = [] + for speaker, files in enumerate(speaker_audio_files): + if len(files) > 10: + files = Random(42).sample(files, 10) + f0 = compute_mean_f0(files) + speaker_f0s.append(f0) + if speaker % 5 == 0: + print() + print(f" {speaker:3d}: {f0:.1f}Hz", end=",") + print() + print("Done.") + print("Computing pitch shifts for test files...") + test_pitch_shifts = [] + source_f0s = [] + for i, (file, target_ids) in enumerate(tqdm(test_filelist)): + source_f0 = compute_mean_f0([file], method="harvest") + source_f0s.append(source_f0) + if source_f0 != source_f0: + test_pitch_shifts.append([0] * len(target_ids)) + continue + pitch_shifts = [] + for target_id in target_ids: + target_f0 = speaker_f0s[target_id] + if target_f0 != target_f0: + pitch_shift = 0 + else: + pitch_shift = int(round(12 * math.log2(target_f0 / source_f0))) + pitch_shifts.append(pitch_shift) + test_pitch_shifts.append(pitch_shifts) + print("Done.") + + # モデルと最適化 + + phone_extractor = PhoneExtractor().to(device).eval().requires_grad_(False) + phone_extractor_checkpoint = torch.load( + repo_root() / h.phone_extractor_file, map_location="cpu" + ) + print( + phone_extractor.load_state_dict(phone_extractor_checkpoint["phone_extractor"]) + ) + del phone_extractor_checkpoint + + pitch_estimator = PitchEstimator().to(device).eval().requires_grad_(False) + pitch_estimator_checkpoint = torch.load( + repo_root() / h.pitch_estimator_file, map_location="cpu" + ) + print( + pitch_estimator.load_state_dict(pitch_estimator_checkpoint["pitch_estimator"]) + ) + del pitch_estimator_checkpoint + + net_g = ConverterNetwork( + phone_extractor, + pitch_estimator, + n_speakers, + h.hidden_channels, + ).to(device) + net_d = MultiPeriodDiscriminator(san=h.san).to(device) + + optim_g = torch.optim.AdamW( + net_g.parameters(), + h.learning_rate, + betas=h.adam_betas, + eps=h.adam_eps, + ) + optim_d = torch.optim.AdamW( + net_d.parameters(), + h.learning_rate, + betas=h.adam_betas, + eps=h.adam_eps, + ) + + grad_scaler = torch.cuda.amp.GradScaler(enabled=h.use_amp) + grad_balancer = GradBalancer( + weights={ + "loss_mel": h.grad_weight_mel, + "loss_adv": h.grad_weight_adv, + "loss_fm": h.grad_weight_fm, + }, + ema_decay=h.grad_balancer_ema_decay, + ) + resample_to_in_sample_rate = torchaudio.transforms.Resample( + h.out_sample_rate, h.in_sample_rate + ).to(device) + + # チェックポイント読み出し + + initial_iteration = 0 + if resume: + checkpoint_file = latest_checkpoint_file + elif h.pretrained_file is not None: + checkpoint_file = repo_root() / h.pretrained_file + else: + checkpoint_file = None + if checkpoint_file is not None: + checkpoint = torch.load(checkpoint_file, map_location="cpu") + if not resume: # ファインチューニング + checkpoint_n_speakers = len(checkpoint["net_g"]["embed_speaker.weight"]) + initial_speaker_embedding = checkpoint["net_g"]["embed_speaker.weight"][:1] + # initial_speaker_embedding = checkpoint["net_g"]["embed_speaker.weight"].mean( + # 0, keepdim=True + # ) + if True: + # 0 とかランダムとかの方が良いかもしれない + checkpoint["net_g"]["embed_speaker.weight"] = initial_speaker_embedding[ + [0] * n_speakers + ] + else: # 話者追加用 + assert n_speakers > checkpoint_n_speakers + print( + f"embed_speaker.weight was padded: {checkpoint_n_speakers} -> {n_speakers}" + ) + checkpoint["net_g"]["embed_speaker.weight"] = F.pad( + checkpoint["net_g"]["embed_speaker.weight"], + (0, 0, 0, n_speakers - checkpoint_n_speakers), + ) + 
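+            # The zero rows appended by F.pad are overwritten below with
+            # speaker 0's embedding, so newly added speakers start from an
+            # existing voice rather than from zeros.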
checkpoint["net_g"]["embed_speaker.weight"][ + checkpoint_n_speakers: + ] = initial_speaker_embedding + print(net_g.load_state_dict(checkpoint["net_g"], strict=False)) + print(net_d.load_state_dict(checkpoint["net_d"], strict=False)) + if resume: + optim_g.load_state_dict(checkpoint["optim_g"]) + optim_d.load_state_dict(checkpoint["optim_d"]) + initial_iteration = checkpoint["iteration"] + grad_balancer.load_state_dict(checkpoint["grad_balancer"]) + grad_scaler.load_state_dict(checkpoint["grad_scaler"]) + + # スケジューラ + + def get_cosine_annealing_warmup_scheduler( + optimizer: torch.optim.Optimizer, + warmup_epochs: int, + total_epochs: int, + min_learning_rate: float, + ) -> torch.optim.lr_scheduler.LambdaLR: + lr_ratio = min_learning_rate / optimizer.param_groups[0]["lr"] + m = 0.5 * (1.0 - lr_ratio) + a = 0.5 * (1.0 + lr_ratio) + + def lr_lambda(current_epoch: int) -> float: + if current_epoch < warmup_epochs: + return current_epoch / warmup_epochs + elif current_epoch < total_epochs: + rate = (current_epoch - warmup_epochs) / (total_epochs - warmup_epochs) + return math.cos(rate * math.pi) * m + a + else: + return min_learning_rate + + return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda) + + scheduler_g = get_cosine_annealing_warmup_scheduler( + optim_g, h.warmup_steps, h.n_steps, h.min_learning_rate + ) + scheduler_d = get_cosine_annealing_warmup_scheduler( + optim_d, h.warmup_steps, h.n_steps, h.min_learning_rate + ) + for _ in range(initial_iteration + 1): + scheduler_g.step() + scheduler_d.step() + + net_g.train() + net_d.train() + + # ログとか + + dict_scalars = defaultdict(list) + quality_tester = QualityTester().eval().to(device) + writer = SummaryWriter(out_dir) + writer.add_text( + "log", + f"start training w/ {torch.cuda.get_device_name(device) if torch.cuda.is_available() else 'cpu'}.", + initial_iteration, + ) + if not resume: + with open(out_dir / "config.json", "w", encoding="utf-8") as f: + json.dump(dict(h), f, indent=4) + if not is_notebook(): + shutil.copy(__file__, out_dir) + + return ( + device, + in_wav_dataset_dir, + h, + out_dir, + speakers, + test_filelist, + training_loader, + speaker_f0s, + test_pitch_shifts, + phone_extractor, + pitch_estimator, + net_g, + net_d, + optim_g, + optim_d, + grad_scaler, + grad_balancer, + resample_to_in_sample_rate, + initial_iteration, + scheduler_g, + scheduler_d, + dict_scalars, + quality_tester, + writer, + ) + + +if __name__ == "__main__": + ( + device, + in_wav_dataset_dir, + h, + out_dir, + speakers, + test_filelist, + training_loader, + speaker_f0s, + test_pitch_shifts, + phone_extractor, + pitch_estimator, + net_g, + net_d, + optim_g, + optim_d, + grad_scaler, + grad_balancer, + resample_to_in_sample_rate, + initial_iteration, + scheduler_g, + scheduler_d, + dict_scalars, + quality_tester, + writer, + ) = prepare_training() + + # 学習 + + for iteration in tqdm(range(initial_iteration, h.n_steps)): + # === 1. 
Data preprocessing ===
+        try:
+            batch = next(data_iter)
+        except (NameError, StopIteration):
+            # data_iter is undefined on the first iteration (NameError) and
+            # exhausted at the end of each pass over the data (StopIteration).
+            data_iter = iter(training_loader)
+            batch = next(data_iter)
+        (
+            clean_wavs,
+            noisy_wavs_16k,
+            slice_starts,
+            speaker_ids,
+            formant_shift_semitone,
+        ) = map(lambda x: x.to(device, non_blocking=True), batch)
+
+        # === 2.1 Discriminator training ===
+
+        with torch.cuda.amp.autocast(h.use_amp):
+            # Generator
+            y, y_hat, y_hat_for_backward, loss_mel = net_g.forward_and_compute_loss(
+                noisy_wavs_16k[:, None, :],
+                speaker_ids,
+                formant_shift_semitone,
+                slice_start_indices=slice_starts,
+                slice_segment_length=h.segment_length,
+                y_all=clean_wavs[:, None, :],
+            )
+            assert y_hat.isfinite().all()
+            assert loss_mel.isfinite().all()
+
+            # Discriminator
+            loss_discriminator, discriminator_d_stats = (
+                net_d.forward_and_compute_discriminator_loss(y, y_hat.detach())
+            )
+
+        optim_d.zero_grad()
+        grad_scaler.scale(loss_discriminator).backward()
+        grad_scaler.unscale_(optim_d)
+        grad_norm_d, d_grad_norm_stats = compute_grad_norm(net_d, True)
+        grad_scaler.step(optim_d)
+
+        # === 2.2 Generator training ===
+
+        with torch.cuda.amp.autocast(h.use_amp):
+            # Discriminator
+            loss_adv, loss_fm, discriminator_g_stats = (
+                net_d.forward_and_compute_generator_loss(y, y_hat)
+            )
+
+        optim_g.zero_grad()
+        gradient_balancer_stats = grad_balancer.backward(
+            {
+                "loss_mel": loss_mel,
+                "loss_adv": loss_adv,
+                "loss_fm": loss_fm,
+            },
+            y_hat_for_backward,
+            grad_scaler,
+            skip_update_ema=iteration > 10 and iteration % 5 != 0,
+        )
+        grad_scaler.unscale_(optim_g)
+        grad_norm_g, g_grad_norm_stats = compute_grad_norm(net_g, True)
+        grad_scaler.step(optim_g)
+        grad_scaler.update()
+
+        # === 3. Logging ===
+
+        dict_scalars["loss_g/loss_mel"].append(loss_mel.item())
+        dict_scalars["loss_g/loss_fm"].append(loss_fm.item())
+        dict_scalars["loss_g/loss_adv"].append(loss_adv.item())
+        dict_scalars["other/grad_scale"].append(grad_scaler.get_scale())
+        dict_scalars["loss_d/loss_discriminator"].append(loss_discriminator.item())
+        if math.isfinite(grad_norm_d):
+            dict_scalars["other/gradient_norm_d"].append(grad_norm_d)
+            for name, value in d_grad_norm_stats.items():
+                dict_scalars[f"~gradient_norm_d/{name}"].append(value)
+        if math.isfinite(grad_norm_g):
+            dict_scalars["other/gradient_norm_g"].append(grad_norm_g)
+            for name, value in g_grad_norm_stats.items():
+                dict_scalars[f"~gradient_norm_g/{name}"].append(value)
+        dict_scalars["other/lr_g"].append(scheduler_g.get_last_lr()[0])
+        dict_scalars["other/lr_d"].append(scheduler_d.get_last_lr()[0])
+        for k, v in discriminator_d_stats.items():
+            dict_scalars[f"~loss_discriminator/{k}"].append(v)
+        for k, v in discriminator_g_stats.items():
+            dict_scalars[f"~loss_discriminator/{k}"].append(v)
+        for k, v in gradient_balancer_stats.items():
+            dict_scalars[f"~gradient_balancer/{k}"].append(v)
+
+        if (iteration + 1) % 1000 == 0 or iteration == 0:
+            for name, scalars in dict_scalars.items():
+                if scalars:
+                    writer.add_scalar(name, sum(scalars) / len(scalars), iteration + 1)
+                    scalars.clear()
+
+        # === 4. 
検証 === + if (iteration + 1) % 50000 == 0 or iteration + 1 in { + 1, + 5000, + 10000, + 30000, + h.n_steps, + }: + net_g.eval() + torch.cuda.empty_cache() + + dict_qualities_all = defaultdict(list) + n_added_wavs = 0 + with torch.inference_mode(): + for i, ((file, target_ids), pitch_shift_semitones) in enumerate( + zip(test_filelist, test_pitch_shifts) + ): + source_wav, sr = torchaudio.load(file, backend="soundfile") + source_wav = source_wav.to(device) + if sr != h.in_sample_rate: + source_wav = get_resampler(sr, h.in_sample_rate, device)( + source_wav + ) + source_wav = source_wav.to(device) + original_source_wav_length = source_wav.size(1) + # 長さのパターンを減らしてキャッシュを効かせる + if source_wav.size(1) % h.in_sample_rate == 0: + padded_source_wav = source_wav + else: + padded_source_wav = F.pad( + source_wav, + ( + 0, + h.in_sample_rate + - source_wav.size(1) % h.in_sample_rate, + ), + ) + converted = net_g( + padded_source_wav[[0] * len(target_ids), None], + torch.tensor(target_ids, device=device), + torch.tensor( + [0.0] * len(target_ids), device=device + ), # フォルマントシフト + torch.tensor( + [float(p) for p in pitch_shift_semitones], device=device + ), + ).squeeze_(1)[:, : original_source_wav_length // 160 * 240] + if i < 12: + if iteration == 0: + writer.add_audio( + f"source/y_{i:02d}", + source_wav, + iteration + 1, + h.in_sample_rate, + ) + for d in range( + min(len(target_ids), 1 + (12 - i - 1) // len(test_filelist)) + ): + idx_in_batch = n_added_wavs % len(target_ids) + writer.add_audio( + f"converted/y_hat_{i:02d}_{target_ids[idx_in_batch]:03d}_{pitch_shift_semitones[idx_in_batch]:+02d}", + converted[idx_in_batch], + iteration + 1, + h.out_sample_rate, + ) + n_added_wavs += 1 + converted = resample_to_in_sample_rate(converted) + quality = quality_tester.test(converted, source_wav) + for metric_name, values in quality.items(): + dict_qualities_all[metric_name].extend(values) + assert n_added_wavs == min( + 12, len(test_filelist) * len(test_filelist[0][1]) + ), ( + n_added_wavs, + len(test_filelist), + len(speakers), + len(test_filelist[0][1]), + ) + dict_qualities = { + metric_name: sum(values) / len(values) + for metric_name, values in dict_qualities_all.items() + if len(values) + } + for metric_name, value in dict_qualities.items(): + writer.add_scalar(f"validation/{metric_name}", value, iteration + 1) + for metric_name, values in dict_qualities_all.items(): + for i, value in enumerate(values): + writer.add_scalar( + f"~validation_{metric_name}/{i:03d}", value, iteration + 1 + ) + del dict_qualities, dict_qualities_all + + gc.collect() + net_g.train() + torch.cuda.empty_cache() + + # === 5. 
保存 === + if (iteration + 1) % 50000 == 0 or iteration + 1 in { + 1, + 5000, + 10000, + 30000, + h.n_steps, + }: + # チェックポイント + name = f"{in_wav_dataset_dir.name}_{iteration + 1:08d}" + checkpoint_file_save = out_dir / f"checkpoint_{name}.pt" + if checkpoint_file_save.exists(): + checkpoint_file_save = checkpoint_file_save.with_name( + f"{checkpoint_file_save.name}_{hash(None):x}" + ) + torch.save( + { + "iteration": iteration + 1, + "net_g": net_g.state_dict(), + "phone_extractor": phone_extractor.state_dict(), + "pitch_estimator": pitch_estimator.state_dict(), + "net_d": net_d.state_dict(), + "optim_g": optim_g.state_dict(), + "optim_d": optim_d.state_dict(), + "grad_balancer": grad_balancer.state_dict(), + "grad_scaler": grad_scaler.state_dict(), + "h": dict(h), + }, + checkpoint_file_save, + ) + shutil.copy(checkpoint_file_save, out_dir / "checkpoint_latest.pt") + + # 推論用 + paraphernalia_dir = out_dir / f"paraphernalia_{name}" + if paraphernalia_dir.exists(): + paraphernalia_dir = paraphernalia_dir.with_name( + f"{paraphernalia_dir.name}_{hash(None):x}" + ) + paraphernalia_dir.mkdir() + phone_extractor_fp16 = PhoneExtractor() + phone_extractor_fp16.load_state_dict(phone_extractor.state_dict()) + phone_extractor_fp16.remove_weight_norm() + phone_extractor_fp16.merge_weights() + phone_extractor_fp16.half() + phone_extractor_fp16.dump(paraphernalia_dir / f"phone_extractor.bin") + del phone_extractor_fp16 + pitch_estimator_fp16 = PitchEstimator() + pitch_estimator_fp16.load_state_dict(pitch_estimator.state_dict()) + pitch_estimator_fp16.merge_weights() + pitch_estimator_fp16.half() + pitch_estimator_fp16.dump(paraphernalia_dir / f"pitch_estimator.bin") + del pitch_estimator_fp16 + net_g_fp16 = ConverterNetwork( + nn.Module(), nn.Module(), len(speakers), h.hidden_channels + ) + net_g_fp16.load_state_dict(net_g.state_dict()) + net_g_fp16.remove_weight_norm() + net_g_fp16.merge_weights() + net_g_fp16.half() + net_g_fp16.dump(paraphernalia_dir / f"waveform_generator.bin") + with open(paraphernalia_dir / f"speaker_embeddings.bin", "wb") as f: + dump_layer(net_g_fp16.embed_speaker, f) + with open(paraphernalia_dir / f"formant_shift_embeddings.bin", "wb") as f: + dump_layer(net_g_fp16.embed_formant_shift, f) + del net_g_fp16 + shutil.copy(repo_root() / "assets/images/noimage.png", paraphernalia_dir) + with open( + paraphernalia_dir / f"beatrice_paraphernalia_{name}.toml", + "w", + encoding="utf-8", + ) as f: + f.write( + f'''[model] +version = "{PARAPHERNALIA_VERSION}" +name = "{name}" +description = """ +No description for this model. +このモデルの説明はありません。 +""" +''' + ) + for speaker_id, (speaker, speaker_f0) in enumerate( + zip(speakers, speaker_f0s) + ): + average_pitch = 69.0 + 12.0 * math.log2(speaker_f0 / 440.0) + average_pitch = round(average_pitch * 8.0) / 8.0 + f.write( + f''' +[voice.{speaker_id}] +name = "{speaker}" +description = """ +No description for this voice. +この声の説明はありません。 +""" +average_pitch = {average_pitch} + +[voice.{speaker_id}.portrait] +path = "noimage.png" +description = """ +""" +''' + ) + del paraphernalia_dir + + # TODO: phone_extractor, pitch_estimator が既知のモデルであれば dump を省略 + + # === 6. 
Scheduler update ===
+        scheduler_g.step()
+        scheduler_d.step()
+
+
+    print("Training finished.")
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5357f8e3c4d5a9d0c621034999a5e7dcdb732ed5
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,23 @@
+[tool.poetry]
+name = "beatrice-trainer"
+version = "2.0.0b0"
+description = "A tool to train Beatrice models"
+license = "MIT"
+authors = ["Project Beatrice <167534685+prj-beatrice@users.noreply.github.com>"]
+readme = "README.md"
+homepage = "https://prj-beatrice.com/"
+repository = "https://huggingface.co/fierce-cats/beatrice-trainer"
+
+[tool.poetry.dependencies]
+python = "^3.9"
+torch = "^2,<2.3"
+torchaudio = "^2"
+tqdm = "^4"
+numpy = "^1"
+tensorboard = "^2"
+soundfile = "^0.11"
+pyworld = "^0.3.2"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"