diff --git a/.gitattributes b/.gitattributes index c7d9f3332a950355d5a77d85000f05e6f45435ea..42bfbed689f3358eda367c7e021adea1351b06b9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,13 +2,11 @@ *.arrow filter=lfs diff=lfs merge=lfs -text *.bin filter=lfs diff=lfs merge=lfs -text *.bz2 filter=lfs diff=lfs merge=lfs -text -*.ckpt filter=lfs diff=lfs merge=lfs -text *.ftz filter=lfs diff=lfs merge=lfs -text *.gz filter=lfs diff=lfs merge=lfs -text *.h5 filter=lfs diff=lfs merge=lfs -text *.joblib filter=lfs diff=lfs merge=lfs -text *.lfs.* filter=lfs diff=lfs merge=lfs -text -*.mlmodel filter=lfs diff=lfs merge=lfs -text *.model filter=lfs diff=lfs merge=lfs -text *.msgpack filter=lfs diff=lfs merge=lfs -text *.npy filter=lfs diff=lfs merge=lfs -text @@ -16,13 +14,12 @@ *.onnx filter=lfs diff=lfs merge=lfs -text *.ot filter=lfs diff=lfs merge=lfs -text *.parquet filter=lfs diff=lfs merge=lfs -text -*.pb filter=lfs diff=lfs merge=lfs -text *.pickle filter=lfs diff=lfs merge=lfs -text *.pkl filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text *.pt filter=lfs diff=lfs merge=lfs -text *.pth filter=lfs diff=lfs merge=lfs -text *.rar filter=lfs diff=lfs merge=lfs -text -*.safetensors filter=lfs diff=lfs merge=lfs -text saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.tar.* filter=lfs diff=lfs merge=lfs -text *.tflite filter=lfs diff=lfs merge=lfs -text @@ -30,5 +27,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.wasm filter=lfs diff=lfs merge=lfs -text *.xz filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text -*.zst filter=lfs diff=lfs merge=lfs -text +*.zstandard filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +*.o filter=lfs diff=lfs merge=lfs -text +*.dll filter=lfs diff=lfs merge=lfs -text +*.so filter=lfs diff=lfs merge=lfs -text +*.ocd2 filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..95c5523723c55a5844399392d3a5836c729d94b8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,365 @@ +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. 
+## +## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore + +# User-specific files +*.rsuser +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Mono auto generated files +mono_crash.* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +[Ww][Ii][Nn]32/ +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +bld/ +[Bb]in/ +[Oo]bj/ +[Oo]ut/ +[Ll]og/ +[Ll]ogs/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +# Uncomment if you have tasks that create the project's static files in wwwroot +#wwwroot/ + +# Visual Studio 2017 auto generated files +Generated\ Files/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUnit +*.VisualState.xml +TestResult.xml +nunit-*.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +# Benchmark Results +BenchmarkDotNet.Artifacts/ + +# .NET Core +project.lock.json +project.fragment.lock.json +artifacts/ + +# ASP.NET Scaffolding +ScaffoldingReadMe.txt + +# StyleCop +StyleCopReport.xml + +# Files built by Visual Studio +*_i.c +*_p.c +*_h.h +*.ilk +*.meta +*.obj +*.iobj +*.pch +*.pdb +*.ipdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.tmp_proj +*_wpftmp.csproj +*.log +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opendb +*.opensdf +*.sdf +*.cachefile +*.VC.db +*.VC.VC.opendb + +# Visual Studio profiler +*.psess +*.vsp +*.vspx +*.sap + +# Visual Studio Trace Files +*.e2e + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# AxoCover is a Code Coverage Tool +.axoCover/* +!.axoCover/settings.json + +# Coverlet is a free, cross platform Code Coverage Tool +coverage*.json +coverage*.xml +coverage*.info + +# Visual Studio code coverage results +*.coverage +*.coveragexml + +# NCrunch +_NCrunch_* +.*crunch*.local.xml +nCrunchTemp_* + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# Note: Comment the next line if you want to checkin your web deploy settings, +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# Microsoft Azure Web App publish settings. Comment the next line if you want to +# checkin your Azure Web App publish settings, but sensitive information contained +# in these scripts will be unencrypted +PublishScripts/ + +# NuGet Packages +*.nupkg +# NuGet Symbol Packages +*.snupkg +# The packages folder can be ignored because of Package Restore +**/[Pp]ackages/* +# except build/, which is used as an MSBuild target. 
+!**/[Pp]ackages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/[Pp]ackages/repositories.config +# NuGet v3's project.json files produces more ignorable files +*.nuget.props +*.nuget.targets + +# Microsoft Azure Build Output +csx/ +*.build.csdef + +# Microsoft Azure Emulator +ecf/ +rcf/ + +# Windows Store app package directories and files +AppPackages/ +BundleArtifacts/ +Package.StoreAssociation.xml +_pkginfo.txt +*.appx +*.appxbundle +*.appxupload + +# Visual Studio cache files +# files ending in .cache can be ignored +*.[Cc]ache +# but keep track of directories ending in .cache +!?*.[Cc]ache/ + +# Others +ClientBin/ +~$* +*~ +*.dbmdl +*.dbproj.schemaview +*.jfm +*.pfx +*.publishsettings +orleans.codegen.cs + +# Including strong name files can present a security risk +# (https://github.com/github/gitignore/pull/2483#issue-259490424) +#*.snk + +# Since there are multiple workflows, uncomment next line to ignore bower_components +# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) +#bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm +ServiceFabricBackup/ +*.rptproj.bak + +# SQL Server files +*.mdf +*.ldf +*.ndf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings +*.rptproj.rsuser +*- [Bb]ackup.rdl +*- [Bb]ackup ([0-9]).rdl +*- [Bb]ackup ([0-9][0-9]).rdl + +# Microsoft Fakes +FakesAssemblies/ + +# GhostDoc plugin setting file +*.GhostDoc.xml + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat +node_modules/ + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + +# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
+*.vbw + +# Visual Studio LightSwitch build output +**/*.HTMLClient/GeneratedArtifacts +**/*.DesktopClient/GeneratedArtifacts +**/*.DesktopClient/ModelManifest.xml +**/*.Server/GeneratedArtifacts +**/*.Server/ModelManifest.xml +_Pvt_Extensions + +# Paket dependency manager +.paket/paket.exe +paket-files/ + +# FAKE - F# Make +.fake/ + +# CodeRush personal settings +.cr/personal + +# Python Tools for Visual Studio (PTVS) +__pycache__/ +*.pyc + +# Cake - Uncomment if you are using it +# tools/** +# !tools/packages.config + +# Tabs Studio +*.tss + +# Telerik's JustMock configuration file +*.jmconfig + +# BizTalk build output +*.btp.cs +*.btm.cs +*.odx.cs +*.xsd.cs + +# OpenCover UI analysis results +OpenCover/ + +# Azure Stream Analytics local run output +ASALocalRun/ + +# MSBuild Binary and Structured Log +*.binlog + +# NVidia Nsight GPU debugger configuration file +*.nvuser + +# MFractors (Xamarin productivity tool) working folder +.mfractor/ + +# Local History for Visual Studio +.localhistory/ + +# BeatPulse healthcheck temp database +healthchecksdb + +# Backup folder for Package Reference Convert tool in Visual Studio 2017 +MigrationBackup/ + +# Ionide (cross platform F# VS Code tools) working folder +.ionide/ + +# Fody - auto-generated XML schema +FodyWeavers.xsd + +.idea/ \ No newline at end of file diff --git a/README.md b/README.md index 5cf64d652fbc8d78dd75df130de10292cfd31ea2..1ba3391523df086dcd40411b7d25fc28caab90f9 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,13 @@ --- -title: Mtts -emoji: 📈 -colorFrom: indigo -colorTo: red +title: Moe TTS +emoji: 😊🎙️ +colorFrom: red +colorTo: pink sdk: gradio -sdk_version: 3.10.1 +sdk_version: 3.9 app_file: app.py pinned: false +license: mit --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..475277508d08afe7e5e773127af56e8097c351fa --- /dev/null +++ b/app.py @@ -0,0 +1,328 @@ +import argparse +import json +import os +import re +import tempfile + +import librosa +import numpy as np +import torch +from torch import no_grad, LongTensor +import commons +import utils +import gradio as gr +import gradio.utils as gr_utils +import gradio.processing_utils as gr_processing_utils +from models import SynthesizerTrn +from text import text_to_sequence, _clean_text +from mel_processing import spectrogram_torch + +limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces + + +def audio_postprocess(self, y): + if y is None: + return None + + if gr_utils.validate_url(y): + file = gr_processing_utils.download_to_file(y, dir=self.temp_dir) + elif isinstance(y, tuple): + sample_rate, data = y + file = tempfile.NamedTemporaryFile( + suffix=".wav", dir=self.temp_dir, delete=False + ) + gr_processing_utils.audio_to_file(sample_rate, data, file.name) + else: + file = gr_processing_utils.create_tmp_copy_of_file(y, dir=self.temp_dir) + + return gr_processing_utils.encode_url_or_file_to_base64(file.name) + + +gr.Audio.postprocess = audio_postprocess + + +def get_text(text, hps, is_symbol): + text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) + if hps.data.add_blank: + text_norm = commons.intersperse(text_norm, 0) + text_norm = LongTensor(text_norm) + return text_norm + + +def create_tts_fn(model, hps, speaker_ids): + def tts_fn(text, speaker, speed, is_symbol): + if limitation: + text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) + max_len = 
150 + if is_symbol: + max_len *= 3 + if text_len > max_len: + return "Error: Text is too long", None + + speaker_id = speaker_ids[speaker] + stn_tst = get_text(text, hps, is_symbol) + with no_grad(): + x_tst = stn_tst.unsqueeze(0).to(device) + x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) + sid = LongTensor([speaker_id]).to(device) + audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, + length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() + del stn_tst, x_tst, x_tst_lengths, sid + return "Success", (hps.data.sampling_rate, audio) + + return tts_fn + + +def create_vc_fn(model, hps, speaker_ids): + def vc_fn(original_speaker, target_speaker, input_audio): + if input_audio is None: + return "You need to upload an audio", None + sampling_rate, audio = input_audio + duration = audio.shape[0] / sampling_rate + if limitation and duration > 30: + return "Error: Audio is too long", None + original_speaker_id = speaker_ids[original_speaker] + target_speaker_id = speaker_ids[target_speaker] + + audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) + if len(audio.shape) > 1: + audio = librosa.to_mono(audio.transpose(1, 0)) + if sampling_rate != hps.data.sampling_rate: + audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) + with no_grad(): + y = torch.FloatTensor(audio) + y = y.unsqueeze(0) + spec = spectrogram_torch(y, hps.data.filter_length, + hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, + center=False).to(device) + spec_lengths = LongTensor([spec.size(-1)]).to(device) + sid_src = LongTensor([original_speaker_id]).to(device) + sid_tgt = LongTensor([target_speaker_id]).to(device) + audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][ + 0, 0].data.cpu().float().numpy() + del y, spec, spec_lengths, sid_src, sid_tgt + return "Success", (hps.data.sampling_rate, audio) + + return vc_fn + + +def create_soft_vc_fn(model, hps, speaker_ids): + def soft_vc_fn(target_speaker, input_audio1, input_audio2): + input_audio = input_audio1 + if input_audio is None: + input_audio = input_audio2 + if input_audio is None: + return "You need to upload an audio", None + sampling_rate, audio = input_audio + duration = audio.shape[0] / sampling_rate + if limitation and duration > 30: + return "Error: Audio is too long", None + target_speaker_id = speaker_ids[target_speaker] + + audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) + if len(audio.shape) > 1: + audio = librosa.to_mono(audio.transpose(1, 0)) + if sampling_rate != 16000: + audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) + with torch.inference_mode(): + units = hubert.units(torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0).to(device)) + with no_grad(): + unit_lengths = LongTensor([units.size(1)]).to(device) + sid = LongTensor([target_speaker_id]).to(device) + audio = model.infer(units, unit_lengths, sid=sid, noise_scale=.667, + noise_scale_w=0.8)[0][0, 0].data.cpu().float().numpy() + del units, unit_lengths, sid + return "Success", (hps.data.sampling_rate, audio) + + return soft_vc_fn + + +def create_to_symbol_fn(hps): + def to_symbol_fn(is_symbol_input, input_text, temp_text): + return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ + else (temp_text, temp_text) + + return to_symbol_fn + + +download_audio_js = """ +() =>{{ + let root = document.querySelector("body > gradio-app"); + if (root.shadowRoot != null) + root = root.shadowRoot; + 
let audio = root.querySelector("#{audio_id}").querySelector("audio"); + if (audio == undefined) + return; + audio = audio.src; + let oA = document.createElement("a"); + oA.download = Math.floor(Math.random()*100000000)+'.wav'; + oA.href = audio; + document.body.appendChild(oA); + oA.click(); + oA.remove(); +}} +""" + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--device', type=str, default='cpu') + parser.add_argument("--share", action="store_true", default=False, help="share gradio app") + args = parser.parse_args() + + device = torch.device(args.device) + models_tts = [] + models_vc = [] + models_soft_vc = [] + with open("saved_model/info.json", "r", encoding="utf-8") as f: + models_info = json.load(f) + for i, info in models_info.items(): + name = info["title"] + author = info["author"] + lang = info["lang"] + example = info["example"] + config_path = f"saved_model/{i}/config.json" + model_path = f"saved_model/{i}/model.pth" + cover = info["cover"] + cover_path = f"saved_model/{i}/{cover}" if cover else None + hps = utils.get_hparams_from_file(config_path) + model = SynthesizerTrn( + len(hps.symbols), + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + **hps.model) + utils.load_checkpoint(model_path, model, None) + model.eval().to(device) + speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"] + speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"] + + t = info["type"] + if t == "vits": + models_tts.append((name, author, cover_path, speakers, lang, example, + hps.symbols, create_tts_fn(model, hps, speaker_ids), + create_to_symbol_fn(hps))) + models_vc.append((name, author, cover_path, speakers, create_vc_fn(model, hps, speaker_ids))) + elif t == "soft-vits-vc": + models_soft_vc.append((name, author, cover_path, speakers, create_soft_vc_fn(model, hps, speaker_ids))) + + hubert = torch.hub.load("bshall/hubert:main", "hubert_soft", trust_repo=True).to(device) + + app = gr.Blocks() + + with app: + gr.Markdown("# Moe TTS And Voice Conversion Using VITS Model\n\n" + "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.moegoe)\n\n" + "[Open In Colab]" + "(https://colab.research.google.com/drive/14Pb8lpmwZL-JI5Ub6jpG4sz2-8KS0kbS?usp=sharing)" + " without queue and length limitation.\n\n" + "Feel free to [open discussion](https://huggingface.co/spaces/skytnt/moe-tts/discussions/new) " + "if you want to add your model to this app.") + with gr.Tabs(): + with gr.TabItem("TTS"): + with gr.Tabs(): + for i, (name, author, cover_path, speakers, lang, example, symbols, tts_fn, + to_symbol_fn) in enumerate(models_tts): + with gr.TabItem(f"model{i}"): + with gr.Column(): + cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" + gr.Markdown(f"## {name}\n\n" + f"{cover_markdown}" + f"model author: {author}\n\n" + f"language: {lang}") + tts_input1 = gr.TextArea(label="Text (150 words limitation)", value=example, + elem_id=f"tts-input{i}") + tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, + type="index", value=speakers[0]) + tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.5, maximum=2, step=0.1) + with gr.Accordion(label="Advanced Options", open=False): + temp_text_var = gr.Variable() + symbol_input = gr.Checkbox(value=False, label="Symbol input") + symbol_list = gr.Dataset(label="Symbol list", components=[tts_input1], + samples=[[x] for x in symbols], + elem_id=f"symbol-list{i}") + 
symbol_list_json = gr.Json(value=symbols, visible=False) + tts_submit = gr.Button("Generate", variant="primary") + tts_output1 = gr.Textbox(label="Output Message") + tts_output2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio{i}") + download = gr.Button("Download Audio") + download.click(None, [], [], _js=download_audio_js.format(audio_id=f"tts-audio{i}")) + + tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, symbol_input], + [tts_output1, tts_output2]) + symbol_input.change(to_symbol_fn, + [symbol_input, tts_input1, temp_text_var], + [tts_input1, temp_text_var]) + symbol_list.click(None, [symbol_list, symbol_list_json], [], + _js=f""" + (i,symbols) => {{ + let root = document.querySelector("body > gradio-app"); + if (root.shadowRoot != null) + root = root.shadowRoot; + let text_input = root.querySelector("#tts-input{i}").querySelector("textarea"); + let startPos = text_input.selectionStart; + let endPos = text_input.selectionEnd; + let oldTxt = text_input.value; + let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); + text_input.value = result; + let x = window.scrollX, y = window.scrollY; + text_input.focus(); + text_input.selectionStart = startPos + symbols[i].length; + text_input.selectionEnd = startPos + symbols[i].length; + text_input.blur(); + window.scrollTo(x, y); + return []; + }}""") + + with gr.TabItem("Voice Conversion"): + with gr.Tabs(): + for i, (name, author, cover_path, speakers, vc_fn) in enumerate(models_vc): + with gr.TabItem(f"model{i}"): + cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" + gr.Markdown(f"## {name}\n\n" + f"{cover_markdown}" + f"model author: {author}") + vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index", + value=speakers[0]) + vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", + value=speakers[min(len(speakers) - 1, 1)]) + vc_input3 = gr.Audio(label="Input Audio (30s limitation)") + vc_submit = gr.Button("Convert", variant="primary") + vc_output1 = gr.Textbox(label="Output Message") + vc_output2 = gr.Audio(label="Output Audio", elem_id=f"vc-audio{i}") + download = gr.Button("Download Audio") + download.click(None, [], [], _js=download_audio_js.format(audio_id=f"vc-audio{i}")) + vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2]) + with gr.TabItem("Soft Voice Conversion"): + with gr.Tabs(): + for i, (name, author, cover_path, speakers, soft_vc_fn) in enumerate(models_soft_vc): + with gr.TabItem(f"model{i}"): + cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" + gr.Markdown(f"## {name}\n\n" + f"{cover_markdown}" + f"model author: {author}") + vc_input1 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", + value=speakers[0]) + source_tabs = gr.Tabs() + with source_tabs: + with gr.TabItem("microphone"): + vc_input2 = gr.Audio(label="Input Audio (30s limitation)", source="microphone") + with gr.TabItem("upload"): + vc_input3 = gr.Audio(label="Input Audio (30s limitation)", source="upload") + vc_submit = gr.Button("Convert", variant="primary") + vc_output1 = gr.Textbox(label="Output Message") + vc_output2 = gr.Audio(label="Output Audio", elem_id=f"svc-audio{i}") + download = gr.Button("Download Audio") + download.click(None, [], [], _js=download_audio_js.format(audio_id=f"svc-audio{i}")) + # clear inputs + source_tabs.set_event_trigger("change", None, [], [vc_input2, vc_input3], + js="()=>[null,null]") + vc_submit.click(soft_vc_fn, [vc_input1, vc_input2, 
vc_input3], + [vc_output1, vc_output2]) + gr.Markdown( + "unofficial demo for \n\n" + "- [https://github.com/CjangCjengh/MoeGoe](https://github.com/CjangCjengh/MoeGoe)\n" + "- [https://github.com/Francis-Komizu/VITS](https://github.com/Francis-Komizu/VITS)\n" + "- [https://github.com/luoyily/MoeTTS](https://github.com/luoyily/MoeTTS)\n" + "- [https://github.com/Francis-Komizu/Sovits](https://github.com/Francis-Komizu/Sovits)" + ) + app.queue(concurrency_count=3).launch(show_api=False, share=args.share) diff --git a/attentions.py b/attentions.py new file mode 100644 index 0000000000000000000000000000000000000000..86bc73b5fe98cc7b443e9078553920346c996707 --- /dev/null +++ b/attentions.py @@ -0,0 +1,300 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F + +import commons +from modules import LayerNorm + + +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) + 
self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + """ + x: decoder input + h: encoder output + """ + self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) + encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + assert t_s == t_t, "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) + scores_local = self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." 
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + assert t_s == t_t, "Local attention is only available for self-attention." + block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) + x_flat = x.view([batch, heads, length**2 + length*(length -1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x diff --git a/chinese_dialect_lexicons/changzhou.json b/chinese_dialect_lexicons/changzhou.json new file mode 100644 index 0000000000000000000000000000000000000000..0a47b34211717e26fcdc3606726c1e6e59336e98 --- /dev/null +++ b/chinese_dialect_lexicons/changzhou.json @@ -0,0 +1,23 @@ +{ + "name": "Changzhou dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "changzhou.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "changzhou.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/changzhou.ocd2 b/chinese_dialect_lexicons/changzhou.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..d2dc15df0905c6c1fe1ba0ee4408eb4aed13cd0c --- /dev/null +++ b/chinese_dialect_lexicons/changzhou.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db4ec02be9812e804291a88f9a984f544e221ed472f682bba8da5ecbefbabd8c +size 96119 diff --git a/chinese_dialect_lexicons/cixi.json b/chinese_dialect_lexicons/cixi.json new file mode 100644 index 0000000000000000000000000000000000000000..8f6827fa9b1b9a156d311d76d9b197865a3d17fb --- /dev/null +++ b/chinese_dialect_lexicons/cixi.json @@ -0,0 +1,23 @@ +{ + "name": "Cixi dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "cixi.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "cixi.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/cixi.ocd2 b/chinese_dialect_lexicons/cixi.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..cee307cf789663d2dcbc75c67bf4689ef2465ea7 --- /dev/null +++ b/chinese_dialect_lexicons/cixi.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8113aca87c4728c66cfa6c7b5adfbb596a2930df9b7c6187c6a227ff2de87f00 +size 98015 diff --git a/chinese_dialect_lexicons/fuyang.json b/chinese_dialect_lexicons/fuyang.json new file mode 100644 index 0000000000000000000000000000000000000000..1dcc6726597eaa0156a3b3eabfa6d0f63b5b14c5 --- /dev/null +++ b/chinese_dialect_lexicons/fuyang.json @@ -0,0 +1,23 @@ +{ + "name": "Fuyang dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "fuyang.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "fuyang.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/fuyang.ocd2 b/chinese_dialect_lexicons/fuyang.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..55064976ce3663c3927a52f31acc1d0aca9653ac --- /dev/null +++ b/chinese_dialect_lexicons/fuyang.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98e1fbec75e090550cf131de226a1d867c7896b51170f8d7d21f9101297f4c08 +size 83664 diff --git a/chinese_dialect_lexicons/hangzhou.json b/chinese_dialect_lexicons/hangzhou.json new file mode 100644 index 0000000000000000000000000000000000000000..7f8ae5d3e5a79c61511a5ee2f83e14213d103c0d --- /dev/null +++ b/chinese_dialect_lexicons/hangzhou.json @@ -0,0 +1,19 @@ +{ + "name": "Hangzhounese to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "hangzhou.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "hangzhou.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/hangzhou.ocd2 b/chinese_dialect_lexicons/hangzhou.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..a22d3916b76fd2ac070239711137410c959778ea --- /dev/null +++ b/chinese_dialect_lexicons/hangzhou.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7a9eb5fbd3b8c91745dbb2734f2700b75a47c3821e381566afc567d7da4d9d5 +size 427268 diff --git a/chinese_dialect_lexicons/jiading.json b/chinese_dialect_lexicons/jiading.json new file mode 100644 index 0000000000000000000000000000000000000000..13ac26d6ea90f3253192d1cdd88abef86a9cee55 --- /dev/null +++ b/chinese_dialect_lexicons/jiading.json @@ -0,0 +1,23 @@ +{ + "name": "Jiading dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "jiading.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "jiading.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/jiading.ocd2 b/chinese_dialect_lexicons/jiading.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..3341b573e25ac5b38041cfe4ce2a47fd38aa757e --- /dev/null +++ b/chinese_dialect_lexicons/jiading.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f3ac33214e65e7223e8c561bc12ec90a2d87db3cf8d20e87a30bbd8eb788187 +size 111144 diff --git a/chinese_dialect_lexicons/jiashan.json b/chinese_dialect_lexicons/jiashan.json new file mode 100644 index 0000000000000000000000000000000000000000..14088ecbd3f619951ba92965699e65395e8c1fbd --- /dev/null +++ b/chinese_dialect_lexicons/jiashan.json @@ -0,0 +1,23 @@ +{ + "name": "Jiashan dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "jiashan.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": 
"jiashan.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/jiashan.ocd2 b/chinese_dialect_lexicons/jiashan.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..198dc692b6b0155456864bf520973af42e049fe7 --- /dev/null +++ b/chinese_dialect_lexicons/jiashan.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6310729b85976b6e6407b4f66ad13a3ad7a51a42f3c05c98e294bcbb3159456c +size 71716 diff --git a/chinese_dialect_lexicons/jingjiang.json b/chinese_dialect_lexicons/jingjiang.json new file mode 100644 index 0000000000000000000000000000000000000000..b66c9b729c5e4524d60d70af393ce7c3b7e2c958 --- /dev/null +++ b/chinese_dialect_lexicons/jingjiang.json @@ -0,0 +1,23 @@ +{ + "name": "Jingjiang dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "jingjiang.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "jingjiang.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/jingjiang.ocd2 b/chinese_dialect_lexicons/jingjiang.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..ebe91068ab23792dbfb9ab7cdd9c4d1d949482f8 --- /dev/null +++ b/chinese_dialect_lexicons/jingjiang.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:154d9cac032a3284a6aa175689a5805f068f6896429009a7d94d41616694131f +size 86093 diff --git a/chinese_dialect_lexicons/jyutjyu.json b/chinese_dialect_lexicons/jyutjyu.json new file mode 100644 index 0000000000000000000000000000000000000000..4248e3d48d871ed5f9fe5c038918ec532c4cfca8 --- /dev/null +++ b/chinese_dialect_lexicons/jyutjyu.json @@ -0,0 +1,19 @@ +{ + "name": "Cantonese to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "jyutjyu.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "jyutjyu.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/jyutjyu.ocd2 b/chinese_dialect_lexicons/jyutjyu.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..80508286ca40c7a1103792a6e5f055879fbd5762 --- /dev/null +++ b/chinese_dialect_lexicons/jyutjyu.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aea11bfe51b184b3f000d20ab49757979b216219203839d2b2e3c1f990a13fa5 +size 2432991 diff --git a/chinese_dialect_lexicons/linping.json b/chinese_dialect_lexicons/linping.json new file mode 100644 index 0000000000000000000000000000000000000000..134d411c8d663e55e12517e37abd1d2ceda63453 --- /dev/null +++ b/chinese_dialect_lexicons/linping.json @@ -0,0 +1,23 @@ +{ + "name": "Linping dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "linping.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "linping.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/linping.ocd2 b/chinese_dialect_lexicons/linping.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..5cb4e4d09e3efb2c126434849f3e65c0e8e4df70 --- /dev/null +++ b/chinese_dialect_lexicons/linping.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fcd3b53e5aa6cd64419835c14769d53cc230e229c0fbd20efb65c46e07b712b +size 65351 diff --git a/chinese_dialect_lexicons/ningbo.json b/chinese_dialect_lexicons/ningbo.json new file mode 100644 index 
0000000000000000000000000000000000000000..4831f95ca13e5812572979ca55520e015c4524aa --- /dev/null +++ b/chinese_dialect_lexicons/ningbo.json @@ -0,0 +1,19 @@ +{ + "name": "Ningbonese to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "ningbo.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "ningbo.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/ningbo.ocd2 b/chinese_dialect_lexicons/ningbo.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..e2c731b7765519203164e2d34d069e2803358608 --- /dev/null +++ b/chinese_dialect_lexicons/ningbo.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5876b000f611ea52bf18cda5bcbdd0cfcc55e1c09774d9a24e3b5c7d90002435 +size 386414 diff --git a/chinese_dialect_lexicons/pinghu.json b/chinese_dialect_lexicons/pinghu.json new file mode 100644 index 0000000000000000000000000000000000000000..09cd0cc3ea22667eca66ec77601b40e93262f310 --- /dev/null +++ b/chinese_dialect_lexicons/pinghu.json @@ -0,0 +1,23 @@ +{ + "name": "Pinghu dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "pinghu.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "pinghu.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/pinghu.ocd2 b/chinese_dialect_lexicons/pinghu.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..f71e6f0c2007562f8c702a7b351d6df0799f57a9 --- /dev/null +++ b/chinese_dialect_lexicons/pinghu.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01b0e0dad8cddb0e2cb23899d4a2f97f2c0b369d5ff369076c5cdb7bd4528e4f +size 69420 diff --git a/chinese_dialect_lexicons/ruao.json b/chinese_dialect_lexicons/ruao.json new file mode 100644 index 0000000000000000000000000000000000000000..842518ccc96272a9f718c4ae62ea39ac27c2e80b --- /dev/null +++ b/chinese_dialect_lexicons/ruao.json @@ -0,0 +1,23 @@ +{ + "name": "Ruao dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "ruao.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "ruao.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/ruao.ocd2 b/chinese_dialect_lexicons/ruao.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..598a918577ceeecab3cb97f73f628bf67fb014e7 --- /dev/null +++ b/chinese_dialect_lexicons/ruao.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:259a42ad761233f7d6ca6eec39268e27a65b2ded025f2b7725501cf5e3e02d8a +size 58841 diff --git a/chinese_dialect_lexicons/sanmen.json b/chinese_dialect_lexicons/sanmen.json new file mode 100644 index 0000000000000000000000000000000000000000..85642ce122fe197cc7f15466d786dffbd9538eed --- /dev/null +++ b/chinese_dialect_lexicons/sanmen.json @@ -0,0 +1,23 @@ +{ + "name": "Sanmen dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "sanmen.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "sanmen.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/sanmen.ocd2 b/chinese_dialect_lexicons/sanmen.ocd2 new file mode 100644 index 
0000000000000000000000000000000000000000..b0db6bdc424a514f4b516b2c58c0acbe8098bb31 --- /dev/null +++ b/chinese_dialect_lexicons/sanmen.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afa70a920b6805e279ed15246026b70dbeb2a8329ad585fbae8cfdf45e7489a9 +size 80210 diff --git a/chinese_dialect_lexicons/shaoxing.json b/chinese_dialect_lexicons/shaoxing.json new file mode 100644 index 0000000000000000000000000000000000000000..ff4eb451f1e21c0c0eedd7c1333d022d0fa05ae4 --- /dev/null +++ b/chinese_dialect_lexicons/shaoxing.json @@ -0,0 +1,23 @@ +{ + "name": "Shaoxing dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "shaoxing.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "shaoxing.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/shaoxing.ocd2 b/chinese_dialect_lexicons/shaoxing.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..1c65bf712a33baf64ba5fd029d0036c011589fe8 --- /dev/null +++ b/chinese_dialect_lexicons/shaoxing.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a347aa25bf435803727b4194cf34de4de3e61f03427ee21043a711cdb0b9d940 +size 113108 diff --git a/chinese_dialect_lexicons/suichang.json b/chinese_dialect_lexicons/suichang.json new file mode 100644 index 0000000000000000000000000000000000000000..19a7654385d1e3c8f021169764b0b7f72eb1a0e3 --- /dev/null +++ b/chinese_dialect_lexicons/suichang.json @@ -0,0 +1,23 @@ +{ + "name": "Suichang dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "suichang.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "suichang.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/suichang.ocd2 b/chinese_dialect_lexicons/suichang.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..08c663c7adafd83aed8bc9ec14b2e1b92f0c7058 --- /dev/null +++ b/chinese_dialect_lexicons/suichang.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8062749ff70db65d469d91bd92375607f8648a138b896e58cf7c28edb8f970e +size 81004 diff --git a/chinese_dialect_lexicons/suzhou.json b/chinese_dialect_lexicons/suzhou.json new file mode 100644 index 0000000000000000000000000000000000000000..d9e93b7ca96881a62e0a139ec452a3ae390d794c --- /dev/null +++ b/chinese_dialect_lexicons/suzhou.json @@ -0,0 +1,19 @@ +{ + "name": "Suzhounese to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "suzhou.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "suzhou.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/suzhou.ocd2 b/chinese_dialect_lexicons/suzhou.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..3a5bfa9452556cf671f5e4d0cd73f2c6923ef972 --- /dev/null +++ b/chinese_dialect_lexicons/suzhou.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8815595a248135874329e7f34662dd243a266be3e8375e8409f95da95d6d540 +size 506184 diff --git a/chinese_dialect_lexicons/tiantai.json b/chinese_dialect_lexicons/tiantai.json new file mode 100644 index 0000000000000000000000000000000000000000..16d2c2adc2b415fea2705dbeb030027154bac383 --- /dev/null +++ b/chinese_dialect_lexicons/tiantai.json @@ -0,0 +1,23 @@ +{ + "name": "Tiantai dialect to 
IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "tiantai.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "tiantai.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/tiantai.ocd2 b/chinese_dialect_lexicons/tiantai.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..1ac95abe173dab15b7c325ede05c2907bafbf09e --- /dev/null +++ b/chinese_dialect_lexicons/tiantai.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:339e0ba454444dbf8fbe75de6f49769d11dfe2f2f5ba7dea74ba20fba5d6d343 +size 120951 diff --git a/chinese_dialect_lexicons/tongxiang.json b/chinese_dialect_lexicons/tongxiang.json new file mode 100644 index 0000000000000000000000000000000000000000..5f2ad775c8bf1c6b03b9ce3ffa79d0fd7c571a59 --- /dev/null +++ b/chinese_dialect_lexicons/tongxiang.json @@ -0,0 +1,23 @@ +{ + "name": "Tongxiang dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "tongxiang.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "tongxiang.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/tongxiang.ocd2 b/chinese_dialect_lexicons/tongxiang.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..eb505b5c219c9011e3323d3478ffcaa7028bc030 --- /dev/null +++ b/chinese_dialect_lexicons/tongxiang.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7432d85588eb8ba34e7baea9f26af8d332572037ff7d41a6730f96c02e5fd063 +size 137499 diff --git a/chinese_dialect_lexicons/wenzhou.json b/chinese_dialect_lexicons/wenzhou.json new file mode 100644 index 0000000000000000000000000000000000000000..f5ba9ed3aa3331245920fc384717e403196acfa0 --- /dev/null +++ b/chinese_dialect_lexicons/wenzhou.json @@ -0,0 +1,23 @@ +{ + "name": "Wenzhou dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "wenzhou.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "wenzhou.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/wenzhou.ocd2 b/chinese_dialect_lexicons/wenzhou.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..20e7bd472d314e8f576abf0533d7c8dd88983d9f --- /dev/null +++ b/chinese_dialect_lexicons/wenzhou.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed05c0c615a38f55a139a73bcc3960897d8cd567c9482a0a06b272eb0b46aa05 +size 83121 diff --git a/chinese_dialect_lexicons/wuxi.json b/chinese_dialect_lexicons/wuxi.json new file mode 100644 index 0000000000000000000000000000000000000000..e9ece776f0990fc70f81ee16f6f937a5a7d3eadb --- /dev/null +++ b/chinese_dialect_lexicons/wuxi.json @@ -0,0 +1,19 @@ +{ + "name": "Wuxinese to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "wuxi.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "wuxi.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/wuxi.ocd2 b/chinese_dialect_lexicons/wuxi.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..256d1f449155d580d666818dcdd0637684177c92 --- /dev/null +++ b/chinese_dialect_lexicons/wuxi.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:64f27ffaa75e542e4464e53c4acf94607be1526a90922ac8b28870104aaebdff +size 358666 diff --git a/chinese_dialect_lexicons/xiaoshan.json b/chinese_dialect_lexicons/xiaoshan.json new file mode 100644 index 0000000000000000000000000000000000000000..c2881ee68c172c427d27209bfc4964f10092a395 --- /dev/null +++ b/chinese_dialect_lexicons/xiaoshan.json @@ -0,0 +1,23 @@ +{ + "name": "Xiaoshan dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "xiaoshan.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "xiaoshan.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/xiaoshan.ocd2 b/chinese_dialect_lexicons/xiaoshan.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..6c6e7f57ea81bdbac7474649cf332a0ec9866e88 --- /dev/null +++ b/chinese_dialect_lexicons/xiaoshan.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:651bd314c5e57312eeee537037f6c6e56a12ef446216264aad70bf68bf6a283d +size 77119 diff --git a/chinese_dialect_lexicons/xiashi.json b/chinese_dialect_lexicons/xiashi.json new file mode 100644 index 0000000000000000000000000000000000000000..1fe167e77011f6e64608614ae483cd78e06521e1 --- /dev/null +++ b/chinese_dialect_lexicons/xiashi.json @@ -0,0 +1,19 @@ +{ + "name": "Xiashi dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "xiashi.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "xiashi.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/xiashi.ocd2 b/chinese_dialect_lexicons/xiashi.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..c69ae5e0ecbcda7d7b51dda1574bd9e31a4e303c --- /dev/null +++ b/chinese_dialect_lexicons/xiashi.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bc638633b82e196776a3adfc621c854d0da923b7cff6e7d0c9576723cdc03cd +size 70314 diff --git a/chinese_dialect_lexicons/yixing.json b/chinese_dialect_lexicons/yixing.json new file mode 100644 index 0000000000000000000000000000000000000000..3158b8591646171fc45cc377943bac1185a8d239 --- /dev/null +++ b/chinese_dialect_lexicons/yixing.json @@ -0,0 +1,19 @@ +{ + "name": "Yixing dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "yixing.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "yixing.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/yixing.ocd2 b/chinese_dialect_lexicons/yixing.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..49e7069fbe06567e72163cf555c946c4c4475660 --- /dev/null +++ b/chinese_dialect_lexicons/yixing.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c56a73eb531f49f64562bdb714753d37dc015baac943b3264bccba9b2aacf9b +size 155050 diff --git a/chinese_dialect_lexicons/youbu.json b/chinese_dialect_lexicons/youbu.json new file mode 100644 index 0000000000000000000000000000000000000000..b7b929ee696b4bda08c3cb61c7c381a2b41cd389 --- /dev/null +++ b/chinese_dialect_lexicons/youbu.json @@ -0,0 +1,23 @@ +{ + "name": "Youbu dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "youbu.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "youbu.ocd2" + } + ] + } + } + ] +} \ No newline at end 
of file diff --git a/chinese_dialect_lexicons/youbu.ocd2 b/chinese_dialect_lexicons/youbu.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..f52ede68cf2bca2e7888785724f842b427076067 --- /dev/null +++ b/chinese_dialect_lexicons/youbu.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fb1aef6b12d9474249717ce7c5b5303aeeea4d8d26943d62d269568b2985c17 +size 84985 diff --git a/chinese_dialect_lexicons/zaonhe.json b/chinese_dialect_lexicons/zaonhe.json new file mode 100644 index 0000000000000000000000000000000000000000..f50af3002c806f50a51757de7357be9fb2ae9d4e --- /dev/null +++ b/chinese_dialect_lexicons/zaonhe.json @@ -0,0 +1,19 @@ +{ + "name": "Shanghainese to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "zaonhe.ocd2" + } + }, + "conversion_chain": [{ + "dict": { + "type": "group", + "dicts": [{ + "type": "ocd2", + "file": "zaonhe.ocd2" + }] + } + }] +} diff --git a/chinese_dialect_lexicons/zaonhe.ocd2 b/chinese_dialect_lexicons/zaonhe.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..f356103416c06d69dcc56d64a1de91aeb32bafe6 --- /dev/null +++ b/chinese_dialect_lexicons/zaonhe.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a71b5a97eb49699f440137391565d208ea82156f0765986b7f3e16909e15672e +size 4095228 diff --git a/chinese_dialect_lexicons/zhenru.json b/chinese_dialect_lexicons/zhenru.json new file mode 100644 index 0000000000000000000000000000000000000000..1809396d6c972b9468c4b2d549881a6d59600e84 --- /dev/null +++ b/chinese_dialect_lexicons/zhenru.json @@ -0,0 +1,23 @@ +{ + "name": "Zhenru dialect to IPA", + "segmentation": { + "type": "mmseg", + "dict": { + "type": "ocd2", + "file": "zhenru.ocd2" + } + }, + "conversion_chain": [ + { + "dict": { + "type": "group", + "dicts": [ + { + "type": "ocd2", + "file": "zhenru.ocd2" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/chinese_dialect_lexicons/zhenru.ocd2 b/chinese_dialect_lexicons/zhenru.ocd2 new file mode 100644 index 0000000000000000000000000000000000000000..3132b10e0e3ec31887216931324dbf59468c2bbf --- /dev/null +++ b/chinese_dialect_lexicons/zhenru.ocd2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d71dd437715e1055534f929e1591b11086a265d87480694e723eb4a6a95874e8 +size 56967 diff --git a/commons.py b/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9 --- /dev/null +++ b/commons.py @@ -0,0 +1,172 @@ +import math +import torch +from torch.nn import functional as F +import torch.jit + + +def script_method(fn, _rcb=None): + return fn + + +def script(obj, optimize=True, _frames_up=0, _rcb=None): + return obj + + +torch.jit.script_method = script_method +torch.jit.script = script + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size*dilation - dilation)/2) + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def intersperse(lst, item): + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + +def kl_divergence(m_p, logs_p, m_q, logs_q): + """KL(P||Q)""" + kl = (logs_q - logs_p) - 0.5 + kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q)
+    return kl
+
+
+def rand_gumbel(shape):
+    """Sample from the Gumbel distribution, protect from overflows."""
+    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+    return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+    return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+    ret = torch.zeros_like(x[:, :, :segment_size])
+    for i in range(x.size(0)):
+        idx_str = ids_str[i]
+        idx_end = idx_str + segment_size
+        ret[i] = x[i, :, idx_str:idx_end]
+    return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+    b, d, t = x.size()
+    if x_lengths is None:
+        x_lengths = t
+    ids_str_max = x_lengths - segment_size + 1
+    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+    ret = slice_segments(x, ids_str, segment_size)
+    return ret, ids_str
+
+
+def get_timing_signal_1d(
+        length, channels, min_timescale=1.0, max_timescale=1.0e4):
+    position = torch.arange(length, dtype=torch.float)
+    num_timescales = channels // 2
+    log_timescale_increment = (
+            math.log(float(max_timescale) / float(min_timescale)) /
+            (num_timescales - 1))
+    inv_timescales = min_timescale * torch.exp(
+        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+    signal = F.pad(signal, [0, 0, 0, channels % 2])
+    signal = signal.view(1, channels, length)
+    return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+    return mask
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+    n_channels_int = n_channels[0]
+    in_act = input_a + input_b
+    t_act = torch.tanh(in_act[:, :n_channels_int, :])
+    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+    acts = t_act * s_act
+    return acts
+
+
+def shift_1d(x):
+    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+    return x
+
+
+def sequence_mask(length, max_length=None):
+    if max_length is None:
+        max_length = length.max()
+    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+    return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+    """
+    duration: [b, 1, t_x]
+    mask: [b, 1, t_y, t_x]
+    """
+    device = duration.device
+
+    b, _, t_y, t_x = mask.shape
+    cum_duration = torch.cumsum(duration, -1)
+
+    cum_duration_flat = cum_duration.view(b * t_x)
+    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+    path = path.view(b, t_x, t_y)
+    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+    path = path.unsqueeze(1).transpose(2, 3) * mask
+    return path
+
+
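+# Clips gradients element-wise to [-clip_value, clip_value] and returns the
+# total gradient norm computed before clipping (useful for logging).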
+def clip_grad_value_(parameters, clip_value, norm_type=2):
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+    parameters = list(filter(lambda p: p.grad is not None, parameters))
+    norm_type = float(norm_type)
+    if clip_value is not None:
+        clip_value = float(clip_value)
+
+    total_norm = 0
+    for p in parameters:
+        param_norm = p.grad.data.norm(norm_type)
+        total_norm += param_norm.item() ** norm_type
+        if clip_value is not None:
+            p.grad.data.clamp_(min=-clip_value, max=clip_value)
+    total_norm = total_norm ** (1. / norm_type)
+    return total_norm
diff --git a/export_model.py b/export_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..98a49835df5a7a2486e76ddf94fbbb4444b52203
--- /dev/null
+++ b/export_model.py
@@ -0,0 +1,13 @@
+import torch
+
+if __name__ == '__main__':
+    model_path = "saved_model/11/model.pth"
+    output_path = "saved_model/11/model1.pth"
+    checkpoint_dict = torch.load(model_path, map_location='cpu')
+    checkpoint_dict_new = {}
+    for k, v in checkpoint_dict.items():
+        if k == "optimizer":
+            print("remove optimizer")
+            continue
+        checkpoint_dict_new[k] = v
+    torch.save(checkpoint_dict_new, output_path)
diff --git a/mel_processing.py b/mel_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e252e76320522a8a4195a60665168f22769aec2
--- /dev/null
+++ b/mel_processing.py
@@ -0,0 +1,101 @@
+import torch
+import torch.utils.data
+from librosa.filters import mel as librosa_mel_fn
+
+MAX_WAV_VALUE = 32768.0
+
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+    """
+    PARAMS
+    ------
+    C: compression factor
+    """
+    return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression_torch(x, C=1):
+    """
+    PARAMS
+    ------
+    C: compression factor used to compress
+    """
+    return torch.exp(x) / C
+
+
+def spectral_normalize_torch(magnitudes):
+    output = dynamic_range_compression_torch(magnitudes)
+    return output
+
+
+def spectral_de_normalize_torch(magnitudes):
+    output = dynamic_range_decompression_torch(magnitudes)
+    return output
+
+
+mel_basis = {}
+hann_window = {}
+
+
+def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+    if torch.min(y) < -1.:
+        print('min value is ', torch.min(y))
+    if torch.max(y) > 1.:
+        print('max value is ', torch.max(y))
+
+    global hann_window
+    dtype_device = str(y.dtype) + '_' + str(y.device)
+    wnsize_dtype_device = str(win_size) + '_' + dtype_device
+    if wnsize_dtype_device not in hann_window:
+        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+    y = y.squeeze(1)
+
+    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+    return spec
+
+
+def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+    global mel_basis
+    dtype_device = str(spec.dtype) + '_' + str(spec.device)
+    fmax_dtype_device = str(fmax) + '_' + dtype_device
+    if fmax_dtype_device not in mel_basis:
+        # keyword arguments keep this call compatible with librosa >= 0.10,
+        # where positional use of librosa.filters.mel was removed
+        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+    spec = spectral_normalize_torch(spec)
+    return spec
+
+
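+# Usage sketch (illustrative values, not taken from this repo's configs):
+#   spec = spectrogram_torch(wav, n_fft=1024, sampling_rate=22050, hop_size=256, win_size=1024)
+#   mel = spec_to_mel_torch(spec, 1024, 80, 22050, 0.0, None)
+# mel_spectrogram_torch below fuses both steps into a single call.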
+def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+    if torch.min(y) < -1.:
+        print('min value is ', torch.min(y))
+    if torch.max(y) > 1.:
+        print('max value is ', torch.max(y))
+
+    global mel_basis, hann_window
+    dtype_device = str(y.dtype) + '_' + str(y.device)
+    fmax_dtype_device = str(fmax) + '_' + dtype_device
+    wnsize_dtype_device = str(win_size) + '_' + dtype_device
+    if fmax_dtype_device not in mel_basis:
+        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
+    if wnsize_dtype_device not in hann_window:
+        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+    y = y.squeeze(1)
+
+    # return_complex=False matches spectrogram_torch above; recent PyTorch
+    # versions require this flag to be explicit
+    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+
+    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+    spec = spectral_normalize_torch(spec)
+
+    return spec
diff --git a/models.py b/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..c214bbb0476ba4777093d8bcf032961f09e59496
--- /dev/null
+++ b/models.py
@@ -0,0 +1,549 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import commons
+import modules
+import attentions
+import monotonic_align
+
+from torch.nn import Conv1d, ConvTranspose1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from commons import init_weights, get_padding
+
+
+class StochasticDurationPredictor(nn.Module):
+    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+        super().__init__()
+        filter_channels = in_channels  # NOTE: this override needs to be removed in a future version
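+        # The flows below model a distribution over durations conditioned on the
+        # text encoder output (and, optionally, a speaker embedding), following
+        # the VITS stochastic duration predictor.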
+ self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = modules.Log() + self.flows = nn.ModuleList() + self.flows.append(modules.ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.flows.append(modules.Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + self.post_flows = nn.ModuleList() + self.post_flows.append(modules.ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.post_flows.append(modules.Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) + logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + +class DurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.norm_1 = modules.LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.norm_2 = modules.LayerNorm(filter_channels) + self.proj = nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = 
nn.Conv1d(gin_channels, in_channels, 1) + + def forward(self, x, x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + emotion_embedding): + super().__init__() + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emotion_embedding = emotion_embedding + + if self.n_vocab != 0: + self.emb = nn.Embedding(n_vocab, hidden_channels) + if emotion_embedding: + self.emo_proj = nn.Linear(1024, hidden_channels) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) + + self.encoder = attentions.Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, emotion_embedding=None): + if self.n_vocab != 0: + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + if emotion_embedding is not None: + x = x + self.emo_proj(emotion_embedding.unsqueeze(1)) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return x, m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, + gin_channels=gin_channels, mean_only=True)) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, 
x.size(2)), 1).to(x.dtype) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + +class Generator(torch.nn.Module): + def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, + upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append(weight_norm( + ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), + k, u, padding=(k - u) // 2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, 
use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__(self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + emotion_embedding=False, + **kwargs): + + super().__init__() + self.n_vocab = n_vocab + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + + self.enc_p = TextEncoder(n_vocab, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + emotion_embedding) + self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, + upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, + gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) + + if use_sdp: + self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) + else: + self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) + + if n_speakers > 1: + self.emb_g = 
nn.Embedding(n_speakers, gin_channels) + + def forward(self, x, x_lengths, y, y_lengths, sid=None, emotion_embedding=None): + + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) + if self.n_speakers > 1: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + + with torch.no_grad(): + # negative cross-entropy + s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), + s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 + + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + + w = attn.sum(2) + if self.use_sdp: + l_length = self.dp(x, x_mask, w, g=g) + l_length = l_length / torch.sum(x_mask) + else: + logw_ = torch.log(w + 1e-6) * x_mask + logw = self.dp(x, x_mask, g=g) + l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging + + # expand prior + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) + + z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, + emotion_embedding=None): + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) + if self.n_speakers > 1: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + if self.use_sdp: + logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) + else: + logw = self.dp(x, x_mask, g=g) + w = torch.exp(logw) * x_mask * length_scale + w_ceil = torch.ceil(w) + y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = commons.generate_path(w_ceil, attn_mask) + + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, + 2) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale + z = self.flow(z_p, y_mask, g=g, reverse=True) + o = self.dec((z * y_mask)[:, :, :max_len], g=g) + return o, attn, y_mask, (z, z_p, m_p, logs_p) + + def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): + assert self.n_speakers > 1, "n_speakers have to be larger than 1." 
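+        # Encode with the source speaker, map to the speaker-independent prior
+        # through the shared flow, then invert the flow and decode with the
+        # target speaker embedding.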
+        g_src = self.emb_g(sid_src).unsqueeze(-1)
+        g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
+        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
+        z_p = self.flow(z, y_mask, g=g_src)
+        z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
+        o_hat = self.dec(z_hat * y_mask, g=g_tgt)
+        return o_hat, y_mask, (z, z_p, z_hat)
diff --git a/modules.py b/modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c7fd9cd6eb8b7e0ec0e08957e970744a374a924
--- /dev/null
+++ b/modules.py
@@ -0,0 +1,390 @@
+import copy
+import math
+import numpy as np
+import scipy
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+import commons
+from commons import init_weights, get_padding
+from transforms import piecewise_rational_quadratic_transform
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+        super().__init__()
+        self.in_channels = in_channels
+        self.hidden_channels = hidden_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
+
+        self.conv_layers = nn.ModuleList()
+        self.norm_layers = nn.ModuleList()
+        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+        self.norm_layers.append(LayerNorm(hidden_channels))
+        self.relu_drop = nn.Sequential(
+            nn.ReLU(),
+            nn.Dropout(p_dropout))
+        for _ in range(n_layers-1):
+            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+            self.norm_layers.append(LayerNorm(hidden_channels))
+        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask):
+        x_org = x
+        for i in range(self.n_layers):
+            x = self.conv_layers[i](x * x_mask)
+            x = self.norm_layers[i](x)
+            x = self.relu_drop(x)
+        x = x_org + self.proj(x)
+        return x * x_mask
+
+
+class DDSConv(nn.Module):
+    """
+    Dilated and Depth-Separable Convolution
+    """
+    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+        super().__init__()
+        self.channels = channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+
+        self.drop = nn.Dropout(p_dropout)
+        self.convs_sep = nn.ModuleList()
+        self.convs_1x1 = nn.ModuleList()
+        self.norms_1 = nn.ModuleList()
+        self.norms_2 = nn.ModuleList()
+        for i in range(n_layers):
+            dilation = kernel_size ** i
+            padding = (kernel_size * dilation - dilation) // 2
+            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+                                            groups=channels, dilation=dilation, padding=padding
+                                            ))
+            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+            self.norms_1.append(LayerNorm(channels))
+            self.norms_2.append(LayerNorm(channels))
+
+    def forward(self, x, x_mask, g=None):
+        if g is not None:
+            x = x + g
+        for i in range(self.n_layers):
+            y = self.convs_sep[i](x * x_mask)
+            y = self.norms_1[i](y)
+            y = F.gelu(y)
+            y = self.convs_1x1[i](y)
+            y = self.norms_2[i](y)
+            y = F.gelu(y)
+            y = self.drop(y)
+            x = x + y
+        return x * x_mask
+
+
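+# WN is the WaveNet-style block shared by the posterior encoder and the coupling
+# layers: dilated convolutions with gated tanh/sigmoid activations, plus an
+# optional 1x1 conditioning layer for speaker embeddings.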
+class WN(torch.nn.Module):
+    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+        super(WN, self).__init__()
+        assert (kernel_size % 2 == 1)
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+        self.p_dropout = p_dropout
+
+        self.in_layers = torch.nn.ModuleList()
+        self.res_skip_layers = torch.nn.ModuleList()
+        self.drop = nn.Dropout(p_dropout)
+
+        if gin_channels != 0:
+            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
+            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+        for i in range(n_layers):
+            dilation = dilation_rate ** i
+            padding = int((kernel_size * dilation - dilation) / 2)
+            in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
+                                       dilation=dilation, padding=padding)
+            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+            self.in_layers.append(in_layer)
+
+            # the last layer needs no residual output, only the skip part
+            if i < n_layers - 1:
+                res_skip_channels = 2 * hidden_channels
+            else:
+                res_skip_channels = hidden_channels
+
+            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+            self.res_skip_layers.append(res_skip_layer)
+
+    def forward(self, x, x_mask, g=None, **kwargs):
+        output = torch.zeros_like(x)
+        n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+        if g is not None:
+            g = self.cond_layer(g)
+
+        for i in range(self.n_layers):
+            x_in = self.in_layers[i](x)
+            if g is not None:
+                cond_offset = i * 2 * self.hidden_channels
+                g_l = g[:, cond_offset:cond_offset+2*self.hidden_channels, :]
+            else:
+                g_l = torch.zeros_like(x_in)
+
+            acts = commons.fused_add_tanh_sigmoid_multiply(
+                x_in,
+                g_l,
+                n_channels_tensor)
+            acts = self.drop(acts)
+
+            res_skip_acts = self.res_skip_layers[i](acts)
+            if i < self.n_layers - 1:
+                res_acts = res_skip_acts[:, :self.hidden_channels, :]
+                x = (x + res_acts) * x_mask
+                output = output + res_skip_acts[:, self.hidden_channels:, :]
+            else:
+                output = output + res_skip_acts
+        return output * x_mask
+
+    def remove_weight_norm(self):
+        if self.gin_channels != 0:
+            torch.nn.utils.remove_weight_norm(self.cond_layer)
+        for l in self.in_layers:
+            torch.nn.utils.remove_weight_norm(l)
+        for l in self.res_skip_layers:
+            torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+        super(ResBlock1, self).__init__()
+        self.convs1 = nn.ModuleList([
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                               padding=get_padding(kernel_size, dilation[0]))),
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                               padding=get_padding(kernel_size, dilation[1]))),
+            weight_norm(Conv1d(channels, channels,
kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * x_mask + return x + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if not reverse: + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels = channels + self.m = nn.Parameter(torch.zeros(channels,1)) + self.logs = nn.Parameter(torch.zeros(channels,1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1,2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + + +class ResidualCouplingLayer(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) + self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels]*2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = torch.split(stats, [self.half_channels]*2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * 
x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1,2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + +class ConvFlow(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) + self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels]*2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] + + unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins:] + + x1, logabsdet = piecewise_rational_quadratic_transform(x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound + ) + + x = torch.cat([x0, x1], 1) * x_mask + logdet = torch.sum(logabsdet * x_mask, [1,2]) + if not reverse: + return x, logdet + else: + return x diff --git a/monotonic_align/__init__.py b/monotonic_align/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..40b6f64aa116c74cac2f6a33444c9eeea2fdb38c --- /dev/null +++ b/monotonic_align/__init__.py @@ -0,0 +1,21 @@ +from numpy import zeros, int32, float32 +from torch import from_numpy + +from .core import maximum_path_jit + + +def maximum_path(neg_cent, mask): + """ numba optimized version. + neg_cent: [b, t_t, t_s] + mask: [b, t_t, t_s] + """ + device = neg_cent.device + dtype = neg_cent.dtype + neg_cent = neg_cent.data.cpu().numpy().astype(float32) + path = zeros(neg_cent.shape, dtype=int32) + + t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) + t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) + maximum_path_jit(path, neg_cent, t_t_max, t_s_max) + return from_numpy(path).to(device=device, dtype=dtype) + diff --git a/monotonic_align/core.py b/monotonic_align/core.py new file mode 100644 index 0000000000000000000000000000000000000000..1f940605fe4fd0738fa0006149fcba14ef88223a --- /dev/null +++ b/monotonic_align/core.py @@ -0,0 +1,36 @@ +import numba + + +@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), + nopython=True, nogil=True) +def maximum_path_jit(paths, values, t_ys, t_xs): + b = paths.shape[0] + max_neg_val = -1e9 + for i in range(int(b)): + path = paths[i] + value = values[i] + t_y = t_ys[i] + t_x = t_xs[i] + + v_prev = v_cur = 0.0 + index = t_x - 1 + + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[y - 1, x] + if x == 0: + if y == 0: + v_prev = 0. 
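+                    # x == 0 but y > 0: there is no diagonal predecessor, so the
+                    # else branch rules this transition out with -inf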
+                    else:
+                        v_prev = max_neg_val
+                else:
+                    v_prev = value[y - 1, x - 1]
+                value[y, x] += max(v_prev, v_cur)
+
+        for y in range(t_y - 1, -1, -1):
+            path[y, index] = 1
+            if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
+                index = index - 1
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b0518335c9750d84b5e1df4ce5d987feb4f0a8c1
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,24 @@
+numba
+librosa
+matplotlib
+numpy
+phonemizer
+scipy
+tensorboard
+torch
+torchvision
+torchaudio
+unidecode
+pyopenjtalk>=0.3.0
+jamo
+pypinyin
+ko_pron
+jieba
+cn2an
+protobuf
+inflect
+eng_to_ipa
+indic_transliteration
+num_thai
+opencc
+gradio
\ No newline at end of file
diff --git a/saved_model/0/config.json b/saved_model/0/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ca32af4df2d780faf623d2418715bfb2dc77522b
--- /dev/null
+++ b/saved_model/0/config.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abd4346c22576c4de22f8a0bef8ddaefe0c1313627f980cf6b4156dec9705b78
+size 1260
diff --git a/saved_model/0/cover.jpg b/saved_model/0/cover.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7a4452a8872140eb9b81c2e8b469e691c904ae68
--- /dev/null
+++ b/saved_model/0/cover.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d443da7d7eb5c5b054077ece85b68b2b94bf5db2b51001fe32404deea7f0717
+size 39900
diff --git a/saved_model/0/model.pth b/saved_model/0/model.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f8e0df475374bc4faa35e3fb6295a160ac263570
--- /dev/null
+++ b/saved_model/0/model.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17a70ab64709e25401441bc54b01bfe10370f2f7f7916a243c86fa87a6cdb9f5
+size 476620221
diff --git a/saved_model/1/config.json b/saved_model/1/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5170c826dad6cec1ff138c8471c02f45ab779896
--- /dev/null
+++ b/saved_model/1/config.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8022ffb2ae81ff2c84edde380bbdfc60b9ad933f767c5187d4fcfd5c964315b1
+size 1302
diff --git a/saved_model/1/cover.jpg b/saved_model/1/cover.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e211d4fd7edb521e8c116b200d8ce7dae770e2ad
--- /dev/null
+++ b/saved_model/1/cover.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0123d1fa78031a85890869891b843b2f079c66fed12cf510cb6025e2e4db04c3
+size 50303
diff --git a/saved_model/1/model.pth b/saved_model/1/model.pth
new file mode 100644
index 0000000000000000000000000000000000000000..abbed3288de8a62bf45be828f094a7e819ff7a5c
--- /dev/null
+++ b/saved_model/1/model.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73af1a9812c8edb038bad97b30feddb34a6e3834e1a86181873e02dd916b7f81
+size 158884173
diff --git a/saved_model/10/config.json b/saved_model/10/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..778f9d9196aeff4150ffd61dbbd43feaabf740fa
--- /dev/null
+++ b/saved_model/10/config.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06b3c77565155ac550a3264e24f5c59627c6f8e4f9953a5f2423f6d375823e52
+size 1228
diff --git a/saved_model/10/cover.jpg b/saved_model/10/cover.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..880d6ae3656475fe47d59d9e501b2cc30532d2f4
--- /dev/null
+++
b/saved_model/10/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb5d83e14c8cd74a20185d8b9535f9a1699a15057f7ebce87a32f32f5aad94ba +size 103679 diff --git a/saved_model/10/model.pth b/saved_model/10/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..e2fae3c69a14d338b3fbbf148a7f9291e2d94ef9 --- /dev/null +++ b/saved_model/10/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d7d3dc42ad38c3479b41c1060c442ba33018069be637e664fefafb4bb4ad764 +size 220972879 diff --git a/saved_model/11/config.json b/saved_model/11/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bfd1ec546bf631240f541800abf7d769b46a8857 --- /dev/null +++ b/saved_model/11/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2aa2128f54f61bf1b01951f7d2e0e2d5a835a9750a4a9ef8b4854ac25324823 +size 1187 diff --git a/saved_model/11/cover.jpg b/saved_model/11/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6150d3f10b8e374f37d7e62dc0feb019f1d2911d --- /dev/null +++ b/saved_model/11/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ce5e75924dca82bb7cddbe9715f1254fe7aa0fc068085f72ff893c9324c586e +size 30214 diff --git a/saved_model/11/model.pth b/saved_model/11/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..f55d65e4af3416413b950a7e3d1cd051cf18b4af --- /dev/null +++ b/saved_model/11/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56d55e4672c5f335ebae30728529e5efb8a9c3975a9b63e6590454ef8769ae70 +size 203264375 diff --git a/saved_model/12/config.json b/saved_model/12/config.json new file mode 100644 index 0000000000000000000000000000000000000000..aea467a2c0006b8b1059ebcb066a296b1112d0c9 --- /dev/null +++ b/saved_model/12/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2d4fe332cfdbe95abc1152d977acdffebe88a28db6830f6c57e1cfb47a2799d +size 1397 diff --git a/saved_model/12/cover.jpg b/saved_model/12/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78647db1b3333b6884d605bd44d87ed2bc3b5037 --- /dev/null +++ b/saved_model/12/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4f93df7045805bcb028b92f464710e10961bae3ce43cddf2c289212673312e2 +size 41024 diff --git a/saved_model/12/model.pth b/saved_model/12/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..127bf646099e974d47c58d9c413dbde911758e0c --- /dev/null +++ b/saved_model/12/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf8761f1f7818c961651d2c0d914821f742a9a1df8841aae376c888289ae5609 +size 158888269 diff --git a/saved_model/13/config.json b/saved_model/13/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f3c8a46e394872ddfac9eb79605f7a56e4cee268 --- /dev/null +++ b/saved_model/13/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:283a9e4570fe9f196c9ccff599c2072e1d0a6f81d08affd15daa949ecc702550 +size 1817 diff --git a/saved_model/13/cover.jpg b/saved_model/13/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec6d4630e2e9903407b939a77b4e98d80a58fc4c --- /dev/null +++ b/saved_model/13/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4135cc056f26e03ba7e505f1be9ce76c6a9595340599f3d24cc929101f84d5f8 +size 19841 diff --git 
a/saved_model/13/model.pth b/saved_model/13/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..9a3fcda66c0db47fdc2d548123443fbcd969b2c5 --- /dev/null +++ b/saved_model/13/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e07fd627c9ad01002c889ddda9b8a9b0da9ab942115b50d44227ded7ca87ad4 +size 158907213 diff --git a/saved_model/14/config.json b/saved_model/14/config.json new file mode 100644 index 0000000000000000000000000000000000000000..58b4b8019394c79acf4096466d22877bb9f93fe3 --- /dev/null +++ b/saved_model/14/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50618702d27249d4557a39afd74ca19191f9537a0e192a4afeb0559967aa5527 +size 1592 diff --git a/saved_model/14/model.pth b/saved_model/14/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..6b7b5d56c8778fc3d89c5e915e6970498b72684a --- /dev/null +++ b/saved_model/14/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2801051beb8f90bd9785604fad617bf95a8f05df93722ad8993128dd6bf91301 +size 158912845 diff --git a/saved_model/15/config.json b/saved_model/15/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b40563fab0fb568a5bc363ed6591fd56f44087f3 --- /dev/null +++ b/saved_model/15/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d3cb3ce57d9e111d83d4f2570956be6621aff74166929f83f7b11d985a1858b +size 363860 diff --git a/saved_model/15/model.pth b/saved_model/15/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..caed1dda3e12732b5213fcf68e5856f9e5dbdc27 --- /dev/null +++ b/saved_model/15/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f021227e3d2e282ec5756e9704dcb2a28831c3b9ae527d639a2ca9b493e0636 +size 161855565 diff --git a/saved_model/16/config.json b/saved_model/16/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a6bf08cc6730fdcbb8db0f8d4a040d8913975e0f --- /dev/null +++ b/saved_model/16/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8b975a5a4f39b989d3bf45ace6d5194b29897dbdbb17a4a6ac458fef084e838 +size 1211 diff --git a/saved_model/16/model.pth b/saved_model/16/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..9432f8893685dea11918b20016ecad38a53a8d4e --- /dev/null +++ b/saved_model/16/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:750299355c3cd6bec4bca61ac50dbfb4c1e129be9b0806442cee24071bed657b +size 158882637 diff --git a/saved_model/17/config.json b/saved_model/17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..621d0515dba7c13e59c8467691ecd1be4f1c8ba7 --- /dev/null +++ b/saved_model/17/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:099f3191034423876aa79987acbbe1308878c45859d86840311245614635da27 +size 2174 diff --git a/saved_model/17/model.pth b/saved_model/17/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..cc8686a2cfd1f845a0ae167386ef26c1773d08f0 --- /dev/null +++ b/saved_model/17/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bafc0ad64442808ccbdc1c880846d4d7ed30e5db6b9c68982bade0070e135a9 +size 158966349 diff --git a/saved_model/2/config.json b/saved_model/2/config.json new file mode 100644 index 0000000000000000000000000000000000000000..25be48cadcc444ae0b288ad4c9c3953893056bf8 --- /dev/null +++ 
b/saved_model/2/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf80c1c1a185384ea9c4c8ea63964e2fc592e6d2aad3f8566b534a512ed90c28 +size 1294 diff --git a/saved_model/2/cover.jpg b/saved_model/2/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66adda74aaacb6645b897290775b36662f11cb90 --- /dev/null +++ b/saved_model/2/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf387dd1775ebf0f98245e433686a9f8f75bcc5aa8c4ceb192b8a98d0ec42432 +size 60164 diff --git a/saved_model/2/model.pth b/saved_model/2/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..33945f30851fe5f76e551d2a25882ae30eb0a4ff --- /dev/null +++ b/saved_model/2/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16adcc6dd3f23ac4407176769f1e6843f86a5b16e04b8abb5a6a11132e6b9751 +size 476622149 diff --git a/saved_model/3/config.json b/saved_model/3/config.json new file mode 100644 index 0000000000000000000000000000000000000000..335d233f350d2722dd021f17ee77f17c56cfe8bc --- /dev/null +++ b/saved_model/3/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8507d589dd869adbc8f2f49f083930db85561184afaa9dc472006434e4bb1d7e +size 1246 diff --git a/saved_model/3/cover.jpg b/saved_model/3/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adef08ed325760a8435a2d2ed7e74c002ce2960b --- /dev/null +++ b/saved_model/3/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1284933d68ad829768df808feaee25ad68693b8b004c44f675462750b94dd1d8 +size 47314 diff --git a/saved_model/3/model.pth b/saved_model/3/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..34a8d9ee092da3c4d6360cf73ff5ef34db4a61e1 --- /dev/null +++ b/saved_model/3/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60dfd6e56a1f895e3db4c054fd94d5a0362103dd5d2e19941e17dd1be41e6b11 +size 476796721 diff --git a/saved_model/4/config.json b/saved_model/4/config.json new file mode 100644 index 0000000000000000000000000000000000000000..62b94bf96c919d42223582407a42ab581f73deb2 --- /dev/null +++ b/saved_model/4/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cd3ae3718f63022451a3784e06abe7e2532e4066a826ded95db0632e153f508 +size 1264 diff --git a/saved_model/4/cover.jpg b/saved_model/4/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1b8a75acd5f90ba2d274fb4c45e57dab8d35a42 --- /dev/null +++ b/saved_model/4/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ecc5a318f4611b93bf40a584eaf5f6849d3ce812ee7ef6316f7a4a15df2c326 +size 141800 diff --git a/saved_model/4/model.pth b/saved_model/4/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..91e09c4f5087535b81f05b36e25c94028e0b261c --- /dev/null +++ b/saved_model/4/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae80b8e7f18766625a2fe991263c7c0d42364fa1a55d772c0c645f68c72a3750 +size 476799793 diff --git a/saved_model/5/config.json b/saved_model/5/config.json new file mode 100644 index 0000000000000000000000000000000000000000..37b3c0c83b0b9573747e73deef3e3f51968e2d3d --- /dev/null +++ b/saved_model/5/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d08ea4e940cd92bebaa656762031eb085439d47d6f636cdafb37411f24c927d1 +size 1262 diff --git a/saved_model/5/cover.jpg 
b/saved_model/5/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efdc5786a094868ad292ceec5fae44364ef9cd71 --- /dev/null +++ b/saved_model/5/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbed43668741a90c3a7faef3c3b5aace7723b94c251106fb5925a0f1ba0d7c5c +size 30497 diff --git a/saved_model/5/model.pth b/saved_model/5/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..c5f0776b063dae2bc42cbdc9843545cadf96c44d --- /dev/null +++ b/saved_model/5/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edfb6b428c398fab83a85b5ae41e13cb5a9f7be12692129e8a880d4553701f7b +size 158888013 diff --git a/saved_model/6/config.json b/saved_model/6/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8e733ab595f06298d0655684c06eec1697f6c6c8 --- /dev/null +++ b/saved_model/6/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a7d6956086537898264526d08e780c9abc4af8533bf75358dd960016c13da8b +size 1218 diff --git a/saved_model/6/cover.jpg b/saved_model/6/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..606bb04cffbc47ea3744df8756ee729081f5ef9f --- /dev/null +++ b/saved_model/6/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38e71373daa8849f04bd7867845676afab2057e69a5e0a1e312c2b6cfdd72794 +size 146236 diff --git a/saved_model/6/model.pth b/saved_model/6/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..398dd41a892468bd83279cb83d77a6648f4ef4b6 --- /dev/null +++ b/saved_model/6/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5461551d900d726e24fe5551c3773c0c27419c9237882fe7d400025344499f85 +size 158875981 diff --git a/saved_model/7/config.json b/saved_model/7/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9af88aaa765e752c983eb1b8cb6a2bf6df04bb93 --- /dev/null +++ b/saved_model/7/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7df7925410fe5775f1d6085a548f816304c43ed2ce84835a4cf9f815b524bad5 +size 1749 diff --git a/saved_model/7/cover.jpg b/saved_model/7/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9a4616842e70f7b794ac7cd31a76ddb8539deee --- /dev/null +++ b/saved_model/7/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd98e72f9a5de9df03d2cffae41f907dd70116b4ae89d9fe218df6fa45cd1767 +size 98813 diff --git a/saved_model/7/model.pth b/saved_model/7/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..549582df879c383e04e2450ddf49bb665a3500a6 --- /dev/null +++ b/saved_model/7/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f96e046a777407883d4665777118bdfbe0a48fc18c5fdea16c1d05eaa3af7773 +size 476818993 diff --git a/saved_model/8/config.json b/saved_model/8/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8c6db01c3ab083ac49d7e377143317420f5c3ba9 --- /dev/null +++ b/saved_model/8/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4304293bb85d90daa3b5fa2dc3a35ce0842f0282f54298df68103932fee0e9f2 +size 1873 diff --git a/saved_model/8/cover.jpg b/saved_model/8/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8840bf8d752d64ea56618539bcfa57c82bcde79a --- /dev/null +++ b/saved_model/8/cover.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:090dd3b832004b22ac58c54075890f7484fe6989a9ce91d234af35f1adf27e0a +size 37199 diff --git a/saved_model/8/model.pth b/saved_model/8/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..32feea1f1496ca5ccc65fb449d2687b713a473d9 --- /dev/null +++ b/saved_model/8/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da9a595f45f51dee33168c1e32bba38fe402fe3d8857331f5798a2407b6b2a86 +size 158902605 diff --git a/saved_model/9/config.json b/saved_model/9/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b8172bbb7a9cd5c115061bb2ef2c1501ea1df172 --- /dev/null +++ b/saved_model/9/config.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2040ad22b30868bb031f4d2e2af91fdcfe057753f68e8cb135be5459374cba73 +size 816 diff --git a/saved_model/9/cover.jpg b/saved_model/9/cover.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8840bf8d752d64ea56618539bcfa57c82bcde79a --- /dev/null +++ b/saved_model/9/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:090dd3b832004b22ac58c54075890f7484fe6989a9ce91d234af35f1adf27e0a +size 37199 diff --git a/saved_model/9/model.pth b/saved_model/9/model.pth new file mode 100644 index 0000000000000000000000000000000000000000..5c2db429f5ae3075223dc115454c6b6af56417d1 --- /dev/null +++ b/saved_model/9/model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20b38cc55191ec02c2809e80d758ff0d56bd44760841704feb9921aa58a4d9de +size 203264375 diff --git a/saved_model/info.json b/saved_model/info.json new file mode 100644 index 0000000000000000000000000000000000000000..626eaced6ea5b1d26b9442c855753606fe8d9294 --- /dev/null +++ b/saved_model/info.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b71ff5dab7d60ce8fdbe75fa48b607574b845b4b57fa119299e1f21216d41e7 +size 4697 diff --git a/text/LICENSE b/text/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4ad4ed1d5e34d95c8380768ec16405d789cc6de4 --- /dev/null +++ b/text/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Keith Ito + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
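[Editor's note — illustrative sketch, not part of this commit.] The saved_model/* entries above are Git LFS pointer files: each records only the spec version, a sha256 oid, and the byte size of the real artifact, which `git lfs pull` fetches. A minimal loading sketch for one numbered model follows. It assumes the repo exposes a `models.SynthesizerTrn` class with the usual VITS constructor and that each config.json carries a `symbols` list; neither is part of this diff, so treat every such name as an assumption, not a confirmed API.

    import torch
    import utils  # the utils.py added later in this diff

    # Hypothetical: models.py is not in this diff; SynthesizerTrn and these
    # constructor arguments follow the usual VITS layout and may differ here.
    from models import SynthesizerTrn

    hps = utils.get_hparams_from_file('saved_model/2/config.json')
    net_g = SynthesizerTrn(
        len(hps.symbols),  # assumes the config lists its symbol set
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model)
    utils.load_checkpoint('saved_model/2/model.pth', net_g, optimizer=None)
    # load_checkpoint expects 'iteration'/'learning_rate' keys; if a checkpoint
    # stores only weights, fall back to:
    # net_g.load_state_dict(torch.load('saved_model/2/model.pth', map_location='cpu')['model'])
    net_g.eval()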
diff --git a/text/__init__.py b/text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e69c354dd24e3243980236eca962cd5945a92fc --- /dev/null +++ b/text/__init__.py @@ -0,0 +1,32 @@ +""" from https://github.com/keithito/tacotron """ +from text import cleaners + + +def text_to_sequence(text, symbols, cleaner_names): + '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. + Args: + text: string to convert to a sequence + cleaner_names: names of the cleaner functions to run the text through + Returns: + List of integers corresponding to the symbols in the text + ''' + _symbol_to_id = {s: i for i, s in enumerate(symbols)} + + sequence = [] + + clean_text = _clean_text(text, cleaner_names) + for symbol in clean_text: + if symbol not in _symbol_to_id.keys(): + continue + symbol_id = _symbol_to_id[symbol] + sequence += [symbol_id] + return sequence + + +def _clean_text(text, cleaner_names): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception('Unknown cleaner: %s' % name) + text = cleaner(text) + return text diff --git a/text/cantonese.py b/text/cantonese.py new file mode 100644 index 0000000000000000000000000000000000000000..32eae72ef7eb43d493da6d6f75dd46176d0e8808 --- /dev/null +++ b/text/cantonese.py @@ -0,0 +1,59 @@ +import re +import cn2an +import opencc + + +converter = opencc.OpenCC('chinese_dialect_lexicons/jyutjyu') + +# List of (Latin alphabet, ipa) pairs: +_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('A', 'ei˥'), + ('B', 'biː˥'), + ('C', 'siː˥'), + ('D', 'tiː˥'), + ('E', 'iː˥'), + ('F', 'e˥fuː˨˩'), + ('G', 'tsiː˥'), + ('H', 'ɪk̚˥tsʰyː˨˩'), + ('I', 'ɐi˥'), + ('J', 'tsei˥'), + ('K', 'kʰei˥'), + ('L', 'e˥llou˨˩'), + ('M', 'ɛːm˥'), + ('N', 'ɛːn˥'), + ('O', 'ou˥'), + ('P', 'pʰiː˥'), + ('Q', 'kʰiːu˥'), + ('R', 'aː˥lou˨˩'), + ('S', 'ɛː˥siː˨˩'), + ('T', 'tʰiː˥'), + ('U', 'juː˥'), + ('V', 'wiː˥'), + ('W', 'tʊk̚˥piː˥juː˥'), + ('X', 'ɪk̚˥siː˨˩'), + ('Y', 'waːi˥'), + ('Z', 'iː˨sɛːt̚˥') +]] + + +def number_to_cantonese(text): + return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) + + +def latin_to_ipa(text): + for regex, replacement in _latin_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def cantonese_to_ipa(text): + text = number_to_cantonese(text.upper()) + text = converter.convert(text).replace('-','').replace('$',' ') + text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) + text = re.sub(r'[、;:]', ',', text) + text = re.sub(r'\s*,\s*', ', ', text) + text = re.sub(r'\s*。\s*', '. ', text) + text = re.sub(r'\s*?\s*', '? ', text) + text = re.sub(r'\s*!\s*', '! 
', text) + text = re.sub(r'\s*$', '', text) + return text diff --git a/text/cleaners.py b/text/cleaners.py new file mode 100644 index 0000000000000000000000000000000000000000..eedbeaee8ad73dd4aaf6c12e3f900fc34a1ee630 --- /dev/null +++ b/text/cleaners.py @@ -0,0 +1,150 @@ +import re +import pyopenjtalk + +pyopenjtalk._lazy_init() + + +def japanese_cleaners(text): + from text.japanese import japanese_to_romaji_with_accent + text = japanese_to_romaji_with_accent(text) + text = re.sub(r'([A-Za-z])$', r'\1.', text) + return text + + +def japanese_cleaners2(text): + return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…') + + +def korean_cleaners(text): + '''Pipeline for Korean text''' + from text.korean import latin_to_hangul, number_to_hangul, divide_hangul + text = latin_to_hangul(text) + text = number_to_hangul(text) + text = divide_hangul(text) + text = re.sub(r'([\u3131-\u3163])$', r'\1.', text) + return text + + +def chinese_cleaners(text): + '''Pipeline for Chinese text''' + from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo + text = number_to_chinese(text) + text = chinese_to_bopomofo(text) + text = latin_to_bopomofo(text) + text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text) + return text + + +def zh_ja_mixture_cleaners(text): + from text.mandarin import chinese_to_romaji + from text.japanese import japanese_to_romaji_with_accent + text = re.sub(r'\[ZH\](.*?)\[ZH\]', + lambda x: chinese_to_romaji(x.group(1)) + ' ', text) + text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent( + x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…') + ' ', text) + text = re.sub(r'\s+$', '', text) + text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) + return text + + +def sanskrit_cleaners(text): + text = text.replace('॥', '।').replace('ॐ', 'ओम्') + if text[-1] != '।': + text += ' ।' + return text + + +def cjks_cleaners(text): + from text.mandarin import chinese_to_lazy_ipa + from text.japanese import japanese_to_ipa + from text.korean import korean_to_lazy_ipa + from text.sanskrit import devanagari_to_ipa + from text.english import english_to_lazy_ipa + text = re.sub(r'\[ZH\](.*?)\[ZH\]', + lambda x: chinese_to_lazy_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[JA\](.*?)\[JA\]', + lambda x: japanese_to_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[KO\](.*?)\[KO\]', + lambda x: korean_to_lazy_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[SA\](.*?)\[SA\]', + lambda x: devanagari_to_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[EN\](.*?)\[EN\]', + lambda x: english_to_lazy_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\s+$', '', text) + text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) + return text + + +def cjke_cleaners(text): + from text.mandarin import chinese_to_lazy_ipa + from text.japanese import japanese_to_ipa + from text.korean import korean_to_ipa + from text.english import english_to_ipa2 + text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace( + 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn') + ' ', text) + text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace( + 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz') + ' ', text) + text = re.sub(r'\[KO\](.*?)\[KO\]', + lambda x: korean_to_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace( + 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u') + ' ', text) + text = re.sub(r'\s+$', '', text) + 
text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) + return text + + +def cjke_cleaners2(text): + from text.mandarin import chinese_to_ipa + from text.japanese import japanese_to_ipa2 + from text.korean import korean_to_ipa + from text.english import english_to_ipa2 + text = re.sub(r'\[ZH\](.*?)\[ZH\]', + lambda x: chinese_to_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[JA\](.*?)\[JA\]', + lambda x: japanese_to_ipa2(x.group(1)) + ' ', text) + text = re.sub(r'\[KO\](.*?)\[KO\]', + lambda x: korean_to_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[EN\](.*?)\[EN\]', + lambda x: english_to_ipa2(x.group(1)) + ' ', text) + text = re.sub(r'\s+$', '', text) + text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) + return text + + +def thai_cleaners(text): + from text.thai import num_to_thai, latin_to_thai + text = num_to_thai(text) + text = latin_to_thai(text) + return text + + +def shanghainese_cleaners(text): + from text.shanghainese import shanghainese_to_ipa + text = shanghainese_to_ipa(text) + text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) + return text + + +def chinese_dialect_cleaners(text): + from text.mandarin import chinese_to_ipa2 + from text.japanese import japanese_to_ipa3 + from text.shanghainese import shanghainese_to_ipa + from text.cantonese import cantonese_to_ipa + from text.english import english_to_lazy_ipa2 + from text.ngu_dialect import ngu_dialect_to_ipa + text = re.sub(r'\[ZH\](.*?)\[ZH\]', + lambda x: chinese_to_ipa2(x.group(1)) + ' ', text) + text = re.sub(r'\[JA\](.*?)\[JA\]', + lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ') + ' ', text) + text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5', + '˧˧˦').replace( + '6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e') + ' ', text) + text = re.sub(r'\[GD\](.*?)\[GD\]', + lambda x: cantonese_to_ipa(x.group(1)) + ' ', text) + text = re.sub(r'\[EN\](.*?)\[EN\]', + lambda x: english_to_lazy_ipa2(x.group(1)) + ' ', text) + text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group( + 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ') + ' ', text) + text = re.sub(r'\s+$', '', text) + text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) + return text diff --git a/text/english.py b/text/english.py new file mode 100644 index 0000000000000000000000000000000000000000..6817392ba8a9eb830351de89fb7afc5ad72f5e42 --- /dev/null +++ b/text/english.py @@ -0,0 +1,188 @@ +""" from https://github.com/keithito/tacotron """ + +''' +Cleaners are transformations that run over the input text at both training and eval time. + +Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" +hyperparameter. Some cleaners are English-specific. You'll typically want to use: + 1. "english_cleaners" for English text + 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using + the Unidecode library (https://pypi.python.org/pypi/Unidecode) + 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update + the symbols in symbols.py to match your data). 
+''' + + +# Regular expression matching whitespace: + + +import re +import inflect +from unidecode import unidecode +import eng_to_ipa as ipa +_inflect = inflect.engine() +_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') +_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') +_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') +_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') +_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') +_number_re = re.compile(r'[0-9]+') + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ + ('mrs', 'misess'), + ('mr', 'mister'), + ('dr', 'doctor'), + ('st', 'saint'), + ('co', 'company'), + ('jr', 'junior'), + ('maj', 'major'), + ('gen', 'general'), + ('drs', 'doctors'), + ('rev', 'reverend'), + ('lt', 'lieutenant'), + ('hon', 'honorable'), + ('sgt', 'sergeant'), + ('capt', 'captain'), + ('esq', 'esquire'), + ('ltd', 'limited'), + ('col', 'colonel'), + ('ft', 'fort'), +]] + + +# List of (ipa, lazy ipa) pairs: +_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('r', 'ɹ'), + ('æ', 'e'), + ('ɑ', 'a'), + ('ɔ', 'o'), + ('ð', 'z'), + ('θ', 's'), + ('ɛ', 'e'), + ('ɪ', 'i'), + ('ʊ', 'u'), + ('ʒ', 'ʥ'), + ('ʤ', 'ʥ'), + ('ˈ', '↓'), +]] + +# List of (ipa, lazy ipa2) pairs: +_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('r', 'ɹ'), + ('ð', 'z'), + ('θ', 's'), + ('ʒ', 'ʑ'), + ('ʤ', 'dʑ'), + ('ˈ', '↓'), +]] + +# List of (ipa, ipa2) pairs +_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('r', 'ɹ'), + ('ʤ', 'dʒ'), + ('ʧ', 'tʃ') +]] + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def collapse_whitespace(text): + return re.sub(r'\s+', ' ', text) + + +def _remove_commas(m): + return m.group(1).replace(',', '') + + +def _expand_decimal_point(m): + return m.group(1).replace('.', ' point ') + + +def _expand_dollars(m): + match = m.group(1) + parts = match.split('.') + if len(parts) > 2: + return match + ' dollars' # Unexpected format + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) + elif dollars: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + return '%s %s' % (dollars, dollar_unit) + elif cents: + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s' % (cents, cent_unit) + else: + return 'zero dollars' + + +def _expand_ordinal(m): + return _inflect.number_to_words(m.group(0)) + + +def _expand_number(m): + num = int(m.group(0)) + if num > 1000 and num < 3000: + if num == 2000: + return 'two thousand' + elif num > 2000 and num < 2010: + return 'two thousand ' + _inflect.number_to_words(num % 100) + elif num % 100 == 0: + return _inflect.number_to_words(num // 100) + ' hundred' + else: + return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') + else: + return _inflect.number_to_words(num, andword='') + + +def normalize_numbers(text): + text = re.sub(_comma_number_re, _remove_commas, text) + text = re.sub(_pounds_re, r'\1 pounds', text) + text = re.sub(_dollars_re, _expand_dollars, text) + text = re.sub(_decimal_number_re, _expand_decimal_point, text) + text = re.sub(_ordinal_re, _expand_ordinal, text) + text = re.sub(_number_re, _expand_number, text) + return text + 
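[Editor's note — illustrative sketch, not part of this commit.] A quick check of the number-normalization chain above (comma stripping, currency, decimals, ordinals, then plain numbers). The exact spelled-out wording depends on the installed inflect version, so the expected output is approximate:

    from text.english import normalize_numbers  # needs inflect/unidecode/eng_to_ipa installed

    print(normalize_numbers('I paid $3.50 for 2 books in 1995.'))
    # approximately: 'I paid three dollars, fifty cents for two books in nineteen ninety-five.'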
+ +def mark_dark_l(text): + return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) + + +def english_to_ipa(text): + text = unidecode(text).lower() + text = expand_abbreviations(text) + text = normalize_numbers(text) + phonemes = ipa.convert(text) + phonemes = collapse_whitespace(phonemes) + return phonemes + + +def english_to_lazy_ipa(text): + text = english_to_ipa(text) + for regex, replacement in _lazy_ipa: + text = re.sub(regex, replacement, text) + return text + + +def english_to_ipa2(text): + text = english_to_ipa(text) + text = mark_dark_l(text) + for regex, replacement in _ipa_to_ipa2: + text = re.sub(regex, replacement, text) + return text.replace('...', '…') + + +def english_to_lazy_ipa2(text): + text = english_to_ipa(text) + for regex, replacement in _lazy_ipa2: + text = re.sub(regex, replacement, text) + return text diff --git a/text/japanese.py b/text/japanese.py new file mode 100644 index 0000000000000000000000000000000000000000..375e4d50872d5c68ee57ca17470a2ca425425eba --- /dev/null +++ b/text/japanese.py @@ -0,0 +1,153 @@ +import re +from unidecode import unidecode +import pyopenjtalk + + +# Regular expression matching Japanese without punctuation marks: +_japanese_characters = re.compile( + r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') + +# Regular expression matching non-Japanese characters or punctuation marks: +_japanese_marks = re.compile( + r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') + +# List of (symbol, Japanese) pairs for marks: +_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('%', 'パーセント') +]] + +# List of (romaji, ipa) pairs for marks: +_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ts', 'ʦ'), + ('u', 'ɯ'), + ('j', 'ʥ'), + ('y', 'j'), + ('ni', 'n^i'), + ('nj', 'n^'), + ('hi', 'çi'), + ('hj', 'ç'), + ('f', 'ɸ'), + ('I', 'i*'), + ('U', 'ɯ*'), + ('r', 'ɾ') +]] + +# List of (romaji, ipa2) pairs for marks: +_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('u', 'ɯ'), + ('ʧ', 'tʃ'), + ('j', 'dʑ'), + ('y', 'j'), + ('ni', 'n^i'), + ('nj', 'n^'), + ('hi', 'çi'), + ('hj', 'ç'), + ('f', 'ɸ'), + ('I', 'i*'), + ('U', 'ɯ*'), + ('r', 'ɾ') +]] + +# List of (consonant, sokuon) pairs: +_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ + (r'Q([↑↓]*[kg])', r'k#\1'), + (r'Q([↑↓]*[tdjʧ])', r't#\1'), + (r'Q([↑↓]*[sʃ])', r's\1'), + (r'Q([↑↓]*[pb])', r'p#\1') +]] + +# List of (consonant, hatsuon) pairs: +_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ + (r'N([↑↓]*[pbm])', r'm\1'), + (r'N([↑↓]*[ʧʥj])', r'n^\1'), + (r'N([↑↓]*[tdn])', r'n\1'), + (r'N([↑↓]*[kg])', r'ŋ\1') +]] + + +def symbols_to_japanese(text): + for regex, replacement in _symbols_to_japanese: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_romaji_with_accent(text): + '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' + text = symbols_to_japanese(text) + sentences = re.split(_japanese_marks, text) + marks = re.findall(_japanese_marks, text) + text = '' + for i, sentence in enumerate(sentences): + if re.match(_japanese_characters, sentence): + if text != '': + text += ' ' + labels = pyopenjtalk.extract_fullcontext(sentence) + for n, label in enumerate(labels): + phoneme = re.search(r'\-([^\+]*)\+', label).group(1) + if phoneme not in ['sil', 'pau']: + text += phoneme.replace('ch', 'ʧ').replace('sh', + 'ʃ').replace('cl', 'Q') + else: + continue + # n_moras 
= int(re.search(r'/F:(\d+)_', label).group(1)) + a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) + a2 = int(re.search(r"\+(\d+)\+", label).group(1)) + a3 = int(re.search(r"\+(\d+)/", label).group(1)) + if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: + a2_next = -1 + else: + a2_next = int( + re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) + # Accent phrase boundary + if a3 == 1 and a2_next == 1: + text += ' ' + # Falling + elif a1 == 0 and a2_next == a2 + 1: + text += '↓' + # Rising + elif a2 == 1 and a2_next == 2: + text += '↑' + if i < len(marks): + text += unidecode(marks[i]).replace(' ', '') + return text + + +def get_real_sokuon(text): + for regex, replacement in _real_sokuon: + text = re.sub(regex, replacement, text) + return text + + +def get_real_hatsuon(text): + for regex, replacement in _real_hatsuon: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_ipa(text): + text = japanese_to_romaji_with_accent(text).replace('...', '…') + text = re.sub( + r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) + text = get_real_sokuon(text) + text = get_real_hatsuon(text) + for regex, replacement in _romaji_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_ipa2(text): + text = japanese_to_romaji_with_accent(text).replace('...', '…') + text = get_real_sokuon(text) + text = get_real_hatsuon(text) + for regex, replacement in _romaji_to_ipa2: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_ipa3(text): + text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( + 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') + text = re.sub( + r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) + text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) + return text diff --git a/text/korean.py b/text/korean.py new file mode 100644 index 0000000000000000000000000000000000000000..edee07429a450c55e3d8e246997faaa1e0b89cc9 --- /dev/null +++ b/text/korean.py @@ -0,0 +1,210 @@ +import re +from jamo import h2j, j2hcj +import ko_pron + + +# This is a list of Korean classifiers preceded by pure Korean numerals. 
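[Editor's note — illustrative sketch, not part of this commit.] These classifiers decide, in number_to_hangul further down this file, whether a numeral gets the native Korean or the Sino-Korean reading; the list itself is defined immediately below. Expected outputs here are inferred from the code, not from running it:

    from text.korean import number_to_hangul  # needs jamo and ko_pron installed

    # '마리' appears in the classifier list, so the native reading is chosen:
    print(number_to_hangul('고양이 2마리'))  # expected: '고양이 두마리'
    # a digit with no matching classifier falls back to Sino-Korean, digit by digit:
    print(number_to_hangul('2'))            # expected: '이'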
+_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' + +# List of (hangul, hangul divided) pairs: +_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ㄳ', 'ㄱㅅ'), + ('ㄵ', 'ㄴㅈ'), + ('ㄶ', 'ㄴㅎ'), + ('ㄺ', 'ㄹㄱ'), + ('ㄻ', 'ㄹㅁ'), + ('ㄼ', 'ㄹㅂ'), + ('ㄽ', 'ㄹㅅ'), + ('ㄾ', 'ㄹㅌ'), + ('ㄿ', 'ㄹㅍ'), + ('ㅀ', 'ㄹㅎ'), + ('ㅄ', 'ㅂㅅ'), + ('ㅘ', 'ㅗㅏ'), + ('ㅙ', 'ㅗㅐ'), + ('ㅚ', 'ㅗㅣ'), + ('ㅝ', 'ㅜㅓ'), + ('ㅞ', 'ㅜㅔ'), + ('ㅟ', 'ㅜㅣ'), + ('ㅢ', 'ㅡㅣ'), + ('ㅑ', 'ㅣㅏ'), + ('ㅒ', 'ㅣㅐ'), + ('ㅕ', 'ㅣㅓ'), + ('ㅖ', 'ㅣㅔ'), + ('ㅛ', 'ㅣㅗ'), + ('ㅠ', 'ㅣㅜ') +]] + +# List of (Latin alphabet, hangul) pairs: +_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('a', '에이'), + ('b', '비'), + ('c', '시'), + ('d', '디'), + ('e', '이'), + ('f', '에프'), + ('g', '지'), + ('h', '에이치'), + ('i', '아이'), + ('j', '제이'), + ('k', '케이'), + ('l', '엘'), + ('m', '엠'), + ('n', '엔'), + ('o', '오'), + ('p', '피'), + ('q', '큐'), + ('r', '아르'), + ('s', '에스'), + ('t', '티'), + ('u', '유'), + ('v', '브이'), + ('w', '더블유'), + ('x', '엑스'), + ('y', '와이'), + ('z', '제트') +]] + +# List of (ipa, lazy ipa) pairs: +_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('t͡ɕ','ʧ'), + ('d͡ʑ','ʥ'), + ('ɲ','n^'), + ('ɕ','ʃ'), + ('ʷ','w'), + ('ɭ','l`'), + ('ʎ','ɾ'), + ('ɣ','ŋ'), + ('ɰ','ɯ'), + ('ʝ','j'), + ('ʌ','ə'), + ('ɡ','g'), + ('\u031a','#'), + ('\u0348','='), + ('\u031e',''), + ('\u0320',''), + ('\u0339','') +]] + + +def latin_to_hangul(text): + for regex, replacement in _latin_to_hangul: + text = re.sub(regex, replacement, text) + return text + + +def divide_hangul(text): + text = j2hcj(h2j(text)) + for regex, replacement in _hangul_divided: + text = re.sub(regex, replacement, text) + return text + + +def hangul_number(num, sino=True): + '''Reference https://github.com/Kyubyong/g2pK''' + num = re.sub(',', '', num) + + if num == '0': + return '영' + if not sino and num == '20': + return '스무' + + digits = '123456789' + names = '일이삼사오육칠팔구' + digit2name = {d: n for d, n in zip(digits, names)} + + modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' + decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' + digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} + digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} + + spelledout = [] + for i, digit in enumerate(num): + i = len(num) - i - 1 + if sino: + if i == 0: + name = digit2name.get(digit, '') + elif i == 1: + name = digit2name.get(digit, '') + '십' + name = name.replace('일십', '십') + else: + if i == 0: + name = digit2mod.get(digit, '') + elif i == 1: + name = digit2dec.get(digit, '') + if digit == '0': + if i % 4 == 0: + last_three = spelledout[-min(3, len(spelledout)):] + if ''.join(last_three) == '': + spelledout.append('') + continue + else: + spelledout.append('') + continue + if i == 2: + name = digit2name.get(digit, '') + '백' + name = name.replace('일백', '백') + elif i == 3: + name = digit2name.get(digit, '') + '천' + name = name.replace('일천', '천') + elif i == 4: + name = digit2name.get(digit, '') + '만' + name = name.replace('일만', '만') + elif i == 5: + name = digit2name.get(digit, '') + '십' + name = name.replace('일십', '십') + elif i == 6: + name = digit2name.get(digit, '') + '백' + name = name.replace('일백', '백') + elif i == 7: + name = digit2name.get(digit, '') + '천' + name = name.replace('일천', '천') + elif i == 8: + name = digit2name.get(digit, '') + '억' + elif i == 9: + name = digit2name.get(digit, '') + '십' + elif i == 10: + name = digit2name.get(digit, '') + '백' + elif i == 11: + name = digit2name.get(digit, '') + '천' + elif i == 12: + name 
= digit2name.get(digit, '') + '조' + elif i == 13: + name = digit2name.get(digit, '') + '십' + elif i == 14: + name = digit2name.get(digit, '') + '백' + elif i == 15: + name = digit2name.get(digit, '') + '천' + spelledout.append(name) + return ''.join(elem for elem in spelledout) + + +def number_to_hangul(text): + '''Reference https://github.com/Kyubyong/g2pK''' + tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) + for token in tokens: + num, classifier = token + if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: + spelledout = hangul_number(num, sino=False) + else: + spelledout = hangul_number(num, sino=True) + text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') + # digit by digit for remaining digits + digits = '0123456789' + names = '영일이삼사오육칠팔구' + for d, n in zip(digits, names): + text = text.replace(d, n) + return text + + +def korean_to_lazy_ipa(text): + text = latin_to_hangul(text) + text = number_to_hangul(text) + text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text) + for regex, replacement in _ipa_to_lazy_ipa: + text = re.sub(regex, replacement, text) + return text + + +def korean_to_ipa(text): + text = korean_to_lazy_ipa(text) + return text.replace('ʧ','tʃ').replace('ʥ','dʑ') diff --git a/text/mandarin.py b/text/mandarin.py new file mode 100644 index 0000000000000000000000000000000000000000..ff71de9788e4f20c897b971a775d1ecfbfe1c7b7 --- /dev/null +++ b/text/mandarin.py @@ -0,0 +1,329 @@ +import os +import sys +import re +from pypinyin import lazy_pinyin, BOPOMOFO +import jieba +import cn2an +import logging + +logging.getLogger('jieba').setLevel(logging.WARNING) +jieba.initialize() + + +# List of (Latin alphabet, bopomofo) pairs: +_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('a', 'ㄟˉ'), + ('b', 'ㄅㄧˋ'), + ('c', 'ㄙㄧˉ'), + ('d', 'ㄉㄧˋ'), + ('e', 'ㄧˋ'), + ('f', 'ㄝˊㄈㄨˋ'), + ('g', 'ㄐㄧˋ'), + ('h', 'ㄝˇㄑㄩˋ'), + ('i', 'ㄞˋ'), + ('j', 'ㄐㄟˋ'), + ('k', 'ㄎㄟˋ'), + ('l', 'ㄝˊㄛˋ'), + ('m', 'ㄝˊㄇㄨˋ'), + ('n', 'ㄣˉ'), + ('o', 'ㄡˉ'), + ('p', 'ㄆㄧˉ'), + ('q', 'ㄎㄧㄡˉ'), + ('r', 'ㄚˋ'), + ('s', 'ㄝˊㄙˋ'), + ('t', 'ㄊㄧˋ'), + ('u', 'ㄧㄡˉ'), + ('v', 'ㄨㄧˉ'), + ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), + ('x', 'ㄝˉㄎㄨˋㄙˋ'), + ('y', 'ㄨㄞˋ'), + ('z', 'ㄗㄟˋ') +]] + +# List of (bopomofo, romaji) pairs: +_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ㄅㄛ', 'p⁼wo'), + ('ㄆㄛ', 'pʰwo'), + ('ㄇㄛ', 'mwo'), + ('ㄈㄛ', 'fwo'), + ('ㄅ', 'p⁼'), + ('ㄆ', 'pʰ'), + ('ㄇ', 'm'), + ('ㄈ', 'f'), + ('ㄉ', 't⁼'), + ('ㄊ', 'tʰ'), + ('ㄋ', 'n'), + ('ㄌ', 'l'), + ('ㄍ', 'k⁼'), + ('ㄎ', 'kʰ'), + ('ㄏ', 'h'), + ('ㄐ', 'ʧ⁼'), + ('ㄑ', 'ʧʰ'), + ('ㄒ', 'ʃ'), + ('ㄓ', 'ʦ`⁼'), + ('ㄔ', 'ʦ`ʰ'), + ('ㄕ', 's`'), + ('ㄖ', 'ɹ`'), + ('ㄗ', 'ʦ⁼'), + ('ㄘ', 'ʦʰ'), + ('ㄙ', 's'), + ('ㄚ', 'a'), + ('ㄛ', 'o'), + ('ㄜ', 'ə'), + ('ㄝ', 'e'), + ('ㄞ', 'ai'), + ('ㄟ', 'ei'), + ('ㄠ', 'au'), + ('ㄡ', 'ou'), + ('ㄧㄢ', 'yeNN'), + ('ㄢ', 'aNN'), + ('ㄧㄣ', 'iNN'), + ('ㄣ', 'əNN'), + ('ㄤ', 'aNg'), + ('ㄧㄥ', 'iNg'), + ('ㄨㄥ', 'uNg'), + ('ㄩㄥ', 'yuNg'), + ('ㄥ', 'əNg'), + ('ㄦ', 'əɻ'), + ('ㄧ', 'i'), + ('ㄨ', 'u'), + ('ㄩ', 'ɥ'), + ('ˉ', '→'), + ('ˊ', '↑'), + ('ˇ', '↓↑'), + ('ˋ', '↓'), + ('˙', ''), + (',', ','), + ('。', '.'), + ('!', '!'), + ('?', '?'), + ('—', '-') +]] + +# List of (romaji, ipa) pairs: +_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('ʃy', 'ʃ'), + ('ʧʰy', 'ʧʰ'), + ('ʧ⁼y', 'ʧ⁼'), + ('NN', 'n'), + ('Ng', 'ŋ'), + ('y', 'j'), + ('h', 'x') +]] + +# List of (bopomofo, ipa) pairs: +_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x 
in [ + ('ㄅㄛ', 'p⁼wo'), + ('ㄆㄛ', 'pʰwo'), + ('ㄇㄛ', 'mwo'), + ('ㄈㄛ', 'fwo'), + ('ㄅ', 'p⁼'), + ('ㄆ', 'pʰ'), + ('ㄇ', 'm'), + ('ㄈ', 'f'), + ('ㄉ', 't⁼'), + ('ㄊ', 'tʰ'), + ('ㄋ', 'n'), + ('ㄌ', 'l'), + ('ㄍ', 'k⁼'), + ('ㄎ', 'kʰ'), + ('ㄏ', 'x'), + ('ㄐ', 'tʃ⁼'), + ('ㄑ', 'tʃʰ'), + ('ㄒ', 'ʃ'), + ('ㄓ', 'ts`⁼'), + ('ㄔ', 'ts`ʰ'), + ('ㄕ', 's`'), + ('ㄖ', 'ɹ`'), + ('ㄗ', 'ts⁼'), + ('ㄘ', 'tsʰ'), + ('ㄙ', 's'), + ('ㄚ', 'a'), + ('ㄛ', 'o'), + ('ㄜ', 'ə'), + ('ㄝ', 'ɛ'), + ('ㄞ', 'aɪ'), + ('ㄟ', 'eɪ'), + ('ㄠ', 'ɑʊ'), + ('ㄡ', 'oʊ'), + ('ㄧㄢ', 'jɛn'), + ('ㄩㄢ', 'ɥæn'), + ('ㄢ', 'an'), + ('ㄧㄣ', 'in'), + ('ㄩㄣ', 'ɥn'), + ('ㄣ', 'ən'), + ('ㄤ', 'ɑŋ'), + ('ㄧㄥ', 'iŋ'), + ('ㄨㄥ', 'ʊŋ'), + ('ㄩㄥ', 'jʊŋ'), + ('ㄥ', 'əŋ'), + ('ㄦ', 'əɻ'), + ('ㄧ', 'i'), + ('ㄨ', 'u'), + ('ㄩ', 'ɥ'), + ('ˉ', '→'), + ('ˊ', '↑'), + ('ˇ', '↓↑'), + ('ˋ', '↓'), + ('˙', ''), + (',', ','), + ('。', '.'), + ('!', '!'), + ('?', '?'), + ('—', '-') +]] + +# List of (bopomofo, ipa2) pairs: +_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ㄅㄛ', 'pwo'), + ('ㄆㄛ', 'pʰwo'), + ('ㄇㄛ', 'mwo'), + ('ㄈㄛ', 'fwo'), + ('ㄅ', 'p'), + ('ㄆ', 'pʰ'), + ('ㄇ', 'm'), + ('ㄈ', 'f'), + ('ㄉ', 't'), + ('ㄊ', 'tʰ'), + ('ㄋ', 'n'), + ('ㄌ', 'l'), + ('ㄍ', 'k'), + ('ㄎ', 'kʰ'), + ('ㄏ', 'h'), + ('ㄐ', 'tɕ'), + ('ㄑ', 'tɕʰ'), + ('ㄒ', 'ɕ'), + ('ㄓ', 'tʂ'), + ('ㄔ', 'tʂʰ'), + ('ㄕ', 'ʂ'), + ('ㄖ', 'ɻ'), + ('ㄗ', 'ts'), + ('ㄘ', 'tsʰ'), + ('ㄙ', 's'), + ('ㄚ', 'a'), + ('ㄛ', 'o'), + ('ㄜ', 'ɤ'), + ('ㄝ', 'ɛ'), + ('ㄞ', 'aɪ'), + ('ㄟ', 'eɪ'), + ('ㄠ', 'ɑʊ'), + ('ㄡ', 'oʊ'), + ('ㄧㄢ', 'jɛn'), + ('ㄩㄢ', 'yæn'), + ('ㄢ', 'an'), + ('ㄧㄣ', 'in'), + ('ㄩㄣ', 'yn'), + ('ㄣ', 'ən'), + ('ㄤ', 'ɑŋ'), + ('ㄧㄥ', 'iŋ'), + ('ㄨㄥ', 'ʊŋ'), + ('ㄩㄥ', 'jʊŋ'), + ('ㄥ', 'ɤŋ'), + ('ㄦ', 'əɻ'), + ('ㄧ', 'i'), + ('ㄨ', 'u'), + ('ㄩ', 'y'), + ('ˉ', '˥'), + ('ˊ', '˧˥'), + ('ˇ', '˨˩˦'), + ('ˋ', '˥˩'), + ('˙', ''), + (',', ','), + ('。', '.'), + ('!', '!'), + ('?', '?'), + ('—', '-') +]] + + +def number_to_chinese(text): + numbers = re.findall(r'\d+(?:\.?\d+)?', text) + for number in numbers: + text = text.replace(number, cn2an.an2cn(number), 1) + return text + + +def chinese_to_bopomofo(text): + text = text.replace('、', ',').replace(';', ',').replace(':', ',') + words = jieba.lcut(text, cut_all=False) + text = '' + for word in words: + bopomofos = lazy_pinyin(word, BOPOMOFO) + if not re.search('[\u4e00-\u9fff]', word): + text += word + continue + for i in range(len(bopomofos)): + bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) + if text != '': + text += ' ' + text += ''.join(bopomofos) + return text + + +def latin_to_bopomofo(text): + for regex, replacement in _latin_to_bopomofo: + text = re.sub(regex, replacement, text) + return text + + +def bopomofo_to_romaji(text): + for regex, replacement in _bopomofo_to_romaji: + text = re.sub(regex, replacement, text) + return text + + +def bopomofo_to_ipa(text): + for regex, replacement in _bopomofo_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def bopomofo_to_ipa2(text): + for regex, replacement in _bopomofo_to_ipa2: + text = re.sub(regex, replacement, text) + return text + + +def chinese_to_romaji(text): + text = number_to_chinese(text) + text = chinese_to_bopomofo(text) + text = latin_to_bopomofo(text) + text = bopomofo_to_romaji(text) + text = re.sub('i([aoe])', r'y\1', text) + text = re.sub('u([aoəe])', r'w\1', text) + text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', + r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') + text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) + return text + + +def chinese_to_lazy_ipa(text): + text = chinese_to_romaji(text) + for regex, replacement in 
_romaji_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def chinese_to_ipa(text): + text = number_to_chinese(text) + text = chinese_to_bopomofo(text) + text = latin_to_bopomofo(text) + text = bopomofo_to_ipa(text) + text = re.sub('i([aoe])', r'j\1', text) + text = re.sub('u([aoəe])', r'w\1', text) + text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', + r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') + text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) + return text + + +def chinese_to_ipa2(text): + text = number_to_chinese(text) + text = chinese_to_bopomofo(text) + text = latin_to_bopomofo(text) + text = bopomofo_to_ipa2(text) + text = re.sub(r'i([aoe])', r'j\1', text) + text = re.sub(r'u([aoəe])', r'w\1', text) + text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) + text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) + return text diff --git a/text/ngu_dialect.py b/text/ngu_dialect.py new file mode 100644 index 0000000000000000000000000000000000000000..69d0ce6fe5a989843ee059a71ccab793f20f9176 --- /dev/null +++ b/text/ngu_dialect.py @@ -0,0 +1,30 @@ +import re +import opencc + + +dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', + 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', + 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', + 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', + 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', + 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} + +converters = {} + +for dialect in dialects.values(): + try: + converters[dialect] = opencc.OpenCC("chinese_dialect_lexicons/"+dialect) + except Exception:  # skip dialects whose OpenCC lexicon file is unavailable + pass + + +def ngu_dialect_to_ipa(text, dialect): + dialect = dialects[dialect] + text = converters[dialect].convert(text).replace('-','').replace('$',' ') + text = re.sub(r'[、;:]', ',', text) + text = re.sub(r'\s*,\s*', ', ', text) + text = re.sub(r'\s*。\s*', '. ', text) + text = re.sub(r'\s*?\s*', '? ', text) + text = re.sub(r'\s*!\s*', '! 
', text) + text = re.sub(r'\s*$', '', text) + return text diff --git a/text/sanskrit.py b/text/sanskrit.py new file mode 100644 index 0000000000000000000000000000000000000000..0223aaac384a2f850f5bc20651fc18eb964607d0 --- /dev/null +++ b/text/sanskrit.py @@ -0,0 +1,62 @@ +import re +from indic_transliteration import sanscript + + +# List of (iast, ipa) pairs: +_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('a', 'ə'), + ('ā', 'aː'), + ('ī', 'iː'), + ('ū', 'uː'), + ('ṛ', 'ɹ`'), + ('ṝ', 'ɹ`ː'), + ('ḷ', 'l`'), + ('ḹ', 'l`ː'), + ('e', 'eː'), + ('o', 'oː'), + ('k', 'k⁼'), + ('k⁼h', 'kʰ'), + ('g', 'g⁼'), + ('g⁼h', 'gʰ'), + ('ṅ', 'ŋ'), + ('c', 'ʧ⁼'), + ('ʧ⁼h', 'ʧʰ'), + ('j', 'ʥ⁼'), + ('ʥ⁼h', 'ʥʰ'), + ('ñ', 'n^'), + ('ṭ', 't`⁼'), + ('t`⁼h', 't`ʰ'), + ('ḍ', 'd`⁼'), + ('d`⁼h', 'd`ʰ'), + ('ṇ', 'n`'), + ('t', 't⁼'), + ('t⁼h', 'tʰ'), + ('d', 'd⁼'), + ('d⁼h', 'dʰ'), + ('p', 'p⁼'), + ('p⁼h', 'pʰ'), + ('b', 'b⁼'), + ('b⁼h', 'bʰ'), + ('y', 'j'), + ('ś', 'ʃ'), + ('ṣ', 's`'), + ('r', 'ɾ'), + ('l̤', 'l`'), + ('h', 'ɦ'), + ("'", ''), + ('~', '^'), + ('ṃ', '^') +]] + + +def devanagari_to_ipa(text): + text = text.replace('ॐ', 'ओम्') + text = re.sub(r'\s*।\s*$', '.', text) + text = re.sub(r'\s*।\s*', ', ', text) + text = re.sub(r'\s*॥', '.', text) + text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST) + for regex, replacement in _iast_to_ipa: + text = re.sub(regex, replacement, text) + text = re.sub('(.)[`ː]*ḥ', lambda x: + x.group(0)[:-1]+'h'+x.group(1)+'*', text) + return text diff --git a/text/shanghainese.py b/text/shanghainese.py new file mode 100644 index 0000000000000000000000000000000000000000..1c28c17d0dc0d920fd222c909a53d703c95e043b --- /dev/null +++ b/text/shanghainese.py @@ -0,0 +1,64 @@ +import re +import cn2an +import opencc + + +converter = opencc.OpenCC('chinese_dialect_lexicons/zaonhe') + +# List of (Latin alphabet, ipa) pairs: +_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('A', 'ᴇ'), + ('B', 'bi'), + ('C', 'si'), + ('D', 'di'), + ('E', 'i'), + ('F', 'ᴇf'), + ('G', 'dʑi'), + ('H', 'ᴇtɕʰ'), + ('I', 'ᴀi'), + ('J', 'dʑᴇ'), + ('K', 'kʰᴇ'), + ('L', 'ᴇl'), + ('M', 'ᴇm'), + ('N', 'ᴇn'), + ('O', 'o'), + ('P', 'pʰi'), + ('Q', 'kʰiu'), + ('R', 'ᴀl'), + ('S', 'ᴇs'), + ('T', 'tʰi'), + ('U', 'ɦiu'), + ('V', 'vi'), + ('W', 'dᴀbɤliu'), + ('X', 'ᴇks'), + ('Y', 'uᴀi'), + ('Z', 'zᴇ') +]] + + +def _number_to_shanghainese(num): + num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两') + return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) + + +def number_to_shanghainese(text): + return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) + + +def latin_to_ipa(text): + for regex, replacement in _latin_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def shanghainese_to_ipa(text): + text = number_to_shanghainese(text.upper()) + text = converter.convert(text).replace('-','').replace('$',' ') + text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) + text = re.sub(r'[、;:]', ',', text) + text = re.sub(r'\s*,\s*', ', ', text) + text = re.sub(r'\s*。\s*', '. ', text) + text = re.sub(r'\s*?\s*', '? ', text) + text = re.sub(r'\s*!\s*', '! 
', text) + text = re.sub(r'\s*$', '', text) + return text diff --git a/text/thai.py b/text/thai.py new file mode 100644 index 0000000000000000000000000000000000000000..998207c01a85c710a46db1ec8b62c39c2d94bc84 --- /dev/null +++ b/text/thai.py @@ -0,0 +1,44 @@ +import re +from num_thai.thainumbers import NumThai + + +num = NumThai() + +# List of (Latin alphabet, Thai) pairs: +_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('a', 'เอ'), + ('b','บี'), + ('c','ซี'), + ('d','ดี'), + ('e','อี'), + ('f','เอฟ'), + ('g','จี'), + ('h','เอช'), + ('i','ไอ'), + ('j','เจ'), + ('k','เค'), + ('l','แอล'), + ('m','เอ็ม'), + ('n','เอ็น'), + ('o','โอ'), + ('p','พี'), + ('q','คิว'), + ('r','แอร์'), + ('s','เอส'), + ('t','ที'), + ('u','ยู'), + ('v','วี'), + ('w','ดับเบิลยู'), + ('x','เอ็กซ์'), + ('y','วาย'), + ('z','ซี') +]] + + +def num_to_thai(text): + return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text) + +def latin_to_thai(text): + for regex, replacement in _latin_to_thai: + text = re.sub(regex, replacement, text) + return text diff --git a/transforms.py b/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..4793d67ca5a5630e0ffe0f9fb29445c949e64dae --- /dev/null +++ b/transforms.py @@ -0,0 +1,193 @@ +import torch +from torch.nn import functional as F + +import numpy as np + + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = { + 'tails': tails, + 'tail_bound': tail_bound + } + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs + ) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return torch.sum( + inputs[..., None] >= bin_locations, + dim=-1 + ) - 1 + + +def unconstrained_rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == 'linear': + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError('{} tails are not implemented.'.format(tails)) + + outputs[inside_interval_mask], logabsdet[inside_interval_mask] 
= rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + inverse=inverse, + left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative + ) + + return outputs, logabsdet + +def rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0., right=1., bottom=0., top=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError('Input to a transform is not within its domain') + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError('Minimal bin width too large for the number of bins') + if min_bin_height * num_bins > 1.0: + raise ValueError('Minimal bin height too large for the number of bins') + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = (((inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta) + + input_heights * (input_delta - input_derivatives))) + b = (input_heights * input_derivatives + - (inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta)) + c = - input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - 
root).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet diff --git a/utils.py b/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb5b43d0ca2bae496e7871b2094f2ffb26ab642 --- /dev/null +++ b/utils.py @@ -0,0 +1,226 @@ +import os +import glob +import sys +import argparse +import logging +import json +import subprocess +import numpy as np +from scipy.io.wavfile import read +import torch + +MATPLOTLIB_FLAG = False + +logging.basicConfig(stream=sys.stdout, level=logging.ERROR) +logger = logging + + +def load_checkpoint(checkpoint_path, model, optimizer=None): + assert os.path.isfile(checkpoint_path) + checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') + iteration = checkpoint_dict['iteration'] + learning_rate = checkpoint_dict['learning_rate'] + if optimizer is not None: + optimizer.load_state_dict(checkpoint_dict['optimizer']) + saved_state_dict = checkpoint_dict['model'] + if hasattr(model, 'module'): + state_dict = model.module.state_dict() + else: + state_dict = model.state_dict() + new_state_dict = {} + for k, v in state_dict.items(): + try: + new_state_dict[k] = saved_state_dict[k] + except KeyError:  # parameter absent from checkpoint; keep the freshly initialized value + logger.info("%s is not in the checkpoint" % k) + new_state_dict[k] = v + if hasattr(model, 'module'): + model.module.load_state_dict(new_state_dict) + else: + model.load_state_dict(new_state_dict) + logger.info("Loaded checkpoint '{}' (iteration {})".format( + checkpoint_path, iteration)) + return model, optimizer, learning_rate, iteration + + +def plot_spectrogram_to_numpy(spectrogram): + global MATPLOTLIB_FLAG + if not MATPLOTLIB_FLAG: + import matplotlib + matplotlib.use("Agg") + MATPLOTLIB_FLAG = True + mpl_logger = logging.getLogger('matplotlib') + mpl_logger.setLevel(logging.WARNING) + import matplotlib.pylab as plt + import numpy as np + + fig, ax = plt.subplots(figsize=(10, 2)) + im = ax.imshow(spectrogram, aspect="auto", origin="lower", + interpolation='none') + plt.colorbar(im, ax=ax) + plt.xlabel("Frames") + plt.ylabel("Channels") + plt.tight_layout() + + fig.canvas.draw() + data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + plt.close() + return data + + +def plot_alignment_to_numpy(alignment, info=None): + global MATPLOTLIB_FLAG + if not MATPLOTLIB_FLAG: + import matplotlib + matplotlib.use("Agg") + MATPLOTLIB_FLAG = True + mpl_logger = logging.getLogger('matplotlib') + mpl_logger.setLevel(logging.WARNING) + import matplotlib.pylab as plt + import numpy as np + + fig, ax = plt.subplots(figsize=(6, 4)) + im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', + interpolation='none') + fig.colorbar(im, ax=ax) + xlabel = 'Decoder timestep' + if info is not None: + xlabel += '\n\n' + info + plt.xlabel(xlabel) + 
plt.ylabel('Encoder timestep') + plt.tight_layout() + + fig.canvas.draw() + data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + plt.close() + return data + + +def load_wav_to_torch(full_path): + sampling_rate, data = read(full_path) + return torch.FloatTensor(data.astype(np.float32)), sampling_rate + + +def load_filepaths_and_text(filename, split="|"): + with open(filename, encoding='utf-8') as f: + filepaths_and_text = [line.strip().split(split) for line in f] + return filepaths_and_text + + +def get_hparams(init=True): + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--config', type=str, default="./configs/base.json", + help='JSON file for configuration') + parser.add_argument('-m', '--model', type=str, required=True, + help='Model name') + + args = parser.parse_args() + model_dir = os.path.join("./logs", args.model) + + if not os.path.exists(model_dir): + os.makedirs(model_dir) + + config_path = args.config + config_save_path = os.path.join(model_dir, "config.json") + if init: + with open(config_path, "r") as f: + data = f.read() + with open(config_save_path, "w") as f: + f.write(data) + else: + with open(config_save_path, "r") as f: + data = f.read() + config = json.loads(data) + + hparams = HParams(**config) + hparams.model_dir = model_dir + return hparams + + +def get_hparams_from_dir(model_dir): + config_save_path = os.path.join(model_dir, "config.json") + with open(config_save_path, "r") as f: + data = f.read() + config = json.loads(data) + + hparams = HParams(**config) + hparams.model_dir = model_dir + return hparams + + +def get_hparams_from_file(config_path): + with open(config_path, "r", encoding="utf-8") as f: + data = f.read() + config = json.loads(data) + + hparams = HParams(**config) + return hparams + + +def check_git_hash(model_dir): + source_dir = os.path.dirname(os.path.realpath(__file__)) + if not os.path.exists(os.path.join(source_dir, ".git")): + logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format( + source_dir + )) + return + + cur_hash = subprocess.getoutput("git rev-parse HEAD") + + path = os.path.join(model_dir, "githash") + if os.path.exists(path): + saved_hash = open(path).read() + if saved_hash != cur_hash: + logger.warning("git hash values are different. {}(saved) != {}(current)".format( + saved_hash[:8], cur_hash[:8])) + else: + open(path, "w").write(cur_hash) + + +def get_logger(model_dir, filename="train.log"): + global logger + logger = logging.getLogger(os.path.basename(model_dir)) + logger.setLevel(logging.DEBUG) + + formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") + if not os.path.exists(model_dir): + os.makedirs(model_dir) + h = logging.FileHandler(os.path.join(model_dir, filename)) + h.setLevel(logging.DEBUG) + h.setFormatter(formatter) + logger.addHandler(h) + return logger + + +class HParams(): + def __init__(self, **kwargs): + for k, v in kwargs.items(): + if type(v) == dict: + v = HParams(**v) + self[k] = v + + def keys(self): + return self.__dict__.keys() + + def items(self): + return self.__dict__.items() + + def values(self): + return self.__dict__.values() + + def __len__(self): + return len(self.__dict__) + + def __getitem__(self, key): + return getattr(self, key) + + def __setitem__(self, key, value): + return setattr(self, key, value) + + def __contains__(self, key): + return key in self.__dict__ + + def __repr__(self): + return self.__dict__.__repr__()
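[Editor's note — illustrative sketch, not part of this commit.] HParams wraps the JSON configs recursively, so nested values read naturally in both attribute and mapping style; a small self-contained check:

    from utils import HParams, get_hparams_from_file

    hps = HParams(train={'learning_rate': 2e-4}, data={'hop_length': 256})
    assert hps.train.learning_rate == hps['train']['learning_rate'] == 2e-4
    assert 'data' in hps and hps.data.hop_length == 256

    # The LFS-tracked configs above load the same way once fetched, e.g.:
    # hps = get_hparams_from_file('saved_model/2/config.json')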
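[Editor's note — illustrative sketch, not part of this commit.] The piecewise rational-quadratic spline in transforms.py above is invertible by construction: running it forward and then with inverse=True should recover the input, and the two log-determinants should cancel. The bin count and shapes below are illustrative; VITS-style callers supply num_bins - 1 unnormalized derivatives because the unconstrained variant pads the endpoint derivatives itself.

    import torch
    from transforms import piecewise_rational_quadratic_transform

    torch.manual_seed(0)
    num_bins = 10
    x = torch.randn(4)
    w = torch.randn(4, num_bins)        # unnormalized bin widths
    h = torch.randn(4, num_bins)        # unnormalized bin heights
    d = torch.randn(4, num_bins - 1)    # unnormalized interior derivatives

    y, logdet = piecewise_rational_quadratic_transform(
        x, w, h, d, inverse=False, tails='linear', tail_bound=5.0)
    x_back, inv_logdet = piecewise_rational_quadratic_transform(
        y, w, h, d, inverse=True, tails='linear', tail_bound=5.0)

    # Round trip up to float32 round-off; log-determinants cancel.
    assert torch.allclose(x, x_back, atol=1e-4)
    assert torch.allclose(logdet + inv_logdet, torch.zeros_like(logdet), atol=1e-4)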