Upload 42 files
Browse files
- bert/bert-base-japanese-v3/README.md +53 -0
- bert/bert-base-japanese-v3/config.json +19 -0
- bert/bert-base-japanese-v3/vocab.txt +0 -0
- bert/chinese-roberta-wwm-ext-large/.gitattributes +9 -0
- bert/chinese-roberta-wwm-ext-large/.gitignore +1 -0
- bert/chinese-roberta-wwm-ext-large/README.md +57 -0
- bert/chinese-roberta-wwm-ext-large/added_tokens.json +1 -0
- bert/chinese-roberta-wwm-ext-large/config.json +28 -0
- bert/chinese-roberta-wwm-ext-large/special_tokens_map.json +1 -0
- bert/chinese-roberta-wwm-ext-large/tokenizer.json +0 -0
- bert/chinese-roberta-wwm-ext-large/tokenizer_config.json +1 -0
- bert/chinese-roberta-wwm-ext-large/vocab.txt +0 -0
- configs/config.json +95 -0
- filelists/otto.list.cleaned +0 -0
- filelists/train.list +0 -0
- filelists/val.list +4 -0
- monotonic_align/__init__.py +16 -0
- monotonic_align/__pycache__/__init__.cpython-38.pyc +0 -0
- monotonic_align/__pycache__/core.cpython-38.pyc +0 -0
- monotonic_align/core.py +46 -0
- text/__init__.py +28 -0
- text/__pycache__/__init__.cpython-38.pyc +0 -0
- text/__pycache__/chinese.cpython-38.pyc +0 -0
- text/__pycache__/chinese_bert.cpython-38.pyc +0 -0
- text/__pycache__/cleaner.cpython-38.pyc +0 -0
- text/__pycache__/english_bert_mock.cpython-38.pyc +0 -0
- text/__pycache__/japanese.cpython-38.pyc +0 -0
- text/__pycache__/japanese_bert.cpython-38.pyc +0 -0
- text/__pycache__/symbols.cpython-38.pyc +0 -0
- text/__pycache__/tone_sandhi.cpython-38.pyc +0 -0
- text/chinese.py +198 -0
- text/chinese_bert.py +100 -0
- text/cleaner.py +28 -0
- text/cmudict.rep +0 -0
- text/cmudict_cache.pickle +3 -0
- text/english.py +214 -0
- text/english_bert_mock.py +5 -0
- text/japanese.py +586 -0
- text/japanese_bert.py +38 -0
- text/opencpop-strict.txt +429 -0
- text/symbols.py +187 -0
- text/tone_sandhi.py +769 -0
bert/bert-base-japanese-v3/README.md
ADDED
@@ -0,0 +1,53 @@
---
license: apache-2.0
datasets:
- cc100
- wikipedia
language:
- ja
widget:
- text: 東北大学で[MASK]の研究をしています。
---

# BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)

This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.

This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in the [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by WordPiece subword tokenization.
Additionally, the model is trained with whole word masking enabled for the masked language modeling (MLM) objective.

The code for the pretraining is available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).

## Model architecture

The model architecture is the same as the original BERT base model: 12 layers, 768 hidden-state dimensions, and 12 attention heads.

## Training Data

The model is trained on the Japanese portion of the [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.

For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with the [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).

## Tokenization

The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
The vocabulary size is 32768.

We used the [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.

## Training

We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
For the MLM (masked language modeling) objective, we introduced whole word masking, in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.

For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).

## Licenses

The pretrained models are distributed under the Apache License 2.0.

## Acknowledgments

This model is trained with Cloud TPUs provided by the [TPU Research Cloud](https://sites.research.google/trc/about/) program.
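The tokenization described above can be exercised directly with `transformers`. A minimal sketch, assuming `transformers`, `fugashi`, and `unidic-lite` are installed; this upload only contains the config and vocabulary for this model, so the weights would have to come from the upstream checkpoint (assumed here to be `cl-tohoku/bert-base-japanese-v3` on the Hugging Face Hub):

```python
# Minimal sketch: run the README's widget example through a fill-mask pipeline.
# "cl-tohoku/bert-base-japanese-v3" is the assumed upstream checkpoint; swap in the
# local ./bert/bert-base-japanese-v3 directory if the weights are present there.
from transformers import AutoTokenizer, AutoModelForMaskedLM, pipeline

name = "cl-tohoku/bert-base-japanese-v3"
tokenizer = AutoTokenizer.from_pretrained(name)   # MeCab (unidic-lite) + WordPiece
model = AutoModelForMaskedLM.from_pretrained(name)

fill = pipeline("fill-mask", model=model, tokenizer=tokenizer)
for candidate in fill("東北大学で[MASK]の研究をしています。"):
    print(candidate["token_str"], round(candidate["score"], 3))
```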
bert/bert-base-japanese-v3/config.json
ADDED
@@ -0,0 +1,19 @@
{
  "architectures": [
    "BertForPreTraining"
  ],
  "attention_probs_dropout_prob": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "type_vocab_size": 2,
  "vocab_size": 32768
}
bert/bert-base-japanese-v3/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
bert/chinese-roberta-wwm-ext-large/.gitattributes
ADDED
@@ -0,0 +1,9 @@
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tar.gz filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
bert/chinese-roberta-wwm-ext-large/.gitignore
ADDED
@@ -0,0 +1 @@
*.bin
bert/chinese-roberta-wwm-ext-large/README.md
ADDED
@@ -0,0 +1,57 @@
---
language:
- zh
tags:
- bert
license: "apache-2.0"
---

# Please use 'Bert'-related functions to load this model!

## Chinese BERT with Whole Word Masking
To further accelerate Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.

**[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu

This repository is developed based on: https://github.com/google-research/bert

You may also be interested in:
- Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
- Chinese MacBERT: https://github.com/ymcui/MacBERT
- Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
- Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
- Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer

More resources by HFL: https://github.com/ymcui/HFL-Anthology

## Citation
If you find the technical report or resource useful, please cite the following technical report in your paper.
- Primary: https://arxiv.org/abs/2004.13922
```
@inproceedings{cui-etal-2020-revisiting,
    title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
    author = "Cui, Yiming  and
      Che, Wanxiang  and
      Liu, Ting  and
      Qin, Bing  and
      Wang, Shijin  and
      Hu, Guoping",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
    pages = "657--668",
}
```
- Secondary: https://arxiv.org/abs/1906.08101
```
@article{chinese-bert-wwm,
  title={Pre-Training with Whole Word Masking for Chinese BERT},
  author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
  journal={arXiv preprint arXiv:1906.08101},
  year={2019}
}
```
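A minimal sketch of the "use Bert-related functions" note above, assuming `transformers` is installed; the weights (`pytorch_model.bin`) are excluded from this upload by the `.gitignore`, so they would have to be fetched separately (assumed here to be available as `hfl/chinese-roberta-wwm-ext-large` on the Hub):

```python
# Minimal sketch: despite the RoBERTa-style training, this checkpoint is loaded with
# the Bert* classes, as the README requests.
import torch
from transformers import BertTokenizer, BertModel

name = "hfl/chinese-roberta-wwm-ext-large"  # or ./bert/chinese-roberta-wwm-ext-large once weights are present
tokenizer = BertTokenizer.from_pretrained(name)
model = BertModel.from_pretrained(name)

inputs = tokenizer("使用全词掩码的中文预训练模型", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)
print(out.last_hidden_state.shape)  # (1, sequence_length, 1024)
```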
bert/chinese-roberta-wwm-ext-large/added_tokens.json
ADDED
@@ -0,0 +1 @@
{}
bert/chinese-roberta-wwm-ext-large/config.json
ADDED
@@ -0,0 +1,28 @@
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "directionality": "bidi",
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "output_past": true,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "type_vocab_size": 2,
  "vocab_size": 21128
}
bert/chinese-roberta-wwm-ext-large/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
bert/chinese-roberta-wwm-ext-large/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
bert/chinese-roberta-wwm-ext-large/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"init_inputs": []}
bert/chinese-roberta-wwm-ext-large/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
configs/config.json
ADDED
@@ -0,0 +1,95 @@
{
  "train": {
    "log_interval": 200,
    "eval_interval": 1000,
    "seed": 52,
    "epochs": 10000,
    "learning_rate": 0.0003,
    "betas": [0.8, 0.99],
    "eps": 1e-09,
    "batch_size": 8,
    "fp16_run": false,
    "lr_decay": 0.999875,
    "segment_size": 16384,
    "init_lr_ratio": 1,
    "warmup_epochs": 0,
    "c_mel": 45,
    "c_kl": 1.0,
    "skip_optimizer": true
  },
  "data": {
    "training_files": "filelists/train.list",
    "validation_files": "filelists/val.list",
    "max_wav_value": 32768.0,
    "sampling_rate": 44100,
    "filter_length": 2048,
    "hop_length": 512,
    "win_length": 2048,
    "n_mel_channels": 128,
    "mel_fmin": 0.0,
    "mel_fmax": null,
    "add_blank": true,
    "n_speakers": 256,
    "cleaned_text": true,
    "spk2id": {
      "otto": 0
    }
  },
  "model": {
    "use_spk_conditioned_encoder": true,
    "use_noise_scaled_mas": true,
    "use_mel_posterior_encoder": false,
    "use_duration_discriminator": true,
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0.1,
    "resblock": "1",
    "resblock_kernel_sizes": [3, 7, 11],
    "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    "upsample_rates": [8, 8, 2, 2, 2],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [16, 16, 8, 2, 2],
    "n_layers_q": 3,
    "use_spectral_norm": false,
    "gin_channels": 256
  }
}
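A small sketch of reading this training config; purely illustrative, since the repository's own training code presumably loads it through its own utilities:

```python
# Minimal sketch: read configs/config.json and derive a couple of quantities implied by
# the "data" block above (44.1 kHz audio with hop 512 -> ~11.6 ms per mel frame).
import json

with open("configs/config.json") as f:
    cfg = json.load(f)

data = cfg["data"]
frame_ms = data["hop_length"] / data["sampling_rate"] * 1000
print(f'{data["n_mel_channels"]} mel channels, ~{frame_ms:.1f} ms per frame')
print("speaker map:", data["spk2id"])
```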
filelists/otto.list.cleaned
ADDED
The diff for this file is too large to render. See raw diff.
filelists/train.list
ADDED
The diff for this file is too large to render. See raw diff.
filelists/val.list
ADDED
@@ -0,0 +1,4 @@
./dataset/otto/otto_557.wav|otto|ZH|游戏的时候你也可能是这个造性但是你在看我的时候你觉得哎呀你不应该这样你不可以你要是骂人了那我就得骂你因为你骂别人了呀你老是给自己弄一个师出有名的|_ y ou x i d e sh ir h ou n i y E k e n eng sh ir zh e g e z ao x ing d an sh ir n i z ai k an w o d e sh ir h ou n i j ve d e AA ai y a n i b u y ing g ai zh e y ang n i b u k e y i n i y ao sh ir m a r en l e n a w o j iu d e m a n i y in w ei n i m a b ie r en l e y a n i l ao sh ir g ei z i0 j i n ong y i g e sh ir ch u y ou m ing d e _|0 2 2 4 4 5 5 2 2 5 5 2 2 3 3 3 3 2 2 4 4 4 4 5 5 4 4 4 4 4 4 4 4 3 3 4 4 4 4 3 3 5 5 2 2 5 5 3 3 2 2 5 5 1 1 1 1 3 3 4 4 1 1 1 1 4 4 4 4 3 3 4 4 2 2 3 3 3 3 4 4 4 4 4 4 2 2 5 5 4 4 3 3 4 4 5 5 4 4 3 3 1 1 4 4 3 3 4 4 2 2 2 2 5 5 5 5 2 2 3 3 4 4 3 3 4 4 3 3 4 4 2 2 5 5 1 1 1 1 3 3 2 2 5 5 0|1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1
./dataset/otto/otto_81.wav|otto|ZH|艾克去下路一波的三打九百经济场我有什么办法我他妈逼有什么办法操你妈|_ AA ai k e q v x ia l u y i b o d e s an d a j iu b ai j ing j i ch ang w o y ou sh en m e b an f a w o t a m a b i y ou sh en m e b an f a c ao n i m a _|0 4 4 4 4 4 4 4 4 4 4 4 4 1 1 5 5 1 1 3 3 2 2 3 3 1 1 4 4 3 3 2 2 3 3 2 2 5 5 4 4 3 3 3 3 1 1 1 1 1 1 3 3 2 2 5 5 4 4 3 3 1 1 3 3 1 1 0|1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1
./dataset/otto/otto_94.wav|otto|ZH|我已经尽我打野所有的力量所有这把游戏能做的任何事情了|_ w o y i j ing j in w o d a y E s uo y ou d e l i l iang s uo y ou zh e b a y ou x i n eng z uo d e r en h e sh ir q ing l e _|0 2 2 3 3 1 1 2 2 3 3 2 2 3 3 2 2 3 3 5 5 4 4 4 4 2 2 3 3 4 4 3 3 2 2 4 4 2 2 4 4 5 5 4 4 2 2 4 4 5 5 5 5 0|1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1
./dataset/otto/otto_225.wav|otto|ZH|那我给你抽不是理所应当的我难道非得说我得收一千礼物我再给你抽一千|_ n a w o g ei n i ch ou b u sh ir l i s uo y ing d ang d e w o n an d ao f ei d ei sh uo w o d e sh ou y i q ian l i w u w o z ai g ei n i ch ou y i q ian _|0 4 4 3 3 2 2 3 3 1 1 2 2 4 4 2 2 3 3 1 1 1 1 5 5 3 3 2 2 4 4 1 1 5 5 1 1 3 3 2 2 1 1 1 1 1 1 3 3 4 4 3 3 4 4 2 2 3 3 1 1 1 1 1 1 0|1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1
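Each filelist line above is pipe-separated: wav path, speaker name, language tag, normalized text, phoneme string, tone string, and per-phoneme grouping counts. A minimal parsing sketch (the field names are descriptive labels of mine, not identifiers from this repository):

```python
# Minimal sketch: split one filelist line into its seven pipe-separated fields.
def parse_filelist_line(line: str) -> dict:
    wav, speaker, lang, text, phones, tones, word2ph = line.rstrip("\n").split("|")
    return {
        "wav": wav,
        "speaker": speaker,
        "lang": lang,                                 # e.g. "ZH"
        "text": text,                                 # normalized transcript
        "phones": phones.split(" "),
        "tones": [int(t) for t in tones.split(" ")],
        "word2ph": [int(n) for n in word2ph.split(" ")],
    }

with open("filelists/val.list", encoding="utf-8") as f:
    entry = parse_filelist_line(f.readline())
print(entry["speaker"], len(entry["phones"]), "phonemes")
```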
monotonic_align/__init__.py
ADDED
@@ -0,0 +1,16 @@
from numpy import zeros, int32, float32
from torch import from_numpy

from .core import maximum_path_jit


def maximum_path(neg_cent, mask):
    device = neg_cent.device
    dtype = neg_cent.dtype
    neg_cent = neg_cent.data.cpu().numpy().astype(float32)
    path = zeros(neg_cent.shape, dtype=int32)

    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
    maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
    return from_numpy(path).to(device=device, dtype=dtype)
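A small usage sketch of the wrapper above; the [batch, frames, tokens] orientation is my reading of how the mask is reduced in `maximum_path`, not code taken from this upload:

```python
# Minimal sketch: hard monotonic alignment over a random score matrix. Every position
# is marked valid by the all-ones mask; the result is a 0/1 path of the same shape.
import torch
from monotonic_align import maximum_path

frames, tokens = 10, 6                      # requires tokens <= frames
neg_cent = torch.randn(1, frames, tokens)   # alignment scores (higher = better match)
mask = torch.ones_like(neg_cent)

path = maximum_path(neg_cent, mask)
print(path.shape)                           # torch.Size([1, 10, 6])
print(path[0].sum(dim=1))                   # each frame row selects exactly one token
```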
monotonic_align/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (733 Bytes).
monotonic_align/__pycache__/core.cpython-38.pyc
ADDED
Binary file (988 Bytes).
monotonic_align/core.py
ADDED
@@ -0,0 +1,46 @@
import numba


@numba.jit(
    numba.void(
        numba.int32[:, :, ::1],
        numba.float32[:, :, ::1],
        numba.int32[::1],
        numba.int32[::1],
    ),
    nopython=True,
    nogil=True,
)
def maximum_path_jit(paths, values, t_ys, t_xs):
    b = paths.shape[0]
    max_neg_val = -1e9
    for i in range(int(b)):
        path = paths[i]
        value = values[i]
        t_y = t_ys[i]
        t_x = t_xs[i]

        v_prev = v_cur = 0.0
        index = t_x - 1

        for y in range(t_y):
            for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
                if x == y:
                    v_cur = max_neg_val
                else:
                    v_cur = value[y - 1, x]
                if x == 0:
                    if y == 0:
                        v_prev = 0.0
                    else:
                        v_prev = max_neg_val
                else:
                    v_prev = value[y - 1, x - 1]
                value[y, x] += max(v_prev, v_cur)

        for y in range(t_y - 1, -1, -1):
            path[y, index] = 1
            if index != 0 and (
                index == y or value[y - 1, index] < value[y - 1, index - 1]
            ):
                index = index - 1
text/__init__.py
ADDED
@@ -0,0 +1,28 @@
from text.symbols import *

_symbol_to_id = {s: i for i, s in enumerate(symbols)}


def cleaned_text_to_sequence(cleaned_text, tones, language):
    """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    Args:
      text: string to convert to a sequence
    Returns:
      List of integers corresponding to the symbols in the text
    """
    phones = [_symbol_to_id[symbol] for symbol in cleaned_text]
    tone_start = language_tone_start_map[language]
    tones = [i + tone_start for i in tones]
    lang_id = language_id_map[language]
    lang_ids = [lang_id for i in phones]
    return phones, tones, lang_ids


def get_bert(norm_text, word2ph, language, device):
    from .chinese_bert import get_bert_feature as zh_bert
    from .english_bert_mock import get_bert_feature as en_bert
    from .japanese_bert import get_bert_feature as jp_bert

    lang_bert_func_map = {"ZH": zh_bert, "EN": en_bert, "JP": jp_bert}
    bert = lang_bert_func_map[language](norm_text, word2ph, device)
    return bert
text/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (1.58 kB).
text/__pycache__/chinese.cpython-38.pyc
ADDED
Binary file (4.52 kB).
text/__pycache__/chinese_bert.cpython-38.pyc
ADDED
Binary file (1.64 kB).
text/__pycache__/cleaner.cpython-38.pyc
ADDED
Binary file (953 Bytes).
text/__pycache__/english_bert_mock.cpython-38.pyc
ADDED
Binary file (319 Bytes).
text/__pycache__/japanese.cpython-38.pyc
ADDED
Binary file (14 kB).
text/__pycache__/japanese_bert.cpython-38.pyc
ADDED
Binary file (1.17 kB).
text/__pycache__/symbols.cpython-38.pyc
ADDED
Binary file (1.84 kB).
text/__pycache__/tone_sandhi.cpython-38.pyc
ADDED
Binary file (15.6 kB).
text/chinese.py
ADDED
@@ -0,0 +1,198 @@
import os
import re

import cn2an
from pypinyin import lazy_pinyin, Style

from text.symbols import punctuation
from text.tone_sandhi import ToneSandhi

current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
    line.split("\t")[0]: line.strip().split("\t")[1]
    for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}

import jieba.posseg as psg


rep_map = {
    ":": ",", ";": ",", ",": ",", "。": ".", "!": "!", "?": "?",
    "\n": ".", "·": ",", "、": ",", "...": "…", "$": ".",
    "“": "'", "”": "'", "‘": "'", "’": "'",
    "(": "'", ")": "'", "(": "'", ")": "'",
    "《": "'", "》": "'", "【": "'", "】": "'", "[": "'", "]": "'",
    "—": "-", "~": "-", "~": "-", "「": "'", "」": "'",
}

tone_modifier = ToneSandhi()


def replace_punctuation(text):
    text = text.replace("嗯", "恩").replace("呣", "母")
    pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))

    replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)

    replaced_text = re.sub(
        r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text
    )

    return replaced_text


def g2p(text):
    pattern = r"(?<=[{0}])\s*".format("".join(punctuation))
    sentences = [i for i in re.split(pattern, text) if i.strip() != ""]
    phones, tones, word2ph = _g2p(sentences)
    assert sum(word2ph) == len(phones)
    assert len(word2ph) == len(text)  # Sometimes this crashes; you can wrap it in a try-except.
    phones = ["_"] + phones + ["_"]
    tones = [0] + tones + [0]
    word2ph = [1] + word2ph + [1]
    return phones, tones, word2ph


def _get_initials_finals(word):
    initials = []
    finals = []
    orig_initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
    orig_finals = lazy_pinyin(
        word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
    )
    for c, v in zip(orig_initials, orig_finals):
        initials.append(c)
        finals.append(v)
    return initials, finals


def _g2p(segments):
    phones_list = []
    tones_list = []
    word2ph = []
    for seg in segments:
        # Remove all English words from the sentence
        seg = re.sub("[a-zA-Z]+", "", seg)
        seg_cut = psg.lcut(seg)
        initials = []
        finals = []
        seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
        for word, pos in seg_cut:
            if pos == "eng":
                continue
            sub_initials, sub_finals = _get_initials_finals(word)
            sub_finals = tone_modifier.modified_tone(word, pos, sub_finals)
            initials.append(sub_initials)
            finals.append(sub_finals)

            # assert len(sub_initials) == len(sub_finals) == len(word)
        initials = sum(initials, [])
        finals = sum(finals, [])

        for c, v in zip(initials, finals):
            raw_pinyin = c + v
            # NOTE: post process for pypinyin outputs
            # we discriminate i, ii and iii
            if c == v:
                assert c in punctuation
                phone = [c]
                tone = "0"
                word2ph.append(1)
            else:
                v_without_tone = v[:-1]
                tone = v[-1]

                pinyin = c + v_without_tone
                assert tone in "12345"

                if c:
                    # syllable with an initial
                    v_rep_map = {
                        "uei": "ui",
                        "iou": "iu",
                        "uen": "un",
                    }
                    if v_without_tone in v_rep_map.keys():
                        pinyin = c + v_rep_map[v_without_tone]
                else:
                    # standalone final
                    pinyin_rep_map = {
                        "ing": "ying",
                        "i": "yi",
                        "in": "yin",
                        "u": "wu",
                    }
                    if pinyin in pinyin_rep_map.keys():
                        pinyin = pinyin_rep_map[pinyin]
                    else:
                        single_rep_map = {
                            "v": "yu",
                            "e": "e",
                            "i": "y",
                            "u": "w",
                        }
                        if pinyin[0] in single_rep_map.keys():
                            pinyin = single_rep_map[pinyin[0]] + pinyin[1:]

                assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
                phone = pinyin_to_symbol_map[pinyin].split(" ")
                word2ph.append(len(phone))

            phones_list += phone
            tones_list += [int(tone)] * len(phone)
    return phones_list, tones_list, word2ph


def text_normalize(text):
    numbers = re.findall(r"\d+(?:\.?\d+)?", text)
    for number in numbers:
        text = text.replace(number, cn2an.an2cn(number), 1)
    text = replace_punctuation(text)
    return text


def get_bert_feature(text, word2ph):
    from text import chinese_bert

    return chinese_bert.get_bert_feature(text, word2ph)


if __name__ == "__main__":
    from text.chinese_bert import get_bert_feature

    text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
    text = text_normalize(text)
    print(text)
    phones, tones, word2ph = g2p(text)
    bert = get_bert_feature(text, word2ph)

    print(phones, tones, word2ph, bert.shape)


    # Example usage:
    # text = "这是一个示例文本:,你好!这是一个测试...."
    # print(g2p_paddle(text))  # output: 这是一个示例文本你好这是一个测试
text/chinese_bert.py
ADDED
@@ -0,0 +1,100 @@
import torch
import sys
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large")

models = dict()


def get_bert_feature(text, word2ph, device=None):
    if (
        sys.platform == "darwin"
        and torch.backends.mps.is_available()
        and device == "cpu"
    ):
        device = "mps"
    if not device:
        device = "cuda"
    if device not in models.keys():
        models[device] = AutoModelForMaskedLM.from_pretrained(
            "./bert/chinese-roberta-wwm-ext-large"
        ).to(device)
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt")
        for i in inputs:
            inputs[i] = inputs[i].to(device)
        res = models[device](**inputs, output_hidden_states=True)
        res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()

    assert len(word2ph) == len(text) + 2
    word2phone = word2ph
    phone_level_feature = []
    for i in range(len(word2phone)):
        repeat_feature = res[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)

    return phone_level_feature.T


if __name__ == "__main__":
    import torch

    word_level_feature = torch.rand(38, 1024)  # 38 words, each with a 1024-dim feature
    word2phone = [
        1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2,
        2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1,
    ]

    # total number of frames
    total_frames = sum(word2phone)
    print(word_level_feature.shape)
    print(word2phone)
    phone_level_feature = []
    for i in range(len(word2phone)):
        print(word_level_feature[i].shape)

        # repeat each word's feature word2phone[i] times
        repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)
    print(phone_level_feature.shape)  # torch.Size([36, 1024])
text/cleaner.py
ADDED
@@ -0,0 +1,28 @@
from text import chinese, japanese, cleaned_text_to_sequence


language_module_map = {"ZH": chinese, "JP": japanese}


def clean_text(text, language):
    language_module = language_module_map[language]
    norm_text = language_module.text_normalize(text)
    phones, tones, word2ph = language_module.g2p(norm_text)
    return norm_text, phones, tones, word2ph


def clean_text_bert(text, language):
    language_module = language_module_map[language]
    norm_text = language_module.text_normalize(text)
    phones, tones, word2ph = language_module.g2p(norm_text)
    bert = language_module.get_bert_feature(norm_text, word2ph)
    return phones, tones, bert


def text_to_sequence(text, language):
    norm_text, phones, tones, word2ph = clean_text(text, language)
    return cleaned_text_to_sequence(phones, tones, language)


if __name__ == "__main__":
    pass
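A small end-to-end sketch of this front end for Chinese text, tying `clean_text` to `cleaned_text_to_sequence` from `text/__init__.py`; it assumes the repository's dependencies (jieba, pypinyin, cn2an) are installed and that the script runs from the repository root so the relative paths resolve (the example sentence is my own):

```python
# Minimal sketch: normalize a sentence, convert it to phonemes and tones, then map
# everything to integer IDs for the model.
from text.cleaner import clean_text
from text import cleaned_text_to_sequence

norm_text, phones, tones, word2ph = clean_text("今天天气真好。", "ZH")
phone_ids, tone_ids, lang_ids = cleaned_text_to_sequence(phones, tones, "ZH")

print(norm_text)          # punctuation unified, digits spelled out
print(phones, tones)      # phoneme symbols with per-phoneme tone indices
print(len(phone_ids) == len(tone_ids) == len(lang_ids))  # True
```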
text/cmudict.rep
ADDED
The diff for this file is too large to render. See raw diff.
text/cmudict_cache.pickle
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9b21b20325471934ba92f2e4a5976989e7d920caa32e7a286eacb027d197949
size 6212655
text/english.py
ADDED
@@ -0,0 +1,214 @@
import pickle
import os
import re
from g2p_en import G2p

from text import symbols

current_file_path = os.path.dirname(__file__)
CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle")
_g2p = G2p()

arpa = {
    "AH0", "S", "AH1", "EY2", "AE2", "EH0", "OW2", "UH0", "NG", "B", "G", "AY0",
    "M", "AA0", "F", "AO0", "ER2", "UH1", "IY1", "AH2", "DH", "IY0", "EY1", "IH0",
    "K", "N", "W", "IY2", "T", "AA1", "ER1", "EH2", "OY0", "UH2", "UW1", "Z",
    "AW2", "AW1", "V", "UW2", "AA2", "ER", "AW0", "UW0", "R", "OW1", "EH1", "ZH",
    "AE0", "IH2", "IH", "Y", "JH", "P", "AY1", "EY0", "OY2", "TH", "HH", "D",
    "ER0", "CH", "AO1", "AE1", "AO2", "OY1", "AY2", "IH1", "OW0", "L", "SH",
}


def post_replace_ph(ph):
    rep_map = {
        ":": ",",
        ";": ",",
        ",": ",",
        "。": ".",
        "!": "!",
        "?": "?",
        "\n": ".",
        "·": ",",
        "、": ",",
        "...": "…",
        "v": "V",
    }
    if ph in rep_map.keys():
        ph = rep_map[ph]
    if ph in symbols:
        return ph
    if ph not in symbols:
        ph = "UNK"
    return ph


def read_dict():
    g2p_dict = {}
    start_line = 49
    with open(CMU_DICT_PATH) as f:
        line = f.readline()
        line_index = 1
        while line:
            if line_index >= start_line:
                line = line.strip()
                word_split = line.split(" ")
                word = word_split[0]

                syllable_split = word_split[1].split(" - ")
                g2p_dict[word] = []
                for syllable in syllable_split:
                    phone_split = syllable.split(" ")
                    g2p_dict[word].append(phone_split)

            line_index = line_index + 1
            line = f.readline()

    return g2p_dict


def cache_dict(g2p_dict, file_path):
    with open(file_path, "wb") as pickle_file:
        pickle.dump(g2p_dict, pickle_file)


def get_dict():
    if os.path.exists(CACHE_PATH):
        with open(CACHE_PATH, "rb") as pickle_file:
            g2p_dict = pickle.load(pickle_file)
    else:
        g2p_dict = read_dict()
        cache_dict(g2p_dict, CACHE_PATH)

    return g2p_dict


eng_dict = get_dict()


def refine_ph(phn):
    tone = 0
    if re.search(r"\d$", phn):
        tone = int(phn[-1]) + 1
        phn = phn[:-1]
    return phn.lower(), tone


def refine_syllables(syllables):
    tones = []
    phonemes = []
    for phn_list in syllables:
        for i in range(len(phn_list)):
            phn = phn_list[i]
            phn, tone = refine_ph(phn)
            phonemes.append(phn)
            tones.append(tone)
    return phonemes, tones


def text_normalize(text):
    # todo: eng text normalize
    return text


def g2p(text):
    phones = []
    tones = []
    words = re.split(r"([,;.\-\?\!\s+])", text)
    for w in words:
        if w.upper() in eng_dict:
            phns, tns = refine_syllables(eng_dict[w.upper()])
            phones += phns
            tones += tns
        else:
            phone_list = list(filter(lambda p: p != " ", _g2p(w)))
            for ph in phone_list:
                if ph in arpa:
                    ph, tn = refine_ph(ph)
                    phones.append(ph)
                    tones.append(tn)
                else:
                    phones.append(ph)
                    tones.append(0)
    # todo: implement word2ph
    word2ph = [1 for i in phones]

    phones = [post_replace_ph(i) for i in phones]
    return phones, tones, word2ph


if __name__ == "__main__":
    # print(get_dict())
    # print(eng_word_to_phoneme("hello"))
    print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
    # all_phones = set()
    # for k, syllables in eng_dict.items():
    #     for group in syllables:
    #         for ph in group:
    #             all_phones.add(ph)
    # print(all_phones)
text/english_bert_mock.py
ADDED
@@ -0,0 +1,5 @@
import torch


def get_bert_feature(norm_text, word2ph):
    return torch.zeros(1024, sum(word2ph))
text/japanese.py
ADDED
@@ -0,0 +1,586 @@
# Convert Japanese text to phonemes which is
# compatible with Julius https://github.com/julius-speech/segmentation-kit
import re
import unicodedata

from transformers import AutoTokenizer

from text import punctuation, symbols

try:
    import MeCab
except ImportError as e:
    raise ImportError("Japanese requires mecab-python3 and unidic-lite.") from e
from num2words import num2words

_CONVRULES = [
    # Conversion of 2 letters
    "アァ/ a a", "イィ/ i i", "イェ/ i e", "イャ/ y a", "ウゥ/ u:", "エェ/ e e",
    "オォ/ o:", "カァ/ k a:", "キィ/ k i:", "クゥ/ k u:", "クャ/ ky a", "クュ/ ky u",
    "クョ/ ky o", "ケェ/ k e:", "コォ/ k o:", "ガァ/ g a:", "ギィ/ g i:", "グゥ/ g u:",
    "グャ/ gy a", "グュ/ gy u", "グョ/ gy o", "ゲェ/ g e:", "ゴォ/ g o:", "サァ/ s a:",
    "シィ/ sh i:", "スゥ/ s u:", "スャ/ sh a", "スュ/ sh u", "スョ/ sh o", "セェ/ s e:",
    "ソォ/ s o:", "ザァ/ z a:", "ジィ/ j i:", "ズゥ/ z u:", "ズャ/ zy a", "ズュ/ zy u",
    "ズョ/ zy o", "ゼェ/ z e:", "ゾォ/ z o:", "タァ/ t a:", "チィ/ ch i:", "ツァ/ ts a",
    "ツィ/ ts i", "ツゥ/ ts u:", "ツャ/ ch a", "ツュ/ ch u", "ツョ/ ch o", "ツェ/ ts e",
    "ツォ/ ts o", "テェ/ t e:", "トォ/ t o:", "ダァ/ d a:", "ヂィ/ j i:", "ヅゥ/ d u:",
    "ヅャ/ zy a", "ヅュ/ zy u", "ヅョ/ zy o", "デェ/ d e:", "ドォ/ d o:", "ナァ/ n a:",
    "ニィ/ n i:", "ヌゥ/ n u:", "ヌャ/ ny a", "ヌュ/ ny u", "ヌョ/ ny o", "ネェ/ n e:",
    "ノォ/ n o:", "ハァ/ h a:", "ヒィ/ h i:", "フゥ/ f u:", "フャ/ hy a", "フュ/ hy u",
    "フョ/ hy o", "ヘェ/ h e:", "ホォ/ h o:", "バァ/ b a:", "ビィ/ b i:", "ブゥ/ b u:",
    "フャ/ hy a", "ブュ/ by u", "フョ/ hy o", "ベェ/ b e:", "ボォ/ b o:", "パァ/ p a:",
    "ピィ/ p i:", "プゥ/ p u:", "プャ/ py a", "プュ/ py u", "プョ/ py o", "ペェ/ p e:",
    "ポォ/ p o:", "マァ/ m a:", "ミィ/ m i:", "ムゥ/ m u:", "ムャ/ my a", "ムュ/ my u",
    "ムョ/ my o", "メェ/ m e:", "モォ/ m o:", "ヤァ/ y a:", "ユゥ/ y u:", "ユャ/ y a:",
    "ユュ/ y u:", "ユョ/ y o:", "ヨォ/ y o:", "ラァ/ r a:", "リィ/ r i:", "ルゥ/ r u:",
    "ルャ/ ry a", "ルュ/ ry u", "ルョ/ ry o", "レェ/ r e:", "ロォ/ r o:", "ワァ/ w a:",
    "ヲォ/ o:", "ディ/ d i", "デェ/ d e:", "デャ/ dy a", "デュ/ dy u", "デョ/ dy o",
    "ティ/ t i", "テェ/ t e:", "テャ/ ty a", "テュ/ ty u", "テョ/ ty o", "スィ/ s i",
    "ズァ/ z u a", "ズィ/ z i", "ズゥ/ z u", "ズャ/ zy a", "ズュ/ zy u", "ズョ/ zy o",
    "ズェ/ z e", "ズォ/ z o", "キャ/ ky a", "キュ/ ky u", "キョ/ ky o", "シャ/ sh a",
    "シュ/ sh u", "シェ/ sh e", "ショ/ sh o", "チャ/ ch a", "チュ/ ch u", "チェ/ ch e",
    "チョ/ ch o", "トゥ/ t u", "トャ/ ty a", "トュ/ ty u", "トョ/ ty o", "ドァ/ d o a",
    "ドゥ/ d u", "ドャ/ dy a", "ドュ/ dy u", "ドョ/ dy o", "ドォ/ d o:", "ニャ/ ny a",
    "ニュ/ ny u", "ニョ/ ny o", "ヒャ/ hy a", "ヒュ/ hy u", "ヒョ/ hy o", "ミャ/ my a",
    "ミュ/ my u", "ミョ/ my o", "リャ/ ry a", "リュ/ ry u", "リョ/ ry o", "ギャ/ gy a",
    "ギュ/ gy u", "ギョ/ gy o", "ヂェ/ j e", "ヂャ/ j a", "ヂュ/ j u", "ヂョ/ j o",
    "ジェ/ j e", "ジャ/ j a", "ジュ/ j u", "ジョ/ j o", "ビャ/ by a", "ビュ/ by u",
    "ビョ/ by o", "ピャ/ py a", "ピュ/ py u", "ピョ/ py o", "ウァ/ u a", "ウィ/ w i",
    "ウェ/ w e", "ウォ/ w o", "ファ/ f a", "フィ/ f i", "フゥ/ f u", "フャ/ hy a",
    "フュ/ hy u", "フョ/ hy o", "フェ/ f e", "フォ/ f o", "ヴァ/ b a", "ヴィ/ b i",
    "ヴェ/ b e", "ヴォ/ b o", "ヴュ/ by u",
    # Conversion of 1 letter
    "ア/ a", "イ/ i", "ウ/ u", "エ/ e", "オ/ o", "カ/ k a", "キ/ k i", "ク/ k u",
    "ケ/ k e", "コ/ k o", "サ/ s a", "シ/ sh i", "ス/ s u", "セ/ s e", "ソ/ s o", "タ/ t a",
    "チ/ ch i", "ツ/ ts u", "テ/ t e", "ト/ t o", "ナ/ n a", "ニ/ n i", "ヌ/ n u", "ネ/ n e",
    "ノ/ n o", "ハ/ h a", "ヒ/ h i", "フ/ f u", "ヘ/ h e", "ホ/ h o", "マ/ m a", "ミ/ m i",
    "ム/ m u", "メ/ m e", "モ/ m o", "ラ/ r a", "リ/ r i", "ル/ r u", "レ/ r e", "ロ/ r o",
    "ガ/ g a", "ギ/ g i", "グ/ g u", "ゲ/ g e", "ゴ/ g o", "ザ/ z a", "ジ/ j i", "ズ/ z u",
    "ゼ/ z e", "ゾ/ z o", "ダ/ d a", "ヂ/ j i", "ヅ/ z u", "デ/ d e", "ド/ d o", "バ/ b a",
    "ビ/ b i", "ブ/ b u", "ベ/ b e", "ボ/ b o", "パ/ p a", "ピ/ p i", "プ/ p u", "ペ/ p e",
    "ポ/ p o", "ヤ/ y a", "ユ/ y u", "ヨ/ y o", "ワ/ w a", "ヰ/ i", "ヱ/ e", "ヲ/ o",
    "ン/ N", "ッ/ q", "ヴ/ b u", "ー/:",
    # Try converting broken text
    "ァ/ a", "ィ/ i", "ゥ/ u", "ェ/ e", "ォ/ o", "ヮ/ w a", "ォ/ o",
    # Symbols
    "、/ ,", "。/ .", "!/ !", "?/ ?", "・/ ,",
]

_COLON_RX = re.compile(":+")
_REJECT_RX = re.compile("[^ a-zA-Z:,.?]")


def _makerulemap():
    l = [tuple(x.split("/")) for x in _CONVRULES]
    return tuple({k: v for k, v in l if len(k) == i} for i in (1, 2))


_RULEMAP1, _RULEMAP2 = _makerulemap()


def kata2phoneme(text: str) -> str:
    """Convert katakana text to phonemes."""
    text = text.strip()
    res = []
    while text:
        if len(text) >= 2:
            x = _RULEMAP2.get(text[:2])
            if x is not None:
                text = text[2:]
                res += x.split(" ")[1:]
                continue
        x = _RULEMAP1.get(text[0])
        if x is not None:
            text = text[1:]
            res += x.split(" ")[1:]
            continue
        res.append(text[0])
        text = text[1:]
    # res = _COLON_RX.sub(":", res)
    return res


_KATAKANA = "".join(chr(ch) for ch in range(ord("ァ"), ord("ン") + 1))
_HIRAGANA = "".join(chr(ch) for ch in range(ord("ぁ"), ord("ん") + 1))
_HIRA2KATATRANS = str.maketrans(_HIRAGANA, _KATAKANA)


def hira2kata(text: str) -> str:
    text = text.translate(_HIRA2KATATRANS)
    return text.replace("う゛", "ヴ")


_SYMBOL_TOKENS = set(list("・、。?!"))
_NO_YOMI_TOKENS = set(list("「」『』―()[][]"))
_TAGGER = MeCab.Tagger()


def text2kata(text: str) -> str:
    parsed = _TAGGER.parse(text)
    res = []
    for line in parsed.split("\n"):
        if line == "EOS":
            break
        parts = line.split("\t")

        word, yomi = parts[0], parts[1]
        if yomi:
            res.append(yomi)
        else:
            if word in _SYMBOL_TOKENS:
                res.append(word)
            elif word in ("っ", "ッ"):
                res.append("ッ")
            elif word in _NO_YOMI_TOKENS:
                pass
            else:
                res.append(word)
    return hira2kata("".join(res))


_ALPHASYMBOL_YOMI = {
    "#": "シャープ", "%": "パーセント", "&": "アンド", "+": "プラス", "-": "マイナス",
    ":": "コロン", ";": "セミコロン", "<": "小なり", "=": "イコール", ">": "大なり", "@": "アット",
    "a": "エー", "b": "ビー", "c": "シー", "d": "ディー", "e": "イー", "f": "エフ",
    "g": "ジー", "h": "エイチ", "i": "アイ", "j": "ジェー", "k": "ケー", "l": "エル",
    "m": "エム", "n": "エヌ", "o": "オー", "p": "ピー", "q": "キュー", "r": "アール",
    "s": "エス", "t": "ティー", "u": "ユー", "v": "ブイ", "w": "ダブリュー", "x": "エックス",
    "y": "ワイ", "z": "ゼット",
    "α": "アルファ", "β": "ベータ", "γ": "ガンマ", "δ": "デルタ", "ε": "イプシロン", "ζ": "ゼータ",
    "η": "イータ", "θ": "シータ", "ι": "イオタ", "κ": "カッパ", "λ": "ラムダ", "μ": "ミュー",
    "ν": "ニュー", "ξ": "クサイ", "ο": "オミクロン", "π": "パイ", "ρ": "ロー", "σ": "シグマ",
    "τ": "タウ", "υ": "ウプシロン", "φ": "ファイ", "χ": "カイ", "ψ": "プサイ", "ω": "オメガ",
}


_NUMBER_WITH_SEPARATOR_RX = re.compile("[0-9]{1,3}(,[0-9]{3})+")
_CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"}
_CURRENCY_RX = re.compile(r"([$¥£€])([0-9.]*[0-9])")
_NUMBER_RX = re.compile(r"[0-9]+(\.[0-9]+)?")


def japanese_convert_numbers_to_words(text: str) -> str:
    res = _NUMBER_WITH_SEPARATOR_RX.sub(lambda m: m[0].replace(",", ""), text)
    res = _CURRENCY_RX.sub(lambda m: m[2] + _CURRENCY_MAP.get(m[1], m[1]), res)
    res = _NUMBER_RX.sub(lambda m: num2words(m[0], lang="ja"), res)
    return res


def japanese_convert_alpha_symbols_to_words(text: str) -> str:
    return "".join([_ALPHASYMBOL_YOMI.get(ch, ch) for ch in text.lower()])


def japanese_text_to_phonemes(text: str) -> str:
    """Convert Japanese text to phonemes."""
    res = unicodedata.normalize("NFKC", text)
    res = japanese_convert_numbers_to_words(res)
    # res = japanese_convert_alpha_symbols_to_words(res)
    res = text2kata(res)
    res = kata2phoneme(res)
    return res


def is_japanese_character(char):
    # Unicode ranges of the Japanese writing systems
    japanese_ranges = [
        (0x3040, 0x309F),  # hiragana
        (0x30A0, 0x30FF),  # katakana
        (0x4E00, 0x9FFF),  # kanji (CJK Unified Ideographs)
        (0x3400, 0x4DBF),  # CJK extension A
        (0x20000, 0x2A6DF),  # CJK extension B
        # further CJK extension ranges can be added as needed
    ]

    # integer code point of the character
    char_code = ord(char)

    # check whether the character falls in any of the Japanese ranges
    for start, end in japanese_ranges:
        if start <= char_code <= end:
            return True

    return False


rep_map = {
    ":": ",",
    ";": ",",
    ",": ",",
    "。": ".",
    "!": "!",
    "?": "?",
    "\n": ".",
    "·": ",",
    "、": ",",
    "...": "…",
}


def replace_punctuation(text):
    pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))

    replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)

    replaced_text = re.sub(
        r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF"
        + "".join(punctuation)
        + r"]+",
        "",
        replaced_text,
    )

    return replaced_text


def text_normalize(text):
    res = unicodedata.normalize("NFKC", text)
    res = japanese_convert_numbers_to_words(res)
    # res = "".join([i for i in res if is_japanese_character(i)])
    res = replace_punctuation(res)
    return res


def distribute_phone(n_phone, n_word):
    phones_per_word = [0] * n_word
    for task in range(n_phone):
        min_tasks = min(phones_per_word)
        min_index = phones_per_word.index(min_tasks)
        phones_per_word[min_index] += 1
    return phones_per_word


tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3")


def g2p(norm_text):
    tokenized = tokenizer.tokenize(norm_text)
    phs = []
    ph_groups = []
    for t in tokenized:
        if not t.startswith("#"):
            ph_groups.append([t])
        else:
            ph_groups[-1].append(t.replace("#", ""))
    word2ph = []
    for group in ph_groups:
        phonemes = kata2phoneme(text2kata("".join(group)))
        # phonemes = [i for i in phonemes if i in symbols]
        for i in phonemes:
            assert i in symbols, (group, norm_text, tokenized)
        phone_len = len(phonemes)
        word_len = len(group)

        aaa = distribute_phone(phone_len, word_len)
        word2ph += aaa

        phs += phonemes
    phones = ["_"] + phs + ["_"]
    tones = [0 for i in phones]
    word2ph = [1] + word2ph + [1]
    return phones, tones, word2ph


if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3")
    text = "hello,こんにちは、世界!……"
    from text.japanese_bert import get_bert_feature

    text = text_normalize(text)
    print(text)
    phones, tones, word2ph = g2p(text)
    bert = get_bert_feature(text, word2ph)

    print(phones, tones, word2ph, bert.shape)
text/japanese_bert.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from transformers import AutoTokenizer, AutoModelForMaskedLM
|
3 |
+
import sys
|
4 |
+
|
5 |
+
tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3")
|
6 |
+
|
7 |
+
models = dict()
|
8 |
+
|
9 |
+
|
10 |
+
def get_bert_feature(text, word2ph, device=None):
|
11 |
+
if (
|
12 |
+
sys.platform == "darwin"
|
13 |
+
and torch.backends.mps.is_available()
|
14 |
+
and device == "cpu"
|
15 |
+
):
|
16 |
+
device = "mps"
|
17 |
+
if not device:
|
18 |
+
device = "cuda"
|
19 |
+
if device not in models.keys():
|
20 |
+
models[device] = AutoModelForMaskedLM.from_pretrained(
|
21 |
+
"./bert/bert-base-japanese-v3"
|
22 |
+
).to(device)
|
23 |
+
with torch.no_grad():
|
24 |
+
inputs = tokenizer(text, return_tensors="pt")
|
25 |
+
for i in inputs:
|
26 |
+
inputs[i] = inputs[i].to(device)
|
27 |
+
res = models[device](**inputs, output_hidden_states=True)
|
28 |
+
res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
|
29 |
+
assert inputs["input_ids"].shape[-1] == len(word2ph)
|
30 |
+
word2phone = word2ph
|
31 |
+
phone_level_feature = []
|
32 |
+
for i in range(len(word2phone)):
|
33 |
+
repeat_feature = res[i].repeat(word2phone[i], 1)
|
34 |
+
phone_level_feature.append(repeat_feature)
|
35 |
+
|
36 |
+
phone_level_feature = torch.cat(phone_level_feature, dim=0)
|
37 |
+
|
38 |
+
return phone_level_feature.T
|
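The tensor reshaping in `get_bert_feature()` can be illustrated without loading the model: each token's hidden vector is repeated `word2ph[i]` times so the feature sequence becomes phone-aligned. A small sketch with made-up shapes (768 matches bert-base; everything else is dummy data):

```python
import torch

hidden = torch.randn(4, 768)   # stand-in for one BERT layer: 4 tokens x 768 dims
word2ph = [1, 3, 2, 1]         # phonemes attributed to each token (incl. [CLS]/[SEP])
assert hidden.shape[0] == len(word2ph)

phone_level_feature = torch.cat(
    [hidden[i].repeat(word2ph[i], 1) for i in range(len(word2ph))], dim=0
)
print(phone_level_feature.shape)    # torch.Size([7, 768]); 7 == sum(word2ph)
print(phone_level_feature.T.shape)  # torch.Size([768, 7]), what the function returns
```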
text/opencpop-strict.txt
ADDED
@@ -0,0 +1,429 @@
1 |
+
a AA a
|
2 |
+
ai AA ai
|
3 |
+
an AA an
|
4 |
+
ang AA ang
|
5 |
+
ao AA ao
|
6 |
+
ba b a
|
7 |
+
bai b ai
|
8 |
+
ban b an
|
9 |
+
bang b ang
|
10 |
+
bao b ao
|
11 |
+
bei b ei
|
12 |
+
ben b en
|
13 |
+
beng b eng
|
14 |
+
bi b i
|
15 |
+
bian b ian
|
16 |
+
biao b iao
|
17 |
+
bie b ie
|
18 |
+
bin b in
|
19 |
+
bing b ing
|
20 |
+
bo b o
|
21 |
+
bu b u
|
22 |
+
ca c a
|
23 |
+
cai c ai
|
24 |
+
can c an
|
25 |
+
cang c ang
|
26 |
+
cao c ao
|
27 |
+
ce c e
|
28 |
+
cei c ei
|
29 |
+
cen c en
|
30 |
+
ceng c eng
|
31 |
+
cha ch a
|
32 |
+
chai ch ai
|
33 |
+
chan ch an
|
34 |
+
chang ch ang
|
35 |
+
chao ch ao
|
36 |
+
che ch e
|
37 |
+
chen ch en
|
38 |
+
cheng ch eng
|
39 |
+
chi ch ir
|
40 |
+
chong ch ong
|
41 |
+
chou ch ou
|
42 |
+
chu ch u
|
43 |
+
chua ch ua
|
44 |
+
chuai ch uai
|
45 |
+
chuan ch uan
|
46 |
+
chuang ch uang
|
47 |
+
chui ch ui
|
48 |
+
chun ch un
|
49 |
+
chuo ch uo
|
50 |
+
ci c i0
|
51 |
+
cong c ong
|
52 |
+
cou c ou
|
53 |
+
cu c u
|
54 |
+
cuan c uan
|
55 |
+
cui c ui
|
56 |
+
cun c un
|
57 |
+
cuo c uo
|
58 |
+
da d a
|
59 |
+
dai d ai
|
60 |
+
dan d an
|
61 |
+
dang d ang
|
62 |
+
dao d ao
|
63 |
+
de d e
|
64 |
+
dei d ei
|
65 |
+
den d en
|
66 |
+
deng d eng
|
67 |
+
di d i
|
68 |
+
dia d ia
|
69 |
+
dian d ian
|
70 |
+
diao d iao
|
71 |
+
die d ie
|
72 |
+
ding d ing
|
73 |
+
diu d iu
|
74 |
+
dong d ong
|
75 |
+
dou d ou
|
76 |
+
du d u
|
77 |
+
duan d uan
|
78 |
+
dui d ui
|
79 |
+
dun d un
|
80 |
+
duo d uo
|
81 |
+
e EE e
|
82 |
+
ei EE ei
|
83 |
+
en EE en
|
84 |
+
eng EE eng
|
85 |
+
er EE er
|
86 |
+
fa f a
|
87 |
+
fan f an
|
88 |
+
fang f ang
|
89 |
+
fei f ei
|
90 |
+
fen f en
|
91 |
+
feng f eng
|
92 |
+
fo f o
|
93 |
+
fou f ou
|
94 |
+
fu f u
|
95 |
+
ga g a
|
96 |
+
gai g ai
|
97 |
+
gan g an
|
98 |
+
gang g ang
|
99 |
+
gao g ao
|
100 |
+
ge g e
|
101 |
+
gei g ei
|
102 |
+
gen g en
|
103 |
+
geng g eng
|
104 |
+
gong g ong
|
105 |
+
gou g ou
|
106 |
+
gu g u
|
107 |
+
gua g ua
|
108 |
+
guai g uai
|
109 |
+
guan g uan
|
110 |
+
guang g uang
|
111 |
+
gui g ui
|
112 |
+
gun g un
|
113 |
+
guo g uo
|
114 |
+
ha h a
|
115 |
+
hai h ai
|
116 |
+
han h an
|
117 |
+
hang h ang
|
118 |
+
hao h ao
|
119 |
+
he h e
|
120 |
+
hei h ei
|
121 |
+
hen h en
|
122 |
+
heng h eng
|
123 |
+
hong h ong
|
124 |
+
hou h ou
|
125 |
+
hu h u
|
126 |
+
hua h ua
|
127 |
+
huai h uai
|
128 |
+
huan h uan
|
129 |
+
huang h uang
|
130 |
+
hui h ui
|
131 |
+
hun h un
|
132 |
+
huo h uo
|
133 |
+
ji j i
|
134 |
+
jia j ia
|
135 |
+
jian j ian
|
136 |
+
jiang j iang
|
137 |
+
jiao j iao
|
138 |
+
jie j ie
|
139 |
+
jin j in
|
140 |
+
jing j ing
|
141 |
+
jiong j iong
|
142 |
+
jiu j iu
|
143 |
+
ju j v
|
144 |
+
jv j v
|
145 |
+
juan j van
|
146 |
+
jvan j van
|
147 |
+
jue j ve
|
148 |
+
jve j ve
|
149 |
+
jun j vn
|
150 |
+
jvn j vn
|
151 |
+
ka k a
|
152 |
+
kai k ai
|
153 |
+
kan k an
|
154 |
+
kang k ang
|
155 |
+
kao k ao
|
156 |
+
ke k e
|
157 |
+
kei k ei
|
158 |
+
ken k en
|
159 |
+
keng k eng
|
160 |
+
kong k ong
|
161 |
+
kou k ou
|
162 |
+
ku k u
|
163 |
+
kua k ua
|
164 |
+
kuai k uai
|
165 |
+
kuan k uan
|
166 |
+
kuang k uang
|
167 |
+
kui k ui
|
168 |
+
kun k un
|
169 |
+
kuo k uo
|
170 |
+
la l a
|
171 |
+
lai l ai
|
172 |
+
lan l an
|
173 |
+
lang l ang
|
174 |
+
lao l ao
|
175 |
+
le l e
|
176 |
+
lei l ei
|
177 |
+
leng l eng
|
178 |
+
li l i
|
179 |
+
lia l ia
|
180 |
+
lian l ian
|
181 |
+
liang l iang
|
182 |
+
liao l iao
|
183 |
+
lie l ie
|
184 |
+
lin l in
|
185 |
+
ling l ing
|
186 |
+
liu l iu
|
187 |
+
lo l o
|
188 |
+
long l ong
|
189 |
+
lou l ou
|
190 |
+
lu l u
|
191 |
+
luan l uan
|
192 |
+
lun l un
|
193 |
+
luo l uo
|
194 |
+
lv l v
|
195 |
+
lve l ve
|
196 |
+
ma m a
|
197 |
+
mai m ai
|
198 |
+
man m an
|
199 |
+
mang m ang
|
200 |
+
mao m ao
|
201 |
+
me m e
|
202 |
+
mei m ei
|
203 |
+
men m en
|
204 |
+
meng m eng
|
205 |
+
mi m i
|
206 |
+
mian m ian
|
207 |
+
miao m iao
|
208 |
+
mie m ie
|
209 |
+
min m in
|
210 |
+
ming m ing
|
211 |
+
miu m iu
|
212 |
+
mo m o
|
213 |
+
mou m ou
|
214 |
+
mu m u
|
215 |
+
na n a
|
216 |
+
nai n ai
|
217 |
+
nan n an
|
218 |
+
nang n ang
|
219 |
+
nao n ao
|
220 |
+
ne n e
|
221 |
+
nei n ei
|
222 |
+
nen n en
|
223 |
+
neng n eng
|
224 |
+
ni n i
|
225 |
+
nian n ian
|
226 |
+
niang n iang
|
227 |
+
niao n iao
|
228 |
+
nie n ie
|
229 |
+
nin n in
|
230 |
+
ning n ing
|
231 |
+
niu n iu
|
232 |
+
nong n ong
|
233 |
+
nou n ou
|
234 |
+
nu n u
|
235 |
+
nuan n uan
|
236 |
+
nun n un
|
237 |
+
nuo n uo
|
238 |
+
nv n v
|
239 |
+
nve n ve
|
240 |
+
o OO o
|
241 |
+
ou OO ou
|
242 |
+
pa p a
|
243 |
+
pai p ai
|
244 |
+
pan p an
|
245 |
+
pang p ang
|
246 |
+
pao p ao
|
247 |
+
pei p ei
|
248 |
+
pen p en
|
249 |
+
peng p eng
|
250 |
+
pi p i
|
251 |
+
pian p ian
|
252 |
+
piao p iao
|
253 |
+
pie p ie
|
254 |
+
pin p in
|
255 |
+
ping p ing
|
256 |
+
po p o
|
257 |
+
pou p ou
|
258 |
+
pu p u
|
259 |
+
qi q i
|
260 |
+
qia q ia
|
261 |
+
qian q ian
|
262 |
+
qiang q iang
|
263 |
+
qiao q iao
|
264 |
+
qie q ie
|
265 |
+
qin q in
|
266 |
+
qing q ing
|
267 |
+
qiong q iong
|
268 |
+
qiu q iu
|
269 |
+
qu q v
|
270 |
+
qv q v
|
271 |
+
quan q van
|
272 |
+
qvan q van
|
273 |
+
que q ve
|
274 |
+
qve q ve
|
275 |
+
qun q vn
|
276 |
+
qvn q vn
|
277 |
+
ran r an
|
278 |
+
rang r ang
|
279 |
+
rao r ao
|
280 |
+
re r e
|
281 |
+
ren r en
|
282 |
+
reng r eng
|
283 |
+
ri r ir
|
284 |
+
rong r ong
|
285 |
+
rou r ou
|
286 |
+
ru r u
|
287 |
+
rua r ua
|
288 |
+
ruan r uan
|
289 |
+
rui r ui
|
290 |
+
run r un
|
291 |
+
ruo r uo
|
292 |
+
sa s a
|
293 |
+
sai s ai
|
294 |
+
san s an
|
295 |
+
sang s ang
|
296 |
+
sao s ao
|
297 |
+
se s e
|
298 |
+
sen s en
|
299 |
+
seng s eng
|
300 |
+
sha sh a
|
301 |
+
shai sh ai
|
302 |
+
shan sh an
|
303 |
+
shang sh ang
|
304 |
+
shao sh ao
|
305 |
+
she sh e
|
306 |
+
shei sh ei
|
307 |
+
shen sh en
|
308 |
+
sheng sh eng
|
309 |
+
shi sh ir
|
310 |
+
shou sh ou
|
311 |
+
shu sh u
|
312 |
+
shua sh ua
|
313 |
+
shuai sh uai
|
314 |
+
shuan sh uan
|
315 |
+
shuang sh uang
|
316 |
+
shui sh ui
|
317 |
+
shun sh un
|
318 |
+
shuo sh uo
|
319 |
+
si s i0
|
320 |
+
song s ong
|
321 |
+
sou s ou
|
322 |
+
su s u
|
323 |
+
suan s uan
|
324 |
+
sui s ui
|
325 |
+
sun s un
|
326 |
+
suo s uo
|
327 |
+
ta t a
|
328 |
+
tai t ai
|
329 |
+
tan t an
|
330 |
+
tang t ang
|
331 |
+
tao t ao
|
332 |
+
te t e
|
333 |
+
tei t ei
|
334 |
+
teng t eng
|
335 |
+
ti t i
|
336 |
+
tian t ian
|
337 |
+
tiao t iao
|
338 |
+
tie t ie
|
339 |
+
ting t ing
|
340 |
+
tong t ong
|
341 |
+
tou t ou
|
342 |
+
tu t u
|
343 |
+
tuan t uan
|
344 |
+
tui t ui
|
345 |
+
tun t un
|
346 |
+
tuo t uo
|
347 |
+
wa w a
|
348 |
+
wai w ai
|
349 |
+
wan w an
|
350 |
+
wang w ang
|
351 |
+
wei w ei
|
352 |
+
wen w en
|
353 |
+
weng w eng
|
354 |
+
wo w o
|
355 |
+
wu w u
|
356 |
+
xi x i
|
357 |
+
xia x ia
|
358 |
+
xian x ian
|
359 |
+
xiang x iang
|
360 |
+
xiao x iao
|
361 |
+
xie x ie
|
362 |
+
xin x in
|
363 |
+
xing x ing
|
364 |
+
xiong x iong
|
365 |
+
xiu x iu
|
366 |
+
xu x v
|
367 |
+
xv x v
|
368 |
+
xuan x van
|
369 |
+
xvan x van
|
370 |
+
xue x ve
|
371 |
+
xve x ve
|
372 |
+
xun x vn
|
373 |
+
xvn x vn
|
374 |
+
ya y a
|
375 |
+
yan y En
|
376 |
+
yang y ang
|
377 |
+
yao y ao
|
378 |
+
ye y E
|
379 |
+
yi y i
|
380 |
+
yin y in
|
381 |
+
ying y ing
|
382 |
+
yo y o
|
383 |
+
yong y ong
|
384 |
+
you y ou
|
385 |
+
yu y v
|
386 |
+
yv y v
|
387 |
+
yuan y van
|
388 |
+
yvan y van
|
389 |
+
yue y ve
|
390 |
+
yve y ve
|
391 |
+
yun y vn
|
392 |
+
yvn y vn
|
393 |
+
za z a
|
394 |
+
zai z ai
|
395 |
+
zan z an
|
396 |
+
zang z ang
|
397 |
+
zao z ao
|
398 |
+
ze z e
|
399 |
+
zei z ei
|
400 |
+
zen z en
|
401 |
+
zeng z eng
|
402 |
+
zha zh a
|
403 |
+
zhai zh ai
|
404 |
+
zhan zh an
|
405 |
+
zhang zh ang
|
406 |
+
zhao zh ao
|
407 |
+
zhe zh e
|
408 |
+
zhei zh ei
|
409 |
+
zhen zh en
|
410 |
+
zheng zh eng
|
411 |
+
zhi zh ir
|
412 |
+
zhong zh ong
|
413 |
+
zhou zh ou
|
414 |
+
zhu zh u
|
415 |
+
zhua zh ua
|
416 |
+
zhuai zh uai
|
417 |
+
zhuan zh uan
|
418 |
+
zhuang zh uang
|
419 |
+
zhui zh ui
|
420 |
+
zhun zh un
|
421 |
+
zhuo zh uo
|
422 |
+
zi z i0
|
423 |
+
zong z ong
|
424 |
+
zou z ou
|
425 |
+
zu z u
|
426 |
+
zuan z uan
|
427 |
+
zui z ui
|
428 |
+
zun z un
|
429 |
+
zuo z uo
|
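Each row of opencpop-strict.txt maps a pinyin syllable to its initial and final in the symbol set (`zhi` -> `zh ir`, `a` -> `AA a`, and so on). A minimal, hypothetical loader for the table (the Chinese front end presumably builds a dict along these lines):

```python
# Minimal sketch: read the whitespace-separated "syllable initial final" rows.
pinyin_to_symbols = {}
with open("text/opencpop-strict.txt", encoding="utf-8") as f:
    for line in f:
        syllable, *phones = line.split()
        pinyin_to_symbols[syllable] = phones

print(pinyin_to_symbols["zhi"])  # ['zh', 'ir']
print(pinyin_to_symbols["a"])    # ['AA', 'a']
```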
text/symbols.py
ADDED
@@ -0,0 +1,187 @@
1 |
+
punctuation = ["!", "?", "…", ",", ".", "'", "-"]
|
2 |
+
pu_symbols = punctuation + ["SP", "UNK"]
|
3 |
+
pad = "_"
|
4 |
+
|
5 |
+
# chinese
|
6 |
+
zh_symbols = [
|
7 |
+
"E",
|
8 |
+
"En",
|
9 |
+
"a",
|
10 |
+
"ai",
|
11 |
+
"an",
|
12 |
+
"ang",
|
13 |
+
"ao",
|
14 |
+
"b",
|
15 |
+
"c",
|
16 |
+
"ch",
|
17 |
+
"d",
|
18 |
+
"e",
|
19 |
+
"ei",
|
20 |
+
"en",
|
21 |
+
"eng",
|
22 |
+
"er",
|
23 |
+
"f",
|
24 |
+
"g",
|
25 |
+
"h",
|
26 |
+
"i",
|
27 |
+
"i0",
|
28 |
+
"ia",
|
29 |
+
"ian",
|
30 |
+
"iang",
|
31 |
+
"iao",
|
32 |
+
"ie",
|
33 |
+
"in",
|
34 |
+
"ing",
|
35 |
+
"iong",
|
36 |
+
"ir",
|
37 |
+
"iu",
|
38 |
+
"j",
|
39 |
+
"k",
|
40 |
+
"l",
|
41 |
+
"m",
|
42 |
+
"n",
|
43 |
+
"o",
|
44 |
+
"ong",
|
45 |
+
"ou",
|
46 |
+
"p",
|
47 |
+
"q",
|
48 |
+
"r",
|
49 |
+
"s",
|
50 |
+
"sh",
|
51 |
+
"t",
|
52 |
+
"u",
|
53 |
+
"ua",
|
54 |
+
"uai",
|
55 |
+
"uan",
|
56 |
+
"uang",
|
57 |
+
"ui",
|
58 |
+
"un",
|
59 |
+
"uo",
|
60 |
+
"v",
|
61 |
+
"van",
|
62 |
+
"ve",
|
63 |
+
"vn",
|
64 |
+
"w",
|
65 |
+
"x",
|
66 |
+
"y",
|
67 |
+
"z",
|
68 |
+
"zh",
|
69 |
+
"AA",
|
70 |
+
"EE",
|
71 |
+
"OO",
|
72 |
+
]
|
73 |
+
num_zh_tones = 6
|
74 |
+
|
75 |
+
# japanese
|
76 |
+
ja_symbols = [
|
77 |
+
"N",
|
78 |
+
"a",
|
79 |
+
"a:",
|
80 |
+
"b",
|
81 |
+
"by",
|
82 |
+
"ch",
|
83 |
+
"d",
|
84 |
+
"dy",
|
85 |
+
"e",
|
86 |
+
"e:",
|
87 |
+
"f",
|
88 |
+
"g",
|
89 |
+
"gy",
|
90 |
+
"h",
|
91 |
+
"hy",
|
92 |
+
"i",
|
93 |
+
"i:",
|
94 |
+
"j",
|
95 |
+
"k",
|
96 |
+
"ky",
|
97 |
+
"m",
|
98 |
+
"my",
|
99 |
+
"n",
|
100 |
+
"ny",
|
101 |
+
"o",
|
102 |
+
"o:",
|
103 |
+
"p",
|
104 |
+
"py",
|
105 |
+
"q",
|
106 |
+
"r",
|
107 |
+
"ry",
|
108 |
+
"s",
|
109 |
+
"sh",
|
110 |
+
"t",
|
111 |
+
"ts",
|
112 |
+
"ty",
|
113 |
+
"u",
|
114 |
+
"u:",
|
115 |
+
"w",
|
116 |
+
"y",
|
117 |
+
"z",
|
118 |
+
"zy",
|
119 |
+
]
|
120 |
+
num_ja_tones = 1
|
121 |
+
|
122 |
+
# English
|
123 |
+
en_symbols = [
|
124 |
+
"aa",
|
125 |
+
"ae",
|
126 |
+
"ah",
|
127 |
+
"ao",
|
128 |
+
"aw",
|
129 |
+
"ay",
|
130 |
+
"b",
|
131 |
+
"ch",
|
132 |
+
"d",
|
133 |
+
"dh",
|
134 |
+
"eh",
|
135 |
+
"er",
|
136 |
+
"ey",
|
137 |
+
"f",
|
138 |
+
"g",
|
139 |
+
"hh",
|
140 |
+
"ih",
|
141 |
+
"iy",
|
142 |
+
"jh",
|
143 |
+
"k",
|
144 |
+
"l",
|
145 |
+
"m",
|
146 |
+
"n",
|
147 |
+
"ng",
|
148 |
+
"ow",
|
149 |
+
"oy",
|
150 |
+
"p",
|
151 |
+
"r",
|
152 |
+
"s",
|
153 |
+
"sh",
|
154 |
+
"t",
|
155 |
+
"th",
|
156 |
+
"uh",
|
157 |
+
"uw",
|
158 |
+
"V",
|
159 |
+
"w",
|
160 |
+
"y",
|
161 |
+
"z",
|
162 |
+
"zh",
|
163 |
+
]
|
164 |
+
num_en_tones = 4
|
165 |
+
|
166 |
+
# combine all symbols
|
167 |
+
normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols))
|
168 |
+
symbols = [pad] + normal_symbols + pu_symbols
|
169 |
+
sil_phonemes_ids = [symbols.index(i) for i in pu_symbols]
|
170 |
+
|
171 |
+
# combine all tones
|
172 |
+
num_tones = num_zh_tones + num_ja_tones + num_en_tones
|
173 |
+
|
174 |
+
# language maps
|
175 |
+
language_id_map = {"ZH": 0, "JP": 1, "EN": 2}
|
176 |
+
num_languages = len(language_id_map.keys())
|
177 |
+
|
178 |
+
language_tone_start_map = {
|
179 |
+
"ZH": 0,
|
180 |
+
"JP": num_zh_tones,
|
181 |
+
"EN": num_zh_tones + num_ja_tones,
|
182 |
+
}
|
183 |
+
|
184 |
+
if __name__ == "__main__":
|
185 |
+
a = set(zh_symbols)
|
186 |
+
b = set(en_symbols)
|
187 |
+
print(sorted(a & b))
|
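symbols.py only defines the lookup tables; turning a `(phones, tones, language)` triple into integer ids is a straightforward indexing step. A hypothetical sketch of that mapping (the repo's `text/__init__.py` presumably does the equivalent), using the per-language tone offset so Chinese, Japanese and English tones occupy disjoint ranges:

```python
from text.symbols import symbols, language_id_map, language_tone_start_map

def to_ids(phones, tones, language):
    phone_ids = [symbols.index(p) for p in phones]
    tone_ids = [t + language_tone_start_map[language] for t in tones]
    lang_ids = [language_id_map[language]] * len(phones)
    return phone_ids, tone_ids, lang_ids

# Japanese has a single tone (0); after the offset it sits right after the
# six Chinese tones, so the three languages never collide.
print(to_ids(["_", "k", "o", "_"], [0, 0, 0, 0], "JP")[1])  # [6, 6, 6, 6]
```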
text/tone_sandhi.py
ADDED
@@ -0,0 +1,769 @@
1 |
+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import List
|
15 |
+
from typing import Tuple
|
16 |
+
|
17 |
+
import jieba
|
18 |
+
from pypinyin import lazy_pinyin
|
19 |
+
from pypinyin import Style
|
20 |
+
|
21 |
+
|
22 |
+
class ToneSandhi:
|
23 |
+
def __init__(self):
|
24 |
+
self.must_neural_tone_words = {
|
25 |
+
"麻烦",
|
26 |
+
"麻利",
|
27 |
+
"鸳鸯",
|
28 |
+
"高粱",
|
29 |
+
"骨头",
|
30 |
+
"骆驼",
|
31 |
+
"马虎",
|
32 |
+
"首饰",
|
33 |
+
"馒头",
|
34 |
+
"馄饨",
|
35 |
+
"风筝",
|
36 |
+
"难为",
|
37 |
+
"队伍",
|
38 |
+
"阔气",
|
39 |
+
"闺女",
|
40 |
+
"门道",
|
41 |
+
"锄头",
|
42 |
+
"铺盖",
|
43 |
+
"铃铛",
|
44 |
+
"铁匠",
|
45 |
+
"钥匙",
|
46 |
+
"里脊",
|
47 |
+
"里头",
|
48 |
+
"部分",
|
49 |
+
"那么",
|
50 |
+
"道士",
|
51 |
+
"造化",
|
52 |
+
"迷糊",
|
53 |
+
"连累",
|
54 |
+
"这么",
|
55 |
+
"这个",
|
56 |
+
"运气",
|
57 |
+
"过去",
|
58 |
+
"软和",
|
59 |
+
"转悠",
|
60 |
+
"踏实",
|
61 |
+
"跳蚤",
|
62 |
+
"跟头",
|
63 |
+
"趔趄",
|
64 |
+
"财主",
|
65 |
+
"豆腐",
|
66 |
+
"讲究",
|
67 |
+
"记性",
|
68 |
+
"记号",
|
69 |
+
"认识",
|
70 |
+
"规矩",
|
71 |
+
"见识",
|
72 |
+
"裁缝",
|
73 |
+
"补丁",
|
74 |
+
"衣裳",
|
75 |
+
"衣服",
|
76 |
+
"衙门",
|
77 |
+
"街坊",
|
78 |
+
"行李",
|
79 |
+
"行当",
|
80 |
+
"蛤蟆",
|
81 |
+
"蘑菇",
|
82 |
+
"薄荷",
|
83 |
+
"葫芦",
|
84 |
+
"葡萄",
|
85 |
+
"萝卜",
|
86 |
+
"荸荠",
|
87 |
+
"苗条",
|
88 |
+
"苗头",
|
89 |
+
"苍蝇",
|
90 |
+
"芝麻",
|
91 |
+
"舒服",
|
92 |
+
"舒坦",
|
93 |
+
"舌头",
|
94 |
+
"自在",
|
95 |
+
"膏药",
|
96 |
+
"脾气",
|
97 |
+
"脑袋",
|
98 |
+
"脊梁",
|
99 |
+
"能耐",
|
100 |
+
"胳膊",
|
101 |
+
"胭脂",
|
102 |
+
"胡萝",
|
103 |
+
"胡琴",
|
104 |
+
"胡同",
|
105 |
+
"聪明",
|
106 |
+
"耽误",
|
107 |
+
"耽搁",
|
108 |
+
"耷拉",
|
109 |
+
"耳朵",
|
110 |
+
"老爷",
|
111 |
+
"老实",
|
112 |
+
"老婆",
|
113 |
+
"老头",
|
114 |
+
"老太",
|
115 |
+
"翻腾",
|
116 |
+
"罗嗦",
|
117 |
+
"罐头",
|
118 |
+
"编辑",
|
119 |
+
"结实",
|
120 |
+
"红火",
|
121 |
+
"累赘",
|
122 |
+
"糨糊",
|
123 |
+
"糊涂",
|
124 |
+
"精神",
|
125 |
+
"粮食",
|
126 |
+
"簸箕",
|
127 |
+
"篱笆",
|
128 |
+
"算计",
|
129 |
+
"算盘",
|
130 |
+
"答应",
|
131 |
+
"笤帚",
|
132 |
+
"笑语",
|
133 |
+
"笑话",
|
134 |
+
"窟窿",
|
135 |
+
"窝囊",
|
136 |
+
"窗户",
|
137 |
+
"稳当",
|
138 |
+
"稀罕",
|
139 |
+
"称呼",
|
140 |
+
"秧歌",
|
141 |
+
"秀气",
|
142 |
+
"秀才",
|
143 |
+
"福气",
|
144 |
+
"祖宗",
|
145 |
+
"砚台",
|
146 |
+
"码头",
|
147 |
+
"石榴",
|
148 |
+
"石头",
|
149 |
+
"石匠",
|
150 |
+
"知识",
|
151 |
+
"眼睛",
|
152 |
+
"眯缝",
|
153 |
+
"眨巴",
|
154 |
+
"眉毛",
|
155 |
+
"相声",
|
156 |
+
"盘算",
|
157 |
+
"白净",
|
158 |
+
"痢疾",
|
159 |
+
"痛快",
|
160 |
+
"疟疾",
|
161 |
+
"疙瘩",
|
162 |
+
"疏忽",
|
163 |
+
"畜生",
|
164 |
+
"生意",
|
165 |
+
"甘蔗",
|
166 |
+
"琵琶",
|
167 |
+
"琢磨",
|
168 |
+
"琉璃",
|
169 |
+
"玻璃",
|
170 |
+
"玫瑰",
|
171 |
+
"玄乎",
|
172 |
+
"狐狸",
|
173 |
+
"状元",
|
174 |
+
"特务",
|
175 |
+
"牲口",
|
176 |
+
"牙碜",
|
177 |
+
"牌楼",
|
178 |
+
"爽快",
|
179 |
+
"爱人",
|
180 |
+
"热闹",
|
181 |
+
"烧饼",
|
182 |
+
"烟筒",
|
183 |
+
"烂糊",
|
184 |
+
"点心",
|
185 |
+
"炊帚",
|
186 |
+
"灯笼",
|
187 |
+
"火候",
|
188 |
+
"漂亮",
|
189 |
+
"滑溜",
|
190 |
+
"溜达",
|
191 |
+
"温和",
|
192 |
+
"清楚",
|
193 |
+
"消息",
|
194 |
+
"浪头",
|
195 |
+
"活泼",
|
196 |
+
"比方",
|
197 |
+
"正经",
|
198 |
+
"欺负",
|
199 |
+
"模糊",
|
200 |
+
"槟榔",
|
201 |
+
"棺材",
|
202 |
+
"棒槌",
|
203 |
+
"棉花",
|
204 |
+
"核桃",
|
205 |
+
"栅栏",
|
206 |
+
"柴火",
|
207 |
+
"架势",
|
208 |
+
"枕头",
|
209 |
+
"枇杷",
|
210 |
+
"机灵",
|
211 |
+
"本事",
|
212 |
+
"木头",
|
213 |
+
"木匠",
|
214 |
+
"朋友",
|
215 |
+
"月饼",
|
216 |
+
"月亮",
|
217 |
+
"暖和",
|
218 |
+
"明白",
|
219 |
+
"时候",
|
220 |
+
"新鲜",
|
221 |
+
"故事",
|
222 |
+
"收拾",
|
223 |
+
"收成",
|
224 |
+
"提防",
|
225 |
+
"挖苦",
|
226 |
+
"挑剔",
|
227 |
+
"指甲",
|
228 |
+
"指头",
|
229 |
+
"拾掇",
|
230 |
+
"拳头",
|
231 |
+
"拨弄",
|
232 |
+
"招牌",
|
233 |
+
"招呼",
|
234 |
+
"抬举",
|
235 |
+
"护士",
|
236 |
+
"折腾",
|
237 |
+
"扫帚",
|
238 |
+
"打量",
|
239 |
+
"打算",
|
240 |
+
"打点",
|
241 |
+
"打扮",
|
242 |
+
"打听",
|
243 |
+
"打发",
|
244 |
+
"扎实",
|
245 |
+
"扁担",
|
246 |
+
"戒指",
|
247 |
+
"懒得",
|
248 |
+
"意识",
|
249 |
+
"意思",
|
250 |
+
"情形",
|
251 |
+
"悟性",
|
252 |
+
"怪物",
|
253 |
+
"思量",
|
254 |
+
"怎么",
|
255 |
+
"念头",
|
256 |
+
"念叨",
|
257 |
+
"快活",
|
258 |
+
"忙活",
|
259 |
+
"志气",
|
260 |
+
"心思",
|
261 |
+
"得罪",
|
262 |
+
"张罗",
|
263 |
+
"弟兄",
|
264 |
+
"开通",
|
265 |
+
"应酬",
|
266 |
+
"庄稼",
|
267 |
+
"干事",
|
268 |
+
"帮手",
|
269 |
+
"帐篷",
|
270 |
+
"希罕",
|
271 |
+
"师父",
|
272 |
+
"师傅",
|
273 |
+
"巴结",
|
274 |
+
"巴掌",
|
275 |
+
"差事",
|
276 |
+
"工夫",
|
277 |
+
"岁数",
|
278 |
+
"屁股",
|
279 |
+
"尾巴",
|
280 |
+
"少爷",
|
281 |
+
"小气",
|
282 |
+
"小伙",
|
283 |
+
"将就",
|
284 |
+
"对头",
|
285 |
+
"对付",
|
286 |
+
"寡妇",
|
287 |
+
"家伙",
|
288 |
+
"客气",
|
289 |
+
"实在",
|
290 |
+
"官司",
|
291 |
+
"学问",
|
292 |
+
"学生",
|
293 |
+
"字号",
|
294 |
+
"嫁妆",
|
295 |
+
"媳妇",
|
296 |
+
"媒人",
|
297 |
+
"婆家",
|
298 |
+
"娘家",
|
299 |
+
"委屈",
|
300 |
+
"姑娘",
|
301 |
+
"姐夫",
|
302 |
+
"妯娌",
|
303 |
+
"妥当",
|
304 |
+
"妖精",
|
305 |
+
"奴才",
|
306 |
+
"女婿",
|
307 |
+
"头发",
|
308 |
+
"太阳",
|
309 |
+
"大爷",
|
310 |
+
"大方",
|
311 |
+
"大意",
|
312 |
+
"大夫",
|
313 |
+
"多少",
|
314 |
+
"多么",
|
315 |
+
"外甥",
|
316 |
+
"壮实",
|
317 |
+
"地道",
|
318 |
+
"地方",
|
319 |
+
"在乎",
|
320 |
+
"困难",
|
321 |
+
"嘴巴",
|
322 |
+
"嘱咐",
|
323 |
+
"嘟囔",
|
324 |
+
"嘀咕",
|
325 |
+
"喜欢",
|
326 |
+
"喇嘛",
|
327 |
+
"喇叭",
|
328 |
+
"商量",
|
329 |
+
"唾沫",
|
330 |
+
"哑巴",
|
331 |
+
"哈欠",
|
332 |
+
"哆嗦",
|
333 |
+
"咳嗽",
|
334 |
+
"和尚",
|
335 |
+
"告诉",
|
336 |
+
"告示",
|
337 |
+
"含糊",
|
338 |
+
"吓唬",
|
339 |
+
"后头",
|
340 |
+
"名字",
|
341 |
+
"名堂",
|
342 |
+
"合同",
|
343 |
+
"吆喝",
|
344 |
+
"叫唤",
|
345 |
+
"口袋",
|
346 |
+
"厚道",
|
347 |
+
"厉害",
|
348 |
+
"千斤",
|
349 |
+
"包袱",
|
350 |
+
"包涵",
|
351 |
+
"匀称",
|
352 |
+
"勤快",
|
353 |
+
"动静",
|
354 |
+
"动弹",
|
355 |
+
"功夫",
|
356 |
+
"力气",
|
357 |
+
"前头",
|
358 |
+
"刺猬",
|
359 |
+
"刺激",
|
360 |
+
"别扭",
|
361 |
+
"利落",
|
362 |
+
"利索",
|
363 |
+
"利害",
|
364 |
+
"分析",
|
365 |
+
"出息",
|
366 |
+
"凑合",
|
367 |
+
"凉快",
|
368 |
+
"冷战",
|
369 |
+
"冤枉",
|
370 |
+
"冒失",
|
371 |
+
"养活",
|
372 |
+
"关系",
|
373 |
+
"先生",
|
374 |
+
"兄弟",
|
375 |
+
"便宜",
|
376 |
+
"使唤",
|
377 |
+
"佩服",
|
378 |
+
"作坊",
|
379 |
+
"体面",
|
380 |
+
"位置",
|
381 |
+
"似的",
|
382 |
+
"伙计",
|
383 |
+
"休息",
|
384 |
+
"什么",
|
385 |
+
"人家",
|
386 |
+
"亲戚",
|
387 |
+
"亲家",
|
388 |
+
"交情",
|
389 |
+
"云彩",
|
390 |
+
"事情",
|
391 |
+
"买卖",
|
392 |
+
"主意",
|
393 |
+
"丫头",
|
394 |
+
"丧气",
|
395 |
+
"两口",
|
396 |
+
"东西",
|
397 |
+
"东家",
|
398 |
+
"世故",
|
399 |
+
"不由",
|
400 |
+
"不在",
|
401 |
+
"下水",
|
402 |
+
"下巴",
|
403 |
+
"上头",
|
404 |
+
"上司",
|
405 |
+
"丈夫",
|
406 |
+
"丈人",
|
407 |
+
"一辈",
|
408 |
+
"那个",
|
409 |
+
"菩萨",
|
410 |
+
"父亲",
|
411 |
+
"母亲",
|
412 |
+
"咕噜",
|
413 |
+
"邋遢",
|
414 |
+
"费用",
|
415 |
+
"冤家",
|
416 |
+
"甜头",
|
417 |
+
"介绍",
|
418 |
+
"荒唐",
|
419 |
+
"大人",
|
420 |
+
"泥鳅",
|
421 |
+
"幸福",
|
422 |
+
"熟悉",
|
423 |
+
"计划",
|
424 |
+
"扑腾",
|
425 |
+
"蜡烛",
|
426 |
+
"姥爷",
|
427 |
+
"照顾",
|
428 |
+
"喉咙",
|
429 |
+
"吉他",
|
430 |
+
"弄堂",
|
431 |
+
"蚂蚱",
|
432 |
+
"凤凰",
|
433 |
+
"拖沓",
|
434 |
+
"寒碜",
|
435 |
+
"糟蹋",
|
436 |
+
"倒腾",
|
437 |
+
"报复",
|
438 |
+
"逻辑",
|
439 |
+
"盘缠",
|
440 |
+
"喽啰",
|
441 |
+
"牢骚",
|
442 |
+
"咖喱",
|
443 |
+
"扫把",
|
444 |
+
"惦记",
|
445 |
+
}
|
446 |
+
self.must_not_neural_tone_words = {
|
447 |
+
"男子",
|
448 |
+
"女子",
|
449 |
+
"分子",
|
450 |
+
"原子",
|
451 |
+
"量子",
|
452 |
+
"莲子",
|
453 |
+
"石子",
|
454 |
+
"瓜子",
|
455 |
+
"电子",
|
456 |
+
"人人",
|
457 |
+
"虎虎",
|
458 |
+
}
|
459 |
+
self.punc = ":,;。?!“”‘’':,;.?!"
|
460 |
+
|
461 |
+
# the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041
|
462 |
+
# e.g.
|
463 |
+
# word: "家里"
|
464 |
+
# pos: "s"
|
465 |
+
# finals: ['ia1', 'i3']
|
466 |
+
def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:
|
467 |
+
# reduplicated words for n., v. and a., e.g. 奶奶, 试试, 旺旺
|
468 |
+
for j, item in enumerate(word):
|
469 |
+
if (
|
470 |
+
j - 1 >= 0
|
471 |
+
and item == word[j - 1]
|
472 |
+
and pos[0] in {"n", "v", "a"}
|
473 |
+
and word not in self.must_not_neural_tone_words
|
474 |
+
):
|
475 |
+
finals[j] = finals[j][:-1] + "5"
|
476 |
+
ge_idx = word.find("个")
|
477 |
+
if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
|
478 |
+
finals[-1] = finals[-1][:-1] + "5"
|
479 |
+
elif len(word) >= 1 and word[-1] in "的地得":
|
480 |
+
finals[-1] = finals[-1][:-1] + "5"
|
481 |
+
# e.g. 走了, 看着, 去过
|
482 |
+
# elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
|
483 |
+
# finals[-1] = finals[-1][:-1] + "5"
|
484 |
+
elif (
|
485 |
+
len(word) > 1
|
486 |
+
and word[-1] in "们子"
|
487 |
+
and pos in {"r", "n"}
|
488 |
+
and word not in self.must_not_neural_tone_words
|
489 |
+
):
|
490 |
+
finals[-1] = finals[-1][:-1] + "5"
|
491 |
+
# e.g. 桌上, 地下, 家里
|
492 |
+
elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
|
493 |
+
finals[-1] = finals[-1][:-1] + "5"
|
494 |
+
# e.g. 上来, 下去
|
495 |
+
elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
|
496 |
+
finals[-1] = finals[-1][:-1] + "5"
|
497 |
+
# 个 used as a measure word (classifier)
|
498 |
+
elif (
|
499 |
+
ge_idx >= 1
|
500 |
+
and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in "几有两半多各整每做是")
|
501 |
+
) or word == "个":
|
502 |
+
finals[ge_idx] = finals[ge_idx][:-1] + "5"
|
503 |
+
else:
|
504 |
+
if (
|
505 |
+
word in self.must_neural_tone_words
|
506 |
+
or word[-2:] in self.must_neural_tone_words
|
507 |
+
):
|
508 |
+
finals[-1] = finals[-1][:-1] + "5"
|
509 |
+
|
510 |
+
word_list = self._split_word(word)
|
511 |
+
finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
|
512 |
+
for i, word in enumerate(word_list):
|
513 |
+
# conventional neutral-tone words in Chinese
|
514 |
+
if (
|
515 |
+
word in self.must_neural_tone_words
|
516 |
+
or word[-2:] in self.must_neural_tone_words
|
517 |
+
):
|
518 |
+
finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
|
519 |
+
finals = sum(finals_list, [])
|
520 |
+
return finals
|
521 |
+
|
522 |
+
def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:
|
523 |
+
# e.g. 看不懂
|
524 |
+
if len(word) == 3 and word[1] == "不":
|
525 |
+
finals[1] = finals[1][:-1] + "5"
|
526 |
+
else:
|
527 |
+
for i, char in enumerate(word):
|
528 |
+
# "不" before tone4 should be bu2, e.g. 不怕
|
529 |
+
if char == "不" and i + 1 < len(word) and finals[i + 1][-1] == "4":
|
530 |
+
finals[i] = finals[i][:-1] + "2"
|
531 |
+
return finals
|
532 |
+
|
533 |
+
def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
|
534 |
+
# "一" in number sequences, e.g. 一零零, 二一零
|
535 |
+
if word.find("一") != -1 and all(
|
536 |
+
[item.isnumeric() for item in word if item != "一"]
|
537 |
+
):
|
538 |
+
return finals
|
539 |
+
# "一" between reduplication words should be yi5, e.g. 看一看
|
540 |
+
elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
|
541 |
+
finals[1] = finals[1][:-1] + "5"
|
542 |
+
# when "一" is ordinal word, it should be yi1
|
543 |
+
elif word.startswith("第一"):
|
544 |
+
finals[1] = finals[1][:-1] + "1"
|
545 |
+
else:
|
546 |
+
for i, char in enumerate(word):
|
547 |
+
if char == "一" and i + 1 < len(word):
|
548 |
+
# "一" before tone4 should be yi2, e.g. 一段
|
549 |
+
if finals[i + 1][-1] == "4":
|
550 |
+
finals[i] = finals[i][:-1] + "2"
|
551 |
+
# "一" before non-tone4 should be yi4, e.g. 一天
|
552 |
+
else:
|
553 |
+
# "一" 后面如果是标点,还读一声
|
554 |
+
if word[i + 1] not in self.punc:
|
555 |
+
finals[i] = finals[i][:-1] + "4"
|
556 |
+
return finals
|
557 |
+
|
558 |
+
def _split_word(self, word: str) -> List[str]:
|
559 |
+
word_list = jieba.cut_for_search(word)
|
560 |
+
word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
|
561 |
+
first_subword = word_list[0]
|
562 |
+
first_begin_idx = word.find(first_subword)
|
563 |
+
if first_begin_idx == 0:
|
564 |
+
second_subword = word[len(first_subword) :]
|
565 |
+
new_word_list = [first_subword, second_subword]
|
566 |
+
else:
|
567 |
+
second_subword = word[: -len(first_subword)]
|
568 |
+
new_word_list = [second_subword, first_subword]
|
569 |
+
return new_word_list
|
570 |
+
|
571 |
+
def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
|
572 |
+
if len(word) == 2 and self._all_tone_three(finals):
|
573 |
+
finals[0] = finals[0][:-1] + "2"
|
574 |
+
elif len(word) == 3:
|
575 |
+
word_list = self._split_word(word)
|
576 |
+
if self._all_tone_three(finals):
|
577 |
+
# disyllabic + monosyllabic, e.g. 蒙古/包
|
578 |
+
if len(word_list[0]) == 2:
|
579 |
+
finals[0] = finals[0][:-1] + "2"
|
580 |
+
finals[1] = finals[1][:-1] + "2"
|
581 |
+
# monosyllabic + disyllabic, e.g. 纸/老虎
|
582 |
+
elif len(word_list[0]) == 1:
|
583 |
+
finals[1] = finals[1][:-1] + "2"
|
584 |
+
else:
|
585 |
+
finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
|
586 |
+
if len(finals_list) == 2:
|
587 |
+
for i, sub in enumerate(finals_list):
|
588 |
+
# e.g. 所有/人
|
589 |
+
if self._all_tone_three(sub) and len(sub) == 2:
|
590 |
+
finals_list[i][0] = finals_list[i][0][:-1] + "2"
|
591 |
+
# e.g. 好/喜欢
|
592 |
+
elif (
|
593 |
+
i == 1
|
594 |
+
and not self._all_tone_three(sub)
|
595 |
+
and finals_list[i][0][-1] == "3"
|
596 |
+
and finals_list[0][-1][-1] == "3"
|
597 |
+
):
|
598 |
+
finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
|
599 |
+
finals = sum(finals_list, [])
|
600 |
+
# split the idiom into two 2-character words
|
601 |
+
elif len(word) == 4:
|
602 |
+
finals_list = [finals[:2], finals[2:]]
|
603 |
+
finals = []
|
604 |
+
for sub in finals_list:
|
605 |
+
if self._all_tone_three(sub):
|
606 |
+
sub[0] = sub[0][:-1] + "2"
|
607 |
+
finals += sub
|
608 |
+
|
609 |
+
return finals
|
610 |
+
|
611 |
+
def _all_tone_three(self, finals: List[str]) -> bool:
|
612 |
+
return all(x[-1] == "3" for x in finals)
|
613 |
+
|
614 |
+
# merge "不" and the word behind it
|
615 |
+
# if they are not merged, "不" sometimes appears alone in jieba's output, which may cause sandhi errors
|
616 |
+
def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
|
617 |
+
new_seg = []
|
618 |
+
last_word = ""
|
619 |
+
for word, pos in seg:
|
620 |
+
if last_word == "不":
|
621 |
+
word = last_word + word
|
622 |
+
if word != "不":
|
623 |
+
new_seg.append((word, pos))
|
624 |
+
last_word = word[:]
|
625 |
+
if last_word == "不":
|
626 |
+
new_seg.append((last_word, "d"))
|
627 |
+
last_word = ""
|
628 |
+
return new_seg
|
629 |
+
|
630 |
+
# function 1: merge "一" with the reduplicated words on its left and right, e.g. "听","一","听" -> "听一听"
|
631 |
+
# function 2: merge a single "一" with the word that follows it
|
632 |
+
# if they are not merged, "一" sometimes appears alone in jieba's output, which may cause sandhi errors
|
633 |
+
# e.g.
|
634 |
+
# input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]
|
635 |
+
# output seg: [['听一听', 'v']]
|
636 |
+
def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
|
637 |
+
new_seg = []
|
638 |
+
# function 1
|
639 |
+
for i, (word, pos) in enumerate(seg):
|
640 |
+
if (
|
641 |
+
i - 1 >= 0
|
642 |
+
and word == "一"
|
643 |
+
and i + 1 < len(seg)
|
644 |
+
and seg[i - 1][0] == seg[i + 1][0]
|
645 |
+
and seg[i - 1][1] == "v"
|
646 |
+
):
|
647 |
+
new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
|
648 |
+
else:
|
649 |
+
if (
|
650 |
+
i - 2 >= 0
|
651 |
+
and seg[i - 1][0] == "一"
|
652 |
+
and seg[i - 2][0] == word
|
653 |
+
and pos == "v"
|
654 |
+
):
|
655 |
+
continue
|
656 |
+
else:
|
657 |
+
new_seg.append([word, pos])
|
658 |
+
seg = new_seg
|
659 |
+
new_seg = []
|
660 |
+
# function 2
|
661 |
+
for i, (word, pos) in enumerate(seg):
|
662 |
+
if new_seg and new_seg[-1][0] == "一":
|
663 |
+
new_seg[-1][0] = new_seg[-1][0] + word
|
664 |
+
else:
|
665 |
+
new_seg.append([word, pos])
|
666 |
+
return new_seg
|
667 |
+
|
668 |
+
# merge two adjacent words when both consist entirely of third-tone syllables
|
669 |
+
def _merge_continuous_three_tones(
|
670 |
+
self, seg: List[Tuple[str, str]]
|
671 |
+
) -> List[Tuple[str, str]]:
|
672 |
+
new_seg = []
|
673 |
+
sub_finals_list = [
|
674 |
+
lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
|
675 |
+
for (word, pos) in seg
|
676 |
+
]
|
677 |
+
assert len(sub_finals_list) == len(seg)
|
678 |
+
merge_last = [False] * len(seg)
|
679 |
+
for i, (word, pos) in enumerate(seg):
|
680 |
+
if (
|
681 |
+
i - 1 >= 0
|
682 |
+
and self._all_tone_three(sub_finals_list[i - 1])
|
683 |
+
and self._all_tone_three(sub_finals_list[i])
|
684 |
+
and not merge_last[i - 1]
|
685 |
+
):
|
686 |
+
# if the previous word is a reduplication, do not merge, because reduplications need to go through _neural_sandhi
|
687 |
+
if (
|
688 |
+
not self._is_reduplication(seg[i - 1][0])
|
689 |
+
and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
|
690 |
+
):
|
691 |
+
new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
|
692 |
+
merge_last[i] = True
|
693 |
+
else:
|
694 |
+
new_seg.append([word, pos])
|
695 |
+
else:
|
696 |
+
new_seg.append([word, pos])
|
697 |
+
|
698 |
+
return new_seg
|
699 |
+
|
700 |
+
def _is_reduplication(self, word: str) -> bool:
|
701 |
+
return len(word) == 2 and word[0] == word[1]
|
702 |
+
|
703 |
+
# merge when the last syllable of the first word and the first syllable of the second word are both third tone
|
704 |
+
def _merge_continuous_three_tones_2(
|
705 |
+
self, seg: List[Tuple[str, str]]
|
706 |
+
) -> List[Tuple[str, str]]:
|
707 |
+
new_seg = []
|
708 |
+
sub_finals_list = [
|
709 |
+
lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
|
710 |
+
for (word, pos) in seg
|
711 |
+
]
|
712 |
+
assert len(sub_finals_list) == len(seg)
|
713 |
+
merge_last = [False] * len(seg)
|
714 |
+
for i, (word, pos) in enumerate(seg):
|
715 |
+
if (
|
716 |
+
i - 1 >= 0
|
717 |
+
and sub_finals_list[i - 1][-1][-1] == "3"
|
718 |
+
and sub_finals_list[i][0][-1] == "3"
|
719 |
+
and not merge_last[i - 1]
|
720 |
+
):
|
721 |
+
# if the previous word is a reduplication, do not merge, because reduplications need to go through _neural_sandhi
|
722 |
+
if (
|
723 |
+
not self._is_reduplication(seg[i - 1][0])
|
724 |
+
and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
|
725 |
+
):
|
726 |
+
new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
|
727 |
+
merge_last[i] = True
|
728 |
+
else:
|
729 |
+
new_seg.append([word, pos])
|
730 |
+
else:
|
731 |
+
new_seg.append([word, pos])
|
732 |
+
return new_seg
|
733 |
+
|
734 |
+
def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
|
735 |
+
new_seg = []
|
736 |
+
for i, (word, pos) in enumerate(seg):
|
737 |
+
if i - 1 >= 0 and word == "儿" and seg[i - 1][0] != "#":
|
738 |
+
new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
|
739 |
+
else:
|
740 |
+
new_seg.append([word, pos])
|
741 |
+
return new_seg
|
742 |
+
|
743 |
+
def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
|
744 |
+
new_seg = []
|
745 |
+
for i, (word, pos) in enumerate(seg):
|
746 |
+
if new_seg and word == new_seg[-1][0]:
|
747 |
+
new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
|
748 |
+
else:
|
749 |
+
new_seg.append([word, pos])
|
750 |
+
return new_seg
|
751 |
+
|
752 |
+
def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
|
753 |
+
seg = self._merge_bu(seg)
|
754 |
+
try:
|
755 |
+
seg = self._merge_yi(seg)
|
756 |
+
except:
|
757 |
+
print("_merge_yi failed")
|
758 |
+
seg = self._merge_reduplication(seg)
|
759 |
+
seg = self._merge_continuous_three_tones(seg)
|
760 |
+
seg = self._merge_continuous_three_tones_2(seg)
|
761 |
+
seg = self._merge_er(seg)
|
762 |
+
return seg
|
763 |
+
|
764 |
+
def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:
|
765 |
+
finals = self._bu_sandhi(word, finals)
|
766 |
+
finals = self._yi_sandhi(word, finals)
|
767 |
+
finals = self._neural_sandhi(word, pos, finals)
|
768 |
+
finals = self._three_sandhi(word, finals)
|
769 |
+
return finals
|
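A short, hypothetical usage sketch of `ToneSandhi`, the way a Chinese g2p front end would drive it: segment with jieba's POS tagger, run the merge pass, then apply `modified_tone()` to each word's pypinyin finals (the exact call sites in this repo's `text/chinese.py` may differ slightly):

```python
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from text.tone_sandhi import ToneSandhi

sandhi = ToneSandhi()
seg = [(w, p) for w, p in psg.cut("你好世界")]
seg = sandhi.pre_merge_for_modify(seg)  # merge 不 / 一 / reduplications first

for word, pos in seg:
    finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
    finals = sandhi.modified_tone(word, pos, finals)
    print(word, finals)  # e.g. 你好 -> ['i2', 'ao3'] after third-tone sandhi
```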