thomas0104 commited on
Commit
17d75f1
1 Parent(s): 50a2519

Upload 26 files

Browse files
README.md ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - zh
4
+ license: apache-2.0
5
+ tags:
6
+ - whisper-event
7
+ - generated_from_trainer
8
+ datasets:
9
+ - mozilla-foundation/common_voice_11_0
10
+ metrics:
11
+ - wer
12
+ model-index:
13
+ - name: Whisper medium nan-tw only char
14
+ results:
15
+ - task:
16
+ name: Automatic Speech Recognition
17
+ type: automatic-speech-recognition
18
+ dataset:
19
+ name: mozilla-foundation/common_voice_11_0 nan-tw
20
+ type: mozilla-foundation/common_voice_11_0
21
+ config: nan-tw
22
+ split: test
23
+ args: nan-tw
24
+ metrics:
25
+ - name: Wer
26
+ type: wer
27
+ value: 45.2824427480916
28
+ ---
29
+
30
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
31
+ should probably proofread and complete it, then remove this comment. -->
32
+
33
+ # Whisper medium nan-tw only char
34
+
35
+ This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the mozilla-foundation/common_voice_11_0 nan-tw dataset.
36
+ It achieves the following results on the evaluation set:
37
+ - Loss: 0.9944
38
+ - Wer: 45.2824
39
+ - Cer: 45.3667
40
+
41
+ ## Model description
42
+
43
+ More information needed
44
+
45
+ ## Intended uses & limitations
46
+
47
+ More information needed
48
+
49
+ ## Training and evaluation data
50
+
51
+ More information needed
52
+
53
+ ## Training procedure
54
+
55
+ ### Training hyperparameters
56
+
57
+ The following hyperparameters were used during training:
58
+ - learning_rate: 1e-05
59
+ - train_batch_size: 2
60
+ - eval_batch_size: 2
61
+ - seed: 42
62
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
63
+ - lr_scheduler_type: linear
64
+ - lr_scheduler_warmup_steps: 500
65
+ - training_steps: 5000
66
+ - mixed_precision_training: Native AMP
67
+
68
+ ### Training results
69
+
70
+ | Training Loss | Epoch | Step | Validation Loss | Wer | Cer |
71
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
72
+ | 0.5832 | 1.04 | 1000 | 1.0634 | 56.3053 | 56.4745 |
73
+ | 0.1467 | 2.08 | 2000 | 1.0407 | 50.9618 | 51.0112 |
74
+ | 0.016 | 3.13 | 3000 | 1.0226 | 46.4427 | 46.5137 |
75
+ | 0.0001 | 5.01 | 4000 | 0.9974 | 45.4656 | 45.6082 |
76
+ | 0.0001 | 6.05 | 5000 | 0.9944 | 45.2824 | 45.3667 |
77
+
78
+
79
+ ### Framework versions
80
+
81
+ - Transformers 4.27.0.dev0
82
+ - Pytorch 1.13.1+cu117
83
+ - Datasets 2.8.0
84
+ - Tokenizers 0.13.2
__pycache__/ryNormText.cpython-38.pyc ADDED
Binary file (1.82 kB). View file
 
added_tokens.json ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "<|af|>": 50327,
3
+ "<|am|>": 50334,
4
+ "<|ar|>": 50272,
5
+ "<|as|>": 50350,
6
+ "<|az|>": 50304,
7
+ "<|ba|>": 50355,
8
+ "<|be|>": 50330,
9
+ "<|bg|>": 50292,
10
+ "<|bn|>": 50302,
11
+ "<|bo|>": 50347,
12
+ "<|br|>": 50309,
13
+ "<|bs|>": 50315,
14
+ "<|ca|>": 50270,
15
+ "<|cs|>": 50283,
16
+ "<|cy|>": 50297,
17
+ "<|da|>": 50285,
18
+ "<|de|>": 50261,
19
+ "<|el|>": 50281,
20
+ "<|en|>": 50259,
21
+ "<|es|>": 50262,
22
+ "<|et|>": 50307,
23
+ "<|eu|>": 50310,
24
+ "<|fa|>": 50300,
25
+ "<|fi|>": 50277,
26
+ "<|fo|>": 50338,
27
+ "<|fr|>": 50265,
28
+ "<|gl|>": 50319,
29
+ "<|gu|>": 50333,
30
+ "<|haw|>": 50352,
31
+ "<|ha|>": 50354,
32
+ "<|he|>": 50279,
33
+ "<|hi|>": 50276,
34
+ "<|hr|>": 50291,
35
+ "<|ht|>": 50339,
36
+ "<|hu|>": 50286,
37
+ "<|hy|>": 50312,
38
+ "<|id|>": 50275,
39
+ "<|is|>": 50311,
40
+ "<|it|>": 50274,
41
+ "<|ja|>": 50266,
42
+ "<|jw|>": 50356,
43
+ "<|ka|>": 50329,
44
+ "<|kk|>": 50316,
45
+ "<|km|>": 50323,
46
+ "<|kn|>": 50306,
47
+ "<|ko|>": 50264,
48
+ "<|la|>": 50294,
49
+ "<|lb|>": 50345,
50
+ "<|ln|>": 50353,
51
+ "<|lo|>": 50336,
52
+ "<|lt|>": 50293,
53
+ "<|lv|>": 50301,
54
+ "<|mg|>": 50349,
55
+ "<|mi|>": 50295,
56
+ "<|mk|>": 50308,
57
+ "<|ml|>": 50296,
58
+ "<|mn|>": 50314,
59
+ "<|mr|>": 50320,
60
+ "<|ms|>": 50282,
61
+ "<|mt|>": 50343,
62
+ "<|my|>": 50346,
63
+ "<|ne|>": 50313,
64
+ "<|nl|>": 50271,
65
+ "<|nn|>": 50342,
66
+ "<|nocaptions|>": 50362,
67
+ "<|notimestamps|>": 50363,
68
+ "<|no|>": 50288,
69
+ "<|oc|>": 50328,
70
+ "<|pa|>": 50321,
71
+ "<|pl|>": 50269,
72
+ "<|ps|>": 50340,
73
+ "<|pt|>": 50267,
74
+ "<|ro|>": 50284,
75
+ "<|ru|>": 50263,
76
+ "<|sa|>": 50344,
77
+ "<|sd|>": 50332,
78
+ "<|si|>": 50322,
79
+ "<|sk|>": 50298,
80
+ "<|sl|>": 50305,
81
+ "<|sn|>": 50324,
82
+ "<|so|>": 50326,
83
+ "<|sq|>": 50317,
84
+ "<|sr|>": 50303,
85
+ "<|startoflm|>": 50360,
86
+ "<|startofprev|>": 50361,
87
+ "<|startoftranscript|>": 50258,
88
+ "<|su|>": 50357,
89
+ "<|sv|>": 50273,
90
+ "<|sw|>": 50318,
91
+ "<|ta|>": 50287,
92
+ "<|te|>": 50299,
93
+ "<|tg|>": 50331,
94
+ "<|th|>": 50289,
95
+ "<|tk|>": 50341,
96
+ "<|tl|>": 50348,
97
+ "<|transcribe|>": 50359,
98
+ "<|translate|>": 50358,
99
+ "<|tr|>": 50268,
100
+ "<|tt|>": 50351,
101
+ "<|uk|>": 50280,
102
+ "<|ur|>": 50290,
103
+ "<|uz|>": 50337,
104
+ "<|vi|>": 50278,
105
+ "<|yi|>": 50335,
106
+ "<|yo|>": 50325,
107
+ "<|zh|>": 50260
108
+ }
all_results.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 6.05,
3
+ "eval_cer": 45.36673709628735,
4
+ "eval_loss": 0.994395911693573,
5
+ "eval_runtime": 226.1976,
6
+ "eval_samples_per_second": 4.359,
7
+ "eval_steps_per_second": 2.18,
8
+ "eval_wer": 45.2824427480916,
9
+ "train_loss": 0.3284327008752851,
10
+ "train_runtime": 3725.671,
11
+ "train_samples_per_second": 2.684,
12
+ "train_steps_per_second": 1.342
13
+ }
config.json ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "openai/whisper-medium",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "gelu",
5
+ "apply_spec_augment": false,
6
+ "architectures": [
7
+ "WhisperForConditionalGeneration"
8
+ ],
9
+ "attention_dropout": 0.0,
10
+ "begin_suppress_tokens": [
11
+ 220,
12
+ 50257
13
+ ],
14
+ "bos_token_id": 50257,
15
+ "d_model": 1024,
16
+ "decoder_attention_heads": 16,
17
+ "decoder_ffn_dim": 4096,
18
+ "decoder_layerdrop": 0.0,
19
+ "decoder_layers": 24,
20
+ "decoder_start_token_id": 50258,
21
+ "dropout": 0.0,
22
+ "encoder_attention_heads": 16,
23
+ "encoder_ffn_dim": 4096,
24
+ "encoder_layerdrop": 0.0,
25
+ "encoder_layers": 24,
26
+ "eos_token_id": 50257,
27
+ "forced_decoder_ids": null,
28
+ "init_std": 0.02,
29
+ "is_encoder_decoder": true,
30
+ "mask_feature_length": 10,
31
+ "mask_feature_min_masks": 0,
32
+ "mask_feature_prob": 0.0,
33
+ "mask_time_length": 10,
34
+ "mask_time_min_masks": 2,
35
+ "mask_time_prob": 0.05,
36
+ "max_length": 448,
37
+ "max_source_positions": 1500,
38
+ "max_target_positions": 448,
39
+ "model_type": "whisper",
40
+ "num_hidden_layers": 24,
41
+ "num_mel_bins": 80,
42
+ "pad_token_id": 50257,
43
+ "scale_embedding": false,
44
+ "torch_dtype": "float32",
45
+ "transformers_version": "4.27.0.dev0",
46
+ "use_cache": true,
47
+ "vocab_size": 51865
48
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 6.05,
3
+ "eval_cer": 45.36673709628735,
4
+ "eval_loss": 0.994395911693573,
5
+ "eval_runtime": 226.1976,
6
+ "eval_samples_per_second": 4.359,
7
+ "eval_steps_per_second": 2.18,
8
+ "eval_wer": 45.2824427480916
9
+ }
generation_config.json ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "begin_suppress_tokens": [
3
+ 220,
4
+ 50257
5
+ ],
6
+ "bos_token_id": 50257,
7
+ "decoder_start_token_id": 50258,
8
+ "eos_token_id": 50257,
9
+ "forced_decoder_ids": [
10
+ [
11
+ 1,
12
+ null
13
+ ],
14
+ [
15
+ 2,
16
+ 50359
17
+ ],
18
+ [
19
+ 3,
20
+ 50363
21
+ ]
22
+ ],
23
+ "is_multilingual": true,
24
+ "lang_to_id": {
25
+ "<|af|>": 50327,
26
+ "<|am|>": 50334,
27
+ "<|ar|>": 50272,
28
+ "<|as|>": 50350,
29
+ "<|az|>": 50304,
30
+ "<|ba|>": 50355,
31
+ "<|be|>": 50330,
32
+ "<|bg|>": 50292,
33
+ "<|bn|>": 50302,
34
+ "<|bo|>": 50347,
35
+ "<|br|>": 50309,
36
+ "<|bs|>": 50315,
37
+ "<|ca|>": 50270,
38
+ "<|cs|>": 50283,
39
+ "<|cy|>": 50297,
40
+ "<|da|>": 50285,
41
+ "<|de|>": 50261,
42
+ "<|el|>": 50281,
43
+ "<|en|>": 50259,
44
+ "<|es|>": 50262,
45
+ "<|et|>": 50307,
46
+ "<|eu|>": 50310,
47
+ "<|fa|>": 50300,
48
+ "<|fi|>": 50277,
49
+ "<|fo|>": 50338,
50
+ "<|fr|>": 50265,
51
+ "<|gl|>": 50319,
52
+ "<|gu|>": 50333,
53
+ "<|haw|>": 50352,
54
+ "<|ha|>": 50354,
55
+ "<|he|>": 50279,
56
+ "<|hi|>": 50276,
57
+ "<|hr|>": 50291,
58
+ "<|ht|>": 50339,
59
+ "<|hu|>": 50286,
60
+ "<|hy|>": 50312,
61
+ "<|id|>": 50275,
62
+ "<|is|>": 50311,
63
+ "<|it|>": 50274,
64
+ "<|ja|>": 50266,
65
+ "<|jw|>": 50356,
66
+ "<|ka|>": 50329,
67
+ "<|kk|>": 50316,
68
+ "<|km|>": 50323,
69
+ "<|kn|>": 50306,
70
+ "<|ko|>": 50264,
71
+ "<|la|>": 50294,
72
+ "<|lb|>": 50345,
73
+ "<|ln|>": 50353,
74
+ "<|lo|>": 50336,
75
+ "<|lt|>": 50293,
76
+ "<|lv|>": 50301,
77
+ "<|mg|>": 50349,
78
+ "<|mi|>": 50295,
79
+ "<|mk|>": 50308,
80
+ "<|ml|>": 50296,
81
+ "<|mn|>": 50314,
82
+ "<|mr|>": 50320,
83
+ "<|ms|>": 50282,
84
+ "<|mt|>": 50343,
85
+ "<|my|>": 50346,
86
+ "<|ne|>": 50313,
87
+ "<|nl|>": 50271,
88
+ "<|nn|>": 50342,
89
+ "<|no|>": 50288,
90
+ "<|oc|>": 50328,
91
+ "<|pa|>": 50321,
92
+ "<|pl|>": 50269,
93
+ "<|ps|>": 50340,
94
+ "<|pt|>": 50267,
95
+ "<|ro|>": 50284,
96
+ "<|ru|>": 50263,
97
+ "<|sa|>": 50344,
98
+ "<|sd|>": 50332,
99
+ "<|si|>": 50322,
100
+ "<|sk|>": 50298,
101
+ "<|sl|>": 50305,
102
+ "<|sn|>": 50324,
103
+ "<|so|>": 50326,
104
+ "<|sq|>": 50317,
105
+ "<|sr|>": 50303,
106
+ "<|su|>": 50357,
107
+ "<|sv|>": 50273,
108
+ "<|sw|>": 50318,
109
+ "<|ta|>": 50287,
110
+ "<|te|>": 50299,
111
+ "<|tg|>": 50331,
112
+ "<|th|>": 50289,
113
+ "<|tk|>": 50341,
114
+ "<|tl|>": 50348,
115
+ "<|tr|>": 50268,
116
+ "<|tt|>": 50351,
117
+ "<|uk|>": 50280,
118
+ "<|ur|>": 50290,
119
+ "<|uz|>": 50337,
120
+ "<|vi|>": 50278,
121
+ "<|yi|>": 50335,
122
+ "<|yo|>": 50325,
123
+ "<|zh|>": 50260
124
+ },
125
+ "max_initial_timestamp_index": 1,
126
+ "max_length": 448,
127
+ "no_timestamps_token_id": 50363,
128
+ "pad_token_id": 50257,
129
+ "suppress_tokens": [
130
+ 1,
131
+ 2,
132
+ 7,
133
+ 8,
134
+ 9,
135
+ 10,
136
+ 14,
137
+ 25,
138
+ 26,
139
+ 27,
140
+ 28,
141
+ 29,
142
+ 31,
143
+ 58,
144
+ 59,
145
+ 60,
146
+ 61,
147
+ 62,
148
+ 63,
149
+ 90,
150
+ 91,
151
+ 92,
152
+ 93,
153
+ 359,
154
+ 503,
155
+ 522,
156
+ 542,
157
+ 873,
158
+ 893,
159
+ 902,
160
+ 918,
161
+ 922,
162
+ 931,
163
+ 1350,
164
+ 1853,
165
+ 1982,
166
+ 2460,
167
+ 2627,
168
+ 3246,
169
+ 3253,
170
+ 3268,
171
+ 3536,
172
+ 3846,
173
+ 3961,
174
+ 4183,
175
+ 4667,
176
+ 6585,
177
+ 6647,
178
+ 7273,
179
+ 9061,
180
+ 9383,
181
+ 10428,
182
+ 10929,
183
+ 11938,
184
+ 12033,
185
+ 12331,
186
+ 12562,
187
+ 13793,
188
+ 14157,
189
+ 14635,
190
+ 15265,
191
+ 15618,
192
+ 16553,
193
+ 16604,
194
+ 18362,
195
+ 18956,
196
+ 20075,
197
+ 21675,
198
+ 22520,
199
+ 26130,
200
+ 26161,
201
+ 26435,
202
+ 28279,
203
+ 29464,
204
+ 31650,
205
+ 32302,
206
+ 32470,
207
+ 36865,
208
+ 42863,
209
+ 47425,
210
+ 49870,
211
+ 50254,
212
+ 50258,
213
+ 50358,
214
+ 50359,
215
+ 50360,
216
+ 50361,
217
+ 50362
218
+ ],
219
+ "task_to_id": {
220
+ "transcribe": 50359,
221
+ "translate": 50358
222
+ },
223
+ "transformers_version": "4.27.0.dev0"
224
+ }
label.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ label_str=['心 理 性 別 ', '火 燒 尻 川 ', '草 仔 色 ', '十 三 行 ', '水 里 鄉 ', '這 搭 有 一 个 大 商 場 ', '聯 合 國 ', '菜 頭 籠 仔 ', '辣 椒 ', '肉 筋 ', '新 興 區 ', '癩 疒 哥 病 ', '覕 冬 ', '摧 仔 ', '西 藥 房 ', '心 碎 ', '燒 包 仔 ', '通 敨 ', '皇 軍 ', '濁 水 溪 出 代 誌 ', '粗 細 ', '哀 哼 ', '九 年 一 貫 ', '精 靈 ', '三 寶 ', '速 速 ', '坐 疶 咧 等 ', '儑 目 袂 曉 看 懸 低 ', '圳 後 ', '兩 个 人 已 經 講 和 矣 ', '足 媠 ', '細 空 喙 ', '掠 著 根 頭 ', '茨 城 縣 ', '概 率 ', '草 人 坑 ', '祝 你 生 日 快 樂 ', '坑 崁 ', '出 水 ', '咇 怦 喘 ', '夯 枷 ', '外 勞 ', '臺 灣 人 ', '落 湳 ', '苑 裡 坑 ', '坑 仔 內 ', '起 慼 到 地 ', '雨 晴 ', '紅 蟲 ', '大 湖 ', '誻 誻 叫 ', '柴 球 ', '大 胃 王 ', '大 稻 埕 ', '偷 壘 ', '鬆 餅 ', '喙 罨 囊 ', '龍 文 ', '時 局 ', '摔 死 ', '煞 尾 ', '無 話 講 茭 荖 ', '下 暗 飯 ', '一 籠 ', '一 定 考 袂 牢 ', '看 一 个 影 生 一 个 囝 ', '倒 踅 ', '筆 型 ', '歸 正 修 善 ', '滯 到 ', '變 天 ', '一 工 到 暗 ', '雲 吞 ', '有 幾 項 菜 ', '炎 舞 ', '鹿 角 龜 ', '過 路 線 ', '惡 馬 惡 人 騎 ', '粗 俗 ', '這 个 偌 濟 錢 ', '七 仔 ', '看 甲 凊 凊 楚 楚 ', '戇 神 ', '發 生 ', '病 囡 仔 ', '心 花 當 開 ', '番 鼎 仔 ', '老 硞 硞 ', '大 傢 伙 ', '名 聲 真 敨 ', '攕 烘 肉 ', '滑 ', '龜 蛇 ', '水 雞 皮 仔 ', '攑 硬 篙 ', '抾 無 三 門 墓 著 想 欲 做 土 公 ', '紅 圓 仔 ', '臺 北 橋 ', '雲 林 縣 ', '臺 中 港 ', '水 里 ', '臺 南 市 ', '七 堵 ', '做 功 德 ', '大 村 鄉 ', '小 喇 叭 ', '紹 ', '漢 草 真 好 ', '田 嬰 ', '按 呢 生 ', '海 尾 ', '無 米 閣 拄 著 閏 月 ', '枝 香 小 細 ', '噗 ', '頭 尾 ', '五 短 生 張 ', '正 常 ', '套 牢 咧 ', '動 物 性 膠 質 ', '雪 文 精 ', '悠 遊 卡 ', '你 攏 無 歇 睏 喔 ', '埤 頭 鄉 ', '信 義 大 安 路 口 ', '三 三 八 八 ', '你 話 毋 通 烏 白 講 ', '麥 寮 鄉 ', '你 莫 定 定 挖 肚 臍 ', '新 安 路 ', '金 錢 毋 是 萬 能 的 ', '菠 菱 仔 菜 ', '富 寮 里 ', '你 是 啥 人 ', '反 烏 ', '新 街 ', '合 銅 ', '疼 疼 ', '番 麥 殼 ', '天 氣 冷 矣 你 就 愛 加 疊 一 領 衫 ', '我 到 底 看 著 啥 物 ', '手 扞 仔 ', '姑 不 二 三 將 ', '品 格 ', '透 種 仔 ', '漚 腳 數 ', '隔 音 壁 ', '拍 通 透 ', '開 花 滿 天 芳 結 子 才 驚 人 ', '東 北 季 風 ', '無 人 機 ', '做 牛 無 惜 力 ', '淨 空 排 放 ', '這 藥 仔 抹 落 去 真 緊 就 會 退 癀 矣 ', '毛 蓑 仔 ', '抉 喙 䫌 ', '陽 明 山 ', '咖 啡 館 ', '囡 仔 色 ', '佇 墜 樓 前 一 點 鐘 ', '伊 規 心 欲 考 大 學 ', '東 湖 國 中 ', '中 原 公 園 ', '半 路 店 ', '檳 榔 坑 ', '東 石 鄉 ', '摒 掃 的 ', '無 中 心 化 ', '肉 粽 角 ', '編 譯 器 ', '厝 邊 頭 尾 ', '桃 源 街 ', '仁 愛 杭 州 路 口 ', '千 葉 ', '七 色 圖 ', '大 吉 大 利 ', '尪 仔 頭 鏡 ', '徐 ', '咱 做 代 誌 的 時 陣 ', '包 水 餃 ', '歹 鳥 毋 知 飛 歹 柴 破 袂 開 ', '分 裝 場 ', '健 康 中 心 ', '印 度 尼 西 亞 ', '新 里 族 ', '獅 仔 頭 ', '大 度 路 ', '市 仔 ', '露 營 區 ', '學 姐 ', '枋 南 線 ', '虎 尾 鎮 ', '瀉 腹 肚 ', '恆 春 鎮 ', '小 血 球 ', '破 格 ', '我 衫 攏 穿 上 大 領 ', '溜 落 來 ', '半 條 命 ', '生 活 習 慣 ', '電 影 間 ', '菜 脯 卵 ', '塗 塗 塗 ', '鐵 甲 牛 ', '風 火 頭 ', '現 流 仔 ', '白 了 工 ', '空 港 ', '較 大 ', '公 正 橋 ', '巴 結 ', '果 汁 ', '有 䆀 無 一 好 ', '這 時 ', '竹 北 ', '下 早 ', '較 早 睏 ', '六 六 大 順 ', '公 視 ', '趕 趕 緊 緊 ', '帕 仔 ', '藍 瘦 香 菇 ', '山 貓 ', '大 頭 狗 母 ', '土 板 仁 豆 ', '咧 ', '內 緣 ', '阮 會 曉 唸 ', '茶 末 ', '真 有 喙 水 ', '魔 導 士 ', '先 承 認 你 就 是 恁 朋 友 ', '青 山 王 ', '奧 克 蘭 ', '金 東 ', '便 若 ', '陳 奕 齊 ', '九 尾 金 ', '貓 空 ', '過 tang跤 ', '頭 汴 坑 溪 ', '無 定 著 ', '控 機 仔 ', '趨 湧 ', '中 洲 ', '青 田 街 ', '同 房 ', '鼢 底 的 ', '多 元 性 別 ', '舊 厝 ', '貓 潲 ', '福 連 國 小 ', '點 撇 ', '玜 玳 ', '欲 起 鼓 矣 ', '納 豆 ', '中 環 ', '你 死 我 活 ', '踏 著 歹 地 步 ', '曲 去 ', '怙 喙 講 个 無 準 算 ', '交 力 坪 ', '愈 來 愈 嚴 重 ', '一 个 一 个 ', '淡 江 大 橋 ', '上 大 代 死 煞 ', '檨 仔 葉 公 ', '胳 胴 跤 ', '郁 永 河 ', '講 啥 物 芋 仔 番 薯 ', '霓 ', '青 盲 雞 啄 著 蟲 ', '搣 屎 毋 知 臭 ', '斗 南 ', '孤 笑 無 嗣 ', '水 鬼 仔 ', '門 將 ', '手 控 ', '心 胞 膜 炎 ', '敢 食 汽 油 ', '阿 卜 蛇 ', '機 動 戰 士 ', '便 當 ', '敧 一 爿 ', '敲 仙 古 ', '所 羅 門 群 島 ', '終 界 ', '我 愛 台 語 ', '溝 頂 ', '墊 定 ', '詳 解 ', '火 燒 ', '絡 繹 不 ��� ', '尻 川 門 ', '甌 仔 疊 碟 仔 ', '黃 巾 甕 仔 ', '汕 ', '生 甲 袂 䆀 ', '狗 母 酥 ', '倚 桌 仔 ', '中 央 大 學 ', '石 門 水 庫 ', '亂 使 想 ', '百 貨 店 ', '酸 素 矸 ', '蛤 kai', '洗 衫 仔 枋 ', '天 台 ', '數 位 pay', '漚 缺 ', '鈍 市 貨 ', '偷 走 壘 ', '夭 壽 鬼 ', '好 所 在 ', '軟 市 ', '徹 理 ', '現 挽 ', '叫 電 話 ', '伊 生 做 遮 爾 臭 老 ', '魔 神 仔 咒 讖 ', '一 點 點 仔 ', '兩 光 ', '契 查 某 囝 ', '提 著 ', '傷 心 酒 店 ', '過 失 致 死 ', '較 拚 咧 ', '透 身 大 汗 ', '碌 硞 馬 仔 ', '兄 弟 姊 妹 著 愛 仝 心 ', '家 
伙 仔 ', '垃 圾 物 仔 ', '狐 狸 貂 ', '石 斑 ', '虎 魚 ', '落 雨 了 後 塗 跤 變 甲 澹 漉 漉 ', '較 慘 ', '基 隆 市 ', '你 欲 行 對 佗 位 去 ', '敦 化 和 平 路 口 ', '港 仔 喙 ', '阮 囝 今 年 二 十 出 頭 矣 ', '新 港 飴 ', '哀 父 叫 母 ', '一 份 ', '毛 毛 仔 雨 ', '磐 安 ', '牛 籠 ', '袂 看 得 ', '立 功 立 德 ', '龜 山 島 ', '過 晝 ', '捒 做 堆 ', '梅 仔 跤 ', '包 帶 ', '番 地 ', '三 重 ', '林 子 口 ', '筆 電 ', '操 刀 ', '揣 轉 來 ', '指 中 指 ', '珠 仔 台 ', '新 廍 ', '相 帶 ', '頂 塊 廖 ', '竹 南 ', '你 共 包 仔 提 去 熁 燒 一 下 ', '平 方 根 ', '頂 公 館 ', '散 仙 ', '演 藝 界 ', '繪 本 ', '毋 情 毋 願 ', '較 緊 ', '有 也 好 無 也 好 ', '操 心 ', '摃 槌 仔 龍 ', '怪 奇 古 ', '國 破 家 亡 ', '新 羅 ', '貴 氣 ', '沓 沓 仔 ', '拍 薄 ', '物 件 若 予 伊 提 過 手 ', '輾 仔 鞋 ', '異 界 ', '鼠 尾 風 ', '湖 內 ', '提 走 ', '無 打 無 啥 ', '我 欲 準 備 落 山 矣 ', '盜 用 ', '聽 無 ', '彼 个 頭 家 娘 對 人 客 誠 好 禮 ', '大 細 輦 ', '無 歇 ', '無 疑 悟 你 會 出 國 讀 冊 ', '石 園 ', '嘉 義 ', '後 寮 ', '我 攏 有 看 著 ', '劍 南 路 ', '歹 天 ', '三 跤 步 一 坎 店 ', '臘 肉 ', '舊 里 族 ', '下 晡 點 心 ', '冷 管 ', '無 及 格 ', '公 允 ', '十 喙 九 尻 川 ', '投 影 片 ', '納 錢 ', '墓 坑 鳥 ', '活 餌 桶 仔 ', '激 屎 面 ', '袂 對 同 ', '紡 織 機 ', '木 質 部 ', '落 口 講 出 來 ', '投 丁 ', '棚 頂 做 甲 流 汗 棚 跤 嫌 甲 流 瀾 ', '僫 鬥 陣 ', '拍 死 板 ', '捅 鼻 ', '搭 峇 ', '簡 述 ', '一 个 接 一 个 ', '緊 來 旋 ', '閒 仔 話 ', '拍 爽 ', '德 拉 瓦 ', '總 舖 ', '三 日 不 見 久 溜 溜 ', '合 掛 ', '手 神 重 ', '電 子 ', '暗 空 ', '瘦 食 ', '塑 膠 車 ', '要 意 ', '燒 雞 胿 ', '吊 猴 ', '掠 猴 ', '暗 頭 仔 ', '濟 囝 濟 擘 腹 濟 新 婦 濟 體 剔 ', '交 球 ', '無 拄 好 ', '搶 頭 標 ', '金 環 失 日 ', '觀 音 大 士 ', '在 準 ', '欲 暗 矣 ', '嘔 落 ', '蛣 蛚 ', '一 目 仔 ', '提 款 片 ', '無 你 个 代 ', '天 公 祖 ', '串 講 ', '離 題 ', '馬 鈴 薯 條 ', '做 塗 水 的 ', '人 情 味 ', '劉 銘 傳 ', '肝 包 油 ', '糖 醋 ', '二 戰 ', '拍 鐵 師 ', '慢 一 跤 步 ', '提 批 的 ', '下 禮 拜 ', '死 亡 之 握 ', '你 毋 通 囥 佇 心 肝 內 ', '臭 龜 仔 ', '趁 一 空 食 三 冬 ', '我 欲 學 臺 語 ', '埋 ', '馬 卡 道 ', '箍 喙 罨 ', '貴 州 省 ', '敦 化 北 路 ', '你 看 起 來 是 遮 爾 仔 媠 ', '精 个 食 戇 戇 个 食 天 公 ', '轉 去 ', '下 晡 市 ', '幾 个 ', '邦 長 ', '港 墘 ', '字 條 仔 ', '無 去 矣 ', '言 行 錄 ', '買 空 ', '六 點 ', '死 忠 ', '石 母 奶 ', '耳 仔 機 ', '貢 寮 區 ', '含 血 噴 天 ', '大 好 額 人 ', '行 政 院 ', '食 市 ', '九 塊 厝 ', '冷 清 ', '月 光 ', '種 喙 齒 ', '街 友 ', '天 公 星 ', '縣 ', '愛 臺 語 ', '我 強 欲 袂 赴 矣 ', '添 飯 ', '有 連 線 ', '昶 ', '中 國 話 ', '攑 香 綴 拜 ', '四 跤 草 ', '疼 風 ', '觀 覽 ', '吉 祥 卵 ', '擲 掉 ', '打 狗 山 ', '西 瓜 倚 大 爿 ', '有 這 款 的 代 誌 我 攏 毋 知 ', '磨 豆 仔 機 ', '國 姓 爺 ', '林 口 區 ', '潘 ', '可 算 名 詞 ', '代 誌 大 碗 ', '半 信 半 疑 ', '恬 稚 恬 稚 討 客 兄 唯 是 ', '遛 光 光 ', '扲 糖 無 洗 手 的 朋 友 ', '烏 日 ', '魏 風 ', '膵 臟 ', '犯 險 ', '連 半 个 人 影 嘛 無 ', '吊 褲 ', '塗 水 工 ', '霧 嗄 嗄 ', '飛 行 船 ', '欲 完 矣 ', '運 將 ', '恁 老 母 拄 咧 念 你 ', '伊 有 誠 濟 內 孫 ', '港 仔 喙 國 小 ', '橐 個 束 個 ', '落 尾 ', '平 枋 電 腦 ', '毛 毛 仔 雨 落 久 塗 塗 嘛 會 澹 ', '冷 滾 水 ', '一 个 紅 龜 按 一 个 位 ', '鹹 酥 雞 ', '雞 卵 糕 仔 ', '溼 汗 ', '諸 島 ', '聖 誕 老 阿 公 ', '天 頂 白 茫 茫 ', '飯 煎 ', '男 權 ', '毋 認 輸 ', '餘 溫 ', '包 山 包 海 ', '踮 空 龜 ', '洗 衫 精 ', '迒 境 電 商 ', '開 喙 蚶 粒 粒 臭 ', '焦 水 期 ', '老 龜 精 ', '塑 膠 橐 仔 ', '蘇 維 埃 聯 盟 ', '臭 水 ', '束 頷 頸 仔 ', '飛 行 機 ', '平 溪 ', '唯 恐 天 下 不 亂 ', '錢 票 ', '瓜 仔 鬚 ', '羊 角 三 劍 ', '半 腰 ', '辭 彙 ', '衝 湧 ', '敢 有 問 題 ', '食 涼 ', '畫 蛇 添 足 ', '做 塗 水 ', '怪 奇 ', '板 金 ', '交 關 ', '海 牙 ', '中 山 路 ', '番 仔 狗 ', '粉 圓 仔 ', '龜 龜 毛 毛 ', '毋 捌 字 看 告 示 ', '表 單 ', '莒 光 號 ', '海 豬 仔 ', '猴 頭 果 ', '翕 新 娘 相 ', '面 書 ', '茶 葉 蛋 ', '花 枝 招 展 ', '食 人 一 口 還 人 一 斗 ', '鐵 彈 ', '東 白 仔 ', '年 兜 ', '佛 心 投 資 者 ', '人 講 綴 人 講 ', '金 剛 戰 士 ', '做 肉 餅 ', '相 駁 喙 ', '鸚 哥 魚 ', '扁 桃 泉 ', '一 筆 錢 ', '果 菜 ', '柑 仔 蜜 醬 ', '一 枝 草 一 點 露 ', '早 時 ', '大 頭 母 人 ', '包 的 ', '頂 禮 拜 ', '尿 苴 仔 ', '人 驚 鬼 鬼 驚 人 ', '無 記 持 症 ', '日 落 山 ', '偏 鄉 ', '拭 塗 跤 ', '袂 死 心 ', '蓋 濟 ', '左 右 鄰 ', '有 聽 著 無 ', '大 圓 埕 ', '斗 籠 仔 本 ', '摃 寇 ', '野 蓮 ', '勥 查 某 ', '暗 頓 食 啥 ', '草 其 略 仔 ', '暢 快 ', '小 包 ', '薄 餅 ', '趒 ', '暖 帽 ', '無 精 彩 ', '內 家 ', '鬍 鬚 ', '燒 仙 草 ', '痀 崙 ', '吳 剛 剉 桂 ', '沙 灘 ', '人 聲 喊 喝 ', '天 公 無 帶 著 ', '望 安 鄉 ', '食 風 ', '分 子 ', '批 車 ', '大 舌 ', '公 館 路 ', '碧 潭 路 ', '臺 北 歐 洲 
學 校 小 學 部 ', '充 耳 不 聞 ', '四 份 仔 ', '頭 崁 的 ', '南 機 場 ', '永 綏 街 ', '朴 子 溪 ', '池 上 ', '跤 車 ', '管 待 ', '苦 袂 當 ', '陳 滄 江 ', '缺 囝 化 ', '硨 磲 ', '共 人 唌 ', '頂 塊 黃 ', '潮 州 街 口 ', '社 群 ', '鄉 長 厝 ', '豐 樂 公 園 ', '金 湖 鎮 ', '忠 孝 復 興 ', '民 族 區 運 路 口 ', '松 信 路 ', '糋 麭 粉 ', '鄞 州 ', '台 北 聯 絡 線 ', '又 閣 咧 咳 嗽 ', '伊 加 我 一 輩 ', '霜 仔 ', '魚 池 鄉 ', '囝 甘 仔 囝 ', '四 跤 水 仙 ', '暖 暖 區 ', '用 齒 抿 仔 洗 喙 時 愛 先 捋 齒 膏 ', '新 社 ', '母 身 若 顧 予 伊 好 ', '實 喉 ', '埔 尾 ', '步 路 ', '冰 糖 ', '復 興 橋 ', '阿 拉 ', '繏 絚 ', '員 林 市 ', '蝦 糋 ', '戇 代 誌 ', '年 拄 年 ', '景 興 路 ', '海 結 仔 頭 ', '一 過 半 過 ', '實 聯 制 ', '後 擴 ', '瑞 芳 ', '蘭 嶼 貓 頭 鳥 ', '貓 羅 溪 ', '子 宮 喙 癌 ', '果 子 宅 ', '假 說 ', '不 在 ', '燕 巢 區 ', '上 海 毛 蟹 ', '意 式 麵 ', '捲 風 ', '話 語 之 中 ', '皮 蛋 精 肉 糜 ', '紅 豆 仔 麭 ', '頂 回 ', '相 見 ', '三 兩 人 講 四 斤 話 ', '梘 尾 ', '質 子 ', '兩 箍 ', '真 珠 看 做 鳥 鼠 屎 ', '天 晴 ', '角 運 動 量 ', '莫 問 足 恐 怖 ', '鼻 隔 間 ', '叨 撚 ', '止 疼 藥 ', '恙 蟲 病 ', '火 成 岩 ', '許 崑 源 ', '土 象 ', '豬 頭 癀 ', '禁 喙 ', '昨 昏 ', '卡 通 ', '番 薯 片 ', '砰 去 ', '卵 珠 ', '歡 喜 甘 願 ', '大 海 毋 驚 大 水 ', '橫 的 ', '跤 手 賤 ', '旭 ', '仙 巴 掌 ', '徛 桌 櫃 ', '查 畝 營 ', '番 薯 球 ', '芳 料 草 ', '大 火 ', '夥 計 某 ', '英 國 話 ', '明 知 故 問 ', '老 街 ', '看 有 ', '燃 柴 添 火 著 ', '藝 文 活 動 ', '臺 羅 字 ', '利 ', '金 煌 檨 ', '死 袂 見 笑 ', '嚇 止 ', '加 法 ', '馬 鈴 薯 片 ', '日 頭 曝 尻 川 矣 閣 毋 起 來 ', '英 語 課 本 ', '凍 水 機 ', '大 肚 胿 仔 ', '卑 南 ', '袂 當 干 焦 我 看 著 ', '牛 睏 山 ', '代 誌 好 勢 矣 未 ', '媠 氣 ', '景 文 街 ', '咒 天 咒 地 ', '今 年 是 一 个 好 年 冬 ', '事 假 ', '烏 山 頭 ', '三 講 四 毋 著 ', '語 素 文 字 ', '反 差 ', '大 日 本 帝 國 ', '迒 界 網 商 ', '嚨 喉 ', '褪 躘 ', '莫 假 ', '緊 去 做 代 誌 較 要 緊 ', '你 講 啥 ', '頸 椎 ', '違 扴 ', '身 體 健 康 ', '跤 手 十 足 ', '電 磁 學 ', '繫 年 ', '大 鑼 大 鼓 ', '伊 是 一 个 大 美 人 ', '解 溶 ', '磅 去 ', '你 陪 我 去 彼 爿 好 無 ', '網 路 用 甲 牢 咧 ', '酒 駛 ', '孤 單 一 身 ', '家 治 ', '死 忠 迷 眾 ', '浮 洲 仔 ', '孤 行 獨 市 ', '日 冕 ', '注 目 ', '擉 仔 ', '燒 狗 ', '名 喙 ', '創 辦 人 ', '水 燦 林 ', '飼 奶 動 物 ', '目 睭 擘 金 ', '抾 囡 仔 ', '我 予 滾 水 燙 一 下 膨 疱 足 疼 的 ', '象 桮 ', '有 的 人 ', '瓜 月 ', '三 字 經 ', '飯 焦 ', '天 車 ', '梅 毒 ', '目 仔 久 ', '耙 形 ', '便 物 ', '鹽 酸 草 ', '番 仔 番 薯 ', '每 一 擺 來 你 攏 煮 好 料 的 ', '來 喔 緊 來 看 喔 ', '番 界 ', '總 書 記 ', '破 柴 ', '列 位 ', '鳳 山 ', '劉 厝 ', '凱 旋 武 昌 ', '金 美 國 小 ', '竹 北 市 ', '橋 頭 區 ', '南 州 ', '和 美 鎮 ', '中 壢 ', '西 區 ', '國 泰 ', '水 長 流 ', '中 央 路 ', '南 福 德 ', '金 華 大 廈 ', '獅 甲 ', '士 林 區 ', '紙 票 變 厚 ', '中 山 橋 ', '彰 化 銀 行 ', '黃 國 昌 ', '翠 峰 橋 ', '花 仔 菜 ', '中 正 國 小 ', '按 呢 就 好 ', '老 去 矣 ', '車 埕 ', '南 陽 街 ', '匈 奴 ', '松 壽 路 口 ', '中 坑 ', '不 速 之 客 ', '瑞 源 ', '後 驛 ', '大 稻 埕 ', '鳥 仔 弓 ', '再 敗 ', '新 營 區 ', '正 義 郵 局 ', '寧 夏 路 ', '動 漫 ', '大 和 園 ', '仁 愛 鄉 ', '快 龍 ', '範 ', '西 面 ', '食 鹼 ', '吊 懸 低 ', '先 生 娘 ', '拍 鐵 寮 ', '露 營 區 ', '韻 尾 ', '外 雙 溪 ', '信 義 區 ', '恁 阿 母 敢 知 影 你 佇 遮 發 廢 文 ', '敢 按 呢 ', '厚 操 煩 ', '望 星 橋 ', '姊 夫 ', '十 二 甲 ', '文 昌 橋 ', '有 要 緊 無 ', '林 內 鄉 ', '紅 目 石 獅 ', '囡 仔 人 嘛 看 有 ', '聽 帶 ', '歕 鼓 吹 ', '歹 喙 斗 ', '查 某 營 ', '滷 菜 頭 ', '跳 過 ', '鴨 母 拖 秤 錘 ', '不 貪 不 取 ', '契 兄 ', '白 墨 粉 ', '天 安 門 廣 場 ', '投 文 ', '隱 名 化 ', '粗 花 ', '雞 籠 蜂 ', '暗 眠 摸 山 貓 ', '袂 曉 講 話 ', '搖 床 ', '順 序 ', '捎 攏 無 ', '企 業 家 ', '梯 度 ', '足 久 無 看 見 ', '起 雞 母 皮 ', '台 東 ', '不 見 天 ', '內 華 達 ', '釣 蝦 仔 場 ', '罟 寮 仔 ', '五 支 指 頭 仔 咬 起 來 逐 支 嘛 疼 ', '羊 毛 揻 ', '厝 尾 頂 ', '扞 事 人 ', '大 箍 ', '較 低 ', '客 服 ', '鴨 母 泅 ', '蚵 仔 菇 ', '海 口 ', '阿 里 不 達 ', '反 桌 ', '截 ', '屎 啦 ', '臭 屎 星 ', '摸 王 爺 尻 川 ', '切 腹 ', '雄 三 飛 彈 ', '一 切 攏 是 假 的 ', '小 山 崙 仔 ', '剪 仔 龜 ', '青 盲 仔 目 鏡 ', '親 愛 的 ', '在 地 大 漢 ', '欲 去 佗 位 ', '崁 頭 鴨 ', '冕 旒 ', '跩 著 ', '雞 仔 腸 鳥 仔 肚 ', '門 擋 仔 ', '菱 角 龍 ', '狗 頭 芙 蓉 ', '癱 瘓 ', '我 欲 去 便 所 ', '彼 當 時 ', '鼻 淚 管 ', '代 誌 毋 好 ', '浸 透 ', '瓦 斯 彈 ', '材 料 科 學 ', '甕 底 水 雞 ', '匯 錢 ', '睏 袂 去 ']
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
normalizer.json ADDED
@@ -0,0 +1,1742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "accessorise": "accessorize",
3
+ "accessorised": "accessorized",
4
+ "accessorises": "accessorizes",
5
+ "accessorising": "accessorizing",
6
+ "acclimatisation": "acclimatization",
7
+ "acclimatise": "acclimatize",
8
+ "acclimatised": "acclimatized",
9
+ "acclimatises": "acclimatizes",
10
+ "acclimatising": "acclimatizing",
11
+ "accoutrements": "accouterments",
12
+ "aeon": "eon",
13
+ "aeons": "eons",
14
+ "aerogramme": "aerogram",
15
+ "aerogrammes": "aerograms",
16
+ "aeroplane": "airplane",
17
+ "aeroplanes": "airplanes",
18
+ "aesthete": "esthete",
19
+ "aesthetes": "esthetes",
20
+ "aesthetic": "esthetic",
21
+ "aesthetically": "esthetically",
22
+ "aesthetics": "esthetics",
23
+ "aetiology": "etiology",
24
+ "ageing": "aging",
25
+ "aggrandisement": "aggrandizement",
26
+ "agonise": "agonize",
27
+ "agonised": "agonized",
28
+ "agonises": "agonizes",
29
+ "agonising": "agonizing",
30
+ "agonisingly": "agonizingly",
31
+ "almanack": "almanac",
32
+ "almanacks": "almanacs",
33
+ "aluminium": "aluminum",
34
+ "amortisable": "amortizable",
35
+ "amortisation": "amortization",
36
+ "amortisations": "amortizations",
37
+ "amortise": "amortize",
38
+ "amortised": "amortized",
39
+ "amortises": "amortizes",
40
+ "amortising": "amortizing",
41
+ "amphitheatre": "amphitheater",
42
+ "amphitheatres": "amphitheaters",
43
+ "anaemia": "anemia",
44
+ "anaemic": "anemic",
45
+ "anaesthesia": "anesthesia",
46
+ "anaesthetic": "anesthetic",
47
+ "anaesthetics": "anesthetics",
48
+ "anaesthetise": "anesthetize",
49
+ "anaesthetised": "anesthetized",
50
+ "anaesthetises": "anesthetizes",
51
+ "anaesthetising": "anesthetizing",
52
+ "anaesthetist": "anesthetist",
53
+ "anaesthetists": "anesthetists",
54
+ "anaesthetize": "anesthetize",
55
+ "anaesthetized": "anesthetized",
56
+ "anaesthetizes": "anesthetizes",
57
+ "anaesthetizing": "anesthetizing",
58
+ "analogue": "analog",
59
+ "analogues": "analogs",
60
+ "analyse": "analyze",
61
+ "analysed": "analyzed",
62
+ "analyses": "analyzes",
63
+ "analysing": "analyzing",
64
+ "anglicise": "anglicize",
65
+ "anglicised": "anglicized",
66
+ "anglicises": "anglicizes",
67
+ "anglicising": "anglicizing",
68
+ "annualised": "annualized",
69
+ "antagonise": "antagonize",
70
+ "antagonised": "antagonized",
71
+ "antagonises": "antagonizes",
72
+ "antagonising": "antagonizing",
73
+ "apologise": "apologize",
74
+ "apologised": "apologized",
75
+ "apologises": "apologizes",
76
+ "apologising": "apologizing",
77
+ "appal": "appall",
78
+ "appals": "appalls",
79
+ "appetiser": "appetizer",
80
+ "appetisers": "appetizers",
81
+ "appetising": "appetizing",
82
+ "appetisingly": "appetizingly",
83
+ "arbour": "arbor",
84
+ "arbours": "arbors",
85
+ "archaeologically": "archeologically",
86
+ "archaeologist": "archeologist",
87
+ "archaeologists": "archeologists",
88
+ "archaeology": "archeology</span>",
89
+ "archeological": "archaeological",
90
+ "ardour": "ardor",
91
+ "armour": "armor",
92
+ "armoured": "armored",
93
+ "armourer": "armorer",
94
+ "armourers": "armorers",
95
+ "armouries": "armories",
96
+ "armoury": "armory",
97
+ "artefact": "artifact",
98
+ "artefacts": "artifacts",
99
+ "authorise": "authorize",
100
+ "authorised": "authorized",
101
+ "authorises": "authorizes",
102
+ "authorising": "authorizing",
103
+ "axe": "ax",
104
+ "backpedalled": "backpedaled",
105
+ "backpedalling": "backpedaling",
106
+ "bannister": "banister",
107
+ "bannisters": "banisters",
108
+ "baptise": "baptize",
109
+ "baptised": "baptized",
110
+ "baptises": "baptizes",
111
+ "baptising": "baptizing",
112
+ "bastardise": "bastardize",
113
+ "bastardised": "bastardized",
114
+ "bastardises": "bastardizes",
115
+ "bastardising": "bastardizing",
116
+ "battleax": "battleaxe",
117
+ "baulk": "balk",
118
+ "baulked": "balked",
119
+ "baulking": "balking",
120
+ "baulks": "balks",
121
+ "bedevilled": "bedeviled",
122
+ "bedevilling": "bedeviling",
123
+ "behaviour": "behavior",
124
+ "behavioural": "behavioral",
125
+ "behaviourism": "behaviorism",
126
+ "behaviourist": "behaviorist",
127
+ "behaviourists": "behaviorists",
128
+ "behaviours": "behaviors",
129
+ "behove": "behoove",
130
+ "behoved": "behooved",
131
+ "behoves": "behooves",
132
+ "bejewelled": "bejeweled",
133
+ "belabour": "belabor",
134
+ "belaboured": "belabored",
135
+ "belabouring": "belaboring",
136
+ "belabours": "belabors",
137
+ "bevelled": "beveled",
138
+ "bevvies": "bevies",
139
+ "bevvy": "bevy",
140
+ "biassed": "biased",
141
+ "biassing": "biasing",
142
+ "bingeing": "binging",
143
+ "bougainvillaea": "bougainvillea",
144
+ "bougainvillaeas": "bougainvilleas",
145
+ "bowdlerise": "bowdlerize",
146
+ "bowdlerised": "bowdlerized",
147
+ "bowdlerises": "bowdlerizes",
148
+ "bowdlerising": "bowdlerizing",
149
+ "breathalyse": "breathalyze",
150
+ "breathalysed": "breathalyzed",
151
+ "breathalyser": "breathalyzer",
152
+ "breathalysers": "breathalyzers",
153
+ "breathalyses": "breathalyzes",
154
+ "breathalysing": "breathalyzing",
155
+ "brutalise": "brutalize",
156
+ "brutalised": "brutalized",
157
+ "brutalises": "brutalizes",
158
+ "brutalising": "brutalizing",
159
+ "busses": "buses",
160
+ "bussing": "busing",
161
+ "caesarean": "cesarean",
162
+ "caesareans": "cesareans",
163
+ "calibre": "caliber",
164
+ "calibres": "calibers",
165
+ "calliper": "caliper",
166
+ "callipers": "calipers",
167
+ "callisthenics": "calisthenics",
168
+ "canalise": "canalize",
169
+ "canalised": "canalized",
170
+ "canalises": "canalizes",
171
+ "canalising": "canalizing",
172
+ "cancelation": "cancellation",
173
+ "cancelations": "cancellations",
174
+ "cancelled": "canceled",
175
+ "cancelling": "canceling",
176
+ "candour": "candor",
177
+ "cannibalise": "cannibalize",
178
+ "cannibalised": "cannibalized",
179
+ "cannibalises": "cannibalizes",
180
+ "cannibalising": "cannibalizing",
181
+ "canonise": "canonize",
182
+ "canonised": "canonized",
183
+ "canonises": "canonizes",
184
+ "canonising": "canonizing",
185
+ "capitalise": "capitalize",
186
+ "capitalised": "capitalized",
187
+ "capitalises": "capitalizes",
188
+ "capitalising": "capitalizing",
189
+ "caramelise": "caramelize",
190
+ "caramelised": "caramelized",
191
+ "caramelises": "caramelizes",
192
+ "caramelising": "caramelizing",
193
+ "carbonise": "carbonize",
194
+ "carbonised": "carbonized",
195
+ "carbonises": "carbonizes",
196
+ "carbonising": "carbonizing",
197
+ "carolled": "caroled",
198
+ "carolling": "caroling",
199
+ "catalogue": "catalog",
200
+ "catalogued": "cataloged",
201
+ "catalogues": "catalogs",
202
+ "cataloguing": "cataloging",
203
+ "catalyse": "catalyze",
204
+ "catalysed": "catalyzed",
205
+ "catalyses": "catalyzes",
206
+ "catalysing": "catalyzing",
207
+ "categorise": "categorize",
208
+ "categorised": "categorized",
209
+ "categorises": "categorizes",
210
+ "categorising": "categorizing",
211
+ "cauterise": "cauterize",
212
+ "cauterised": "cauterized",
213
+ "cauterises": "cauterizes",
214
+ "cauterising": "cauterizing",
215
+ "cavilled": "caviled",
216
+ "cavilling": "caviling",
217
+ "centigramme": "centigram",
218
+ "centigrammes": "centigrams",
219
+ "centilitre": "centiliter",
220
+ "centilitres": "centiliters",
221
+ "centimetre": "centimeter",
222
+ "centimetres": "centimeters",
223
+ "centralise": "centralize",
224
+ "centralised": "centralized",
225
+ "centralises": "centralizes",
226
+ "centralising": "centralizing",
227
+ "centre": "center",
228
+ "centred": "centered",
229
+ "centrefold": "centerfold",
230
+ "centrefolds": "centerfolds",
231
+ "centrepiece": "centerpiece",
232
+ "centrepieces": "centerpieces",
233
+ "centres": "centers",
234
+ "channelled": "channeled",
235
+ "channelling": "channeling",
236
+ "characterise": "characterize",
237
+ "characterised": "characterized",
238
+ "characterises": "characterizes",
239
+ "characterising": "characterizing",
240
+ "cheque": "check",
241
+ "chequebook": "checkbook",
242
+ "chequebooks": "checkbooks",
243
+ "chequered": "checkered",
244
+ "cheques": "checks",
245
+ "chilli": "chili",
246
+ "chimaera": "chimera",
247
+ "chimaeras": "chimeras",
248
+ "chiselled": "chiseled",
249
+ "chiselling": "chiseling",
250
+ "circularise": "circularize",
251
+ "circularised": "circularized",
252
+ "circularises": "circularizes",
253
+ "circularising": "circularizing",
254
+ "civilise": "civilize",
255
+ "civilised": "civilized",
256
+ "civilises": "civilizes",
257
+ "civilising": "civilizing",
258
+ "clamour": "clamor",
259
+ "clamoured": "clamored",
260
+ "clamouring": "clamoring",
261
+ "clamours": "clamors",
262
+ "clangour": "clangor",
263
+ "clarinettist": "clarinetist",
264
+ "clarinettists": "clarinetists",
265
+ "collectivise": "collectivize",
266
+ "collectivised": "collectivized",
267
+ "collectivises": "collectivizes",
268
+ "collectivising": "collectivizing",
269
+ "colonisation": "colonization",
270
+ "colonise": "colonize",
271
+ "colonised": "colonized",
272
+ "coloniser": "colonizer",
273
+ "colonisers": "colonizers",
274
+ "colonises": "colonizes",
275
+ "colonising": "colonizing",
276
+ "colour": "color",
277
+ "colourant": "colorant",
278
+ "colourants": "colorants",
279
+ "coloured": "colored",
280
+ "coloureds": "coloreds",
281
+ "colourful": "colorful",
282
+ "colourfully": "colorfully",
283
+ "colouring": "coloring",
284
+ "colourize": "colorize",
285
+ "colourized": "colorized",
286
+ "colourizes": "colorizes",
287
+ "colourizing": "colorizing",
288
+ "colourless": "colorless",
289
+ "colours": "colors",
290
+ "commercialise": "commercialize",
291
+ "commercialised": "commercialized",
292
+ "commercialises": "commercializes",
293
+ "commercialising": "commercializing",
294
+ "compartmentalise": "compartmentalize",
295
+ "compartmentalised": "compartmentalized",
296
+ "compartmentalises": "compartmentalizes",
297
+ "compartmentalising": "compartmentalizing",
298
+ "computerise": "computerize",
299
+ "computerised": "computerized",
300
+ "computerises": "computerizes",
301
+ "computerising": "computerizing",
302
+ "conceptualise": "conceptualize",
303
+ "conceptualised": "conceptualized",
304
+ "conceptualises": "conceptualizes",
305
+ "conceptualising": "conceptualizing",
306
+ "connexion": "connection",
307
+ "connexions": "connections",
308
+ "contextualise": "contextualize",
309
+ "contextualised": "contextualized",
310
+ "contextualises": "contextualizes",
311
+ "contextualising": "contextualizing",
312
+ "cosier": "cozier",
313
+ "cosies": "cozies",
314
+ "cosiest": "coziest",
315
+ "cosily": "cozily",
316
+ "cosiness": "coziness",
317
+ "cosy": "cozy",
318
+ "councillor": "councilor",
319
+ "councillors": "councilors",
320
+ "counselled": "counseled",
321
+ "counselling": "counseling",
322
+ "counsellor": "counselor",
323
+ "counsellors": "counselors",
324
+ "crenelated": "crenellated",
325
+ "criminalise": "criminalize",
326
+ "criminalised": "criminalized",
327
+ "criminalises": "criminalizes",
328
+ "criminalising": "criminalizing",
329
+ "criticise": "criticize",
330
+ "criticised": "criticized",
331
+ "criticises": "criticizes",
332
+ "criticising": "criticizing",
333
+ "crueller": "crueler",
334
+ "cruellest": "cruelest",
335
+ "crystallisation": "crystallization",
336
+ "crystallise": "crystallize",
337
+ "crystallised": "crystallized",
338
+ "crystallises": "crystallizes",
339
+ "crystallising": "crystallizing",
340
+ "cudgelled": "cudgeled",
341
+ "cudgelling": "cudgeling",
342
+ "customise": "customize",
343
+ "customised": "customized",
344
+ "customises": "customizes",
345
+ "customising": "customizing",
346
+ "cypher": "cipher",
347
+ "cyphers": "ciphers",
348
+ "decentralisation": "decentralization",
349
+ "decentralise": "decentralize",
350
+ "decentralised": "decentralized",
351
+ "decentralises": "decentralizes",
352
+ "decentralising": "decentralizing",
353
+ "decriminalisation": "decriminalization",
354
+ "decriminalise": "decriminalize",
355
+ "decriminalised": "decriminalized",
356
+ "decriminalises": "decriminalizes",
357
+ "decriminalising": "decriminalizing",
358
+ "defence": "defense",
359
+ "defenceless": "defenseless",
360
+ "defences": "defenses",
361
+ "dehumanisation": "dehumanization",
362
+ "dehumanise": "dehumanize",
363
+ "dehumanised": "dehumanized",
364
+ "dehumanises": "dehumanizes",
365
+ "dehumanising": "dehumanizing",
366
+ "demeanour": "demeanor",
367
+ "demilitarisation": "demilitarization",
368
+ "demilitarise": "demilitarize",
369
+ "demilitarised": "demilitarized",
370
+ "demilitarises": "demilitarizes",
371
+ "demilitarising": "demilitarizing",
372
+ "demobilisation": "demobilization",
373
+ "demobilise": "demobilize",
374
+ "demobilised": "demobilized",
375
+ "demobilises": "demobilizes",
376
+ "demobilising": "demobilizing",
377
+ "democratisation": "democratization",
378
+ "democratise": "democratize",
379
+ "democratised": "democratized",
380
+ "democratises": "democratizes",
381
+ "democratising": "democratizing",
382
+ "demonise": "demonize",
383
+ "demonised": "demonized",
384
+ "demonises": "demonizes",
385
+ "demonising": "demonizing",
386
+ "demoralisation": "demoralization",
387
+ "demoralise": "demoralize",
388
+ "demoralised": "demoralized",
389
+ "demoralises": "demoralizes",
390
+ "demoralising": "demoralizing",
391
+ "denationalisation": "denationalization",
392
+ "denationalise": "denationalize",
393
+ "denationalised": "denationalized",
394
+ "denationalises": "denationalizes",
395
+ "denationalising": "denationalizing",
396
+ "deodorise": "deodorize",
397
+ "deodorised": "deodorized",
398
+ "deodorises": "deodorizes",
399
+ "deodorising": "deodorizing",
400
+ "depersonalise": "depersonalize",
401
+ "depersonalised": "depersonalized",
402
+ "depersonalises": "depersonalizes",
403
+ "depersonalising": "depersonalizing",
404
+ "deputise": "deputize",
405
+ "deputised": "deputized",
406
+ "deputises": "deputizes",
407
+ "deputising": "deputizing",
408
+ "desensitisation": "desensitization",
409
+ "desensitise": "desensitize",
410
+ "desensitised": "desensitized",
411
+ "desensitises": "desensitizes",
412
+ "desensitising": "desensitizing",
413
+ "destabilisation": "destabilization",
414
+ "destabilise": "destabilize",
415
+ "destabilised": "destabilized",
416
+ "destabilises": "destabilizes",
417
+ "destabilising": "destabilizing",
418
+ "dialled": "dialed",
419
+ "dialling": "dialing",
420
+ "dialogue": "dialog",
421
+ "dialogues": "dialogs",
422
+ "diarrhoea": "diarrhea",
423
+ "digitise": "digitize",
424
+ "digitised": "digitized",
425
+ "digitises": "digitizes",
426
+ "digitising": "digitizing",
427
+ "disc": "disk",
428
+ "discolour": "discolor",
429
+ "discoloured": "discolored",
430
+ "discolouring": "discoloring",
431
+ "discolours": "discolors",
432
+ "discs": "disks",
433
+ "disembowelled": "disemboweled",
434
+ "disembowelling": "disemboweling",
435
+ "disfavour": "disfavor",
436
+ "dishevelled": "disheveled",
437
+ "dishonour": "dishonor",
438
+ "dishonourable": "dishonorable",
439
+ "dishonourably": "dishonorably",
440
+ "dishonoured": "dishonored",
441
+ "dishonouring": "dishonoring",
442
+ "dishonours": "dishonors",
443
+ "disorganisation": "disorganization",
444
+ "disorganised": "disorganized",
445
+ "distil": "distill",
446
+ "distils": "distills",
447
+ "dramatisation": "dramatization",
448
+ "dramatisations": "dramatizations",
449
+ "dramatise": "dramatize",
450
+ "dramatised": "dramatized",
451
+ "dramatises": "dramatizes",
452
+ "dramatising": "dramatizing",
453
+ "draught": "draft",
454
+ "draughtboard": "draftboard",
455
+ "draughtboards": "draftboards",
456
+ "draughtier": "draftier",
457
+ "draughtiest": "draftiest",
458
+ "draughts": "drafts",
459
+ "draughtsman": "draftsman",
460
+ "draughtsmanship": "draftsmanship",
461
+ "draughtsmen": "draftsmen",
462
+ "draughtswoman": "draftswoman",
463
+ "draughtswomen": "draftswomen",
464
+ "draughty": "drafty",
465
+ "drivelled": "driveled",
466
+ "drivelling": "driveling",
467
+ "duelled": "dueled",
468
+ "duelling": "dueling",
469
+ "economise": "economize",
470
+ "economised": "economized",
471
+ "economises": "economizes",
472
+ "economising": "economizing",
473
+ "editorialise": "editorialize",
474
+ "editorialised": "editorialized",
475
+ "editorialises": "editorializes",
476
+ "editorialising": "editorializing",
477
+ "edoema": "edema",
478
+ "empathise": "empathize",
479
+ "empathised": "empathized",
480
+ "empathises": "empathizes",
481
+ "empathising": "empathizing",
482
+ "emphasise": "emphasize",
483
+ "emphasised": "emphasized",
484
+ "emphasises": "emphasizes",
485
+ "emphasising": "emphasizing",
486
+ "enamelled": "enameled",
487
+ "enamelling": "enameling",
488
+ "enamoured": "enamored",
489
+ "encyclopaedia": "encyclopedia",
490
+ "encyclopaedias": "encyclopedias",
491
+ "encyclopaedic": "encyclopedic",
492
+ "endeavour": "endeavor",
493
+ "endeavoured": "endeavored",
494
+ "endeavouring": "endeavoring",
495
+ "endeavours": "endeavors",
496
+ "energise": "energize",
497
+ "energised": "energized",
498
+ "energises": "energizes",
499
+ "energising": "energizing",
500
+ "enrol": "enroll",
501
+ "enrols": "enrolls",
502
+ "enthral": "enthrall",
503
+ "enthrals": "enthralls",
504
+ "epaulette": "epaulet",
505
+ "epaulettes": "epaulets",
506
+ "epicentre": "epicenter",
507
+ "epicentres": "epicenters",
508
+ "epilogue": "epilog",
509
+ "epilogues": "epilogs",
510
+ "epitomise": "epitomize",
511
+ "epitomised": "epitomized",
512
+ "epitomises": "epitomizes",
513
+ "epitomising": "epitomizing",
514
+ "equalisation": "equalization",
515
+ "equalise": "equalize",
516
+ "equalised": "equalized",
517
+ "equaliser": "equalizer",
518
+ "equalisers": "equalizers",
519
+ "equalises": "equalizes",
520
+ "equalising": "equalizing",
521
+ "eulogise": "eulogize",
522
+ "eulogised": "eulogized",
523
+ "eulogises": "eulogizes",
524
+ "eulogising": "eulogizing",
525
+ "evangelise": "evangelize",
526
+ "evangelised": "evangelized",
527
+ "evangelises": "evangelizes",
528
+ "evangelising": "evangelizing",
529
+ "exorcise": "exorcize",
530
+ "exorcised": "exorcized",
531
+ "exorcises": "exorcizes",
532
+ "exorcising": "exorcizing",
533
+ "extemporisation": "extemporization",
534
+ "extemporise": "extemporize",
535
+ "extemporised": "extemporized",
536
+ "extemporises": "extemporizes",
537
+ "extemporising": "extemporizing",
538
+ "externalisation": "externalization",
539
+ "externalisations": "externalizations",
540
+ "externalise": "externalize",
541
+ "externalised": "externalized",
542
+ "externalises": "externalizes",
543
+ "externalising": "externalizing",
544
+ "factorise": "factorize",
545
+ "factorised": "factorized",
546
+ "factorises": "factorizes",
547
+ "factorising": "factorizing",
548
+ "faecal": "fecal",
549
+ "faeces": "feces",
550
+ "familiarisation": "familiarization",
551
+ "familiarise": "familiarize",
552
+ "familiarised": "familiarized",
553
+ "familiarises": "familiarizes",
554
+ "familiarising": "familiarizing",
555
+ "fantasise": "fantasize",
556
+ "fantasised": "fantasized",
557
+ "fantasises": "fantasizes",
558
+ "fantasising": "fantasizing",
559
+ "favour": "favor",
560
+ "favourable": "favorable",
561
+ "favourably": "favorably",
562
+ "favoured": "favored",
563
+ "favouring": "favoring",
564
+ "favourite": "favorite",
565
+ "favourites": "favorites",
566
+ "favouritism": "favoritism",
567
+ "favours": "favors",
568
+ "feminise": "feminize",
569
+ "feminised": "feminized",
570
+ "feminises": "feminizes",
571
+ "feminising": "feminizing",
572
+ "fertilisation": "fertilization",
573
+ "fertilise": "fertilize",
574
+ "fertilised": "fertilized",
575
+ "fertiliser": "fertilizer",
576
+ "fertilisers": "fertilizers",
577
+ "fertilises": "fertilizes",
578
+ "fertilising": "fertilizing",
579
+ "fervour": "fervor",
580
+ "fibre": "fiber",
581
+ "fibreglass": "fiberglass",
582
+ "fibres": "fibers",
583
+ "fictionalisation": "fictionalization",
584
+ "fictionalisations": "fictionalizations",
585
+ "fictionalise": "fictionalize",
586
+ "fictionalised": "fictionalized",
587
+ "fictionalises": "fictionalizes",
588
+ "fictionalising": "fictionalizing",
589
+ "fillet": "filet",
590
+ "filleted": "fileted",
591
+ "filleting": "fileting",
592
+ "fillets": "filets",
593
+ "finalisation": "finalization",
594
+ "finalise": "finalize",
595
+ "finalised": "finalized",
596
+ "finalises": "finalizes",
597
+ "finalising": "finalizing",
598
+ "flautist": "flutist",
599
+ "flautists": "flutists",
600
+ "flavour": "flavor",
601
+ "flavoured": "flavored",
602
+ "flavouring": "flavoring",
603
+ "flavourings": "flavorings",
604
+ "flavourless": "flavorless",
605
+ "flavours": "flavors",
606
+ "flavoursome": "flavorsome",
607
+ "flyer / flier": "flier / flyer",
608
+ "foetal": "fetal",
609
+ "foetid": "fetid",
610
+ "foetus": "fetus",
611
+ "foetuses": "fetuses",
612
+ "formalisation": "formalization",
613
+ "formalise": "formalize",
614
+ "formalised": "formalized",
615
+ "formalises": "formalizes",
616
+ "formalising": "formalizing",
617
+ "fossilisation": "fossilization",
618
+ "fossilise": "fossilize",
619
+ "fossilised": "fossilized",
620
+ "fossilises": "fossilizes",
621
+ "fossilising": "fossilizing",
622
+ "fraternisation": "fraternization",
623
+ "fraternise": "fraternize",
624
+ "fraternised": "fraternized",
625
+ "fraternises": "fraternizes",
626
+ "fraternising": "fraternizing",
627
+ "fulfil": "fulfill",
628
+ "fulfilment": "fulfillment",
629
+ "fulfils": "fulfills",
630
+ "funnelled": "funneled",
631
+ "funnelling": "funneling",
632
+ "gage": "gauge",
633
+ "gaged": "gauged",
634
+ "gages": "gauges",
635
+ "gaging": "gauging",
636
+ "galvanise": "galvanize",
637
+ "galvanised": "galvanized",
638
+ "galvanises": "galvanizes",
639
+ "galvanising": "galvanizing",
640
+ "gambolled": "gamboled",
641
+ "gambolling": "gamboling",
642
+ "gaol": "jail",
643
+ "gaolbird": "jailbird",
644
+ "gaolbirds": "jailbirds",
645
+ "gaolbreak": "jailbreak",
646
+ "gaolbreaks": "jailbreaks",
647
+ "gaoled": "jailed",
648
+ "gaoler": "jailer",
649
+ "gaolers": "jailers",
650
+ "gaoling": "jailing",
651
+ "gaols": "jails",
652
+ "gasses": "gases",
653
+ "generalisation": "generalization",
654
+ "generalisations": "generalizations",
655
+ "generalise": "generalize",
656
+ "generalised": "generalized",
657
+ "generalises": "generalizes",
658
+ "generalising": "generalizing",
659
+ "ghettoise": "ghettoize",
660
+ "ghettoised": "ghettoized",
661
+ "ghettoises": "ghettoizes",
662
+ "ghettoising": "ghettoizing",
663
+ "gipsies": "gypsies",
664
+ "glamor": "glamour",
665
+ "glamorise": "glamorize",
666
+ "glamorised": "glamorized",
667
+ "glamorises": "glamorizes",
668
+ "glamorising": "glamorizing",
669
+ "globalisation": "globalization",
670
+ "globalise": "globalize",
671
+ "globalised": "globalized",
672
+ "globalises": "globalizes",
673
+ "globalising": "globalizing",
674
+ "glueing": "gluing",
675
+ "goitre": "goiter",
676
+ "goitres": "goiters",
677
+ "gonorrhoea": "gonorrhea",
678
+ "gramme": "gram",
679
+ "grammes": "grams",
680
+ "gravelled": "graveled",
681
+ "grey": "gray",
682
+ "greyed": "grayed",
683
+ "greying": "graying",
684
+ "greyish": "grayish",
685
+ "greyness": "grayness",
686
+ "greys": "grays",
687
+ "grovelled": "groveled",
688
+ "grovelling": "groveling",
689
+ "groyne": "groin",
690
+ "groynes": "groins",
691
+ "gruelling": "grueling",
692
+ "gruellingly": "gruelingly",
693
+ "gryphon": "griffin",
694
+ "gryphons": "griffins",
695
+ "gynaecological": "gynecological",
696
+ "gynaecologist": "gynecologist",
697
+ "gynaecologists": "gynecologists",
698
+ "gynaecology": "gynecology",
699
+ "haematological": "hematological",
700
+ "haematologist": "hematologist",
701
+ "haematologists": "hematologists",
702
+ "haematology": "hematology",
703
+ "haemoglobin": "hemoglobin",
704
+ "haemophilia": "hemophilia",
705
+ "haemophiliac": "hemophiliac",
706
+ "haemophiliacs": "hemophiliacs",
707
+ "haemorrhage": "hemorrhage",
708
+ "haemorrhaged": "hemorrhaged",
709
+ "haemorrhages": "hemorrhages",
710
+ "haemorrhaging": "hemorrhaging",
711
+ "haemorrhoids": "hemorrhoids",
712
+ "harbour": "harbor",
713
+ "harboured": "harbored",
714
+ "harbouring": "harboring",
715
+ "harbours": "harbors",
716
+ "harmonisation": "harmonization",
717
+ "harmonise": "harmonize",
718
+ "harmonised": "harmonized",
719
+ "harmonises": "harmonizes",
720
+ "harmonising": "harmonizing",
721
+ "homoeopath": "homeopath",
722
+ "homoeopathic": "homeopathic",
723
+ "homoeopaths": "homeopaths",
724
+ "homoeopathy": "homeopathy",
725
+ "homogenise": "homogenize",
726
+ "homogenised": "homogenized",
727
+ "homogenises": "homogenizes",
728
+ "homogenising": "homogenizing",
729
+ "honour": "honor",
730
+ "honourable": "honorable",
731
+ "honourably": "honorably",
732
+ "honoured": "honored",
733
+ "honouring": "honoring",
734
+ "honours": "honors",
735
+ "hospitalisation": "hospitalization",
736
+ "hospitalise": "hospitalize",
737
+ "hospitalised": "hospitalized",
738
+ "hospitalises": "hospitalizes",
739
+ "hospitalising": "hospitalizing",
740
+ "humanise": "humanize",
741
+ "humanised": "humanized",
742
+ "humanises": "humanizes",
743
+ "humanising": "humanizing",
744
+ "humour": "humor",
745
+ "humoured": "humored",
746
+ "humouring": "humoring",
747
+ "humourless": "humorless",
748
+ "humours": "humors",
749
+ "hybridise": "hybridize",
750
+ "hybridised": "hybridized",
751
+ "hybridises": "hybridizes",
752
+ "hybridising": "hybridizing",
753
+ "hypnotise": "hypnotize",
754
+ "hypnotised": "hypnotized",
755
+ "hypnotises": "hypnotizes",
756
+ "hypnotising": "hypnotizing",
757
+ "hypothesise": "hypothesize",
758
+ "hypothesised": "hypothesized",
759
+ "hypothesises": "hypothesizes",
760
+ "hypothesising": "hypothesizing",
761
+ "idealisation": "idealization",
762
+ "idealise": "idealize",
763
+ "idealised": "idealized",
764
+ "idealises": "idealizes",
765
+ "idealising": "idealizing",
766
+ "idolise": "idolize",
767
+ "idolised": "idolized",
768
+ "idolises": "idolizes",
769
+ "idolising": "idolizing",
770
+ "immobilisation": "immobilization",
771
+ "immobilise": "immobilize",
772
+ "immobilised": "immobilized",
773
+ "immobiliser": "immobilizer",
774
+ "immobilisers": "immobilizers",
775
+ "immobilises": "immobilizes",
776
+ "immobilising": "immobilizing",
777
+ "immortalise": "immortalize",
778
+ "immortalised": "immortalized",
779
+ "immortalises": "immortalizes",
780
+ "immortalising": "immortalizing",
781
+ "immunisation": "immunization",
782
+ "immunise": "immunize",
783
+ "immunised": "immunized",
784
+ "immunises": "immunizes",
785
+ "immunising": "immunizing",
786
+ "impanelled": "impaneled",
787
+ "impanelling": "impaneling",
788
+ "imperilled": "imperiled",
789
+ "imperilling": "imperiling",
790
+ "individualise": "individualize",
791
+ "individualised": "individualized",
792
+ "individualises": "individualizes",
793
+ "individualising": "individualizing",
794
+ "industrialise": "industrialize",
795
+ "industrialised": "industrialized",
796
+ "industrialises": "industrializes",
797
+ "industrialising": "industrializing",
798
+ "inflexion": "inflection",
799
+ "inflexions": "inflections",
800
+ "initialise": "initialize",
801
+ "initialised": "initialized",
802
+ "initialises": "initializes",
803
+ "initialising": "initializing",
804
+ "initialled": "initialed",
805
+ "initialling": "initialing",
806
+ "instal": "install",
807
+ "instalment": "installment",
808
+ "instalments": "installments",
809
+ "instals": "installs",
810
+ "instil": "instill",
811
+ "instils": "instills",
812
+ "institutionalisation": "institutionalization",
813
+ "institutionalise": "institutionalize",
814
+ "institutionalised": "institutionalized",
815
+ "institutionalises": "institutionalizes",
816
+ "institutionalising": "institutionalizing",
817
+ "intellectualise": "intellectualize",
818
+ "intellectualised": "intellectualized",
819
+ "intellectualises": "intellectualizes",
820
+ "intellectualising": "intellectualizing",
821
+ "internalisation": "internalization",
822
+ "internalise": "internalize",
823
+ "internalised": "internalized",
824
+ "internalises": "internalizes",
825
+ "internalising": "internalizing",
826
+ "internationalisation": "internationalization",
827
+ "internationalise": "internationalize",
828
+ "internationalised": "internationalized",
829
+ "internationalises": "internationalizes",
830
+ "internationalising": "internationalizing",
831
+ "ionisation": "ionization",
832
+ "ionise": "ionize",
833
+ "ionised": "ionized",
834
+ "ioniser": "ionizer",
835
+ "ionisers": "ionizers",
836
+ "ionises": "ionizes",
837
+ "ionising": "ionizing",
838
+ "italicise": "italicize",
839
+ "italicised": "italicized",
840
+ "italicises": "italicizes",
841
+ "italicising": "italicizing",
842
+ "itemise": "itemize",
843
+ "itemised": "itemized",
844
+ "itemises": "itemizes",
845
+ "itemising": "itemizing",
846
+ "jeopardise": "jeopardize",
847
+ "jeopardised": "jeopardized",
848
+ "jeopardises": "jeopardizes",
849
+ "jeopardising": "jeopardizing",
850
+ "jewelled": "jeweled",
851
+ "jeweller": "jeweler",
852
+ "jewellers": "jewelers",
853
+ "jewellery": "jewelry",
854
+ "judgement": "judgment",
855
+ "kilogramme": "kilogram",
856
+ "kilogrammes": "kilograms",
857
+ "kilometre": "kilometer",
858
+ "kilometres": "kilometers",
859
+ "labelled": "labeled",
860
+ "labelling": "labeling",
861
+ "labour": "labor",
862
+ "laboured": "labored",
863
+ "labourer": "laborer",
864
+ "labourers": "laborers",
865
+ "labouring": "laboring",
866
+ "labours": "labors",
867
+ "lacklustre": "lackluster",
868
+ "legalisation": "legalization",
869
+ "legalise": "legalize",
870
+ "legalised": "legalized",
871
+ "legalises": "legalizes",
872
+ "legalising": "legalizing",
873
+ "legitimise": "legitimize",
874
+ "legitimised": "legitimized",
875
+ "legitimises": "legitimizes",
876
+ "legitimising": "legitimizing",
877
+ "leukaemia": "leukemia",
878
+ "levelled": "leveled",
879
+ "leveller": "leveler",
880
+ "levellers": "levelers",
881
+ "levelling": "leveling",
882
+ "libelled": "libeled",
883
+ "libelling": "libeling",
884
+ "libellous": "libelous",
885
+ "liberalisation": "liberalization",
886
+ "liberalise": "liberalize",
887
+ "liberalised": "liberalized",
888
+ "liberalises": "liberalizes",
889
+ "liberalising": "liberalizing",
890
+ "licence": "license",
891
+ "licenced": "licensed",
892
+ "licences": "licenses",
893
+ "licencing": "licensing",
894
+ "likeable": "likable",
895
+ "lionisation": "lionization",
896
+ "lionise": "lionize",
897
+ "lionised": "lionized",
898
+ "lionises": "lionizes",
899
+ "lionising": "lionizing",
900
+ "liquidise": "liquidize",
901
+ "liquidised": "liquidized",
902
+ "liquidiser": "liquidizer",
903
+ "liquidisers": "liquidizers",
904
+ "liquidises": "liquidizes",
905
+ "liquidising": "liquidizing",
906
+ "litre": "liter",
907
+ "litres": "liters",
908
+ "localise": "localize",
909
+ "localised": "localized",
910
+ "localises": "localizes",
911
+ "localising": "localizing",
912
+ "louvre": "louver",
913
+ "louvred": "louvered",
914
+ "louvres": "louvers",
915
+ "lustre": "luster",
916
+ "magnetise": "magnetize",
917
+ "magnetised": "magnetized",
918
+ "magnetises": "magnetizes",
919
+ "magnetising": "magnetizing",
920
+ "manoeuvrability": "maneuverability",
921
+ "manoeuvrable": "maneuverable",
922
+ "manoeuvre": "maneuver",
923
+ "manoeuvred": "maneuvered",
924
+ "manoeuvres": "maneuvers",
925
+ "manoeuvring": "maneuvering",
926
+ "manoeuvrings": "maneuverings",
927
+ "marginalisation": "marginalization",
928
+ "marginalise": "marginalize",
929
+ "marginalised": "marginalized",
930
+ "marginalises": "marginalizes",
931
+ "marginalising": "marginalizing",
932
+ "marshalled": "marshaled",
933
+ "marshalling": "marshaling",
934
+ "marvelled": "marveled",
935
+ "marvelling": "marveling",
936
+ "marvellous": "marvelous",
937
+ "marvellously": "marvelously",
938
+ "materialisation": "materialization",
939
+ "materialise": "materialize",
940
+ "materialised": "materialized",
941
+ "materialises": "materializes",
942
+ "materialising": "materializing",
943
+ "maximisation": "maximization",
944
+ "maximise": "maximize",
945
+ "maximised": "maximized",
946
+ "maximises": "maximizes",
947
+ "maximising": "maximizing",
948
+ "meagre": "meager",
949
+ "mechanisation": "mechanization",
950
+ "mechanise": "mechanize",
951
+ "mechanised": "mechanized",
952
+ "mechanises": "mechanizes",
953
+ "mechanising": "mechanizing",
954
+ "mediaeval": "medieval",
955
+ "memorialise": "memorialize",
956
+ "memorialised": "memorialized",
957
+ "memorialises": "memorializes",
958
+ "memorialising": "memorializing",
959
+ "memorise": "memorize",
960
+ "memorised": "memorized",
961
+ "memorises": "memorizes",
962
+ "memorising": "memorizing",
963
+ "mesmerise": "mesmerize",
964
+ "mesmerised": "mesmerized",
965
+ "mesmerises": "mesmerizes",
966
+ "mesmerising": "mesmerizing",
967
+ "metabolise": "metabolize",
968
+ "metabolised": "metabolized",
969
+ "metabolises": "metabolizes",
970
+ "metabolising": "metabolizing",
971
+ "metre": "meter",
972
+ "metres": "meters",
973
+ "mhm": "hmm",
974
+ "micrometre": "micrometer",
975
+ "micrometres": "micrometers",
976
+ "militarise": "militarize",
977
+ "militarised": "militarized",
978
+ "militarises": "militarizes",
979
+ "militarising": "militarizing",
980
+ "milligramme": "milligram",
981
+ "milligrammes": "milligrams",
982
+ "millilitre": "milliliter",
983
+ "millilitres": "milliliters",
984
+ "millimetre": "millimeter",
985
+ "millimetres": "millimeters",
986
+ "miniaturisation": "miniaturization",
987
+ "miniaturise": "miniaturize",
988
+ "miniaturised": "miniaturized",
989
+ "miniaturises": "miniaturizes",
990
+ "miniaturising": "miniaturizing",
991
+ "minibusses": "minibuses",
992
+ "minimise": "minimize",
993
+ "minimised": "minimized",
994
+ "minimises": "minimizes",
995
+ "minimising": "minimizing",
996
+ "misbehaviour": "misbehavior",
997
+ "misdemeanour": "misdemeanor",
998
+ "misdemeanours": "misdemeanors",
999
+ "misspelt": "misspelled",
1000
+ "mitre": "miter",
1001
+ "mitres": "miters",
1002
+ "mm": "hmm",
1003
+ "mmm": "hmm",
1004
+ "mobilisation": "mobilization",
1005
+ "mobilise": "mobilize",
1006
+ "mobilised": "mobilized",
1007
+ "mobilises": "mobilizes",
1008
+ "mobilising": "mobilizing",
1009
+ "modelled": "modeled",
1010
+ "modeller": "modeler",
1011
+ "modellers": "modelers",
1012
+ "modelling": "modeling",
1013
+ "modernise": "modernize",
1014
+ "modernised": "modernized",
1015
+ "modernises": "modernizes",
1016
+ "modernising": "modernizing",
1017
+ "moisturise": "moisturize",
1018
+ "moisturised": "moisturized",
1019
+ "moisturiser": "moisturizer",
1020
+ "moisturisers": "moisturizers",
1021
+ "moisturises": "moisturizes",
1022
+ "moisturising": "moisturizing",
1023
+ "monologue": "monolog",
1024
+ "monologues": "monologs",
1025
+ "monopolisation": "monopolization",
1026
+ "monopolise": "monopolize",
1027
+ "monopolised": "monopolized",
1028
+ "monopolises": "monopolizes",
1029
+ "monopolising": "monopolizing",
1030
+ "moralise": "moralize",
1031
+ "moralised": "moralized",
1032
+ "moralises": "moralizes",
1033
+ "moralising": "moralizing",
1034
+ "motorised": "motorized",
1035
+ "mould": "mold",
1036
+ "moulded": "molded",
1037
+ "moulder": "molder",
1038
+ "mouldered": "moldered",
1039
+ "mouldering": "moldering",
1040
+ "moulders": "molders",
1041
+ "mouldier": "moldier",
1042
+ "mouldiest": "moldiest",
1043
+ "moulding": "molding",
1044
+ "mouldings": "moldings",
1045
+ "moulds": "molds",
1046
+ "mouldy": "moldy",
1047
+ "moult": "molt",
1048
+ "moulted": "molted",
1049
+ "moulting": "molting",
1050
+ "moults": "molts",
1051
+ "moustache": "mustache",
1052
+ "moustached": "mustached",
1053
+ "moustaches": "mustaches",
1054
+ "moustachioed": "mustachioed",
1055
+ "multicoloured": "multicolored",
1056
+ "nationalisation": "nationalization",
1057
+ "nationalisations": "nationalizations",
1058
+ "nationalise": "nationalize",
1059
+ "nationalised": "nationalized",
1060
+ "nationalises": "nationalizes",
1061
+ "nationalising": "nationalizing",
1062
+ "naturalisation": "naturalization",
1063
+ "naturalise": "naturalize",
1064
+ "naturalised": "naturalized",
1065
+ "naturalises": "naturalizes",
1066
+ "naturalising": "naturalizing",
1067
+ "neighbour": "neighbor",
1068
+ "neighbourhood": "neighborhood",
1069
+ "neighbourhoods": "neighborhoods",
1070
+ "neighbouring": "neighboring",
1071
+ "neighbourliness": "neighborliness",
1072
+ "neighbourly": "neighborly",
1073
+ "neighbours": "neighbors",
1074
+ "neutralisation": "neutralization",
1075
+ "neutralise": "neutralize",
1076
+ "neutralised": "neutralized",
1077
+ "neutralises": "neutralizes",
1078
+ "neutralising": "neutralizing",
1079
+ "normalisation": "normalization",
1080
+ "normalise": "normalize",
1081
+ "normalised": "normalized",
1082
+ "normalises": "normalizes",
1083
+ "normalising": "normalizing",
1084
+ "odour": "odor",
1085
+ "odourless": "odorless",
1086
+ "odours": "odors",
1087
+ "oesophagus": "esophagus",
1088
+ "oesophaguses": "esophaguses",
1089
+ "oestrogen": "estrogen",
1090
+ "offence": "offense",
1091
+ "offences": "offenses",
1092
+ "omelette": "omelet",
1093
+ "omelettes": "omelets",
1094
+ "optimise": "optimize",
1095
+ "optimised": "optimized",
1096
+ "optimises": "optimizes",
1097
+ "optimising": "optimizing",
1098
+ "organisation": "organization",
1099
+ "organisational": "organizational",
1100
+ "organisations": "organizations",
1101
+ "organise": "organize",
1102
+ "organised": "organized",
1103
+ "organiser": "organizer",
1104
+ "organisers": "organizers",
1105
+ "organises": "organizes",
1106
+ "organising": "organizing",
1107
+ "orthopaedic": "orthopedic",
1108
+ "orthopaedics": "orthopedics",
1109
+ "ostracise": "ostracize",
1110
+ "ostracised": "ostracized",
1111
+ "ostracises": "ostracizes",
1112
+ "ostracising": "ostracizing",
1113
+ "outmanoeuvre": "outmaneuver",
1114
+ "outmanoeuvred": "outmaneuvered",
1115
+ "outmanoeuvres": "outmaneuvers",
1116
+ "outmanoeuvring": "outmaneuvering",
1117
+ "overemphasise": "overemphasize",
1118
+ "overemphasised": "overemphasized",
1119
+ "overemphasises": "overemphasizes",
1120
+ "overemphasising": "overemphasizing",
1121
+ "oxidisation": "oxidization",
1122
+ "oxidise": "oxidize",
1123
+ "oxidised": "oxidized",
1124
+ "oxidises": "oxidizes",
1125
+ "oxidising": "oxidizing",
1126
+ "paederast": "pederast",
1127
+ "paederasts": "pederasts",
1128
+ "paediatric": "pediatric",
1129
+ "paediatrician": "pediatrician",
1130
+ "paediatricians": "pediatricians",
1131
+ "paediatrics": "pediatrics",
1132
+ "paedophile": "pedophile",
1133
+ "paedophiles": "pedophiles",
1134
+ "paedophilia": "pedophilia",
1135
+ "palaeolithic": "paleolithic",
1136
+ "palaeontologist": "paleontologist",
1137
+ "palaeontologists": "paleontologists",
1138
+ "palaeontology": "paleontology",
1139
+ "panelled": "paneled",
1140
+ "panelling": "paneling",
1141
+ "panellist": "panelist",
1142
+ "panellists": "panelists",
1143
+ "paralyse": "paralyze",
1144
+ "paralysed": "paralyzed",
1145
+ "paralyses": "paralyzes",
1146
+ "paralysing": "paralyzing",
1147
+ "parcelled": "parceled",
1148
+ "parcelling": "parceling",
1149
+ "parlour": "parlor",
1150
+ "parlours": "parlors",
1151
+ "particularise": "particularize",
1152
+ "particularised": "particularized",
1153
+ "particularises": "particularizes",
1154
+ "particularising": "particularizing",
1155
+ "passivisation": "passivization",
1156
+ "passivise": "passivize",
1157
+ "passivised": "passivized",
1158
+ "passivises": "passivizes",
1159
+ "passivising": "passivizing",
1160
+ "pasteurisation": "pasteurization",
1161
+ "pasteurise": "pasteurize",
1162
+ "pasteurised": "pasteurized",
1163
+ "pasteurises": "pasteurizes",
1164
+ "pasteurising": "pasteurizing",
1165
+ "patronise": "patronize",
1166
+ "patronised": "patronized",
1167
+ "patronises": "patronizes",
1168
+ "patronising": "patronizing",
1169
+ "patronisingly": "patronizingly",
1170
+ "pedalled": "pedaled",
1171
+ "pedalling": "pedaling",
1172
+ "pedestrianisation": "pedestrianization",
1173
+ "pedestrianise": "pedestrianize",
1174
+ "pedestrianised": "pedestrianized",
1175
+ "pedestrianises": "pedestrianizes",
1176
+ "pedestrianising": "pedestrianizing",
1177
+ "penalise": "penalize",
1178
+ "penalised": "penalized",
1179
+ "penalises": "penalizes",
1180
+ "penalising": "penalizing",
1181
+ "pencilled": "penciled",
1182
+ "pencilling": "penciling",
1183
+ "personalise": "personalize",
1184
+ "personalised": "personalized",
1185
+ "personalises": "personalizes",
1186
+ "personalising": "personalizing",
1187
+ "pharmacopoeia": "pharmacopeia",
1188
+ "pharmacopoeias": "pharmacopeias",
1189
+ "philosophise": "philosophize",
1190
+ "philosophised": "philosophized",
1191
+ "philosophises": "philosophizes",
1192
+ "philosophising": "philosophizing",
1193
+ "philtre": "filter",
1194
+ "philtres": "filters",
1195
+ "phoney": "phony",
1196
+ "plagiarise": "plagiarize",
1197
+ "plagiarised": "plagiarized",
1198
+ "plagiarises": "plagiarizes",
1199
+ "plagiarising": "plagiarizing",
1200
+ "plough": "plow",
1201
+ "ploughed": "plowed",
1202
+ "ploughing": "plowing",
1203
+ "ploughman": "plowman",
1204
+ "ploughmen": "plowmen",
1205
+ "ploughs": "plows",
1206
+ "ploughshare": "plowshare",
1207
+ "ploughshares": "plowshares",
1208
+ "polarisation": "polarization",
1209
+ "polarise": "polarize",
1210
+ "polarised": "polarized",
1211
+ "polarises": "polarizes",
1212
+ "polarising": "polarizing",
1213
+ "politicisation": "politicization",
1214
+ "politicise": "politicize",
1215
+ "politicised": "politicized",
1216
+ "politicises": "politicizes",
1217
+ "politicising": "politicizing",
1218
+ "popularisation": "popularization",
1219
+ "popularise": "popularize",
1220
+ "popularised": "popularized",
1221
+ "popularises": "popularizes",
1222
+ "popularising": "popularizing",
1223
+ "pouffe": "pouf",
1224
+ "pouffes": "poufs",
1225
+ "practise": "practice",
1226
+ "practised": "practiced",
1227
+ "practises": "practices",
1228
+ "practising": "practicing",
1229
+ "praesidium": "presidium",
1230
+ "praesidiums": "presidiums",
1231
+ "pressurisation": "pressurization",
1232
+ "pressurise": "pressurize",
1233
+ "pressurised": "pressurized",
1234
+ "pressurises": "pressurizes",
1235
+ "pressurising": "pressurizing",
1236
+ "pretence": "pretense",
1237
+ "pretences": "pretenses",
1238
+ "primaeval": "primeval",
1239
+ "prioritisation": "prioritization",
1240
+ "prioritise": "prioritize",
1241
+ "prioritised": "prioritized",
1242
+ "prioritises": "prioritizes",
1243
+ "prioritising": "prioritizing",
1244
+ "privatisation": "privatization",
1245
+ "privatisations": "privatizations",
1246
+ "privatise": "privatize",
1247
+ "privatised": "privatized",
1248
+ "privatises": "privatizes",
1249
+ "privatising": "privatizing",
1250
+ "professionalisation": "professionalization",
1251
+ "professionalise": "professionalize",
1252
+ "professionalised": "professionalized",
1253
+ "professionalises": "professionalizes",
1254
+ "professionalising": "professionalizing",
1255
+ "programme": "program",
1256
+ "programmes": "programs",
1257
+ "prologue": "prolog",
1258
+ "prologues": "prologs",
1259
+ "propagandise": "propagandize",
1260
+ "propagandised": "propagandized",
1261
+ "propagandises": "propagandizes",
1262
+ "propagandising": "propagandizing",
1263
+ "proselytise": "proselytize",
1264
+ "proselytised": "proselytized",
1265
+ "proselytiser": "proselytizer",
1266
+ "proselytisers": "proselytizers",
1267
+ "proselytises": "proselytizes",
1268
+ "proselytising": "proselytizing",
1269
+ "psychoanalyse": "psychoanalyze",
1270
+ "psychoanalysed": "psychoanalyzed",
1271
+ "psychoanalyses": "psychoanalyzes",
1272
+ "psychoanalysing": "psychoanalyzing",
1273
+ "publicise": "publicize",
1274
+ "publicised": "publicized",
1275
+ "publicises": "publicizes",
1276
+ "publicising": "publicizing",
1277
+ "pulverisation": "pulverization",
1278
+ "pulverise": "pulverize",
1279
+ "pulverised": "pulverized",
1280
+ "pulverises": "pulverizes",
1281
+ "pulverising": "pulverizing",
1282
+ "pummelled": "pummel",
1283
+ "pummelling": "pummeled",
1284
+ "pyjama": "pajama",
1285
+ "pyjamas": "pajamas",
1286
+ "pzazz": "pizzazz",
1287
+ "quarrelled": "quarreled",
1288
+ "quarrelling": "quarreling",
1289
+ "radicalise": "radicalize",
1290
+ "radicalised": "radicalized",
1291
+ "radicalises": "radicalizes",
1292
+ "radicalising": "radicalizing",
1293
+ "rancour": "rancor",
1294
+ "randomise": "randomize",
1295
+ "randomised": "randomized",
1296
+ "randomises": "randomizes",
1297
+ "randomising": "randomizing",
1298
+ "rationalisation": "rationalization",
1299
+ "rationalisations": "rationalizations",
1300
+ "rationalise": "rationalize",
1301
+ "rationalised": "rationalized",
1302
+ "rationalises": "rationalizes",
1303
+ "rationalising": "rationalizing",
1304
+ "ravelled": "raveled",
1305
+ "ravelling": "raveling",
1306
+ "realisable": "realizable",
1307
+ "realisation": "realization",
1308
+ "realisations": "realizations",
1309
+ "realise": "realize",
1310
+ "realised": "realized",
1311
+ "realises": "realizes",
1312
+ "realising": "realizing",
1313
+ "recognisable": "recognizable",
1314
+ "recognisably": "recognizably",
1315
+ "recognisance": "recognizance",
1316
+ "recognise": "recognize",
1317
+ "recognised": "recognized",
1318
+ "recognises": "recognizes",
1319
+ "recognising": "recognizing",
1320
+ "reconnoitre": "reconnoiter",
1321
+ "reconnoitred": "reconnoitered",
1322
+ "reconnoitres": "reconnoiters",
1323
+ "reconnoitring": "reconnoitering",
1324
+ "refuelled": "refueled",
1325
+ "refuelling": "refueling",
1326
+ "regularisation": "regularization",
1327
+ "regularise": "regularize",
1328
+ "regularised": "regularized",
1329
+ "regularises": "regularizes",
1330
+ "regularising": "regularizing",
1331
+ "remodelled": "remodeled",
1332
+ "remodelling": "remodeling",
1333
+ "remould": "remold",
1334
+ "remoulded": "remolded",
1335
+ "remoulding": "remolding",
1336
+ "remoulds": "remolds",
1337
+ "reorganisation": "reorganization",
1338
+ "reorganisations": "reorganizations",
1339
+ "reorganise": "reorganize",
1340
+ "reorganised": "reorganized",
1341
+ "reorganises": "reorganizes",
1342
+ "reorganising": "reorganizing",
1343
+ "revelled": "reveled",
1344
+ "reveller": "reveler",
1345
+ "revellers": "revelers",
1346
+ "revelling": "reveling",
1347
+ "revitalise": "revitalize",
1348
+ "revitalised": "revitalized",
1349
+ "revitalises": "revitalizes",
1350
+ "revitalising": "revitalizing",
1351
+ "revolutionise": "revolutionize",
1352
+ "revolutionised": "revolutionized",
1353
+ "revolutionises": "revolutionizes",
1354
+ "revolutionising": "revolutionizing",
1355
+ "rhapsodise": "rhapsodize",
1356
+ "rhapsodised": "rhapsodized",
1357
+ "rhapsodises": "rhapsodizes",
1358
+ "rhapsodising": "rhapsodizing",
1359
+ "rigour": "rigor",
1360
+ "rigours": "rigors",
1361
+ "ritualised": "ritualized",
1362
+ "rivalled": "rivaled",
1363
+ "rivalling": "rivaling",
1364
+ "romanticise": "romanticize",
1365
+ "romanticised": "romanticized",
1366
+ "romanticises": "romanticizes",
1367
+ "romanticising": "romanticizing",
1368
+ "rumour": "rumor",
1369
+ "rumoured": "rumored",
1370
+ "rumours": "rumors",
1371
+ "sabre": "saber",
1372
+ "sabres": "sabers",
1373
+ "saltpetre": "saltpeter",
1374
+ "sanitise": "sanitize",
1375
+ "sanitised": "sanitized",
1376
+ "sanitises": "sanitizes",
1377
+ "sanitising": "sanitizing",
1378
+ "satirise": "satirize",
1379
+ "satirised": "satirized",
1380
+ "satirises": "satirizes",
1381
+ "satirising": "satirizing",
1382
+ "saviour": "savior",
1383
+ "saviours": "saviors",
1384
+ "savour": "savor",
1385
+ "savoured": "savored",
1386
+ "savouries": "savories",
1387
+ "savouring": "savoring",
1388
+ "savours": "savors",
1389
+ "savoury": "savory",
1390
+ "scandalise": "scandalize",
1391
+ "scandalised": "scandalized",
1392
+ "scandalises": "scandalizes",
1393
+ "scandalising": "scandalizing",
1394
+ "sceptic": "skeptic",
1395
+ "sceptical": "skeptical",
1396
+ "sceptically": "skeptically",
1397
+ "scepticism": "skepticism",
1398
+ "sceptics": "skeptics",
1399
+ "sceptre": "scepter",
1400
+ "sceptres": "scepters",
1401
+ "scrutinise": "scrutinize",
1402
+ "scrutinised": "scrutinized",
1403
+ "scrutinises": "scrutinizes",
1404
+ "scrutinising": "scrutinizing",
1405
+ "secularisation": "secularization",
1406
+ "secularise": "secularize",
1407
+ "secularised": "secularized",
1408
+ "secularises": "secularizes",
1409
+ "secularising": "secularizing",
1410
+ "sensationalise": "sensationalize",
1411
+ "sensationalised": "sensationalized",
1412
+ "sensationalises": "sensationalizes",
1413
+ "sensationalising": "sensationalizing",
1414
+ "sensitise": "sensitize",
1415
+ "sensitised": "sensitized",
1416
+ "sensitises": "sensitizes",
1417
+ "sensitising": "sensitizing",
1418
+ "sentimentalise": "sentimentalize",
1419
+ "sentimentalised": "sentimentalized",
1420
+ "sentimentalises": "sentimentalizes",
1421
+ "sentimentalising": "sentimentalizing",
1422
+ "sepulchre": "sepulcher",
1423
+ "sepulchres": "sepulchers",
1424
+ "serialisation": "serialization",
1425
+ "serialisations": "serializations",
1426
+ "serialise": "serialize",
1427
+ "serialised": "serialized",
1428
+ "serialises": "serializes",
1429
+ "serialising": "serializing",
1430
+ "sermonise": "sermonize",
1431
+ "sermonised": "sermonized",
1432
+ "sermonises": "sermonizes",
1433
+ "sermonising": "sermonizing",
1434
+ "sheikh": "sheik",
1435
+ "shovelled": "shoveled",
1436
+ "shovelling": "shoveling",
1437
+ "shrivelled": "shriveled",
1438
+ "shrivelling": "shriveling",
1439
+ "signalise": "signalize",
1440
+ "signalised": "signalized",
1441
+ "signalises": "signalizes",
1442
+ "signalising": "signalizing",
1443
+ "signalled": "signaled",
1444
+ "signalling": "signaling",
1445
+ "smoulder": "smolder",
1446
+ "smouldered": "smoldered",
1447
+ "smouldering": "smoldering",
1448
+ "smoulders": "smolders",
1449
+ "snivelled": "sniveled",
1450
+ "snivelling": "sniveling",
1451
+ "snorkelled": "snorkeled",
1452
+ "snorkelling": "snorkeling",
1453
+ "snowplough": "snowplow",
1454
+ "snowploughs": "snowplow",
1455
+ "socialisation": "socialization",
1456
+ "socialise": "socialize",
1457
+ "socialised": "socialized",
1458
+ "socialises": "socializes",
1459
+ "socialising": "socializing",
1460
+ "sodomise": "sodomize",
1461
+ "sodomised": "sodomized",
1462
+ "sodomises": "sodomizes",
1463
+ "sodomising": "sodomizing",
1464
+ "solemnise": "solemnize",
1465
+ "solemnised": "solemnized",
1466
+ "solemnises": "solemnizes",
1467
+ "solemnising": "solemnizing",
1468
+ "sombre": "somber",
1469
+ "specialisation": "specialization",
1470
+ "specialisations": "specializations",
1471
+ "specialise": "specialize",
1472
+ "specialised": "specialized",
1473
+ "specialises": "specializes",
1474
+ "specialising": "specializing",
1475
+ "spectre": "specter",
1476
+ "spectres": "specters",
1477
+ "spiralled": "spiraled",
1478
+ "spiralling": "spiraling",
1479
+ "splendour": "splendor",
1480
+ "splendours": "splendors",
1481
+ "squirrelled": "squirreled",
1482
+ "squirrelling": "squirreling",
1483
+ "stabilisation": "stabilization",
1484
+ "stabilise": "stabilize",
1485
+ "stabilised": "stabilized",
1486
+ "stabiliser": "stabilizer",
1487
+ "stabilisers": "stabilizers",
1488
+ "stabilises": "stabilizes",
1489
+ "stabilising": "stabilizing",
1490
+ "standardisation": "standardization",
1491
+ "standardise": "standardize",
1492
+ "standardised": "standardized",
1493
+ "standardises": "standardizes",
1494
+ "standardising": "standardizing",
1495
+ "stencilled": "stenciled",
1496
+ "stencilling": "stenciling",
1497
+ "sterilisation": "sterilization",
1498
+ "sterilisations": "sterilizations",
1499
+ "sterilise": "sterilize",
1500
+ "sterilised": "sterilized",
1501
+ "steriliser": "sterilizer",
1502
+ "sterilisers": "sterilizers",
1503
+ "sterilises": "sterilizes",
1504
+ "sterilising": "sterilizing",
1505
+ "stigmatisation": "stigmatization",
1506
+ "stigmatise": "stigmatize",
1507
+ "stigmatised": "stigmatized",
1508
+ "stigmatises": "stigmatizes",
1509
+ "stigmatising": "stigmatizing",
1510
+ "storey": "story",
1511
+ "storeys": "stories",
1512
+ "subsidisation": "subsidization",
1513
+ "subsidise": "subsidize",
1514
+ "subsidised": "subsidized",
1515
+ "subsidiser": "subsidizer",
1516
+ "subsidisers": "subsidizers",
1517
+ "subsidises": "subsidizes",
1518
+ "subsidising": "subsidizing",
1519
+ "succour": "succor",
1520
+ "succoured": "succored",
1521
+ "succouring": "succoring",
1522
+ "succours": "succors",
1523
+ "sulphate": "sulfate",
1524
+ "sulphates": "sulfates",
1525
+ "sulphide": "sulfide",
1526
+ "sulphides": "sulfides",
1527
+ "sulphur": "sulfur",
1528
+ "sulphurous": "sulfurous",
1529
+ "summarise": "summarize",
1530
+ "summarised": "summarized",
1531
+ "summarises": "summarizes",
1532
+ "summarising": "summarizing",
1533
+ "swivelled": "swiveled",
1534
+ "swivelling": "swiveling",
1535
+ "symbolise": "symbolize",
1536
+ "symbolised": "symbolized",
1537
+ "symbolises": "symbolizes",
1538
+ "symbolising": "symbolizing",
1539
+ "sympathise": "sympathize",
1540
+ "sympathised": "sympathized",
1541
+ "sympathiser": "sympathizer",
1542
+ "sympathisers": "sympathizers",
1543
+ "sympathises": "sympathizes",
1544
+ "sympathising": "sympathizing",
1545
+ "synchronisation": "synchronization",
1546
+ "synchronise": "synchronize",
1547
+ "synchronised": "synchronized",
1548
+ "synchronises": "synchronizes",
1549
+ "synchronising": "synchronizing",
1550
+ "synthesise": "synthesize",
1551
+ "synthesised": "synthesized",
1552
+ "synthesiser": "synthesizer",
1553
+ "synthesisers": "synthesizers",
1554
+ "synthesises": "synthesizes",
1555
+ "synthesising": "synthesizing",
1556
+ "syphon": "siphon",
1557
+ "syphoned": "siphoned",
1558
+ "syphoning": "siphoning",
1559
+ "syphons": "siphons",
1560
+ "systematisation": "systematization",
1561
+ "systematise": "systematize",
1562
+ "systematised": "systematized",
1563
+ "systematises": "systematizes",
1564
+ "systematising": "systematizing",
1565
+ "tantalise": "tantalize",
1566
+ "tantalised": "tantalized",
1567
+ "tantalises": "tantalizes",
1568
+ "tantalising": "tantalizing",
1569
+ "tantalisingly": "tantalizingly",
1570
+ "tasselled": "tasseled",
1571
+ "technicolour": "technicolor",
1572
+ "temporise": "temporize",
1573
+ "temporised": "temporized",
1574
+ "temporises": "temporizes",
1575
+ "temporising": "temporizing",
1576
+ "tenderise": "tenderize",
1577
+ "tenderised": "tenderized",
1578
+ "tenderises": "tenderizes",
1579
+ "tenderising": "tenderizing",
1580
+ "terrorise": "terrorize",
1581
+ "terrorised": "terrorized",
1582
+ "terrorises": "terrorizes",
1583
+ "terrorising": "terrorizing",
1584
+ "theatre": "theater",
1585
+ "theatregoer": "theatergoer",
1586
+ "theatregoers": "theatergoers",
1587
+ "theatres": "theaters",
1588
+ "theorise": "theorize",
1589
+ "theorised": "theorized",
1590
+ "theorises": "theorizes",
1591
+ "theorising": "theorizing",
1592
+ "tonne": "ton",
1593
+ "tonnes": "tons",
1594
+ "towelled": "toweled",
1595
+ "towelling": "toweling",
1596
+ "toxaemia": "toxemia",
1597
+ "tranquillise": "tranquilize",
1598
+ "tranquillised": "tranquilized",
1599
+ "tranquilliser": "tranquilizer",
1600
+ "tranquillisers": "tranquilizers",
1601
+ "tranquillises": "tranquilizes",
1602
+ "tranquillising": "tranquilizing",
1603
+ "tranquillity": "tranquility",
1604
+ "tranquillize": "tranquilize",
1605
+ "tranquillized": "tranquilized",
1606
+ "tranquillizer": "tranquilizer",
1607
+ "tranquillizers": "tranquilizers",
1608
+ "tranquillizes": "tranquilizes",
1609
+ "tranquillizing": "tranquilizing",
1610
+ "tranquilly": "tranquility",
1611
+ "transistorised": "transistorized",
1612
+ "traumatise": "traumatize",
1613
+ "traumatised": "traumatized",
1614
+ "traumatises": "traumatizes",
1615
+ "traumatising": "traumatizing",
1616
+ "travelled": "traveled",
1617
+ "traveller": "traveler",
1618
+ "travellers": "travelers",
1619
+ "travelling": "traveling",
1620
+ "travelog": "travelogue",
1621
+ "travelogs": "travelogues",
1622
+ "trialled": "trialed",
1623
+ "trialling": "trialing",
1624
+ "tricolour": "tricolor",
1625
+ "tricolours": "tricolors",
1626
+ "trivialise": "trivialize",
1627
+ "trivialised": "trivialized",
1628
+ "trivialises": "trivializes",
1629
+ "trivialising": "trivializing",
1630
+ "tumour": "tumor",
1631
+ "tumours": "tumors",
1632
+ "tunnelled": "tunneled",
1633
+ "tunnelling": "tunneling",
1634
+ "tyrannise": "tyrannize",
1635
+ "tyrannised": "tyrannized",
1636
+ "tyrannises": "tyrannizes",
1637
+ "tyrannising": "tyrannizing",
1638
+ "tyre": "tire",
1639
+ "tyres": "tires",
1640
+ "unauthorised": "unauthorized",
1641
+ "uncivilised": "uncivilized",
1642
+ "underutilised": "underutilized",
1643
+ "unequalled": "unequaled",
1644
+ "unfavourable": "unfavorable",
1645
+ "unfavourably": "unfavorably",
1646
+ "unionisation": "unionization",
1647
+ "unionise": "unionize",
1648
+ "unionised": "unionized",
1649
+ "unionises": "unionizes",
1650
+ "unionising": "unionizing",
1651
+ "unorganised": "unorganized",
1652
+ "unravelled": "unraveled",
1653
+ "unravelling": "unraveling",
1654
+ "unrecognisable": "unrecognizable",
1655
+ "unrecognised": "unrecognized",
1656
+ "unrivalled": "unrivaled",
1657
+ "unsavoury": "unsavory",
1658
+ "untrammelled": "untrammeled",
1659
+ "urbanisation": "urbanization",
1660
+ "urbanise": "urbanize",
1661
+ "urbanised": "urbanized",
1662
+ "urbanises": "urbanizes",
1663
+ "urbanising": "urbanizing",
1664
+ "utilisable": "utilizable",
1665
+ "utilisation": "utilization",
1666
+ "utilise": "utilize",
1667
+ "utilised": "utilized",
1668
+ "utilises": "utilizes",
1669
+ "utilising": "utilizing",
1670
+ "valour": "valor",
1671
+ "vandalise": "vandalize",
1672
+ "vandalised": "vandalized",
1673
+ "vandalises": "vandalizes",
1674
+ "vandalising": "vandalizing",
1675
+ "vaporisation": "vaporization",
1676
+ "vaporise": "vaporize",
1677
+ "vaporised": "vaporized",
1678
+ "vaporises": "vaporizes",
1679
+ "vaporising": "vaporizing",
1680
+ "vapour": "vapor",
1681
+ "vapours": "vapors",
1682
+ "verbalise": "verbalize",
1683
+ "verbalised": "verbalized",
1684
+ "verbalises": "verbalizes",
1685
+ "verbalising": "verbalizing",
1686
+ "victimisation": "victimization",
1687
+ "victimise": "victimize",
1688
+ "victimised": "victimized",
1689
+ "victimises": "victimizes",
1690
+ "victimising": "victimizing",
1691
+ "videodisc": "videodisk",
1692
+ "videodiscs": "videodisks",
1693
+ "vigour": "vigor",
1694
+ "visualisation": "visualization",
1695
+ "visualisations": "visualizations",
1696
+ "visualise": "visualize",
1697
+ "visualised": "visualized",
1698
+ "visualises": "visualizes",
1699
+ "visualising": "visualizing",
1700
+ "vocalisation": "vocalization",
1701
+ "vocalisations": "vocalizations",
1702
+ "vocalise": "vocalize",
1703
+ "vocalised": "vocalized",
1704
+ "vocalises": "vocalizes",
1705
+ "vocalising": "vocalizing",
1706
+ "vulcanised": "vulcanized",
1707
+ "vulgarisation": "vulgarization",
1708
+ "vulgarise": "vulgarize",
1709
+ "vulgarised": "vulgarized",
1710
+ "vulgarises": "vulgarizes",
1711
+ "vulgarising": "vulgarizing",
1712
+ "waggon": "wagon",
1713
+ "waggons": "wagons",
1714
+ "watercolour": "watercolor",
1715
+ "watercolours": "watercolors",
1716
+ "weaselled": "weaseled",
1717
+ "weaselling": "weaseling",
1718
+ "westernisation": "westernization",
1719
+ "westernise": "westernize",
1720
+ "westernised": "westernized",
1721
+ "westernises": "westernizes",
1722
+ "westernising": "westernizing",
1723
+ "womanise": "womanize",
1724
+ "womanised": "womanized",
1725
+ "womaniser": "womanizer",
1726
+ "womanisers": "womanizers",
1727
+ "womanises": "womanizes",
1728
+ "womanising": "womanizing",
1729
+ "woollen": "woolen",
1730
+ "woollens": "woolens",
1731
+ "woollies": "woolies",
1732
+ "woolly": "wooly",
1733
+ "worshipped": "worshiped",
1734
+ "worshipper": "worshiper",
1735
+ "worshipping": "worshiping",
1736
+ "yodelled": "yodeled",
1737
+ "yodelling": "yodeling",
1738
+ "yoghourt": "yogurt",
1739
+ "yoghourts": "yogurts",
1740
+ "yoghurt": "yogurt",
1741
+ "yoghurts": "yogurts"
1742
+ }
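The table above is the tail of the British-to-American spelling map used when normalizing English text before WER scoring (see `--do_normalize_eval` in run.sh below). A minimal sketch of how such a map can be applied; the filename `english.json` is an assumption, since the file's name is not visible in this excerpt:

```python
import json
import re

# Sketch: fold British spellings into American ones before scoring.
# "english.json" is an assumed name for the mapping file shown above.
with open("english.json", encoding="utf-8") as f:
    mapping = json.load(f)

def americanize(text: str) -> str:
    # Replace whole alphabetic tokens only; unmapped words pass through.
    return re.sub(r"[A-Za-z]+", lambda m: mapping.get(m.group(0).lower(), m.group(0)), text)

print(americanize("the organiser recognised the theatre"))
# -> "the organizer recognized the theater"
```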
pred.txt ADDED
@@ -0,0 +1 @@
+ pred_str=['心 理 心 白 ', '血 燒 尻 川 ', '打 鼠 ', '十 三 項 ', '水 里 鄉 ', '這 擔 有 一 个 大 上 場 ', '聯 合 國 ', '菜 頭 人 仔 ', '賴 州 ', '肉 羹 ', '新 興 區 ', '刣 姑 平 ', '鯉 東 ', '推 仔 ', '西 園 邦 ', '心 喙 ', '燒 包 仔 ', '康 頭 ', '鳳 坑 ', '羅 水 溪 出 代 誌 ', '粗 勢 ', '哀 哀 ', '九 年 一 關 ', '精 靈 ', '三 寶 ', '索 索 ', '坐 船 咧 等 ', '眼 目 袂 曉 看 懸 低 ', '圳 毋 ', '兩 个 人 已 經 講 好 矣 ', '足 水 ', '細 空 喙 ', '你 仔 有 經 頭 ', '中 新 灣 ', '解 率 ', '草 人 坑 ', '祝 你 生 日 快 樂 ', '坑 仔 縫 ', '出 水 ', '皮 平 串 ', '耳 仔 街 ', '活 躼 ', '臺 灣 人 ', '落 崙 ', '婉 里 坑 ', '坑 仔 內 ', '起 起 落 落 ', '戶 錢 ', '紅 蟲 ', '大 湖 ', '拋 拋 轎 ', '柴 犬 ', '大 位 王 ', '大 著 鼎 ', '頭 鑢 ', '鬆 餅 ', '喙 掩 籠 ', '龍 雲 ', '施 局 ', '縮 時 ', '煞 尾 ', '無 話 講 甲 好 ', '下 暗 磅 ', '一 籠 ', '一 定 哭 未 著 ', '看 一 个 影 生 一 个 囝 ', '倒 勢 ', '北 京 ', '規 陣 俗 姓 ', '肚 臟 ', '變 天 ', '一 工 九 甲 ', '混 沌 ', '有 規 項 菜 ', '厭 武 ', '六 角 區 ', '過 路 山 ', '予 袂 予 人 徛 ', '粗 暑 ', '這 咧 活 蠐 錢 ', '車 仔 ', '看 甲 清 清 粗 粗 ', '楞 神 ', '化 身 ', '白 囡 仔 ', '心 花 東 區 ', '番 鼎 仔 ', '流 擴 擴 ', '大 雞 血 ', '贏 城 真 頭 ', '閃 爬 肉 ', '骨 ', '龜 大 ', '水 雞 皮 仔 ', '夯 硬 糕 ', '抾 無 三 文 本 著 想 欲 做 土 公 ', '阿 姆 姨 仔 ', '臺 北 橋 ', '雲 林 縣 ', '臺 中 港 ', '水 理 ', '臺 南 市 ', '七 堵 ', '做 公 德 ', '臺 川 鄉 ', '青 仔 百 ', '消 ', '漢 臭 真 好 ', '餐 飲 ', '按 呢 生 ', '海 尾 ', '無 米 可 拄 著 卵 粿 ', '旗 鄉 小 細 ', '睏 ', '頭 尾 ', '五 斷 時 鐘 ', '政 商 ', '吐 蟯 咧 ', '動 物 性 膠 質 ', '數 文 精 ', '悠 悠 卡 ', '你 攏 無 效 睏 喔 ', '埤 頭 鄉 ', '信 義 大 安 路 口 ', '三 三 八 八 ', '你 有 的 毋 通 予 別 講 ', '白 寮 鄉 ', '你 莫 著 定 我 肚 臍 ', '新 園 路 ', '金 錢 毋 是 萬 鈴 的 ', '飛 鈴 仔 菜 ', '烰 料 理 ', '你 是 誰 ', '番 烏 ', '新 街 ', '合 同 ', '挺 挺 ', '番 麗 殼 ', '天 氣 冷 仔 你 著 愛 雞 踢 一 領 三 ', '我 到 底 看 著 啥 物 ', '手 番 仔 ', '姑 不 二 三 張 ', '天 格 ', '透 珍 仔 ', '漚 叫 聲 ', '隔 音 白 ', '拍 通 頭 ', '開 花 滿 天 芳 結 子 遮 夯 人 ', '東 北 貴 風 ', '無 人 機 ', '做 牛 無 小 栗 ', '真 空 排 放 ', '這 搖 仔 活 落 去 真 緊 著 的 替 風 仔 ', '毛 瑞 仔 ', '我 喙 歹 ', '陽 明 山 ', '咖 啡 館 ', '囡 仔 食 ', '佇 對 流 針 一 點 點 ', '伊 規 心 欲 苦 大 學 ', '東 湖 國 中 ', '中 原 公 園 ', '半 路 店 ', '屏 南 坑 ', '東 石 鄉 ', '拚 拚 哨 的 ', '無 中 心 化 ', '肉 鬃 角 ', '典 型 氣 ', '鼠 邊 頭 尾 ', '波 丸 街 ', '人 愛 烘 粥 路 口 ', '千 葉 ', '切 鑠 度 ', '帶 擊 帶 離 ', '尪 仔 頭 囝 ', '飼 ', '咱 做 代 誌 的 時 圳 ', '包 水 轎 ', '海 灶 毋 知 尾 海 柴 破 袂 開 ', '遠 總 場 ', '健 康 中 生 ', '印 圇 醫 師 仔 ', '心 理 族 ', '獅 仔 頭 ', '大 肚 路 ', '飼 仔 ', '鹿 爺 區 ', '學 子 ', '放 懶 山 ', '虎 尾 竹 ', '卸 腹 肚 ', '興 順 鎮 ', '小 血 球 ', '化 格 ', '我 三 攏 輕 鬆 掛 領 ', '溜 落 來 ', '半 條 命 ', '心 我 習 慣 ', '陳 影 弓 ', '菜 寶 卵 ', '土 塗 塗 ', '鐵 甲 巫 ', '風 火 頭 ', '現 流 仔 ', '白 鑢 公 ', '悾 港 ', '較 大 ', '公 眾 橋 ', '巴 結 ', '果 汁 ', '有 拜 無 一 好 ', '這 時 ', '竹 百 ', '下 晝 ', '較 早 睏 ', '寮 寮 帶 順 ', '公 司 ', '看 看 緊 緊 ', '坑 仔 ', '若 使 香 菇 ', '山 貓 ', '大 頭 猴 母 ', '塗 板 人 豆 ', '奶 ', '內 延 ', '滾 下 鄉 林 ', '推 磨 ', '真 有 喙 水 ', '摩 托 斯 ', '想 想 眼 你 就 是 你 朋 友 ', '青 山 王 ', '澳 抾 蘭 ', '金 東 ', '便 仔 ', '陳 奕 債 ', '九 尾 金 ', '相 空 ', '過 東 跤 ', '頭 順 傾 傾 ', '某 定 著 ', '礦 機 仔 ', '跙 湧 ', '中 州 ', '青 天 街 ', '東 方 ', '文 底 ', '都 原 性 別 ', '舊 廁 ', '貓 潲 ', '法 人 國 小 ', '點 破 ', '公 臺 ', '某 起 姑 仔 ', '蠟 豆 ', '中 環 ', '鑢 死 瓜 活 ', '打 著 歹 對 步 ', '挑 俘 ', '夠 水 公 的 無 尊 姓 ', '交 力 片 ', '如 此 如 此 嚴 重 ', '一 个 一 个 ', '陳 港 大 橋 ', '上 大 代 誌 說 ', '瑞 仔 旭 光 ', '茴 東 跤 ', '旭 園 湖 ', '講 啥 物 烏 仔 漢 字 ', '爺 ', '刺 鼠 菇 毒 著 蟲 ', '袂 使 毋 知 臭 ', '斗 南 ', '姑 哺 笑 無 事 ', '水 粿 仔 ', '滿 狀 ', '手 礦 ', '心 腹 無 厭 ', '敢 食 戛 油 ', '阿 寶 磚 ', '機 動 戰 士 ', '變 動 ', '興 一 病 母 ', '敲 仙 鼓 ', '所 羅 門 君 堵 ', '終 界 ', '我 愛 臺 語 ', '校 鼎 ', '甜 甜 ', '上 蓋 ', '海 �� ', '落 葉 不 絲 ', '踏 車 巫 ', '殕 仔 徛 豬 仔 ', '方 琴 尪 仔 ', '山 ', '生 甲 袂 䆀 ', '九 尾 素 ', '倚 刀 仔 ', '中 央 大 學 ', '石 門 水 庫 ', '暖 手 相 ', '百 貨 店 ', '倯 素 甘 ', '較 蓋 ', '洗 身 仔 枋 ', '天 台 ', '數 字 片 ', '漚 愷 ', '鈍 市 會 ', '頭 走 淚 ', '夭 壽 鬼 ', '好 所 在 ', '軟 斜 ', '撤 離 ', '現 辦 ', '究 勘 話 ', '伊 心 做 遮 爾 擦 爾 ', '無 神 那 縱 深 ', '一 點 點 仔 ', '龍 江 ', '隔 查 某 囝 ', '推 著 ', '心 心 酒 店 ', '過 死 直 死 ', '較 平 咧 ', '透 生 大 汗 ', '漉 糊 白 仔 ', '行 避 址 袂 著 愛 人 生 ', 
'雞 花 仔 ', '那 三 年 仔 ', '荷 里 雕 ', '石 板 ', '虎 魚 ', '落 雨 了 喔 頭 殼 陂 間 擔 落 落 ', '較 聳 ', '雞 人 飼 ', '你 欲 行 對 佗 位 去 ', '敦 化 胡 平 路 口 ', '港 仔 喙 ', '我 影 今 年 二 十 出 頭 矣 ', '心 肝 醫 ', '下 晝 覺 武 ', '一 分 ', '蠓 蠓 仔 蠓 ', '完 安 ', '牛 郎 ', '袂 看 得 ', '立 公 立 直 ', '龜 山 島 ', '過 渡 ', '塑 做 堆 ', '尾 仔 跤 ', '包 大 ', '原 地 ', '三 頂 三 重 ', '林 仔 口 ', '畢 電 ', '草 拗 ', '揣 轉 愛 ', '機 中 站 ', '苴 仔 代 ', '心 酺 ', '燒 桌 ', '頂 底 寮 ', '竹 南 ', '你 共 包 仔 提 起 嚇 小 一 个 ', '平 方 筋 平 方 筋 ', '頂 公 館 ', '線 仙 ', '演 藝 界 ', '血 本 ', '毋 知 無 怨 ', '較 緊 ', '有 也 好 無 也 好 ', '草 鑠 ', '講 推 仔 龍 ', '怪 奇 菇 ', '國 抱 加 盟 ', '新 羅 ', '過 去 ', '豆 豆 仔 ', '拍 寶 ', '物 件 若 予 伊 提 過 手 ', '奶 仔 嬰 ', '意 蓋 ', '刺 尾 風 ', '烏 來 ', '挺 走 ', '無 踏 無 踅 ', '我 袂 準 備 落 山 ', '倒 擁 ', '聽 無 ', '喜 的 頭 雞 娘 對 人 家 正 好 咧 ', '大 細 輪 ', '五 葉 ', '無 年 五 你 會 出 國 讀 冊 ', '借 園 ', '虼 兒 ', '後 寮 ', '我 攏 有 看 著 ', '建 人 路 ', '歹 天 ', '三 跤 步 一 坎 唸 ', '賴 肉 ', '龜 你 祝 ', '下 晡 點 心 ', '冷 管 ', '無 枝 結 ', '公 文 ', '雜 喙 狗 尻 川 ', '頭 爺 屁 ', '橐 錢 ', '茂 空 石 ', '蚵 仔 雞 湯 仔 ', '雞 使 面 ', '白 對 頭 ', '蜂 一 枝 ', '無 一 步 ', '路 口 講 出 來 ', '頭 頂 ', '防 地 做 甲 流 汗 防 跤 相 甲 流 瀾 ', '好 鬥 陣 ', '拍 死 板 ', '同 陂 ', '打 巴 ', '簡 順 ', '一 个 才 一 个 ', '緊 來 選 ', '洋 仔 話 ', '拍 數 ', '你 拉 瓦 ', '總 舖 ', '山 里 無 間 龜 溜 溜 ', '鑿 瓜 ', '手 心 動 ', '電 子 ', '暗 空 ', '散 食 ', '塑 膠 車 ', '要 意 ', '燒 雞 胿 ', '吊 猴 ', '掠 猴 ', '暗 頭 仔 ', '濟 囝 濟 八 八 濟 新 婦 濟 體 踢 ', '猴 球 ', '無 拄 好 ', '草 頭 貓 ', '金 寬 失 立 ', '觀 音 臺 師 ', '財 圳 ', '壥 暗 仔 ', '拗 路 ', '結 禮 ', '一 目 仔 ', '提 款 片 ', '無 你 的 代 ', '天 公 債 ', '篡 廣 ', '離 櫥 ', '馬 鈴 薯 條 ', '做 塗 水 的 ', '林 爭 味 ', '流 明 傳 ', '官 包 油 ', '湯 醋 ', '離 間 ', '拍 鐵 獅 ', '萬 一 跤 步 ', '鐵 耙 的 ', '下 禮 拜 ', '死 亡 誌 悌 ', '你 毋 通 放 在 心 肝 內 ', '臭 牛 仔 ', '趁 一 空 食 三 冬 ', '我 欲 下 代 語 ', '臺 ', '拉 甲 斗 ', '雨 水 罨 ', '貴 州 姓 ', '敦 華 北 路 ', '你 看 起 來 是 正 年 仔 媠 ', '真 硬 食 公 公 硬 食 天 公 ', '回 去 ', '下 晡 時 ', '幾 个 ', '紅 燈 ', '感 情 ', '二 條 仔 ', '無 鐵 仔 ', '延 興 路 ', '馬 空 ', '六 點 ', '死 中 ', '超 寶 奶 ', '園 仔 機 ', '廣 寮 區 ', '甘 血 噴 天 ', '大 好 夯 人 ', '行 政 院 ', '食 飼 ', '狗 弟 厝 ', '冷 清 ', '我 的 港 ', '浸 水 器 ', '雞 柳 ', '天 公 山 ', '關 ', '愛 代 語 ', '我 共 欲 袂 予 ', '天 飯 ', '有 冷 汗 ', '衝 ', '中 國 話 ', '爿 香 對 排 ', '四 塊 草 ', '拆 箍 ', '關 湳 ', '結 霜 卵 ', '擔 貂 ', '大 公 山 ', '死 瓜 丸 大 平 ', '有 這 款 的 代 誌 我 攏 毋 知 ', '埔 頭 仔 機 ', '國 姓 爺 ', '那 口 區 ', '盤 ', '苦 山 林 斯 ', '代 誌 大 案 ', '半 姓 半 姨 ', '點 滴 點 滴 桃 坑 鄉 魑 施 ', '你 有 工 工 ', '任 通 無 洗 手 的 朋 友 ', '烏 日 ', '衛 風 ', '水 園 ', '怨 恬 ', '連 半 个 人 影 無 ', '吊 褲 ', '塗 子 工 ', '霧 沙 沙 ', '衛 生 箸 ', '鯉 丸 仔 ', '搵 醬 ', '你 老 母 拄 咧 掠 咧 ', '友 情 濟 來 順 ', '港 仔 喙 國 小 ', '漉 糊 塑 ', '落 尾 的 ', '平 方 電 腦 ', '蠓 蠓 仔 鰗 鰆 塗 嘛 會 擔 ', '冷 滾 水 ', '一 个 人 口 按 一 个 位 ', '兼 數 家 ', '雞 卵 糕 仔 ', '溼 灌 ', '珠 倒 ', '聖 誕 老 阿 公 ', '期 頂 白 毛 毛 ', '飯 碗 ', '南 縣 ', '毋 人 事 ', '餘 韻 ', '包 山 包 海 ', '兼 空 區 ', '洗 澡 精 ', '拋 勁 電 傷 ', '開 喙 涵 粒 粒 臭 ', '踏 水 器 ', '老 姑 精 ', '塑 膠 路 仔 ', '巣 位 仔 聯 名 ', '臭 水 ', '塑 膠 矸 仔 ', '衛 生 機 ', '面 開 ', '圍 場 天 下 不 亂 ', '錢 票 ', '郭 仔 樹 ', '牛 角 三 件 ', '半 夜 ', '思 雷 ', '衝 湧 ', '感 有 文 德 ', '食 涼 ', '有 的 紙 欠 濁 ', '做 塗 水 ', '怪 奇 ', '半 斤 ', '交 關 ', '海 岸 ', '中 山 路 ', '番 仔 狗 ', '婉 勇 仔 ', '龜 龜 毛 毛 ', '毋 捌 意 看 過 時 ', '標 段 ', '機 公 號 ', '海 拄 仔 ', '猴 頭 猴 ', '彼 死 兩 相 ', '民 主 ', '底 香 卵 ', '無 底 交 電 ', '食 人 一 口 還 人 一 斗 ', '鐵 段 ', '東 埤 仔 ', '年 猴 ', '佛 心 投 資 者 ', '人 講 對 人 講 ', '金 剛 戰 事 ', '做 肉 餅 ', '小 補 喙 ', '櫻 桮 魚 ', '美 頭 船 ', '一 筆 錢 ', '割 菜 ', '狗 馬 日 晟 ', '一 坵 草 一 點 鹿 ', '閘 時 ', '大 頭 母 人 ', '包 圍 ', '頂 哩 擺 ', '柔 珠 仔 ', '人 間 幾 幾 個 人 ', '無 氣 體 情 ', '日 落 山 ', '偏 鄉 ', '七 頭 跤 ', '袂 死 心 ', '戒 罪 ', '走 友 人 ', '有 聽 著 無 ', '大 湯 埕 ', '斗 人 仔 本 ', '滾 鼓 ', '影 人 ', '抢 查 某 ', '暗 當 食 聖 ', '草 鰆 鰡 仔 ', '痛 快 ', '手 包 ', '包 餅 ', '手 轎 ', '暖 無 ', '無 精 彩 ', '內 街 ', '烏 手 ', '燒 身 厝 ', '龜 潤 ', '五 公 厝 貯 ', '山 巡 ', '人 生 欠 貨 ', '天 公 無 大 著 ', '望 安 鄉 ', '食 虎 ', '分 子 ', '批 車 ', '大 支 ', '公 館 路 ', '碧 潭 路 
', '臺 北 後 酒 後 酒 小 學 步 ', '青 年 不 文 ', '四 分 仔 ', '頭 哭 的 ', '南 機 場 ', '湧 瑞 街 ', '浮 珠 溪 ', '池 上 ', '跤 車 ', '關 胎 ', '苦 袂 冬 ', '單 蒼 公 ', '睏 鏡 花 ', '車 驅 ', '共 人 嗙 ', '頂 底 園 ', '潮 州 街 口 ', '車 坤 ', '龍 頂 厝 ', '芳 樂 公 園 ', '金 剛 鎮 ', '忠 孝 福 興 ', '民 族 保 溫 路 口 ', '松 信 路 ', '糋 麭 粉 ', '文 州 ', '臺 北 蘭 路 山 ', '有 夠 咧 加 速 ', '伊 雞 我 一 擲 ', '笙 仔 ', '魚 豬 鄉 ', '囝 柑 仔 囝 ', '四 跤 水 仙 ', '暖 暖 區 ', '用 起 美 若 洗 喙 時 愛 上 如 此 高 ', '新 社 ', '武 神 力 故 宮 好 ', '實 包 ', '埔 尾 ', '寶 路 ', '冰 糖 ', '福 星 橋 ', '阿 拉 ', '聖 安 ', '遠 林 市 ', '歹 舌 ', '講 代 誌 ', '年 度 年 ', '景 興 路 ', '海 蛤 仔 頭 ', '一 稈 半 稈 ', '死 輪 節 ', '包 殼 ', '瑞 穂 ', '咱 思 貓 頭 鳥 ', '茂 羅 開 ', '酒 光 喙 罨 ', '果 汁 體 ', '蛤 術 ', '不 知 ', '遠 東 區 ', '上 海 茂 荷 ', '意 識 面 ', '光 芳 ', '話 語 指 中 ', '皮 人 積 肉 糜 ', '紅 豆 仔 麭 ', '頂 花 ', '相 伨 ', '三 兩 人 講 四 斤 話 ', '禁 欲 ', '一 坵 ', '卵 箍 ', '真 珠 看 做 貓 齒 使 ', '天 津 ', '角 運 動 龍 ', '莫 門 足 恐 怖 ', '品 德 街 ', '羅 典 ', '錢 拆 藥 ', '用 湯 平 ', '花 生 園 ', '可 睏 丸 ', '武 松 ', '豬 頭 鳳 ', '蔭 出 ', '閘 位 ', '敲 蟲 ', '番 薯 片 ', '摒 氣 ', '卵 子 ', '番 喜 甘 丸 ', '大 海 毋 驚 大 水 ', '橫 的 ', '跤 手 指 ', '歇 ', '千 巴 總 ', '徛 倒 規 ', '查 某 影 ', '番 薯 球 ', '蜂 寮 草 ', '大 火 ', '火 記 母 ', '英 國 話 ', '免 滴 顧 問 ', '老 街 ', '看 有 ', '摒 車 天 花 肚 ', '下 文 活 東 ', '臺 羅 姿 ', '內 ', '金 風 山 ', '死 袂 見 效 ', '嚇 死 ', '交 法 ', '馬 鈴 薯 片 ', '里 頭 拍 甲 蒼 仔 股 毋 戛 ', '營 養 庫 本 ', '放 水 機 ', '大 大 塊 仔 ', '宜 蘭 ', '無 一 定 敢 仔 我 看 著 ', '有 困 山 ', '代 誌 好 生 仔 尾 ', '媠 氣 ', '景 文 街 ', '酒 天 酒 地 ', '今 年 是 一 个 好 年 冬 ', '師 佮 ', '烏 山 頭 ', '三 講 四 毋 著 ', '武 術 文 字 ', '反 策 ', '大 日 本 地 國 ', '仝 蓋 網 上 ', '若 後 ', '吞 奶 ', '莫 閣 ', '緊 急 做 代 一 跤 要 緊 ', '你 講 啥 ', '經 水 ', '位 格 ', '生 體 健 康 ', '跤 手 十 足 ', '電 子 學 ', '夏 年 ', '大 路 大 股 ', '伊 是 一 个 大 米 奶 ', '解 油 ', '膨 氣 ', '你 陪 我 去 那 邊 好 無 ', '網 路 用 較 著 咧 ', '周 賽 ', '姑 誕 一 生 ', '家 己 ', '死 中 袂 終 ', '牛 舟 仔 ', '姑 烘 毒 黒 ', '離 蜢 ', '注 目 ', '掠 仔 ', '燒 桌 ', '啉 喙 ', '創 辦 人 ', '水 灿 林 ', '市 場 動 物 ', '目 睭 白 金 ', '抾 囡 仔 ', '我 予 滾 水 通 一 个 泡 泡 就 聽 的 ', '順 桮 ', '話 人 ', '郭 牙 ', '三 里 經 ', '飯 盒 ', '天 車 ', '圍 堵 ', '茂 仔 久 ', '白 蟳 ', '電 蜜 ', '鹽 酸 草 ', '番 仔 番 薯 ', '年 一 擺 來 你 攏 趨 好 了 的 ', '來 喔 緊 愛 看 喔 ', '番 界 ', '總 事 件 ', '破 柴 ', '列 位 ', '鳳 山 ', '老 厝 ', '開 山 母 鄉 ', '金 米 國 小 ', '跌 腹 市 ', '橋 頭 區 ', '南 州 ', '好 米 鎮 ', '中 立 ', '西 區 ', '國 泰 ', '水 轉 流 ', '中 央 路 ', '南 福 著 ', '金 華 大 哈 ', '使 甲 ', '士 林 區 ', '紙 票 變 厚 ', '中 山 橋 ', '彰 化 人 烘 ', '二 國 倉 ', '喙 風 轎 ', '花 仔 菜 ', '中 間 國 小 ', '按 呢 著 好 ', '老 去 矣 ', '車 埕 ', '南 洋 街 ', '匈 牙 ', '松 壽 路 口 ', '中 坑 ', '不 屬 志 溪 ', '瑞 源 ', '後 驛 ', '大 �� 鼎 ', '鳥 仔 弓 ', '再 掰 ', '新 園 區 ', '政 義 遊 腳 ', '寧 夏 路 ', '動 蠻 ', '大 好 蜈 蚣 ', '仁 愛 鄉 ', '看 龍 ', '班 ', '西 米 粉 ', '食 機 ', '吊 管 街 ', '仙 師 娘 ', '拍 鐵 寮 ', '路 影 區 ', '韻 尾 ', '倚 雙 溪 ', '信 義 區 ', '你 母 甘 知 仔 你 一 隻 發 肺 文 ', '敢 按 呢 ', '狗 草 番 ', '望 星 橋 ', '肌 膚 ', '擦 耳 膠 ', '文 聳 橋 ', '有 要 緊 無 ', '若 來 鄉 ', '紅 目 石 獅 ', '囡 仔 人 嘛 看 有 ', '青 大 ', '文 鼓 炊 ', '歹 喙 道 ', '查 某 人 ', '魯 菜 頭 ', '跳 尾 ', '鴨 母 大 正 堆 ', '不 貪 不 素 ', '徛 名 ', '白 目 粉 ', '聽 阿 毛 講 場 ', '豆 磨 ', '婉 明 府 ', '粗 花 ', '雞 人 蜂 ', '暗 眠 膜 山 貓 ', '袂 曉 講 話 ', '遊 蠢 ', '巡 事 ', '三 攏 無 ', '鐵 葉 膏 ', '推 桌 ', '足 夠 無 看 的 ', '起 雞 母 皮 ', '臺 港 ', '北 建 圈 ', '爛 花 踏 ', '著 瓦 場 ', '苦 寮 仔 ', '五 起 肩 頭 仔 交 起 來 的 達 基 馬 騰 ', '絨 帽 位 ', '厝 尾 頂 ', '花 樹 人 ', '大 箍 ', '較 低 ', '客 戶 ', '鴨 母 燒 ', '烏 仔 箍 ', '海 口 ', '阿 里 不 達 ', '冰 島 ', '節 ', '使 啦 ', '臭 使 星 ', '網 網 爺 尻 川 ', '冊 鑿 ', '陽 山 迴 腸 ', '你 冊 螺 絲 格 ', '小 山 郎 仔 ', '剪 仔 菇 ', '青 梅 仔 目 鏡 ', '真 愛 ', '在 地 大 學 ', '欲 去 佗 位 ', '崁 頭 鴨 ', '免 流 ', '揣 著 ', '橋 頭 走 路 ', '門 東 仔 ', '冷 角 龍 ', '狗 頭 殕 容 ', '傳 番 ', '我 欲 去 變 數 ', '那 頓 食 ', '鼻 雷 公 ', '代 誌 文 好 ', '抌 頭 ', '牛 牙 使 彈 ', '材 料 科 學 ', '掩 地 水 街 ', '貨 錢 ', '睏 袂 去 ']
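pred.txt holds the evaluation-set predictions as space-delimited characters, which matches the character-level ("only char") setup and explains why the model card's WER and CER are nearly identical (45.28 vs. 45.37): with one token per character, word-level scoring degenerates into character-level scoring. A sketch with a hypothetical reference string:

```python
import evaluate

# With space-delimited characters, WER over "tokens" behaves like CER.
# The reference below is hypothetical, for illustration only.
wer = evaluate.load("wer")
print(wer.compute(predictions=["心 理 心 白"], references=["心 理 性 白"]))
# -> 0.25 (one substitution out of four characters)
```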
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "chunk_length": 30,
+ "feature_extractor_type": "WhisperFeatureExtractor",
+ "feature_size": 80,
+ "hop_length": 160,
+ "n_fft": 400,
+ "n_samples": 480000,
+ "nb_max_frames": 3000,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "processor_class": "WhisperProcessor",
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+ }
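These values are mutually consistent: 30 s chunks at 16 kHz give n_samples = 30 × 16000 = 480000 samples, and with a hop length of 160 samples that is 480000 / 160 = 3000 mel frames of 80 features each. A quick sanity check, assuming it is run from the repository root so the config above is picked up:

```python
from transformers import WhisperFeatureExtractor

# Load the extractor defined by preprocessor_config.json above.
fe = WhisperFeatureExtractor.from_pretrained(".")
assert fe.n_samples == fe.chunk_length * fe.sampling_rate   # 30 * 16000 = 480000
assert fe.nb_max_frames == fe.n_samples // fe.hop_length    # 480000 / 160 = 3000
```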
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ba901ed975c5cd4ccdcf22f79d75ab9211d9b2a28135620fd264d554d5f2684
+ size 3055754841
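This file is a Git LFS pointer rather than the weights themselves; the ~3.06 GB size is consistent with a float32 Whisper-medium checkpoint and is resolved by `git lfs pull` or fetched transparently from the Hub. A usage sketch; the repository id and audio path below are assumptions:

```python
from transformers import pipeline

# Assumed hub id, inferred from the model card title; replace as needed.
asr = pipeline(
    "automatic-speech-recognition",
    model="thomas0104/whisper_medium_nan_tw_only_char",
    chunk_length_s=30,
)
print(asr("sample.wav")["text"])  # "sample.wav" is a placeholder path
```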
run.sh ADDED
@@ -0,0 +1,34 @@
+ python3 run_speech_recognition_seq2seq_streaming.py \
+ --model_name_or_path="openai/whisper-medium" \
+ --dataset_name="mozilla-foundation/common_voice_11_0" \
+ --dataset_config_name="nan-tw" \
+ --language="chinese" \
+ --train_split_name="train" \
+ --eval_split_name="test" \
+ --model_index_name="Whisper medium nan-tw" \
+ --max_steps="5000" \
+ --output_dir="./" \
+ --per_device_train_batch_size="2" \
+ --per_device_eval_batch_size="2" \
+ --logging_steps="25" \
+ --learning_rate="1e-5" \
+ --warmup_steps="500" \
+ --evaluation_strategy="steps" \
+ --eval_steps="1000" \
+ --save_strategy="steps" \
+ --save_steps="1000" \
+ --generation_max_length="225" \
+ --length_column_name="input_length" \
+ --max_duration_in_seconds="30" \
+ --text_column_name="sentence" \
+ --freeze_feature_encoder="False" \
+ --report_to="tensorboard" \
+ --gradient_checkpointing \
+ --fp16 \
+ --overwrite_output_dir \
+ --do_train \
+ --do_eval \
+ --predict_with_generate \
+ --do_normalize_eval \
+ --use_auth_token \
+ --optim="adamw_bnb_8bit"
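These flags correspond one-to-one to the hyperparameters in the model card (5000 steps, per-device batch size 2, lr 1e-5, 500 warmup steps, eval/save every 1000 steps); `--optim="adamw_bnb_8bit"` additionally requires the bitsandbytes package. As a sketch, the key training flags map onto `Seq2SeqTrainingArguments` like this:

```python
from transformers import Seq2SeqTrainingArguments

# Sketch of the main run.sh flags expressed in Python.
args = Seq2SeqTrainingArguments(
    output_dir="./",
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    learning_rate=1e-5,
    warmup_steps=500,
    max_steps=5000,
    evaluation_strategy="steps",
    eval_steps=1000,
    save_strategy="steps",
    save_steps=1000,
    generation_max_length=225,
    gradient_checkpointing=True,
    fp16=True,
    predict_with_generate=True,
    report_to=["tensorboard"],
    optim="adamw_bnb_8bit",  # needs bitsandbytes installed
)
```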
run_speech_recognition_seq2seq_streaming.py ADDED
@@ -0,0 +1,617 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for sequence to sequence speech recognition
18
+ with 🤗 Datasets' streaming mode.
19
+ """
20
+ # You can also adapt this script for your own sequence to sequence speech
21
+ # recognition task. Pointers for this are left as comments.
22
+ import ryNormText
23
+ import logging
24
+ import os
25
+ import re
26
+ import string
27
+ import sys
28
+ from dataclasses import dataclass, field
29
+ from typing import Any, Dict, List, Optional, Union
30
+
31
+ import datasets
32
+ import torch
33
+ from datasets import IterableDatasetDict, interleave_datasets, load_dataset
34
+ from torch.utils.data import IterableDataset
35
+
36
+ import evaluate
37
+ import transformers
38
+ from transformers import (
39
+ AutoConfig,
40
+ AutoFeatureExtractor,
41
+ AutoModelForSpeechSeq2Seq,
42
+ AutoProcessor,
43
+ AutoTokenizer,
44
+ HfArgumentParser,
45
+ Seq2SeqTrainer,
46
+ Seq2SeqTrainingArguments,
47
+ TrainerCallback,
48
+ set_seed,
49
+ )
50
+ from transformers.trainer_pt_utils import IterableDatasetShard
51
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
52
+ from transformers.utils import check_min_version, send_example_telemetry
53
+ from transformers.utils.versions import require_version
54
+
55
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
56
+ check_min_version("4.25.0.dev0")
57
+
58
+ require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
59
+
60
+ logger = logging.getLogger(__name__)
61
+
62
+
63
+ @dataclass
64
+ class ModelArguments:
65
+ """
66
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
67
+ """
68
+
69
+ model_name_or_path: str = field(
70
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
71
+ )
72
+ config_name: Optional[str] = field(
73
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
74
+ )
75
+ tokenizer_name: Optional[str] = field(
76
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
77
+ )
78
+ feature_extractor_name: Optional[str] = field(
79
+ default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
80
+ )
81
+ cache_dir: Optional[str] = field(
82
+ default=None,
83
+ metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
84
+ )
85
+ use_fast_tokenizer: bool = field(
86
+ default=True,
87
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
88
+ )
89
+ model_revision: str = field(
90
+ default="main",
91
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
92
+ )
93
+ use_auth_token: bool = field(
94
+ default=False,
95
+ metadata={
96
+ "help": (
97
+ "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
98
+ "with private models)."
99
+ )
100
+ },
101
+ )
102
+ freeze_feature_encoder: bool = field(
103
+ default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
104
+ )
105
+ freeze_encoder: bool = field(
106
+ default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
107
+ )
108
+ forced_decoder_ids: List[List[int]] = field(
109
+ default=None,
110
+ metadata={
111
+ "help": (
112
+ "A list of pairs of integers which indicates a mapping from generation indices to token indices "
113
+ "that will be forced before sampling. For example, [[0, 123]] means the first generated token "
114
+ "will always be a token of index 123."
115
+ )
116
+ },
117
+ )
118
+ suppress_tokens: List[int] = field(
119
+ default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
120
+ )
121
+ model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."})
122
+
123
+
124
+ @dataclass
125
+ class DataTrainingArguments:
126
+ """
127
+ Arguments pertaining to what data we are going to input our model for training and eval.
128
+ """
129
+
130
+ dataset_name: str = field(
131
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
132
+ )
133
+ dataset_config_name: Optional[str] = field(
134
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
135
+ )
136
+ text_column: Optional[str] = field(
137
+ default=None,
138
+ metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
139
+ )
140
+ max_train_samples: Optional[int] = field(
141
+ default=None,
142
+ metadata={
143
+ "help": (
144
+ "For debugging purposes or quicker training, truncate the number of training examples to this "
145
+ "value if set."
146
+ )
147
+ },
148
+ )
149
+ max_eval_samples: Optional[int] = field(
150
+ default=None,
151
+ metadata={
152
+ "help": (
153
+ "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
154
+ "value if set."
155
+ )
156
+ },
157
+ )
158
+ audio_column_name: str = field(
159
+ default="audio",
160
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
161
+ )
162
+ text_column_name: str = field(
163
+ default="text",
164
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
165
+ )
166
+ max_duration_in_seconds: float = field(
167
+ default=20.0,
168
+ metadata={
169
+ "help": (
170
+ "Truncate audio files that are longer than `max_duration_in_seconds` seconds to"
171
+ " 'max_duration_in_seconds`"
172
+ )
173
+ },
174
+ )
175
+ min_duration_in_seconds: float = field(
176
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
177
+ )
178
+ train_split_name: str = field(
179
+ default="train",
180
+ metadata={
181
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
182
+ },
183
+ )
184
+ eval_split_name: str = field(
185
+ default="test",
186
+ metadata={
187
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
188
+ },
189
+ )
190
+ do_lower_case: bool = field(
191
+ default=False,
192
+ metadata={"help": "Whether the target text should be lower cased."},
193
+ )
194
+ do_remove_punctuation: bool = field(
195
+ default=False,
196
+ metadata={"help": "Whether the target text should be striped of punctuation."},
197
+ )
198
+ do_normalize_eval: bool = field(
199
+ default=True,
200
+ metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."},
201
+ )
202
+ language: str = field(
203
+ default=None,
204
+ metadata={
205
+ "help": (
206
+ "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
207
+ "only. For English speech recognition, it should be set to `None`."
208
+ )
209
+ },
210
+ )
211
+ task: str = field(
212
+ default="transcribe",
213
+ metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
214
+ )
215
+ shuffle_buffer_size: Optional[int] = field(
216
+ default=500,
217
+ metadata={
218
+ "help": (
219
+ "The number of streamed examples to download before shuffling them. The large the buffer, "
220
+ "the closer it is to real offline shuffling."
221
+ )
222
+ },
223
+ )
224
+
225
+
226
+ @dataclass
227
+ class DataCollatorSpeechSeq2SeqWithPadding:
228
+ """
229
+ Data collator that will dynamically pad the inputs received.
230
+ Args:
231
+ processor ([`WhisperProcessor`])
232
+ The processor used for processing the data.
233
+ decoder_start_token_id (`int`)
234
+ The begin-of-sentence of the decoder.
235
+ """
236
+
237
+ processor: Any
238
+ decoder_start_token_id: int
239
+
240
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
241
+ # split inputs and labels since they have to be of different lengths and need
242
+ # different padding methods
243
+ model_input_name = self.processor.model_input_names[0]
244
+ input_features = [{model_input_name: feature[model_input_name]} for feature in features]
245
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
246
+
247
+ batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
248
+
249
+ labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
250
+
251
+ # replace padding with -100 to ignore loss correctly
252
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
253
+
254
+ # if bos token is appended in previous tokenization step,
255
+ # cut bos token here as it's append later anyways
256
+ if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
257
+ labels = labels[:, 1:]
258
+
259
+ batch["labels"] = labels
260
+
261
+ return batch
262
+
263
+
264
+ def load_streaming_dataset(dataset_name, dataset_config_name, split="train", **kwargs):
265
+ """
266
+ Utility function to load a dataset in streaming mode. For datasets with multiple splits,
267
+ each split is loaded individually and then splits combined by taking alternating examples from
268
+ each (interleaving).
269
+ """
270
+ if "+" in split:
271
+ # load multiple splits separated by the `+` symbol with streaming mode
272
+ dataset_splits = [
273
+ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=True, **kwargs)
274
+ for split_name in split.split("+")
275
+ ]
276
+ # interleave multiple splits to form one dataset
277
+ interleaved_dataset = interleave_datasets(dataset_splits)
278
+ return interleaved_dataset
279
+ else:
280
+ # load a single split *with* streaming mode
281
+ dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=True, **kwargs)
282
+ return dataset
283
+
284
+
285
+ def main():
286
+ # 1. Parse input arguments
287
+ # See all possible arguments in src/transformers/training_args.py
288
+ # or by passing the --help flag to this script.
289
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
290
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
291
+
292
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
293
+ # If we pass only one argument to the script and it's the path to a json file,
294
+ # let's parse it to get our arguments.
295
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
296
+ else:
297
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
298
+
+     # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
+     # information sent is the one passed as arguments along with your Python/PyTorch versions.
+     send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
+
+     # 2. Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     log_level = training_args.get_process_log_level()
+     logger.setLevel(log_level)
+     datasets.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.enable_default_handler()
+     transformers.utils.logging.enable_explicit_format()
+
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log a short summary on each process:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bit training: {training_args.fp16}"
+     )
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     # Set the verbosity of the Transformers logger to INFO (on the main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+
+     # 3. Detect the last checkpoint and optionally resume from it
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # 4. Load dataset
+     raw_datasets = IterableDatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_streaming_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.train_split_name,
+             use_auth_token=True if model_args.use_auth_token else None,
+         )
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_streaming_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.eval_split_name,
+             use_auth_token=True if model_args.use_auth_token else None,
+         )
+
+     raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
+
+     if data_args.audio_column_name not in raw_datasets_features:
+         raise ValueError(
+             f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+             "Make sure to set `--audio_column_name` to the correct audio column - one of "
+             f"{', '.join(raw_datasets_features)}."
+         )
+
+     if data_args.text_column_name not in raw_datasets_features:
+         raise ValueError(
+             f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+             "Make sure to set `--text_column_name` to the correct text column - one of "
+             f"{', '.join(raw_datasets_features)}."
+         )
+
+     # 5. Load pretrained model, tokenizer, and feature extractor
+     #
+     # Distributed training:
+     # The .from_pretrained methods guarantee that only one local process can concurrently
+     # download model & vocab.
+     config = AutoConfig.from_pretrained(
+         model_args.config_name if model_args.config_name else model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+
+     config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
+
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+     tokenizer = AutoTokenizer.from_pretrained(
+         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         use_fast=model_args.use_fast_tokenizer,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+     model = AutoModelForSpeechSeq2Seq.from_pretrained(
+         model_args.model_name_or_path,
+         config=config,
+         cache_dir=model_args.cache_dir,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+
+     if model.config.decoder_start_token_id is None:
+         raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
+
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     if model_args.freeze_encoder:
+         model.freeze_encoder()
+         model.model.encoder.gradient_checkpointing = False
+
+     if data_args.language is not None:
+         # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
+         tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
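+         # e.g. with --language zh --task transcribe (illustrative values), generation starts
+         # from the prompt <|startoftranscript|><|zh|><|transcribe|> (see added_tokens.json).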
+
+     # 6. Resample speech dataset if necessary
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # 7. Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     text_column_name = data_args.text_column_name
+     model_input_name = feature_extractor.model_input_names[0]
+     do_lower_case = data_args.do_lower_case
+     do_remove_punctuation = data_args.do_remove_punctuation
+
+     punctuation_to_remove = string.punctuation.replace("'", "")  # don't remove apostrophes
+     punctuation_to_remove_regex = f"[{''.join(punctuation_to_remove)}]"
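+     # Illustrative effect of the pattern (hypothetical string, not from the dataset):
+     #     re.sub(punctuation_to_remove_regex, " ", "don't stop, now!").strip()
+     #     -> "don't stop  now"  (apostrophe kept; the double space is collapsed later)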
+
+     if data_args.max_train_samples is not None:
+         raw_datasets["train"] = raw_datasets["train"].take(data_args.max_train_samples)
+
+     if data_args.max_eval_samples is not None:
+         raw_datasets["eval"] = raw_datasets["eval"].take(data_args.max_eval_samples)
+
+     def prepare_dataset(batch):
+         # process audio
+         sample = batch[audio_column_name]
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         # process audio length
+         batch[model_input_name] = inputs.get(model_input_name)[0]
+         batch["input_length"] = len(sample["array"])
+
+         # process targets: keep only the characters, dropping the romanization after '('
+         input_str = batch[text_column_name].lower().split('(')[0] if do_lower_case else batch[text_column_name].split('(')[0]
+
+         if do_remove_punctuation:
+             input_str = re.sub(punctuation_to_remove_regex, " ", input_str).strip()
+             input_str = re.sub(r"\s\s+", " ", input_str)
+         batch["labels"] = tokenizer(input_str).input_ids
+         return batch
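+     # Assuming transcripts of the shape "漢字(romanization)", as the split('(') above implies,
+     # a hypothetical example: "逐家好(ta̍k-ke-hó)" -> labels are tokenized from "逐家好".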
+
+     with training_args.main_process_first(desc="dataset map pre-processing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=raw_datasets_features,
+         ).with_format("torch")
+
+     if training_args.do_train:
+         vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
+             buffer_size=data_args.shuffle_buffer_size,
+             seed=training_args.seed,
+         )
+
+     # filter training data that is shorter than min_input_length or longer than
+     # max_input_length
+     def is_audio_in_length_range(length):
+         return min_input_length < length < max_input_length
+
+     if training_args.do_train:
+         vectorized_datasets["train"] = vectorized_datasets["train"].filter(
+             is_audio_in_length_range,
+             input_columns=["input_length"],
+         )
+
+     # 8. Load Metric
+     cer_metric = evaluate.load("cer")
+     wer_metric = evaluate.load("wer")
+     do_normalize_eval = data_args.do_normalize_eval
+
+     def compute_metrics(pred):
+         pred_ids = pred.predictions
+
+         # replace the loss-masking value with the pad token before decoding
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True, normalize=do_normalize_eval)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True, normalize=do_normalize_eval)
+         cer = 100 * cer_metric.compute(predictions=pred_str, references=label_str)
+
+         # split CJK characters apart so that WER treats each character as a "word"
+         pred_str = [ryNormText.separ_char_word(x) for x in pred_str]
+         label_str = [ryNormText.separ_char_word(x) for x in label_str]
+         # dump the decoded strings for offline inspection
+         with open('pred.txt', 'w') as p:
+             p.write(f'{pred_str=}')
+         with open('label.txt', 'w') as p:
+             p.write(f'{label_str=}')
+         wer = 100 * wer_metric.compute(predictions=pred_str, references=label_str)
+         return {"wer": wer, "cer": cer}
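+     # A sketch of why the character split matters (illustrative strings): without it, WER on
+     # "我是學生" vs "我是老師" compares two single "words" and scores 100%; after
+     # separ_char_word they become "我 是 學 生" vs "我 是 老 師" and WER is 50%, in line
+     # with the per-character view that CER gives.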
+
+     # 9. Create a single speech processor
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     processor = AutoProcessor.from_pretrained(training_args.output_dir)
+
+     # 10. Define data collator
+     data_collator = DataCollatorSpeechSeq2SeqWithPadding(
+         processor=processor,
+         decoder_start_token_id=model.config.decoder_start_token_id,
+     )
+
+     # 11. Configure Trainer
+     # Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch
+     class ShuffleCallback(TrainerCallback):
+         def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
+             if isinstance(train_dataloader.dataset, IterableDatasetShard):
+                 pass  # set_epoch() is handled by the Trainer
+             elif isinstance(train_dataloader.dataset, IterableDataset):
+                 train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1)
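+     # Why the callback matters (a sketch, per the `datasets` docs): a streaming dataset
+     # shuffles with a fixed-size buffer, and set_epoch() reseeds that buffer (the effective
+     # seed is seed + epoch), so every pass over the stream sees a different example order.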
+
+     # Initialize Trainer
+     trainer = Seq2SeqTrainer(
+         model=model,
+         args=training_args,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+         data_collator=data_collator,
+         compute_metrics=compute_metrics if training_args.predict_with_generate else None,
+         callbacks=[ShuffleCallback()],
+     )
+
+     # 12. Training
+     if training_args.do_train:
+         checkpoint = None
+         if training_args.resume_from_checkpoint is not None:
+             checkpoint = training_args.resume_from_checkpoint
+         elif last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()  # also saves the feature extractor for easy upload
+
+         metrics = train_result.metrics
+         if data_args.max_train_samples:
+             metrics["train_samples"] = data_args.max_train_samples
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # 13. Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate(
+             metric_key_prefix="eval",
+             max_length=training_args.generation_max_length,
+             num_beams=training_args.generation_num_beams,
+         )
+         if data_args.max_eval_samples:
+             metrics["eval_samples"] = data_args.max_eval_samples
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # 14. Write Training Stats
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": "automatic-speech-recognition",
+         "tags": "whisper-event",
+     }
+     if data_args.dataset_name is not None:
+         kwargs["dataset_tags"] = data_args.dataset_name
+         if data_args.dataset_config_name is not None:
+             kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+         else:
+             kwargs["dataset"] = data_args.dataset_name
+         if "common_voice" in data_args.dataset_name:
+             kwargs["language"] = data_args.dataset_config_name
+         if model_args.model_index_name is not None:
+             kwargs["model_name"] = model_args.model_index_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
runs/May27_19-41-29_DESKTOP-EVSD799/1685187714.55857/events.out.tfevents.1685187714.DESKTOP-EVSD799.25803.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5500d1e2c8d5578bb18571a8d4d6cc288213b57d4c9b90efcdd866351cf4d973
+ size 5980
runs/May27_19-41-29_DESKTOP-EVSD799/events.out.tfevents.1685187714.DESKTOP-EVSD799.25803.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff47ac59dcaf1ee4861f352b3a9fb2a99f19eabab7a0ac5819b82a0ea506daed
+ size 38140
runs/May27_19-41-29_DESKTOP-EVSD799/events.out.tfevents.1685191671.DESKTOP-EVSD799.25803.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83d1f7e287620ce4d434d4b5130b733d03a4bc3fb0402bef362ca2334752b572
+ size 405
ryNormText.py ADDED
@@ -0,0 +1,81 @@
+ import string, re, opencc
+
+
+ # translation table: fullwidth characters -> halfwidth (ASCII) equivalents
+ 全型2半型 = str.maketrans(
+     '　０１２３４５６７８９'
+     'ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ'
+     'ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ'
+     '！゛＃＄％＆（）＊＋、ー。／：；〈＝〉？＠［］＾＿‘｛｜｝～',
+     ' 0123456789'
+     'abcdefghijklmnopqrstuvwxyz'
+     'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+     '!"#$%&()*+,-./:;<=>?@[]^_`{|}~'
+ )
+
+
+ def 把怪字修進unicode(xStr):
+     # map stray private-use codepoints back into proper Unicode (e.g. U+F5C3 -> '𪜶')
+     xStr = re.sub('\uf5c3', '𪜶', xStr)
+     return xStr
+
+
+ def ryNormText(s):
+     """
+     <<<None>>>          ==> delete
+     punctuation         ==> space
+     consecutive spaces  ==> one space
+     remaining spaces    ==> delete
+     simplified Chinese  ==> traditional Chinese
+     """
+
+     punc1 = string.punctuation  # English punctuation
+     punc2 = '。,﹐、!?::;『』「」…,\n'  # Chinese punctuation; not yet exhaustive!!
+
+     punc = f"[{punc1}{punc2}]"  # regular-expression pattern
+
+     # <<<None>>> ==> delete
+     s = re.sub('<<<None>>>', '', s)
+
+     # punctuation ==> space
+     s = re.sub(punc, ' ', s)
+
+     # consecutive spaces ==> one space
+     s = re.sub('[ ]+', ' ', s)
+
+     # spaces ==> delete
+     s = re.sub(' ', '', s)
+
+     s = 把怪字修進unicode(s)
+
+     # simplified ==> traditional (Taiwan standard)
+     s = opencc.OpenCC('s2tw').convert(s)
+
+     return s
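+
+ # Illustrative round trip (hypothetical input):
+ #     ryNormText('<<<None>>>简体,字!')  ->  '簡體字'
+ # the placeholder is dropped, punctuation becomes spaces, the spaces are deleted again,
+ # and simplified characters are converted to traditional (s2tw).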
+
+ import unicodedata
+
+
+ def separ_char_word(inputString='我是呂仁園 Renyuan Lyu'):
+
+     inputString = 把怪字修進unicode(inputString)
+
+     y = ''
+     for x in inputString:
+         y += x
+         try:
+             un = unicodedata.name(x)
+             if un.startswith('CJK'):
+                 # CJK character: append a space so that each character stands alone
+                 y += ' '
+         except Exception as ex:
+             # characters without a Unicode name (rare); pad the string and report
+             y = ' ' + y + ' '
+             print(f'ryErr:(separ_char_word:){ex= }\t【{x= }】\t{inputString= }')
+
+     y = re.sub('[ ]+', ' ', y)  # keep only one space where several run together
+     return y
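+
+ # e.g. separ_char_word('大家好 hello') -> '大 家 好 hello': each CJK character becomes its
+ # own token while Latin words stay whole, which is what compute_metrics feeds to WER.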
+
+ # q = separ_char_word('大家好 da jia hao 我是呂仁園 I am Renyuan Lyu')
+ # print(q)
special_tokens_map.json ADDED
@@ -0,0 +1,133 @@
+ {
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "<|startoftranscript|>",
+     "<|en|>",
+     "<|zh|>",
+     "<|de|>",
+     "<|es|>",
+     "<|ru|>",
+     "<|ko|>",
+     "<|fr|>",
+     "<|ja|>",
+     "<|pt|>",
+     "<|tr|>",
+     "<|pl|>",
+     "<|ca|>",
+     "<|nl|>",
+     "<|ar|>",
+     "<|sv|>",
+     "<|it|>",
+     "<|id|>",
+     "<|hi|>",
+     "<|fi|>",
+     "<|vi|>",
+     "<|he|>",
+     "<|uk|>",
+     "<|el|>",
+     "<|ms|>",
+     "<|cs|>",
+     "<|ro|>",
+     "<|da|>",
+     "<|hu|>",
+     "<|ta|>",
+     "<|no|>",
+     "<|th|>",
+     "<|ur|>",
+     "<|hr|>",
+     "<|bg|>",
+     "<|lt|>",
+     "<|la|>",
+     "<|mi|>",
+     "<|ml|>",
+     "<|cy|>",
+     "<|sk|>",
+     "<|te|>",
+     "<|fa|>",
+     "<|lv|>",
+     "<|bn|>",
+     "<|sr|>",
+     "<|az|>",
+     "<|sl|>",
+     "<|kn|>",
+     "<|et|>",
+     "<|mk|>",
+     "<|br|>",
+     "<|eu|>",
+     "<|is|>",
+     "<|hy|>",
+     "<|ne|>",
+     "<|mn|>",
+     "<|bs|>",
+     "<|kk|>",
+     "<|sq|>",
+     "<|sw|>",
+     "<|gl|>",
+     "<|mr|>",
+     "<|pa|>",
+     "<|si|>",
+     "<|km|>",
+     "<|sn|>",
+     "<|yo|>",
+     "<|so|>",
+     "<|af|>",
+     "<|oc|>",
+     "<|ka|>",
+     "<|be|>",
+     "<|tg|>",
+     "<|sd|>",
+     "<|gu|>",
+     "<|am|>",
+     "<|yi|>",
+     "<|lo|>",
+     "<|uz|>",
+     "<|fo|>",
+     "<|ht|>",
+     "<|ps|>",
+     "<|tk|>",
+     "<|nn|>",
+     "<|mt|>",
+     "<|sa|>",
+     "<|lb|>",
+     "<|my|>",
+     "<|bo|>",
+     "<|tl|>",
+     "<|mg|>",
+     "<|as|>",
+     "<|tt|>",
+     "<|haw|>",
+     "<|ln|>",
+     "<|ha|>",
+     "<|ba|>",
+     "<|jw|>",
+     "<|su|>",
+     "<|translate|>",
+     "<|transcribe|>",
+     "<|startoflm|>",
+     "<|startofprev|>",
+     "<|nocaptions|>",
+     "<|notimestamps|>"
+   ],
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 1024,
+   "pad_token": null,
+   "processor_class": "WhisperProcessor",
+   "return_attention_mask": false,
+   "special_tokens_map_file": null,
+   "tokenizer_class": "WhisperTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 6.05,
+   "train_loss": 0.3284327008752851,
+   "train_runtime": 3725.671,
+   "train_samples_per_second": 2.684,
+   "train_steps_per_second": 1.342
+ }
trainer_state.json ADDED
@@ -0,0 +1,1275 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 6.0532,
5
+ "global_step": 5000,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.01,
12
+ "learning_rate": 4.2000000000000006e-07,
13
+ "loss": 2.3482,
14
+ "step": 25
15
+ },
16
+ {
17
+ "epoch": 0.01,
18
+ "learning_rate": 9.200000000000001e-07,
19
+ "loss": 2.3433,
20
+ "step": 50
21
+ },
22
+ {
23
+ "epoch": 0.01,
24
+ "learning_rate": 1.42e-06,
25
+ "loss": 1.761,
26
+ "step": 75
27
+ },
28
+ {
29
+ "epoch": 0.02,
30
+ "learning_rate": 1.9200000000000003e-06,
31
+ "loss": 1.5988,
32
+ "step": 100
33
+ },
34
+ {
35
+ "epoch": 0.03,
36
+ "learning_rate": 2.4000000000000003e-06,
37
+ "loss": 1.4245,
38
+ "step": 125
39
+ },
40
+ {
41
+ "epoch": 0.03,
42
+ "learning_rate": 2.9e-06,
43
+ "loss": 1.5665,
44
+ "step": 150
45
+ },
46
+ {
47
+ "epoch": 0.04,
48
+ "learning_rate": 3.4000000000000005e-06,
49
+ "loss": 1.4588,
50
+ "step": 175
51
+ },
52
+ {
53
+ "epoch": 0.04,
54
+ "learning_rate": 3.900000000000001e-06,
55
+ "loss": 1.2609,
56
+ "step": 200
57
+ },
58
+ {
59
+ "epoch": 0.04,
60
+ "learning_rate": 4.4e-06,
61
+ "loss": 1.2301,
62
+ "step": 225
63
+ },
64
+ {
65
+ "epoch": 0.05,
66
+ "learning_rate": 4.9000000000000005e-06,
67
+ "loss": 1.4295,
68
+ "step": 250
69
+ },
70
+ {
71
+ "epoch": 0.06,
72
+ "learning_rate": 5.400000000000001e-06,
73
+ "loss": 1.415,
74
+ "step": 275
75
+ },
76
+ {
77
+ "epoch": 0.06,
78
+ "learning_rate": 5.9e-06,
79
+ "loss": 1.1804,
80
+ "step": 300
81
+ },
82
+ {
83
+ "epoch": 0.07,
84
+ "learning_rate": 6.4000000000000006e-06,
85
+ "loss": 1.3484,
86
+ "step": 325
87
+ },
88
+ {
89
+ "epoch": 0.07,
90
+ "learning_rate": 6.9e-06,
91
+ "loss": 1.3593,
92
+ "step": 350
93
+ },
94
+ {
95
+ "epoch": 0.07,
96
+ "learning_rate": 7.4e-06,
97
+ "loss": 1.179,
98
+ "step": 375
99
+ },
100
+ {
101
+ "epoch": 0.08,
102
+ "learning_rate": 7.9e-06,
103
+ "loss": 1.3824,
104
+ "step": 400
105
+ },
106
+ {
107
+ "epoch": 0.09,
108
+ "learning_rate": 8.400000000000001e-06,
109
+ "loss": 1.1744,
110
+ "step": 425
111
+ },
112
+ {
113
+ "epoch": 0.09,
114
+ "learning_rate": 8.900000000000001e-06,
115
+ "loss": 1.2079,
116
+ "step": 450
117
+ },
118
+ {
119
+ "epoch": 0.1,
120
+ "learning_rate": 9.4e-06,
121
+ "loss": 1.2476,
122
+ "step": 475
123
+ },
124
+ {
125
+ "epoch": 0.1,
126
+ "learning_rate": 9.9e-06,
127
+ "loss": 1.3404,
128
+ "step": 500
129
+ },
130
+ {
131
+ "epoch": 0.1,
132
+ "learning_rate": 9.955555555555556e-06,
133
+ "loss": 1.0947,
134
+ "step": 525
135
+ },
136
+ {
137
+ "epoch": 0.11,
138
+ "learning_rate": 9.9e-06,
139
+ "loss": 1.3787,
140
+ "step": 550
141
+ },
142
+ {
143
+ "epoch": 0.12,
144
+ "learning_rate": 9.844444444444446e-06,
145
+ "loss": 1.0917,
146
+ "step": 575
147
+ },
148
+ {
149
+ "epoch": 0.12,
150
+ "learning_rate": 9.78888888888889e-06,
151
+ "loss": 1.1884,
152
+ "step": 600
153
+ },
154
+ {
155
+ "epoch": 0.12,
156
+ "learning_rate": 9.733333333333334e-06,
157
+ "loss": 1.2814,
158
+ "step": 625
159
+ },
160
+ {
161
+ "epoch": 0.13,
162
+ "learning_rate": 9.677777777777778e-06,
163
+ "loss": 1.1693,
164
+ "step": 650
165
+ },
166
+ {
167
+ "epoch": 0.14,
168
+ "learning_rate": 9.622222222222222e-06,
169
+ "loss": 1.3852,
170
+ "step": 675
171
+ },
172
+ {
173
+ "epoch": 0.14,
174
+ "learning_rate": 9.566666666666668e-06,
175
+ "loss": 1.0322,
176
+ "step": 700
177
+ },
178
+ {
179
+ "epoch": 0.14,
180
+ "learning_rate": 9.511111111111112e-06,
181
+ "loss": 1.1455,
182
+ "step": 725
183
+ },
184
+ {
185
+ "epoch": 0.15,
186
+ "learning_rate": 9.455555555555557e-06,
187
+ "loss": 0.9578,
188
+ "step": 750
189
+ },
190
+ {
191
+ "epoch": 0.15,
192
+ "learning_rate": 9.4e-06,
193
+ "loss": 1.2149,
194
+ "step": 775
195
+ },
196
+ {
197
+ "epoch": 1.0,
198
+ "learning_rate": 9.344444444444446e-06,
199
+ "loss": 0.9243,
200
+ "step": 800
201
+ },
202
+ {
203
+ "epoch": 1.01,
204
+ "learning_rate": 9.28888888888889e-06,
205
+ "loss": 0.7373,
206
+ "step": 825
207
+ },
208
+ {
209
+ "epoch": 1.01,
210
+ "learning_rate": 9.233333333333334e-06,
211
+ "loss": 0.7298,
212
+ "step": 850
213
+ },
214
+ {
215
+ "epoch": 1.02,
216
+ "learning_rate": 9.17777777777778e-06,
217
+ "loss": 0.7556,
218
+ "step": 875
219
+ },
220
+ {
221
+ "epoch": 1.02,
222
+ "learning_rate": 9.122222222222223e-06,
223
+ "loss": 0.6975,
224
+ "step": 900
225
+ },
226
+ {
227
+ "epoch": 1.03,
228
+ "learning_rate": 9.066666666666667e-06,
229
+ "loss": 0.7299,
230
+ "step": 925
231
+ },
232
+ {
233
+ "epoch": 1.03,
234
+ "learning_rate": 9.011111111111111e-06,
235
+ "loss": 0.6555,
236
+ "step": 950
237
+ },
238
+ {
239
+ "epoch": 1.04,
240
+ "learning_rate": 8.955555555555555e-06,
241
+ "loss": 0.6755,
242
+ "step": 975
243
+ },
244
+ {
245
+ "epoch": 1.04,
246
+ "learning_rate": 8.900000000000001e-06,
247
+ "loss": 0.5832,
248
+ "step": 1000
249
+ },
250
+ {
251
+ "epoch": 1.04,
252
+ "eval_cer": 56.474494415937215,
253
+ "eval_loss": 1.0634080171585083,
254
+ "eval_runtime": 219.1746,
255
+ "eval_samples_per_second": 4.499,
256
+ "eval_steps_per_second": 2.249,
257
+ "eval_wer": 56.30534351145038,
258
+ "step": 1000
259
+ },
260
+ {
261
+ "epoch": 1.05,
262
+ "learning_rate": 8.844444444444445e-06,
263
+ "loss": 0.5757,
264
+ "step": 1025
265
+ },
266
+ {
267
+ "epoch": 1.05,
268
+ "learning_rate": 8.788888888888891e-06,
269
+ "loss": 0.6065,
270
+ "step": 1050
271
+ },
272
+ {
273
+ "epoch": 1.06,
274
+ "learning_rate": 8.733333333333333e-06,
275
+ "loss": 0.5487,
276
+ "step": 1075
277
+ },
278
+ {
279
+ "epoch": 1.06,
280
+ "learning_rate": 8.677777777777779e-06,
281
+ "loss": 0.6339,
282
+ "step": 1100
283
+ },
284
+ {
285
+ "epoch": 1.07,
286
+ "learning_rate": 8.622222222222223e-06,
287
+ "loss": 0.5477,
288
+ "step": 1125
289
+ },
290
+ {
291
+ "epoch": 1.07,
292
+ "learning_rate": 8.566666666666667e-06,
293
+ "loss": 0.597,
294
+ "step": 1150
295
+ },
296
+ {
297
+ "epoch": 1.08,
298
+ "learning_rate": 8.511111111111113e-06,
299
+ "loss": 0.5394,
300
+ "step": 1175
301
+ },
302
+ {
303
+ "epoch": 1.08,
304
+ "learning_rate": 8.455555555555555e-06,
305
+ "loss": 0.52,
306
+ "step": 1200
307
+ },
308
+ {
309
+ "epoch": 1.09,
310
+ "learning_rate": 8.400000000000001e-06,
311
+ "loss": 0.6005,
312
+ "step": 1225
313
+ },
314
+ {
315
+ "epoch": 1.09,
316
+ "learning_rate": 8.344444444444445e-06,
317
+ "loss": 0.4988,
318
+ "step": 1250
319
+ },
320
+ {
321
+ "epoch": 1.1,
322
+ "learning_rate": 8.288888888888889e-06,
323
+ "loss": 0.5381,
324
+ "step": 1275
325
+ },
326
+ {
327
+ "epoch": 1.1,
328
+ "learning_rate": 8.233333333333335e-06,
329
+ "loss": 0.5005,
330
+ "step": 1300
331
+ },
332
+ {
333
+ "epoch": 1.11,
334
+ "learning_rate": 8.177777777777779e-06,
335
+ "loss": 0.3982,
336
+ "step": 1325
337
+ },
338
+ {
339
+ "epoch": 1.11,
340
+ "learning_rate": 8.122222222222223e-06,
341
+ "loss": 0.5537,
342
+ "step": 1350
343
+ },
344
+ {
345
+ "epoch": 1.12,
346
+ "learning_rate": 8.066666666666667e-06,
347
+ "loss": 0.3985,
348
+ "step": 1375
349
+ },
350
+ {
351
+ "epoch": 1.12,
352
+ "learning_rate": 8.011111111111113e-06,
353
+ "loss": 0.3653,
354
+ "step": 1400
355
+ },
356
+ {
357
+ "epoch": 1.13,
358
+ "learning_rate": 7.955555555555557e-06,
359
+ "loss": 0.4001,
360
+ "step": 1425
361
+ },
362
+ {
363
+ "epoch": 1.13,
364
+ "learning_rate": 7.9e-06,
365
+ "loss": 0.4278,
366
+ "step": 1450
367
+ },
368
+ {
369
+ "epoch": 1.14,
370
+ "learning_rate": 7.844444444444446e-06,
371
+ "loss": 0.3982,
372
+ "step": 1475
373
+ },
374
+ {
375
+ "epoch": 1.14,
376
+ "learning_rate": 7.788888888888889e-06,
377
+ "loss": 0.3967,
378
+ "step": 1500
379
+ },
380
+ {
381
+ "epoch": 1.15,
382
+ "learning_rate": 7.733333333333334e-06,
383
+ "loss": 0.4366,
384
+ "step": 1525
385
+ },
386
+ {
387
+ "epoch": 1.15,
388
+ "learning_rate": 7.677777777777778e-06,
389
+ "loss": 0.4665,
390
+ "step": 1550
391
+ },
392
+ {
393
+ "epoch": 1.16,
394
+ "learning_rate": 7.622222222222223e-06,
395
+ "loss": 0.4292,
396
+ "step": 1575
397
+ },
398
+ {
399
+ "epoch": 2.0,
400
+ "learning_rate": 7.566666666666667e-06,
401
+ "loss": 0.2518,
402
+ "step": 1600
403
+ },
404
+ {
405
+ "epoch": 2.01,
406
+ "learning_rate": 7.511111111111111e-06,
407
+ "loss": 0.1433,
408
+ "step": 1625
409
+ },
410
+ {
411
+ "epoch": 2.01,
412
+ "learning_rate": 7.455555555555556e-06,
413
+ "loss": 0.1932,
414
+ "step": 1650
415
+ },
416
+ {
417
+ "epoch": 2.02,
418
+ "learning_rate": 7.4e-06,
419
+ "loss": 0.1875,
420
+ "step": 1675
421
+ },
422
+ {
423
+ "epoch": 2.02,
424
+ "learning_rate": 7.344444444444445e-06,
425
+ "loss": 0.2236,
426
+ "step": 1700
427
+ },
428
+ {
429
+ "epoch": 2.03,
430
+ "learning_rate": 7.28888888888889e-06,
431
+ "loss": 0.2445,
432
+ "step": 1725
433
+ },
434
+ {
435
+ "epoch": 2.03,
436
+ "learning_rate": 7.233333333333334e-06,
437
+ "loss": 0.1751,
438
+ "step": 1750
439
+ },
440
+ {
441
+ "epoch": 2.04,
442
+ "learning_rate": 7.177777777777778e-06,
443
+ "loss": 0.1976,
444
+ "step": 1775
445
+ },
446
+ {
447
+ "epoch": 2.04,
448
+ "learning_rate": 7.122222222222222e-06,
449
+ "loss": 0.1307,
450
+ "step": 1800
451
+ },
452
+ {
453
+ "epoch": 2.05,
454
+ "learning_rate": 7.066666666666667e-06,
455
+ "loss": 0.1849,
456
+ "step": 1825
457
+ },
458
+ {
459
+ "epoch": 2.05,
460
+ "learning_rate": 7.011111111111112e-06,
461
+ "loss": 0.1233,
462
+ "step": 1850
463
+ },
464
+ {
465
+ "epoch": 2.06,
466
+ "learning_rate": 6.955555555555557e-06,
467
+ "loss": 0.136,
468
+ "step": 1875
469
+ },
470
+ {
471
+ "epoch": 2.06,
472
+ "learning_rate": 6.9e-06,
473
+ "loss": 0.1014,
474
+ "step": 1900
475
+ },
476
+ {
477
+ "epoch": 2.07,
478
+ "learning_rate": 6.844444444444445e-06,
479
+ "loss": 0.1366,
480
+ "step": 1925
481
+ },
482
+ {
483
+ "epoch": 2.07,
484
+ "learning_rate": 6.788888888888889e-06,
485
+ "loss": 0.2011,
486
+ "step": 1950
487
+ },
488
+ {
489
+ "epoch": 2.08,
490
+ "learning_rate": 6.733333333333334e-06,
491
+ "loss": 0.0962,
492
+ "step": 1975
493
+ },
494
+ {
495
+ "epoch": 2.08,
496
+ "learning_rate": 6.677777777777779e-06,
497
+ "loss": 0.1467,
498
+ "step": 2000
499
+ },
500
+ {
501
+ "epoch": 2.08,
502
+ "eval_cer": 51.01116812556595,
503
+ "eval_loss": 1.040711760520935,
504
+ "eval_runtime": 219.0004,
505
+ "eval_samples_per_second": 4.502,
506
+ "eval_steps_per_second": 2.251,
507
+ "eval_wer": 50.961832061068705,
508
+ "step": 2000
509
+ },
510
+ {
511
+ "epoch": 2.09,
512
+ "learning_rate": 6.6222222222222236e-06,
513
+ "loss": 0.1376,
514
+ "step": 2025
515
+ },
516
+ {
517
+ "epoch": 2.09,
518
+ "learning_rate": 6.566666666666667e-06,
519
+ "loss": 0.1322,
520
+ "step": 2050
521
+ },
522
+ {
523
+ "epoch": 2.1,
524
+ "learning_rate": 6.511111111111112e-06,
525
+ "loss": 0.0889,
526
+ "step": 2075
527
+ },
528
+ {
529
+ "epoch": 2.1,
530
+ "learning_rate": 6.455555555555556e-06,
531
+ "loss": 0.0725,
532
+ "step": 2100
533
+ },
534
+ {
535
+ "epoch": 2.11,
536
+ "learning_rate": 6.4000000000000006e-06,
537
+ "loss": 0.084,
538
+ "step": 2125
539
+ },
540
+ {
541
+ "epoch": 2.11,
542
+ "learning_rate": 6.3444444444444454e-06,
543
+ "loss": 0.078,
544
+ "step": 2150
545
+ },
546
+ {
547
+ "epoch": 2.12,
548
+ "learning_rate": 6.28888888888889e-06,
549
+ "loss": 0.0666,
550
+ "step": 2175
551
+ },
552
+ {
553
+ "epoch": 2.12,
554
+ "learning_rate": 6.2333333333333335e-06,
555
+ "loss": 0.1335,
556
+ "step": 2200
557
+ },
558
+ {
559
+ "epoch": 2.13,
560
+ "learning_rate": 6.177777777777778e-06,
561
+ "loss": 0.0919,
562
+ "step": 2225
563
+ },
564
+ {
565
+ "epoch": 2.13,
566
+ "learning_rate": 6.1222222222222224e-06,
567
+ "loss": 0.0966,
568
+ "step": 2250
569
+ },
570
+ {
571
+ "epoch": 2.14,
572
+ "learning_rate": 6.066666666666667e-06,
573
+ "loss": 0.0947,
574
+ "step": 2275
575
+ },
576
+ {
577
+ "epoch": 2.14,
578
+ "learning_rate": 6.011111111111112e-06,
579
+ "loss": 0.0731,
580
+ "step": 2300
581
+ },
582
+ {
583
+ "epoch": 2.15,
584
+ "learning_rate": 5.955555555555555e-06,
585
+ "loss": 0.1585,
586
+ "step": 2325
587
+ },
588
+ {
589
+ "epoch": 2.15,
590
+ "learning_rate": 5.9e-06,
591
+ "loss": 0.1475,
592
+ "step": 2350
593
+ },
594
+ {
595
+ "epoch": 3.0,
596
+ "learning_rate": 5.844444444444445e-06,
597
+ "loss": 0.0635,
598
+ "step": 2375
599
+ },
600
+ {
601
+ "epoch": 3.01,
602
+ "learning_rate": 5.788888888888889e-06,
603
+ "loss": 0.0489,
604
+ "step": 2400
605
+ },
606
+ {
607
+ "epoch": 3.01,
608
+ "learning_rate": 5.733333333333334e-06,
609
+ "loss": 0.0325,
610
+ "step": 2425
611
+ },
612
+ {
613
+ "epoch": 3.02,
614
+ "learning_rate": 5.677777777777779e-06,
615
+ "loss": 0.0661,
616
+ "step": 2450
617
+ },
618
+ {
619
+ "epoch": 3.02,
620
+ "learning_rate": 5.622222222222222e-06,
621
+ "loss": 0.0433,
622
+ "step": 2475
623
+ },
624
+ {
625
+ "epoch": 3.03,
626
+ "learning_rate": 5.566666666666667e-06,
627
+ "loss": 0.0215,
628
+ "step": 2500
629
+ },
630
+ {
631
+ "epoch": 3.03,
632
+ "learning_rate": 5.511111111111112e-06,
633
+ "loss": 0.0477,
634
+ "step": 2525
635
+ },
636
+ {
637
+ "epoch": 3.04,
638
+ "learning_rate": 5.455555555555556e-06,
639
+ "loss": 0.0315,
640
+ "step": 2550
641
+ },
642
+ {
643
+ "epoch": 3.04,
644
+ "learning_rate": 5.400000000000001e-06,
645
+ "loss": 0.0798,
646
+ "step": 2575
647
+ },
648
+ {
649
+ "epoch": 3.05,
650
+ "learning_rate": 5.344444444444446e-06,
651
+ "loss": 0.0298,
652
+ "step": 2600
653
+ },
654
+ {
655
+ "epoch": 3.05,
656
+ "learning_rate": 5.288888888888889e-06,
657
+ "loss": 0.033,
658
+ "step": 2625
659
+ },
660
+ {
661
+ "epoch": 3.06,
662
+ "learning_rate": 5.233333333333334e-06,
663
+ "loss": 0.0151,
664
+ "step": 2650
665
+ },
666
+ {
667
+ "epoch": 3.06,
668
+ "learning_rate": 5.177777777777779e-06,
669
+ "loss": 0.0465,
670
+ "step": 2675
671
+ },
672
+ {
673
+ "epoch": 3.07,
674
+ "learning_rate": 5.122222222222223e-06,
675
+ "loss": 0.0138,
676
+ "step": 2700
677
+ },
678
+ {
679
+ "epoch": 3.07,
680
+ "learning_rate": 5.0666666666666676e-06,
681
+ "loss": 0.0293,
682
+ "step": 2725
683
+ },
684
+ {
685
+ "epoch": 3.08,
686
+ "learning_rate": 5.011111111111111e-06,
687
+ "loss": 0.0296,
688
+ "step": 2750
689
+ },
690
+ {
691
+ "epoch": 3.08,
692
+ "learning_rate": 4.9555555555555565e-06,
693
+ "loss": 0.0336,
694
+ "step": 2775
695
+ },
696
+ {
697
+ "epoch": 3.09,
698
+ "learning_rate": 4.9000000000000005e-06,
699
+ "loss": 0.0377,
700
+ "step": 2800
701
+ },
702
+ {
703
+ "epoch": 3.09,
704
+ "learning_rate": 4.8444444444444446e-06,
705
+ "loss": 0.0345,
706
+ "step": 2825
707
+ },
708
+ {
709
+ "epoch": 3.1,
710
+ "learning_rate": 4.7888888888888894e-06,
711
+ "loss": 0.0129,
712
+ "step": 2850
713
+ },
714
+ {
715
+ "epoch": 3.1,
716
+ "learning_rate": 4.7333333333333335e-06,
717
+ "loss": 0.0141,
718
+ "step": 2875
719
+ },
720
+ {
721
+ "epoch": 3.11,
722
+ "learning_rate": 4.677777777777778e-06,
723
+ "loss": 0.0165,
724
+ "step": 2900
725
+ },
726
+ {
727
+ "epoch": 3.11,
728
+ "learning_rate": 4.622222222222222e-06,
729
+ "loss": 0.0318,
730
+ "step": 2925
731
+ },
732
+ {
733
+ "epoch": 3.12,
734
+ "learning_rate": 4.566666666666667e-06,
735
+ "loss": 0.0161,
736
+ "step": 2950
737
+ },
738
+ {
739
+ "epoch": 3.12,
740
+ "learning_rate": 4.511111111111111e-06,
741
+ "loss": 0.028,
742
+ "step": 2975
743
+ },
744
+ {
745
+ "epoch": 3.13,
746
+ "learning_rate": 4.455555555555555e-06,
747
+ "loss": 0.016,
748
+ "step": 3000
749
+ },
750
+ {
751
+ "epoch": 3.13,
752
+ "eval_cer": 46.5137337760338,
753
+ "eval_loss": 1.0225664377212524,
754
+ "eval_runtime": 217.5747,
755
+ "eval_samples_per_second": 4.532,
756
+ "eval_steps_per_second": 2.266,
757
+ "eval_wer": 46.44274809160305,
758
+ "step": 3000
759
+ },
760
+ {
761
+ "epoch": 3.13,
762
+ "learning_rate": 4.4e-06,
763
+ "loss": 0.0509,
764
+ "step": 3025
765
+ },
766
+ {
767
+ "epoch": 3.14,
768
+ "learning_rate": 4.344444444444445e-06,
769
+ "loss": 0.0254,
770
+ "step": 3050
771
+ },
772
+ {
773
+ "epoch": 3.14,
774
+ "learning_rate": 4.288888888888889e-06,
775
+ "loss": 0.0075,
776
+ "step": 3075
777
+ },
778
+ {
779
+ "epoch": 3.15,
780
+ "learning_rate": 4.233333333333334e-06,
781
+ "loss": 0.0303,
782
+ "step": 3100
783
+ },
784
+ {
785
+ "epoch": 3.15,
786
+ "learning_rate": 4.177777777777778e-06,
787
+ "loss": 0.0127,
788
+ "step": 3125
789
+ },
790
+ {
791
+ "epoch": 3.16,
792
+ "learning_rate": 4.122222222222222e-06,
793
+ "loss": 0.0184,
794
+ "step": 3150
795
+ },
796
+ {
797
+ "epoch": 4.0,
798
+ "learning_rate": 4.066666666666667e-06,
799
+ "loss": 0.0311,
800
+ "step": 3175
801
+ },
802
+ {
803
+ "epoch": 4.01,
804
+ "learning_rate": 4.011111111111111e-06,
805
+ "loss": 0.0012,
806
+ "step": 3200
807
+ },
808
+ {
809
+ "epoch": 4.01,
810
+ "learning_rate": 3.955555555555556e-06,
811
+ "loss": 0.0106,
812
+ "step": 3225
813
+ },
814
+ {
815
+ "epoch": 4.02,
816
+ "learning_rate": 3.900000000000001e-06,
817
+ "loss": 0.0085,
818
+ "step": 3250
819
+ },
820
+ {
821
+ "epoch": 4.02,
822
+ "learning_rate": 3.844444444444445e-06,
823
+ "loss": 0.0016,
824
+ "step": 3275
825
+ },
826
+ {
827
+ "epoch": 4.03,
828
+ "learning_rate": 3.7888888888888893e-06,
829
+ "loss": 0.0083,
830
+ "step": 3300
831
+ },
832
+ {
833
+ "epoch": 4.03,
834
+ "learning_rate": 3.7333333333333337e-06,
835
+ "loss": 0.0099,
836
+ "step": 3325
837
+ },
838
+ {
839
+ "epoch": 4.04,
840
+ "learning_rate": 3.6777777777777778e-06,
841
+ "loss": 0.0017,
842
+ "step": 3350
843
+ },
844
+ {
845
+ "epoch": 4.04,
846
+ "learning_rate": 3.6222222222222226e-06,
847
+ "loss": 0.0097,
848
+ "step": 3375
849
+ },
850
+ {
851
+ "epoch": 4.05,
852
+ "learning_rate": 3.566666666666667e-06,
853
+ "loss": 0.0083,
854
+ "step": 3400
855
+ },
856
+ {
857
+ "epoch": 4.05,
858
+ "learning_rate": 3.511111111111111e-06,
859
+ "loss": 0.0182,
860
+ "step": 3425
861
+ },
862
+ {
863
+ "epoch": 4.06,
864
+ "learning_rate": 3.455555555555556e-06,
865
+ "loss": 0.0041,
866
+ "step": 3450
867
+ },
868
+ {
869
+ "epoch": 4.06,
870
+ "learning_rate": 3.4000000000000005e-06,
871
+ "loss": 0.0321,
872
+ "step": 3475
873
+ },
874
+ {
875
+ "epoch": 4.07,
876
+ "learning_rate": 3.3444444444444445e-06,
877
+ "loss": 0.0009,
878
+ "step": 3500
879
+ },
880
+ {
881
+ "epoch": 4.07,
882
+ "learning_rate": 3.2888888888888894e-06,
883
+ "loss": 0.0008,
884
+ "step": 3525
885
+ },
886
+ {
887
+ "epoch": 4.08,
888
+ "learning_rate": 3.2333333333333334e-06,
889
+ "loss": 0.0016,
890
+ "step": 3550
891
+ },
892
+ {
893
+ "epoch": 4.08,
894
+ "learning_rate": 3.177777777777778e-06,
895
+ "loss": 0.0368,
896
+ "step": 3575
897
+ },
898
+ {
899
+ "epoch": 4.09,
900
+ "learning_rate": 3.1222222222222228e-06,
901
+ "loss": 0.006,
902
+ "step": 3600
903
+ },
904
+ {
905
+ "epoch": 4.09,
906
+ "learning_rate": 3.066666666666667e-06,
907
+ "loss": 0.0023,
908
+ "step": 3625
909
+ },
910
+ {
911
+ "epoch": 4.1,
912
+ "learning_rate": 3.0111111111111113e-06,
913
+ "loss": 0.001,
914
+ "step": 3650
915
+ },
916
+ {
917
+ "epoch": 4.1,
918
+ "learning_rate": 2.955555555555556e-06,
919
+ "loss": 0.0012,
920
+ "step": 3675
921
+ },
922
+ {
923
+ "epoch": 4.11,
924
+ "learning_rate": 2.9e-06,
925
+ "loss": 0.0009,
926
+ "step": 3700
927
+ },
928
+ {
929
+ "epoch": 4.11,
930
+ "learning_rate": 2.8444444444444446e-06,
931
+ "loss": 0.0009,
932
+ "step": 3725
933
+ },
934
+ {
935
+ "epoch": 4.12,
936
+ "learning_rate": 2.788888888888889e-06,
937
+ "loss": 0.0005,
938
+ "step": 3750
939
+ },
940
+ {
941
+ "epoch": 4.12,
942
+ "learning_rate": 2.7333333333333336e-06,
943
+ "loss": 0.0095,
944
+ "step": 3775
945
+ },
946
+ {
947
+ "epoch": 4.13,
948
+ "learning_rate": 2.677777777777778e-06,
949
+ "loss": 0.0072,
950
+ "step": 3800
951
+ },
952
+ {
953
+ "epoch": 4.13,
954
+ "learning_rate": 2.6222222222222225e-06,
955
+ "loss": 0.0203,
956
+ "step": 3825
957
+ },
958
+ {
959
+ "epoch": 4.14,
960
+ "learning_rate": 2.566666666666667e-06,
961
+ "loss": 0.001,
962
+ "step": 3850
963
+ },
964
+ {
965
+ "epoch": 4.14,
966
+ "learning_rate": 2.5111111111111114e-06,
967
+ "loss": 0.0004,
968
+ "step": 3875
969
+ },
970
+ {
971
+ "epoch": 4.15,
972
+ "learning_rate": 2.455555555555556e-06,
973
+ "loss": 0.0224,
974
+ "step": 3900
975
+ },
976
+ {
977
+ "epoch": 4.15,
978
+ "learning_rate": 2.4000000000000003e-06,
979
+ "loss": 0.0012,
980
+ "step": 3925
981
+ },
982
+ {
983
+ "epoch": 5.0,
984
+ "learning_rate": 2.3444444444444448e-06,
985
+ "loss": 0.0002,
986
+ "step": 3950
987
+ },
988
+ {
989
+ "epoch": 5.01,
990
+ "learning_rate": 2.2888888888888892e-06,
991
+ "loss": 0.0003,
992
+ "step": 3975
993
+ },
994
+ {
995
+ "epoch": 5.01,
996
+ "learning_rate": 2.2333333333333333e-06,
997
+ "loss": 0.0001,
998
+ "step": 4000
999
+ },
1000
+ {
1001
+ "epoch": 5.01,
1002
+ "eval_cer": 45.60821008149713,
1003
+ "eval_loss": 0.9974298477172852,
1004
+ "eval_runtime": 214.4422,
1005
+ "eval_samples_per_second": 4.598,
1006
+ "eval_steps_per_second": 2.299,
1007
+ "eval_wer": 45.465648854961835,
1008
+ "step": 4000
1009
+ },
1010
+ {
1011
+ "epoch": 5.02,
1012
+ "learning_rate": 2.1777777777777777e-06,
1013
+ "loss": 0.0002,
1014
+ "step": 4025
1015
+ },
1016
+ {
1017
+ "epoch": 5.02,
1018
+ "learning_rate": 2.1222222222222226e-06,
1019
+ "loss": 0.0003,
1020
+ "step": 4050
1021
+ },
1022
+ {
1023
+ "epoch": 5.03,
1024
+ "learning_rate": 2.0666666666666666e-06,
1025
+ "loss": 0.0011,
1026
+ "step": 4075
1027
+ },
1028
+ {
1029
+ "epoch": 5.03,
1030
+ "learning_rate": 2.011111111111111e-06,
1031
+ "loss": 0.0002,
1032
+ "step": 4100
1033
+ },
1034
+ {
1035
+ "epoch": 5.04,
1036
+ "learning_rate": 1.955555555555556e-06,
1037
+ "loss": 0.0005,
1038
+ "step": 4125
1039
+ },
1040
+ {
1041
+ "epoch": 5.04,
1042
+ "learning_rate": 1.9000000000000002e-06,
1043
+ "loss": 0.0003,
1044
+ "step": 4150
1045
+ },
1046
+ {
1047
+ "epoch": 5.05,
1048
+ "learning_rate": 1.8444444444444445e-06,
1049
+ "loss": 0.0001,
1050
+ "step": 4175
1051
+ },
1052
+ {
1053
+ "epoch": 5.05,
1054
+ "learning_rate": 1.788888888888889e-06,
1055
+ "loss": 0.0002,
1056
+ "step": 4200
1057
+ },
1058
+ {
1059
+ "epoch": 5.06,
1060
+ "learning_rate": 1.7333333333333336e-06,
1061
+ "loss": 0.0018,
1062
+ "step": 4225
1063
+ },
1064
+ {
1065
+ "epoch": 5.06,
1066
+ "learning_rate": 1.6777777777777779e-06,
1067
+ "loss": 0.0002,
1068
+ "step": 4250
1069
+ },
1070
+ {
1071
+ "epoch": 5.07,
1072
+ "learning_rate": 1.6222222222222223e-06,
1073
+ "loss": 0.0001,
1074
+ "step": 4275
1075
+ },
1076
+ {
1077
+ "epoch": 5.07,
1078
+ "learning_rate": 1.566666666666667e-06,
1079
+ "loss": 0.0037,
1080
+ "step": 4300
1081
+ },
1082
+ {
1083
+ "epoch": 5.08,
1084
+ "learning_rate": 1.5111111111111112e-06,
1085
+ "loss": 0.0002,
1086
+ "step": 4325
1087
+ },
1088
+ {
1089
+ "epoch": 5.08,
1090
+ "learning_rate": 1.4555555555555557e-06,
1091
+ "loss": 0.0001,
1092
+ "step": 4350
1093
+ },
1094
+ {
1095
+ "epoch": 5.09,
1096
+ "learning_rate": 1.4000000000000001e-06,
1097
+ "loss": 0.0001,
1098
+ "step": 4375
1099
+ },
1100
+ {
1101
+ "epoch": 5.09,
1102
+ "learning_rate": 1.3444444444444446e-06,
1103
+ "loss": 0.0004,
1104
+ "step": 4400
1105
+ },
1106
+ {
1107
+ "epoch": 5.1,
1108
+ "learning_rate": 1.288888888888889e-06,
1109
+ "loss": 0.0001,
1110
+ "step": 4425
1111
+ },
1112
+ {
1113
+ "epoch": 5.1,
1114
+ "learning_rate": 1.2333333333333335e-06,
1115
+ "loss": 0.0001,
1116
+ "step": 4450
1117
+ },
1118
+ {
1119
+ "epoch": 5.11,
1120
+ "learning_rate": 1.1777777777777778e-06,
1121
+ "loss": 0.0001,
1122
+ "step": 4475
1123
+ },
1124
+ {
1125
+ "epoch": 5.11,
1126
+ "learning_rate": 1.1222222222222222e-06,
1127
+ "loss": 0.0005,
1128
+ "step": 4500
1129
+ },
1130
+ {
1131
+ "epoch": 5.12,
1132
+ "learning_rate": 1.066666666666667e-06,
1133
+ "loss": 0.0014,
1134
+ "step": 4525
1135
+ },
1136
+ {
1137
+ "epoch": 5.12,
1138
+ "learning_rate": 1.0111111111111111e-06,
1139
+ "loss": 0.0001,
1140
+ "step": 4550
1141
+ },
1142
+ {
1143
+ "epoch": 5.13,
1144
+ "learning_rate": 9.555555555555556e-07,
1145
+ "loss": 0.0022,
1146
+ "step": 4575
1147
+ },
1148
+ {
1149
+ "epoch": 5.13,
1150
+ "learning_rate": 9.000000000000001e-07,
1151
+ "loss": 0.0001,
1152
+ "step": 4600
1153
+ },
1154
+ {
1155
+ "epoch": 5.14,
1156
+ "learning_rate": 8.444444444444445e-07,
1157
+ "loss": 0.0003,
1158
+ "step": 4625
1159
+ },
1160
+ {
1161
+ "epoch": 5.14,
1162
+ "learning_rate": 7.888888888888889e-07,
1163
+ "loss": 0.0001,
1164
+ "step": 4650
1165
+ },
1166
+ {
1167
+ "epoch": 5.15,
1168
+ "learning_rate": 7.333333333333334e-07,
1169
+ "loss": 0.0003,
1170
+ "step": 4675
1171
+ },
1172
+ {
1173
+ "epoch": 5.15,
1174
+ "learning_rate": 6.777777777777779e-07,
1175
+ "loss": 0.0001,
1176
+ "step": 4700
1177
+ },
1178
+ {
1179
+ "epoch": 5.16,
1180
+ "learning_rate": 6.222222222222223e-07,
1181
+ "loss": 0.0001,
1182
+ "step": 4725
1183
+ },
1184
+ {
1185
+ "epoch": 6.0,
1186
+ "learning_rate": 5.666666666666667e-07,
1187
+ "loss": 0.0001,
1188
+ "step": 4750
1189
+ },
1190
+ {
1191
+ "epoch": 6.01,
1192
+ "learning_rate": 5.111111111111112e-07,
1193
+ "loss": 0.0001,
1194
+ "step": 4775
1195
+ },
1196
+ {
1197
+ "epoch": 6.01,
1198
+ "learning_rate": 4.5555555555555563e-07,
1199
+ "loss": 0.0001,
1200
+ "step": 4800
1201
+ },
1202
+ {
1203
+ "epoch": 6.02,
1204
+ "learning_rate": 4.0000000000000003e-07,
1205
+ "loss": 0.0001,
1206
+ "step": 4825
1207
+ },
1208
+ {
1209
+ "epoch": 6.02,
1210
+ "learning_rate": 3.444444444444445e-07,
1211
+ "loss": 0.0001,
1212
+ "step": 4850
1213
+ },
1214
+ {
1215
+ "epoch": 6.03,
1216
+ "learning_rate": 2.888888888888889e-07,
1217
+ "loss": 0.0001,
1218
+ "step": 4875
1219
+ },
1220
+ {
1221
+ "epoch": 6.03,
1222
+ "learning_rate": 2.3333333333333336e-07,
1223
+ "loss": 0.0001,
1224
+ "step": 4900
1225
+ },
1226
+ {
1227
+ "epoch": 6.04,
1228
+ "learning_rate": 1.777777777777778e-07,
1229
+ "loss": 0.0001,
1230
+ "step": 4925
1231
+ },
1232
+ {
1233
+ "epoch": 6.04,
1234
+ "learning_rate": 1.2222222222222225e-07,
1235
+ "loss": 0.0001,
1236
+ "step": 4950
1237
+ },
1238
+ {
1239
+ "epoch": 6.05,
1240
+ "learning_rate": 6.666666666666668e-08,
1241
+ "loss": 0.0001,
1242
+ "step": 4975
1243
+ },
1244
+ {
1245
+ "epoch": 6.05,
1246
+ "learning_rate": 1.1111111111111112e-08,
1247
+ "loss": 0.0001,
1248
+ "step": 5000
1249
+ },
1250
+ {
1251
+ "epoch": 6.05,
1252
+ "eval_cer": 45.36673709628735,
1253
+ "eval_loss": 0.994395911693573,
1254
+ "eval_runtime": 216.4096,
1255
+ "eval_samples_per_second": 4.556,
1256
+ "eval_steps_per_second": 2.278,
1257
+ "eval_wer": 45.2824427480916,
1258
+ "step": 5000
1259
+ },
1260
+ {
1261
+ "epoch": 6.05,
1262
+ "step": 5000,
1263
+ "total_flos": 1.019992544575488e+19,
1264
+ "train_loss": 0.3284327008752851,
1265
+ "train_runtime": 3725.671,
1266
+ "train_samples_per_second": 2.684,
1267
+ "train_steps_per_second": 1.342
1268
+ }
1269
+ ],
1270
+ "max_steps": 5000,
1271
+ "num_train_epochs": 9223372036854775807,
1272
+ "total_flos": 1.019992544575488e+19,
1273
+ "trial_name": null,
1274
+ "trial_params": null
1275
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:597458d84bfea0d6498124c77965aabd1bbaac3387cdb9ee39c08874f88302dd
+ size 3643
vocab.json ADDED
The diff for this file is too large to render. See raw diff