{
    "paper_id": "O04-2007",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T08:00:14.612240Z"
    },
    "title": "The Properties and Further Applications of Chinese Frequent Strings",
    "authors": [
        {
            "first": "Yih-Jeng",
            "middle": [],
            "last": "Lin",
            "suffix": "",
            "affiliation": {},
            "email": "yclin@ckit.edu.tw"
        },
        {
            "first": "Ming-Shing",
            "middle": [],
            "last": "Yu",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National Chung-Hsing University",
                "location": {
                    "postCode": "40227",
                    "settlement": "Taichung, Taiwan"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper reveals some important properties of CFSs and applications in Chinese natural language processing (NLP). We have previously proposed a method for extracting Chinese frequent strings that contain unknown words from a Chinese corpus [Lin and Yu 2001]. We found that CFSs contain many 4-character strings, 3-word strings, and longer n-grams. Such information can only be derived from an extremely large corpus using a traditional language model(LM). In contrast to using a traditional LM, we can achieve high precision and efficiency by using CFSs to solve Chinese toneless phoneme-to-character conversion and to correct Chinese spelling errors with a small training corpus. An accuracy rate of 92.86% was achieved for Chinese toneless phoneme-to-character conversion, and an accuracy rate of 87.32% was achieved for Chinese spelling error correction. We also attempted to assign syntactic categories to a CFS. The accuracy rate for assigning syntactic categories to the CFSs was 88.53% for outside testing when the syntactic categories of the highest level were used.",
    "pdf_parse": {
        "paper_id": "O04-2007",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper reveals some important properties of CFSs and applications in Chinese natural language processing (NLP). We have previously proposed a method for extracting Chinese frequent strings that contain unknown words from a Chinese corpus [Lin and Yu 2001]. We found that CFSs contain many 4-character strings, 3-word strings, and longer n-grams. Such information can only be derived from an extremely large corpus using a traditional language model(LM). In contrast to using a traditional LM, we can achieve high precision and efficiency by using CFSs to solve Chinese toneless phoneme-to-character conversion and to correct Chinese spelling errors with a small training corpus. An accuracy rate of 92.86% was achieved for Chinese toneless phoneme-to-character conversion, and an accuracy rate of 87.32% was achieved for Chinese spelling error correction. We also attempted to assign syntactic categories to a CFS. The accuracy rate for assigning syntactic categories to the CFSs was 88.53% for outside testing when the syntactic categories of the highest level were used.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "An increasing number of new or unknown words are being used on the Internet. Such new or unknown words are called \"out of vocabulary (OOV) words\" [Yang 1998 ], and they are not listed in traditional dictionaries. Many researchers have overcome problems caused by OOV words by using N-gram LMs along with smoothing methods. N-gram LMs have many useful applications in NLP [Yang 1998 ]. In Chinese NLP tasks, word-based bi-gram LMs are used by many researchers. To obtain useful probabilities for training, a corpus size proportional to 80000 2 (80000 is the approximate number of words in ASCED) = 6.4*10 9 words is required. However, it is not easy to find such a corpus at the present time.",
                "cite_spans": [
                    {
                        "start": 146,
                        "end": 156,
                        "text": "[Yang 1998",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 371,
                        "end": 381,
                        "text": "[Yang 1998",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "A small-size corpus will lead too many unseen events when using N-gram LMs. Although we can apply some smoothing strategies, such as Witten-Bell interpolation or the Good-turing method [Wu and Zheng 2001] to estimate the probabilities of unseen events, this will be of no use when the size of training corpus is limited. From our observations, many the unseen events that occur when using N-gram LMs are unknown words or phrases. Such unknown words and phrases cannot be found in a dictionary. For example, the term \"\u9031\u4f11\u4e8c \u65e5\" (two days off per week) is presently popular in Taiwan. We cannot find this term in a traditional dictionary. The term \"\u9031\u4f11\u4e8c\u65e5\" is a 4-word string pattern which consists of four words: \"\u9031\" (a week), \"\u4f11\" (to rest), \"\u4e8c\" (two), and \"\u65e5\" (day). A word-based 4-gram LM and a large training corpus are required to record the data of such terms. Such a word-base 4-gram LM has not been applied to Chinese NLP in practice, and such a huge training corpus cannot be found at present. Alternatively, we can record the specifics of the term \"\u9031\u4f11\u4e8c\u65e5\" by using a CFS with relatively limited training data in which the specified term appear two or more times. Such training data could be recorded in one or two news articles containing hundreds of Chinese characters. Many researchers have shown that frequent strings can be used in many applications [Jelinek 1990; Suhm and Waibel 1994] .",
                "cite_spans": [
                    {
                        "start": 185,
                        "end": 204,
                        "text": "[Wu and Zheng 2001]",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 1356,
                        "end": 1370,
                        "text": "[Jelinek 1990;",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 1371,
                        "end": 1392,
                        "text": "Suhm and Waibel 1994]",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "We have shown that adding Chinese frequent strings (CFSs), including unknown words, can improve performance in Chinese NLP tasks [Lin and Yu 2001] . A CFS defined based on our research is a Chinese string which appears two or more times by itself in the corpus. For example, consider the following fragment:",
                "cite_spans": [
                    {
                        "start": 129,
                        "end": 146,
                        "text": "[Lin and Yu 2001]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "\"\u570b\uf9f7\u4e2d\u8208\u5927\u5b78\uff0c\u4e2d\u8208\u5927\u5b78\u3002\" (National Chung-Hsing University, Chung-Hsing University.) \"\u4e2d\u8208\u5927\u5b78\" (Chung-Hsing University) is a CFS since it appears twice and its appearances are not brought out by other longer strings. The string \"\u4e2d\u8208\" (Chung-Hsing) appears twice, but it is not a CFS here since it is brought about by the longer string \"\u4e2d\u8208\u5927\u5b78\".",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
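To make the definition above concrete, the following is a minimal, naive sketch of CFS filtering in Python. It is not the extraction algorithm of [Lin and Yu 2001]; in particular, treating a string as "brought about" by a longer string whenever some longer frequent string has the same count is a simplification made only for illustration, and the function names are our own.

```python
from collections import Counter

def substring_counts(text, max_len=10):
    """Count every substring of length 1..max_len in the text."""
    counts = Counter()
    for i in range(len(text)):
        for j in range(i + 1, min(i + 1 + max_len, len(text) + 1)):
            counts[text[i:j]] += 1
    return counts

def extract_cfs(text, max_len=10):
    """Naive CFS filter: keep strings that occur at least twice and whose
    occurrences are not all accounted for by a longer frequent string."""
    counts = substring_counts(text, max_len)
    cfs = []
    for s, c in counts.items():
        if c < 2:
            continue
        covered = any(counts[t] == c and s in t
                      for t in counts if len(t) > len(s) and counts[t] >= 2)
        if not covered:
            cfs.append(s)
    return cfs

# "中興大學" qualifies; "中興" does not, because "中興大學" accounts for both of its occurrences.
print(extract_cfs("國立中興大學，中興大學。"))
```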
            {
                "text": "In our previous research, we showed that adding CFSs to a traditional lexicon, such as ASCED, can reduce the normalized perplexity from 251.7 to 63.5 [Lin and Yu 2001] . We also employed CFSs combined with ASCED as a dictionary to solve some Chinese NLP problems using the word-based uni-gram language model. We achieved promising results in both Chinese CTP and PTC conversion. It is well known that using a word-based bi-gram LM with a traditional lexicon can also improve accuracy in these two cases, especially in Chinese PTC conversion.",
                "cite_spans": [
                    {
                        "start": 150,
                        "end": 167,
                        "text": "[Lin and Yu 2001]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "The organization of this paper is as follows. Section 2 gives some properties and distributions of CFSs, and we also make a comparison between CFS and an n-gram LM. Section 3 shows that by using a CFS-based uni-gram LM, we can achieve higher accuracy than we can by using a traditional lexicon with a word-based bi-gram LM. We demonstrate this by using two challenging examples of Chinese NLP. In section 4, we assign syntactic categories to CFSs. Finally, section 5 presents our conclusions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "We used a training corpus of 59 MB (about 29.5M Chinese characters) in our experiments. In this section, we will present the properties of CFSs. Compared with language models and ASCED, CFSs have some important and distinctive features. We extracted 439,666 CFSs from a training corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Properties of CFS",
                "sec_num": "2."
            },
            {
                "text": "The algorithm for extracting CFSs was proposed in our previous work [Lin and Yu 2001] . We extracted CFSs from a training corpus that contained 29.5M characters. The training corpus also included a portion of the Academia Sinica Balanced Corpus [Chen et al. 1996 ] and many Internet news texts.",
                "cite_spans": [
                    {
                        "start": 68,
                        "end": 85,
                        "text": "[Lin and Yu 2001]",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 245,
                        "end": 262,
                        "text": "[Chen et al. 1996",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extracting CFSs from a Training Corpus",
                "sec_num": "2.1"
            },
            {
                "text": "The length distribution of the CFSs is shown in the second column of Table 1 . The total number of CFSs that we extracted was 439,666. Our dictionary, which we call CFSD, is comprised of these 439,666 CFSs. In contrast to the second column of Table 1 , we show the length distribution of the words in ASCED in the forth column of Table 1 . We found that three-character CFSs were most numerous in our CFS lexicon, while two-character words were most numerous in ASCED. Many meaningful strings and unknown words are collected in our CFSs. These CFSs usually contain more than two characters. Some examples are \"\u5c0f\u4f01 \u9d5d\" (a little penguin), \"\u897f\u91ab\u5e2b\" (modern medicine), \"\u4f5b\u6559\u601d\u60f3\" (Buddhist thought), \"\uf914\u900f\u5f69 \u5238\" (lottery), and so on. The above examples cannot be found in ASCED, yet they frequently appear in our training corpus.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 69,
                        "end": 76,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    },
                    {
                        "start": 243,
                        "end": 250,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    },
                    {
                        "start": 330,
                        "end": 337,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Extracting CFSs from a Training Corpus",
                "sec_num": "2.1"
            },
            {
                "text": "Since CFSs are strings frequently used by people, a CFS like \"\u5927\u5b78\u6559\u6388\" (professors of a university) may contain more characters than a word defined in ASCED does. That is, a CFS may contain two or more words. If a CFS contains two words, we say that this CFS is a 2-word CFS. If a CFS contains three words, we say that this CFS is a 3-word CFS and so on. Figure 1 shows the distributions of CFSs according to word-based n-grams. The words are defined in ASCED. We also found 31,275 CFSs(7.11% of the CFSs in CFSD) that are words in ASCED.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 352,
                        "end": 360,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Comparing CFSs with Word-Based N-Gram LMs",
                "sec_num": "2.2"
            },
            {
                "text": "From Figure 1 , it can be shown that a CFS may contain more than 3 words. Many researchers in Chinese NLP have used word-based bi-gram LMs [Yang 1998 ] as a basic LM to solve problems. A very large corpus is required to train a word-based 3-gram LM, while our CFS-based uni-gram model does not need such a large corpus. We also found that a CFS contains 2.8 words on average in CFSD. This shows that a CFS contains more information than a word-based bi-gram LM. In our experiment, we also found that the average number of characters of a word-based bi-gram was 2.75, and that the average number of characters of a CFS was 4.07. This also shows that a CFS contains more information than a word-based bi-gram LM. ",
                "cite_spans": [
                    {
                        "start": 139,
                        "end": 149,
                        "text": "[Yang 1998",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 5,
                        "end": 13,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Comparing CFSs with Word-Based N-Gram LMs",
                "sec_num": "2.2"
            },
            {
                "text": "1 -w o r d C F S 2 -w o r d C F S 3 -w o r d C F S 4 -w o r d C F S 5 -w o r d C F S 6 -w o r d C F S 7 -w o r d C F S 8 -w o r d C F S 9 -w o r d C F S 1 0 -w o r d C F S % Figure 1.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparing CFSs with Word-Based N-Gram LMs",
                "sec_num": "2.2"
            },
            {
                "text": "In this subsection, we will make a comparison between our CFSs and ASCED. Table 1 and Figure 2 show the length distributions of our CFSs and ASCED. Comparing them, we find that the average number of characters in a word in ASCED is 2.36, while the average number of characters in a CFS is 4.07. Examining Figure 2 , we notice that most of the words in ASCED are 2-character words, while the largest portion of CFSs are 2-character CFSs, 3-character CFSs, 4-character CFSs, and 5-character CFSs. This shows that our CFSs contain many 4-character and 5-character strings. To train character-based 4-gram and character-based 5-gram LMs requires a large training corpus. We also find that the number of one-character CFSs is fewer than that in ASCED. This shows that by using the CFSs, we can eliminate some ambiguities in Chinese PTC and Chinese CTP. We found 31,275 CFSs that were in ASCED. The length distribution of these 31,275 CFSs is shown in Table 2 . We also compared the length distribution of these 31,275 CFSs with the length distribution in ASCED. Our comparison is shown in Figure 3 . Note that the length distribution in ASCED is listed in the fifth column of Table 1 . We find that the length distribution of these 31,275 CFSs is similar to the length distribution in ASCED. We conjecture that if the corpus is large enough, we can find most of the words in ASCED. The length distributions of 31,275 CFSs and ASCED 0 50 100 1 2 3 4 5 6 7 8 9 10 length of a CFS or a word",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 74,
                        "end": 81,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    },
                    {
                        "start": 86,
                        "end": 94,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 305,
                        "end": 313,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 946,
                        "end": 953,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    },
                    {
                        "start": 1084,
                        "end": 1092,
                        "text": "Figure 3",
                        "ref_id": "FIGREF1"
                    },
                    {
                        "start": 1171,
                        "end": 1178,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Compare the Distributions of CFSs and ASCED",
                "sec_num": "2.3"
            },
            {
                "text": "Perplexity [Rabiner and Juang 1993] is an important and commonly used measurement of language models. Formula (1) provides a definition of perplexity. Since Nw, which is the number of words in the test corpus, in (1) is uncertain for Chinese, we normalize the perplexity into characters by means of (2) [Yang 1998 ], producing is called the normalized perplexity (or relative perplexity):",
                "cite_spans": [
                    {
                        "start": 11,
                        "end": 35,
                        "text": "[Rabiner and Juang 1993]",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 303,
                        "end": 313,
                        "text": "[Yang 1998",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparing the Normalized Perplexity",
                "sec_num": "2.4"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "( ) 1 1 Pr Nw Nw PP W \u2212 = ,",
                        "eq_num": "(1)"
                    }
                ],
                "section": "Comparing the Normalized Perplexity",
                "sec_num": "2.4"
            },
            {
                "text": "where",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparing the Normalized Perplexity",
                "sec_num": "2.4"
            },
            {
                "text": "( ) ( ) ( ) ( ), Pr ... Pr Pr Pr 2 1 1 Nw Nw w w w W \u2022 \u2022 \u2022 = ( ) . W L Nw PP NP = (2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparing the Normalized Perplexity",
                "sec_num": "2.4"
            },
            {
                "text": "Here,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparing the Normalized Perplexity",
                "sec_num": "2.4"
            },
            {
                "text": "is the test sequence of the corpus and is the probability that will be computed within a given language model. L (W) is the number of characters in W. PP is perplexity, and NP is the normalized perplexity.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparing the Normalized Perplexity",
                "sec_num": "2.4"
            },
            {
                "text": "Nw w w w W ... 2 1 1 = ) Pr( 1 Nw W Nw W 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Nw",
                "sec_num": null
            },
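As a worked illustration of formulas (1) and (2), the short routine below computes PP and NP for a segmented test string under a uni-gram model. The segmentation, the probability values, and the function name are invented for the example; they are not values from the paper.

```python
import math

def normalized_perplexity(segments, prob, num_chars):
    """Compute PP = Pr(W)^(-1/Nw) over Nw segments (formula 1), then
    NP = PP^(Nw / L(W)) as in formula (2)."""
    nw = len(segments)
    log_prob = sum(math.log(prob[s]) for s in segments)   # log Pr(w1 ... wNw)
    pp = math.exp(-log_prob / nw)                          # perplexity per segment
    np_ = pp ** (nw / num_chars)                           # normalized per character
    return pp, np_

# Toy uni-gram probabilities for a segmentation into CFSs (illustrative values only).
prob = {"中興大學": 0.02, "教授": 0.05, "演講": 0.03}
segments = ["中興大學", "教授", "演講"]
num_chars = sum(len(s) for s in segments)   # L(W) = 8 characters
pp, np_ = normalized_perplexity(segments, prob, num_chars)
print(f"PP = {pp:.2f}, NP = {np_:.2f}")
```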
            {
                "text": "We used a testing corpus to compute the normalized perplexities within the CFS-based uni-gram LMs and the word-based bi-gram LMs. The size of the testing corpus was 2.5M characters. We used the same training corpus mentioned in subsection 2.1 to extract CFSs and to train the word-based bi-gram LMs. Each word in the word-based bi-gram LM was defined in ASCED. We used the Good-Turing smoothing method to estimate the unseen bi-gram events. The normalized perplexity obtained using the word-based bi-gram LM was 78.6. The normalized perplexity became 32.5 when the CFS-based uni-gram LM was used. This shows that the CFS-based uni-gram LM has a lower normalized perplexity. That is to say, using the CFS-based uni-gram LM is better than using the traditional word-based bi-gram LM with a small-sized training corpus of 29.5M characters.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Nw",
                "sec_num": null
            },
            {
                "text": "In a previous study [Lin and Yu 2001] , we showed that using CFSs and ASCED as the dictionary with the uni-gram language model can lead to good results in two Chinese NLP applications. These two applications are Chinese character-to-phoneme (CTP) conversion and Chinese phoneme-to-character (PTC) conversion. The achieved accuracy rates were 99.7% for CTP conversion and 96.4% for PTC conversion [Lin and Yu 2001] . The size of the training corpus in our previous research was 0.5M characters. There were 55,518 CFSs extracted from the training corpus. In this study, we solved two challenging Chinese NLP problems with a larger training corpus. The two problems were Chinese toneless phoneme-to-character (TPTC) conversion and Chinese spelling error correction (SEC).",
                "cite_spans": [
                    {
                        "start": 20,
                        "end": 37,
                        "text": "[Lin and Yu 2001]",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 396,
                        "end": 413,
                        "text": "[Lin and Yu 2001]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Application of CFS to Two Difficult Problems",
                "sec_num": "3."
            },
            {
                "text": "The first task was Chinese TPTC conversion. Chinese TPTC tries to generate correct characters according to input syllables without tonal information. The second task was Chinese SEC (spelling error correction). In our study, we attempted to identify and correct the possible errors in sentences with no more than one error that were input using the Cang-Jie (\u5009 \u9821) input method.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Application of CFS to Two Difficult Problems",
                "sec_num": "3."
            },
            {
                "text": "The first task was Chinese TPTC conversion. The lexicon we used was CFSD as mentioned in section 2.1. This task is more complex than traditional Chinese phoneme-to-character conversion. There are five tones in Mandarin. They are high-level (1 st tone), high-rising (2 nd tone), low-dipping (3 rd tone), high-falling (4 th tone), and the neutral tone [National Taiwan Normal University 1982] . There are a total of 1,244 possible syllables (combinations of phonetic symbols) in Mandarin, and there are a total of 408 possible toneless syllables. Therefore, each toneless syllable has about 1,244/408=3.05 times the number of characters of a tonal syllable. The average length of a sentence in our training corpus is 8 characters per sentence. The number of possibilities for Chinese TPTC conversion is about 3.05 8 =7489 times that of Chinese PTC conversion. This shows that Chinese TPTC conversion is more difficult than Chinese PTC conversion.",
                "cite_spans": [
                    {
                        "start": 350,
                        "end": 390,
                        "text": "[National Taiwan Normal University 1982]",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Chinese Toneless Phoneme-to-Character Conversion",
                "sec_num": "3.1"
            },
            {
                "text": "The size of the outside testing data was 2.5M characters. In our TPTC module, we initially searched the system dictionary to access all the possible CFSs according to the input toneless phonemes. Such possible CFSs constitute a CFS lattice. We applied a dynamic programming methodology to find the best path in the CFS lattice, where the best path was the sequence of CFS-based uni-grams with the highest probability. The definition we employed of the probability P(S) of each input sentence S was as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Chinese Toneless Phoneme-to-Character Conversion",
                "sec_num": "3.1"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "S = CFS 1 CFS 2 \u2026 CFS n , P(S) = P(CFS 1 )\u2024P(CFS 2 )\u2024\u2026\u2024P(CFS n ) ,",
                        "eq_num": "(3)"
                    }
                ],
                "section": "Chinese Toneless Phoneme-to-Character Conversion",
                "sec_num": "3.1"
            },
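A minimal dynamic-programming sketch of the lattice search described above: it maximizes the product in formula (3) over segmentations of the toneless-syllable input. The toy lexicon mapping syllable tuples to CFSs, the probabilities, and the syllable spellings are hypothetical placeholders, not CFSD entries.

```python
import math

def best_cfs_path(syllables, lexicon):
    """Maximize P(CFS_1)...P(CFS_n) over segmentations of the syllable
    sequence, as in formula (3); `lexicon` maps tuples of toneless
    syllables to a list of (CFS, uni-gram probability) pairs."""
    n = len(syllables)
    best = [(-math.inf, None, None)] * (n + 1)  # (log prob, back pointer, CFS)
    best[0] = (0.0, None, None)
    for end in range(1, n + 1):
        for start in range(end):
            if best[start][0] == -math.inf:
                continue
            for cfs, p in lexicon.get(tuple(syllables[start:end]), []):
                score = best[start][0] + math.log(p)
                if score > best[end][0]:
                    best[end] = (score, start, cfs)
    path, pos = [], n
    while pos > 0:
        score, prev, cfs = best[pos]
        if cfs is None:
            return None  # no complete path through the CFS lattice
        path.append(cfs)
        pos = prev
    return list(reversed(path))

# Hypothetical toneless syllables for "中興大學" followed by "教授".
lexicon = {
    ("zhong", "xing", "da", "xue"): [("中興大學", 0.02)],
    ("da", "xue"): [("大學", 0.04)],
    ("jiao", "shou"): [("教授", 0.05)],
}
print(best_cfs_path(["zhong", "xing", "da", "xue", "jiao", "shou"], lexicon))
```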
            {
                "text": "The achieved precision rate was 92.86%. The precision rate was obtained by using the formula (total number of correct characters) / (total number of characters). The processing time was 12 ms/character. We also applied the dictionary used in our previous research [Lin and Yu 2001] to test the data, which was 2.5M characters in size. The dictionary combines ASCDE with 55,518 CFSs. The achieved precision rate in solving the Chinese TPTC problem was 87.3%. This indicates that if we can collect more CFSs, we can obtain higher accuracy.",
                "cite_spans": [
                    {
                        "start": 264,
                        "end": 281,
                        "text": "[Lin and Yu 2001]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Chinese Toneless Phoneme-to-Character Conversion",
                "sec_num": "3.1"
            },
            {
                "text": "In this task, we also applied the word-based bi-gram LM with ASCED. The size of the training corpus was the same as that of the corpus mentioned in section 2.1, that is, 29.5M characters. The Good-Turing smoothing method was applied here to estimate the unseen events. The achieved precision rate was 66.9%, and the processing time was 510 ms/character. These results show that when the CFS-based uni-gram LM was used, the precision rate improved greatly (92.8 % vs. 66.9%) and the processing time was greatly reduced (12 ms/character vs. 510 ms/character) compared to the results obtained using the traditional word-based bi-gram LM.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Chinese Toneless Phoneme-to-Character Conversion",
                "sec_num": "3.1"
            },
            {
                "text": "We also applied the CFS-based uni-gram LM to the Chinese SEC problem [Chang 1994 ]. Chinese SEC is a challenging task in Chinese natural language. A Chinese SEC system should correct character errors in input sentences. To make the task meaningful in practice, we limited our Chinese SEC problem based on the following constraints: (1) the sentences were input using the Cang-Jie Chinese input method; (2) there was no more than one character error in an input sentence.",
                "cite_spans": [
                    {
                        "start": 69,
                        "end": 80,
                        "text": "[Chang 1994",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Chinese Spelling Error Correction Issue",
                "sec_num": "3.2"
            },
            {
                "text": "The reasons why we applied the above two constraints are as follows: (1) our Chinese SEC system is designed for practiced typists; (2) the Cang-Jie Chinese input method is a popular method widely used in Taiwan; (3) at most one character error is likely to be made in a sentence by a practiced typist; and (4) we can easily apply the methodology used this research to other Chinese input or processing systems. Our methodology for Chinese SEC is shown in Algorithm SEC.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Chinese Spelling Error Correction Issue",
                "sec_num": "3.2"
            },
            {
                "text": "Characters with similar Cang-Jie codes define a confusing set in Algorithm SEC. We constructed the confusing set for each Chinese character based on the five rules listed in Table  3 . The longest common subsequence (LCS) algorithm is a well known algorithm that can be found in most computer algorithm textbooks, such as [Cormen et al. 1998 ].",
                "cite_spans": [
                    {
                        "start": 322,
                        "end": 341,
                        "text": "[Cormen et al. 1998",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 174,
                        "end": 182,
                        "text": "Table  3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Chinese Spelling Error Correction Issue",
                "sec_num": "3.2"
            },
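For completeness, a textbook LCS-length routine of the kind cited above ([Cormen et al. 1998]), applied here to Cang-Jie code strings; the example codes are made up.

```python
def lcs_length(a, b):
    """Dynamic-programming length of the longest common subsequence of a and b."""
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i, ca in enumerate(a, 1):
        for j, cb in enumerate(b, 1):
            dp[i][j] = dp[i - 1][j - 1] + 1 if ca == cb else max(dp[i - 1][j], dp[i][j - 1])
    return dp[len(a)][len(b)]

# Two hypothetical Cang-Jie codes that share a 4-symbol subsequence.
print(lcs_length("abcde", "abxde"))  # 4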
            {
                "text": "Input: A sentence S with no more than one incorrect character.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Algorithm SEC.",
                "sec_num": null
            },
            {
                "text": "Output: The corrected sentence for the input sentence S.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Algorithm SEC.",
                "sec_num": null
            },
            {
                "text": "Step 1: For each i-th character in S, find the characters whose Cang-Jie codes are similar to the code of the i-th character. Let C be the set consisting of such characters. C is called the 'confusing set'.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Algorithm:",
                "sec_num": null
            },
            {
                "text": "Step 2: Replace each character in C with the i-th character in S. There will be a 'maybe' sentence S 1 . Find the probability of S 1 by using the CFS-based uni-gram LM. Record the maybe sentence with the highest probability.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Algorithm:",
                "sec_num": null
            },
            {
                "text": "Step 3: For each character in S, repeat Step 1 and Step 2.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Algorithm:",
                "sec_num": null
            },
            {
                "text": "Step 4: Output the 'maybe' sentence with the highest probability found in Steps 1, 2, and 3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Algorithm:",
                "sec_num": null
            },
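A compact sketch of Algorithm SEC as stated above. `confusing_set` and `sentence_prob` are stand-ins: the former would be built from the Cang-Jie rules in Table 3, and the latter would be the CFS-based uni-gram scorer; the demo values below are invented.

```python
def algorithm_sec(sentence, confusing_set, sentence_prob):
    """Try replacing each character with members of its confusing set and
    return the 'maybe' sentence with the highest probability (Steps 1-4)."""
    best_sentence, best_score = sentence, sentence_prob(sentence)
    for i, ch in enumerate(sentence):                       # Step 3: every position
        for cand in confusing_set(ch):                      # Step 1: confusing set of ch
            maybe = sentence[:i] + cand + sentence[i + 1:]  # Step 2: a 'maybe' sentence
            score = sentence_prob(maybe)
            if score > best_score:
                best_sentence, best_score = maybe, score
    return best_sentence                                    # Step 4: best candidate

# Toy demo with placeholder scoring that favours the corrected form.
demo_prob = lambda s: 1.0 if s == "中興大學" else 0.1
demo_confusing = lambda ch: {"字": ["學"]}.get(ch, [])
print(algorithm_sec("中興大字", demo_confusing, demo_prob))  # 中興大學
```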
            {
                "text": "Length of Cang-Jie code: to the target character t",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 3. Rules used to construct the confusing set based on the Cang-Jie Chinese input method.",
                "sec_num": null
            },
            {
                "text": "Each character s satisfying the conditions listed below is a similar character of t. 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 3. Rules used to construct the confusing set based on the Cang-Jie Chinese input method.",
                "sec_num": null
            },
            {
                "text": "The characters whose Cang-Jie codes are the same as that of the target character. The length of the Cang-Jie code of s is greater than 1, and the length of the LCS of s and t is 2. 4",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 3. Rules used to construct the confusing set based on the Cang-Jie Chinese input method.",
                "sec_num": null
            },
            {
                "text": "The length of the Cang-Jie code of s is greater than 2, and the length of the LCS of s and t is 3. 5",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 3. Rules used to construct the confusing set based on the Cang-Jie Chinese input method.",
                "sec_num": null
            },
            {
                "text": "The length of Cang-Jie code of s is 4 or 5, and the length of the LCS of s and t is 4.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 3. Rules used to construct the confusing set based on the Cang-Jie Chinese input method.",
                "sec_num": null
            },
            {
                "text": "The uni-gram language model was used to determine the probability of each sentence. We used CFSD as our dictionary. There were 485,272 sentences for the outside test. No more than one character in each sentence was replaced with a similar character. Both the location of the replaced character and that of the similar character were randomly selected. The achieved precision rate was 87.32% for the top one choice. The precision rate was defined as (the number of correct sentences) / (the number of tested sentences). The top 5 precision rates are listed in Table 4 . The precision rate of the top 5 choices was about 95%, as shown in Table 4 . This shows that our approach can provide five possible corrected sentences for users in practice. The achieved precision rate in determining the location of the replaced character with the top one choice was 97.03%. We also applied ASCDE with word-based bi-gram LMs to compute the probability for each possible sentence. The size of the training corpus was 29.5M characters, which was the same as that of the training corpus mentioned in section 2.1. We also used the Good-Turing smoothing method to estimate the unseen bi-gram events. The achieved precision rates are shown in Table 5 . The achieved precision rate for the top one choice was 80.95%. From Table 4 and Table 5 , we can find that using CFS-based uni-gram LM is better than using ASCED with a word-based bi-gram LM. The advantages are the high achieved precision rate (87.32% vs. 80.95%) and short processing time (55 ms/character vs. 820 ms/character).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 559,
                        "end": 566,
                        "text": "Table 4",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 636,
                        "end": 643,
                        "text": "Table 4",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 1224,
                        "end": 1231,
                        "text": "Table 5",
                        "ref_id": "TABREF3"
                    },
                    {
                        "start": 1302,
                        "end": 1309,
                        "text": "Table 4",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 1314,
                        "end": 1321,
                        "text": "Table 5",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Table 3. Rules used to construct the confusing set based on the Cang-Jie Chinese input method.",
                "sec_num": null
            },
            {
                "text": "A CFS is a frequently used combination of Chinese characters. It may be a proper noun, like \"\u7db2\u969b\u7db2\uf937\" (the Internet), a verb phrase, like \"\u5168\uf98a\u52d5\u54e1\u6295\u5165\" (try one's best to mobilize), and other word forms. If a CFS can be assigned to some syntactic categories, it can be used in more applications. The CYK algorithm is a well known method used to assign syntactic categories [Lin 1994] . In this study, we tried to assign syntactic categories to CFSs by a using dynamic programming strategy. If a CFS s is also a word w, we can assign the syntactic categories of w to s. When s is a combination of several words, we can attempt to find syntactic categories associated with it. We first find the probabilities of production rules. Then, we use these probabilities to determine the syntactic categories.",
                "cite_spans": [
                    {
                        "start": 365,
                        "end": 375,
                        "text": "[Lin 1994]",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Assigning Syntactic Categories to CFSs",
                "sec_num": "4."
            },
            {
                "text": "We used the Sinica Treebank [Chen et al. 1994] as the training and testing data. The contents of the Sinica Treebank are composed of the structural trees of sentences. Structural trees contain the forms of words, the syntactic categories of each word, and the reductions of the syntactic categories of words. There are 38,725 structural trees in the Sinica Treebank version 1.0. They are stored in 9 files. We first used a portion of the 38,725 structural trees as the training data. We wanted to extract the production rules from each structural tree. These production rules were used to determine the syntactic categories of CFSs. Since each CFS could contain one or more words, the syntactic category of a CFS could be a portion of the structural tree. For example, four different production rules were extracted from the structural tree shown in Figure 4 . They are \"NP\u2190Nhaa\uff02, \"VE2\u2190VE2+Dc+VE2\uff02,\uff02NP\u2190DM+Nab\uff02, and \"S\u2190NP+VE2+NP\uff02. The notations of syntactic categories are defined by the Chinese Knowledge Information Processing group (CKIP).",
                "cite_spans": [
                    {
                        "start": 28,
                        "end": 46,
                        "text": "[Chen et al. 1994]",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 850,
                        "end": 858,
                        "text": "Figure 4",
                        "ref_id": "FIGREF4"
                    }
                ],
                "eq_spans": [],
                "section": "Extracting Production Rules from Sinica Treebank Version 1.0",
                "sec_num": "4.1"
            },
            {
                "text": "Examples of probabilities of production rules are listed in Table 6 . We extracted 15,946 different production rules from 90% of the Sinica Treebank version 1.0. The other 10% of the structural trees are left for testing. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 60,
                        "end": 67,
                        "text": "Table 6",
                        "ref_id": "TABREF4"
                    }
                ],
                "eq_spans": [],
                "section": "Extracting Production Rules from Sinica Treebank Version 1.0",
                "sec_num": "4.1"
            },
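One simple way to obtain the rule probabilities referred to above is relative-frequency counting over the extracted rules. Consistent with the 林小姐 walk-through in the next subsection, the probability attached to a rule is treated here as P(left-hand side | right-hand side); this reading, the function name, and the toy counts below are assumptions for illustration.

```python
from collections import Counter, defaultdict

def rule_probabilities(rules):
    """Estimate P(LHS | RHS) from extracted production rules given as
    (LHS, RHS-tuple) pairs, e.g. ('NP', ('Nab', 'Nv4'))."""
    rhs_totals, rule_counts = Counter(), Counter()
    for lhs, rhs in rules:
        rhs_totals[rhs] += 1
        rule_counts[(lhs, rhs)] += 1
    probs = defaultdict(dict)
    for (lhs, rhs), c in rule_counts.items():
        probs[rhs][lhs] = c / rhs_totals[rhs]
    return probs

# Toy counts: Nab + Nv4 only ever reduces to NP, so P(NP | Nab + Nv4) = 1.0.
extracted = [("NP", ("Nab", "Nv4")), ("NP", ("Nab", "Nv4")),
             ("NP", ("Nhaa",)), ("S", ("NP", "VE2", "NP"))]
print(rule_probabilities(extracted)[("Nab", "Nv4")])
```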
            {
                "text": "We used the 15,946 production rules to determine the syntactic categories of CFSs. To perform this task, a lexicon with syntactic categories was required for each word. We used ASCED, provided by Academia Sinica, Taiwan, as the dictionary. ASCED is a well-defined dictionary which contains about 80,000 words. For an input CFS, we first looked in ASCED to get the syntactic categories for each substring word of the input CFS. We also used these syntactic categories and the 15,946 production rules to determine the syntactic categories of the input CFS. We tried to find the syntactic categories of a CFS by using the syntactic categories of the substrings of that CFS. The method we used is a dynamic programming method. As an example, Figure 5 shows the syntactic categories of the CFS \"\uf9f4\u5c0f\u59d0\" (Miss Lin). As shown in Figure 5 , we first looked in ASCED to find the syntactic categories of each possible word which was a substring of \"\uf9f4\u5c0f\u59d0\". Cell (A,1) contains the possible syntactic categories of the word \"\uf9f4\", cell (B,2) contains the possible syntactic categories of \"\u5c0f\", cell (C,3) contains the possible syntactic categories of \"\u59d0\", and cell (B, 3) contains the possible syntactic categories of \"\u5c0f\u59d0\". The number following each syntactic category in a cell is the probability of that syntactic category. Next, we tried to determine the syntactic categories of cell (A, 2) by using the production rules we extracted from the Sinica Treebank. The syntactic categories of cell (A, 2) could be derived using the information of cell (A, 1) and cell (B, 2). A total of 2 * 4 = 8 possible production rules were derived. Examining the production rules we extracted, we found that only one of the 8 possible combinations existed in the production rules. This combination was NP \u2190 Nab + Nv4. The result of cell (A, 2) was NP. The probability was 1 because Nab + Nv4 could only derive NP. The contents of cell (B, 3) could also be derived from the contents of cells (B, 2) and (C, 3).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 738,
                        "end": 746,
                        "text": "Figure 5",
                        "ref_id": "FIGREF5"
                    },
                    {
                        "start": 819,
                        "end": 827,
                        "text": "Figure 5",
                        "ref_id": "FIGREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Determining the Syntactic Categories of a CFS",
                "sec_num": "4.2"
            },
            {
                "text": "Finally, we determined the syntactic categories of cell (A, 3) in the same way as in the preceding step. The syntactic categories of cell (A, 3) could be derived from cells (A, 1) and (B, 3), or cells (A, 2) and (C, 3) or cells (A, 1) and (B, 2) and (C, 3). The result was NP, which was derived from cell (A,1) and (B,3) by using the rule NP \u2190 Nbc + Nab. The syntactic category of the CFS \"\uf9f4\u5c0f\u59d0\" was NP, which was the only syntactic category derived by inspecting the contents of cell (A, 3).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Determining the Syntactic Categories of a CFS",
                "sec_num": "4.2"
            },
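The chart procedure described above can be sketched as a CYK-style dynamic program. For brevity the sketch below handles only binary rules (the extracted rules can have longer right-hand sides), and the dictionary entries, rules, and probabilities are invented stand-ins for the 林小姐 example rather than actual ASCED or Treebank values.

```python
from collections import defaultdict

def assign_categories(chars, lexicon, rules):
    """Chart DP over a CFS: chart[(i, j)] maps each category derivable for
    chars[i:j] to its best probability.  Cells are seeded from dictionary
    words and combined with binary rules {(A, B): [(LHS, prob), ...]}."""
    n = len(chars)
    chart = defaultdict(dict)
    for i in range(n):                       # seed cells with dictionary words
        for j in range(i + 1, n + 1):
            word = "".join(chars[i:j])
            for cat, p in lexicon.get(word, []):
                chart[(i, j)][cat] = max(chart[(i, j)].get(cat, 0.0), p)
    for span in range(2, n + 1):             # build longer spans from shorter ones
        for i in range(n - span + 1):
            j = i + span
            for k in range(i + 1, j):
                for a, pa in chart[(i, k)].items():
                    for b, pb in chart[(k, j)].items():
                        for lhs, p in rules.get((a, b), []):
                            score = pa * pb * p
                            if score > chart[(i, j)].get(lhs, 0.0):
                                chart[(i, j)][lhs] = score
    return chart[(0, n)]

# Invented entries for the "林小姐" (Miss Lin) walk-through.
lexicon = {"林": [("Nbc", 0.6), ("Nab", 0.4)], "小": [("Nv4", 0.3)],
           "姐": [("Nab", 0.5)], "小姐": [("Nab", 0.7)]}
rules = {("Nbc", "Nab"): [("NP", 1.0)], ("Nab", "Nv4"): [("NP", 1.0)]}
print(assign_categories(list("林小姐"), lexicon, rules))  # {'NP': 0.42}
```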
            {
                "text": "Our goal was to determine the syntactic categories of CFSs. The testing data we chose were in the bottom layer of each structural tree. Each level of the testing data contained many words. For example, we determined the syntactic categories of \"\u8981\uf967\u8981\" and \"\u9019\u5e45\u756b\" as described for the example shown in Figure 4 . We found that the syntactic category of \"\u8981\uf967\u8981\" was VE2, and that syntactic category of \"\u9019\u5e45\u756b\" was NP. We retrieved 1,309 patterns and their related syntactic categories from the testing corpus. Among the 1,309 patterns, 98 patterns were our CFSs.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 298,
                        "end": 306,
                        "text": "Figure 4",
                        "ref_id": "FIGREF4"
                    }
                ],
                "eq_spans": [],
                "section": "Experimental Results",
                "sec_num": "4.3"
            },
            {
                "text": "The structure of the notations of the syntactic categories defined by CKIP is a hierarchical one. There are a total of 178 syntactic categories with five layers in the hierarchical tree [CKIP 1993 ]. There are 8 categories in the first layer: N (noun), C (conjunction), V (verb), A (adjective), D (adverb), P (preposition), I (interjection), and T (auxiliary). The second layer contains 103 syntactic categories. For example, there are two sub-categories, Ca and Cb, in the second layer of category C in the first layer. Seven syntactic categories are defined in the Sinica Treebank. They are S (sentence), VP (verb phrase), NP (noun phrase), GP (direction phrase), PP (preposition phrase), XP (conjunction phrase), and DM (determinate phrase). We also put these 7 syntactic categories in the first layer of the hierarchical tree.",
                "cite_spans": [
                    {
                        "start": 186,
                        "end": 196,
                        "text": "[CKIP 1993",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Results",
                "sec_num": "4.3"
            },
            {
                "text": "The achieved accuracy rates for determining the syntactic categories of these 98 CFSs by using all of the syntactic categories are shown in Table 7 . When we used the syntactic categories in the first layer, the accuracy rate for the top one choice was 70.35%. Because the size of training corpus was small compared with the hundreds of available syntactic categories, we also reduced the tags in each production tree to the second layer of the hierarchical tree. For example, when we reduced the syntactic categories of the production rule \"S \u2190 Cbca + NP + Dbb + VK2 + NP\" to the second layer, we got the reduced production rule \"S \u2190 Cb + NP + Db + VK + NP \". We also determined the syntactic categories of the 98 patterns. The results are shown in Table 8 . When we used the syntactic categories in the first layer, the accuracy rate for the top 1 choice was 76.28%. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 140,
                        "end": 147,
                        "text": "Table 7",
                        "ref_id": "TABREF5"
                    },
                    {
                        "start": 750,
                        "end": 757,
                        "text": "Table 8",
                        "ref_id": "TABREF6"
                    }
                ],
                "eq_spans": [],
                "section": "Experimental Results",
                "sec_num": "4.3"
            },
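            {
                "text": "As an illustrative sketch only (not part of the original experiments), the Python fragment below shows one way to reduce the fine-grained tags of a production rule to a coarser layer. Based solely on the example above, it assumes that a layer-n lexical tag is simply the first n characters of the full tag and that phrase-level labels are left unchanged; this truncation heuristic is our assumption, not a procedure stated in the paper.\n\nPHRASE_TAGS = {'S', 'VP', 'NP', 'GP', 'PP', 'XP', 'DM'}   # phrase-level labels of the Sinica Treebank\n\ndef reduce_tag(tag, layer=2):\n    # Phrase-level labels already sit in the first layer of the hierarchy; keep them as-is.\n    if tag in PHRASE_TAGS:\n        return tag\n    # Assumed truncation heuristic: Cbca -> Cb -> C, Dbb -> Db -> D, VK2 -> VK -> V.\n    return tag[:layer]\n\ndef reduce_rule(lhs, rhs, layer=2):\n    return reduce_tag(lhs, layer), tuple(reduce_tag(t, layer) for t in rhs)\n\nprint(reduce_rule('S', ('Cbca', 'NP', 'Dbb', 'VK2', 'NP')))\n# ('S', ('Cb', 'NP', 'Db', 'VK', 'NP')) -- matches the reduced rule in the text",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Results",
                "sec_num": "4.3"
            },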
            {
                "text": "In this paper, we have presented some important properties of Chinese frequent strings. We used CFSs in several applications. We found that the CFS-based uni-gram LM was superior to traditional N-gram LMs when the training data was sparse. While the size of a corpus using the CFS-based uni-gram LM can be far smaller than that needed when using traditional N-gram LMs, for the applications studied here, the results obtained using the CFS-based uni-gram LM are better than those obtained using an n-gram LM.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "5."
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank Academia Sinica for providing its ASBC corpus, ASCED dictionary, and Sinica Treebank. We also extend our thanks to the many news companies for distributing their files on the Internet.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "A Pilot Study on Automatic Chinese Spelling Error Correction",
                "authors": [
                    {
                        "first": "C",
                        "middle": [
                            "H"
                        ],
                        "last": "Chang",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Communication of COLIPS",
                "volume": "4",
                "issue": "2",
                "pages": "143--149",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "C. H. Chang, \"A Pilot Study on Automatic Chinese Spelling Error Correction,\" Communication of COLIPS, Vol. 4, No. 2, 1994, pp. 143-149.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "SINICA CORPUS: Design Methodology for Balanced Corpora",
                "authors": [
                    {
                        "first": "K",
                        "middle": [
                            "J"
                        ],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [
                            "R"
                        ],
                        "last": "Huang",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [
                            "P"
                        ],
                        "last": "Chang",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [
                            "L"
                        ],
                        "last": "Hsu",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceeding of PACLIC 11 th Conference",
                "volume": "",
                "issue": "",
                "pages": "167--176",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. J. Chen, C. R. Huang, L. P. Chang, and H. L. Hsu, \"SINICA CORPUS: Design Methodology for Balanced Corpora,\" Proceeding of PACLIC 11 th Conference, 1996, pp. 167-176.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Sinica Treebank",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "Y"
                        ],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [
                            "F"
                        ],
                        "last": "Tsai",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [
                            "J"
                        ],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [
                            "R"
                        ],
                        "last": "Huang",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Computational Linguistics and Chinese Language Processing",
                "volume": "4",
                "issue": "",
                "pages": "75--85",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Y. Chen, P. F. Tsai, K. J. Chen, and C. R. Huang, \"Sinica Treebank,\" Computational Linguistics and Chinese Language Processing, Vol. 4, No. 2, 1994, pp. 75-85.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Analysis of Chinese Part-of-Speech (\u4e2d\u6587\u8a5e\uf9d0\u5206\u6790)",
                "authors": [],
                "year": 1993,
                "venue": "Academia Sinica",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "CKIP( Chinese Knowledge Information Processing Group, \u8a5e\u5eab\u5c0f\u7d44) , \"Analysis of Chinese Part-of-Speech (\u4e2d\u6587\u8a5e\uf9d0\u5206\u6790), Technical Report of CKIP #93-05(\u4e2d\u6587\u8a5e\u77e5\uf9fc\u5eab\u5c0f\u7d44 \u6280\u8853\u5831\u544a #93-05),\" Academia Sinica, Taipei, Taiwan, 1993.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Introduction to Algorithms",
                "authors": [
                    {
                        "first": "T",
                        "middle": [
                            "H"
                        ],
                        "last": "Cormen",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [
                            "E"
                        ],
                        "last": "Leiserson",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [
                            "L"
                        ],
                        "last": "Rivest",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "T. H. Cormen, C. E. Leiserson, R. L. Rivest, \"Introduction to Algorithms,\" The MIT Press, 1998.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Self-organized Language Modeling for Speech Recognition",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Jelinek",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "Readings in Speech Recognition",
                "volume": "",
                "issue": "",
                "pages": "450--506",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Jelinek, \"Self-organized Language Modeling for Speech Recognition,\" Readings in Speech Recognition, Ed. A. Wabel and K. F. Lee. Morgan Kaufmann Publishers Inc., San Mateo, California, 1990, pp. 450-506.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "A Level Synchronous Approach to Ill-formed Sentence Parsing and Error Correction",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [
                            "C"
                        ],
                        "last": "Lin",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Y. C. Lin, \"A Level Synchronous Approach to Ill-formed Sentence Parsing and Error Correction,\" Ph.D. Thesis, Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan, June 1994.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Extracting Chinese Frequent Strings Without a Dictionary From a Chinese Corpus And its Applications",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [
                            "J"
                        ],
                        "last": "Lin",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [
                            "S"
                        ],
                        "last": "Yu",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Journal of Information Science and Engineering",
                "volume": "17",
                "issue": "5",
                "pages": "805--824",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Y. J. Lin and M. S. Yu, \"Extracting Chinese Frequent Strings Without a Dictionary From a Chinese Corpus And its Applications,\" Journal of Information Science and Engineering, Vol. 17, No. 5, 2001, pp. 805-824.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Mandarin Phonetics",
                "authors": [],
                "year": 1982,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "National Taiwan Normal University, \"Mandarin Phonetics,\" National Taiwan Normal University Press, Taipei, Taiwan, 1982.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Fundamentals of Speech Recognition",
                "authors": [
                    {
                        "first": "L",
                        "middle": [
                            "R"
                        ],
                        "last": "Rabiner",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [
                            "H"
                        ],
                        "last": "Juang",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "L. R. Rabiner and B. H. Juang, \"Fundamentals of Speech Recognition,\" Prentice Hall Co. Ltd., 1993.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Toward Better Language Models for Spontaneous Speech",
                "authors": [
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Suhm",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Waibel",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proc. ICSLP",
                "volume": "",
                "issue": "",
                "pages": "831--834",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "B. Suhm and A. Waibel, \"Toward Better Language Models for Spontaneous Speech,\" Proc. ICSLP, 1994, pp. 831-834.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "On Enhancing Katz-Smoothing Based Back-Off Language Model",
                "authors": [
                    {
                        "first": "Jian",
                        "middle": [],
                        "last": "Wu",
                        "suffix": ""
                    },
                    {
                        "first": "Fang",
                        "middle": [],
                        "last": "Zheng",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "International Conference on Spoken Language Processing",
                "volume": "",
                "issue": "",
                "pages": "198--201",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jian Wu and Fang Zheng, \"On Enhancing Katz-Smoothing Based Back-Off Language Model,\" International Conference on Spoken Language Processing, 2001, pp. I-198-201.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Further Studies for Practical Chinese Language Modeling",
                "authors": [
                    {
                        "first": "K",
                        "middle": [
                            "C"
                        ],
                        "last": "Yang",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. C. Yang, \"Further Studies for Practical Chinese Language Modeling,\" Master Thesis, Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan, June 1998.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "text": "The length distributions of CFSs and ASCED.",
                "num": null,
                "uris": null
            },
            "FIGREF1": {
                "type_str": "figure",
                "text": "The length distributions of 31,275 CFSs and ASCED.",
                "num": null,
                "uris": null
            },
            "FIGREF2": {
                "type_str": "figure",
                "text": "2 A. The length of the Cang-Jie code of s is 2, and the length of the LCS of s and t is 1. B. The length of the Cang-Jie code of s is 3, and the length of the LCS of s and t is 2. 3",
                "num": null,
                "uris": null
            },
            "FIGREF3": {
                "type_str": "figure",
                "text": "Figure 4shows the structural tree of the sentence \"\u4f60\u8981\uf967\u8981\u9019 \u5e45\u756b\" (Do you want this picture?). The representation of this structural tree in the Sinica Treebank is as follows: #S((agent:NP(Head:Nhaa: \u4f60 ))|(Head:VE2(Head:VE2: \u8981 )|(negation:Dc: \uf967 )|(Head:VE2: \u8981))|(goal:NP(quantifier:DM:\u9019\u5e45)|(Head:Nab:\u756b)",
                "num": null,
                "uris": null
            },
            "FIGREF4": {
                "type_str": "figure",
                "text": "The structural tree of the sentence \"\u4f60\u8981\uf967\u8981\u9019\u5e45\u756b\" (Do you want this picture?)",
                "num": null,
                "uris": null
            },
            "FIGREF5": {
                "type_str": "figure",
                "text": "The syntactic categories of the CFS \"\uf9f4\u5c0f\u59d0\" (Miss Lin).",
                "num": null,
                "uris": null
            },
            "TABREF0": {
                "html": null,
                "text": "",
                "content": "<table><tr><td>Number of characters</td><td>Number of CFSs of</td><td colspan=\"2\">Percentage Number of words of</td><td>Percentage</td></tr><tr><td>in a CFS or a word</td><td>that length in our</td><td/><td>that length in ASCED</td><td/></tr><tr><td/><td>CFS dictionary</td><td/><td/><td/></tr><tr><td>1</td><td>3,877</td><td>0.88%</td><td>7,745</td><td>9.57%</td></tr><tr><td>2</td><td>69,358</td><td>15.78%</td><td>49,908</td><td>61.67%</td></tr><tr><td>3</td><td>114,458</td><td>26.03%</td><td>11,663</td><td>14.41%</td></tr><tr><td>4</td><td>113,005</td><td>25.70%</td><td>10,518</td><td>13.00%</td></tr><tr><td>5</td><td>60,475</td><td>13.75%</td><td>587</td><td>0.73%</td></tr><tr><td>6</td><td>37,044</td><td>8.43%</td><td>292</td><td>0.36%</td></tr><tr><td>7</td><td>19,287</td><td>4.39%</td><td>135</td><td>0.17%</td></tr><tr><td>8</td><td>11,494</td><td>2.61%</td><td>66</td><td>0.08%</td></tr><tr><td>9</td><td>6,588</td><td>1.50%</td><td>3</td><td>0.004%</td></tr><tr><td>10</td><td>4,080</td><td>0.93%</td><td>8</td><td>0.006%</td></tr><tr><td colspan=\"4\">The distributions of CFSs by word grams</td><td/></tr><tr><td>50</td><td/><td/><td/><td/></tr><tr><td>40</td><td/><td/><td/><td/></tr><tr><td>30</td><td/><td/><td/><td/></tr><tr><td>20</td><td/><td/><td/><td/></tr><tr><td>10</td><td/><td/><td/><td/></tr><tr><td>0</td><td/><td/><td/><td/></tr></table>",
                "num": null,
                "type_str": "table"
            },
            "TABREF1": {
                "html": null,
                "text": "",
                "content": "<table><tr><td>Number of characters in a CFS</td><td>Number of CFSs</td><td>Percentage</td></tr><tr><td>1</td><td>3,877</td><td>12.40%</td></tr><tr><td>2</td><td>21,411</td><td>68.46%</td></tr><tr><td>3</td><td>3,742</td><td>11.96%</td></tr><tr><td>4</td><td>2,089</td><td>6.68%</td></tr><tr><td>5</td><td>115</td><td>0.37%</td></tr><tr><td>6</td><td>33</td><td>0.105%</td></tr><tr><td>7</td><td>7</td><td>0.022%</td></tr><tr><td>8</td><td>1</td><td>0.003%</td></tr><tr><td>9</td><td>0</td><td>0%</td></tr><tr><td>10</td><td>0</td><td>0%</td></tr></table>",
                "num": null,
                "type_str": "table"
            },
            "TABREF2": {
                "html": null,
                "text": "",
                "content": "<table><tr><td>Top n</td><td>Precision rate</td></tr><tr><td>1</td><td>87.32%</td></tr><tr><td>2</td><td>90.82%</td></tr><tr><td>3</td><td>92.66%</td></tr><tr><td>4</td><td>93.98%</td></tr><tr><td>5</td><td>94.98%</td></tr></table>",
                "num": null,
                "type_str": "table"
            },
            "TABREF3": {
                "html": null,
                "text": "",
                "content": "<table><tr><td>Top n</td><td>Precision rate</td></tr><tr><td>1</td><td>80.95%</td></tr><tr><td>2</td><td>82.58%</td></tr><tr><td>3</td><td>83.31%</td></tr><tr><td>4</td><td>83.77%</td></tr><tr><td>5</td><td>84.09%</td></tr></table>",
                "num": null,
                "type_str": "table"
            },
            "TABREF4": {
                "html": null,
                "text": "",
                "content": "<table><tr><td/><td/><td>Rule</td><td>Count</td><td>Probability</td></tr><tr><td colspan=\"2\">ADV \u2190</td><td>A</td><td>1</td><td>1</td></tr><tr><td colspan=\"2\">ADV \u2190</td><td>Dbaa</td><td>4</td><td>1</td></tr><tr><td>S</td><td>\u2190</td><td>Cbaa + S</td><td>15</td><td>0.9375</td></tr><tr><td>VP</td><td>\u2190</td><td>Cbaa + S</td><td>1</td><td>0.0625</td></tr><tr><td>NP</td><td>\u2190</td><td>NP + A + Nab</td><td>5</td><td>1</td></tr><tr><td>S</td><td>\u2190</td><td>Cbba + NP + VJ3</td><td>1</td><td>0.5</td></tr><tr><td>VP</td><td>\u2190</td><td>Cbba + NP + VJ3</td><td>1</td><td>0.5</td></tr><tr><td>NP</td><td>\u2190</td><td>NP + VG2 + NP</td><td>1</td><td>0.008</td></tr><tr><td>S</td><td>\u2190</td><td>NP + VG2 + NP</td><td>111</td><td>0.941</td></tr><tr><td>VP</td><td>\u2190</td><td>NP + VG2 + NP</td><td>6</td><td>0.051</td></tr></table>",
                "num": null,
                "type_str": "table"
            },
            "TABREF5": {
                "html": null,
                "text": "",
                "content": "<table><tr><td>TOP n</td><td>Accuracy</td></tr><tr><td>TOP 1</td><td>63.26%</td></tr><tr><td>TOP 2</td><td>78.57%</td></tr><tr><td>TOP 3</td><td>91.67%</td></tr><tr><td>TOP 4</td><td>97.62%</td></tr><tr><td>TOP 5</td><td>97.62%</td></tr></table>",
                "num": null,
                "type_str": "table"
            },
            "TABREF6": {
                "html": null,
                "text": "",
                "content": "<table><tr><td>TOP n</td><td>Accuracy</td></tr><tr><td>TOP 1</td><td>71.02%</td></tr><tr><td>TOP 2</td><td>84.53%</td></tr><tr><td>TOP 3</td><td>92.86%</td></tr><tr><td>TOP 4</td><td>96.43%</td></tr><tr><td>TOP 5</td><td>98.81%</td></tr></table>",
                "num": null,
                "type_str": "table"
            }
        }
    }
}