{
    "paper_id": "O13-5003",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T08:03:52.958835Z"
    },
    "title": "Correcting Serial Grammatical Errors based on N-grams and Syntax",
    "authors": [
        {
            "first": "Jian-Cheng",
            "middle": [],
            "last": "Wu",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National Tsing Hua University",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Jim",
            "middle": [],
            "last": "Chang",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National Tsing Hua University",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Jason",
            "middle": [
                "S"
            ],
            "last": "Chang",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National Tsing Hua University",
                "location": {}
            },
            "email": "jason.jschang@gmail.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "In this paper, we present a new method based on machine translation for correcting serial grammatical errors in a given sentence in learners' writing. In our approach, translation models are generated to translate the input into a grammatical sentence. The method involves automatically learning two translation models that are based on Web-scale n-grams. The first model translates trigrams containing serial preposition-verb errors into correct ones. The second model is a back-off model, used in the case where the trigram is not found in the training data. At run-time, the phrases in the input are matched and translated, and ranking is performed on all possible translations to produce a corrected sentence as output. Evaluation on a set of sentences in a learner corpus shows that the method corrects serial errors reasonably well. Our methodology exploits the state-of-the art in machine translation, resulting in an effective system that can deal with many error types at the same time.",
    "pdf_parse": {
        "paper_id": "O13-5003",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "In this paper, we present a new method based on machine translation for correcting serial grammatical errors in a given sentence in learners' writing. In our approach, translation models are generated to translate the input into a grammatical sentence. The method involves automatically learning two translation models that are based on Web-scale n-grams. The first model translates trigrams containing serial preposition-verb errors into correct ones. The second model is a back-off model, used in the case where the trigram is not found in the training data. At run-time, the phrases in the input are matched and translated, and ranking is performed on all possible translations to produce a corrected sentence as output. Evaluation on a set of sentences in a learner corpus shows that the method corrects serial errors reasonably well. Our methodology exploits the state-of-the art in machine translation, resulting in an effective system that can deal with many error types at the same time.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Many people are learning English as a second or foreign language: it is estimated there are 375 million English as a Second Language (ESL) and 750 million English as a Foreign Language (EFL) learners around the world, according to Graddol (2006) . Three times as many people speak English as a second language as there are native speakers of English. Nevertheless, non-native speakers tend to make many kinds of errors in their writing, due to the influence of their native languages (e.g., Chinese or Japanese). Therefore, automatic grammar checkers are needed to help learners improve their writing. In the long run, automatic grammar checkers also can help non-native writers learn from the corrections and The LDOCE shows that grammatical errors in learners' writing can either appear in isolation (e.g., the wrong proposition in \"I want to improve my ability of [in] English.\") or consecutively (e.g., the unnecessary preposition immediately followed by a wrong verb form in \"These machines are destroying our ability of thinking [to think].\"). We refer to two or more errors appearing consecutively as serial errors.",
                "cite_spans": [
                    {
                        "start": 231,
                        "end": 245,
                        "text": "Graddol (2006)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "Previous works on grammar checkers either have focused on handling one common type of error exclusively or handling it independently in a sequence of errors. Nevertheless, when an error is not isolated, it is difficult to correct the error when another related error is in the immediate context. In other words, when serial errors occur in a sentence, a grammar checker needs to correct the first error in the presence of the second error (or vice-versa) , making correction difficult to achieve. These errors could be corrected more effectively if the corrector recognized them as serial errors and attempted to correct the serial errors at once.",
                "cite_spans": [
                    {
                        "start": 439,
                        "end": 454,
                        "text": "(or vice-versa)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "Consider an erroneous sentence, \"I have difficulty to understand English.\" The correct sentence should be \"I have difficulty in understanding English.\" It is hard to correct these two errors one by one, since the errors are dependent on each other. Intuitively, by identifying \"difficulty to understand\" as containing serial errors and correcting it to \"difficulty in understanding,\" we can handle this kind of problem more effectively. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "We present a new system that automatically generates a statistical machine translation model based on a trigram containing a word followed by preposition and verb or by an infinitive in web-scale n-gram data. At run-time, the system generates multiple possible trigrams by changing a word's lexical form and preposition in the original trigram. Example trigrams generated for \"difficulty to understand\" are shown in Figure 1 . The system then ranks all of these generated sentences and use the highest ranking sentence as suggestion.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 416,
                        "end": 424,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "33",
                "sec_num": null
            },
            {
                "text": "The rest of the paper is organized as follows. We review the related work in the next section. Then, we describe our method for automatically learning to translate a sentence that may contain preposition-verb serial errors into a grammatical sentence (Section 3). In our evaluation, we describe how to measure the precision and recall of producing grammatical sentences (Section 4) in an automatic evaluation (Section 5) over a set of marked sentences in a learner corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "33",
                "sec_num": null
            },
            {
                "text": "Grammatical Error Detection (GED) for language learners has been an area of active research. GED involves pinpointing some words in a given sentence as ungrammatical and offering correction if necessary. Common errors in learners' writing include misuse of articles, prepositions, noun number, and verb form. Recently, the state-of-the-art research on GED has been surveyed by Leacock et al. (2010) . In our work, we address serial errors in English learners' writing which are simultaneously related to the preposition and verb form, an aspect that has not been dealt with in most GED research. We also consider the issues of broadening the training data for better coverage and coping with data sparseness when unseen events happen.",
                "cite_spans": [
                    {
                        "start": 377,
                        "end": 398,
                        "text": "Leacock et al. (2010)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2."
            },
            {
                "text": "Although there are over a billion people estimated to be using or learning English as a second or foreign language, common English proofreading tools do not target specifically the most common errors made by second language learners. Many widely-used grammar checking tools are based on pattern matching and at least some linguistic analysis, based on hand-coded grammar rules (Leacock et al., 2010) . In the 1990s, data-driven, statistical methods began to emerge. Statistical systems have the advantage of being more intolerant of ill-form, interlanguage, and unknown words produced by the learners than the rule-based systems. Knight and Chander (1994) proposed a method based on a decision tree classifier to correct article errors in the output of machine translation systems. Articles were selected based on contextual similarity to the same noun phrase in the training data. Atwell (1987) used a language model of a language to represent correct usage for that language. He used the language model to detect errors that tend to have a low language model score.",
                "cite_spans": [
                    {
                        "start": 377,
                        "end": 399,
                        "text": "(Leacock et al., 2010)",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 630,
                        "end": 655,
                        "text": "Knight and Chander (1994)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 882,
                        "end": 895,
                        "text": "Atwell (1987)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2."
            },
            {
                "text": "More recently, researchers have looked at grammatical errors related to the most common prepositions (9 to 34 prepositions, depending on the percentage of coverage). Eeg-Olofsson and Knuttson (2003) described a rule-based system to detect preposition errors for learners of Swedish. Based on part-of-speech tags assigned by a statistical trigram tagger, 31 rules were written for very specific preposition errors. Tetreault and Chodorow 2008, Gamon et al. (2008) , and Gamon (2010) developed statistical classifiers for preposition error detection. De Felice and Pulman (2007) trained a voted perceptron classifier on features of grammatical relations and WordNet categories in an automatic parse of a sentence. Han et al. (2010) found that a preposition error detection model trained on correct and incorrect usage in a learner corpus works better than using well-formed text in a reference corpus.",
                "cite_spans": [
                    {
                        "start": 443,
                        "end": 462,
                        "text": "Gamon et al. (2008)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 712,
                        "end": 729,
                        "text": "Han et al. (2010)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2."
            },
            {
                "text": "In the research area of detecting verb form errors, Heidorn (2000) and Bender et al. (2004) proposed methods based on parse tree and error templates. Lee and Seneff (2008) focused on three cases of verb form errors: subject-verb agreement, auxiliary agreement, and verb complement. The first two types are isolated verb form errors, while the third type may involve serial errors related to preposition and verb. Izumi et al. (2003) proposed a maximum entropy model, using lexical and POS features, to recognize a variety of errors, including verb form errors. Lee and Seneff (2008) used a database of irregular parsing caused by verb form misuse to detect and correct verb form errors. In addition, they also used the Google n-gram corpus to filter out improbable detections. Both Izumi et al. (2003) and Lee and Seneff (2008) obtained a high error correction rate, but they did not report serial errors separately, making comparison with our approach is impossible.",
                "cite_spans": [
                    {
                        "start": 52,
                        "end": 66,
                        "text": "Heidorn (2000)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 71,
                        "end": 91,
                        "text": "Bender et al. (2004)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 150,
                        "end": 171,
                        "text": "Lee and Seneff (2008)",
                        "ref_id": null
                    },
                    {
                        "start": 413,
                        "end": 432,
                        "text": "Izumi et al. (2003)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 561,
                        "end": 582,
                        "text": "Lee and Seneff (2008)",
                        "ref_id": null
                    },
                    {
                        "start": 782,
                        "end": 801,
                        "text": "Izumi et al. (2003)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 806,
                        "end": 827,
                        "text": "Lee and Seneff (2008)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2."
            },
            {
                "text": "In a study more closely related to our work, Alla Rozovskaya and Dan Roth (2013) introduced a joint learning scheme to jointly resolve pairs of interacting errors related to subject-verb and article-noun agreements. They showed that the overall error correction rate is improved by learning a model that jointly learns each of these interacting errors.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2."
            },
            {
                "text": "Correcting serial errors (e.g., \"I have difficulty to understand English.\") one error at a time in the traditional way may not work very well, but previous works typically have dealt with one type of error at a time. Unfortunately, it may be difficult to correct an error in the context of another error, because an error could only be corrected successfully within the correct context. Besides, such systems need to correct a sentence multiple times, which is time-consuming and more error-prone. To handle serial errors, a promising approach is to treat serial errors together as one single error.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method",
                "sec_num": "3."
            },
            {
                "text": "We focus on correcting serial errors in learners' writing using the context of trigrams in a sentence. We train a statistical machine translation model to correct learners' errors of the types of a content word followed by a preposition and a verb using web-scale n-grams.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problem Statement",
                "sec_num": "3.1"
            },
            {
                "text": "We are given a sentence S = w 1 , w 2 , \u2026, w n , and web-scale n-gram, webgram. Our goal is to train two statistical machine translation model TM and back-off model TM bo to correct learners' writing. At run-time, trigrams (w i , w i+1 , w i+2 ) in S (i =1, n-2) are matched and replaced using TM and the back-off model TM bo to translate S into a correct sentence T.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problem Statement:",
                "sec_num": null
            },
            {
                "text": "In the rest of this section, we describe our solution to this problem. First, we describe the strategy to train TM (Section 3.2) and TM bo (Section 3.3) using webgrams. Finally, we show how our system corrects a sentence at run-time using TM, TM bo , and a language model LM (Section 3.4).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problem Statement:",
                "sec_num": null
            },
            {
                "text": "We attempt to identify trigrams that fit the pattern of serial errors and correction we are dealing with in webngram, and we group the selected trigrams by their content words and verb lemmas. Our learning process is shown in Figure 2 . We assume that, within each group, the low frequency trigrams are probably errors that should be replaced by the most frequent trigram: a one construction per collocation constraint. For example, when expressing \"difficulty\" and \"to understand,\" any NPV constructs with low frequency (e.g., \"difficulty for understanding\" and \"difficulty about understanding\") are erroneous forms of the most frequent trigram \"difficulty in understanding\". Therefore, we generate TM with such phrase to phrase translations accordingly.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 226,
                        "end": 234,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Generating TM",
                "sec_num": "3.2"
            },
            {
                "text": "(1) Select trigrams related to serial errors and corrections from webngram (Section 3.2.1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generating TM",
                "sec_num": "3.2"
            },
            {
                "text": "(2) Group the selected trigrams by the first and last word in the trigrams (Section 3.2.2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generating TM",
                "sec_num": "3.2"
            },
            {
                "text": "(3) Generate a phrase table for the statistical machine translation modelsfor each group (Section 3.2.3) Figure 2 . Outline of the process used to generate TM.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 105,
                        "end": 113,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Generating TM",
                "sec_num": "3.2"
            },
            {
                "text": "We select four types of trigrams (t 1 , t 2 , t 3 ) from webngram, including noun-prep-verb (NPV), verb-prep-verb (VPV), adj-prep-verb (APV), and adverb-prep-verb (RPV). We then annotate the trigrams with types and lemmas of content words t 1 and t 3 (e.g., \"accused of being 230633\" becomes \"VPV, accuse be, accused of being 230633). Figure 3 shows some sample annotated trigrams. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 335,
                        "end": 343,
                        "text": "Figure 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Select and Annotate Trigrams",
                "sec_num": "3.2.1"
            },
            {
                "text": "We then group the trigrams by types, the first words, and the verb lemmas. See Figure 4 for a sample VPV group of trigrams. This step should bring together the trigrams containing serial errors and their correction. Note that we assume certain serial errors will have a correction of the same length here, which is true in most cases.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 79,
                        "end": 87,
                        "text": "Figure 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Group Trigrams",
                "sec_num": "3.2.2"
            },
            {
                "text": "For each group of annotated trigrams, we then generate phrase and translation pairs with",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generate Rules",
                "sec_num": "3.2.3"
            },
            {
                "text": "Correcting Serial Grammatical Errors based on N-grams and Syntax 37 probability as follows. Recall that we assume that the higher the count of the trigram, the more likely the trigram is to be correct. So, we generate \"l 1 , l 2 , l 3 ||| h 1 , h 2 , h 3 ||| p ,\" where h 1 , h 2 , h 3 is the trigram with the highest frequency count; l 1 , l 2 , l 3 is one of the trigrams with lower frequency count; and p denotes the probability of l 1 , l 2 , l 3 translating into h 1 , h 2 , h 3 . We define p=(highest frequency count)/(group frequency count).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generate Rules",
                "sec_num": "3.2.3"
            },
            {
                "text": "In addition to the surface-level translation model TM, we also build a back-off model as a way of coping with cases where the trigram (t 1 , t 2 , t 3 ) is unseen in TM. The idea is to assume the complement (t 2 , t 3 ) of t 1 tends to be in a certain syntactic form regardless of the verb t 3 , as dictionaries typically would describe the usage of \"accuse\" in terms of \"accuse somebody of doing something.\" Our learning process for TM bo is shown in Figure 9 . (1) Select trigrams with specific forms from Web 1T n-gram",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 452,
                        "end": 460,
                        "text": "Figure 9",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Generating TM bo",
                "sec_num": "3.3"
            },
            {
                "text": "(2) Reform trigrams W3 to W3's lexical",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generating TM bo",
                "sec_num": "3.3"
            },
            {
                "text": "(3) Group the selected trigrams using the first word (4) Group the selected trigrams using the first word Figure 9 . Outline of the process used to generate TM bo",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 106,
                        "end": 114,
                        "text": "Figure 9",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Generating TM bo",
                "sec_num": "3.3"
            },
            {
                "text": "First, we generalize the annotated trigrams (see Section 3.2.1) by replacing the verb form with its part of speech designator (i.e., replace \"accuse\" with VERB, and replace \"accusing\" with VERB-ing).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generalize Trigrams",
                "sec_num": "3.3.1"
            },
            {
                "text": "In this step, we group the identically transformed trigrams and sum up the frequency counts. See Figure 6 for sample results.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 97,
                        "end": 105,
                        "text": "Figure 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Sum Counts",
                "sec_num": "3.3.2"
            },
            {
                "text": "We then group the trigrams by type and by the first word (context). See Figure 7 for a sample \"accuse P V\" group of trigrams.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 72,
                        "end": 80,
                        "text": "Figure 7",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Group Trigrams of the Same Context",
                "sec_num": "3.3.3"
            },
            {
                "text": "For each group of generalized trigrams, we then generate the phrase and translation pair with the probability as described in Section 3.2.3. See Figure 8 for a sample of back-off translations.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 145,
                        "end": 153,
                        "text": "Figure 8",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Generate Rules",
                "sec_num": "3.3.4"
            },
            {
                "text": "If one loads TM and TM bo into memory before the decoding process (generating, ranking, and selecting translations), that would take up a lot of memory and slow the process of matching phrases to find translations. Therefore, we generate phrase translations on the fly for the given sentence before decoding. Our process of decoding to correct grammatical errors is shown in Figure 10 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 375,
                        "end": 384,
                        "text": "Figure 10",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Run-time Correction",
                "sec_num": "3.4"
            },
            {
                "text": "(1) Tag the input sentence with part of speech information in order to find trigrams that fit the type of serial errors",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Run-time Correction",
                "sec_num": "3.4"
            },
            {
                "text": "(2) Search TM and generate translations for the input phrases",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Run-time Correction",
                "sec_num": "3.4"
            },
            {
                "text": "(3) Search TM bo and generate translations for the input phrases (4) Run statistical machine translation Figure 10 . Outline of the process used to correct the sentence at run-time",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 105,
                        "end": 114,
                        "text": "Figure 10",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Run-time Correction",
                "sec_num": "3.4"
            },
            {
                "text": "We use a POS tagger to tag the input sentence, and we identify trigrams (t 1 , t 2 , t 3 ) consisting of a content word followed by a preposition and verb (belonging to the NPV, VPV, APV, or RPV types we described in Section 3.2.1).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tag the Input Ssentence",
                "sec_num": "3.4.1"
            },
            {
                "text": "We then search for the group of trigrams (indexed by POS type and t 1 , t 3 ) in TM containing the trigrams (t 1 , t 2 , t 3 ), found in Step 3.4.1. We find the trigram (h 1 , h 2 , h 3 ) with the highest count in that group. With that, we can dynamically add the translation, \"t 1 , t 2 , t 3 ||| h 1 , h 2 , h 3 ||| 1.0\" to the cache of TM in memory (e.g., \"difficulty to understand ||| difficulty in understanding ||| 1.0\") to speed up the subsequent decoding process.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Search TM and Generate Translation Rules",
                "sec_num": "3.4.2"
            },
            {
                "text": "Just like in 3.4.2, we use t 1 and its part of speech p 1 to search TM bo for the generalized trigram group that matches (t 1 , t 2 , t 3 ). We then find the most frequent generalized trigram (h 1 , h 2 , h 3 ) in that group. After that, we need to specialize (h 1 , h 2 , h 3 ) for t 3 by replacing h 3 with the verb form of t 3 for the designator h 3 , resulting in (h 1 , h 2 , h' 3 ). Consider the generalized trigram \"accused of VERB-ing\" and t 3 = \"murder,\" the specialized trigram would be \"accused of murdering.\" Finally, we add \"t 1 , t 2 , t 3 ||| h 1 , h 2 , h' 3 ||| 1.0\" (e.g., \"accused to murder ||| accused of murdering ||| 1.0\") to the cache of TM in memory for the same purpose of speeding up decoding.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Search TM bo and Generate Translation Rules",
                "sec_num": "3.4.3"
            },
            {
                "text": "Finally, we run a monotone decoder with the cache TM and a language model LM. By default, any word not in TM will be translated into itself.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Decode the Input Sentence without Reordering",
                "sec_num": "3.4.4"
            },
            {
                "text": "Our system DeeD (Don'ts-to-Do's English-English Decoder) was designed to correct preposition-verb serial errors in a given sentence written by language learners. Nevertheless, since large-scale learner corpora annotated with errors are not widely available, we have resorted to Web scale n-grams to train our system, while using a small annotated learner corpus to evaluate its performance. In this section, we first present the details of training DeeD for the evaluation (Section 4.1). Then, Section 4.2 lists the grammar checking systems that we used in our evaluation and comparison. Section 4.3 introduces the evaluation metrics for the performance of the systems, and details of the sentences evaluated and performance judgments are reported in Section 4.4.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setting",
                "sec_num": "4."
            },
            {
                "text": "We used the Web 1T 5-grams (Brants & Franz, 2006) to train our systems. Web 1T 5-grams is a collection that contains 1 to 5 grams calculated from a 1 trillion words of public Web pages provided by Google through the Linguistic Data Consortium (LDC). There are some ten million unigrams, 3 hundred million bigrams, and around 1 billion trigrams to fivegrams. We obtained 104,537,560 trigrams, containing only words in the General Service List (West, 1954) and Academic Word List (Coxhead, 1999) . These trigrams were further reduced to 4,486,615 entries that fit the patterns of four types of serial errors and corrections: an adjective, noun, verb, or adverb followed by a preposition (or infinitive to) and a verb.",
                "cite_spans": [
                    {
                        "start": 27,
                        "end": 49,
                        "text": "(Brants & Franz, 2006)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 442,
                        "end": 454,
                        "text": "(West, 1954)",
                        "ref_id": null
                    },
                    {
                        "start": 478,
                        "end": 493,
                        "text": "(Coxhead, 1999)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training DeeD",
                "sec_num": "4.1"
            },
            {
                "text": "To determine the part of speech of words in the n-gram, we used the most frequent tag of a given word in BNC to tag words in the trigram.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training DeeD",
                "sec_num": "4.1"
            },
            {
                "text": "Once we have trained DeeD as described in Section 3, we evaluated its performance using two datasets. The first dataset contained sentences written by an ESL or EFL learner with the serial errors with corrections. The second dataset contained mostly correct sentences in British National Corpus (BNC) with mostly published works written by native, expert speakers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Checking Systems Compared",
                "sec_num": "4.2"
            },
            {
                "text": "The first testset is a subset of the Cambridge Learner Corpus, the CLC First Certificate Exam Dataset (CLC/FCE). This dataset contains 1,244 exam essays written by students who took the Cambridge ESOL First Certificate in English (FCE) examination in 2000 and 2001. For each exam script, the CLC/FCE Dataset includes the original text annotated with error, type, and correction. From the 34,893 sentences in the 1,244 exam essays, we extracted 118 sentences that contained the serial errors in question. Other types of errors were replaced with corrections in these sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Checking Systems Compared",
                "sec_num": "4.2"
            },
            {
                "text": "The second testset is a random sample of 1000 sentences containing trigrams that fit the error patterns also used to evaluate our system. The four system and testset combinations evaluated are:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Checking Systems Compared",
                "sec_num": "4.2"
            },
            {
                "text": "-Learner corpus without back-off model (LRN): The proposed system using only the surface-level translation model was tested on the first testset obtained from a learner corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Checking Systems Compared",
                "sec_num": "4.2"
            },
            {
                "text": "-Learner corpus with back-off model (LRN-BO): The proposed system with the additional back-off model was tested on the first testset obtained from a learner corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Checking Systems Compared",
                "sec_num": "4.2"
            },
            {
                "text": "-BNC without back-off model (BNC): The proposed system using only the surface-level translation model was tested on the first testset obtained from the British National Corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Checking Systems Compared",
                "sec_num": "4.2"
            },
            {
                "text": "-BNC with back-off model (BNC-BO): The proposed system without the back-off model was tested on the first testset obtained from the British National Corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar Checking Systems Compared",
                "sec_num": "4.2"
            },
            {
                "text": "English correction systems usually are compared based on the quality and completeness of correction suggestions. We measured the quality using the metrics of precision, recall, and error rate. For the first testset, we measured precision and recall rates while, for the second Correcting Serial Grammatical Errors based on N-grams and Syntax 41 testset, we measured the error rate (false alarms). We define precision and recall as:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metrics",
                "sec_num": "4.3"
            },
            {
                "text": "Precision = C/S (1) Recall = C/N (2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metrics",
                "sec_num": "4.3"
            },
            {
                "text": "where N is the number of serial errors, S is the number of corrections our system found, and C is the number of corrections where our system was correct. We also computed the corresponding F-score. Error rate was used in the second dataset described above, and we define the error rate as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metrics",
                "sec_num": "4.3"
            },
            {
                "text": "Error Rate = E/T (3)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metrics",
                "sec_num": "4.3"
            },
            {
                "text": "where E is the number of corrections our system found (which are all wrong, since we were testing sentences with no errors) and T is the number of sentences tested.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metrics",
                "sec_num": "4.3"
            },
            {
                "text": "In this section, we report the results of the evaluation using the dataset and environment mentioned in the previous section. During this evaluation, 118 sentences with serial errors were used to evaluate the two systems: LRN and LRN-BO. Table 1 shows the average precision, recall, and F-score of LRN and LRN-BO. As we can see, LRN performs better in precision, which is reasonable since the back-off model corrects errors without the information of the verb involved. LRN-BO performs better in recall because the back-off model applies when the original model does not cover the case. Overall, LRN-BO performs better in F-score. During this evaluation, 1000 sentences in BNC that fit the pattern of serial errors but in fact do not contain errors, were used to evaluate the same two systems: BNC and BNC-BO. Table 2 shows the average error rate of BNC and BNC-BO. It is not surprising that BNC performs better than BNC-BO, since BNC always makes fewer corrections than BNC-BO. Nevertheless, BNC-BO is only slightly worse than BNC.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 238,
                        "end": 245,
                        "text": "Table 1",
                        "ref_id": "TABREF3"
                    },
                    {
                        "start": 810,
                        "end": 817,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation Results",
                "sec_num": "5."
            },
            {
                "text": "Many avenues exist for future research and improvement of our system. For example, spell checking can be done before correcting grammatical errors. Context used to \"translate\" the serial errors can be enlarged from one word to two or more words (immediately or closely) preceding the errors. We can also add one more level of backing off for the context word preceding the serial errors: from surface word to lemma or from a proper name to named entity type (PERSON, PLACE, ORGANIZATION) . We also can improve the accuracy of part of speech tagging used in applying the back-off model. Additionally, an interesting direction to explore is extending this approach to handle other types of isolated and serial errors commonly found in learners' writing. Yet another direction of research would be to consider corrections resulting in more or fewer words (e.g., one less word as in *spend time for work vs. spend time working). Or, we could also combine n-gram statistics from different types of corpora: a Web-scale corpus, a reference corpus, and a learner corpus. For example, the translation probability can be determined via statistical classifier training on the learner corpus with features extracted from n-grams of multiple corpora.",
                "cite_spans": [
                    {
                        "start": 458,
                        "end": 487,
                        "text": "(PERSON, PLACE, ORGANIZATION)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "6."
            },
            {
                "text": "In summary, we have introduced a new method for correcting serial errors in a given sentence in learners' writing. In our approach, a statistical machine translation model is generated to attempt to translate the given sentence into a grammatical sentence. The method involves automatically learning two translation models based on Web-scale n-grams. The first model translates trigrams containing serial preposition-verb errors into correct ones. The second model is a back-off model for the first model, used in the case where the trigram is not found in the training data. At run-time, the phrases in the input are matched using the translation model and are translated before ranking is performed on all possible translation sentences generated. Evaluation on a set of sentences in a learner corpus shows that the method corrects serial errors reasonably well. Our methodology exploits the state of the art in machine translation, resulting in an effective system that can deal with serial errors at the same time.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "6."
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "How to detect grammatical errors in a text without parsing it",
                "authors": [
                    {
                        "first": "E",
                        "middle": [
                            "S"
                        ],
                        "last": "Atwell",
                        "suffix": ""
                    }
                ],
                "year": 1987,
                "venue": "Proceedings of the Third Conference of the European Association for Computational Linguistics (EACL)",
                "volume": "",
                "issue": "",
                "pages": "38--45",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Atwell, E. S. (1987). How to detect grammatical errors in a text without parsing it. In Proceedings of the Third Conference of the European Association for Computational Linguistics (EACL), 38-45, Copenhagen.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Arboretum: Using a precision grammar for grammar checking in CALL",
                "authors": [
                    {
                        "first": "E",
                        "middle": [
                            "M"
                        ],
                        "last": "Bender",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Flickinger",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Oepen",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Baldwin",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the Integrating Speech Tech-nology in Learning/Intelligent Computer Assisted Language Learning Correcting Serial Grammatical Errors based on N-grams and Syntax 43 (inSTIL/ICALL) Symposium: NLP and Speech Technologies in Advanced Language Learning Systems",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bender, E. M., Flickinger, D., Oepen, S., & Baldwin, T. (2004). Arboretum: Using a precision grammar for grammar checking in CALL. In Proceedings of the Integrating Speech Tech-nology in Learning/Intelligent Computer Assisted Language Learning Correcting Serial Grammatical Errors based on N-grams and Syntax 43 (inSTIL/ICALL) Symposium: NLP and Speech Technologies in Advanced Language Learning Systems, Venice.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "The Google Web 1T 5-gram corpus version 1.1. LDC2006T13",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Brants",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Franz",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Brants, T., & Franz, A. (2006). The Google Web 1T 5-gram corpus version 1.1. LDC2006T13.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "A new academic word list",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Coxhead",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "TESOL quarterly",
                "volume": "34",
                "issue": "2",
                "pages": "213--238",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Coxhead, A. (2000). A new academic word list. TESOL quarterly, 34(2), 213-238.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Automatic detection of preposition errors in learner writing",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "De Felice",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [
                            "G"
                        ],
                        "last": "Pulman",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "CALICO Journal",
                "volume": "26",
                "issue": "3",
                "pages": "512--528",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "De Felice, R., & Pulman, S. G. (2009). Automatic detection of preposition errors in learner writing. CALICO Journal, 26(3), 512-528.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Automatic grammar checking for second language learners -the use of prepositions",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Eeg-Olofsson",
                        "suffix": ""
                    },
                    {
                        "first": "O",
                        "middle": [],
                        "last": "Knuttson",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the 14th Nordic Conference in Computational Linguistics (NoDaLiDa)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eeg-Olofsson, E., & Knuttson, O. (2003). Automatic grammar checking for second language learners -the use of prepositions. In Proceedings of the 14th Nordic Conference in Computational Linguistics (NoDaLiDa).",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Using mostly native data to correct errors in learners' writing",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Gamon",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the Eleventh Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gamon, M. (2010). Using mostly native data to correct errors in learners' writing. In Proceedings of the Eleventh Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL), Los Angeles.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Using contextual speller techniques and language modeling for ESL error correction",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Gamon",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Gao",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Brockett",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Klementiev",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [
                            "B"
                        ],
                        "last": "Dolan",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Be-Lenko",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Vanderwende",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of the International Joint Conference on Natural Language Processing (IJCNLP)",
                "volume": "",
                "issue": "",
                "pages": "449--456",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gamon, M., Gao, J., Brockett, C., Klementiev, A., Dolan, W. B., Be-lenko, D., & Vanderwende, L. (2008). Using contextual speller techniques and language modeling for ESL error correction. In Proceedings of the International Joint Conference on Natural Language Processing (IJCNLP), 449-456, Hyderabad, India.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "English next: Why global English may mean the end of 'English as a Foreign Language",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Graddol",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Graddol, D. (2006). English next: Why global English may mean the end of 'English as a Foreign Language.' UK: British Council.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Using error-annotated ESL data to develop an ESL error correction system",
                "authors": [
                    {
                        "first": "N.-R",
                        "middle": [],
                        "last": "Han",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Tetreault",
                        "suffix": ""
                    },
                    {
                        "first": "S.-H",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    },
                    {
                        "first": "J.-Y",
                        "middle": [],
                        "last": "Ha",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Han, N.-R., Tetreault, J., Lee, S.-H., & Ha, J.-Y. (2010). Using error-annotated ESL data to develop an ESL error correction system. In Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC), Malta.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Intelligent writing assistance",
                "authors": [
                    {
                        "first": "G",
                        "middle": [
                            "E"
                        ],
                        "last": "Heidorn",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Handbook of Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "181--207",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Heidorn, G. E. (2000). Intelligent writing assistance. In R. Dale, H. Moisl, and H. Somers, editors, Handbook of Natural Language Processing, 181-207. Marcel Dekker, New York.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Automatic error detection in the Japanese learners' English spoken data",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Izumi",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Uchimoto",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Saiga",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Supnithi",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Isahara",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Companion Volume to the Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL)",
                "volume": "",
                "issue": "",
                "pages": "145--148",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Izumi, E., Uchimoto, K., Saiga, T., Supnithi, T., & Isahara, H. (2003). Automatic error detection in the Japanese learners' English spoken data. In Companion Volume to the Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL), 145-148.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Automated postediting of documents",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Chander",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proceedings of the Twelfth National Conference on Artificial Intelligence (AAAI)",
                "volume": "",
                "issue": "",
                "pages": "779--784",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Knight, K., & Chander, I. (1994). Automated postediting of documents. In Proceedings of the Twelfth National Conference on Artificial Intelligence (AAAI), 779-784, Seattle.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Automated grammatical error detection for language learners",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Leacock",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Synthesis Lectures on Human Language Technologies",
                "volume": "3",
                "issue": "1",
                "pages": "1--134",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Leacock, C. et al. 2010. Automated grammatical error detection for language learners. Synthesis Lectures on Human Language Technologies, 3(1), 1-134.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Automatic grammar correction for second-language learners",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Seneff",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of the Ninth International Conference on Spoken Language Processing",
                "volume": "",
                "issue": "",
                "pages": "1978--1981",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lee, J., & Seneff, S. (2006). Automatic grammar correction for second-language learners. In Proceedings of the Ninth International Conference on Spoken Language Processing (Interspeech), 1978-1981.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Human evaluation of article and noun number usage: Influences of context and construction variability",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Tetreault",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Chodorow",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proceedings of the Third Linguistic Annotation Workshop",
                "volume": "",
                "issue": "",
                "pages": "60--63",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lee, J., Tetreault, J., & Chodorow, M. (2009b). Human evaluation of article and noun number usage: Influences of context and construction variability. In Proceedings of the Third Linguistic Annotation Workshop (LAW), 60-63, Suntec, Singapore.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Joint Learning and Inference for Grammatical Error Correction",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Rozovskaya",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Roth",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "791--802",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Rozovskaya, A., & Roth, D. (2013). Joint Learning and Inference for Grammatical Error Correction, In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, 791-802.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "A General Service List of English Words",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "West",
                        "suffix": ""
                    }
                ],
                "year": 1953,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "West, M. (1953). A General Service List of English Words. London: Longman, 1953.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "A New Dataset and Method for Automatically Grading ESOL Texts",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Yannakoudakis",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Briscoe",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Medlock",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistic",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yannakoudakis, H., Briscoe, T., & Medlock, B. (2011). A New Dataset and Method for Automatically Grading ESOL Texts, In Proceedings of the Annual Meeting of the Association for Computational Linguistic.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "text": "Example session of correcting the sentence, \"I have difficulty to understand English.\"Correcting Serial Grammatical Errors based on N-grams and Syntax",
                "uris": null,
                "num": null
            },
            "FIGREF1": {
                "type_str": "figure",
                "text": "Sample phrase translations for a trigram group",
                "uris": null,
                "num": null
            },
            "FIGREF2": {
                "type_str": "figure",
                "text": "Sample trigram group accused to VERB ||| accused of VERB-ing ||| 0.47 accused of VERB ||| accused of VERB-ing ||| 0.47",
                "uris": null,
                "num": null
            },
            "FIGREF3": {
                "type_str": "figure",
                "text": "Sample back-off translations",
                "uris": null,
                "num": null
            },
            "TABREF3": {
                "type_str": "table",
                "text": "",
                "content": "<table><tr><td/><td>F-Score</td><td>Precision</td><td>Recall</td></tr><tr><td>LRN</td><td>0.43</td><td>0.71</td><td>0.31</td></tr><tr><td>LRN-BO</td><td>0.45</td><td>0.68</td><td>0.33</td></tr><tr><td colspan=\"4\">Table 2. Average error rate of BNC and BNC-BO</td></tr><tr><td/><td colspan=\"2\">Error Rate</td><td/></tr><tr><td>BNC</td><td>0.10</td><td/><td/></tr><tr><td>BNC-BO</td><td>0.13</td><td/><td/></tr></table>",
                "html": null,
                "num": null
            }
        }
    }
}