File size: 69,885 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
{
    "paper_id": "I11-1013",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:31:23.560690Z"
    },
    "title": "Handling verb phrase morphology in highly inflected Indian languages for Machine Translation",
    "authors": [
        {
            "first": "Ankur",
            "middle": [],
            "last": "Gandhe",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "IBM Research",
                "location": {
                    "country": "India"
                }
            },
            "email": ""
        },
        {
            "first": "Rashmi",
            "middle": [],
            "last": "Gangadharaiah",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "IBM Research",
                "location": {
                    "country": "India"
                }
            },
            "email": "rashgang@in.ibm.com"
        },
        {
            "first": "Karthik",
            "middle": [],
            "last": "Visweswariah",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "IBM Research",
                "location": {
                    "country": "India"
                }
            },
            "email": ""
        },
        {
            "first": "Ananthakrishnan",
            "middle": [],
            "last": "Ramanathan",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "IBM Research",
                "location": {
                    "country": "India"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "The phrase based systems for machine translation are limited by the phrases that they see during the training. For highly inflected languages, it is uncommon to see all the forms of a word in the parallel corpora used during training. This problem is amplified for verbs in highly inflected languages where the correct form of the word depends on factors like gender, number and tense aspect. We propose a solution to augment the phrase table with all possible forms of a verb for improving the overall accuracy of the MT system. Our system makes use of simple stemmers and easily available monolingual data to generate new phrase table entries that cover the different variations seen for a verb. We report significant gains in BLEU for English to Hindi translation.",
    "pdf_parse": {
        "paper_id": "I11-1013",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "The phrase based systems for machine translation are limited by the phrases that they see during the training. For highly inflected languages, it is uncommon to see all the forms of a word in the parallel corpora used during training. This problem is amplified for verbs in highly inflected languages where the correct form of the word depends on factors like gender, number and tense aspect. We propose a solution to augment the phrase table with all possible forms of a verb for improving the overall accuracy of the MT system. Our system makes use of simple stemmers and easily available monolingual data to generate new phrase table entries that cover the different variations seen for a verb. We report significant gains in BLEU for English to Hindi translation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Data driven approaches have become widely popular as they use little or no language specific knowledge. The main drawback of these approaches is the need for large amounts of data. (Koehn et al., 2003) have shown that the quality of the translations produced by data driven approaches mainly depends on the amount of parallel data available for the language-pair under consideration. Creation of a large bilingual corpus is expensive and time consuming if high quality manual translations are required. Hence, building MT systems for language-pairs with limited amounts of data is a big challenge.",
                "cite_spans": [
                    {
                        "start": 181,
                        "end": 201,
                        "text": "(Koehn et al., 2003)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Approaches have been suggested in the past to mine the world-wide-web to automatically obtain large amounts of parallel data. For example, news articles in two different languages describing the same event can be sentence-aligned to obtain a parallel corpus. Although this approach has shown improvements, this cannot be extended to languages that have little or no data on the world wide web.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The situation gets worse for languages that are rich in morphology. Clearly large amounts of parallel data are required to observe all variations of a word. Popovic and Ney (2004) applied transformations to verbs to reduce the number of outof-vocabulary words and showed improvements in translation quality when morphemes were considered. Yang and Kirchhoff (2006) used a back off model in a Phrase-based SMT system which translated word forms in the source language by hierarchical morphological abstractions. Unknown words in the test data were stemmed and phrasetable entries were modified such that words sharing the same root were replaced by their stems. Freeman et al. (2006) and Habash (2008) find in-vocabulary words for OOV words that could be morphological variants of the OOV words. Phrases in the phrase table containing these invocabulary words are then replaced by OOV words to create new entries. Vilar et al. (2007) used a letter-based MT system that treated the source and target sentences as a string of letters for translating unknown words.",
                "cite_spans": [
                    {
                        "start": 157,
                        "end": 179,
                        "text": "Popovic and Ney (2004)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 339,
                        "end": 364,
                        "text": "Yang and Kirchhoff (2006)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 661,
                        "end": 682,
                        "text": "Freeman et al. (2006)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 687,
                        "end": 700,
                        "text": "Habash (2008)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 913,
                        "end": 932,
                        "text": "Vilar et al. (2007)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "All the above approaches handled OOV issues that arise when the source language is morphologically rich. Generation of the target sentence when the target language is morphologically rich from a source language that is not rich in morphology is non-trivial as the source language does not contain all the information for inflecting the target words. Minkov et. al (2007) predicted inflected forms of a sequence of word stems on languages that are morphologically rich using syntactic and rich morphological sources. This inflection generation model was then applied in MT by (Toutanova et al., 2008) while translating English into morphologically complex languages and showed improvement in translation quality. Their methods require a syntactic analyzer and a very rich morphological analyzer which may not be available for many rare or low-density languages. Also, their feature set includes bilingual features that require expensive and difficult to get bilingual corpora. We rely more on monolingual data and a small amount of parallel data. In cases of multi word compound words ( explained in section 1.1 ) , since inflections on the light verb might change with change in the root verb compounding with it, we need to predict these verbs together and not as separate words.",
                "cite_spans": [
                    {
                        "start": 350,
                        "end": 370,
                        "text": "Minkov et. al (2007)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 575,
                        "end": 599,
                        "text": "(Toutanova et al., 2008)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper, we consider Indian languages which are considered as low density languages as they do not have rich knowledge sources such as parsers or complex morphological analyzers. These languages also suffer from data sparsity and hence form ideal languages for the analysis of our proposed method. We also consider only various forms of verbs and do not consider other words such as noun phrases and adjectives affected by inflections.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "India has fifteen official languages which originated from the Indo-Iranian branch of the Indo-European language family, the non-Indo-European Dravidian family, Austro-Asiatic, Tai-Kadai and the Sino-Tibetan language families (Microsoft Encarta Online Encyclopedia, 1997) . The languages that stem from the Dravidian family, are -Tamil, Kannada, Malayalam and Telugu, spoken in the South Indian states. Languages in North India, such as Hindi, Urdu, Punjabi, Gujarati, Bengali, Marathi, Kashmir, Sindhi, Konkani, Rajasthani, Assamese and Oriya, stem from Sanskrit and Pali. Indian languages are verb final i.e., verbs are placed at the end of the sentences. Verbs in these languages are inflected to contain information about gender (masculine and feminine), tense, aspect and number of the subject (singular or plural). A few examples showing inflections on the verbs in Hindi are shown below:",
                "cite_spans": [
                    {
                        "start": 226,
                        "end": 271,
                        "text": "(Microsoft Encarta Online Encyclopedia, 1997)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background on Indian Languages",
                "sec_num": "1.1"
            },
            {
                "text": "These languages also contain compound verbs (multi-word compound representing a single verb). They contain a light verb which receives inflections and another component that can be a noun or a verb responsible for conveying the meaning. For example, in Hindi, most commonly used light verbs are \"karna\" (to make), \"lena\" (to take), \"hona\" (to happen) and \"dena\" (to give).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background on Indian Languages",
                "sec_num": "1.1"
            },
            {
                "text": "When translating from a morphologically poor language such as English to any of the Indian languages, finding the right translation along with the inflections on the verbs becomes difficult, especially when the amount of bilingual data available is scarce. We try to use the pattern behavior of verbs to tackle this problem. Table 1 gives an example of hindi verbs classified according to their light verbs. Hindi side is transliterated for the sake of clarity. It shows how the verb phrase of one compound verb (clean) can generate verb phrase for words in the same group (help and forgive) just by replacing the corresponding source and target root words. The suffixes (shown in bold) are separated from the word to show how the actual process takes place. This paper tries to automatically group the different kinds of verbs occurring in the language based on their light verbs and generates the variation for all the verbs in one group by looking at the variations of any one member. he will be clean ing -> vo saaph kar egaa he will be forgive ing -> vo maaph kar egaa he will be help ing -> vo madad kar egaa The novel concept of this paper is generation of verb phrases on the source side and their translations using a) source and target monolingual data, b) simple morphological segmentation on source and target side and c) Small amount of manual translations or d) Word alignments of parallel corpora. We have described two methods of generating these verb phrase translations in the following sections.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 325,
                        "end": 332,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Motivation",
                "sec_num": "2"
            },
            {
                "text": "The idea here is to get the manual translations of source and target verb pairs which could capture the entire range of variations seen in the source and the target verb phrases. These translations can then be used to generate the variations for rest of the verb pairs. The entire flow of the method is shown in Figure 1 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 312,
                        "end": 320,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Manual Generation",
                "sec_num": "3"
            },
            {
                "text": "Given a language pair (e,f), we extract all verb phrases that occur in the source monolingual data using a verb phrase chunker. Part of speech (POS) tags can be used to extract verb phrases for languages having a good POS tagger. In our experiments for English-Hindi language pair, POS tags were used for English verb phrase chunking. Modals in English were included as a part of the verb phrase since their counterparts in Hindi appear as verbs. For Hindi, the verb phrase chunker was trained on a small set of 6000 sentences, where the reference markings were obtained by projecting the verb phrases from English. The 6000 sentences were hand aligned with the corresponding English sentences, hence helping with the accuracy of the projected verb phrases. On this data, we built a CRF based chunker (Lafferty et al., 2001 ) using word and POS tag features.",
                "cite_spans": [
                    {
                        "start": 801,
                        "end": 823,
                        "text": "(Lafferty et al., 2001",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Verb Phrase Chunking",
                "sec_num": "3.1"
            },
            {
                "text": "Using a segmenter, the root verb is separated from its inflected suffix for all the extracted verb phrases. These extracted verb phrases are then clustered based on the root verb so that all the variations of a root verb '<verb>' are grouped together into one cluster. As an example, a part of the verb cluster for 'play' is shown below. Note that all possible variations of each verb (both source and target side) are under one cluster. play was play +ing should have play +ed ought be played would have been play +ed is play +ed cannot be play +ed is being play +ed The different variations within each verb cluster are normalized by replacing the root verb by a normalization tag '<verb>' so that similar root verb (Minnen et al., 2000) an open source stemming package, to get the root form of the head verb in the extracted English verb phrases. It was observed that all the root verb clusters had the same variations of verb phrases and hence all belonged to the same class. Thus, from the source (English) side, only one verb could be picked up to cover all the variations, which could be then replicated for all the others verbs. We call that class 'AE'.",
                "cite_spans": [
                    {
                        "start": 718,
                        "end": 739,
                        "text": "(Minnen et al., 2000)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Verb Classing",
                "sec_num": "3.2"
            },
            {
                "text": "In Hindi, as explained in section 1.1, many verbs occur as compound verbs where a noun followed by a light verbs is considered as a verb and hence we also included these for our clustering and classification. The extracted verb phrases were segmented using a stemmer similar to one in (Ramanathan et al., 2003) . After clustering the verb phrases based on their root verbs into groups and classing them based on the different variations, the main classes depended on the a) whether the verb has a light verb or not and b) the type of light verb attached. Table 2 shows the different classes found along with the number of verbs within each class.",
                "cite_spans": [
                    {
                        "start": 285,
                        "end": 310,
                        "text": "(Ramanathan et al., 2003)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 555,
                        "end": 562,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Verb Classing",
                "sec_num": "3.2"
            },
            {
                "text": "There were more classes with a different auxiliary verb but we neglected them since the frequency of verbs in those classes was insignificant. 'lena' and 'dena' verb forms take the same vari-ations (differing only in one character), we could easily generate one from another. Overall, only 3 classes were used for manual translation on the Hindi side(AH,BH and CH). Note that we allow a verb to belong to more than one class, suggesting that a word can be used in more than just one way depending on the context.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Verb Classing",
                "sec_num": "3.2"
            },
            {
                "text": "Given a parallel corpus, it is possible to extract source root verb to target root verb translation. Since a parallel corpus will contain the inflected form of a verb, it is necessary to stem them to their root form before calculating the word translation probabilities. Hence, given a parallel corpus, sentences are machine aligned by a maxent model as described in (Ittycheriah and Roukos, 2005) and then the verbs on both the source and target side are stemmed to the respective root forms using a suitable stemmer. Given the list of possible source verbs and target words from the previous clustering step, the forward and reverse translation probabilities for these verbs are calculated from the alignments using relative frequency:",
                "cite_spans": [
                    {
                        "start": 367,
                        "end": 397,
                        "text": "(Ittycheriah and Roukos, 2005)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Root verb translation pairs",
                "sec_num": "3.3"
            },
            {
                "text": "P (f i /e j ) = count(f i , e j )/ f count(f, e j ) (1) P (e i /f j ) = count(e i , f j )/ e count(e, f j ) (2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Root verb translation pairs",
                "sec_num": "3.3"
            },
            {
                "text": "Using these forward and reverse probability, a mapping file that maps the source root verb to the corresponding target root verb(s) is created by empirically combining the two probabilities. P tot (e i /f j ) = 0.5 * P (e i /f j )+0.5 * P (f j /e i ) (3)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Root verb translation pairs",
                "sec_num": "3.3"
            },
            {
                "text": "We allow one source verb mapping to multiple target verbs, since the meaning can change due to context in the test sentence. TopM translations of source word are selected for translations, provided P tot > P thresh . We empirically found the P thresh = 0.2 and M=4 to work reasonably well. Two or more worded root verbs, such as phrasal verbs 'take off', 'figure out', were not considered while creating the mapping since the meaning is often different from that of the individual words and the generation of verb phrases from these root verbs is more tricky. Such constructions, where a one word verb may translate to multiple words, occurred for only 3% of the verbs in the test data and hence could be ignored without any significant loss in improvement. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Root verb translation pairs",
                "sec_num": "3.3"
            },
            {
                "text": "Given the root verb mapping and the classes to which these source and target root verb belong to, we create a 'source class' to 'target class' mapping, or a 'verb-pair class', by replacing the root verbs with their corresponding verb classes. This causes each of the verb pair to fall under some particular verb-pair class. If there are n classes in the source side and m on the target side, the maximum number of verb-pair classes are N = m*n By picking any one root verb pair from each of these verb-pair classes, we can cover all the possible variations of verb phrase translation pairs. These pairs can then be given for human translation, by creating all possible variations of either the source side or target side and asking humans to translate to the other.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generation of Verb Phrase Dictionary",
                "sec_num": "3.4"
            },
            {
                "text": "Templates for each of the N verb-pair class are created from the manually translated data by segmenting the verb phrase pairs on both sides and replacing the root verb by the '<verb>' tag. An example of such translation pair for English-Hindi is shown was <verb> +ing == <verb> raha tha [Class AH-AE ] was <verb> +ing == <verb> kar raha tha [Class BH-AE ] Picking root verb pairs from each verb-pair class and replacing the <verb> tag with corresponding verbs, these templates are used to create new verb phrases which may not be present in the parallel data to a large extent. A reverse morphological tool or joiner is used to recombine the segmented verb phrases and create a verb phrase dictionary.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generation of Verb Phrase Dictionary",
                "sec_num": "3.4"
            },
            {
                "text": "In our paper, English had one class and Hindi had 3. Thus, only 3 Hindi-English verb pairs needed to be translated, one from each of the verbpair classes AH-AE, BH-AE, and CH-AE. We created different variations of the English verbs, since it had only one class and could be easily generated using manually built rules. Grammar rules contain number (singular/plural), tense and aspect agreement between different auxiliary forms (for example: was, is, were, can, might, could not, wouldn't, etc) and verbs (for example: answer, punishing, cleaned, etc.). A unique Hindi-English verb pair is picked from each of the verb-pair classes obtained earlier and their English verbs are used in generating the English verb forms. For example, \"saaf\" belonged to the \"karna\" cluster, so its English translation, \"clean\" is used for creating verb forms. Gender information is also added to the Verb forms which will be required for the Hindi counterparts. About \u2248 970 verb forms were generated for each of the 3 verbs. Examples of a few Verb forms are given below: 'Not' is included as a part of the extracted verb phrases since it is the most common adverb that occurs within the verb phrases. Other adverbs such as 'now', 'also' have not been dealt with in this paper. These variations, along with the mapping of the English-Hindi root verb was given to the annotators for translation. The subject information within '[]' helps the annotators to decide on the number and gender inflections on the target (Hindi) side. These are removed before using in the machine translation system. The reverse morphology of the generated verb phrases is done using MorphG (Minnen et al., 2000) for English and a simple suffix joiner for hindi.",
                "cite_spans": [
                    {
                        "start": 1648,
                        "end": 1669,
                        "text": "(Minnen et al., 2000)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generation of Verb Phrase Dictionary",
                "sec_num": "3.4"
            },
            {
                "text": "[",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generation of Verb Phrase Dictionary",
                "sec_num": "3.4"
            },
            {
                "text": "Although manual translation is a clean and effective way of generating these verb phrases, a human is still required in the loop to complete the setup. Instead, both side monolingual data can be employed to extract all the variations for each root verb, parallel corpus can be used to get the sourceverb to root-verb pairs, and finally a model can be learnt to align the source verb phrase to target verb phrase using the verb alignments from the hand alignments and machine alignments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Automatic generation",
                "sec_num": "4"
            },
            {
                "text": "Using the technique described in section 3.3, a source to target root verb mapping are obtained.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Verb pairs",
                "sec_num": "4.1"
            },
            {
                "text": "Clustering of verb phrases on source and target side is done as explained in section 3.2 so that each cluster contains different variations of the same root verb form. The phrases within each cluster are segmented on both sides using the techniques described section 3.2 and are generalized by replacing the root verb in the segmented verb phrase by a '<verb>' tag as this will help while aligning the source verb phrase to its corresponding target verb phrase.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering",
                "sec_num": "4.2"
            },
            {
                "text": "In order to learn a verb phrase alignment model, we need good quality verb phrase alignments from the parallel corpora. We concentrate on hand aligned data and accurate machine alignments. Machine aligned verb phrases that occurred less than three times were treated as inaccurate. The source side verb phrases are extracted using the scheme similar to one in section 3.1, and by looking at the target words they align to, verb phrase alignments are obtained. The aligned verb phrases are segmented on both the target and the source side using the strategy described in section 3.2 and then normalized by replacing the head word for both the source side verb phrase and the target side verb phrase by a '<verb>' tag. The '<verb>' tagged verb phrases act as templates for verb alignments. Since all the root forms of verb will not occur in the extracted verb alignments, it's necessary to normalize them to be able to learn a general model. This way, if the translation of a particular source verb phrase variation is known, its generalized form can be used to get the trans-lation of a different root verb for the same variation. This is similar to our claim in section 3.4 that translation of one root verb can generate translations for all other verbs belonging to the same class.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Verb Phrase Translations",
                "sec_num": "4.3"
            },
            {
                "text": "A simple word alignment model is used to learn the word translation probabilities. Please note that the suffixes of the verbs are also treated as words since they contain important information about tense, gender and number. We used GIZA++ model 4 to learn P(V si /V tj ), which is the probability of the i th source word/segment aligning to the j th target word/segment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Verb Phrase Translations",
                "sec_num": "4.3"
            },
            {
                "text": "From the root-verb pairs obtained in section 4.1, each verb pair is picked and the best translation for a source verb phrase in source-side verb cluster is searched for in the target verb cluster. If a cluster for the source or target verb does not exist, that pair is ignored. Both the source and the target verb clusters contain the generalized verb phrase of the form '... aux \u22121 <verb> aux 1 aux 2 .. '. First, a perfect match of a source phrase and target verb phrase is searched in the hand aligned and machine aligned verb phrase pairs. If found, that phrase pair is treated as a valid verb phrase pair. If no perfect match is found, word alignment probabilities obtained in previous section are use to get the source to target verb phrase alignments. Any verb phrase alignment pair with score lower than a threshold score of 0.5 is ignored.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Automatic Alignment and Generation",
                "sec_num": "4.4"
            },
            {
                "text": "After obtaining all the valid verb phrase pairs, the tag <verb> is replaced by their corresponding root verbs and as in section 3.4 and the suffixes are joined to the root verb to get the automatically generated verb phrase dictionary which can be used in the MT system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Automatic Alignment and Generation",
                "sec_num": "4.4"
            },
            {
                "text": "In this section, we report our experimental results on English -Hindi language pair. We first report on the coverage ratio, which gives an estimate of number of exact verb phrases covered by the baseline system and our method. In addition, we also report on English to Hindi Machine translation results for phrase based systems.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "The data used for clustering and classification on source and target side, the parallel corpora and the test set details are shown in table 3. English Monolingual  6 million  Hindi Monolingual  1.4 million  Test set 1  4000  Test set 2  715  Training Data  280k   Table 3 : Data used for experiments",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 143,
                        "end": 271,
                        "text": "English Monolingual  6 million  Hindi Monolingual  1.4 million  Test set 1  4000  Test set 2  715  Training Data  280k   Table 3",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Discovery of new data",
                "sec_num": "5.1"
            },
            {
                "text": "The Hindi monolingual data was used to collect 4320 Hindi verb clusters belonging to 3 different classes ( section 3.2 ) and the English monolingual yielded 4872 clusters. Many of these clusters were false positives due to the bad quality of verb phrase chunker but were eliminated in the subsequent steps. The parallel data was aligned using a maxent model (Ittycheriah and Roukos, 2005) and gave us 2944 verb-pairs.",
                "cite_spans": [
                    {
                        "start": 358,
                        "end": 388,
                        "text": "(Ittycheriah and Roukos, 2005)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "No of Sentences",
                "sec_num": null
            },
            {
                "text": "For manual generation method, 3 verb pairs were given to annotators for translation, with about 972 different English verb forms in each. The generated dictionary from the manual translations had 2.7 million verb phrases. The automatic method aligned the corresponding clusters of the 2944 verb-pairs and produced about 300k new verb phrases. The considerably lesser size of the automatically created verb phrase dictionary compared to the manual dictionary can be attributed to the fact that the manual dictionary contains variations that are not seen in our monolingual data. We claim that the manually and automatically created verb phrase dictionaries add new data to the system and have a higher chance of finding a matching source verb phrase in a given corpus than our phrase based system. We verify this claim by extracting verb phrases from two test sets and searching for them in:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "No of Sentences",
                "sec_num": null
            },
            {
                "text": "1. Baseline Phrase table 2. Base+Manual generated Dictionary 3. Base+Auto generated Dictionary Figure 2 shows the ratio of the number of verbs phrases found in the three cases to the total number of verbs searched. We call this as coverage ratio. The verb phrases are divided based on their lengths. The plot clearly shows that the coverage increases considerably by the addition of these generated verb phrases which may or may not be seen in the training data, especially as the length of the verb phrase increases. Verb phrases of length 1 are not shown since the coverage was almost same for the 3 settings. Table 3 shows the training data and the two test sets used for evaluation. The bilingual parallel data is split into training data and Test set 1. Test set 2 is a generic test set. All the data (training and test ) used is predominately contains news. We report are results on BLEU (Papineni et al., 2002) .",
                "cite_spans": [
                    {
                        "start": 894,
                        "end": 917,
                        "text": "(Papineni et al., 2002)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 95,
                        "end": 103,
                        "text": "Figure 2",
                        "ref_id": "FIGREF1"
                    },
                    {
                        "start": 612,
                        "end": 619,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "No of Sentences",
                "sec_num": null
            },
            {
                "text": "The verb phrase pairs were generated as explained in manual generation section and then added to the baseline system as a part of corpus(Base+manVPcorp). To emphasis on the improvement from generated verb phrases, an experiment where only the human translated verb phrases are added to the baseline corpus was also conducted(Base+humanVPcorp). Table 4 shows the results on Moses -a state of the art phrase based system, and on a phrase based system (PBMT) similar to (Tillman et al., 2006) on test sets 1 and 2. Both systems were trained on 280k parallel sentences. On the in-domain data, we had an improvement of 4.8 BLEU points for Moses and 0.9 for PBMT. On the more generic test set, Moses gave a BLEU score improvement of 1.1 whereas the PBMT performance was comparable. One reason for this difference in the BLEU score jumps is the better alignments in the PBMT system, aligned by a maxent model as described in (Ittycheriah and Roukos, 2005) . The PBMT system thus has a higher chances of having a good verb phrase in the baseline system than Moses and hence on adding generated verb phrases, we would see a lesser gain. Since the PBMT system had comparable results on the in-domain data with Moses and performed better on the out of domain (more generic) test set, the remaining experiments have been conducted with the PBMT system.",
                "cite_spans": [
                    {
                        "start": 467,
                        "end": 489,
                        "text": "(Tillman et al., 2006)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 918,
                        "end": 948,
                        "text": "(Ittycheriah and Roukos, 2005)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 344,
                        "end": 351,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Machine translation Results",
                "sec_num": "5.2"
            },
            {
                "text": "PBMT Set 1 Set2 Set 1 Set2 Baseline 13.5 08.6 13.4 16.1 Base+humanVPcorp 14.1 08.6 14.0 16.1 Base+manVPcorp 18.3 9.7 14.3 16.0 Table 4 : BLEU score on test set 1 and 2 for different settings on moses and PBMT Adding the generated phrases as a parallel corpus can alter the translation probabilities of individual words and sub-phrases. This is one of the reasons for no improvement in the bleu score of the PBMT system when the generated verb phrases are added as corpus. A better method would be to add the verb phrases directly to the phrase table. We added the manual dictionary to the PBMT system and the results are tabulated in table 5.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 127,
                        "end": 134,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Moses",
                "sec_num": null
            },
            {
                "text": "Baseline 13.4 16.1 Base+humanVP-PT 14.0 16.1 base+manVP-PT 14.9 16.5 base+autoVP-PT 14.8 16.3 Table 5 : BLEU score for PBMT system after adding verb phrases directly to Phrase Table (PT) Adding the verb phrases directly to the system keeps the rest of the phrases and their scores intact. Only phrases with matching source side phrase need to be re-normalized to adjust the translation probabilities. This would mean that the probability of only the verb phrases we add to the baseline phrase table would be affected while the rest of the translation model would be the same. Table 5 shows that addition of manually generated data to the phrase table(Base+manVP-PT), gives a good improvement of 1.5 points on the in-domain data and 0.4 on the out of domain data. A significant improvement of 1.4 BLEU points is seen even when the automatically generated verb phrases are added (Base+autoVP-PT), which was not seen when these were added as a corpus to the system. Figure 3 shows the variation of BLEU score with change in the corpus size. We should expect that the gain be higher in the case of low corpus size. However, note that the verb-pair list used to generate the verb phrases also changes with the change in corpus size, since decreasing the corpus size would decrease the quality of the overall alignments and the number of verbs seen. Thus, while the verb-pair list using 160k sentences had a total of 2499 verb pairs, the 20k corpus produced only 1347 verb pairs. So, for a smaller corpus size, the number of new verb phrases added to the table would also be lesser. This explains the rather constant gain in BLEU score throughout the graph. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 94,
                        "end": 101,
                        "text": "Table 5",
                        "ref_id": null
                    },
                    {
                        "start": 176,
                        "end": 186,
                        "text": "Table (PT)",
                        "ref_id": null
                    },
                    {
                        "start": 576,
                        "end": 583,
                        "text": "Table 5",
                        "ref_id": null
                    },
                    {
                        "start": 963,
                        "end": 971,
                        "text": "Figure 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Set 1 Set2",
                "sec_num": null
            },
            {
                "text": "We showed an improvement of up to 1.5 bleu points on the in domain data set and an improvement of 0.4 bleu on the generic test set. However, there are still some errors in respect to the morphology of the verb phrases, which the Language Model is unable to tackle. These are primarily the long range dependencies which includes determining the gender and number of the subject or object to get the appropriate inflection. Having a dynamic feature based system, which does not require rich morphological resources, and predicts the suffixes and inflections would be able to solve this problem. Also, when a verb has more than one meaning, the contextual information is not captured efficiently in the current method and often produces a more literal translation than the the reference.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "6"
            },
            {
                "text": "Apart from adding the verb phrases to the phrase table, filtering of poor verb phrase pairs from the original phrase table is another approach to consider. The two methods together can give a higher boost to the translation than just one of them. A more language independent method of extraction of verb phrases also needs to be constructed, which does not require building language dependent stemmers and verb phrase chunkers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "6"
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Generating Complex Morphology for Machine Translation",
                "authors": [
                    {
                        "first": "Einat",
                        "middle": [],
                        "last": "Minkov",
                        "suffix": ""
                    },
                    {
                        "first": "Kristina",
                        "middle": [],
                        "last": "Toutanova",
                        "suffix": ""
                    },
                    {
                        "first": "Hisami",
                        "middle": [],
                        "last": "Suzuki",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proc. 45th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "128--135",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Einat Minkov, Kristina Toutanova and Hisami Suzuki. 2007. Generating Complex Morphology for Ma- chine Translation, in Proc. 45th Annual Meeting of the Association for Computational Linguistics, 2007, pp. 128-135.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Applying Morphology Generation Models to Machine Translation",
                "authors": [
                    {
                        "first": "Kristina",
                        "middle": [],
                        "last": "Toutanova",
                        "suffix": ""
                    },
                    {
                        "first": "Hisami",
                        "middle": [],
                        "last": "Suzuki",
                        "suffix": ""
                    },
                    {
                        "first": "Achim",
                        "middle": [],
                        "last": "Ruopp",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proc. 46th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kristina Toutanova, Hisami Suzuki and Achim Ruopp 2008. Applying Morphology Generation Models to Machine Translation, in Proc. 46th Annual Meet- ing of the Association for Computational Linguis- tics, 2008.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Can we translate letters?",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Vilar",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Peter",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [
                            "F"
                        ],
                        "last": "Informatik",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "In Proceedings of Association Computational Linguistics Workshop on SMT",
                "volume": "",
                "issue": "",
                "pages": "33--39",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. Vilar, J. Peter, H. Ney, and L. F. Informatik 2007. Can we translate letters?, In Proceedings of Associ- ation Computational Linguistics Workshop on SMT, pages 33-39, 2007.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Cross linguistic name matching in english and arabic: a \"one to many mapping\" extension of the levenshtein edit distance algorithm",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "T"
                        ],
                        "last": "Freeman",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [
                            "L"
                        ],
                        "last": "Condon",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [
                            "M"
                        ],
                        "last": "Ackerman",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of the main conference on Human Language Technology, Conference of the North American Chapter of the Association of Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "A. T. Freeman, S. L. Condon, and C. M. Ackerman 2006. Cross linguistic name matching in english and arabic: a \"one to many mapping\" extension of the levenshtein edit distance algorithm. , In Pro- ceedings of the main conference on Human Lan- guage Technology, Conference of the North Amer- ican Chapter of the Association of Computational Linguistics.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Four techniques for online handling of out-of-vocabulary words in arabic-english statistical machine translation",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Habash",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of Association for Computational Linguistics-08",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "N Habash 2008. Four techniques for online handling of out-of-vocabulary words in arabic-english statis- tical machine translation , In Proceedings of Asso- ciation for Computational Linguistics-08.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Towards the use of word stems and suffixes for statistical machine translation",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Popovic",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of The International Conference on Language Resources and Evaluation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Popovic and H. Ney 2004. Towards the use of word stems and suffixes for statistical machine trans- lation, In Proceedings of The International Confer- ence on Language Resources and Evaluation.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Phrase-based backoff models for machine translation of highly inflected languages",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Yang",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Kirchhoff",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of the European Chapter of the ACL",
                "volume": "",
                "issue": "",
                "pages": "41--48",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Yang and K. Kirchhoff 2006 Phrase-based backoff models for machine translation of highly inflected languages, In Proceedings of the European Chapter of the ACL, pages 41-48, 2006.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Statistical Phrase-Based Translation",
                "authors": [
                    {
                        "first": "Philipp",
                        "middle": [],
                        "last": "Koehn",
                        "suffix": ""
                    },
                    {
                        "first": "Franz",
                        "middle": [
                            "Josef"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "Marcu",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of HLT-NAACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Philipp Koehn, Franz Josef Och, Daniel Marcu. 2003. Statistical Phrase-Based Translation, In Proceed- ings of HLT-NAACL 2003.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Enriching Morphologically Poor Languages for Statistical Machine Translation",
                "authors": [
                    {
                        "first": "Eleftherios",
                        "middle": [],
                        "last": "Avramidis",
                        "suffix": ""
                    },
                    {
                        "first": "Philipp",
                        "middle": [],
                        "last": "Koehn",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of ACL-08",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eleftherios Avramidis, Philipp Koehn 2008. Enrich- ing Morphologically Poor Languagesfor Statistical Machine Translation , In Proceedings of ACL-08, HLT.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "A Lightweight Stemmer for Hindi",
                "authors": [
                    {
                        "first": "Ananthakrishnan",
                        "middle": [],
                        "last": "Ramanathan",
                        "suffix": ""
                    },
                    {
                        "first": "Durgesh",
                        "middle": [],
                        "last": "Rao",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Workshop on Computational Linguistics for South-Asian Languages, EACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ananthakrishnan Ramanathan and Durgesh Rao 2003. A Lightweight Stemmer for Hindi 2003, Workshop on Computational Linguistics for South-Asian Lan- guages, EACL.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Simple Syntactic and Morphological Processing Can Help English-Hindi Statistical Machine Translation",
                "authors": [
                    {
                        "first": "Ananthakrishnan",
                        "middle": [],
                        "last": "Ramanathan",
                        "suffix": ""
                    },
                    {
                        "first": "Pushpak",
                        "middle": [],
                        "last": "Bhattacharyya",
                        "suffix": ""
                    },
                    {
                        "first": "Jayprasad",
                        "middle": [],
                        "last": "Hegde",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Ritesh",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Shah",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Sasikumar",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of International Joint Conference on Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ananthakrishnan Ramanathan, Pushpak Bhat- tacharyya, Jayprasad Hegde, Ritesh M. Shah., Sasikumar M 2007 Simple Syntactic and Mor- phological Processing Can Help English-Hindi Statistical Machine Translation, In Proceedings of International Joint Conference on Natural Language Processing,2007.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Efficient Dynamic Programming Search Algorithms for Phrase-based SMT",
                "authors": [
                    {
                        "first": "Christoph",
                        "middle": [],
                        "last": "Tillman",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of the Workshop CHPSLP at HLT'06",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Christoph Tillman 2006 Efficient Dynamic Program- ming Search Algorithms for Phrase-based SMT, In Proceedings of the Workshop CHPSLP at HLT'06.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Robust, applied morphological generation",
                "authors": [
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Minnen",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Carroll",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Pearce",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 1st International Natural Language Generation Conference",
                "volume": "",
                "issue": "",
                "pages": "201--208",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Minnen, G., J. Carroll and D. Pearce 2000 Robust, ap- plied morphological generation, In Proceedings of the 1st International Natural Language Generation Conference, Mitzpe Ramon, Israel pp 201-208.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "A maximum entropy word aligner for arabic-english machine translation",
                "authors": [
                    {
                        "first": "Abraham",
                        "middle": [],
                        "last": "Ittycheriah",
                        "suffix": ""
                    },
                    {
                        "first": "Salim",
                        "middle": [],
                        "last": "Roukos",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of HLT/EMNLP, HLT-05",
                "volume": "",
                "issue": "",
                "pages": "89--96",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Abraham Ittycheriah and Salim Roukos 2005. A max- imum entropy word aligner for arabic-english ma- chine translation, In Proceedings of HLT/EMNLP, HLT-05, pages 89-96, Stroudsburg, PA, USA. Asso- ciation for Computational Linguistics.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data",
                "authors": [
                    {
                        "first": "John",
                        "middle": [],
                        "last": "Lafferty",
                        "suffix": ""
                    },
                    {
                        "first": "Andrew",
                        "middle": [],
                        "last": "McCallum",
                        "suffix": ""
                    },
                    {
                        "first": "Fernando",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proc. International Conference on Machine Learning (ICML)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "John Lafferty, Andrew McCallum, and Fernando Pereira 2002. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data , in Proc. International Conference on Machine Learning (ICML), 2001.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "BLEU: a method for automatic evaluation of machine translation",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Papineni",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Roukos",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Ward",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [
                            "J"
                        ],
                        "last": "Zhu",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proc. ACL-2002: 40th Annual meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "311--318",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Papineni, K., Roukos, S., Ward, T., and Zhu, W. J. 2002. BLEU: a method for automatic evaluation of machine translation , in Proc. ACL-2002: 40th An- nual meeting of the Association for Computational Linguistics pp. 311 -318.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "text": "Steps involved in manual generation of verb phrases",
                "type_str": "figure",
                "num": null
            },
            "FIGREF1": {
                "uris": null,
                "text": "Coverage percentage for different settings",
                "type_str": "figure",
                "num": null
            },
            "FIGREF2": {
                "uris": null,
                "text": "Change in BLEU for different corpus sizes",
                "type_str": "figure",
                "num": null
            },
            "TABREF1": {
                "html": null,
                "text": "",
                "type_str": "table",
                "content": "<table><tr><td>: Example of verbs belonging to different</td></tr><tr><td>groups based on their light verb</td></tr></table>",
                "num": null
            },
            "TABREF3": {
                "html": null,
                "text": "Count of verbs belonging to different classes in Hindi clusters now contain exactly the same variations and can be aggregated together easily. These clusters are put in N different 'verb classes' so that all verbs occurring with the same variations are under",
                "type_str": "table",
                "content": "<table/>",
                "num": null
            }
        }
    }
}