{
    "paper_id": "2005",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:17:07.128726Z"
    },
    "title": "The MIT-LL/AFRL MT System",
    "authors": [
        {
            "first": "Wade",
            "middle": [],
            "last": "Shen",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Brian",
            "middle": [],
            "last": "Delaney",
            "suffix": "",
            "affiliation": {},
            "email": "bdelaney@ll.mit.edu"
        },
        {
            "first": "Tim",
            "middle": [],
            "last": "Anderson",
            "suffix": "",
            "affiliation": {},
            "email": "timothy.anderson@wpafb.af.mil"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "The MIT-LL/AFRL MT system is a statistical phrase-based translation system that implements many modern SMT training and decoding techniques. Our system was designed with the long term goal of dealing with corrupted ASR input for Speech-to-Speech MT applications. This paper will discuss the architecture of the MIT-LL/AFRL MT system, and experiments with manual and ASR transcription data that were run as part of the IWSLT-2005 Chinese-to-English evaluation campaign. 1",
    "pdf_parse": {
        "paper_id": "2005",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "The MIT-LL/AFRL MT system is a statistical phrase-based translation system that implements many modern SMT training and decoding techniques. Our system was designed with the long term goal of dealing with corrupted ASR input for Speech-to-Speech MT applications. This paper will discuss the architecture of the MIT-LL/AFRL MT system, and experiments with manual and ASR transcription data that were run as part of the IWSLT-2005 Chinese-to-English evaluation campaign. 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "In recent years, the development of statistical methods for machine translation has resulted in high quality translations that can be used in real applications with increasing confidence. Specific advancements include:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "\u2022 Extracting word alignments from parallel corpora [1] [2] \u2022 Learning and modeling the translation of phrases [4] [5] \u2022 Combining and optimizing model parameters [6] [7] [8] \u2022 Decoding and rescoring techniques [9] [10] Our system draws from these advances and implements a number of these techniques including log-linear model combination and minimum error rate training to translate foreign language sentences. We developed our system during preparation for IWSLT-2005 to serve as a platform for future research. Most of the components of our system have been developed in-house in order to facilitate future experimentation.",
                "cite_spans": [
                    {
                        "start": 51,
                        "end": 54,
                        "text": "[1]",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 55,
                        "end": 58,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 110,
                        "end": 113,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 114,
                        "end": 117,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 162,
                        "end": 165,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 166,
                        "end": 169,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 170,
                        "end": 173,
                        "text": "[8]",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 210,
                        "end": 213,
                        "text": "[9]",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 214,
                        "end": 218,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "In subsequent sections, we will discuss the details translation system including our alignment and language models and methods we've implemented for optimization and decoding. The basic translation training and decoding processes are shown in Figure 1 . We start with a word alignment extracted from a training set using GIZA++. These alignments are expanded and phrases are counted to form the phrase translation model. Language models are then trained from the English side of training set (and possibly with other English texts, if available).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 243,
                        "end": 251,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "Using development bitexts separated from the training set, we then employ a minimum error rate training process to optimize model parameters utilizing a held out development set. These trained parameters and models can then be applied to test data during decoding and rescoring phases of the translation process. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "The basic phrase-translation model we employ is described in detail [4] and [5] . We use GIZA++ [1] , [3] and [4] to generate alignments between foreign language and English language words/tokens in both directions (f-to-e and e-to-f). An expanded alignment is then computed using heuristics that interpolate between the intersection and union of these bidirectional word alignments as detailed in [4] and [5] . Then phrases are extracted from the expanded alignments to build the phrase model. We introduced two minor modifications to this basic process in our system:",
                "cite_spans": [
                    {
                        "start": 68,
                        "end": 71,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 76,
                        "end": 79,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 96,
                        "end": 99,
                        "text": "[1]",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 102,
                        "end": 105,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 110,
                        "end": 113,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 398,
                        "end": 401,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 406,
                        "end": 409,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Translation Models",
                "sec_num": "2."
            },
            {
                "text": "we first add points that are unaligned in both the source and target language sentences that otherwise fit the standard inclusion criteria. The combination of these two enhancements provided a two point improvement in BLEU score on the IWSLT-2004 development set in the supplied data condition.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Prior to iterative expansion of intersection alignment,",
                "sec_num": "1."
            },
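A minimal Python sketch of the grow-style symmetrization heuristic described above: start from the intersection of the two GIZA++ alignments and iteratively grow toward their union, admitting a point only if it neighbors an existing link and its source or target word is still unaligned. The function names and the exact neighbor set are illustrative assumptions, not the system's actual code.

```python
# Sketch only: interpolate between intersection and union of the two
# directional alignments, per the expansion heuristic of [4]/[5].

NEIGHBORS = [(-1, 0), (1, 0), (0, -1), (0, 1),
             (-1, -1), (-1, 1), (1, -1), (1, 1)]

def symmetrize(f2e, e2f):
    """f2e, e2f: sets of (src_idx, tgt_idx) links from the two directions."""
    alignment = set(f2e) & set(e2f)      # intersection: high precision
    union = set(f2e) | set(e2f)          # union: high recall
    added = True
    while added:                         # iterative expansion
        added = False
        for i, j in sorted(union - alignment):
            touches = any((i + di, j + dj) in alignment
                          for di, dj in NEIGHBORS)
            src_free = all(p != i for p, _ in alignment)
            tgt_free = all(q != j for _, q in alignment)
            if touches and (src_free or tgt_free):
                alignment.add((i, j))
                added = True
    return alignment
```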
            {
                "text": "We extract translation models for both translation directions (i.e. P(f|e) and P(e|f)). In addition to these models, we add lexical weights extracted from the expanded alignment process in both translation directions [5] and a fixed phrasepenalty [11] .",
                "cite_spans": [
                    {
                        "start": 217,
                        "end": 220,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 247,
                        "end": 251,
                        "text": "[11]",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Model Training Translation",
                "sec_num": null
            },
            {
                "text": "For this evaluation we used a simple distortion model that was described by Koehn et al in [5] and shown here:",
                "cite_spans": [
                    {
                        "start": 91,
                        "end": 94,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Distortion Model",
                "sec_num": "3."
            },
            {
                "text": "\u2211 \u2212 + \u2212 = \u2212 i i i D FirstW FinalW f e P ) ) 1 ( exp( ) | ( 1 (1) where 1 \u2212 i",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Distortion Model",
                "sec_num": "3."
            },
            {
                "text": "FinalW is the position of the final word of the previous phrase and i FirstW is position of the first word of the current phrase. Although simple, we found this model to be very effective for the Chinese-to-English task. Additional constraints on distortion can be chosen to limit the phrase reordering during search, however we chose not to limit distortion when decoding on the IWSLT05 test data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Distortion Model",
                "sec_num": "3."
            },
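As a concrete reading of Eq. (1), here is a small Python sketch that scores a sequence of phrases. The representation of each phrase as the (first word, final word) span it covers in the source, and the convention that the "phrase" before the first one ends at position -1, are illustrative assumptions.

```python
import math

def distortion_log_prob(phrase_spans):
    """log P_D(f|e) = -sum_i |FirstW_i - FinalW_{i-1} - 1|, per Eq. (1)."""
    cost = 0
    prev_final = -1
    for first, final in phrase_spans:
        cost += abs(first - prev_final - 1)
        prev_final = final
    return -cost

# Monotone order incurs no penalty:
print(math.exp(distortion_log_prob([(0, 1), (2, 4), (5, 5)])))  # 1.0
# Jumping ahead and back is penalized:
print(distortion_log_prob([(2, 4), (0, 1), (5, 5)]))            # -10
```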
            {
                "text": "We used the SRI language modeling toolkit to build language models [12] . A trigram language model is used during initial decoding. Two additional language models are introduced during rescoring and minimum error rate training: a four-gram language model, and a 5-gram class-based language model. All of these models were trained with modified Knesser-Ney interpolation as suggested by Goodman and Chen in [13] and [14] .",
                "cite_spans": [
                    {
                        "start": 67,
                        "end": 71,
                        "text": "[12]",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 406,
                        "end": 410,
                        "text": "[13]",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 415,
                        "end": 419,
                        "text": "[14]",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Language Models",
                "sec_num": "4."
            },
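The decoder described in Section 6 backs off to bigrams when a trigram is unseen; the sketch below shows that standard backoff lookup over ARPA-style log-probabilities and backoff weights. The toy tables and function name are purely illustrative; the actual models were built with the SRI toolkit.

```python
# Toy ARPA-style tables: log10 probabilities and backoff weights.
logp = {
    ("we", "will", "translate"): -0.3,                      # trigram
    ("we", "will"): -0.5, ("will", "translate"): -0.7,      # bigrams
    ("we",): -1.2, ("will",): -1.5, ("translate",): -2.0,   # unigrams
}
backoff = {("we", "will"): -0.1, ("will",): -0.2, ("we",): -0.2}

def ngram_logprob(context, word):
    """log P(word | context), shortening the history until a hit is found."""
    score = 0.0
    while True:
        if context + (word,) in logp:
            return score + logp[context + (word,)]
        if not context:
            return score + logp.get((word,), -99.0)  # unseen-word floor
        score += backoff.get(context, 0.0)           # pay the backoff weight
        context = context[1:]

print(ngram_logprob(("we", "will"), "translate"))  # -0.3 (trigram hit)
print(ngram_logprob(("he", "will"), "translate"))  # backs off to the bigram
```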
            {
                "text": "Our system employs minimum error rate training to optimize parameter weights for each of the individual model components that are summarized in table 1. Many different optimization strategies have been proposed for combination of log-linear models (see [6] , [7] , [8] and [15] for further details). The algorithm we used is a variation of the method described by Och in [7] . In particular, we optimize each of these parameters assuming log-linear combination as given by The optimization process attempts to sets parameter weights that minimize the overall error rate. To do this, sentences from a development set are used to sample the error surface defined by the model parameters listed above and the loss function to be minimized. Each sentence is decoded to produce n-best translation output samples and a line search is then performed per sentence/model parameter to minimize the overall error rate. This process yields a set of parameter weights for each model that can be then used for decoding and rescoring. The training is iterative in that n-best lists are created using the new parameters and merged with the existing n-best lists to increase the resolution of our sampling for each iteration. As reported by Och [7] , this iterative procedure converges after 6-8 iterations. ClassLM -Five-gram class-based LM ",
                "cite_spans": [
                    {
                        "start": 253,
                        "end": 256,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 259,
                        "end": 262,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 265,
                        "end": 268,
                        "text": "[8]",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 273,
                        "end": 277,
                        "text": "[15]",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 371,
                        "end": 374,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 1228,
                        "end": 1231,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Error Rate Training",
                "sec_num": "5."
            },
            {
                "text": "\u2211 \u2211 \u2211 \u2200 \u2200 \u2200 = ' )) ' , ( exp( )) , ( exp( ) | (",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Error Rate Training",
                "sec_num": "5."
            },
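A minimal sketch of the iterative n-best training loop described above, assuming each hypothesis carries a feature vector "h" (one score per model in Table 1) and a precomputed sentence-level loss. The grid-based coordinate search is a crude stand-in for Och's exact line search, and all names are illustrative.

```python
def total_loss(weights, nbest_lists):
    """Rescore each n-best list; sum the losses of the argmax hypotheses."""
    loss = 0.0
    for nbest in nbest_lists:
        best = max(nbest, key=lambda hyp: sum(w * f for w, f in zip(weights, hyp["h"])))
        loss += best["loss"]
    return loss

def mert(nbest_lists, dim, iterations=8, grid=21):
    weights = [1.0] * dim
    for _ in range(iterations):          # converges in 6-8 iterations per [7]
        for m in range(dim):             # line search, one weight at a time
            trial = [weights[:m] + [x] + weights[m + 1:]
                     for x in (i / 5.0 - 2.0 for i in range(grid))]
            weights = min(trial, key=lambda w: total_loss(w, nbest_lists))
        # the full procedure would re-decode the dev set with these weights
        # and merge the fresh n-best lists in before the next iteration
    return weights
```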
            {
                "text": "For the IWSLT 2005 results that were submitted, we used Philip Koehn's Pharaoh Decoder [9] . Pharaoh is stack decoder with an A-star search heuristic. We have recreated similar results with our own in-house decoder, which was not ready before the submission deadline.",
                "cite_spans": [
                    {
                        "start": 87,
                        "end": 90,
                        "text": "[9]",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Decoding",
                "sec_num": "6."
            },
            {
                "text": "Our in-house decoder utilizes a Viterbi graph search algorithm, where the graph is built from left to right a word at a time. Each node in the search graph contains a list of backpointers to previous nodes, the probabilities for each translation option, the trigram/bigram context needed for node expansion, and the best path so far. When creating new nodes, the number of nodes can be greatly reduced by using bigrams whenever a particular trigram does not exist in the language model. After all possible phrases are added for each word, both beam pruning and histrogram pruning are used to rid the search graph of unlikely candidate nodes based on the best path.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Decoding",
                "sec_num": "6."
            },
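A small sketch of the two pruning steps applied after node expansion. Scores are assumed to be in the log domain; the beam width is an illustrative choice, while the histogram limit of 350 matches the stack size reported in Section 7.

```python
import math

def prune(nodes, beam=math.log(1e-3), histogram=350):
    best = max(node["score"] for node in nodes)
    # beam pruning: drop nodes too far below the current best path
    survivors = [n for n in nodes if n["score"] >= best + beam]
    # histogram pruning: keep at most a fixed number of the best nodes
    survivors.sort(key=lambda n: n["score"], reverse=True)
    return survivors[:histogram]
```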
            {
                "text": "This search algorithm offers fast decoding and easy generation of output word lattices by simply traversing the final data structure. In the case of monotone decoding, this search algorithm is capable of real-time decoding one word at a time (as from a speech recognizer).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Decoding",
                "sec_num": "6."
            },
            {
                "text": "In order to include distortion in the search, additional information needs to be kept for each node including a word coverage vector, the distortion probability, and an estimate of the future cost. Nodes are connected to previous nodes only if the intersection of the word coverage vectors is empty. The future cost estimate is used so that all nodes can be pruned together (A-star search). This limits the search space enough so that unconstrained reordering can be used without running out of memory, but there is a possibility that the search will not be able to select a final path that covers all input words. In the case of a search failure, the search is restarted using improved future cost heuristics from the previous pass.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Decoding",
                "sec_num": "6."
            },
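The coverage-vector test can be expressed with integer bitmasks over source positions, as in this sketch; the paper does not specify a representation, so the bitmask encoding is an assumption.

```python
def span_mask(first, final):
    """Bitmask with bits first..final set."""
    return ((1 << (final - first + 1)) - 1) << first

def can_extend(coverage, first, final):
    # connect only if the intersection of the coverage vectors is empty,
    # i.e. the candidate phrase covers no already-translated source word
    return coverage & span_mask(first, final) == 0

cov = span_mask(0, 1)         # source words 0-1 already covered
print(can_extend(cov, 1, 2))  # False: word 1 would be covered twice
print(can_extend(cov, 2, 4))  # True: disjoint spans
```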
            {
                "text": "In this section, we present results from applying the system described above to the IWSLT 2005 evaluation. We present results from both manual and ASR transcription conditions in the supplied data track for Chinese-to-English translation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "7."
            },
            {
                "text": "Because the training data in this track is limited to 20,000 sentence pairs, we employed a number of small modifications to the basic phrase-model extraction procedure to minimize the number of out-of-vocabulary tokens during decoding. These techniques are described in the section below.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "7."
            },
            {
                "text": "For both ASR and manual transcription conditions we did not limit distortion and the four-gram and class-based language models were applied during rescoring.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "7."
            },
            {
                "text": "For efficiency reasons we limited the decoder stack to 350 hypotheses.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "7."
            },
            {
                "text": "Two development sets were provided as part of the IWSLT 2005 evaluation campaign: For minimum error rate training we held out one of devsets 1-4 and tested against another devset (without overlap). Interestingly, devset sentences were generally longer in set 4 and, not surprisingly, scores were generally worse. Devset 5 was used to optimize final parameters for test set decoding. Tables 2 and 3 Table 3 : Devset 3 and 4 Results",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 383,
                        "end": 397,
                        "text": "Tables 2 and 3",
                        "ref_id": "TABREF4"
                    },
                    {
                        "start": 398,
                        "end": 405,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Dev Set Experiments",
                "sec_num": "7.1."
            },
            {
                "text": "Using devset 2 for development testing, we experimented with a number of different configurations of our basic SMT system. Table 4 shows the effect of different preprocessing options and training parameters on BLEU scores for devset 2. Applying these settings and the optimization weights used for devset 2, we tested against each of the ASR outputs for devset 2. Table 5 shows results from each of ASR engine and their corresponding ASR for both 1- For each of the n-best > 1 conditions we simply decode the entire ASR n-best list and merge results before rescoring. We did not weight ASR acoustic or language models for this experiment, but this remains to be explored in future experiments.",
                "cite_spans": [
                    {
                        "start": 447,
                        "end": 449,
                        "text": "1-",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 123,
                        "end": 130,
                        "text": "Table 4",
                        "ref_id": "TABREF6"
                    },
                    {
                        "start": 364,
                        "end": 371,
                        "text": "Table 5",
                        "ref_id": "TABREF8"
                    }
                ],
                "eq_spans": [],
                "section": "Dev Set Experiments",
                "sec_num": "7.1."
            },
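A minimal sketch of the unweighted n-best procedure referenced above: every ASR hypothesis is translated, the resulting translation n-best lists are pooled, and duplicates are collapsed before rescoring. decode_nbest and rescore are stand-ins for the decoder and rescoring stage; the duplicate-collapsing rule is an assumption.

```python
def translate_asr_nbest(asr_hyps, decode_nbest, rescore):
    pool = []
    for hyp in asr_hyps:                 # ASR scores are ignored here,
        pool.extend(decode_nbest(hyp))   # as in the experiments reported
    merged = {}
    for cand in pool:                    # keep the best-scoring copy
        key = cand["translation"]
        if key not in merged or cand["score"] > merged[key]["score"]:
            merged[key] = cand
    return rescore(list(merged.values()))
```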
            {
                "text": "We submitted results using multiple optimization parameters from different devsets (devset 5 parameters being primary). These results are shown in Table 6 . As expected, optimizing with devset 5 yielded the best performance. Although our optimization loss function was based on BLEU-4, our system performed reasonably well with other metrics as well (METEOR, PER, NIST). It is also interesting to note that the additional 506 sentences of the CSTAR-03 development set did not provide us much gain over the IWSLT-04 set alone.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 147,
                        "end": 154,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "IWSLT 2005 Manual Transcription",
                "sec_num": "7.2."
            },
            {
                "text": "Better language model rescoring seems to have made the biggest difference. As we did not fully examine language model training possibilities, it is possible that further improvements in this area may yield even better performance. Table 6 : Test set scores with various optimization settings",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 231,
                        "end": 238,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "IWSLT 2005 Manual Transcription",
                "sec_num": "7.2."
            },
            {
                "text": "Our primary submission for this track used n-best output from the ASR system. As we saw with our devset 2 experiments, n-best ASR output gives a gain of 2-3 BLEU points over the 1-best hypothesis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "IWSLT 2005 ASR Transcription",
                "sec_num": "7.3."
            },
            {
                "text": "NIST BLEU Table 7 : Test set scores with various optimization settings Again in this condition, our system's performance was quite reasonable, especially with respect to non-BLEU metrics. We expect that more efficient use of ASR model parameters (acoustic and language model scores, and perhaps lattice posteriors) we could yield even better performance.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 10,
                        "end": 17,
                        "text": "Table 7",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Configurations N-best Length",
                "sec_num": null
            },
            {
                "text": "From experiments with both the development and test data, it is clear that better language modeling can increase the performance of phrase-based statistical machine translation systems as evidence from [16] and [17] also suggests. It is interesting to note that in the case of Chinese translation, distortion models seems to play a major role and, as such, further work is needed to design models that account for the reordering effects we see in Chinese. In past experiments, this has not necessarily been true, for instance with French to English translation monotone decoding often yields better performance.",
                "cite_spans": [
                    {
                        "start": 202,
                        "end": 206,
                        "text": "[16]",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 211,
                        "end": 215,
                        "text": "[17]",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "8."
            },
            {
                "text": "We are just beginning to explore methods for fusing ASR and MT systems. From an anecdotal evaluation of our ASR MT results by a native speaker, we found that only 2 of 30 sampled sentences contained additional errors (relative to MT output from manual transcription). Both this and objective evaluation results from this evaluation are encouraging, but it is clear that much more work is needed to make the output of Speech MT usable. We expect that joint optimization of ASR model parameters and MT model parameters could yield better results but careful research is needed to integrate ASR and MT parameter appropriately.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "8."
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank John Luu at AFRL for his error analysis efforts after the evaluation. We would also like to thank Doug Jones for setting up an in-house MT evaluation test suite for benchmarking our development progress. Finally, thanks to the members of Lincoln Lab IST group for leaving us plenty of machines to run our test data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": "9."
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "The mathematics of statistical machine translation: Parameter estimation",
                "authors": [
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Brown",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Della Pietra",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Della Pietra",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Mercer",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "19",
                "issue": "2",
                "pages": "263--311",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Brown, P., Della Pietra, V., Della Pietra, S. and Mercer, R. \"The mathematics of statistical machine translation: Parameter estimation\", Computational Linguistics 19(2):263--311. 1993.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "HMM-based word alignment in statistical translation",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Vogel",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Tillmann",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceedings of the 16th Conference on Computational Linguistics",
                "volume": "2",
                "issue": "",
                "pages": "836--841",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vogel, S., Ney, H., and Tillmann, C. \"HMM-based word alignment in statistical translation\", In Proceedings of the 16th Conference on Computational Linguistics -Volume 2, pp. 836-841, Copenhagen, Denmark, August, 1996.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Statistical machine translation: Final report",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Al-Onaizan",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Curin",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Jahr",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Lafferty",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [
                            "D"
                        ],
                        "last": "Melamed",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Purdy",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [
                            "A"
                        ],
                        "last": "Smith",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Yarowsky",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proceedings of the Summer Workshop on Language Engineering",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Al-Onaizan, Y., Curin, J., Jahr, M., Knight, K., Lafferty, J., Melamed, I.D., Och, F.J., Purdy, D., Smith, N.A., Yarowsky, D., \"Statistical machine translation: Final report\", In Proceedings of the Summer Workshop on Language Engineering. John Hopkins University Center for Language and Speech Processing, Baltimore, MD 1999.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Improved Statistical Alignment Models",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Hermann",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proc. of the 38th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "440--447",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Och, F. J. and Hermann, N. \"Improved Statistical Alignment Models\", In Proc. of the 38th Annual Meeting of the Association for Computational Linguistics, pp. 440-447, Hong Kong, October, 2000.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Statistical Phrase-Based Translation",
                "authors": [
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Koehn",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Marcu",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the Human Language Technology Conference 2003 (HLT-NAACL 2003)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Koehn, P., Och, F. J. and Marcu, D., \"Statistical Phrase- Based Translation\", In Proceedings of the Human Language Technology Conference 2003 (HLT-NAACL 2003), Edmonton, Canada, May 2003.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Discriminative Training and Maximum Entropy Models for Statistical Machine Translation",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "ACL 2002: Proc. of the 40th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "295--302",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Och, F. J. and Ney, H., \"Discriminative Training and Maximum Entropy Models for Statistical Machine Translation\". In ACL 2002: Proc. of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 295-302, Philadelphia, PA, July 2002.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Minimum Error Rate Training for Statistical Machine Translation",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "ACL 2003: Proc. of the 41st Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Och, F. J., \"Minimum Error Rate Training for Statistical Machine Translation\", In ACL 2003: Proc. of the 41st Annual Meeting of the Association for Computational Linguistics, Japan, Sapporo, July 2003.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Considerations in Minimum Classification Error and Maximum Mutual Information Training for Statistical Machine Translation",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Venugopal",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Vogel",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of the Tenth Conference of the European Association for Machine Translation (EAMT-05)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Venugopal, A. and Vogel S., \"Considerations in Minimum Classification Error and Maximum Mutual Information Training for Statistical Machine Translation\", In Proceedings of the Tenth Conference of the European Association for Machine Translation (EAMT-05), Budapest, Hungary, May 2005.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Pharaoh: A Beam Search Decoder for Phrase-Based Statistical Machine Translation Models",
                "authors": [
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Koehn",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the Association of Machine Translation in the Americas (AMTA-2004)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Koehn, P., \"Pharaoh: A Beam Search Decoder for Phrase-Based Statistical Machine Translation Models\", In Proceedings of the Association of Machine Translation in the Americas (AMTA-2004), Washington, DC, October, 2004.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Generation of Word Graphs in Statistical Machine Translation",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Ueffing",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proc. Conference on Empirical Methods for Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "156--163",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ueffing, N., Och, F. J., Ney, H., \"Generation of Word Graphs in Statistical Machine Translation\", In Proc. Conference on Empirical Methods for Natural Language Processing, pp. 156-163, Philadelphia, PA, July 2002.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "The ISI/USC MT System",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Thayer",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Ettelaie",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Marcu",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [
                            "S"
                        ],
                        "last": "Munteanu",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "Q",
                        "middle": [],
                        "last": "Tipu",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. of the International Workshop on Spoken Language Translation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Thayer, I., Ettelaie, E., Knight, K., Marcu, D., Munteanu D. S., Och F. J., and Tipu, Q., \"The ISI/USC MT System\", In Proc. of the International Workshop on Spoken Language Translation, Kyoto, Japan, 2004.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "SRILM -An Extensible Language Modeling Toolkit",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Stolcke",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the International Conference on Spoken Language Processing",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Stolcke, A., \"SRILM -An Extensible Language Modeling Toolkit\", In Proceedings of the International Conference on Spoken Language Processing, Denver, CO, 2002.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "An Empirical Study of Smoothing Techniques for Language Modeling",
                "authors": [
                    {
                        "first": "S",
                        "middle": [
                            "F"
                        ],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Goodman",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Computer Speech and Language",
                "volume": "13",
                "issue": "",
                "pages": "359--394",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chen, S. F. and Goodman, J., \"An Empirical Study of Smoothing Techniques for Language Modeling\", Computer Speech and Language, 13:359-394, October, 1999.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "A Bit of Progress in Language Modeling",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Goodman",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Computer Speech and Language",
                "volume": "",
                "issue": "",
                "pages": "403--434",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Goodman, J., \"A Bit of Progress in Language Modeling\", Computer Speech and Language, pp. 403-434, 2001.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Minimum Error Rate Training of Log-Linear Translation Models",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Cettolo",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Federico",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. of the International Workshop on Spoken Language Translation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cettolo, M. and Federico, M., \"Minimum Error Rate Training of Log-Linear Translation Models\", In Proc. of the International Workshop on Spoken Language Translation, Kyoto, Japan, 2004.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Alignment Templates: the RWTH SMT System",
                "authors": [
                    {
                        "first": "O",
                        "middle": [],
                        "last": "Bender",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Zens",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Matusov",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. of the International Workshop on Spoken Language Translation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bender, O., Zens, R., Matusov, E. and Ney, H., \"Alignment Templates: the RWTH SMT System\", In Proc. of the International Workshop on Spoken Language Translation, Kyoto, Japan, 2004.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "The Google MT System",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Och",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Presentation at NIST MT Workshop",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Och, F., \"The Google MT System\", Presentation at NIST MT Workshop, Bethesda, MD, 2005.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "Basic Statistical Translation Architecture",
                "type_str": "figure",
                "uris": null,
                "num": null
            },
            "FIGREF1": {
                "text": "the feature function associated with estimates from each individual translation/language model.",
                "type_str": "figure",
                "uris": null,
                "num": null
            },
            "FIGREF2": {
                "text": "Evaluation data from CSTAR 2003 Evaluation data from IWSLT 2004We used these tests to construct 3 other dev sets for more robust parameter optimization:\u2022 Devset 3: first \u00bd of Devsets 1 and Devset 4: second \u00bd of Devsets 1 and Devset 5: sum of Devset 1 and 2",
                "type_str": "figure",
                "uris": null,
                "num": null
            },
            "TABREF2": {
                "html": null,
                "type_str": "table",
                "num": null,
                "text": "",
                "content": "<table/>"
            },
            "TABREF3": {
                "html": null,
                "type_str": "table",
                "num": null,
                "text": "shows scores from different development run configurations.",
                "content": "<table><tr><td>Dev</td><td>1</td><td>2</td></tr><tr><td>Test</td><td/><td/></tr><tr><td>1</td><td/><td>36.64</td></tr><tr><td>2</td><td>42.00</td><td/></tr></table>"
            },
            "TABREF4": {
                "html": null,
                "type_str": "table",
                "num": null,
                "text": "Devset 1 and 2 Results",
                "content": "<table><tr><td>Dev</td><td>3</td><td>4</td></tr><tr><td>Test</td><td/><td/></tr><tr><td>3</td><td/><td>42.44</td></tr><tr><td>4</td><td>33.84</td><td/></tr></table>"
            },
            "TABREF6": {
                "html": null,
                "type_str": "table",
                "num": null,
                "text": "",
                "content": "<table/>"
            },
            "TABREF8": {
                "html": null,
                "type_str": "table",
                "num": null,
                "text": "Devset 2 results with various ASR transcripts.",
                "content": "<table/>"
            }
        }
    }
}