{
    "paper_id": "2005",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:24:02.142077Z"
    },
    "title": "The CASIA Phrase-Based Machine Translation System",
    "authors": [
        {
            "first": "Wei",
            "middle": [],
            "last": "Pang",
            "suffix": "",
            "affiliation": {},
            "email": "wpang@hitic.ia.ac.cn"
        },
        {
            "first": "Zhendong",
            "middle": [],
            "last": "Yang",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Zhenbiao",
            "middle": [],
            "last": "Chen",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Wei",
            "middle": [],
            "last": "Wei",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Bo",
            "middle": [],
            "last": "Xu",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Chengqing",
            "middle": [],
            "last": "Zong",
            "suffix": "",
            "affiliation": {},
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "In this paper we propose a phrase-based translation system. In the system, we use phrase translation model instead of word-based model. An improved method of computing phrase translation probability is studied. We translate numeral phrases first by using a standard templates depository. We develop a phrase-based decoder that employs a beam search algorithm. To make the result more reasonable, we apply those words with fertility probability of zero. We improve the previously proposed tracing back algorithm to get the best path. Some experiments concerned are presented.",
    "pdf_parse": {
        "paper_id": "2005",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "In this paper we propose a phrase-based translation system. In the system, we use phrase translation model instead of word-based model. An improved method of computing phrase translation probability is studied. We translate numeral phrases first by using a standard templates depository. We develop a phrase-based decoder that employs a beam search algorithm. To make the result more reasonable, we apply those words with fertility probability of zero. We improve the previously proposed tracing back algorithm to get the best path. Some experiments concerned are presented.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Statistical machine translation is a promising approach for large vocabulary text translation. In the early 90s, IBM developed Candide system. Since then, many statistical machine translation systems were proposed [2] [3] . These systems apply a translation model to capture the relationship between the source and target languages, and use a language model to drive the search process. The primary IBM model was purely word-based. To get more complex structure, better lexical choice and more reliable local reordering, the phrase-based statistical machine translation systems were proposed. Yamada and Knight [4] used phrase translation in a syntax-based translation system; March and Wong [5] introduced a joint-probability model for phrase translation; CMU and IBM also improved their systems with phrase translation capability.",
                "cite_spans": [
                    {
                        "start": 214,
                        "end": 217,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 218,
                        "end": 221,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 611,
                        "end": 614,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 692,
                        "end": 695,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our system applies a phrase-based translation model to capture the corresponding relationship between two languages. We propose a formula to compute the phrase translation probability through word alignment. The phrase-based decoder we developed employs a beam search algorithm, similar to the one in [6] . We applied zero fertility words in the target language. Because the translation quality largely depends on the accuracy of phrase-to-phrase translation pairs extracted from bilingual corpora, we propose a different tracing back algorithm to find the best path. Four methods are studied to extract bilingual phrase pairs. We describe these methods and phrase-based translation model in Section 2. Section 3 explains the method of numeral phrase translation. Section 4 outlines the architecture of the decoder that combines the translation model, distortion model, language model to generate target sentence. In Section 5, we present a series of experiments in which Chinese sentences are translated into English sentences, and analyze the results of these experiments. We make a summary in Section 6.",
                "cite_spans": [
                    {
                        "start": 301,
                        "end": 304,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our system is based on a phrase translation model, which is different from the original IBM model. The phrase we mention here is composed of a series of words that perhaps possess no syntax or semantic meanings. In addition, a word can also be treated as a phrase, so the word-based model is included in the phrase-based model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Phrase Translation Model",
                "sec_num": "2"
            },
            {
                "text": "There are different methods of getting phrase pairs from a bilingual corpus .We used four methods as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Phrase Translation Model",
                "sec_num": "2"
            },
            {
                "text": "By using IBM model 4, we get a series of target language words that correspond to the source language words of the bilingual sentence pair. Then we form these words into the target phrase. For with a certain probability, and get the target word's position in . If there are more than one target language word correspond to , then the one with the highest probability is selected. Extracting the words that lie between the minimum and maximum position, we get the correspondent target phrase = . This method is rather simple, but the length of target phrase and source phrase may differ greatly. So we can set a threshold of length or translation probability to make the result more reasonable. In training corpus, each sentence pair (F,E) is represented as a two-dimensional matrix R n*m . f is composed of n words (f 1 , f 2 \u2026\u2026f n ), and e is made up of m words (e 1 , e 2 \u2026\u2026e m ). To measure the \"goodness\" of translating a source word to a target word, we use the value of Point-wise Mutual Information (MI) between these two words. Thus, we can mark the value (e,f) in the matrix as I(e,f) ,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-1 Extracting directly through IBM Word-Based model",
                "sec_num": null
            },
            {
                "text": "(2-1) The value of P(e), P(f), P(e,f) can be numerated from the training result. With all nodes value computed in the matrix, we can get an MI matrix of a sentence pair.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-1 Extracting directly through IBM Word-Based model",
                "sec_num": null
            },
            {
                "text": "We extract the phrases as the following steps: a. Select a point with the highest value in matrix, and mark it as max(i,j). b. Confine it with an evaluating function (for example, the ratio of two nodes' value in the matrix, I(f x1 ,e y1 )/ I(f x2 ,e y2 )>num). c.Expand the 'max' cell to the largest possible rectangle regions (R start , R end , C start ,C end ) under two constraints: (1) . all the cells in the expanded region accord with the evaluation function ; (2) . all the cells should not be marked. d. The words in this area make up of a phrase pair. Mark all nodes between x-coordinate and y-coordinate in this matrix, then search other max points and corresponding rectangles among the rest unmarked nodes until all nodes in the MI matrix of this sentence pair are marked. [7] ",
                "cite_spans": [
                    {
                        "start": 387,
                        "end": 390,
                        "text": "(1)",
                        "ref_id": null
                    },
                    {
                        "start": 468,
                        "end": 471,
                        "text": "(2)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 786,
                        "end": 789,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-1 Extracting directly through IBM Word-Based model",
                "sec_num": null
            },
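            {
                "text": "Illustrative sketch (editor's addition, not from the original paper): the greedy MI-rectangle extraction in steps a-d above can be sketched in Python as follows, assuming P(e), P(f) and P(e, f) have already been estimated from the training corpus, and taking one possible reading of the ratio test in step b (a cell qualifies if its MI value is within a factor num of the current maximum).\n\nimport math\n\ndef mi_matrix(f_words, e_words, p_f, p_e, p_fe):\n    # I(e, f) = log(P(e, f) / (P(e) * P(f))), formula (2-1); unseen pairs get a small floor.\n    return [[math.log(p_fe.get((f, e), 1e-12) / (p_f[f] * p_e[e]))\n             for e in e_words] for f in f_words]\n\ndef extract_phrase_pairs(mi, num=2.0):\n    # Step a: pick the highest unmarked cell; step c: grow the largest rectangle whose\n    # cells are unmarked and pass the (assumed) ratio test; step d: emit the rectangle\n    # as a phrase pair, mark its rows and columns, and repeat until all cells are marked.\n    n, m = len(mi), len(mi[0])\n    marked = [[False] * m for _ in range(n)]\n    pairs = []\n    while any(not c for row in marked for c in row):\n        x, y = max(((i, j) for i in range(n) for j in range(m) if not marked[i][j]),\n                   key=lambda ij: mi[ij[0]][ij[1]])\n        top = mi[x][y]\n        ok = lambda i, j: (not marked[i][j]) and mi[i][j] >= top / num\n        rs, re_, cs, ce = x, x, y, y\n        grew = True\n        while grew:\n            grew = False\n            if rs > 0 and all(ok(rs - 1, j) for j in range(cs, ce + 1)):\n                rs -= 1; grew = True\n            if re_ < n - 1 and all(ok(re_ + 1, j) for j in range(cs, ce + 1)):\n                re_ += 1; grew = True\n            if cs > 0 and all(ok(i, cs - 1) for i in range(rs, re_ + 1)):\n                cs -= 1; grew = True\n            if ce < m - 1 and all(ok(i, ce + 1) for i in range(rs, re_ + 1)):\n                ce += 1; grew = True\n        pairs.append(((rs, re_), (cs, ce)))\n        for i in range(rs, re_ + 1):\n            for j in range(m):\n                marked[i][j] = True\n        for j in range(cs, ce + 1):\n            for i in range(n):\n                marked[i][j] = True\n    return pairs",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-1 Extracting directly through IBM Word-Based model",
                "sec_num": null
            },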
            {
                "text": "A simple way to extract phrase pairs is using a word alignment model. We use the HMM-based alignment model introduced in [8] . For a source phrase that ranges from position 1 j to 2 j in sentence, we can get the corresponding target phrase's beginning position and ending position to extract the phrase translation. Just like the method described in 2-1, a given factor that prevents the length of the phrase pairs differ greatly is needed.",
                "cite_spans": [
                    {
                        "start": 121,
                        "end": 124,
                        "text": "[8]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-3 Extracting Phrase Pairs From HMM Word Alignment Model",
                "sec_num": null
            },
            {
                "text": "The Giza++ toolkit can be used to establish word-based alignments. There are some heuristic functions can improve the quality of alignment and extract phrase pair. In [6] , the parallel corpus is aligned bidirectionally, some additional alignment points are added to the intersection of the two alignments. All aligned phrase pairs are connected to be consistent with the word alignment: each word corresponds strictly to another word in a legal phrase pair, not to any word outside the pair [9] .",
                "cite_spans": [
                    {
                        "start": 167,
                        "end": 170,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 492,
                        "end": 495,
                        "text": "[9]",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-4 Extracting phrase pair by Giza++ toolkit",
                "sec_num": null
            },
            {
                "text": "CMU used the phrase translation probability formula based on the IBM1 alignment model:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-5 Phrase Translation Probability",
                "sec_num": null
            },
            {
                "text": "( | ) ( | ) i j i j p c e p c e = \u220f\u2211 (2-2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-5 Phrase Translation Probability",
                "sec_num": null
            },
            {
                "text": "There is a drawback for this method: If only one word of source phrase has no appropriate corresponding word in target phrase, the phrase translation probability will be small. Since there are many auxiliary words and mood words in Chinese, this issue becomes more serious. To prevent this, we use the word alignment generated by the IBM model 4 to divide the whole phrase pair into several small phrase pair blocks. If one source word aligns to several target words or several source words align to one target word, then they are selected to form a block. Thus, the phrase translation probability formula becomes:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-5 Phrase Translation Probability",
                "sec_num": null
            },
            {
                "text": "1 ( | ) ( ( | )) ik j i k j i p c e p c e n = \u220f \u2211\u2211 (2-3)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-5 Phrase Translation Probability",
                "sec_num": null
            },
            {
                "text": "where i is the sequence number of the small phrase translation blocks, k is the sequence number of the words in the i phrase block, j is the sequence number of the target word in the phrase, and is the total number of words in block i. i n",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-5 Phrase Translation Probability",
                "sec_num": null
            },
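            {
                "text": "Illustrative sketch (editor's addition, not from the original paper): a minimal Python rendering of the contrast between the CMU-style score (2-2) and the block-averaged score (2-3) as reconstructed above; the word translation probability function p_word and the pre-computed alignment blocks are assumed inputs.\n\ndef phrase_prob_ibm1(c_words, e_words, p_word):\n    # Formula (2-2): p(c|e) = prod_j sum_i p(c_j | e_i).\n    prob = 1.0\n    for c in c_words:\n        prob *= sum(p_word(c, e) for e in e_words)\n    return prob\n\ndef phrase_prob_blocks(blocks, e_words, p_word):\n    # Formula (2-3): p(c|e) = prod_i (1/n_i) * sum_k sum_j p(c_ik | e_j),\n    # where blocks[i] holds the source words of the i-th aligned block.\n    prob = 1.0\n    for block in blocks:\n        prob *= sum(p_word(c, e) for c in block for e in e_words) / len(block)\n    return prob",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2-5 Phrase Translation Probability",
                "sec_num": null
            },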
            {
                "text": "We can always find numeral in translation. In Chinese, the express method of numeral is rather simple. While in English, it's more complicated, which makes several possible translation results from Chinese to English. In our system, numeral are picked out for special treatment to reduce mistakes in translation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "We summarized 5 ways of numeral translation: number translation, count translation, ordinal translation, year translation, and rule translation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "Number translation: For phone numbers and room numbers, they are only cardinal numbers. We can just translate them directly from Chinese to English.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "Count translation: For integers, they are entirely constituted by numbers, and they may have digit numeral ,such as \" \u767e \". In Chinese, we count numbers by 4 digits. While in English, we count numbers by 3 digits. So it's inappropriate to translate them directly. We adopted Arabic Numerals as an intermediary in translation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "Ordinal translation: For ordinal numbers, we can just translate them by corresponding English ordinal numbers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "Year translation: Divide it into 2 two-number, then translate accordingly.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "Rule translation: Some numeral are made up of numbers and other words, and numbers only mean some sequence. In translation, there would be no numbers in English, such as Monday, March, and so on. For these numeral phrase extracted from the training materials, we build up a template depository. Each pair of templates include a template of Chinese numeral phrase, a template of English numeral phrase, and a property item of representing the numeral sequence in Chinese-English translation. The variable of template of Chinese numeral phrase is the numeral itself. For each numeral variable, there are a property of its meaning and a property of the translation method. For each Chinese numeral phrase template, there would be exactly one English numeral phrase template corresponding to it.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "When we translate, firstly we need to extract all numeral from the sentence and put them into the identifiable numeral depository. Then replace these numeral with the uniform variables. Secondly, search in the template depository, which stores all identifiable numeral phrase templates. Find out the templates most suitable for this sentence. Next, decide the translation method by the property of each variable, and translate them one by one by using the identifiable numeral depository. Then determine the sequence of each variable in the English template by using the sequence property. Last, compare to the English template, translate those Chinese numeral phrases into English.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
            {
                "text": "With the experiment in Section 5, the result raises from 0.2882 to 0.3117, increased by 0.0235.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },
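            {
                "text": "Illustrative sketch (editor's addition, not from the original paper): the year translation rule above, splitting a four-digit year into two two-digit numbers and reading each part in English; years such as 2005 would need an extra rule, so this only illustrates the split.\n\nONES = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',\n        'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',\n        'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']\nTENS = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy',\n        'eighty', 'ninety']\n\ndef two_digit(n):\n    # English words for 0-99.\n    if n < 20:\n        return ONES[n]\n    return TENS[n // 10] + ('-' + ONES[n % 10] if n % 10 else '')\n\ndef translate_year(year):\n    # Split the year into two two-digit numbers and translate each part,\n    # e.g. translate_year('1998') -> 'nineteen ninety-eight'.\n    return two_digit(int(year[:2])) + ' ' + two_digit(int(year[2:]))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Numeral",
                "sec_num": "3."
            },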
            {
                "text": "The decoding process consists of two steps: a, the phrase translations are generated for the input text, which is done before the searching begins. b, the search process takes place, through which phrase translation model, language model, distortion model are applied. Both steps will be described in detail.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "decoding",
                "sec_num": "4"
            },
            {
                "text": "A phrase translation table can be achieved through a bilingual corpus by the methods introduced in Section 2. Given an input text, all the phrase translations concerned can be applied by searching through the translation table, each applicable phrase translation for the source language phrase is a translation option [6] . Each translation option stores some information of the source phrase, the target phrase and phrase translation probability.",
                "cite_spans": [
                    {
                        "start": 318,
                        "end": 321,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4-1 Translation Options",
                "sec_num": null
            },
            {
                "text": "The phrase-based decoder we developed employs a beam search similar to the one used by [6] . Considering the difference of expression habit between Chinese and English, many words must be complemented when translating Chinese sentence into English, such as a, an, the, of\u2026Such word is difficult to extract. Its fertility is zero and corresponds to NULL in IBM Model 4. We call them F-zerowords. So after every new hypothesis expanded, F-zerowords can be added, which means, a NULL is added after the source phrase translated. Since perhaps not all words of the input sentence are necessary to be translated, we select the final hypothesis of the best translation in the last several stacks according to their scores when tracing back. This is different from [6] . Let's describe it in detail.",
                "cite_spans": [
                    {
                        "start": 87,
                        "end": 90,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 758,
                        "end": 761,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4-2 Searching Algorithm",
                "sec_num": null
            },
            {
                "text": "The decoder starts with an initial hypothesis. There are two kinds of initial hypothesis: one is an empty hypothesis where no source phrase are translated and no target phrases are generated, the other is generating F-zerowords and corresponding to a NULL we supposed at the beginning of the input text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4-2 Searching Algorithm",
                "sec_num": null
            },
            {
                "text": "New hypotheses are expanded from the currently existing hypotheses as follows: If the target phrase of the existing hypothesis is F-zeroword, an untranslated phrase and one of it's translation options are selected. If it is not F-zeroword, there are two choices: one is expanding to a hypothesis which is achieved as described, the other is expanding to a hypothesis by selecting one of the F-zerowords as output, and corresponding to a NULL which added into the input text after the source phrase of the existing hypothesis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4-2 Searching Algorithm",
                "sec_num": null
            },
            {
                "text": "The hypotheses are stored in different stacks. Each of them has a sequence number. The odd stack hypotheses whose target phrases are F-zerowords and in which p source words have been translated accumulatively. We recombine search hypotheses as described in [10] , and prune out weak hypotheses based on the probability they incurred so far and a future score estimated as in [6] . All these reduce the number of hypotheses stored in stacks to speed up the decoder.",
                "cite_spans": [
                    {
                        "start": 257,
                        "end": 261,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 375,
                        "end": 378,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4-2 Searching Algorithm",
                "sec_num": null
            },
            {
                "text": "The current probability of the new hypothesis is the probability of the original hypothesis multiplied with the translation, distortion and language probability of the added phrasal translation, the probability formula is: i \u2212 \u03bb value here as 1 temporarily. We can also take output sentence length model into account.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "p s",
                "sec_num": "2"
            },
            {
                "text": "( | ) ( | ) ( ) ( , )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "p s",
                "sec_num": "2"
            },
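            {
                "text": "Illustrative sketch (editor's addition, not from the original paper): the hypothesis score update implied by the formula above; the distortion form d = alpha ** |a_i - b_{i-1} - 1| follows [6], and alpha, the phrase translation probability and the language model probability are assumed to be supplied by the caller.\n\ndef extend_score(prev_score, phrase_prob, lm_prob, start_pos, prev_end_pos, alpha=0.5):\n    # New hypothesis score = previous score x phrase translation probability\n    # x distortion d(a_i, b_{i-1}) x language model probability of the added words.\n    distortion = alpha ** abs(start_pos - prev_end_pos - 1)\n    return prev_score * phrase_prob * distortion * lm_prob",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4-2 Searching Algorithm",
                "sec_num": null
            },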
            {
                "text": "The hypotheses are generated continuously until all the words of the input sentence are translated. However, considering there are many auxiliary words and mood words in Chinese, and these words have no corresponding English words, we don't require all words in source language to be translated. Supposing the length of source language sentence is L, we take a ratio 'a' according to experience. Then select the best sentence as the translation result from all candidate sentences longer than L*a.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "p s",
                "sec_num": "2"
            },
            {
                "text": "arg max{ } , The method we used (denoted as back1) is different from that in [6] (denoted as back2). And the experiments show our method has better performance in Section 4.",
                "cite_spans": [
                    {
                        "start": 77,
                        "end": 80,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "p s",
                "sec_num": "2"
            },
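            {
                "text": "Illustrative sketch (editor's addition, not from the original paper): the back1 selection rule described above; the stack layout is simplified to one stack per number of translated source words, the ratio a = 0.8 is only a placeholder, and hypotheses are assumed to be (score, target_sentence) pairs.\n\ndef back1_select(stacks, source_len, a=0.8):\n    # stacks[p] holds the hypotheses in which p source words have been translated;\n    # only hypotheses covering more than source_len * a words are candidates, and\n    # the one with the highest accumulative score wins.\n    candidates = [hyp for p, stack in enumerate(stacks) if p > source_len * a\n                  for hyp in stack]\n    return max(candidates, key=lambda hyp: hyp[0]) if candidates else None",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4-2 Searching Algorithm",
                "sec_num": null
            },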
            {
                "text": "We carried a number of experiments on Chinese-to-English translation tasks. A 31.6M bilingual corpus is used as training data for comparing different phrase translation extraction methods, investigating the effect of F-zerowords and the trace back method we used . We used a 60.9M bilingual corpus as training data to test the different effect of some maximum numbers of translation options for each source phrase. 1000 sentences of length 5-20 were reserved for testing of all the experiments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S 5 Experiments",
                "sec_num": null
            },
            {
                "text": "First, we compared the performance of the four methods and their combination for phrase translation extraction: extracting phrase pairs directly through IBM Model 4 (EDM), from HMM alignment model (HMM), integrated segmentation and phrase alignment (ISA) and Giza++ toolkit (Giza++). Table  1 shows the results of each method and their combination. All experiments used the decoder we described in Section 3. From table 1, we see that each phrase translation extraction approach gives different phrase pair numbers and translation results. The phrase pairs number from ISA is the smallest, EDM only extracts phrase pairs whose source language phrase is composed of two or three words, but the translation results of EDM and ISA are almost the same, the HMM a little inferior to them. The Giza++ extracts the most phrase pairs of the four methods, the translation result from it is superior to other methods. Combing these methods always leads to a little better result.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 284,
                        "end": 292,
                        "text": "Table  1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Comparison of Different Phrase Translation Extraction Approaches",
                "sec_num": "5.1"
            },
            {
                "text": "We also performed experiments to compare back1 and back2, the results are shown in table2. In the table, M means word-based translation model, +NF0 We can see the result of the word-based system with no F-zerowords and BACK2 is the lowest . When the tracing back method used in [6] is replaced by the method proposed by us, the result rises (increases) 0.0086 from 0.1833 to 0.1919 with no F-zerowords. The result increases more obviously form 0.2372 with back2 to 0.2663 when F-zerowords are added. When extracting phrase by Giza++, the result also goes up owning to using back1. All these show back1 is superior to back2 because some source language words are not necessary to be translated .",
                "cite_spans": [
                    {
                        "start": 278,
                        "end": 281,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Comparison of back2 and back1",
                "sec_num": "5.2"
            },
            {
                "text": "From table 2, when F-zerowords are added through the decoding of word-based system, the result goes up sharply from 0.1919 to 0.2663 with back1, increasing by 0.0744, which denotes F-zerowords play a important role. This is because some words such as art. , prep. are complemented under the drive of language model, distortion model, which makes the output sentence more reasonable. The same conclusion can be drawn when phrased extracted directly. But when we use Giza++ to extract phrase pair, the results almost remain the same when the same trace back method is used, which is because with the phrase number rising, some F-zerowords are extracted in the phrase, and the effect of F-zerowords is minified.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "the role of F-zerowords",
                "sec_num": "5.3"
            },
            {
                "text": "A strategy to limit the search space is reducing the number of translation options for each source phrase, we experiment on a 60.9M corpus, the results are shown in Table 3 . Table 3 , _sortn means selecting n translation options of the highest probability for each source phrase, 100 translation options (_sort100) proved to be sufficient. When translating 1000 sentences of 5-20 words, the result increases from 0.3418 to 0.3452, and the decoding time drops form 126 minutes to 45 minutes. Obviously we achieved fast decoding and better performance.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 165,
                        "end": 172,
                        "text": "Table 3",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 175,
                        "end": 182,
                        "text": "Table 3",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "the number of translation options for each source phrase",
                "sec_num": "5.4"
            },
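            {
                "text": "Illustrative sketch (editor's addition, not from the original paper): the _sortn pruning used in Table 3, keeping only the n highest-probability translation options per source phrase; the dictionary of (target_phrase, probability) lists is an assumed representation of the translation table.\n\ndef prune_translation_options(options, n=100):\n    # Keep the n translation options with the highest probability for each source\n    # phrase; _sort100 (n = 100) proved sufficient in the experiments above.\n    return {src: sorted(opts, key=lambda o: o[1], reverse=True)[:n]\n            for src, opts in options.items()}",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "the number of translation options for each source phrase",
                "sec_num": "5.4"
            },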
            {
                "text": "The test result of C_star in 2005 is shown in Table 4 . ASR is the result after speech recognise. We just selected the first of 20 candidates of speech recognise result to translate. Manual transcript is the result of directly text translation. Because we need to translate numeral phrase first, we didn't use the result of given document. We combine them, seperate and mark them, then handle the result. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 46,
                        "end": 53,
                        "text": "Table 4",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "C_star Test Result",
                "sec_num": "5.5"
            },
            {
                "text": "In summary, this paper presents a phrase-based statistical machine translation system including methods to extract phrase translations from a bilingual corpus, the phrase translation model, along with the decoding framework. Our experiments show that phrase-based translation gets much better performance than traditional word-based methods. The F-zerowords usually play an important role in decoding, and the tracing back method we used is superior to that used in [6] . Selecting a certain number of top-high-probability translation options for each source phrase may lead to fast decoding speed and high quality.",
                "cite_spans": [
                    {
                        "start": 466,
                        "end": 469,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "Although we apply four methods to extract phrase pairs, for some source language phrase, the better translation option's probability is not ensured to be higher than that of bad ones. We plan to do some studies about processing the phrase pairs extracted and computing the phrase translation's probability.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Fast Decoding for Statistical Machine Translation",
                "authors": [
                    {
                        "first": "Yeyi",
                        "middle": [],
                        "last": "Wang",
                        "suffix": ""
                    },
                    {
                        "first": "Alex",
                        "middle": [],
                        "last": "Waibel",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proc. ICSLP 98",
                "volume": "6",
                "issue": "",
                "pages": "2775--2778",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yeyi Wang and Alex Waibel. Fast Decoding for Statistical Machine Translation. Proc. ICSLP 98, Vol. 6,pp.2775-2778,1998",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Improved Statistical Alignment Model. Proceeding of ACL-00",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "440--447",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. J. Och and H. Ney. Improved Statistical Alignment Model. Proceeding of ACL-00,PP. 440-447,2000.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "A Syntax-based Statistical Translation Model",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Yamada",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proc. of the 39 th Annual Meeting of ACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yamada, K. and Knight. A Syntax-based Statistical Translation Model. In Proc. of the 39 th Annual Meeting of ACL, 2001",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "A Phrased-Based, Joint Probability Model for Statistical Machine Translation",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "March",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Wong",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "March, D. and Wong W. A Phrased-Based, Joint Probability Model for Statistical Machine Translation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, EMNLP.2002.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Statistical Phrase-Based Translation",
                "authors": [
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Koehn",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Marcu",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the Joint Conference on Human Language Technologies and the Annual Meeting of the North American Chapter of the Association of Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Koehn, P. ,Och, F. J., and Marcu , D. Statistical Phrase-Based Translation. In Proceedings of the Joint Conference on Human Language Technologies and the Annual Meeting of the North American Chapter of the Association of Computational Linguistics. 2003.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Integrated Phrase Segmentation and Alignment Model for Statistical Machine Translation",
                "authors": [
                    {
                        "first": "Ying",
                        "middle": [],
                        "last": "Zhang",
                        "suffix": ""
                    },
                    {
                        "first": "Stephan",
                        "middle": [],
                        "last": "Vogel",
                        "suffix": ""
                    },
                    {
                        "first": "Alex",
                        "middle": [],
                        "last": "Waibel",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Submitted to Proc. of International Conference on Natural Language Processing and Knowledge Engineering",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ying Zhang, Stephan Vogel and Alex Waibel. Integrated Phrase Segmentation and Alignment Model for Statistical Machine Translation. Submitted to Proc. of International Conference on Natural Language Processing and Knowledge Engineering(NLP-KE), 2003.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "HMM-based Word Alignment in Statistical Translation",
                "authors": [
                    {
                        "first": "Stephan",
                        "middle": [],
                        "last": "Vogel",
                        "suffix": ""
                    },
                    {
                        "first": "Hermann",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    },
                    {
                        "first": "Christoph",
                        "middle": [],
                        "last": "Tillmann",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "COLING'96: The 16 th Int. Conf. On Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "836--841",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Stephan Vogel, Hermann Ney, and Christoph Tillmann . HMM-based Word Alignment in Statistical Translation. in COLING'96: The 16 th Int. Conf. On Computational Linguistics,pp.836-841, 1996.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "improved alignment models for statistical machine translation",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Tillmann",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "proc. of the Joint Conf. of Empirical Methods in Natural Language Processing and Very Large Corpora",
                "volume": "",
                "issue": "",
                "pages": "20--28",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Och, F. J., Tillmann, C., and Ney, H. improved alignment models for statistical machine translation. In proc. of the Joint Conf. of Empirical Methods in Natural Language Processing and Very Large Corpora, pages 20-28,1999.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "An efficient search algorithm for statistical machine translation",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Ueffi Ng",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Data-Driven MT Workshop",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Och, F. J., Ueffi ng, N., and Ney, H. An efficient search algorithm for statistical machine translation. In Data-Driven MT Workshop. 2001.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "words form the source language phrase. According to IBM model 4, each word of phrase can find its correspondent target language word in"
            },
            "FIGREF2": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "all hypotheses whose target phrases are not F-zerowords and in which p source words have been translated so far. (If the target phrase of the hypothesis is not F-zeroword, it stored in the stack 2p-1, p is the number of source words translated), the even stack contains all"
            },
            "FIGREF3": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "is the distortion model which allows for reordering of the input sentence, it is computed as follows: position of the source phrase that was translated into the th target phrase, and denotes the end position of the source phrase that was translated into the th target phrase. Each model is weighted by a parameter. We take the"
            },
            "FIGREF4": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "is the accumulative probability of the hypothesis"
            },
            "TABREF0": {
                "text": "",
                "num": null,
                "type_str": "table",
                "content": "<table><tr><td/><td>Training</td><td>Size</td><td>of</td><td>Bleu</td></tr><tr><td>Method</td><td>corpus</td><td>phrase</td><td/><td>(4-gram)</td></tr><tr><td/><td>size</td><td>pair</td><td/></tr><tr><td/><td/><td colspan=\"2\">extracted</td></tr><tr><td>EDM</td><td>31.6M</td><td>194802</td><td/><td>0.2683</td></tr><tr><td/><td/><td>pairs</td><td/></tr><tr><td>ISA</td><td>31.6M</td><td>187011</td><td/><td>0.2751</td></tr><tr><td/><td/><td>pairs</td><td/></tr><tr><td>HMM</td><td>31.6M</td><td>278770</td><td/><td>0.2637</td></tr><tr><td/><td/><td>pairs</td><td/></tr><tr><td>Giza++</td><td>31.6M</td><td>695486</td><td/><td>0.2882</td></tr><tr><td/><td/><td>pairs</td><td/></tr><tr><td>Combing</td><td>31.6M</td><td colspan=\"2\">1077049</td><td>0.2887</td></tr><tr><td>methods</td><td/><td>pairs</td><td/></tr><tr><td>above</td><td/><td/><td/></tr></table>",
                "html": null
            },
            "TABREF1": {
                "text": "",
                "num": null,
                "type_str": "table",
                "content": "<table><tr><td>Method</td><td>Training</td><td>Bleu</td></tr><tr><td/><td>corpus</td><td>(4-gram)</td></tr></table>",
                "html": null
            },
            "TABREF2": {
                "text": "",
                "num": null,
                "type_str": "table",
                "content": "<table><tr><td>Methods</td><td>Bleu</td><td>Decoding</td></tr><tr><td/><td>(4-gram)</td><td>time</td></tr><tr><td>G+F0+back1</td><td>0.3418</td><td>2H6M</td></tr><tr><td>G+F0+back1_sort100</td><td>0.3452</td><td>40M</td></tr><tr><td>G+F0+back1_sort150</td><td>0.3446</td><td>54M</td></tr><tr><td>G+F0+back1_sort200</td><td>0.3423</td><td>64M</td></tr><tr><td>G+F0+back1_sort50</td><td>0.3366</td><td>23M</td></tr><tr><td>In</td><td/><td/></tr></table>",
                "html": null
            },
            "TABREF3": {
                "text": "",
                "num": null,
                "type_str": "table",
                "content": "<table><tr><td/><td colspan=\"2\">Output</td><td colspan=\"2\">manual transcrip</td></tr><tr><td>Training</td><td>1.5M</td><td>bilingual</td><td>1.5M</td><td>bilingual</td></tr><tr><td>corpus</td><td>sentences</td><td/><td>sentences</td><td/></tr><tr><td>size</td><td/><td/><td/><td/></tr><tr><td>BLEU</td><td>0.3845</td><td/><td>0.5279</td><td/></tr><tr><td>NIST</td><td>8.0406</td><td/><td>10.2499</td><td/></tr></table>",
                "html": null
            }
        }
    }
}