{
    "paper_id": "I05-1011",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:26:35.538541Z"
    },
    "title": "Aligning Needles in a Haystack: Paraphrase Acquisition Across the Web",
    "authors": [
        {
            "first": "Marius",
            "middle": [],
            "last": "Pa\u015fca",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Google Inc",
                "location": {
                    "addrLine": "1600 Amphitheatre Parkway, Mountain View",
                    "postCode": "94043",
                    "region": "California",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "P\u00e9ter",
            "middle": [],
            "last": "Dienes",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Google Inc",
                "location": {
                    "addrLine": "1600 Amphitheatre Parkway, Mountain View",
                    "postCode": "94043",
                    "region": "California",
                    "country": "USA"
                }
            },
            "email": "dienes@google.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper presents a lightweight method for unsupervised extraction of paraphrases from arbitrary textual Web documents. The method differs from previous approaches to paraphrase acquisition in that 1) it removes the assumptions on the quality of the input data, by using inherently noisy, unreliable Web documents rather than clean, trustworthy, properly formatted documents; and 2) it does not require any explicit clue indicating which documents are likely to encode parallel paraphrases, as they report on the same events or describe the same stories. Large sets of paraphrases are collected through exhaustive pairwise alignment of small needles, i.e., sentence fragments, across a haystack of Web document sentences. The paper describes experiments on a set of about one billion Web documents, and evaluates the extracted paraphrases in a natural-language Web search application.",
    "pdf_parse": {
        "paper_id": "I05-1011",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper presents a lightweight method for unsupervised extraction of paraphrases from arbitrary textual Web documents. The method differs from previous approaches to paraphrase acquisition in that 1) it removes the assumptions on the quality of the input data, by using inherently noisy, unreliable Web documents rather than clean, trustworthy, properly formatted documents; and 2) it does not require any explicit clue indicating which documents are likely to encode parallel paraphrases, as they report on the same events or describe the same stories. Large sets of paraphrases are collected through exhaustive pairwise alignment of small needles, i.e., sentence fragments, across a haystack of Web document sentences. The paper describes experiments on a set of about one billion Web documents, and evaluates the extracted paraphrases in a natural-language Web search application.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The information captured in textual documents frequently encodes semantically equivalent ideas through different lexicalizations. Indeed, given the generative power of natural language, different people employ different words or phrases to convey the same meaning, depending on factors such as background knowledge, level of expertise, style, verbosity and personal preferences. Two equivalent fragments of text may differ only slightly, as a word or a phrase in one of them is paraphrased in the other, e.g., through a synonym. Yet even small lexical variations represent challenges to any automatic decision on whether two text fragments have the same meaning, or are relevant to each other, since they are no longer lexically identical. Many natural-language intensive applications make such decisions internally. In document summarization, the generated summaries have a higher quality if redundant information has been discarded by detecting text fragments with the same meaning [1] . In information extraction, extraction templates will not be filled consistently whenever there is a mismatch in the trigger word or the applicable extraction pattern [2] . Similarly, a question answering system could incorrectly discard a relevant document passage based on the absence of a question phrase deemed as very important [3] , even if the passage actually contains a legitimate paraphrase.",
                "cite_spans": [
                    {
                        "start": 984,
                        "end": 987,
                        "text": "[1]",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 1156,
                        "end": 1159,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 1322,
                        "end": 1325,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In information retrieval, deciding whether a text fragment (e.g., a document) is relevant to another text fragment (i.e., the query) is crucial to the overall output, rather than merely useful within some internal system module. Indeed, relevant documents or passages may be missed, due to the apparent mismatch between their terms and the paraphrases occurring in the users' queries. The previously proposed solutions to the mismatch problem vary with respect to the source of the data used for enriching the query with alternative terms. In automatic query expansion, the top documents provide additional query terms [4] . An alternative is to attempt to identify the concepts captured in the queries and find semantically similar concepts in external resources, e.g., lexical databases [5, 6] . This paper explores a different direction, namely the unsupervised acquisition of large sets of paraphrases from unstructured text within Web documents, and their exploitation in natural-language Web search.",
                "cite_spans": [
                    {
                        "start": 619,
                        "end": 622,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 789,
                        "end": 792,
                        "text": "[5,",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 793,
                        "end": 795,
                        "text": "6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We present a lightweight method for unsupervised extraction of paraphrases from arbitrary, textual Web documents. The method taps the textual contents provided by millions of anonymous Web document contributors. The remainder of the paper is structured as follows. After a condensed overview of the paraphrase acquisition method and a contrast to previous literature in Section 2, Section 3 presents the method in more detail. Section 4 describes evaluation results when applying the method to textual documents from a Web repository snapshot of the Google search engine.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The proposed acquisition method collects large sets of word and phrase-level paraphrases via exhaustive pairwise alignment of small needles, i.e., sentence fragments, across a haystack of Web document sentences. The acquisition of paraphrases is a side-effect of the alignment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method at a Glance",
                "sec_num": "2"
            },
            {
                "text": "In the example in Figure 1 , if two sentence fragments have common word sequences at both extremities, then the variable word sequences in the middle are potential paraphrases of each other. A significant advantage of this extraction mechanism is that it can acquire paraphrases from sentences whose information content overlaps only partially, as long as the fragments align. Indeed, the source sentences of the paraphrase (withdrew from, pulled out of), as well as of (took effect, came into force), are arguably quite different overall in Figure 1 . Moreover, the sentences are part of documents whose content intersection is very small.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 18,
                        "end": 26,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 542,
                        "end": 550,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Method at a Glance",
                "sec_num": "2"
            },
            {
                "text": "In addition to its relative simplicity when compared to more complex, sentence-level paraphrase acquisition [7] , the method introduced in this paper is a departure from previous approaches in several respects. First, the paraphrases are not limited to variations of specialized, domain-specific terms as in [8] , nor are they restricted to a narrow class such as verb paraphrases [9] . Second, as opposed to virtually all previous approaches, the method does not require high-quality, clean, trustworthy, properly-formatted input data. Instead, it uses inherently noisy, unreliable Web documents. The source data in [10] is also a set of Web documents. However, it is based on top search results collected Web repository http://www.rantburg.com/default.asp?D=1/13/2004&C=India\u2212Pakistan After 1989, when Soviet troops withdrew from Afghanistan, the mujahedeen fought a civil war against the Afghan government, which devastated the country, Kabul in particular. from external search engines, and its quality benefits implicitly from the ranking functions of the search engines. Third, the input documents here are not restricted to a particular genre, whereas virtually all other recent approaches are designed for collections of parallel news articles, whether the articles are part of a carefully-compiled collection [11] or aggressively collected from Web news sources [12] . Fourth, the acquisition of paraphrases in this paper does not rely on external clues and attributes that two documents are parallel and must report on the same or very similar events. Comparatively, previous work has explicit access to, and relies strongly on clues such as the same or very similar timestamps being associated to two news article documents [11] , or knowledge that two documents are translations by different people of the same book into the same language [13] .",
                "cite_spans": [
                    {
                        "start": 108,
                        "end": 111,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 308,
                        "end": 311,
                        "text": "[8]",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 381,
                        "end": 384,
                        "text": "[9]",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 617,
                        "end": 621,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 1318,
                        "end": 1322,
                        "text": "[11]",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 1371,
                        "end": 1375,
                        "text": "[12]",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 1735,
                        "end": 1739,
                        "text": "[11]",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 1851,
                        "end": 1855,
                        "text": "[13]",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method at a Glance",
                "sec_num": "2"
            },
            {
                "text": "The use of the Web as input data source strongly impacts the design of the method, since the average Web document is much noisier and less reliable than documents in standard textual collections. Furthermore, the separation of useful textual information from other items within the document is trivial in standard collections. In contrast, Web documents contain extraneous html information, formatting errors, intra-and inter-document inconsistencies, spam and other adversarial information, and in general they lack any assumptions regarding a common document structure. Consequently, the acquisition of paraphrases must be robust, handle Web documents with only minimal linguistic processing, avoid expensive operations, and scale to billions of sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Mining the Web for Paraphrases",
                "sec_num": "3"
            },
            {
                "text": "As a pre-requisite to the actual acquisition of paraphrases, the Web documents are converted from raw string representations into more meaningful linguistic units. After filtering out html tags, the documents are tokenized, split into sentences and part-of-speech tagged with the TnT tagger [14] . Many of the candidate sentences are in fact random noise caused by the inconsistent structure (or complete lack thereof) of Web documents, among other factors. To improve the quality of the data, sentences are retained for further processing only if they satisfy the following lightweight sanity checks: 1) they are reasonably sized: sentences containing less than 5 words or more than 30 words are discarded; 2) they contain at least one verb that is neither a gerund nor a modal verb; 3) they contain at least one non-verbal word starting in lower-case; 4) none of the words is longer than 30 characters; and 5) less than half of the words are numbers.",
                "cite_spans": [
                    {
                        "start": 291,
                        "end": 295,
                        "text": "[14]",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Document Pre-processing",
                "sec_num": "3.1"
            },
            {
                "text": "Since the experiments use a collection of English documents, these checks are geared towards English.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Document Pre-processing",
                "sec_num": "3.1"
            },
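            {
                "text": "The five sanity checks reduce to a single predicate over part-of-speech tagged sentences. The following is a minimal sketch (not from the paper), assuming Penn Treebank tags as produced by TnT and a crude numeric-token test; all names are illustrative:\n\nimport re\n\nVERB_TAGS = {'VB', 'VBD', 'VBN', 'VBP', 'VBZ'}  # excludes VBG (gerund) and MD (modal)\n\ndef passes_sanity_checks(tagged):\n    # tagged: list of (word, pos) pairs for one sentence\n    words = [w for w, _ in tagged]\n    if not 5 <= len(words) <= 30:                   # 1) reasonably sized\n        return False\n    if not any(t in VERB_TAGS for _, t in tagged):  # 2) a verb, neither gerund nor modal\n        return False\n    if not any(w[:1].islower() and not t.startswith('VB') for w, t in tagged):\n        return False                                # 3) a non-verbal word starting in lower-case\n    if any(len(w) > 30 for w in words):             # 4) no word longer than 30 characters\n        return False\n    numbers = sum(1 for w in words if re.fullmatch(r'[0-9.,/:-]+', w))\n    return numbers < len(words) / 2                 # 5) less than half of the words are numbers",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Document Pre-processing",
                "sec_num": "3.1"
            },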
            {
                "text": "At Web scale, the number of sentences that pass the fairly aggressive sanity checks during document pre-processing is still extremely large, easily exceeding one billion. Any brute-force alignment of all pairs of document sentences is therefore unfeasible. Instead, the acquisition of paraphrases operates at the level of text fragments (ngrams) as shown in Figure 2 . The extraction algorithm roughly consists of the following three phases:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 358,
                        "end": 366,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Acquisition via Text Fragment Alignment",
                "sec_num": "3.2"
            },
            {
                "text": "-Generate candidate ngrams from all sentences (steps 1 through 5 in Figure 2 ); -Convert each ngram into a ready-to-align pair of a variable fragment (a candidate paraphrase) and a constant textual anchor (steps 6 through 13); -Group the pairs with the same anchors; collect the variable fragments within each group of pairs as potential paraphrases of one another (steps 14 to 20).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 68,
                        "end": 76,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Acquisition via Text Fragment Alignment",
                "sec_num": "3.2"
            },
            {
                "text": "The algorithm starts with the generation of candidate ngrams, by collecting all possible ngrams such that their length varies within pre-defined boundaries. More precisely, an ngram starts and ends in a fixed number of words (L C ); the count of the additional (ngram) words in-between varies within pre-defined limits (M in P and M ax P , respectively).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquisition via Text Fragment Alignment",
                "sec_num": "3.2"
            },
            {
                "text": "The concatenation of the fixed-length left (Cst L ) and right (Cst R ) extremities of the ngram forms a textual anchor for the variable fragment (V ar) in the middle. The variable fragment becomes a potential candidate for a paraphrase: Whenever the anchors of two or more ngrams are the same, their variable fragments are considered to be potential paraphrases of each other, thus implementing a const-var-const type of alignment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquisition via Text Fragment Alignment",
                "sec_num": "3.2"
            },
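            {
                "text": "For illustration, the following is a minimal in-memory sketch of the const-var-const alignment (assumed, not the paper's implementation; at Web scale the grouping is distributed [15]). It generates ngrams with LC-word extremities and a variable middle of MinP to MaxP words, groups them by anchor, and counts the distinct anchors supporting each candidate pair:\n\nfrom collections import defaultdict\nfrom itertools import combinations\n\ndef extract_paraphrases(sentences, lc=3, min_p=1, max_p=4):\n    # anchor (CstL + CstR) -> set of variable middle fragments (Var) seen with it\n    by_anchor = defaultdict(set)\n    for sent in sentences:  # sent: list of word tokens\n        for var_len in range(min_p, max_p + 1):\n            total = 2 * lc + var_len\n            for i in range(len(sent) - total + 1):\n                cst_l = tuple(sent[i:i + lc])\n                var = tuple(sent[i + lc:i + lc + var_len])\n                cst_r = tuple(sent[i + lc + var_len:i + total])\n                by_anchor[(cst_l, cst_r)].add(var)\n    # fragments sharing an anchor are potential paraphrases of one another\n    support = defaultdict(int)\n    for variants in by_anchor.values():\n        for a, b in combinations(sorted(variants), 2):\n            support[(a, b)] += 1\n    return support\n\nAs in Section 4, pairs supported by fewer than 5 unique ngram pairs would then be discarded.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquisition via Text Fragment Alignment",
                "sec_num": "3.2"
            },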
            {
                "text": "According to the simplified discussion from above, the algorithm in Figure 2 may align two sentence fragments \"decided to read the government report published last month\" and \"decided to read the edition published last month\" to incorrectly produce government report and edition as potential paraphrases of each other. To avoid such alignments, Steps 4 and 12 of the algorithm enrich the anchoring text around each paraphrase candidate, namely by extending the anchors to include additional information from the source sentence. By doing so, the anchors become longer and more specific, and thus closer to expressing the same information content. In turn, this reduces the chances of any two ngrams to align, since ngram alignment requires the complete matching of the corresponding anchors. In other words, the amount of information captured in the anchors is a trade-off between coverage (when anchors are less specific) and accuracy of the acquired paraphrases (when the anchors are more specific). At the low end, less specific anchors include only immediate contextual information. This corresponds to the algorithm in Figure 2 , when nothing is attached to any of the ngrams in Step 4. At the high end, one could collect all the remaining words of the sentence outside the ngram, and attach them to more specific anchors in Step 4. This is equivalent to pairwise alignment of full-length sentences. We explore three different ways of collecting additional anchoring information from the sentences: ing words of the adverbial relative clause in which the variable part of the ngram appears, e.g., \"when Soviet Union troops pulled out of Afghanistan\", or \"which came into force in 2000\" in Figure 1 . The clause must modify a named entity or a date, which is also included in the anchor. Sentences not containing such clauses are rejected. 1 The intuitive motivation in that the entity is related to part of the ngram via the adverbial particle.",
                "cite_spans": [
                    {
                        "start": 1844,
                        "end": 1845,
                        "text": "1",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 68,
                        "end": 76,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 1124,
                        "end": 1132,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 1694,
                        "end": 1702,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Alignment Anchors",
                "sec_num": "3.3"
            },
            {
                "text": "For illustration, consider the earlier example of the sentence S 1 from Section 3.2. With Ngram-Entity, Platte River (preceding entity) and Mexico (following entity) are included in the anchor. In comparison, with Ngram-Relative the additional information combines Platte River (entity) and of Mexico (remainder of relative clause). In this example, the difference between Ngram-Entity and Ngram-Relative happens to be quite small. In general, however, the differences are more significant. Table 1 illustrates paraphrases collected from the Web by only one of the two anchoring mechanisms.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 491,
                        "end": 498,
                        "text": "Table 1",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Alignment Anchors",
                "sec_num": "3.3"
            },
            {
                "text": "To ensure robustness on Web document sentences, simple heuristics rather than complex tools are used to approximate the additional information attached to ngrams in Ngram-Entity and Ngram-Relative. Named entities are approximated by proper nouns, as indicated by part-of-speech tags. Adverbial relative clauses, together with the entities or dates they modify, are detected according to a small set of lexico-syntactic patterns which can be summarized as:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Alignment Anchors",
                "sec_num": "3.3"
            },
            {
                "text": "[Date|Entity] [,|-|(|nil] [Wh] RelClause [,|-|)|.]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Alignment Anchors",
                "sec_num": "3.3"
            },
            {
                "text": "where Wh is one of who, when, which or where. The patterns are based mainly on wh-words and punctuation. The matching adverbial clause RelClause must satisfy a few other constraints, which aim at avoiding, rather than solving, complex linguistic phenomena. First, personal and possessive pronouns are often references to other entities. Therefore clauses containing such pronouns are discarded as ambiguous. Second, appositives and other similar pieces of information are confusing when detecting the end of the current clause. Consequently, during pattern matching, if the current clause does not contain a verb, the clause is either extended to the right, or discarded upon reaching the end of the sentence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Alignment Anchors",
                "sec_num": "3.3"
            },
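            {
                "text": "A rough rendering of these heuristics (assumed; the regular expression only approximates the summarized pattern above, and the tag names presume Penn Treebank part-of-speech tags):\n\nimport re\n\n# [Date|Entity] [,|-|(|nil] [Wh] RelClause [,|-|)|.]\nREL_PATTERN = re.compile(\n    r'(?:[A-Z][A-Za-z0-9-]*(?: [A-Z][A-Za-z0-9-]*)*|[0-9]{4})'  # proper-noun sequence or year\n    r' ?[,(-]? ?'\n    r'(?:who|when|which|where) '\n    r'[^,.)-]+'  # clause body, approximated as a span without closing punctuation\n    r'[,.)-]')\n\ndef acceptable_clause(tagged_clause):\n    # tagged_clause: list of (word, pos) pairs from the matched RelClause\n    tags = [t for _, t in tagged_clause]\n    if any(t in ('PRP', 'PRP$') for t in tags):  # pronouns make the reference ambiguous\n        return False\n    return any(t.startswith('VB') for t in tags)  # verb-less clauses are extended or discarded",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Alignment Anchors",
                "sec_num": "3.3"
            },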
            {
                "text": "The input data for paraphrase acquisition is a collection of 972 million Web documents, from a Web repository snapshot of the Google search engine taken in 2003. All documents are in English. The parameters controlling the length of the ngrams and candidate paraphrases, introduced in Figure 2 , are L C =3, M in P =1 and M ax P =4. 2 The anchors use additional information from the sentences, resulting in separate runs and sets of paraphrases extracted with Ngram-Only, Ngram-Entity and Ngram-Relative respectively. The experiments use a parallel programming model [15] . The extracted paraphrase pairs that co-occur very infrequently (i.e., in less than 5 unique ngram pairs) are discarded.",
                "cite_spans": [
                    {
                        "start": 333,
                        "end": 334,
                        "text": "2",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 567,
                        "end": 571,
                        "text": "[15]",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 285,
                        "end": 293,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "The sanity checks applied in document pre-processing (see Figure 3 shows that the number of acquired paraphrases varies more or less linearly in the size of the input data.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 58,
                        "end": 66,
                        "text": "Figure 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Quantitative Results",
                "sec_num": "4.1"
            },
            {
                "text": "The large majority of the paraphrase pairs contain either two single-word phrases (40% for Ngram-Entity, and 49% for Ngram-Relative), or one singleword and one multi-word phrase (22% for Ngram-Entity, and 43% for Ngram-Relative). Table 2 illustrates the top paraphrase pairs with two multi-word phrases, after removal of paraphrases containing only stop words, or upper/lower Table 2 . In contrast, many of the top paraphrases with Ngram-Entity end in a linking word, such as the pair (center of, centre of). Note that every time this pair is extracted, the smaller single-word paraphrase pair that folds the common linking word into the anchor, e.g., (center, centre), is also extracted. Table 2 shows that the extracted paraphrases are not equally useful. The pair (became effective, took effect) is arguably more useful than (one hour, two hours). Table 3 with Ngram-Only in Table 3 are precisely such words. Class (8) contains pairs in which a portion of one of the elements is a synonym or phrasal equivalent of the other element, such as (poliomyelitis globally, polio) and (UNC, UNC-CH), whereas (9) captures what can be thought of as entailment, e.g., (governs, owns) and (holds, won). Finally, the last two classes from Table 3 correspond to incorrect extractions, due to either antonyms like (lost, won) and (your greatest strength, your greatest weakness) in class (10) , or other factors in (11) . The aggregated evaluation results, shown in bold in Table 3 , suggest that Ngram-Only leads to paraphrases of lower quality than those extracted with Ngram-Entity and Ngram-Relative. In particular, the samples from the middle and bottom of the Ngram-Only paraphrases contain a much higher percentage of incorrect pairs. The results also show that, for Ngram-Entity and Ngram-Relative, the quality of paraphrases is similar at different ranks in the paraphrase lists sorted by the number of different ngrams they co-occur in. For instance, the total number of correct pairs has comparable values for the top, middle and bottom pairs. This confirms the usefulness of the heuristics introduced in Section 3.3 to discard irrelevant sentences with Ngram-Entity and Ngram-Relative.",
                "cite_spans": [
                    {
                        "start": 918,
                        "end": 921,
                        "text": "(8)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 1103,
                        "end": 1106,
                        "text": "(9)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 1376,
                        "end": 1380,
                        "text": "(10)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 1403,
                        "end": 1407,
                        "text": "(11)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 230,
                        "end": 237,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    },
                    {
                        "start": 376,
                        "end": 383,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    },
                    {
                        "start": 689,
                        "end": 696,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    },
                    {
                        "start": 851,
                        "end": 858,
                        "text": "Table 3",
                        "ref_id": "TABREF4"
                    },
                    {
                        "start": 878,
                        "end": 885,
                        "text": "Table 3",
                        "ref_id": "TABREF4"
                    },
                    {
                        "start": 1229,
                        "end": 1236,
                        "text": "Table 3",
                        "ref_id": "TABREF4"
                    },
                    {
                        "start": 1462,
                        "end": 1469,
                        "text": "Table 3",
                        "ref_id": "TABREF4"
                    }
                ],
                "eq_spans": [],
                "section": "Quantitative Results",
                "sec_num": "4.1"
            },
            {
                "text": "The usefulness of paraphrases in Web search is assessed via an existing experimental repository of more than 8 million factual nuggets associated with a date. Repositories of factual nuggets are built offline, by matching lightweight, opendomain lexico-semantic patterns on unstructured text. In the repository used in this paper, a factual nugget is a sentence fragment from a Web document, paired with a date extracted from the same document, when the event encoded in the A test set of temporal queries is used to extract direct results (dates) from the repository of factual nuggets, by matching the queries against the sentence fragments, and retrieving the associated dates. The test queries are all queries that start with either When or What year, namely 207 out of the total count of 1893 main-task queries, from the Question Answering track [16] of past editions (1999 through 2002). The metric for measuring the accuracy of the retrieved results is the de-facto scoring metric for fact-seeking queries, that is, the reciprocal rank of the first returned result that is correct (in the gold standard) [16] . If there is no correct result among the top 10 returned, the query receives no credit. Individual scores are aggregated (i.e., summed) over the entire query set.",
                "cite_spans": [
                    {
                        "start": 851,
                        "end": 855,
                        "text": "[16]",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 1111,
                        "end": 1115,
                        "text": "[16]",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Paraphrases in Natural-Language Web Search",
                "sec_num": "4.3"
            },
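            {
                "text": "A minimal sketch of this scoring (data shapes assumed): the reciprocal rank of the first correct result within the top 10 results, summed over the query set:\n\ndef reciprocal_rank(returned, gold, cutoff=10):\n    # returned: ranked list of candidate dates; gold: set of correct dates\n    for rank, result in enumerate(returned[:cutoff], start=1):\n        if result in gold:\n            return 1.0 / rank\n    return 0.0  # no correct result among the top 10: no credit\n\ndef total_score(runs, gold_standard):\n    # runs: {query: ranked dates}; gold_standard: {query: set of correct dates}\n    return sum(reciprocal_rank(dates, gold_standard[q]) for q, dates in runs.items())",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Paraphrases in Natural-Language Web Search",
                "sec_num": "4.3"
            },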
            {
                "text": "In a series of parallel experiments, all phrases from the test queries are expanded into Boolean disjunctions with their top-ranked paraphrases. Query words with no paraphrase are placed into the expanded queries in their original form. The other query words are expanded only if they are single words, for simplicity. Examples of implicitly-Boolean queries expanded disjunctively, before removal of stop words and wh-words, are: Table 4 illustrates the impact of paraphrases on the accuracy of the dates retrieved from the repository of factual nuggets associated with dates. When compared to non-expanded queries, paraphrases consistently improve the accuracy of the returned dates. Incremental addition of more paraphrases results in more individual queries with a better score than for their non-expanded version, and higher overall scores for the returned dates. The paraphrases extracted with Ngram-Entity produce scores that are higher than those of Ngram-Relative, due mainly to higher coverage. Since the temporal queries represent an external, objective test set, they provide additional evidence regarding the quality of paraphrases in a practical application.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 430,
                        "end": 437,
                        "text": "Table 4",
                        "ref_id": "TABREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Paraphrases in Natural-Language Web Search",
                "sec_num": "4.3"
            },
            {
                "text": "-When did",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Paraphrases in Natural-Language Web Search",
                "sec_num": "4.3"
            },
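            {
                "text": "A minimal sketch of the disjunctive expansion (assumed; the paraphrase dictionary shape and the top-k cutoff are illustrative). Single words with paraphrases become Boolean disjunctions; all other query words pass through in their original form:\n\ndef expand_query(words, paraphrases, top_k=3):\n    # words: tokenized query; paraphrases: {word: ranked list of paraphrases}\n    out = []\n    for w in words:\n        alts = paraphrases.get(w, [])[:top_k]\n        out.append('(' + '|'.join([w] + alts) + ')' if alts else w)\n    return ' '.join(out)\n\n# expand_query(['when', 'did', 'the', 'treaty', 'take', 'effect'], {'take': ['come into']})\n# -> 'when did the treaty (take|come into) effect'",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Paraphrases in Natural-Language Web Search",
                "sec_num": "4.3"
            },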
            {
                "text": "The Web has gradually grown into a noisy, unreliable, yet powerful resource of human knowledge. This knowledge ranges from basic word usage statistics to intricate facts, background knowledge and associated inferences made by humans reading Web documents. This paper describes a method for unsupervised acquisition of lexical knowledge across the Web, by exploiting the numerous textual forms that people use to share similar ideas, or refer to common events. Large sets of paraphrases are collected through pairwise alignment of ngrams occurring within the unstructured text of Web documents. Several mechanisms are explored to cope with the inherent lack of quality of Web content. The quality of the extracted paraphrases improves significantly when the textual anchors used for aligning potential paraphrases attempt to approximate, even at a very coarse level, the presence of additional information within the sentences. In addition to the known role of the extracted paraphrases in natural-language intensive applications, the experiments in this paper illustrate their impact in returning direct results to natural-language queries.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "The final output of the extraction algorithm lacks any distinction among paraphrases that apply to only one of the several senses or part of speech tags that a word or phrase may have. For instance, hearts, center and middle mix the medical and positioning senses of the word heart. Conversely, the extracted paraphrases may capture only one sense of the word, which may not match the sense of the same word in the queries. As an example, in the expansion of one of the test queries, \"Where is the massive North Korean (nuclear|atomic) (complex|real) (located|situated|found)?\", a less-than-optimal paraphrase of complex not only provides a sibling rather than a near synonym, but may incorrectly shift the focus of the search towards the mathematical sense of the word (complex versus real numbers). Aggregated contextual information from the source ngrams could provide a means for selecting only some of the paraphrases, based on the query. As another direction for future work, we plan to revise the need for language-dependent resources (namely, the part of speech tagger) in the current approach, and explore possibilities of minimizing or removing their use for seamless transfer of the approach to other languages.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "By discarding many sentences, Ngram-Relative sacrifices recall in favor of precision.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "No experiments were performed with higher values for MaxP (to collect longer paraphrases), or higher/lower values for LC (to use more/less context for alignment).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Corpus and evaluation measures for multiple document summarization with multiple sources",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Hirao",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Fukusima",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Okumura",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Nobata",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Nanba",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the 20th International Conference on Computational Linguistics (COLING-04)",
                "volume": "",
                "issue": "",
                "pages": "535--541",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hirao, T., Fukusima, T., Okumura, M., Nobata, C., Nanba, H.: Corpus and eval- uation measures for multiple document summarization with multiple sources. In: Proceedings of the 20th International Conference on Computational Linguistics (COLING-04), Geneva, Switzerland (2004) 535-541",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Paraphrase acquisition for information extraction",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Shinyama",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Sekine",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the 41st Annual Meeting of the Association of Computational Linguistics (ACL-03), 2nd Workshop on Paraphrasing: Paraphrase Acquisition and Applications",
                "volume": "",
                "issue": "",
                "pages": "65--71",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Shinyama, Y., Sekine, S.: Paraphrase acquisition for information extraction. In: Proceedings of the 41st Annual Meeting of the Association of Computational Lin- guistics (ACL-03), 2nd Workshop on Paraphrasing: Paraphrase Acquisition and Applications, Sapporo, Japan (2003) 65-71",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Open-Domain Question Answering from Large Text Collections",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Pa\u015fca",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "CSLI Studies in Computational Linguistics. CSLI Publications, Distributed by the University of",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pa\u015fca, M.: Open-Domain Question Answering from Large Text Collections. CSLI Studies in Computational Linguistics. CSLI Publications, Distributed by the Uni- versity of Chicago Press, Stanford, California (2003)",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Improving automatic query expansion",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Mitra",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Singhal",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Buckley",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 21st ACM Conference on Research and Development in Information Retrieval (SIGIR-98)",
                "volume": "",
                "issue": "",
                "pages": "206--214",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mitra, M., Singhal, A., Buckley, C.: Improving automatic query expansion. In: Proceedings of the 21st ACM Conference on Research and Development in Infor- mation Retrieval (SIGIR-98), Melbourne, Australia (1998) 206-214",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Information retrieval based on word senses",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Schutze",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Pedersen",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proceedings of the 4th Annual Symposium on Document Analysis and Information Retrieval",
                "volume": "",
                "issue": "",
                "pages": "161--175",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Schutze, H., Pedersen, J.: Information retrieval based on word senses. In: Pro- ceedings of the 4th Annual Symposium on Document Analysis and Information Retrieval. (1995) 161-175",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Lexical query paraphrasing for document retrieval",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Zukerman",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Raskutti",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the 19th International Conference on Computational Linguistics (COLING-02)",
                "volume": "",
                "issue": "",
                "pages": "1177--1183",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Zukerman, I., Raskutti, B.: Lexical query paraphrasing for document retrieval. In: Proceedings of the 19th International Conference on Computational Linguistics (COLING-02), Taipei, Taiwan (2002) 1177-1183",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Learning to paraphrase: An unsupervised approach using multiple-sequence alignment",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Barzilay",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the 2003 Human Language Technology Conference (HLT-NAACL-03)",
                "volume": "",
                "issue": "",
                "pages": "16--23",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Barzilay, R., Lee, L.: Learning to paraphrase: An unsupervised approach using multiple-sequence alignment. In: Proceedings of the 2003 Human Language Tech- nology Conference (HLT-NAACL-03), Edmonton, Canada (2003) 16-23",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Expansion of multi-word terms for indexing and retrieval using morphology and syntax",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Jacquemin",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Klavans",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Tzoukermann",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the 35th Annual Meeting of the Association of Computational Linguistics (ACL-97)",
                "volume": "",
                "issue": "",
                "pages": "24--31",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jacquemin, C., Klavans, J., Tzoukermann, E.: Expansion of multi-word terms for indexing and retrieval using morphology and syntax. In: Proceedings of the 35th Annual Meeting of the Association of Computational Linguistics (ACL-97), Madrid, Spain (1997) 24-31",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Acquiring Lexical Paraphrases from a Single Corpus",
                "authors": [
                    {
                        "first": "O",
                        "middle": [],
                        "last": "Glickman",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Dagan",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Recent Advances in Natural Language Processing III",
                "volume": "",
                "issue": "",
                "pages": "81--90",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Glickman, O., Dagan, I.: Acquiring Lexical Paraphrases from a Single Corpus. In: Recent Advances in Natural Language Processing III. John Benjamins Publishing, Amsterdam, Netherlands (2004) 81-90",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Using the Web as a linguistic resource for learning reformulations automatically",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Duclaye",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Yvon",
                        "suffix": ""
                    },
                    {
                        "first": "O",
                        "middle": [],
                        "last": "Collin",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the 3rd Conference on Language Resources and Evaluation (LREC-02)",
                "volume": "",
                "issue": "",
                "pages": "390--396",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Duclaye, F., Yvon, F., Collin, O.: Using the Web as a linguistic resource for learning reformulations automatically. In: Proceedings of the 3rd Conference on Language Resources and Evaluation (LREC-02), Las Palmas, Spain (2002) 390-396",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Automatic paraphrase acquisition from news articles",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Shinyama",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Sekine",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Sudo",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Grishman",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the Human Language Technology Conference (HLT-02)",
                "volume": "",
                "issue": "",
                "pages": "40--46",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Shinyama, Y., Sekine, S., Sudo, K., Grishman, R.: Automatic paraphrase acqui- sition from news articles. In: Proceedings of the Human Language Technology Conference (HLT-02), San Diego, California (2002) 40-46",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Unsupervised construction of large paraphrase corpora: Exploiting massively parallel news sources",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Dolan",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Quirk",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Brockett",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the 20th International Conference on Computational Linguistics (COLING-04)",
                "volume": "",
                "issue": "",
                "pages": "350--356",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dolan, W., Quirk, C., Brockett, C.: Unsupervised construction of large para- phrase corpora: Exploiting massively parallel news sources. In: Proceedings of the 20th International Conference on Computational Linguistics (COLING-04), Geneva, Switzerland (2004) 350-356",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Extracting paraphrases from a parallel corpus",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Barzilay",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Mckeown",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings of the 39th Annual Meeting of the Association for Computational Linguistics (ACL-01)",
                "volume": "",
                "issue": "",
                "pages": "50--57",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Barzilay, R., McKeown, K.: Extracting paraphrases from a parallel corpus. In: Proceedings of the 39th Annual Meeting of the Association for Computational Linguistics (ACL-01), Toulouse, France (2001) 50-57",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "TnT -a statistical part of speech tagger",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Brants",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 6th Conference on Applied Natural Language Processing (ANLP-00)",
                "volume": "",
                "issue": "",
                "pages": "224--231",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Brants, T.: TnT -a statistical part of speech tagger. In: Proceedings of the 6th Conference on Applied Natural Language Processing (ANLP-00), Seattle, Wash- ington (2000) 224-231",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "MapReduce: Simplified data processing on large clusters",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Dean",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Ghemawat",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the 6th Symposium on Operating Systems Design and Implementation (OSID-04)",
                "volume": "",
                "issue": "",
                "pages": "137--150",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dean, J., Ghemawat, S.: MapReduce: Simplified data processing on large clus- ters. In: Proceedings of the 6th Symposium on Operating Systems Design and Implementation (OSID-04), San Francisco, California (2004) 137-150",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Building a question-answering test collection",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Voorhees",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Tice",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 23rd International Conference on Research and Development in Information Retrieval (SIGIR-00)",
                "volume": "",
                "issue": "",
                "pages": "200--207",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Voorhees, E., Tice, D.: Building a question-answering test collection. In: Pro- ceedings of the 23rd International Conference on Research and Development in Information Retrieval (SIGIR-00), Athens, Greece (2000) 200-207",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "num": null,
                "text": "Paraphrase acquisition from unstructured text across the Web",
                "uris": null
            },
            "FIGREF1": {
                "type_str": "figure",
                "num": null,
                "text": "Section 3.1) discard a total of 187 billion candidate sentences from the input documents, with an average of 3 words per sentence. In the case of Ngram-Only, paraphrases are extracted from the remaining 9.5 billion sentences, which have 17 words on average. As explained in Section 3.3, Ngram-Entity and Ngram-Relative apply a set of additional constraints as they search the sentences for more anchoring information. Ngram-Entity discards 72 million additional sentences. In contrast, as many as 9.3 billion sentences are rejected by the constraints encoded in Ngram-Relative.The number of paraphrase pairs extracted from the Web varies with the particular kind of anchoring mechanism. The simplest one, i.e., Ngram-Only, produces 41,763,994 unique pairs that co-occur in at least 5 different ngrams. With Ngram-Relative, the output consists of 13,930 unique pairs. In comparison, Ngram-Entity generates 101,040 unique pairs.",
                "uris": null
            },
            "FIGREF2": {
                "type_str": "figure",
                "num": null,
                "text": "Variation of the number of acquired paraphrase pairs with the input data size",
                "uris": null
            },
            "FIGREF3": {
                "type_str": "figure",
                "num": null,
                "text": "is a side-by-side comparison of the accuracy of the paraphrases with Ngram-Only, Ngram-Entity and Ngram-Relative respectively. The values are the result of manual classification of the top, middle and bottom 100 paraphrase pairs from each run into 11 categories. The first six categories correspond to pairs classified as correct. For instance (Univeristy, University) is classified in class (1); (Treasury, treasury) in (2); (is, are) in (3); (e-mail, email) in (4); and (can, could) in (5). The pairs in class (6) are considered to be the most useful; they include (trip, visit), (condition, status), etc. The next three classes do not contain synonyms but are still useful. The pairs in (7) are siblings rather than direct synonyms; examples are (twice a year, weekly) and (French, welsh). Furthermore, modal verbs such as (may, should), numbers, and prepositions like (up, back) also fall under class (7). Many of the 63 pairs classified as siblings",
                "uris": null
            },
            "TABREF1": {
                "type_str": "table",
                "content": "<table><tr><td>Input:</td><td>V ar For each ngram Cst L flows</td><td>Cst R into the Gulf</td><td>of Mexico.</td></tr></table>",
                "html": null,
                "text": "Ni in {N } {S} set of sentences 7 LN i = length of Ni LC length of constant extremities 8 Cst L| = subseq [0, LC -1] of Ni MinP , MaxP paraphrase length bounds 9 CstR = subseq [LN i LC , LN i -1] of Ni Vars: 10 V ari = subseq [LC , LN i -LC-1] of Ni {N } set of ngrams with attached info 11 Anchori = concat of Cst L| and CstR {P } set of pairs (anchor, candidate) 12 Anchori = concat of Atti and Anchori {R} set of paraphrase pairs with freq info 13 Insert pair (Anchori,V ari) into {P } Output: {R} 14 Sort pairs in {P } based on their anchor Steps: 15 For each {Pi} \u2282 {P } with same anchor 1 {R} = {N } = {P } = empty set; 16 For all item pairs Pi 1 and Pi 2 in {Pi} 2 For each sentence Si in {S} 17 V ari 1 = variable part of pair Pi 1 3 Generate ngrams Nij between length 18 V ari 2 = variable part of pair Pi 2 2 \u00d7 LC + MinP and 2 \u00d7 LC + MaxP 19 Incr. count of (V ari 1 ,V ari 2 ) in {R} 4 For each Nij , attach addtl. info Attij 20 Incr. count of (V ari 2 ,V ari 1 ) in {R} 5 Insert Nij with Attij into {N } 21 Return {R}",
                "num": null
            },
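For readers who want to experiment with the acquisition procedure summarized in TABREF1, the following is a minimal Python sketch, not the paper's implementation. It covers only the anchor/variable decomposition and co-occurrence counting (steps 1-21 of the table); the "attach additional info" steps (4 and 12), which carry the Ngram-Entity and Ngram-Relative anchoring constraints, are omitted, and all identifiers and parameter values (lc, min_p, max_p) are illustrative assumptions rather than values taken from the paper.

```python
from collections import defaultdict
from itertools import combinations

def acquire_paraphrases(sentences, lc=3, min_p=1, max_p=4):
    """Sketch of the TABREF1 algorithm: split each ngram into constant
    extremities (CstL, CstR) that form an anchor and a variable middle
    (Var); variable parts sharing an anchor become paraphrase candidates.
    The Att anchoring constraints (Ngram-Entity / Ngram-Relative) are
    deliberately left out of this sketch."""
    pairs = defaultdict(list)   # {P}: anchor -> variable parts seen with it
    counts = defaultdict(int)   # {R}: (var1, var2) -> co-occurrence count

    for sentence in sentences:                              # step 2
        words = sentence.split()
        # step 3: ngrams of length 2*LC+MinP .. 2*LC+MaxP
        for n in range(2 * lc + min_p, 2 * lc + max_p + 1):
            for i in range(len(words) - n + 1):
                ngram = words[i:i + n]
                cst_l = tuple(ngram[:lc])                   # step 8
                cst_r = tuple(ngram[-lc:])                  # step 9
                var = tuple(ngram[lc:-lc])                  # step 10
                pairs[(cst_l, cst_r)].append(var)           # steps 11, 13

    for variants in pairs.values():                         # steps 15-16
        for v1, v2 in combinations(variants, 2):
            if v1 != v2:
                counts[(v1, v2)] += 1                       # step 19
                counts[(v2, v1)] += 1                       # step 20
    return dict(counts)

# Toy usage: two sentences that share constant extremities around a
# differing middle, so "flows" and "empties" become a candidate pair.
docs = ["the big river flows into the gulf of mexico",
        "the big river empties into the gulf of mexico"]
print(acquire_paraphrases(docs))
```

On Web scale the paper runs this over billions of sentences (via MapReduce, BIBREF14); the sort-by-anchor in step 14 is what makes the pairing in steps 15-20 a single linear pass in that setting.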
            "TABREF2": {
                "type_str": "table",
                "content": "<table><tr><td>Only with Ngram-Entity</td><td>Only with Ngram-Relative</td></tr><tr><td>abduction, kidnapping</td><td>abolished, outlawed</td></tr><tr><td>bachelor degree, bachelors degree</td><td>abolished slavery, freed the slaves</td></tr><tr><td>cause, result in</td><td>causes, results in</td></tr><tr><td>indicate, specify</td><td>carries, transmits</td></tr><tr><td>inner product space, vector space</td><td>died from, succumbed to</td></tr><tr><td>kill, murder</td><td>empties into, flows to</td></tr><tr><td>obligations, responsibilities</td><td>funds, pays for</td></tr><tr><td>registered service marks, registered trademarks</td><td>means, stands for</td></tr><tr><td>video poker betting, video poker gambling</td><td>penned, wrote</td></tr><tr><td>x-mas gift, x-mas present</td><td>seized, took over</td></tr></table>",
                "html": null,
                "text": "Examples of paraphrase pairs collected from the Web with one of Ngram-Entity or Ngram-Relative, but not with the other",
                "num": null
            },
            "TABREF3": {
                "type_str": "table",
                "content": "<table><tr><td>co-occurrence</td><td/></tr><tr><td># Ngram-Entity</td><td>Ngram-Relative</td></tr><tr><td>1 DVD Movie, VHS Movie</td><td>became effective, took effect</td></tr><tr><td colspan=\"2\">2 betting is excited, wagering is excited came into force, took effect</td></tr><tr><td>3 betting is, wagering is</td><td>became effective, went into effect</td></tr><tr><td colspan=\"2\">4 betting is excited, gambling is excited became effective, came into force</td></tr><tr><td>5 Annual Meeting of, meeting of</td><td>became effective, came into effect</td></tr><tr><td>6 center of, centre of</td><td>entered into force, took effect</td></tr><tr><td>7 betting is, gambling is</td><td>one hour, two hours</td></tr></table>",
                "html": null,
                "text": "Top ranked multi-word paraphrase pairs in decreasing order of frequency of",
                "num": null
            },
            "TABREF4": {
                "type_str": "table",
                "content": "<table><tr><td/><td colspan=\"9\">Ngram-Only Ngram-Entity Ngram-Relative</td></tr><tr><td>Classification of Pairs</td><td colspan=\"9\">Top Mid Low Top Mid Low Top Mid Low</td></tr><tr><td/><td colspan=\"8\">100 100 100 100 100 100 100 100</td><td>100</td></tr><tr><td>(1) Correct; punct., symbols, spelling</td><td>1</td><td colspan=\"3\">5 11 12</td><td colspan=\"4\">6 20 18 11</td><td>15</td></tr><tr><td>(2) Correct; equal if case-insensitive</td><td>0</td><td>5</td><td colspan=\"2\">0 27</td><td colspan=\"2\">2 11</td><td>9</td><td>2</td><td>14</td></tr><tr><td>(3) Correct; both are stop words</td><td>4</td><td>0</td><td>0</td><td>3</td><td>0</td><td>1</td><td>1</td><td>0</td><td>0</td></tr><tr><td>(4) Correct; hyphenation</td><td>0</td><td>1</td><td colspan=\"3\">4 10 35</td><td>8</td><td colspan=\"2\">2 19</td><td>43</td></tr><tr><td>(5) Correct; morphological variation</td><td>8</td><td colspan=\"2\">1 10</td><td colspan=\"5\">9 10 20 20 15</td><td>6</td></tr><tr><td>(6) Correct; synonyms</td><td>16</td><td colspan=\"2\">8 21</td><td colspan=\"5\">5 32 14 33 23</td><td>6</td></tr><tr><td>Total correct</td><td colspan=\"8\">29 20 46 66 85 74 83 70</td><td>84</td></tr><tr><td>(7) Siblings rather than synonyms</td><td colspan=\"4\">63 29 19 32</td><td colspan=\"2\">8 15</td><td>5</td><td>7</td><td>7</td></tr><tr><td>(8) One side adds an elaboration</td><td>0</td><td>0</td><td>3</td><td>0</td><td>0</td><td>0</td><td>1</td><td>2</td><td>1</td></tr><tr><td>(9) Entailment</td><td>0</td><td>3</td><td>2</td><td>0</td><td>0</td><td>1</td><td>3</td><td>1</td><td>0</td></tr><tr><td>Total siblings</td><td colspan=\"4\">63 32 24 32</td><td colspan=\"2\">8 16</td><td colspan=\"2\">9 10</td><td>8</td></tr><tr><td>(10) Incorrect; antonyms</td><td>6</td><td>0</td><td>2</td><td>0</td><td>1</td><td>4</td><td>4</td><td>3</td><td>4</td></tr><tr><td>(11) Incorrect; other</td><td colspan=\"3\">2 48 28</td><td>2</td><td>6</td><td>6</td><td colspan=\"2\">4 17</td><td>4</td></tr><tr><td>Total incorrect</td><td colspan=\"3\">8 48 30</td><td>2</td><td colspan=\"2\">7 10</td><td colspan=\"2\">8 20</td><td>8</td></tr></table>",
                "html": null,
                "text": "Quality of the acquired paraphrases",
                "num": null
            },
            "TABREF5": {
                "type_str": "table",
                "content": "<table><tr><td>Max. nr. disjunctions</td><td>QH</td><td/><td>QL</td><td/><td>Score</td></tr><tr><td>per expanded phrase</td><td colspan=\"2\">NE NR</td><td colspan=\"2\">NE NR</td><td>NE</td><td>NR</td></tr><tr><td>1 (no paraphrases)</td><td>0</td><td>0</td><td>0</td><td>0</td><td colspan=\"2\">52.70 52.70</td></tr><tr><td>2 (1 paraphrase)</td><td>17</td><td>8</td><td>7</td><td>6</td><td colspan=\"2\">64.50 57.62</td></tr><tr><td>3 (2 paraphrases)</td><td>22</td><td>13</td><td>6</td><td>9</td><td colspan=\"2\">70.38 60.46</td></tr><tr><td>4 (3 paraphrases)</td><td>23</td><td>15</td><td>6</td><td>7</td><td colspan=\"2\">71.42 60.39</td></tr><tr><td>5 (4 paraphrases)</td><td>26</td><td>18</td><td>12</td><td>5</td><td colspan=\"2\">71.73 63.35</td></tr><tr><td colspan=\"7\">sentence fragment occurred according to the text, e.g., 1937, Golden Gate was</td></tr><tr><td colspan=\"4\">built , and 1947, Bell Labs invented the transistor .</td><td/><td/></tr></table>",
                "html": null,
                "text": "Impact of expansion of the test queries (QH/QL=count of queries with higher/lower scores than without expansion, NE=Ngram-Entity, NR=Ngram-Relative)",
                "num": null
            },
            "TABREF6": {
                "type_str": "table",
                "content": "<table/>",
                "html": null,
                "text": "Amtrak (begin | start | began | continue | commence) (operations | operation | activities | Business | operational)? -When was the De Beers (company | Co. | firm | Corporation | group) (founded | established | started | created | co-founded)?",
                "num": null
            }
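The expanded queries shown in TABREF6 replace a phrase with a parenthesized disjunction of the phrase and its top-ranked paraphrases, capped by the disjunction limit that TABREF5 varies from 1 to 5. A minimal sketch of that substitution, assuming a hypothetical `paraphrases` lookup table (phrase -> alternatives ranked by co-occurrence frequency) and expanding one phrase per call:

```python
def expand_query(query, phrase, paraphrases, max_disjuncts=5):
    """Replace `phrase` in `query` with a disjunction of itself and its
    top-ranked paraphrases, keeping at most max_disjuncts alternatives."""
    alternatives = [phrase] + paraphrases.get(phrase, [])[:max_disjuncts - 1]
    if len(alternatives) == 1:      # no known paraphrases: leave query as-is
        return query
    return query.replace(phrase, "(" + " | ".join(alternatives) + ")")

# Reproduces the shape of the De Beers example in TABREF6, given a
# suitable (here hand-filled) paraphrase table.
table = {"founded": ["established", "started", "created", "co-founded"]}
print(expand_query("When was the De Beers company founded?", "founded", table))
# -> When was the De Beers company (founded | established | started | created | co-founded)?
```

Under this reading, the "Max. nr. disjunctions" column of TABREF5 corresponds to max_disjuncts: row 1 (no paraphrases) is the unexpanded baseline, and each further row admits one more alternative per phrase.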
        }
    }
}