{
    "paper_id": "I05-1043",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:26:04.074159Z"
    },
    "title": "Instance-Based Generation for Interactive Restricted Domain Question Answering Systems",
    "authors": [
        {
            "first": "Matthias",
            "middle": [],
            "last": "Denecke",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "NTT Communication Science Laboratories",
                "location": {
                    "addrLine": "2-4 Hikaridai, Seika-Cho",
                    "settlement": "Soraku-gun, Kyoto"
                }
            },
            "email": "denecke@cslab.kecl.ntt.co.jp"
        },
        {
            "first": "Hajime",
            "middle": [],
            "last": "Tsukada",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "NTT Communication Science Laboratories",
                "location": {
                    "addrLine": "2-4 Hikaridai, Seika-Cho",
                    "settlement": "Soraku-gun, Kyoto"
                }
            },
            "email": "tsukada@cslab.kecl.ntt.co.jp"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "One important component of interactive systems is the generation component. While template-based generation is appropriate in many cases (for example, task oriented spoken dialogue systems), interactive question answering systems require a more sophisticated approach. In this paper, we propose and compare two example-based methods for generation of information seeking questions.",
    "pdf_parse": {
        "paper_id": "I05-1043",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "One important component of interactive systems is the generation component. While template-based generation is appropriate in many cases (for example, task oriented spoken dialogue systems), interactive question answering systems require a more sophisticated approach. In this paper, we propose and compare two example-based methods for generation of information seeking questions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Question answering is the task of providing natural language answers to natural language questions using an information retrieval engine. Due to the unrestricted nature of the problem, shallow and statistical methods are paramount.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Spoken dialogue systems address the problem of accessing information from a structured database (such as time table information) or controlling appliances by voice. Due to the fact that the scope of the application defined by the back-end, the domain of the system is well-defined. Therefore, in the presence of vague, ill-defined or misrecognized input from the user, dialogue management, relying on the domain restrictions as given by the application, can interactively request more information from the user until the users' intent has been determined. In this paper, we are interested in generation of information seeking questions in interactive question-answering systems.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We implemented a system that combines features of question answering systems with those of spoken dialogue systems. We integrated the following two features in an interactive restricted domain question answering system: (1) As in question answering systems, the system draws its knowledge from a database of unstructured text. (2) As in spoken dialogue systems, the system can interactively query for more information in the case of vague or ill-defined user queries.",
                "cite_spans": [
                    {
                        "start": 327,
                        "end": 330,
                        "text": "(2)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Our System",
                "sec_num": "1.1"
            },
            {
                "text": "Restricted domain question answering systems can be deployed in interactive problem solving solutions, for example, software trouble shooting. In these scenarios, interactivity becomes a necessity. This is because it is highly unlikely that all facts relevant to retrieving the appropriate response are stated in the query. For example, in the software trouble shooting task described in [5] , a frequent system generated information seeking question is for the version of the software. Therefore, there is a need to inquire additional problem relevant information from the user, depending on the interaction history and the problem to be solved.",
                "cite_spans": [
                    {
                        "start": 388,
                        "end": 391,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problem Addressed in This Paper",
                "sec_num": "1.2"
            },
            {
                "text": "In this paper, we specifically address the problem of how to generate information seeking questions in the case of ambiguous, vague or ill-defined user questions. We assume that the decision of whether an information seeking question is needed is made outside of the module described here. More formally, the problem we address can be described as follows: Given 1. A representation of the previous interaction history, consisting of user and system utterances, and retrieval results from the IR subsystem, 2. A decision for a information seeking question Produce An information seeking question.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problem Addressed in This Paper",
                "sec_num": "1.2"
            },
            {
                "text": "Problems of this kind have appeared traditionally in task oriented spoken dialogue systems, where missing information needs to be prompted. However, in the case of spoken dialogue systems, question generation is typically not a substantial problem: the fact that the back-end is well-structured allows for simple template-based generation in many cases. For example, missing values for database queries or remote method invocations can be queried that way. (But see also Oh and Rudnicky [7] or Walker et al [12] for more elaborated approaches to generation for spoken dialogue systems).",
                "cite_spans": [
                    {
                        "start": 487,
                        "end": 490,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 507,
                        "end": 511,
                        "text": "[12]",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problem Addressed in This Paper",
                "sec_num": "1.2"
            },
            {
                "text": "In our case, however, a template-based approach is unrealistic. This is due to the unstructured back-end application. Unlike as spoken dialogue systems, we cannot make assumptions over what kind of questions to ask as this is determined by the result set of articles as returned by the information retrieval engine. Existing interactive question-answering systems (see section 7.1 for a more detailed description) either use canned text on dialogue cards [5] , break down the dialogue representation into frames and then techniques from spoken dialogue systems [8] , or make simplifying assumptions to the extent that generation essentially becomes equivalent to template-based generation.",
                "cite_spans": [
                    {
                        "start": 455,
                        "end": 458,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 561,
                        "end": 564,
                        "text": "[8]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Problem Addressed in This Paper",
                "sec_num": "1.2"
            },
            {
                "text": "For reasons discussed above, we propose an example-based approach to generation. More specifically, we use an existing dialogue corpus to retrieve appropriate questions and modify in order to fit the situation at hand. We describe two algorithms for instance-based natural language questions generation by first selecting appropriate candidates from the corpus, then modifying the candidates to fit the situation at hand, and finally re-rank the candidates. This is an example of a memory-based learning approach, which in turn is a kind of a case-based reasoning. To the best of our knowledge, this is the first work addressing the problem of example-based generation information seeking questions in the absence of a structured back-end application.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Proposed Solution",
                "sec_num": "1.3"
            },
            {
                "text": "In this section, we review the background in memory-based learning and its application in natural language generation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Instance Based Natural Language Generation",
                "sec_num": "2"
            },
            {
                "text": "Memory-based reasoning (MBR) is often considered a subtype of Case-based reasoning. Case-based reasoning was proposed in the 80's as an alternative to rule-based approaches. Instead of expressing regularities about the domain to be modeled in rules, the primary knowledge source in case-based reasoning is a memory of cases representing episodes of encountered problems. Generating a solution to a given problem consists of retrieving an appropriate case from memory and adapting it to the problem at hand.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory-Based Reasoning",
                "sec_num": "2.1"
            },
            {
                "text": "MBR solves problems by retrieving stored precedents as a starting point for new problem-solving (e.g., [9] ). However, its primary focus is on the retrieval process, and in particular on the use of parallel retrieval schemes to enable retrieval without conventional index selection. One aspect of memory-based systems is to choose a distance that appropriately selects candidate exemplars.",
                "cite_spans": [
                    {
                        "start": 103,
                        "end": 106,
                        "text": "[9]",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory-Based Reasoning",
                "sec_num": "2.1"
            },
            {
                "text": "Memory-based reasoning has been applied to machine translation, parsing, unit selection text-to-speech synthesis, part-of-speech tagging, and others. An overview of memory-based approaches to natural language processing can be found in the introduction to the special issue [2] .",
                "cite_spans": [
                    {
                        "start": 274,
                        "end": 277,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Memory-Based Reasoning",
                "sec_num": "2.1"
            },
            {
                "text": "The most prominent example for statistical generation is Nitrogen [6] . This system has been designed to allows large scale generation while requiring only a minimal knowledge base. An abstract meaning representation is turned into a lattice of surface sentences using a simple keyword based grammar. Using statistical information acquired from a corpus, the sentences in the lattices are re-ranked to determine the optimal surface string.",
                "cite_spans": [
                    {
                        "start": 66,
                        "end": 69,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical and Instance-Based Generation",
                "sec_num": "2.2"
            },
            {
                "text": "More recently, example-based natural language generation using a corpus was proposed [11] . It is assumed in this work that content determination has already taken place and the input has been broken down to sentence-size pieces. The approach is to use a learned grammar to generate a list of candidates using a traditional chart based generation algorithm. The grammar is learned using statistical methods. During generation, edges that are added to the chart are ranked depending on their distance to the closest instance in the example base. This is where the memory-based approach comes into play. In order to allow for careful generalization in the instance base, the authors propose to add a list of tag (\"slots\") with which the corpus is annotated. Based on this annotated corpus, a semantic grammar is learned. For ranking the edge based on the instances, the authors propose the well-known tf-idf scheme with the difference that those words that are annotated with a semantic tag are replaced by their tag.",
                "cite_spans": [
                    {
                        "start": 85,
                        "end": 89,
                        "text": "[11]",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical and Instance-Based Generation",
                "sec_num": "2.2"
            },
            {
                "text": "Memory-based learning requires a distance metric in order to identify instances similar to the problem at hand. We propose to use convolution kernels as distance metric. A kernel K can be seen as a generalized form of a distance metric that performs the following calculation",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Kernels",
                "sec_num": "3"
            },
            {
                "text": "K(x, y) = \u03c6(x), \u03c6(y) ,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Kernels",
                "sec_num": "3"
            },
            {
                "text": "where \u03c6 is a non-linear mapping from the input space into some higher dimensional feature space, and \u2022, \u2022 is the inner product in the feature space. Calculating the inner product in some space of higher dimension than the input space is desirable for classifiers because non linearly separable sets can be linearly separated in the higher dimensional feature space. Kernel methods are computationally attractive because the kernel can calculate the mapping and the inner product implicitly rather than explicitly determining the image under \u03c6 of the input.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Kernels",
                "sec_num": "3"
            },
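To make the implicit computation concrete, the following sketch (our illustration, not part of the paper) shows that the quadratic kernel K(x, y) = (x . y)^2 on R^2 equals the explicit inner product under the feature map phi(x) = (x1^2, sqrt(2) x1 x2, x2^2):

```python
import math

def kernel(x, y):
    # Implicit computation: never constructs phi(x) or phi(y).
    return (x[0] * y[0] + x[1] * y[1]) ** 2

def phi(x):
    # Explicit feature map from R^2 into R^3.
    return (x[0] ** 2, math.sqrt(2) * x[0] * x[1], x[1] ** 2)

def dot(a, b):
    return sum(ai * bi for ai, bi in zip(a, b))

x, y = (1.0, 2.0), (3.0, 0.5)
assert math.isclose(kernel(x, y), dot(phi(x), phi(y)))  # both evaluate to 16.0
```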
            {
                "text": "While Bag-of-Words techniques can be employed as an approximation to derive feature vectors for classifiers, the loss of structure is not desirable. To address this problem, Haussler [3] proposed Convolution Kernels that are capable of processing structured objects x and y. The structured objects x and y consist of components x 1 , . . . , x m and y 1 , . . . , y n . The convolution kernel of x and y is given by the sum of the products of the components' convolution kernels. This approach can be applied to structured objects of various kinds, and results have been reported for string kernels and tree kernels.",
                "cite_spans": [
                    {
                        "start": 183,
                        "end": 186,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Kernels",
                "sec_num": "3"
            },
            {
                "text": "The idea behind Convolution Kernels is that the kernel of two structures is defined as the sum of the kernels of their parts. Formally, let D be a positive integer and X, X 1 , . . . , X D separable metric spaces. Furthermore, let x and y be two structured objects, and",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Hierarchical Tree Kernel",
                "sec_num": "3.1"
            },
            {
                "text": "x = x 1 , . . . , x D and y = y 1 , . . . , y D their parts. The relation R \u2286 X 1 \u00d7 . . . \u00d7 X D \u00d7 X holds for x and x if x are the parts of x. The inverse R \u22121 maps each structured object onto its parts, i.e. R \u22121 (x) = {x : R(x, x)}.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Hierarchical Tree Kernel",
                "sec_num": "3.1"
            },
            {
                "text": "Then the kernel of x and y is given by the following generalized convolution:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Hierarchical Tree Kernel",
                "sec_num": "3.1"
            },
            {
                "text": "K(x, y) = x\u2208R \u22121 (x) y\u2208R \u22121 (y) D 1 K d (x d , y d )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Hierarchical Tree Kernel",
                "sec_num": "3.1"
            },
            {
                "text": "Informally, the value of a convolution kernel for two objects X and Y is given by the sum of the kernel value for each of the substructures, i.e. their convolution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Hierarchical Tree Kernel",
                "sec_num": "3.1"
            },
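As a toy illustration of the generalized convolution (a sketch under our own assumptions, not the paper's implementation), the function below sums, over all part decompositions of x and y, the product of per-part kernels; `decompose` plays the role of R^{-1} and the part kernel here is a simple exact-match kernel:

```python
def delta(a, b):
    # Exact-match kernel on atomic parts.
    return 1.0 if a == b else 0.0

def convolution_kernel(x, y, decompose, part_kernels):
    # decompose(obj) plays the role of R^{-1}: it returns the list of
    # D-tuples of parts of the object.
    total = 0.0
    for xs in decompose(x):
        for ys in decompose(y):
            prod = 1.0
            for k_d, x_d, y_d in zip(part_kernels, xs, ys):
                prod *= k_d(x_d, y_d)
            total += prod
    return total

# Toy decomposition: a string splits into a (prefix, suffix) pair at
# every interior position (D = 2).
def splits(s):
    return [(s[:i], s[i:]) for i in range(1, len(s))]

print(convolution_kernel("abc", "abc", splits, [delta, delta]))  # 2.0
```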
            {
                "text": "Suzuki et al [10] proposed Hierarchical Directed Acyclic Graph kernels in which the substructures contain nodes which can contain graphs themselves. The hierarchy of graphs allows extended information from multiple components to be represented and used in classification. In addition, nodes may be annotated with attributes, such as part of speech tags, in order to add information. For example, in a Question-Answering system, components such as Named Entity Extraction, Question Classification, Chunking and so on may each add to the graph.",
                "cite_spans": [
                    {
                        "start": 13,
                        "end": 17,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Hierarchical Tree Kernel",
                "sec_num": "3.1"
            },
            {
                "text": "We collected a corpus for our instance based generation system as follows. We set up communications between a wizard and users. The wizard was instructed to \"act like the system\" we intend to build, that is, she was required to interact with the user either by prompting for more information or give the user the information she thought he wanted. Altogether, 20 users participated in the Due to the strict dialogue regiment prescribed in the onset of the data collection, each dialogue consists either of an equal number of user and wizard turns (in case the user ends the dialogue; 14 cases) or one wizard turn more than user turn in case the wizard ends the dialogue (187 cases). Figure 1 shows the first part of a dialogue from the corpus.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 683,
                        "end": 691,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Corpus",
                "sec_num": "4"
            },
            {
                "text": "We now describe our algorithm informally. Given the dialogue history up until now, the last user utterance and the result list as a response to the last user utterance, it is the task of the algorithm to generate an appropriate question to elicit more information from the user. Recall an external dialogue module (not described in this paper) decides whether an information seeking question should be generated (as opposed to, say, turning the information found in the highest ranking article into an answer).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the Algorithm",
                "sec_num": "5.1"
            },
            {
                "text": "Informally, the algorithm works as follows. Initially, the dialogue corpus is preprocessed, including word segmentation and part-of-speech labeling (see section 5.2). In step 1, a ranked list of question candidates is generated (see section 5.3). In step 2, for each of the candidates, a list of change positions is determined (see section 5.4). These indicate the part of the questions that need to be adapted to the current situation. Subsequently, the portions indicated by the change positions are replaced by appropriate constituents. In the step 3, the candidates generated in the previous step are re-ranked (see section 5.5). Reranking takes place by using the same distance as the one in step 1. The highest ranking candidate is then presented to the user.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the Algorithm",
                "sec_num": "5.1"
            },
            {
                "text": "Since Japanese does not provide word segmentation, we need to preprocess the corpus. The corpus consists of a set of dialogues. Each dialogue consists of a set of utterances. Each utterance is annotated for speaker and utterance type. In a dialogue, wizard and user utterance strictly alternate, with no interjections.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Corpus Preprocessing",
                "sec_num": "5.2"
            },
            {
                "text": "Preprocessing is done as follows. Each utterance is stripped of its annotations and presented to the part-of-speech tagger Chasen [1] . Chasen segments the input sentence, reduces inflected words to their base forms and assigns part of speech tags to the base forms. We use the notation cw(u) to designate the content words in utterance, sentence or newspaper article u. For our purposes, content words are adjectives, nouns and verbs, de-inflected to their base form, if necessary. A subsequent processing step assigns semantic labels and named entity classes to the de-inflected word forms.",
                "cite_spans": [
                    {
                        "start": 130,
                        "end": 133,
                        "text": "[1]",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Corpus Preprocessing",
                "sec_num": "5.2"
            },
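As a sketch of cw(u) (our illustration; we assume tagged tokens are available as (surface, base form, part of speech) triples rather than reproducing ChaSen's actual output format):

```python
CONTENT_POS = {"noun", "verb", "adjective"}

def cw(tagged_tokens):
    # Content words of an utterance: base forms of adjectives, nouns and verbs.
    return {base for _, base, pos in tagged_tokens if pos in CONTENT_POS}

# Hypothetical tagger output for a short utterance:
tokens = [("movies", "movie", "noun"), ("are", "be", "verb"), ("fun", "fun", "adjective")]
print(cw(tokens))  # {'movie', 'be', 'fun'}
```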
            {
                "text": "In order to understand the motivation for our approaches to sentence selection, it is necessary to recall the context in which sentences are selected. We would like to find a information seeking question similar to the one we want to generate. The question to be generated is determined by the dialogue context. A natural approach is to choose a bag-of-word distance measure for sentences, define a distance for partial dialogues based on this distance and then choose the dialogue, and a sentence from that dialogue with the lowest distance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "5.3"
            },
            {
                "text": "It turns out, however, that this approach does not work too well. One problem is that in the beginning of a dialogue not many informative words are contained in the utterances, therefore making an informed selection of utterances difficult. The point of this paper is to determine how to overcome this problem. In the following two sections, we propose two approaches. The first uses additional information in the retrieved documents, and the second uses additional syntactic and semantic information when calculating the distance between sentences. Both methods consists of calculating a score for candidate sentences and selecting the highest ranking one. Method 1. Information retrieval over large corpora works well due to the redundancy in the document data, a fact that for example Latent Semantic Indexing exploits. The principal idea of the first method is to use the redundancy in the unrestricted document corpus when scoring sentence candidates. Instead of determining the bag-of-word score between a candidate sentence and the query sentence, we submit the information extracted from the candidate dialogue and the current dialogue to the information retrieval engine, resulting in two n best lists of articles L and L . In order to score the degree of similarity, we determine the the intersection of content words in the retrieved articles. The larger the intersection, the higher the score is to be ranked. In order to take relevance in the result set into account, the scores are discounted by the position of the article in the n best list. More specifically, we calculate the similarity score between the current dialogue and an example dialogue as follows. Let d be the currently developing dialogue consisting of t user utterances and u 1 , . . . u t be the user utterances in the current dialogue up until now. Furthermore, let d be an example dialogue from the corpus and let u 1 , . . . u t be the first t user utterances in the example dialogue. Then:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "5.3"
            },
            {
                "text": "1. Form the union of content words CW = t cw(u t ), CW = t cw(u t ) 2. Submit two queries to the information retrieval engine consisting of CW and CW , respectively and obtain two article n best lists L and L . 3. Calculate the similarity score according to",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "5.3"
            },
            {
                "text": "sim(u t , u t ) = l\u2208L l \u2208L cw(l) \u2229 cw(l ) rank(l) + rank(l )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "5.3"
            },
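A sketch of Method 1's score (our reading of the formula above; `search` stands in for the IR engine and returns articles in rank order, and `content_words` implements cw(.) for articles; both are hypothetical helpers):

```python
def method1_sim(CW, CW_prime, search, content_words, n=10):
    L = search(CW, n)              # n-best articles for the current dialogue
    L_prime = search(CW_prime, n)  # n-best articles for the example dialogue
    score = 0.0
    for rank_l, article in enumerate(L, start=1):
        cw_l = content_words(article)
        for rank_lp, article_prime in enumerate(L_prime, start=1):
            # Content-word overlap, discounted by both articles' ranks.
            score += len(cw_l & content_words(article_prime)) / (rank_l + rank_lp)
    return score
```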
            {
                "text": "Method 2. In the first method described above, we seek to overcome poor scoring function by adding redundancy from the information retrieval engine. The second method we propose attempts to improve scoring by adding syntactic and semantic structure to the distance metric. More specifically, we directly compare the last user utterance in the current dialogue with the last utterance in the example dialogue, but do so in a more detailed manner. To this end, we determine the similarity score as the output of the hierarchical directed acyclic graph kernel. The similarity is thus defined as sim(u t , u t ) = K(u t , u t ).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "5.3"
            },
            {
                "text": "The adaptation of the highest ranking question to the current dialogue consists of four steps. First, we determine the location(s) where change should take place. Second, we determine constraints for the substituting constituent. Third, we determine a list of substituents for each location of change. Fourth, we replace the phrase(s) at the location(s) of change with the highest ranking element from the corresponding list of substituents.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Adaptation",
                "sec_num": "5.4"
            },
            {
                "text": "Determining Locations of Change. After the example sentences have been retrieved from the corpus, we need to determine where and how the questions need to be adapted to the current dialogue. We determine the locations of change l i by identifying suitable head words of phrase to be exchanged. What are the criteria for suitable head words? Recall that the example sentences are drawn from dialogue similar in topics but in which the content words are exchanged. This limits the part-of-speech of the words to be exchanged to nouns and verbs. Therefore, we construct a list l of nouns and verbs that are part of the retrieved sentence but cannot be found in the current user query. Second, since we are interested in replacing those content words that are specific to the retrieved dialogue with those specific to the current dialogue, we would like to incorporate some measure of informativeness. For that reason, we determine the unigram count for all content words in l. High ranking candidates for change are those words that are specific (i.e., have a low unigram count above a certain threshold).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Adaptation",
                "sec_num": "5.4"
            },
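A sketch of one plausible reading of the change-location selection (the tagged-token format, `unigram_count` and the `max_count` threshold are our assumptions, not the paper's code):

```python
def change_candidates(retrieved_tokens, query_content_words, unigram_count,
                      max_count=1000):
    # Nouns and verbs of the retrieved question that are absent from the
    # current user query are candidates for replacement.
    cands = [base for _, base, pos in retrieved_tokens
             if pos in {"noun", "verb"} and base not in query_content_words]
    # Keep only words specific enough (unigram count below the threshold),
    # ranked rarest first.
    return sorted((w for w in cands if unigram_count(w) < max_count),
                  key=unigram_count)
```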
            {
                "text": "The constraints for the substituents are given by the semantic and syntactic information of the phrase at the change location. More specifically, the constraints include the following features: Part of speech, type of named entity, if applicable (the type includes location, state, person name and so on), and semantic class.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Constraints for Substituents.",
                "sec_num": null
            },
            {
                "text": "Determining Substituents. After having determined the change locations and constraints of the substituents, we proceed to determine the substituents.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Constraints for Substituents.",
                "sec_num": null
            },
            {
                "text": "The primary source for substituents are the retrieved newspaper articles. However, since we wish to apply the generation component in a dialogue system, we need to take implicit confirmation into account as well. For this reason, we determine whether a phrase matching the phrase at change location l i occurs before l i previously in the dialogue. If this is the case, the source for the substituent is to be the current dialogue.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Constraints for Substituents.",
                "sec_num": null
            },
            {
                "text": "Given the constraints for a change location determined in the previous step, we add all content words from the highest ranking article to the candidate list for that change location. The score for a content word is given by the number of constraints it fulfills. Ties are broken by unigram counts so that rare words get a higher score due to their informativeness.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Constraints for Substituents.",
                "sec_num": null
            },
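A sketch of the substituent ranking (token attributes, constraint keys and `unigram_count` are illustrative assumptions, not the paper's data structures):

```python
def rank_substituents(candidates, constraints, unigram_count):
    # candidates: list of (word, attributes) pairs drawn from the highest
    # ranking article; constraints: e.g. {"pos": "noun", "ne_type": "location"}.
    def key(cand):
        word, attrs = cand
        satisfied = sum(1 for k, v in constraints.items() if attrs.get(k) == v)
        # Primary key: constraints satisfied (more is better); tie-breaker:
        # unigram count (rarer is more informative, hence better).
        return (-satisfied, unigram_count(word))
    return sorted(candidates, key=key)
```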
            {
                "text": "Applying the change simply consists of removing the phrase whose head word is located at the change location and replacing it with the highest ranking word from the candidate list for that score.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Application of Change.",
                "sec_num": null
            },
            {
                "text": "The previous steps produce a list of sentence candidates. For each of the sentence candidates, we calculate the similarity between the generated sentence with the sentences from a small corpus of desirable sentences. Finally, the sentence with the highest score is presented to the user. Examples of generated sentences are shown in figure 2. The complete algorithm is given in figure 3. Fig. 2 . Generated questions. The substituent in the first question comes from the dialogue context, while the other substituents come from retrieved articles.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 388,
                        "end": 394,
                        "text": "Fig. 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Reranking",
                "sec_num": "5.5"
            },
            {
                "text": "The evaluation was done as follows. We divided the corpus in a example base and a test set. The example base consists of 151 randomly selected dialogues, the test set consists of the remaining 50 dialogues. From each of the test examples, we supplied the initial wizard greeting and the initial user utterance as context for the dialogue. Given this context, each method generated an n best list consisting of 3 information seeking questions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "6"
            },
            {
                "text": "The generated lists were labeled by three annotators according to the following criteria. For each of the three questions in the n best lists, the annotators had to determine a syntactic, a semantic and an overall score. The scores range over the labels poor, acceptable, good. The same score could be assigned more Step 1: Determine sim(ut, u t ) for all user utterances u t from the dialogue corpus Select the w 1 , . . . w k wizard utterances directly following the k highest ranking utterances",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "6"
            },
            {
                "text": "Fig. 3. Generation algorithm. Step 1: Determine sim(u_t, u'_t) for all user utterances u'_t from the dialogue corpus; select the wizard utterances w_1, ..., w_k directly following the k highest ranking utterances. Step 2: For each w_i \u2208 {w_1, ..., w_k}, determine the change locations and substituents and apply the changes (section 5.4). Step 3: Determine and return the highest ranking candidate v_i*.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 3. Generation algorithm",
                "sec_num": null
            },
            {
                "text": "The results of the three best syntactic and semantic sentence scoring are shown in table 1 (a) and 1 (b). The inter-annotator agreement is given by their kappa scores for each method separately. Table 1 (c) shows the average of syntactic and semantic scores. The kappa coefficient for the inter-annotator agreement for these scores are 0.68, 0.72, and 0.71, respectively. The syntactic scores rank higher than the semantic scores. This is explained by the fact that the corpus contains syntactically relatively well-formed example sentences, and the replacement operator, in addition to being constrained by part-of-speech as well as semantic information, does not have much opportunity to create a syntactically malformed sentence. Furthermore, method 1 produces sentences that are semantically more accurate than method 2.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 195,
                        "end": 202,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Scoring Results",
                "sec_num": "6.1"
            },
            {
                "text": "In order to determine the quality of the ranking, the annotators had to rerank the generated questions. We determine the distance between two rankings according to the Edit distance. Since the generated lists are only of length 3, there are only three possibilties: the lists are equal (edit distance 0), one element in both lists is the same (edit distance 2), and no element in the lists is the same, (edit distance 3). In order to allow easy comparison with the table above, we award scores of 1, 0.5 and 0 for edit distances of 0, 2 and 3, respectively (i.e., 1 is best, 0 is worst). The annotators were asked to rank the questions according to syntactic criteria alone, semantic criteria alone and all criteria. The results are shown in Table 2 . It can be seen that method 2 ranks the example sentences in a way that is more in line with the choices of the annotators than method 1.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 742,
                        "end": 749,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Ranking Results",
                "sec_num": "6.2"
            },
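Since the two lists are orderings of the same three questions, a permutation of three items has either 3, 1 or 0 fixed points, corresponding to edit distances 0, 2 and 3. A minimal sketch of the resulting score:

```python
def ranking_score(system_order, annotator_order):
    fixed = sum(1 for s, a in zip(system_order, annotator_order) if s == a)
    if fixed == 3:
        return 1.0   # identical rankings (edit distance 0)
    if fixed == 1:
        return 0.5   # one element in place (edit distance 2)
    return 0.0       # no element in place (edit distance 3)

print(ranking_score([1, 2, 3], [1, 3, 2]))  # 0.5
```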
            {
                "text": "We hypothesize that the differences in the performance of the algorithms is due to the different selection mechanisms. In order to validate this point, we asked the three annotators to each provide one utterance they would rank highest for each system question (called gold standard). Then, we formed a list of 6 sentences u 1 , . . . u 6 (3 generated by the generation algorithm and 3 by the annotators) and compared for each dialogue context the scores sim(u t , u i ) for those 6 sentences where u t is the user utterance from the corresponding test case. We expect a perfect ranking algorithm to value the gold standard as least as high as any sentence from the corpus, and to value the gold standard higher every time the annotators found the generated sentences faulty. It turns out that method 1 places the sentences of the gold standard in the top 3 in 42.3% of the cases while method 2 does this in 59.3% of the cases.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Quality of Ranking",
                "sec_num": "6.3"
            },
            {
                "text": "It can be seen that in general, method 1 produces higher quality sentences while method 2 ranks the sentences better. We interpret this as follows. For sentence selection, the redundancy as provided by the IR engine is helpful, whereas for ranking of example sentences, the additional structure as expressed in the kernel helps.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "Kiyota and colleagues [5] describe an interactive restricted domain question answering system where users can interactively retrieve causes for problems with a computers' operating system. Here, the problem of missing structure is solved by providing so-called dialogue cards which provide the knowledge necessary for dialogue processing. A dialogue card contains keywords, a question as asked by the user in natural language (for example \"Windows does not boot\"), an information seeking question to be issued by the system (for example \"Which version of Windows do you use\") and a list of options associated with actions. The actions are executed in function of the users' answer to the question. Dialogue processing takes place by retrieving relevant dialogue cards, where relevance is determined by matching the users' question and keywords with the question and keywords noted on the dialogue card. Compared to our method, this method requires substantially more structure to be represented in the dialogue cards and is therefore more expensive to develop. Furthermore, the absence of any sort of change operators to adapt the question from the dialogue card to the current situation does not provide as much flexibility as our method. On the other hand, the highly structured dialogue cards give the developers more control (at the price of a higher development cost) over the systems behavior than our method and is therefore less risky in situations where failure is expensive.",
                "cite_spans": [
                    {
                        "start": 22,
                        "end": 25,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "7.1"
            },
            {
                "text": "In Small et al [8] , retrieved documents are forced into frame structures. Mismatches or between the fillers of the frame structures or missing fillers trigger information seeking questions to the user. While the generation as it is actually used is not described in the paper, we believe that the frames provide sufficient structure for template-based approaches.",
                "cite_spans": [
                    {
                        "start": 15,
                        "end": 18,
                        "text": "[8]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "7.1"
            },
            {
                "text": "Hori and coworkers [4] developed an interactive question answering system based on a Japanese newspaper corpus. The purpose of information seeking questions is to prompt the user for missing or disambiguating information. From a generation point of view, strong assumptions are made on the surface form of the generated information seeking question. More specifically, ambiguous keywords are combined with disambiguating options by means of the Japanese particle 'no'.",
                "cite_spans": [
                    {
                        "start": 19,
                        "end": 22,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "7.1"
            },
            {
                "text": "To summarize, the presented approaches attempt in different ways to compensate for the lack of structure in an question answering system. Structure can be provided explicitly as in the case of the dialogue cards, can be introduced during processing as in the case of the frame-based document representations, and can be assumed in the target expression as in the case of the generation templates. In contrast to the described methods, our method does not require an explicit representation of structure. Rather, the structure is given by whatever structure the kernel and the change operators construct during generation. In other words, the structure our approach uses is (1) restricted to the question to be generated and does not apply to the document level, and (2) in tradition with the lazy learning characteristics of memory-based approaches is generated on the fly on an as-needed basis, as opposed to being dictated from the outset at design time.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Summary",
                "sec_num": "7.2"
            }
        ],
        "back_matter": [
            {
                "text": "We acknowledge the help of Takuya Suzuki with the implementation. Jun Suzuki provided the implementation of the HDAG kernel. We would like to thank Hideki Isozaki and our colleagues at NTT CS labs for discussion and encouragement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Extended Models and Tools for High-Performance Part-of-Speech Tagger",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Asahara",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Matsumoto",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of The 18th International Conference on Computational Linguistics, Coling",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Asahara and Y. Matsumoto. 2000. Extended Models and Tools for High- Performance Part-of-Speech Tagger. In Proceedings of The 18th International Conference on Computational Linguistics, Coling 2000, Saarbr\u00fccken, Germany.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Introduction to the Special Issue on Memory-Based Language Processing",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Daelemans",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Journal of Experimental and Theoretical Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "W. Daelemans. 1999. Introduction to the Special Issue on Memory-Based Language Processing. Journal of Experimental and Theoretical Artificial Intelligence.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Convolution kernels on discrete structures",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Haussler",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. Haussler. 1999. Convolution kernels on discrete structures. Technical report, UC Santa Cruz.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Spoken interactive odqa system: Spiqa",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Hori",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Hori",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Tsukada",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Isozaki",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Sasaki",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Maeda",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proc. of the 41th Annual Meeting of Association for Computational Linguistics (ACL-2003)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "C. Hori, T. Hori, H. Tsukada, H. Isozaki, Y. Sasaki, and E. Maeda. 2003. Spoken interactive odqa system: Spiqa. In Proc. of the 41th Annual Meeting of Association for Computational Linguistics (ACL-2003), Sapporo, Japan.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Dialog Navigator\": A Question Answering System based on Large Text Knowledge Base",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Kiyota",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Kurohashi",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Kido",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of The 19th International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. Kiyota, S. Kurohashi, and F. Kido. 2002. \"Dialog Navigator\": A Question Answering System based on Large Text Knowledge Base. In Proceedings of The 19th International Conference on Computational Linguistics, Coling 2002,Taipei, Taiwan.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Generation that exploits Corpus-Based Statistical Knowledge",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Langkilde",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the Conference of the Association for Computational Linguistics (COLING/ACL)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "I. Langkilde and K. Knight. 1998. Generation that exploits Corpus-Based Statis- tical Knowledge. In Proceedings of the Conference of the Association for Compu- tational Linguistics (COLING/ACL).",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Stochastic Language Generation for Spoken Dialogue Systems",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "H"
                        ],
                        "last": "Oh",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Rudnicky",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "ANLP/NAACL 2000 Workshop on Conversational Systems",
                "volume": "",
                "issue": "",
                "pages": "27--32",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "A.H. Oh and A. Rudnicky. 2000. Stochastic Language Generation for Spoken Dialogue Systems. In ANLP/NAACL 2000 Workshop on Conversational Systems, pages 27-32.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Hitiqa: Towards analytical question answering",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Small",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Strzalkowski",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of The 20th International Conference on Computational Linguistics, Coling",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Small and T. Strzalkowski. 2004. Hitiqa: Towards analytical question answering. In Proceedings of The 20th International Conference on Computational Linguistics, Coling 2004,Geneva Switzerland.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Toward Memory-based Reasoning",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Stanfill",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Waltz",
                        "suffix": ""
                    }
                ],
                "year": 1986,
                "venue": "Communications of the ACM",
                "volume": "29",
                "issue": "",
                "pages": "1213--1228",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "C. Stanfill and D. Waltz. 1986. Toward Memory-based Reasoning. Communications of the ACM, vol. 29, pages 1213-1228.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Hierarchical directed acyclic graph kernel: Methods for structured natural language data",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Suzuki",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Hirao",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Sasaki",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Maeda",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proc. of the 41th Annual Meeting of Association for Computational Linguistics (ACL-2003), Sapporo",
                "volume": "",
                "issue": "",
                "pages": "32--39",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Suzuki, T. Hirao, Y. Sasaki, and E. Maeda. 2003. Hierarchical directed acyclic graph kernel: Methods for structured natural language data. In Proc. of the 41th Annual Meeting of Association for Computational Linguistics (ACL-2003), Sap- poro, Japan, pages 32-39.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Instance-based natural language generation",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Varges",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Mellish",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings of the 2nd Meeting of the North American Chapter of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "1--8",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Varges and C. Mellish. 2001. Instance-based natural language generation. In Proceedings of the 2nd Meeting of the North American Chapter of the Association for Computational Linguistics, pages 1-8.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "SPoT: A Trainable Sentence Planner",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Walker",
                        "suffix": ""
                    },
                    {
                        "first": "O",
                        "middle": [],
                        "last": "Rambow",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Rogati",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings of the North American Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Walker, O. Rambow, and M. Rogati. 2001. SPoT: A Trainable Sentence Planner. In Proceedings of the North American Meeting of the Association for Computational Linguistics.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "Extract from the dialogue corpus data collection effort. Each user contributed to 8 to 15 dialogues. The length of the dialogues varies between 11 and 84 turns, the median being 34 turns. Altogether, the corpus consists of 201 dialogues. The corpus consists of 6785 turns, 3299 of which are user turns and the remaining 3486 are wizard turns.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF1": {
                "text": "Preprocessed dialogue corpus C = {d 1 , . . . , d n } Current dialogue d with user utterances u1, . . . , ut Output: Information seeking question",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "TABREF1": {
                "text": "Average of syntactic and semantic scores",
                "num": null,
                "html": null,
                "content": "<table><tr><td colspan=\"2\">Method 1 Method 2</td><td colspan=\"2\">Method 1 Method 2</td><td colspan=\"2\">Method 1 Method 2</td></tr><tr><td>1 0.796</td><td>0.800</td><td>1 0.573</td><td>0.393</td><td>1 0.685</td><td>0.596</td></tr><tr><td>2 0.657</td><td>0.790</td><td>2 0.393</td><td>0.426</td><td>2 0.525</td><td>0.608</td></tr><tr><td>3 0.787</td><td>0.780</td><td>3 0.416</td><td>0.376</td><td>3 0.602</td><td>0.578</td></tr><tr><td>(a)</td><td/><td>(b)</td><td/><td>(c)</td><td/></tr></table>",
                "type_str": "table"
            },
            "TABREF2": {
                "text": "Comparison of ranking: Syntactic, semantic and overall",
                "num": null,
                "html": null,
                "content": "<table><tr><td colspan=\"2\">Method 1 Method 2</td><td colspan=\"2\">Method 1 Method 2</td><td colspan=\"2\">Method 1 Method 2</td></tr><tr><td>1 0.493</td><td>0.893</td><td>1 0.720</td><td>0.873</td><td>1 0.766</td><td>0.853</td></tr><tr><td>2 0.813</td><td>0.860</td><td>2 0.760</td><td>0.780</td><td>2 0.740</td><td>0.726</td></tr><tr><td>3 0.767</td><td>0.227</td><td>3 0.567</td><td>0.353</td><td>3 0.573</td><td>0.213</td></tr><tr><td>(a)</td><td/><td>(b)</td><td/><td>(c)</td><td/></tr></table>",
                "type_str": "table"
            }
        }
    }
}