|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:24:32.272632Z" |
|
}, |
|
"title": "Encoding Explanatory Knowledge for Zero-shot Science Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "Zili", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Manchester", |
|
"location": { |
|
"country": "United Kingdom" |
|
} |
|
}, |
|
"email": "zili.zhou@manchester.ac.uk" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Valentino", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Manchester", |
|
"location": { |
|
"country": "United Kingdom" |
|
} |
|
}, |
|
"email": "marco.valentino@manchester.ac.uk" |
|
}, |
|
{ |
|
"first": "D\u00f3nal", |
|
"middle": [], |
|
"last": "Landers", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Manchester Institute", |
|
"location": { |
|
"country": "UK, United Kingdom" |
|
} |
|
}, |
|
"email": "donal.landers@digitalecmt.org" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Freitas", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Manchester", |
|
"location": { |
|
"country": "United Kingdom" |
|
} |
|
}, |
|
"email": "andre.freitas@idiap.ch" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes N-XKT (Neural encoding based on eXplanatory Knowledge Transfer), a novel method for the automatic transfer of explanatory knowledge through neural encoding mechanisms. We demonstrate that N-XKT is able to improve accuracy and generalization on science Question Answering (QA). Specifically, by leveraging facts from background explanatory knowledge corpora, the N-XKT model shows a clear improvement on zero-shot QA. Furthermore, we show that N-XKT can be fine-tuned on a target QA dataset, enabling faster convergence and more accurate results. A systematic analysis is conducted to quantitatively analyze the performance of the N-XKT model and the impact of different categories of knowledge on the zero-shot generalization task.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes N-XKT (Neural encoding based on eXplanatory Knowledge Transfer), a novel method for the automatic transfer of explanatory knowledge through neural encoding mechanisms. We demonstrate that N-XKT is able to improve accuracy and generalization on science Question Answering (QA). Specifically, by leveraging facts from background explanatory knowledge corpora, the N-XKT model shows a clear improvement on zero-shot QA. Furthermore, we show that N-XKT can be fine-tuned on a target QA dataset, enabling faster convergence and more accurate results. A systematic analysis is conducted to quantitatively analyze the performance of the N-XKT model and the impact of different categories of knowledge on the zero-shot generalization task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Contemporary Question Answering (QA) is evolving in the direction of addressing more abstractive reasoning tasks (Thayaparan et al., 2020; Dua et al., 2019; Mihaylov et al., 2018) , supported by multi-hop inference (Khot et al., 2020; Yang et al., 2018) and explanatory scientific facts (Jansen and Ustalov, 2019; Jansen et al., 2018 Jansen et al., , 2016 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 138, |
|
"text": "(Thayaparan et al., 2020;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 156, |
|
"text": "Dua et al., 2019;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 179, |
|
"text": "Mihaylov et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 234, |
|
"text": "(Khot et al., 2020;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 253, |
|
"text": "Yang et al., 2018)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 313, |
|
"text": "(Jansen and Ustalov, 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 333, |
|
"text": "Jansen et al., 2018", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 355, |
|
"text": "Jansen et al., , 2016", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This trend of aiming to address more complex, multi-evidence and chained inference is pushing the envelope for novel representation and architectural patterns (Ding et al., 2019; Qiu et al., 2019; Asai et al., 2020; Thayaparan et al., 2019; Kundu et al., 2019; Valentino et al., 2021) , which are moving from modelling meaning from immediate distributional semantics patterns into deeper abstractive capabilities. This poses a paradigmatic challenge on the design of QA architectures, which need to operate over high-level semantic patterns and acquire the necessary knowledge to perform abstraction . At the same time, the design of new strategies to incorporate explanatory knowledge into neural representation has the potential to address fundamental data efficiency problems and promote zero-shot generalisation on out-of-distribution examples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 178, |
|
"text": "(Ding et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 196, |
|
"text": "Qiu et al., 2019;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 215, |
|
"text": "Asai et al., 2020;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 240, |
|
"text": "Thayaparan et al., 2019;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 260, |
|
"text": "Kundu et al., 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 284, |
|
"text": "Valentino et al., 2021)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Explanation-based Science QA (Jansen et al., 2018) provides a rich framework to evaluate these emerging requirements, as the task typically requires multi-hop reasoning through the composition of explanatory facts. While existing approaches in the field mainly focus on the construction of natural language explanations (Jansen et al., 2018; Jansen and Ustalov, 2019) , this work aims to explore the impact of explanatory knowledge on zero-shot generalisation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 50, |
|
"text": "(Jansen et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 341, |
|
"text": "(Jansen et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 367, |
|
"text": "Jansen and Ustalov, 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we argue that explanation-centred corpora can serve as a resource to boost zero-shot capabilities on Question Answering tasks which demand deeper inference. To this end, we explore the adoption of latent knowledge representations for supporting generalisation on downstream QA tasks requiring multi-hop inference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our hypothesis is that explanatory scientific knowledge expressed in natural language can be transferred into neural network representations, and subsequently used to achieve knowledge based inference on scientific QA tasks. To validate this hypothesis, this paper proposes a unified approach that frames Question Answering as an explanatory knowledge reasoning problem. The unification between the two tasks allows us to explore the adoption of pre-training strategies over explanatory knowledge bases, and subsequently leverage the same paradigm to generalise on the Question Answering task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "An empirical evaluation is performed on Transformers-based architectures adopting the WorldTree corpus as a knowledge base (Xie et al., 2020; Jansen et al., 2018) and measuring generalisation on ARC and OpenbookQA (Mihaylov et al., 2018) . The main contributions of this paper are as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 141, |
|
"text": "(Xie et al., 2020;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 162, |
|
"text": "Jansen et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 237, |
|
"text": "(Mihaylov et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose N-XKT, a neural mechanism for encoding and transferring explanatory knowledge for science QA. To the best of our knowledge, N-XKT is the first work tackling science QA tasks through the transfer of external explanatory knowledge via neural encoding mechanisms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We introduce the explanatory knowledge transfer task on explanation-centred knowledge bases, describing the methodology to implement N-XKT for knowledge acquisition and downstream Question Answering using Transformer-based models as neural encoders.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We conduct a systematic empirical analysis to demonstrate the effectiveness of N-XKT on improving downstream QA accuracy and overall convergence speed in the training phase. An ablation analysis on different types of knowledge facts is performed to measure the impact of different knowledge categories.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section we describe several works related to knowledge-based scientific QA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Explanation Bank Explanation Bank 1 is a core component of the WorldTree corpus (Jansen et al., 2018; Xie et al., 2020) . The dataset provides explanations for multiple-choice science questions in the form of graphs connecting questions and correct answers, where multiple sentences from a knowledge base (KB) are aggregated through lexical overlap between terms. The background knowledge used for the explanations is grouped in semi-structured tables, whose facts range from common-sense to core scientific statements. Explanation Bank has been proposed for the task of explanation regeneration (Jansen and Ustalov, 2019) -i.e. given a multiple-choice science question, regenerate the gold explanation supporting the correct answer. The explanation regeneration task has been framed as an Information Retrieval (IR) problem (Valentino et al., 2021) . In this paper, we aim to leverage the knowledge expressed in the explanations to enhance generalisation and zero-shot capability on multiple-choice scientific question answering.", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 101, |
|
"text": "(Jansen et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 119, |
|
"text": "Xie et al., 2020)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 825, |
|
"end": 849, |
|
"text": "(Valentino et al., 2021)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "1 http://cognitiveai.org/explanationbank/ Bidirectional Encoder Representations from Transformers BERT represents the foundation which defines the state-of-the-art in several NLP tasks (Devlin et al., 2019) . This model adopts a Transformer-based architecture composed of several layers of attention (Vaswani et al., 2017) that are used to learn a deep bidirectional representation of language. BERT-based models have demonstrated remarkable results in Question Answering when directly fine-tuned on the answer prediction task or additionally pre-trained using domain specific knowledge (Clark et al., 2020; Beltagy et al., 2019) . A recent line of research attempts to enrich the input of BERT with background knowledge in the form of explanations in order to boost generalisation and accuracy for challenging QA settings. Here, the explanations are explicitly constructed through the adoption of language models (Rajani et al., 2019) or information retrieval (IR) approaches (Valentino et al., 2021; Yadav et al., 2019) . Conversely, this paper explores mechanisms to implicitly encode explanatory knowledge in the neural representation to improve the capability of performing downstream inference. Specifically, in this work, we adopt Transformers as text neural encoders.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 206, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 322, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 607, |
|
"text": "(Clark et al., 2020;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 629, |
|
"text": "Beltagy et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 977, |
|
"end": 1001, |
|
"text": "(Valentino et al., 2021;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1002, |
|
"end": 1021, |
|
"text": "Yadav et al., 2019)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Leveraging External Knowledge for Scientific QA Recently, many solutions have been proposed for science QA that leverage either external reference corpora (Khot et al., 2017; Khashabi et al., 2018; or existing knowledge graphs (Li and Clark, 2015; Sachan et al., 2016; Wang et al., 2018; Musa et al., 2019; Zhong et al., 2019) . Generally, previous works rely on Information Retrieval models or on structural embeddings for Knowledge Bases, while our work focuses on directly encoding explanatory knowledge, evaluating it in a downstream scientific QA setting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 174, |
|
"text": "(Khot et al., 2017;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 197, |
|
"text": "Khashabi et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 227, |
|
"end": 247, |
|
"text": "(Li and Clark, 2015;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 268, |
|
"text": "Sachan et al., 2016;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 287, |
|
"text": "Wang et al., 2018;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 306, |
|
"text": "Musa et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 326, |
|
"text": "Zhong et al., 2019)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Scientific Question Answering has the distinctive property of requiring the articulation of multi-hop and explanatory reasoning. This can be contrasted with the lexical-retrieval style of factoid Question Answering. Additionally, the explanatory chains required to arrive at the correct answer typically operate at an abstract level, through the combination of definitions and scientific laws (Thayaparan et al., 2020) . This characteristic makes the generalisation process more challenging, as the answer prediction model needs to acquire the ability to perform abstraction from the specific context in the question. This paper hypothesises that it is possible to automatically transfer abstractive knowledge from explanatory facts into neural encoding representation for more accurate scientific QA, and for enabling zero-shot generalization. To this end, we propose N-XKT (Neural encoding based on eXplanatory Knowledge Transfer) which encodes abstractive knowledge into neural representation to improve the effectiveness in both zero-shot QA task and fine-tuning based QA task. The general neural encoding mechanism is evaluated adopting the following training tasks:", |
|
"cite_spans": [ |
|
{ |
|
"start": 393, |
|
"end": 418, |
|
"text": "(Thayaparan et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1. Explanatory Knowledge Acquisition: In this pre-training task, the N-XKT model encodes the explanatory textual knowledge from a set of explanatory facts into supporting embeddings. This process aims to acquire the necessary explanatory knowledge to test generalization on downstream science QA. We frame this problem as a knowledge base completion task. Specifically, after casting each explanatory fact in the knowledge base into a tuple composed of subject, object, and predicate, the model is trained on completing each fact by alternatively masking each element in the tuple (additional details can be found in section 3.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "2. Cloze-style Question Answering: To keep the encoding mechanism consistent with the pre-training explanatory knowledge acquisition task, we cast Multiple-choice Question Answering into a cloze-style QA problem. Specifically, we train the N-XKT model to complete the question with the expected candidate answer. This task aims to acquire additional knowledge for addressing downstream science QA since the patterns in the questions are typically more complex than the background explanatory facts (additional details can be found in section 3.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The training tasks defined above can be used to encode different types and levels of knowledge into the N-XKT model, allowing us to perform a detailed evaluation on both zero-shot and finetuning-based Question Answering tasks. Figure 1 shows a schematic representation of the proposed approach.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 227, |
|
"end": 235, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The WorldTree corpus (Jansen et al., 2018) contains natural language explanatory facts, which are stored in semi-structured tables whose columns correspond to semantic roles. The knowledge base contains a total of 82 tables, where each table represents a different knowledge type, with different arity and argument types. N-XKT can be used as a unified approach for transferring knowledge from heterogeneous explanatory facts via a neural encoding mechanism.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 42, |
|
"text": "(Jansen et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To acquire the explanatory knowledge in a unified way for subsequent transfer learning, we normalize the semi-structured facts using a binary predicate-argument structure as typical practice in standard knowledge-base completion tasks (Bordes et al., 2013; Wang et al., 2014; Lin et al., 2015) . Specifically, for each table, we map the columns into three main components: subject, predicate, and object. After performing the mapping for each table in the knowledge base, we generate triples for all the facts in the knowledge base.", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 256, |
|
"text": "(Bordes et al., 2013;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 275, |
|
"text": "Wang et al., 2014;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 293, |
|
"text": "Lin et al., 2015)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
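
{

"text": "To make this mapping concrete, the following is a minimal Python sketch; the column names and the role assignment are hypothetical, since the actual WorldTree table schemas vary from table to table:\n\ndef row_to_triple(row, mapping):\n    # Map a semi-structured table row (column name -> cell text) to a\n    # (subject, predicate, object) triple; cells sharing a role are joined.\n    parts = {'subject': [], 'predicate': [], 'object': []}\n    for column, role in mapping.items():\n        cell = row.get(column, '').strip()\n        if cell:\n            parts[role].append(cell)\n    return tuple(' '.join(parts[r]) for r in ('subject', 'predicate', 'object'))\n\n# Hypothetical KINDOF-style row and column-to-role assignment:\nrow = {'HYPONYM': 'an eagle', 'KINDOF': 'is a kind of', 'HYPERNYM': 'bird'}\nmapping = {'HYPONYM': 'subject', 'KINDOF': 'predicate', 'HYPERNYM': 'object'}\nprint(row_to_triple(row, mapping))  # ('an eagle', 'is a kind of', 'bird')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Explanatory Knowledge Acquisition",

"sec_num": "3.1"

},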
|
{ |
|
"text": "By framing the explanatory knowledge acquisition task as a knowledge base completion problem, we alternatively mask subjects and objects from the triples and train the model to predict the missing component in the triple by giving in input the remaining ones. Specifically, we simulate a question answering problem adopting either subject or object as an answer, and the other two components in the triple as a question.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The neural encoder of N-XKT learns an embedding representation for each pair in input. A softmax layer is added on top of the embedding to predict the probability of the missing component in the triple. The configuration adopted for the N-XKT model is described in equation 1;.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u03b8 K \u2190 argmin \u03b8 L(N-XKT \u03b8 (q K , a K ), l K ) (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Here, q K and a K represent the simulated questionanswer pair generated from a generic explanatory fact triple, while l K represents the target labels (i.e. 1 if a is the correct component for completing the triple, 0 otherwise). \u03b8 K is the set of parameters optimised during the explanatory knowledge acquisition stage. The negative samples are generated by replacing each correct answer with a random component extracted from different explanatory facts in the knowledge base.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
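
{

"text": "The following is a minimal sketch of how such training pairs could be generated from the triples; the exact masking and sampling procedure used by N-XKT may differ, and collisions between a negative sample and the true answer are ignored for brevity:\n\nimport random\n\ndef make_completion_examples(triples, num_negatives=1, seed=0):\n    # Alternately mask the subject (index 0) and the object (index 2):\n    # the two unmasked components form the simulated question q_K, the\n    # masked component is the answer a_K, and l_K is 1 for the true\n    # component and 0 for a component drawn from a random other fact.\n    rng = random.Random(seed)\n    examples = []\n    for s, p, o in triples:\n        for question_parts, answer, index in (((p, o), s, 0), ((s, p), o, 2)):\n            q = ' '.join(question_parts)\n            examples.append((q, answer, 1))\n            for _ in range(num_negatives):\n                examples.append((q, rng.choice(triples)[index], 0))\n    return examples",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Explanatory Knowledge Acquisition",

"sec_num": "3.1"

},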
|
{ |
|
"text": "The transformer neural network is used as a textual neural encoder component of N-XKT, where each question-answer pair is compiled into the input token sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "[CLS][question][SEP ][answer][SEP ] (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The final hidden vector C \u2208 R H of the Transformer neural network that corresponds to the first input token ([CLS]) is used as an embedding to perform the final classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Knowledge Acquisition", |
|
"sec_num": "3.1" |
|
}, |
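
{

"text": "The following is a minimal sketch of this encoder-classifier configuration, assuming the Hugging Face transformers library; the binary classification head on top of the [CLS] embedding reflects the softmax layer described above, and is an assumption rather than the exact N-XKT implementation:\n\nimport torch\nfrom transformers import BertModel, BertTokenizer\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nencoder = BertModel.from_pretrained('bert-base-uncased')\nclassifier = torch.nn.Linear(encoder.config.hidden_size, 2)\n\ndef score_pair(question, answer):\n    # Compile the pair into the sequence [CLS] question [SEP] answer [SEP].\n    inputs = tokenizer(question, answer, return_tensors='pt')\n    outputs = encoder(**inputs)\n    cls_embedding = outputs.last_hidden_state[:, 0]  # C in R^H\n    logits = classifier(cls_embedding)\n    # Probability that the answer correctly completes the question.\n    return torch.softmax(logits, dim=-1)[0, 1].item()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Explanatory Knowledge Acquisition",

"sec_num": "3.1"

},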
|
{ |
|
"text": "Normally, the explanatory knowledge patterns do not contain the complete information to address downstream Question Answering. However, the questions in WorldTree can be used as additional knowledge to deal with complex structured science questions, allowing N-XKT to learn to recognize more complex patterns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cloze-style Question Answering", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To acquire additional knowledge while keeping the encoding mechanism consistent with the pretraining explanatory knowledge acquisition task, we cast Multiple-choice Question Answering into a cloze-style QA problem. The particular encoding configuration of the N-XKT model can be used in fact to address this type of question answering problems, where the model is trained to complete the question with the expected candidate answer. The detailed parameters and inputs adopted for cloze-style QA are described in equation 3:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cloze-style Question Answering", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b8 K+Q \u2190 argmin \u03b8 L(N-XKT \u03b8 K (q Q , a Q ), l Q )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Cloze-style Question Answering", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The setting adopted for cloze-style QA is similar to the one adopted for explanatory knowledge acquisition, but with two main differences: 1) In this case, the question q Q , the answer a Q , and the target label l K are generated from the WorldTree multiple-choice question answering set, where the right candidate answer of each question acts as a positive sample, and the incorrect candidate answers act as the negative samples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cloze-style Question Answering", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "2) The initial parameters are initially set with \u03b8 K , that is, we adopt the parameters that have been optimised during the explanatory knowledge acquisition stage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cloze-style Question Answering", |
|
"sec_num": "3.2" |
|
}, |
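
{

"text": "As an illustration, the following is a minimal sketch of the conversion described in point 1); the question text is an invented example:\n\ndef multiple_choice_to_cloze(question, choices, correct_index):\n    # The correct candidate yields a positive sample (l_Q = 1);\n    # every incorrect candidate yields a negative sample (l_Q = 0).\n    return [(question, choice, int(i == correct_index))\n            for i, choice in enumerate(choices)]\n\nexamples = multiple_choice_to_cloze(\n    'Which part of a plant absorbs water from the soil?',\n    ['the roots', 'the leaves', 'the flowers', 'the stem'],\n    correct_index=0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Cloze-style Question Answering",

"sec_num": "3.2"

},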
|
{ |
|
"text": "Given a multiple-choice science question, N-XKT can perform question answering by framing it as a sequence classification problem, where the question is paired with each candidate answer to compute a probability score. The candidate choice with highest score can then be selected as the predicted answer. We evaluate N-XKT in two different settings: zero-shot and fine-tuning-based QA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-shot and Fine-tuning Settings", |
|
"sec_num": "3.3" |
|
}, |
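
{

"text": "The following is a minimal sketch of this answer selection step, reusing a pairwise scoring function such as the score_pair sketch from Section 3.1:\n\ndef predict_answer(question, candidates, score_pair):\n    # Score each (question, candidate) pair and return the index of the\n    # candidate with the highest predicted probability.\n    scores = [score_pair(question, c) for c in candidates]\n    return max(range(len(candidates)), key=scores.__getitem__)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Zero-shot and Fine-tuning Settings",

"sec_num": "3.3"

},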
|
{ |
|
"text": "Regarding the zero-shot setting, the N-XKT is trained only on the explanatory knowledge acquisition task and then directly tested on downstream Question Answering. We also evaluate the model trained jointly on explanatory knowledge and science questions in WorldTree, evaluating its generalization capabilities on different multiple-choice Question Answering datasets, such as ARC 2 and OpenBook QA 3 (Mihaylov et al., 2018) . For each pair of question and candidate answer, the scores are computed as described in equation 4. Here, (q T , c T ) represent the test question and a candidate answer, while l T pred is the score predicted by the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 401, |
|
"end": 424, |
|
"text": "(Mihaylov et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-shot and Fine-tuning Settings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "l T pred = N-XKT \u03b8 K+Q (q T , c T )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Zero-shot and Fine-tuning Settings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In the fine-tuning setting, the N-XKT model is additionally fine-tuned on each target QA dataset as in equation 6. Here, (q S , a S ) represents a questionanswer pair from the target QA training set, while l S is the label indicating whether the answer is correct or not.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-shot and Fine-tuning Settings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u03b8 F \u2190 argmin \u03b8 L(N-XKT \u03b8 K+Q (q S , a S ), l S ) (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-shot and Fine-tuning Settings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As shown in equation 6, we adopt the same configuration as in the zero-shot setting, where the only difference is represented by the fine-tuned parameters set \u03b8 F :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-shot and Fine-tuning Settings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "l T pred = N-XKT \u03b8 F (q T , c T )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Zero-shot and Fine-tuning Settings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We conduct our experiments on four widely used science QA datasets, WorldTree V2.0 (Xie et al., 2020) , ARC Easy and Challenge , and Openbook QA (Mihaylov et al., 2018) . The results tend to confirm our research hypothesis that explanatory knowledge encoding can improve generalization in downstream science Question Answering (QA) tasks. Furthermore, we systematically analyze several factors which may have an impact on the final results, including the use of Transformer-based models with a larger number of parameters (BERT-large), testing the model on QA tasks using different types of explanatory background knowledge, and measuring training and test performance by further fine-tuning the model on other datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 101, |
|
"text": "(Xie et al., 2020)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 168, |
|
"text": "(Mihaylov et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Empirical Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "QA dataset size. In order to conduct a thorough quantitative analysis, we use four science QA datasets, WorldTree V2.0 (Xie et al., 2020) , ARC Easy and Challenge , and Openbook QA (Mihaylov et al., 2018) . The number of question-answer pairs in each dataset is listed in Table. 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 137, |
|
"text": "(Xie et al., 2020)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 204, |
|
"text": "(Mihaylov et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 278, |
|
"text": "Table.", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Explanatory knowledge dataset size. We encode different types of explanatory knowledge in the WorldTree corpus into Transformer neural networks. The statistics of the adopted explanatory facts are reported in Table 2 . Because we further analyze the impact of different types of knowledge, the number of each knowledge type is also given in the table.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 216, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Hyperparameters configuration. We adjust two major hyperparameters for the training of the model, namely batch size and learning rate. We optimize the parameters considering the following combinations: we adopt training batch sizes in {16, 32}, and learning rate in {1e \u2212 5, 3e \u2212 5, 5e \u2212 5}. The best results are obtained with batch size 32 and learning rate 3e \u2212 5 for the BERT-base model, and batch size 16 and learning rate 1e \u2212 5 for BERT-large (Devlin et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 470, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
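
{

"text": "For reference, a minimal sketch of the grid searched above; the best settings are the empirical results reported in this section, not derived by the sketch:\n\nfrom itertools import product\n\n# (batch_size, learning_rate) combinations explored during tuning.\ngrid = list(product([16, 32], [1e-5, 3e-5, 5e-5]))\nbest = {'bert-base': {'batch_size': 32, 'learning_rate': 3e-5},\n        'bert-large': {'batch_size': 16, 'learning_rate': 1e-5}}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "4.1"

},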
|
{ |
|
"text": "Information Retrieval baseline. We adopt an Information Retrieval (IR) baseline similar to the one described in . Given a question q, for each candidate answer c i \u2208 C = {c 1 , . . . , c n }, the IR solver uses BM25 vectors and cosine similarity to retrieve the top K sentences in the WorldTree corpus that are most similar to the concatenation of q and c i . The score of a candidate answer c i is then obtained by considering the sum of the BM25 relevance scores associated to the retrieved sentences. The predicted answer corresponds to the candidate choice with the highest score. To test the generalisation of this approach on ARC and OpenbookQA, we keep the same background knowledge throughout the experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
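
{

"text": "A minimal sketch of this baseline is given below, using the rank_bm25 package; this is an assumption for illustration, since the original baseline computes BM25 vectors and cosine similarity, which the sketch approximates with plain BM25 relevance scores:\n\nfrom rank_bm25 import BM25Okapi\n\ndef bm25_answer(question, candidates, corpus_sentences, k=5):\n    # Index the WorldTree sentences once.\n    bm25 = BM25Okapi([s.lower().split() for s in corpus_sentences])\n    def candidate_score(c):\n        # Query with the concatenation of q and c_i, then sum the\n        # relevance scores of the top-K retrieved sentences.\n        scores = bm25.get_scores((question + ' ' + c).lower().split())\n        return sum(sorted(scores, reverse=True)[:k])\n    # Predict the candidate choice with the highest score.\n    return max(range(len(candidates)), key=lambda i: candidate_score(candidates[i]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "4.1"

},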
|
{ |
|
"text": "Configuration Setting. We adopt different configurations in the experiments to control for training data, Transformer model, and target QA test dataset fine-tuning. We report the different configurations in the \"Config\" column of Table 6 and Table 7 . The label \"K\" indicates that the model is trained only on the explanatory knowledge acquisition task, \"Q\" means that the model is trained only on the cloze-style QA task using WorldTree as reference dataset, \"K+Q\" means that the model is pre-trained for explanatory knowledge acquisition and then further fine-tuned on cloze-style QA (again using only WorldTree as training dataset). Moreover, \"base\" means using BERT-base as Transformer model, while \"large\" means using BERT-large. Finally, \"FT\" means that the model is additionally finetuned on the target QA dataset's training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 249, |
|
"text": "Table 6 and Table 7", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In Table 6 , we report the performance of N-XKT under different configurations along with the accuracy of the BM25 baseline with K = 5 number of facts. The models are tested across multiple QA datasets including WorldTree, ARC, and Open-bookQA.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Results on Zero-shot Science Question Answering", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "From the results, we derive the following conclusions. First, the proposed N-XKT model can clearly achieve better accuracy than the BM25 baseline since N-XKT uses Transformer-based neural mechanisms to acquire and encode external knowledge. Second, using BERT-large instead of BERTbase as initial Transformer can improve the performance since BERT-large contains more parameters than BERT-base. However, we found that the advantage of using BERT-large is not significant since more parameters implies more resources needed for training. Third, we observe than N-XKT obtains better performance than pre-trained BERT when fine-tuning on the target datasets. : Accuracy comparison between N-XKT and othe approaches. External KB adopted by the models: 1.ARCcorpus , 2.ConceptNet (Speer et al., 2017) , 3.Wikipedia (https://www.wikipedia.org/), 4.SciTail ), 5.SNLI (Bowman et al., 2015 , 6.MultiNLI (Williams et al., 2018) , 7.RACE (Lai et al., 2017) , 8.MCScript (Ostermann et al., 2018) , 9.WorldTree (Jansen et al., 2018 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 775, |
|
"end": 795, |
|
"text": "(Speer et al., 2017)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 850, |
|
"end": 880, |
|
"text": "), 5.SNLI (Bowman et al., 2015", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 894, |
|
"end": 917, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 927, |
|
"end": 945, |
|
"text": "(Lai et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 959, |
|
"end": 983, |
|
"text": "(Ostermann et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 998, |
|
"end": 1018, |
|
"text": "(Jansen et al., 2018", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Results on Zero-shot Science Question Answering", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To understand the impact of different types of explanation on the final accuracy, we breakdown the facts stored in the knowledge base using three different categories (i.e., retrieval, inferencesupporting and complex inference) and rerun the training of the N-XKT model using only one category per time. The adopted categories are provided in the WorldTree corpus and can be described as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Analysis on Impact of Different Explanatory Knowledge Types", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 Retrieval: facts expressing knowledge about taxonomic relations and/or properties.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Analysis on Impact of Different Explanatory Knowledge Types", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 Inference-Supporting: Facts expressing knowledge about actions, affordances, requirements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Analysis on Impact of Different Explanatory Knowledge Types", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 Complex Inference: Facts expressing knowledge about causality, processes, and if/then relationships.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Analysis on Impact of Different Explanatory Knowledge Types", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The obtained accuracy is showed in Table 7 . The results highlight the importance of using all the explanation categories to achieve the final accuracy for the combined approach. However, the retrieval category seems to have a higher impact on the generalisation. We believe that this result is due to the taxonomic knowledge encoded in the retrieval category (i.e. \"x is a kind of y\"), which facilitates the acquisition of the implicit explanatory capabilities necessary for answering science questions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 42, |
|
"text": "Table 7", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Analysis on Impact of Different Explanatory Knowledge Types", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In Table 7 , we compare the impact of different explanatory knowledge types and get the following conclusion. 1) All three types of explanatory knowledge are helpful for further science QA task. The results using all three types of knowledge are significantly better than the results obtained when using no explanatory knowledge at all (first rown in Table 7) . 2) The model trained on all explanatory knowledge outperforms the models using each individual type of knowledge alone, confirming that different types of knowledge are complementary for achieving the final performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 7", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 359, |
|
"text": "Table 7)", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Analysis on Impact of Different Explanatory Knowledge Types", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Start-of-the-art baselines", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluating Zero-shot N-XKT with", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "In Table 5 , we evaluate several start-of-the-art methods as baselines along with N-XKT trained only on the WorldTree. The table reports the accuracy results on ARC and OpenbookQA. In the \"External KB\" column, we list the external Knowledge Bases (KB) adopted by different models. The \"IR-based\" column indicates whether the model adopts Information Retrieval (IR) techniques, and the \"Finetuned\" column indicates whether the approach is fine-tuned on the target dataset. Table 5 is intended to provide a general comparative analysis between N-XKT and the baseline models, most of them fine-tuned on the target datasets. N-XKT is able to achieve comparable performance under a transfer learning setting. The generalization performance of the proposed model is more noticeable for the ARC Challenge dataset, which requires the implicit encoding of more complex explanatory knowledge. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 5", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 479, |
|
"text": "Table 5", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluating Zero-shot N-XKT with", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "In Figure 2 , we visualize the convergence curve for the fine-tuning over three science QA tasks (ARC Easy, ARC Challenge and OpenBookQA), comparing a pure BERT-based N-XKT model with a pre-trained N-XKT models using different configurations, AFK (pre-trained on explanatory knowledge acquisition), QAP (pre-trained on WorldTree cloze-style QA), AFK+QAP (pre-trained on both). It is noticeable that the encoding of explanatory knowledge impacts the convergence of the model for all three datasets, with a particular emphasis on the two ARC variants.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Improvement on Fine-tuning Convergence", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "In this paper, we proposed a neural encoding mechanism for explanatory knowledge acquisition and transfer, N-XKT. We evaluated the impact of the encoding mechanism on downstream science QA. The proposed model delivers better generalisation and accuracy for QA tasks that require multi-hop and explanatory inference. The proposed encoding mechanism can be used to deliver zero-shot inference capabilities, providing comparable performance when compared to supervised models on QA. These results supports the hypothesis that pretraining tasks targeting abstract and explanatory knowledge acquisition can constitute and impor-tant direction to improve inference capabilities and generalization of state-of-the-art neural models. Tab. 6 is for overall accuracy of N-XKT model on QA tasks, and Tab. 7 is for ablation analysis results, only use part of explanations in training process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://allenai.org/data/arc 3 https://allenai.org/data/open-book-qa", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The N-XKT mainly use a transformer network as natural language encoder component, the hyperparameters of transformer network training have been tuned manually for the optimisation is the maximisation of the accuracy in answer prediction. Specifically, 3 parameters should be set for training, train batch size \u03b2, learning rate \u03b1, and train epoch N . The values used in pre-training on explanation knowledge base are as follows:The values used in fine-tuning on Question Answer are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Hyperparameters tuning", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use two versions of Explanation Bank Scientific Question Answer datasets in this paper. The version 1 of Explanation Bank dataset can be downloaded at the following URL: http: //cognitiveai.org/dist/worldtree_corpus_ textgraphs2019sharedtask_withgraphvis.zip. The version 2 of Explanation Bank dataset is available at the following URL: https: //github.com/cognitiveailab/tg2020task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To accelerate the training process of the experiments, we adopt a NVIDIA Tesla V100 GPU.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C Computing Infrastructure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We repeat the N-XKT model Question Answering training process on all the dataset for 5 times, each time with random parameters initialization. Addition to the tables provided in paper, we report the detailed results with standard deviation in following tables.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D Accuracy Results Including Standard Deviation", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Learning to retrieve reasoning paths over wikipedia graph for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Akari", |
|
"middle": [], |
|
"last": "Asai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuma", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akari Asai, Kazuma Hashimoto, Hannaneh Hajishirzi, Richard Socher, and Caiming Xiong. 2020. Learn- ing to retrieve reasoning paths over wikipedia graph for question answering. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "SciB-ERT: A pretrained language model for scientific text", |
|
"authors": [ |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3615--3620", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1371" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciB- ERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3615- 3620, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Translating embeddings for modeling multirelational data", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Usunier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Garcia-Duran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oksana", |
|
"middle": [], |
|
"last": "Yakhnenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Bordes, Nicolas Usunier, Alberto Garcia- Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi- relational data. In Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A large annotated corpus for learning natural language inference", |
|
"authors": [ |
|
{

"first": "Samuel",

"middle": [

"R"

],

"last": "Bowman",

"suffix": ""

},

{

"first": "Gabor",

"middle": [],

"last": "Angeli",

"suffix": ""

},

{

"first": "Christopher",

"middle": [],

"last": "Potts",

"suffix": ""

},

{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1075" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 632-642, Lisbon, Portugal. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Think you have solved question answering? try arc, the ai2 reasoning challenge", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isaac", |
|
"middle": [], |
|
"last": "Cowhey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carissa", |
|
"middle": [], |
|
"last": "Schoenick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oyvind", |
|
"middle": [], |
|
"last": "Tafjord", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question an- swering? try arc, the ai2 reasoning challenge.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Niket Tandon, Sumithra Bhakthavatsalam, Dirk Groeneveld, Michal Guerquin, and Michael Schmitz. 2020. From 'f' to 'a' on the n.y. regents science exams: An overview of the aristo project", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Khashabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bhavana", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carissa", |
|
"middle": [], |
|
"last": "Schoenick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oyvind", |
|
"middle": [], |
|
"last": "Tafjord", |
|
"suffix": "" |
|
},

{

"first": "Niket",

"middle": [],

"last": "Tandon",

"suffix": ""

},

{

"first": "Sumithra",

"middle": [],

"last": "Bhakthavatsalam",

"suffix": ""

},

{

"first": "Dirk",

"middle": [],

"last": "Groeneveld",

"suffix": ""

},

{

"first": "Michal",

"middle": [],

"last": "Guerquin",

"suffix": ""

},

{

"first": "Michael",

"middle": [],

"last": "Schmitz",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "AI Magazine", |
|
"volume": "41", |
|
"issue": "4", |
|
"pages": "39--53", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aimag.v41i4.5304" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Clark, Oren Etzioni, Tushar Khot, Daniel Khashabi, Bhavana Mishra, Kyle Richardson, Ashish Sabharwal, Carissa Schoenick, Carissa Schoenick, Oyvind Tafjord, Niket Tandon, Sum- ithra Bhakthavatsalam, Dirk Groeneveld, Michal Guerquin, and Michael Schmitz. 2020. From 'f' to 'a' on the n.y. regents science exams: An overview of the aristo project. AI Magazine, 41(4):39-53.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Cognitive graph for multi-hop reading comprehension at scale", |
|
"authors": [ |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qibin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongxia", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2694--2703", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1259" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ming Ding, Chang Zhou, Qibin Chen, Hongxia Yang, and Jie Tang. 2019. Cognitive graph for multi-hop reading comprehension at scale. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 2694-2703, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs", |
|
"authors": [ |
|
{ |
|
"first": "Dheeru", |
|
"middle": [], |
|
"last": "Dua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep", |
|
"middle": [], |
|
"last": "Dasigi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Stanovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2368--2378", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1246" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, and Matt Gardner. 2019. DROP: A reading comprehension benchmark requir- ing discrete reasoning over paragraphs. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2368-2378, Min- neapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "What's in an explanation? characterizing knowledge and inference requirements for elementary science exams", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niranjan", |
|
"middle": [], |
|
"last": "Balasubramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2956--2965", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Jansen, Niranjan Balasubramanian, Mihai Sur- deanu, and Peter Clark. 2016. What's in an expla- nation? characterizing knowledge and inference re- quirements for elementary science exams. In Pro- ceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Techni- cal Papers, pages 2956-2965, Osaka, Japan. The COLING 2016 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "TextGraphs 2019 shared task on multi-hop inference for explanation regeneration", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Ustalov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Thirteenth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-13)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--77", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5309" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Jansen and Dmitry Ustalov. 2019. TextGraphs 2019 shared task on multi-hop inference for expla- nation regeneration. In Proceedings of the Thir- teenth Workshop on Graph-Based Methods for Nat- ural Language Processing (TextGraphs-13), pages 63-77, Hong Kong. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "WorldTree: A corpus of explanation graphs for elementary science questions supporting multi-hop inference", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Wainwright", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Marmorstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clayton", |
|
"middle": [], |
|
"last": "Morrison", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Jansen, Elizabeth Wainwright, Steven Mar- morstein, and Clayton Morrison. 2018. WorldTree: A corpus of explanation graphs for elementary sci- ence questions supporting multi-hop inference. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Re- sources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Question answering as global reasoning over semantic abstractions", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Khashabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Khashabi, Tushar Khot, Ashish Sabharwal, and D. Roth. 2018. Question answering as global rea- soning over semantic abstractions. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Qasc: A dataset for question answering via sentence composition", |
|
"authors": [ |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Guerquin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "8082--8090", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v34i05.6319" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tushar Khot, Peter Clark, Michal Guerquin, Peter Jansen, and Ashish Sabharwal. 2020. Qasc: A dataset for question answering via sentence compo- sition. Proceedings of the AAAI Conference on Arti- ficial Intelligence, 34(05):8082-8090.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Answering complex questions using open information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "311--316", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-2049" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tushar Khot, Ashish Sabharwal, and Peter Clark. 2017. Answering complex questions using open informa- tion extraction. In Proceedings of the 55th Annual Meeting of the Association for Computational Lin- guistics (Volume 2: Short Papers), pages 311-316, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Scitail: A textual entailment dataset from science question answering", |
|
"authors": [ |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5189--5197", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tushar Khot, Ashish Sabharwal, and Peter Clark. 2018. Scitail: A textual entailment dataset from science question answering. In Proceedings of the Thirty- Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Arti- ficial Intelligence (IAAI-18), and the 8th AAAI Sym- posium on Educational Advances in Artificial Intel- ligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pages 5189-5197. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Exploiting explicit paths for multi-hop reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Souvik", |
|
"middle": [], |
|
"last": "Kundu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2737--2747", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1263" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Souvik Kundu, Tushar Khot, Ashish Sabharwal, and Peter Clark. 2019. Exploiting explicit paths for multi-hop reading comprehension. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2737-2747, Flo- rence, Italy. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "RACE: Large-scale ReAding comprehension dataset from examinations", |
|
"authors": [ |
|
{ |
|
"first": "Guokun", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qizhe", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanxiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "785--794", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1082" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. 2017. RACE: Large-scale ReAd- ing comprehension dataset from examinations. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 785-794, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Answering elementary science questions by constructing coherent scenes using background knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2007--2012", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1236" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Li and Peter Clark. 2015. Answering elementary science questions by constructing coherent scenes using background knowledge. In Proceedings of the 2015 Conference on Empirical Methods in Nat- ural Language Processing, pages 2007-2012, Lis- bon, Portugal. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Learning entity and relation embeddings for knowledge graph completion", |
|
"authors": [ |
|
{ |
|
"first": "Yankai", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, AAAI'15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2181--2187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yankai Lin, Zhiyuan Liu, Maosong Sun, Yang Liu, and Xuan Zhu. 2015. Learning entity and relation em- beddings for knowledge graph completion. In Pro- ceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, AAAI'15, page 2181-2187. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Can a suit of armor conduct electricity? a new dataset for open book question answering", |
|
"authors": [ |
|
{ |
|
"first": "Todor", |
|
"middle": [], |
|
"last": "Mihaylov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tushar", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sabharwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Todor Mihaylov, Peter Clark, Tushar Khot, and Ashish Sabharwal. 2018. Can a suit of armor conduct elec- tricity? a new dataset for open book question answer- ing. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Answering science exam questions using query reformulation with background knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Musa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Achille", |
|
"middle": [], |
|
"last": "Fokoue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Mattei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavan", |
|
"middle": [], |
|
"last": "Kapanipathi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bassem", |
|
"middle": [], |
|
"last": "Makni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartik", |
|
"middle": [], |
|
"last": "Talamadupula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Witbrock", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Automated Knowledge Base Construction (AKBC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Musa, Xiaoyan Wang, Achille Fokoue, Nicholas Mattei, Maria Chang, Pavan Kapanipathi, Bassem Makni, Kartik Talamadupula, and Michael Wit- brock. 2019. Answering science exam questions using query reformulation with background knowl- edge. In Automated Knowledge Base Construction (AKBC).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Learning to attend on essential terms: An enhanced retriever-reader model for opendomain question answering", |
|
"authors": [ |
|
{ |
|
"first": "Jianmo", |
|
"middle": [], |
|
"last": "Ni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenguang", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weizhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Mcauley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "335--344", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1030" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianmo Ni, Chenguang Zhu, Weizhu Chen, and Julian McAuley. 2019. Learning to attend on essential terms: An enhanced retriever-reader model for open- domain question answering. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 335-344, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "MCScript: A novel dataset for assessing machine comprehension using script knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Ostermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Modi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Ostermann, Ashutosh Modi, Michael Roth, Ste- fan Thater, and Manfred Pinkal. 2018. MCScript: A novel dataset for assessing machine comprehension using script knowledge.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Improving retrieval-based question answering with deep inference models", |
|
"authors": [ |
|
{ |
|
"first": "George-Sebastian", |
|
"middle": [], |
|
"last": "P\u00eertoac\u0203", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Traian", |
|
"middle": [], |
|
"last": "Rebedea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Tefan Rus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 International Joint Conference on Neural Networks (IJCNN)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/IJCNN.2019.8851826" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George-Sebastian P\u00eertoac\u0203, Traian Rebedea, and S , tefan Rus , et , i. 2019. Improving retrieval-based question an- swering with deep inference models. In 2019 In- ternational Joint Conference on Neural Networks (IJCNN), pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Dynamically fused graph network for multi-hop reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunxuan", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanru", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weinan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6140--6150", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1617" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin Qiu, Yunxuan Xiao, Yanru Qu, Hao Zhou, Lei Li, Weinan Zhang, and Yong Yu. 2019. Dynami- cally fused graph network for multi-hop reasoning. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6140-6150, Florence, Italy. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Improving language understanding by generative pre-training", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Radford. 2018. Improving language understanding by generative pre-training.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Explain yourself! leveraging language models for commonsense reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Nazneen Fatema Rajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4932--4942", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1487" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. 2019. Explain yourself! leveraging language models for commonsense rea- soning. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 4932-4942, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Science question answering using instructional materials", |
|
"authors": [ |
|
{ |
|
"first": "Mrinmaya", |
|
"middle": [], |
|
"last": "Sachan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kumar", |
|
"middle": [], |
|
"last": "Dubey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "467--473", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-2076" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mrinmaya Sachan, Kumar Dubey, and Eric Xing. 2016. Science question answering using instructional ma- terials. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 467-473, Berlin, Germany. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Conceptnet 5.5: An open multilingual graph of general knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Robyn", |
|
"middle": [], |
|
"last": "Speer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Chin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Catherine", |
|
"middle": [], |
|
"last": "Havasi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence, AAAI'17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4444--4451", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robyn Speer, Joshua Chin, and Catherine Havasi. 2017. Conceptnet 5.5: An open multilingual graph of general knowledge. In Proceedings of the Thirty- First AAAI Conference on Artificial Intelligence, AAAI'17, page 4444-4451. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Improving machine reading comprehension with general reading strategies", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dian", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2633--2643", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1270" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Sun, Dian Yu, Dong Yu, and Claire Cardie. 2019. Improving machine reading comprehension with general reading strategies. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 2633-2643, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "A survey on explainability in machine reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mokanarangan", |
|
"middle": [], |
|
"last": "Thayaparan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Valentino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Freitas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mokanarangan Thayaparan, Marco Valentino, and Andr\u00e9 Freitas. 2020. A survey on explainability in machine reading comprehension.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Identifying supporting facts for multi-hop question answering with document graph networks", |
|
"authors": [ |
|
{ |
|
"first": "Mokanarangan", |
|
"middle": [], |
|
"last": "Thayaparan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Valentino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viktor", |
|
"middle": [], |
|
"last": "Schlegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Freitas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Thirteenth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-13)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--51", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5306" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mokanarangan Thayaparan, Marco Valentino, Viktor Schlegel, and Andr\u00e9 Freitas. 2019. Identifying supporting facts for multi-hop question answering with document graph networks. In Proceedings of the Thirteenth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-13), pages 42-51, Hong Kong. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Unification-based reconstruction of multi-hop explanations for science questions", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Valentino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mokanarangan", |
|
"middle": [], |
|
"last": "Thayaparan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Freitas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "200--211", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Valentino, Mokanarangan Thayaparan, and Andr\u00e9 Freitas. 2021. Unification-based reconstruc- tion of multi-hop explanations for science questions. In Proceedings of the 16th Conference of the Euro- pean Chapter of the Association for Computational Linguistics: Main Volume, pages 200-211, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, volume 30. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Yuanfudao at SemEval-2018 task 11: Three-way attention and relational knowledge for commonsense machine comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kewei", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingming", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "758--762", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S18-1120" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Wang, Meng Sun, Wei Zhao, Kewei Shen, and Jingming Liu. 2018. Yuanfudao at SemEval- 2018 task 11: Three-way attention and relational knowledge for commonsense machine comprehen- sion. pages 758-762.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Knowledge graph embedding by translating on hyperplanes", |
|
"authors": [ |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianwen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianlin", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence, AAAI'14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1112--1119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge graph embedding by trans- lating on hyperplanes. In Proceedings of the Twenty- Eighth AAAI Conference on Artificial Intelligence, AAAI'14, page 1112-1119. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "WorldTree v2: A corpus of sciencedomain structured explanations and inference patterns supporting multi-hop inference", |
|
"authors": [ |
|
{ |
|
"first": "Zhengnan", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Thiem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaycie", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Wainwright", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Marmorstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5456--5473", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhengnan Xie, Sebastian Thiem, Jaycie Martin, Eliz- abeth Wainwright, Steven Marmorstein, and Peter Jansen. 2020. WorldTree v2: A corpus of science- domain structured explanations and inference pat- terns supporting multi-hop inference. In Proceed- ings of the 12th Language Resources and Evaluation Conference, pages 5456-5473, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering", |
|
"authors": [ |
|
{ |
|
"first": "Vikas", |
|
"middle": [], |
|
"last": "Yadav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2578--2589", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1260" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikas Yadav, Steven Bethard, and Mihai Surdeanu. 2019. Quick and (not so) dirty: Unsupervised se- lection of justification sentences for multi-hop ques- tion answering. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 2578-2589, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Sanity check: A strong alignment and information retrieval baseline for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Vikas", |
|
"middle": [], |
|
"last": "Yadav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Sharp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The 41st International ACM SIGIR Conference on Research Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikas Yadav, Rebecca Sharp, and M. Surdeanu. 2018. Sanity check: A strong alignment and information retrieval baseline for question answering. The 41st International ACM SIGIR Conference on Research Development in Information Retrieval.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "HotpotQA: A dataset for diverse, explainable multi-hop question answering", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saizheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2369--2380", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1259" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William Cohen, Ruslan Salakhutdinov, and Christo- pher D. Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answer- ing. In Proceedings of the 2018 Conference on Em- pirical Methods in Natural Language Processing, pages 2369-2380, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Kg2: Learning to reason science exam questions with contextual knowledge graph embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kamil", |
|
"middle": [], |
|
"last": "Toraman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Zhang, H. Dai, Kamil Toraman, and L. Song. 2018. Kg2: Learning to reason science exam ques- tions with contextual knowledge graph embeddings. ArXiv, abs/1805.12393.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Improving question answering by commonsense-based pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Wanjun", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duyu", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiahai", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NLPCC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wanjun Zhong, Duyu Tang, Nan Duan, M. Zhou, Ji- ahai Wang, and J. Yin. 2019. Improving question answering by commonsense-based pre-training. In NLPCC.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Outline of the proposed approach." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Convergence curve when fine-tuning different version of N-XTK on the target QA datasets." |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td>Dataset</td><td colspan=\"3\">#Train #Dev #Test</td></tr><tr><td colspan=\"4\">WorldTree V2.0 3,947 1,019 4,165</td></tr><tr><td>ARC Easy</td><td>2,251</td><td colspan=\"2\">570 2,376</td></tr><tr><td colspan=\"2\">ARC Challenge 1,119</td><td colspan=\"2\">299 1,172</td></tr><tr><td>Openbook QA</td><td>4,957</td><td>500</td><td>500</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "QA datasets size." |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td colspan=\"2\">: Number of instances in each explanatory</td></tr><tr><td>knowledge category.</td><td/></tr><tr><td>Type</td><td>Size</td></tr><tr><td>All</td><td>9,701</td></tr><tr><td>Retrieval</td><td>7,006</td></tr><tr><td colspan=\"2\">Inference-supporting 1,670</td></tr><tr><td>Complex Inference</td><td>1,025</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>Config</td><td colspan=\"2\">Explanation Bank Dev Test</td><td>ARC Easy Dev Test</td><td>ARC Challenge Dev Test</td><td>Openbook QA Dev Test</td></tr><tr><td>IR BM25 (K = 5Q base</td><td colspan=\"5\">44.86% 40.34% 50.81% 47.43% 24.41% 26.86% 27.92% 33.12%</td></tr><tr><td>K+Q base</td><td colspan=\"5\">58.14% 50.42% 58.53% 57.98% 37.46% 35.87% 35.32% 37.60%</td></tr><tr><td>K large</td><td colspan=\"5\">51.62% 45.85% 52.81% 52.58% 37.53% 33.07% 31.72% 34.12%</td></tr><tr><td>Q large</td><td colspan=\"5\">47.54% 43.47% 53.61% 51.41% 27.09% 28.63% 28.24% 36.04%</td></tr><tr><td>K+Q large</td><td colspan=\"5\">60.16% 50.98% 61.19% 58.24% 39.00% 37.63% 35.64% 38.20%</td></tr><tr><td>base FT</td><td>-</td><td>-</td><td colspan=\"3\">53.61% 53.82% 36.72% 32.71% 53.64% 53.16%</td></tr><tr><td>K base FT</td><td>-</td><td>-</td><td colspan=\"3\">53.61% 52.81% 35.79% 34.90% 53.60% 54.60%</td></tr><tr><td>Q base FT</td><td>-</td><td>-</td><td colspan=\"3\">59.05% 58.44% 33.65% 35.09% 56.04% 57.08%</td></tr><tr><td>K+Q base FT</td><td/><td/><td colspan=\"3\">59.33% 58.79% 38.13% 38.09% 56.12% 56.56%</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "N-XKT Question Answering accuracy results. ) 50.29% 44.55% 54.56% 50.00% 37.46% 31.14% 24.80% 26.80% K base 49.30% 44.74% 50.18% 50.89% 34.38% 33.17% 30.96% 32.72%" |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td>Knowledge</td><td>Config</td><td>Explanation Bank Dev Test</td><td>ARC Easy Dev Test</td><td>ARC Challenge Dev Test</td><td>Openbook QA Dev Test</td></tr><tr><td>None</td><td>Q base</td><td>44.86% 40.34%</td><td>50.81% 47.43%</td><td>24.41% 26.86%</td><td>27.92% 33.12%</td></tr><tr><td>Retrieval</td><td colspan=\"2\">K base K+Q base 51.00% 46.08% 39.05% 38.72%</td><td>44.42% 45.25% 51.79% 53.22%</td><td>23.75% 26.25% 34.65% 33.00%</td><td>27.12% 29.96% 31.96% 32.96%</td></tr><tr><td>Inference-supporting</td><td colspan=\"2\">K base K+Q base 52.72% 47.33% 41.60% 38.24%</td><td>45.96% 44.77% 54.35% 54.32%</td><td>26.09% 26.02% 34.85% 34.40%</td><td>27.40% 30.88% 33.64% 37.16%</td></tr><tr><td>Complex inference</td><td colspan=\"2\">K base K+Q base 52.99% 46.12% 41.01% 38.58%</td><td>46.32% 45.98% 55.30% 52.74%</td><td>24.95% 23.75% 34.78% 34.51%</td><td>26.96% 29.76% 32.08% 35.08%</td></tr><tr><td>All</td><td colspan=\"2\">K base K+Q base 58.49.30% 44.74%</td><td>50.18% 50.89%</td><td>34.38% 33.17%</td><td>30.96% 32.72%</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Question Answering accuracy results using different explanatory knowledge categories. 14% 50.42% 58.53% 57.98% 37.46% 35.87% 35.32% 37.60%" |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td/><td colspan=\"6\">ARC Easy ARC Challenge Openbook QA External KB IR-based Fine-tuned</td></tr><tr><td>IR BM25 (K = 5)</td><td>50.00%</td><td>31.14%</td><td>26.80%</td><td>9</td><td>yes</td><td>no</td></tr><tr><td>Clark et al. (2018)</td><td>62.60%</td><td>20.30%</td><td>-</td><td>1</td><td>yes</td><td>yes</td></tr><tr><td>Mihaylov et al. (2018)</td><td>-</td><td>-</td><td>50.20%</td><td>2, 3</td><td>yes</td><td>yes</td></tr><tr><td>Khot et al. (2018)</td><td>59.00%</td><td>27.10%</td><td>24.40%</td><td>4</td><td>yes</td><td>yes</td></tr><tr><td>Zhang et al. (2018)</td><td>-</td><td>31.70%</td><td>-</td><td>1</td><td>no</td><td>yes</td></tr><tr><td>Yadav et al. (2018)</td><td>58.40%</td><td>26.60%</td><td>-</td><td>none</td><td>no</td><td>yes</td></tr><tr><td>Musa et al. (2019)</td><td>52.20%</td><td>33.20%</td><td>-</td><td>1</td><td>yes</td><td>yes</td></tr><tr><td>Zhong et al. (2019)</td><td>-</td><td>33.40%</td><td>-</td><td>2</td><td>no</td><td>yes</td></tr><tr><td>P\u00eertoac\u0203 et al. (2019)</td><td>61.10%</td><td>26.90%</td><td>-</td><td>4, 5, 6</td><td>no</td><td>yes</td></tr><tr><td>Ni et al. (2019)</td><td>-</td><td>36.60%</td><td>-</td><td>7, 8</td><td>no</td><td>yes</td></tr><tr><td>GP T II (Radford, 2018)</td><td>57.00%</td><td>38.20%</td><td>52.00%</td><td>7</td><td>no</td><td>yes</td></tr><tr><td>RS II (Sun et al., 2019)</td><td>66.60%</td><td>40.70%</td><td>55.20%</td><td>7</td><td>no</td><td>yes</td></tr><tr><td>N-XKT K+Q base (ours)</td><td>57.98%</td><td>35.87%</td><td>37.60%</td><td>9</td><td>no</td><td>no</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": ")." |
|
}, |
|
"TABREF7": { |
|
"content": "<table><tr><td>Config</td><td colspan=\"2\">Explanation Bank Dev Test</td><td colspan=\"2\">ARC Easy Dev Test</td><td colspan=\"2\">ARC Challenge Dev Test</td><td colspan=\"2\">Openbook QA Dev Test</td></tr><tr><td>IR BM25 (K = 5)</td><td>50.29%</td><td>44.55%</td><td>54.56%</td><td>50.00%</td><td>37.46%</td><td>31.14%</td><td>24.80%</td><td>26.80%</td></tr><tr><td>K base</td><td colspan=\"8\">49.30% \u00b10.0238 \u00b10.0166 \u00b10.0167 \u00b10.0198 \u00b10.0255 \u00b10.0165 \u00b10.0359 \u00b10.0273 44.74% 50.18% 50.89% 34.38% 33.17% 30.96% 32.72%</td></tr><tr><td>Q base</td><td colspan=\"8\">44.86% \u00b10.0229 \u00b10.0087 \u00b10.0258 \u00b10.0136 \u00b10.0101 \u00b10.0049 \u00b10.0342 \u00b10.0176 40.34% 50.81% 47.43% 24.41% 26.86% 27.92% 33.12%</td></tr><tr><td>K+Q base</td><td>58.14%</td><td>50.42%</td><td>58.53%</td><td>57.98%</td><td>37.46%</td><td>35.87%</td><td>35.32%</td><td>37.60%</td></tr><tr><td>K large</td><td colspan=\"2\">51.62% \u00b10.0159 \u00b10.0089 45.85%</td><td>52.81% \u00b10.004</td><td colspan=\"5\">52.58% \u00b10.0136 \u00b10.0109 \u00b10.0129 \u00b10.0199 \u00b10.0232 37.53% 33.07% 31.72% 34.12%</td></tr><tr><td>Q large</td><td colspan=\"4\">47.54% \u00b10.0131 \u00b10.0061 \u00b10.0176 \u00b10.0073 43.47% 53.61% 51.41%</td><td>27.09% \u00b10.012</td><td colspan=\"3\">28.63% \u00b10.0125 \u00b10.0118 \u00b10.0167 28.24% 36.04%</td></tr><tr><td>K+Q large</td><td>60.16%</td><td>50.98%</td><td>61.19%</td><td>58.24%</td><td>39.00%</td><td>37.63%</td><td>35.64%</td><td>38.20%</td></tr><tr><td>base FT</td><td>--</td><td>--</td><td colspan=\"6\">53.61% \u00b10.0168 \u00b10.0093 \u00b10.0104 \u00b10.0086 \u00b10.0182 \u00b10.0223 53.82% 36.72% 32.71% 53.64% 53.16%</td></tr><tr><td>K base FT</td><td>--</td><td>--</td><td colspan=\"6\">53.61% \u00b10.0159 \u00b10.0241 \u00b10.0218 \u00b10.0239 \u00b10.0248 \u00b10.0281 52.81% 35.79% 34.90% 53.60% 54.60%</td></tr><tr><td>Q base FT</td><td>--</td><td>--</td><td colspan=\"6\">59.05% \u00b10.0177 \u00b10.0070 \u00b10.0280 \u00b10.0065 \u00b10.0126 \u00b10.0178 58.44% 33.65% 35.09% 56.04% 57.08%</td></tr><tr><td>K+Q base FT</td><td>-</td><td>-</td><td>59.33%</td><td>58.79%</td><td>38.13%</td><td>38.09%</td><td>56.12%</td><td>56.56%</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "N-XKT Question Answering accuracy result comparison \u00b10.0119 \u00b10.0039 \u00b10.0047 \u00b10.0014 \u00b10.0135 \u00b10.0149 \u00b10.0124 \u00b10.0085 \u00b10.0168 \u00b10.0102 \u00b10.0108 \u00b10.0076 \u00b10.0268 \u00b10.0155 \u00b10.0076 \u00b10.0161 \u00b10.0187 \u00b10.0087 \u00b10.0224 \u00b10.0124 \u00b10.0186 \u00b10.0111" |
|
}, |
|
"TABREF8": { |
|
"content": "<table><tr><td>Knowledge</td><td>Config</td><td colspan=\"2\">Explanation Bank Dev Test</td><td colspan=\"2\">ARC Easy Dev Test</td><td colspan=\"2\">ARC Challenge Dev Test</td><td colspan=\"2\">Openbook QA Dev Test</td></tr><tr><td>None</td><td>Q base</td><td>44.86%</td><td>40.34%</td><td>50.81%</td><td>47.43%</td><td>24.41%</td><td>26.86%</td><td>27.92%</td><td>33.12%</td></tr><tr><td>RET</td><td>K base K+Q base</td><td colspan=\"2\">39.05% \u00b10.0258 \u00b10.0106 38.72% 51.00% 46.08%</td><td>44.42% \u00b10.011 51.79%</td><td colspan=\"5\">45.25% \u00b10.0139 \u00b10.0165 \u00b10.0141 \u00b10.0099 \u00b10.0202 23.75% 26.25% 27.12% 29.96% 53.22% 34.65% 33.00% 31.96% 32.96%</td></tr><tr><td/><td>K base</td><td>41.60%</td><td>38.24%</td><td>45.96%</td><td>44.77%</td><td>26.09%</td><td>26.02%</td><td>27.40%</td><td>30.88%</td></tr><tr><td>INSUPP</td><td>K+Q base</td><td colspan=\"4\">52.72% \u00b10.0247 \u00b10.0062 \u00b10.0206 \u00b10.0092 47.33% 54.35% 54.32%</td><td>34.85% \u00b10.031</td><td colspan=\"3\">34.40% \u00b10.0128 \u00b10.0279 \u00b10.0306 33.64% 37.16%</td></tr><tr><td>COMPLEX</td><td>K base K+Q base</td><td colspan=\"6\">41.01% \u00b10.0132 \u00b10.0035 \u00b10.0134 \u00b10.0091 \u00b10.0263 \u00b10.0066 38.58% 46.32% 45.98% 24.95% 23.75% 52.99% 46.12% 55.30% 52.74% 34.78% 34.51% \u00b10.0098 \u00b10.0131 \u00b10.0081 \u00b10.0087 \u00b10.0112 \u00b10.0194</td><td>26.96% \u00b10.012 32.08% \u00b10.018</td><td>29.76% \u00b10.0163 35.08% \u00b10.0153</td></tr><tr><td/><td>K base</td><td>49.30%</td><td>44.74%</td><td>50.18%</td><td>50.89%</td><td>34.38%</td><td>33.17%</td><td>30.96%</td><td>32.72%</td></tr><tr><td>All</td><td>K+Q base</td><td>58.14%</td><td>50.42%</td><td>58.53%</td><td>57.98%</td><td>37.46%</td><td>35.87%</td><td>35.32%</td><td>37.60%</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Question Answering accuracy result in different abstractive knowledge categories \u00b10.0229 \u00b10.0087 \u00b10.0258 \u00b10.0136 \u00b10.0101 \u00b10.0049 \u00b10.0342 \u00b10.0176 \u00b10.0173 \u00b10.0135 \u00b10.0178 \u00b10.0141 \u00b10.0321 \u00b10.0128 \u00b10.0192 \u00b10.0182 \u00b10.0149 \u00b10.0075 \u00b10.0127 \u00b10.0118 \u00b10.0164 \u00b10.0099 \u00b10.0168 \u00b10.0122 \u00b10.0238 \u00b10.0166 \u00b10.0167 \u00b10.0198 \u00b10.0255 \u00b10.0165 \u00b10.0359 \u00b10.0273 \u00b10.0119 \u00b10.0039 \u00b10.0047 \u00b10.0014 \u00b10.0135 \u00b10.0149 \u00b10.0124 \u00b10.0085" |
|
} |
|
} |
|
} |
|
} |