|
{ |
|
"paper_id": "O15-3001", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T08:10:12.056419Z" |
|
}, |
|
"title": "Designing a Tag-Based Statistical Math Word Problem Solver with Reasoning and Explanation", |
|
"authors": [ |
|
{ |
|
"first": "Yi-Chung", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chao-Chun", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kuang-Yi", |
|
"middle": [], |
|
"last": "Hsu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chien-Tsung", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shen-Yun", |
|
"middle": [], |
|
"last": "Miao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Wei-Yun", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lun-Wei", |
|
"middle": [], |
|
"last": "Ku", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Churn-Jung", |
|
"middle": [], |
|
"last": "Liau", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Keh-Yih", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Academia Sinica", |
|
"location": { |
|
"addrLine": "128 Academia Road, Section 2", |
|
"postCode": "11529", |
|
"settlement": "Nankang", |
|
"region": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper proposes a tag-based statistical framework to solve math word problems with understanding and reasoning. It analyzes the body and question texts into their associated tag-based logic forms, and then performs inference on them. Comparing to those rule-based approaches, the proposed statistical approach alleviates rules coverage and ambiguity resolution problems, and our tag-based approach also provides the flexibility of handling various kinds of related questions with the same body logic form. On the other hand, comparing to those purely statistical approaches, the proposed approach is more robust to the irrelevant information and could more accurately provide the answer. The major contributions of our work are: (1) proposing a tag-based logic representation such that the system is less sensitive to the irrelevant information and could provide answer more precisely; (2) proposing a unified statistical framework for performing reasoning from the given text.", |
|
"pdf_parse": { |
|
"paper_id": "O15-3001", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper proposes a tag-based statistical framework to solve math word problems with understanding and reasoning. It analyzes the body and question texts into their associated tag-based logic forms, and then performs inference on them. Comparing to those rule-based approaches, the proposed statistical approach alleviates rules coverage and ambiguity resolution problems, and our tag-based approach also provides the flexibility of handling various kinds of related questions with the same body logic form. On the other hand, comparing to those purely statistical approaches, the proposed approach is more robust to the irrelevant information and could more accurately provide the answer. The major contributions of our work are: (1) proposing a tag-based logic representation such that the system is less sensitive to the irrelevant information and could provide answer more precisely; (2) proposing a unified statistical framework for performing reasoning from the given text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Since Big Data mainly aims to explore the correlation between surface features but not their underlying causality relationship, the Big Mechanism 1 program was initiated by DARPA 2 Yi-Chung Lin et al. (from July 2014) to find out \"why\" behind the \"Big Data\". However, the pre-requisite for it is that the machine can read each document and learn its associated knowledge, which is the task of Machine Reading (MR) (Strassel et al., 2010) . Therefore, the Natural Language and Knowledge Processing Group, under the Institute of Information Science of Academia Sinica, formally launched a 3-year MR project (from January 2015) to attack this problem.", |
|
"cite_spans": [ |
|
|
{ |
|
"start": 414, |
|
"end": 437, |
|
"text": "(Strassel et al., 2010)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "As a domain-independent MR system is complicated and difficult to build, the math word problem (MWP) (Mukherjee & Garain, 2008) is chosen as the first task to study MR for the following reasons: (1) Since the answer for the MWP cannot be extracted by simply performing keyword matching (as Q&A usually does), MWP thus can act as a test-bed for understanding the text and then drawing the answer via inference. (2) MWP usually possesses less complicated syntax and requires less amount of domain knowledge. It can let the researcher focus on the task of understanding and reasoning, not on how to build a wide-coverage grammar and acquire domain knowledge. (3) The body part of MWP (which mentions the given information for solving the problem) usually consists of only a few sentences. Therefore, the understanding and reasoning procedure could be checked more efficiently. (4) The MWP solver could have its own standalone applications, such as computer tutor, etc. It is not just a toy test case.", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 127, |
|
"text": "(Mukherjee & Garain, 2008)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "According to the framework of making the decision while there are several candidates, previous MWP algebra solvers can be classified into: (1) Rule-based approaches with logic inference (Bobrow, 1964; Slagle, 1965; Charniak, 1968 Charniak, , 1969 Dellarosa, 1986; Bakman, 2007) , which apply rules to get the answer (via identifying entities, quantities, operations, etc.) with a logic inference engine. (2) Rule-based approaches without logic inference (Gelb, 1971; Ballard & Biermann, 1979; Biermann & Ballard, 1980; Biermann et al., 1982; Fletcher, 1985; Hosseini et al., 2014) , which apply rules to get the answer without a logic inference engine. (3) Purely statistics-based approaches (Kushman et al., 2014; Roy et al., 2015) , which use statistical models to identify entities, quantities, operations, and get the answer without conducting language analysis or inference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 200, |
|
"text": "(Bobrow, 1964;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 214, |
|
"text": "Slagle, 1965;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 229, |
|
"text": "Charniak, 1968", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 246, |
|
"text": "Charniak, , 1969", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 263, |
|
"text": "Dellarosa, 1986;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 277, |
|
"text": "Bakman, 2007)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 454, |
|
"end": 466, |
|
"text": "(Gelb, 1971;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 492, |
|
"text": "Ballard & Biermann, 1979;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 518, |
|
"text": "Biermann & Ballard, 1980;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 541, |
|
"text": "Biermann et al., 1982;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 557, |
|
"text": "Fletcher, 1985;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 580, |
|
"text": "Hosseini et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 692, |
|
"end": 714, |
|
"text": "(Kushman et al., 2014;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 732, |
|
"text": "Roy et al., 2015)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The main problem of the rule-based approaches mentioned above is that the coverage rate problem is serious, as rules with wide coverage are difficult and expensive to construct. Also, it is awkward in resolving ambiguity problems. Besides, since they adopt Go/No-Go approach (unlike statistical approaches which can adopt a large Top-N to have high including rates), the error accumulation problem would be severe. On the other hand, the main problem of those approaches not adopting logic inference is that they usually need to implement a new handling procedure for each new type of problems (as the general logic inference mechanism is not adopted). Also, as there is no inference engine to generate the reasoning chain, additional effort would be required for generating the explanation. In contrast, the main problem of those purely statistical approaches is that they are sensitive to irrelevant Designing a with Reasoning and Explanation information (Hosseini et al., 2014) (as the problem is solved without first understanding the text). Also, the performance deteriorates significantly when they encounter complicated problems due to the same reason.", |
|
"cite_spans": [ |
|
{ |
|
"start": 957, |
|
"end": 980, |
|
"text": "(Hosseini et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "To avoid the problems mentioned above, a tag-based statistical framework which is able to perform understanding and reasoning is proposed in this paper. For each body statement (which specifies the given information), the text will be first analyzed into its corresponding semantic tree (with its anaphora/ellipses resolved and semantic roles labeled), and then converted into its associated logic form (via a few mapping rules). The obtained logic form is then mapped into its corresponding domain dependent generic concepts (also expressed in logic form). The same process also goes for the question text (which specifies the desired answer). Finally, the inference (based on the question logic form) is performed on the logic statements derived from the body text. Please note that a statistical model will be applied each time when we have choices.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Furthermore, to reply any kind of questions associated with the given information, we keep all related semantic roles (such as agent, patient, etc.) and associated specifiers (which restrict the given quantity, and is freely exchangeable with the term tag) in the logic form (such as verb(q1,\u9032\u8ca8), agent(q1,\u6587\u5177\u5e97), head(n1 p ,\u7b46), color(n1 p ,\u7d05), etc.), which are regarded as various tags (or conditions) for selecting the appropriate information related to the given question. Therefore, the proposed approach can be regarded as a tag-based statistical approach with logic inference. Since extra-linguistic knowledge would be required for bridging the gap between the linguistic semantic form and the desired logic form, we will extract the desired background knowledge (ontology) from E-HowNet (Chen et al., 2005) for verb-entailment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 792, |
|
"end": 811, |
|
"text": "(Chen et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In comparison with those rule-based approaches, the proposed approach alleviates the ambiguity resolution problem (i.e., selecting the appropriate semantic tree, anaphora/co-reference, domain-dependent concepts, inference rules) via a statistical framework. Furthermore, our tag-based approach provides the flexibility of handling various kinds of possible questions with the same body logic form. On the other hand, in comparison with those purely statistical approaches, the proposed approach is more robust to the irrelevant information (Hosseini et al., 2014) and could provide the answer more precisely (as the semantic analysis and the tag-based logic inference are adopted). In addition, with the given reasoning chain, the explanation could be more easily generated. Last, since logic inference is a general problem solving mechanism, the proposed approach can solve various types of problems that the inference engine could handle (i.e., not only arithmetic or algebra as most approaches aim to).", |
|
"cite_spans": [ |
|
{ |
|
"start": 540, |
|
"end": 563, |
|
"text": "(Hosseini et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The contributions of our work are: (1) Proposing a semantic composition form for abstracting the text meaning to perform semantic reasoning; (2) Proposing verb entailment via E-HowNet for bridging the lexical gap (Moldovan & Rus, 2001 ); (3) Proposing a tag-based logic representation to adopt one body logic form for handling various possible questions; (4) Proposing a set of domain dependent (for math algebra) generic concepts for solving MWP; (5) Proposing a statistical solution type classifier to indicate the way for solving MWP; (6) Proposing a semantic matching method for performing unification; (7) Proposing a statistical framework for performing reasoning from the given text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 234, |
|
"text": "(Moldovan & Rus, 2001", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Since we will have various design options in implementing a math word problem solver, we need some guidelines to judge which option is better when there is a choice. Some principles are thus proposed as follows for this purpose:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "(1) Solutions should be given via understanding and inference (versus the template matching approach proposed in (Kushman et al., 2014) , as the math word problem is just the first case for our text understanding project and we should be able to explain how the answer is obtained.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 135, |
|
"text": "(Kushman et al., 2014)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "(2) The expressiveness of the adopted body logical form should be powerful enough for handling various kinds of possible questions related to the body, which implies that logic form transformation should be information lossless. In other words, all the information carried by the semantic representation should be kept in the corresponding logical form. It also implies that the associated body logical form should be independent on the given question (as we don't know which question will be asked later).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "(3) The dynamically constructed knowledge should not favor any specific kind of problem/question. This principle suggests that the Inference Engine (IE) should regard logic statements as a flat list, instead of adopting a pre-specified hierarchical structure (e.g., the container adopted in (Hosseini et al., 2014) , which is tailored to some kinds of problems/questions). Any desired information will be located from the list via the same mechanism according to the specified conditions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 291, |
|
"end": 314, |
|
"text": "(Hosseini et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "(4) The Logic Form Converter (LFC) should be compositional (Moldovan & Rus, 2001 ) after giving co-reference and solution type 2 , which implies that each sub-tree (or nonterminal node) should be independently transformed regardless of other nodes not under it, and the logic form of a given nonterminal node is formed by concatenating the corresponding logic forms of its associated child-nodes. Order Logic (FOL) predicate like \"\u88dd\u6210(100,\u9846,\u7cd6,5,\u76d2,\u7cd6)\", the problem-independent FOL functions/predicates like \"quan(q1, \u9846 , \u7cd6 ) = 100\", \"quan(q2, \u76d2 , \u7cd6 ) = 5\", \"qmap(m1,q1,q2)\", and \"verb(m1,\u88dd\u6210)\" are adopted to represent the facts provided by problem description 3 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 80, |
|
"text": "(Moldovan & Rus, 2001", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "(6) The LFC should know the global skeleton of the whole given text (which is implicitly implied by the associated semantic segments linked via the given co-reference information) to achieve a reasonable balance between it and the IE.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "(7) The IE should separate the knowledge from the reasoning procedures to ease porting, which denotes that those domain dependent concepts and inference rules should be kept in a declarative form (and could be imported from some separated files); and the inference rules should not be a part of the IE's source code. The block diagram of the proposed MWP solver is shown in Figure 1 . First, every sentence in the MWP, including both body text and the question text, is analyzed by the Language Analysis module, which transforms each sentence into its corresponding Semantic Representation (SR) tree. The sequence of SR trees is then sent to the Problem Resolution module, which adopts logic inference approach to obtain the answer for each question. Finally,", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 382, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Design Principles", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Yi-Chung Lin et al. the Explanation Generation module will explain how the answer is obtained (in natural language text) according to the given reasoning chain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 19, |
|
"text": "Lin et al.", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "6", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As the figure depicted, the Problem Resolution module in our system consists of three components: Solution Type Classifier (STC), LFC and IE. The STC suggests a scenario to solve the problem for every question in an MWP. In order to perform logic inference, the LFC first extracts the related facts from the given SR tree and then represents them as FOL predicates/functions (Russell & Norvig, 2009) . It also transforms each question into an FOL-like utility function according to the assigned solution type. Finally, according to inference rules, the IE derives new facts from the old ones provided by the LFC. Besides, it is also responsible for providing utilities to perform math operations on related facts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 375, |
|
"end": 399, |
|
"text": "(Russell & Norvig, 2009)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "6", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The entities (like noun phrases) or events (like verb phrases) described in the given sentence may be associated with modifiers, which usually restrict the scope (or specify the property) of the entities/events that they are associated. Since the system does not know which kind of questions will be asked when it reads the body sentences, modifiers should be also included in logic expressions (act as specifiers) and involved in binding. Therefore, the reification technique (Jurafsky & Martin, 2000) is employed to map the nonterminals in the given semantic tree, including verb phrases and noun phrases, into quantified objects which can be related to other objects via specified relations. For example, the logic form of the noun phrase \"\u7d05\u7b46(red pens)\" would be \"color(n1,\u7d05)&head(n1,\u7b46)\", where \"n1\" is an identified object referring to the noun phrase. Usually, the specifiers in the Body Logic Form (BLF) are optional in Question Logic Form (QLF), as the body might contain irrelevant text. On the contrary, the specifiers in the QLF are NOT optional (at least in principle) in BLF (i.e., the same (or corresponding) specifier must exist in BLF). This restriction is important as we want to make sure that each argument (which will act as a filtering-condition) in the QLF will be exactly matched to keep irrelevant facts away during the inference procedure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 477, |
|
"end": 502, |
|
"text": "(Jurafsky & Martin, 2000)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "6", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Take the MWP \"\u6587\u5177\u5e97\u9032\u8ca8 2361 \u679d\u7d05\u7b46\u548c 1587 \u679d\u85cd\u7b46(A stationer bought 2361 red pens and 1587 blue pens), \u6587\u5177\u5e97\u5171\u9032\u8ca8\u5e7e\u679d\u7b46(How many pens did the stationer buy)?\" as an example. The STC will assign the operation type \"Sum\" to it. The LFC will extract the following facts from the first sentence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "6", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "quan(q1,\u679d,n1 p )=2361&verb(q1,\u9032\u8ca8)&agent(q1,\u6587\u5177\u5e97)&head(n1 p ,\u7b46)&color(n1 p ,\u7d05) quan(q2,\u679d,n2 p )=1587&verb(q2,\u9032\u8ca8)&agent(q2,\u6587\u5177\u5e97)&head(n2 p ,\u7b46)&color(n2 p ,\u85cd)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "6", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "The quantity-fact \"2361 \u679d\u7d05\u7b46(2361 red pens)\" is represented by \"quan(q1,\u679d,n1 p )=2361\", where the argument \"n1 p \" 4 denotes \"\u7d05\u7b46(red pens)\" due to the facts \"head(n1 p ,\u7b46)\" and \"color(n1 p , \u7d05 )\". Also, those specifiers \"verb(q1, \u9032 \u8ca8 )&agent(q1, \u6587 \u5177 \u5e97 )&head(n1 p , \u7b46)&color(n1 p ,\u7d05)\" are regarded as various tags which will act as different conditions for selecting the appropriate information related to the question specified later. Likewise, the quantity-fact \"1587 \u679d\u85cd\u7b46(1587 blue pens)\" is represented by \"quan(q2,\u679d,n2 p )=1587\". The LFC also issues the utility call \"ASK Sum(quan(?q,\u679d,\u7b46),verb(?q,\u9032\u8ca8)&agent(?q,\u6587\u5177\u5e97))\" (based on the assigned solution type) for the question. Finally, the IE will select out two quantity-facts \"quan(q1, \u679d ,n1 p )=2361\" and \"quan(q2, \u679d ,n2 p )=1587\", and then perform \"Sum\" operation on them to obtain \"3948\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "with Reasoning and Explanation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "If the question in the above example is \"\u6587\u5177\u5e97\u5171\u9032\u8ca8\u5e7e\u679d\u7d05\u7b46(How many red pens did the stationer buy)?\", the LFC will generate the following facts and utility call for this new question:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "with Reasoning and Explanation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "head(n3 p ,\u7b46)&color(n3 p ,\u7d05) ASK Sum(quan(?q,\u679d,n3 p ),verb(?q,\u9032\u8ca8)&agent(?q,\u6587\u5177\u5e97))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "with Reasoning and Explanation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "As the result, the IE will only select the quantity-fact \"quan(q1,\u679d,n1 p )=2361\", because the specifier in QLF (i.e., \"color(n3 p ,\u7d05)\") cannot match the associated specifier \"\u85cd(blue)\" (i.e., \"color(n2 p ,\u85cd)\") of \"quan(q2,\u679d,n2 p )=1587\". After performing \"Sum\" operation on it, we thus obtain the answer \"2361\". Each module will be described in detail as follows (We will skip Explanation Generation due to space limitation. Please refer to for the details).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "with Reasoning and Explanation", |
|
"sec_num": "7" |
|
}, |
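
Before turning to the individual modules, the running example can be tied together with a minimal Python sketch of the tag-based selection idea (an illustration only, not the authors' implementation): quantity-facts carry specifier tags, and a Sum utility keeps only the facts whose tags satisfy every condition of the question logic form. The QuanFact class and sum_quan helper are invented for this sketch, and the pseudo-nonterminal indirection (n1_p) is flattened into plain tags.

```python
from dataclasses import dataclass, field

# A quantity-fact such as quan(q1, 枝, n1_p) = 2361, with its specifier
# tags such as verb(q1, 進貨) and agent(q1, 文具店) stored alongside it.
@dataclass
class QuanFact:
    quan_id: str
    unit: str
    obj: str
    value: float
    tags: dict = field(default_factory=dict)

def sum_quan(facts, unit, obj, conditions):
    """Sum every quantity-fact whose unit/object match and whose tags
    satisfy all the given conditions (the ASK Sum utility call)."""
    total = 0
    for f in facts:
        if f.unit != unit or f.obj != obj:
            continue
        if all(f.tags.get(role) == val for role, val in conditions.items()):
            total += f.value
    return total

facts = [
    QuanFact("q1", "枝", "筆", 2361, {"verb": "進貨", "agent": "文具店", "color": "紅"}),
    QuanFact("q2", "枝", "筆", 1587, {"verb": "進貨", "agent": "文具店", "color": "藍"}),
]

# ASK Sum(quan(?q,枝,筆), verb(?q,進貨)&agent(?q,文具店))  ->  3948
print(sum_quan(facts, "枝", "筆", {"verb": "進貨", "agent": "文具店"}))
# Adding color=紅 as an extra condition keeps the blue pens away  ->  2361
print(sum_quan(facts, "枝", "筆", {"verb": "進貨", "agent": "文具店", "color": "紅"}))
```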
|
{ |
|
"text": "Since the Chinese sentence is a string of characters with no delimiters to mark word boundaries, the first step for analyzing the MWP text is to segment each given sentence string into its corresponding word sequence. Our Chinese word segmentation system (Chen & Ma, 2002; Ma & Chen, 2003) adopts a modularized approach. Independent modules were designed to solve the problems of segmentation ambiguities and identifying unknown words. Segmentation ambiguities are resolved by a hybrid method of using heuristic and statistical rules. Regular-type unknown words are identified by associated regular expressions, and irregular types of unknown words are detected first by their occurrence and then extracted by morphological rules with statistical and morphological constraints. Part-of-Speech tagging is also included in the segmentation system for both known and unknown words by using HMM models and morphological rules. Please refer to (Tseng & Chen, 2002; Tsai & Chen, 2004) for the details.", |
|
"cite_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 272, |
|
"text": "(Chen & Ma, 2002;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 289, |
|
"text": "Ma & Chen, 2003)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 939, |
|
"end": 959, |
|
"text": "(Tseng & Chen, 2002;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 960, |
|
"end": 978, |
|
"text": "Tsai & Chen, 2004)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Analysis (Jurafsky & Martin, 2000)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In order to design a high precision and broad coverage Chinese parser, we had constructed a Chinese grammar via generalizing and specializing the grammar extracted from Sinica Treebank (Hsieh et al., 2013; Hsieh et al., 2014) to achieve this goal. The designed F-PCFG (Feature-embedded Probabilistic Context-free Grammar) parser was based on the probabilities of the grammar rules. It evaluates the plausibility of each syntactic structure to resolve parsing ambiguities. We refine the probability estimation of a syntactic tree (for tree-structure disambiguation) by incorporating word-to-word association strengths. The word-to-word association strengths were self-learned from parsing the CKIP corpus (Hsieh et al., 2007) . A semantic-role assignment capability is also incorporated into the system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 205, |
|
"text": "(Hsieh et al., 2013;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 225, |
|
"text": "Hsieh et al., 2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 704, |
|
"end": 724, |
|
"text": "(Hsieh et al., 2007)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Analysis (Jurafsky & Martin, 2000)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Once the syntactic structure (with semantic roles) for a sentence is obtained, its semantic representation can be further derived through a process of semantic composition (from lexical senses) and achieved near-canonical representations. To represent lexical senses, we had implemented a universal concept-representation mechanism, called E-HowNet (Chen et al., 2005; Huang et al., 2014) . It is a frame-based entity-relation model where word senses are expressed by both primitives (or well-defined senses) and their semantic relations. We utilize E-HowNet to disambiguate word senses by referencing its ontology and the related synsets of the target words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 368, |
|
"text": "(Chen et al., 2005;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 388, |
|
"text": "Huang et al., 2014)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Composition", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "To solve math word problems, it is crucial to know who or what entity is being talked about in the descriptions of problems. This task is called reference resolution, and it can be classified into two types -anaphora resolution and co-reference resolution. Anaphora resolution is the task of finding the antecedent for a single pronoun while co-reference is the task of finding referring expressions (within the problem description) that refer to the same entity. We attack these two types of resolution mainly based on assessing whether a target pronoun/entity coincides its referent candidate in E-HowNet definition. For example, the definition of \"\u5979 (she)\" is \"{3rdPerson|\u4ed6\u4eba:gender={female|\u5973 }}\". Therefore, it would restrict that the valid referent candidates must be a female human, and result in a much fewer number of candidates for further consideration.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Composition", |
|
"sec_num": "3.1.1" |
|
}, |
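
A minimal sketch of this compatibility filtering, with toy E-HowNet-style definitions hand-coded as dictionaries (the real system reads them from E-HowNet; the DEFS table and the candidate records below are invented for illustration):

```python
# Toy E-HowNet-style definitions: each entry lists the semantic features
# a valid referent must satisfy. Hand-coded here for illustration only.
DEFS = {
    "她": {"type": "3rdPerson", "gender": "female"},  # "she"
}
candidates = [
    {"name": "小美", "type": "3rdPerson", "gender": "female"},
    {"name": "小豪", "type": "3rdPerson", "gender": "male"},
]

def compatible(pronoun, candidate):
    """A candidate survives if it matches every feature in the pronoun's
    definition (e.g. 她 requires a female human referent)."""
    spec = DEFS[pronoun]
    return all(candidate.get(k) == v for k, v in spec.items())

print([c["name"] for c in candidates if compatible("她", c)])  # ['小美']
```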
|
{ |
|
"text": "In the following example, the semantic composition, anaphora resolution and co-reference resolution are shown in the 1 We use numbers following words to represent words'positions in a sentence. For instance, \"\u6709(2)\" is the second word in the first sentence. The semantic representation uses a near-canonical representation form, where semantic role labels, such as \"agent\", \"theme\" and \"range\", are marked on each word, and each word is identified with its sense, such as \"\u6709(2): {own|\u6709}\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Composition", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "The co-referents of all sentences in a math problem are marked with the same \"x[#]\". For example, we mark the proper noun \"\u5c0f\u8c6a(1)\" with \"[x1]\" to co-refer with the pronoun \"\u4ed6(4)\" and the second occurrence of the proper noun \"\u5c0f\u8c6a(1)\". In the second sentence of the example, the head of the quantifier \"\uff15\uff16\u5f35\" is omitted in the text but it is recovered in the semantic representation and annotated with a decimal point in its word position. The missing head is recovered as \"\u8cbc\u7d19(5.1)\", which is an extra word with its constructed position based on decimal point.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Composition", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "However, even we know what the given math word problem means, we still might not know how to solve it if we have not been taught for solving the same type of problems in a math class before (i.e., without enough math training/background). Therefore, we need to collect various types of math operations (e.g., addition, subtraction, multiplication, division, sum, etc.), aggregative operations (e.g., Comparison, Set-Operation, etc.) and specific problem types (e.g., Algebra, G.C.D., L.C.M., etc.) that have been taught in the math class. And the LFC needs to know which math operation, aggregative operation or specific problem type should be adopted to solve the given problem. Therefore, we need to map the given semantic representation to a specific problem type. However, this mapping is frequently decided based on the global information across various input sentences (even across body text and question text). Without giving the corresponding mathematic utility/operation, the logic form transformation would be very complicated. A Solution Type Classifier (STC) is thus proposed to decide the desired utility/operation that LFC should adopt (i.e., to perform the mapping).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Solution Type Identification", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Currently, 16 different solution types are specified (in Table 1 ; most of them are self-explained with their names) to cover a wide variety of questions found in our elementary math word corpus. They are listed according to their frequencies found in 75 manually labeled questions. The STC is similar to the Question Type Classifier commonly adopted at Q&A (Loni, 2011) . For mathematic operation type, it will judge which top-level math operation is expected (based on the equation used to get the final answer). For example, if the associated equation is \"Answer = q1 -(q2 \u00d7 q3)\", then \"Subtraction\" will be the assigned math operation type, which matches human reasoning closely.", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 370, |
|
"text": "(Loni, 2011)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 64, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Solution Type Identification", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "with frequency in the training set (75 questions in total).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Table 1. Various solution types for solving elementary school math word problems", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Multiply (24%) Utility (6%) Surplus (4%) L.C.M (2%) Sum (14%) Algebra (5%) Difference (4%) G.C.D (2%) Subtraction (12%) Comparison (5%) Ceil-Division (3%) Addition (1%) Floor-Division (7%) Ratio (5%) Common-Division (3%) Set-Operation (1%)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Table 1. Various solution types for solving elementary school math word problems", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Take the following math word problem as an example, \"\u4e00\u8258\u8f2a\u8239 20 \u5206\u9418\u53ef\u4ee5\u884c\u99db 25 \u516c\u91cc(A boat sails 25 kilometers in 20 minutes)\uff0c 2.5 \u5c0f\u6642\u53ef\u4ee5\u884c\u99db\u591a\u5c11\u516c\u91cc(How far can it sail in 2.5 hours)\uff1f\". Its associated equation is \"Answer = 150 \u00d7 (25\u00f720)\". Therefore, the top-level operation is \"Multiplication\", and it will be the assigned solution type for this example. However, for the problem \"\u67d0\u6578\u4e58\u4ee5 11(Multiply a number with 11)\uff0c \u518d\u9664\u4ee5 4 \u7684 \u7b54\u6848\u662f 22(then divide it by 4. The answer is 22)\uff0c \u67d0\u6578\u662f\u591a\u5c11(What is the number)\uff1f\", its associated equation is \"Answer\u00d711\u00f74 = 22\"; since there is no specific natural top-level operation, the \"Algebra\" solution type will be assigned 5 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Table 1. Various solution types for solving elementary school math word problems", |
|
"sec_num": null |
|
}, |
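
To illustrate how a top-level operation can be read off an answer equation, the sketch below parses the equation's solution expression with Python's ast module and reports the operator at the root of the expression tree. This is an illustration of the labeling convention only; the STC itself is a trained classifier, not a rule:

```python
import ast

# Root-operator labels; equations whose unknown cannot be isolated on one
# side (e.g. "Answer*11/4 == 22") would instead get the "Algebra" type.
OP_NAME = {ast.Add: "Addition", ast.Sub: "Subtraction",
           ast.Mult: "Multiplication", ast.Div: "Division"}

def top_level_operation(rhs: str) -> str:
    """Label the top-level math operation of the right-hand side of an
    answer equation, e.g. 'q1 - (q2 * q3)' -> 'Subtraction'."""
    root = ast.parse(rhs, mode="eval").body
    if isinstance(root, ast.BinOp):
        return OP_NAME.get(type(root.op), "Other")
    return "Other"

print(top_level_operation("q1 - (q2 * q3)"))   # Subtraction
print(top_level_operation("150 * (25 / 20)"))  # Multiplication
```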
|
{ |
|
"text": "The STC will check the SR trees from both the body and the question to make the decision. Therefore, it provides a kind of global decision, and the LFC will perform logic transformation based on it (i.e., the statistical model of the LFC is formulated to condition on the solution type). Currently, a SVM classifier with linear kernel functions (Chang & Lin, 2011) is used, and it adopted four different kinds of feature-sets: (1) all word unigrams in the text, (2) head word of each nonterminal (inspired by the analogous feature adopted in (Huang et al., 2008) for question classification), (3) E-HowNet semantic features, and (4) pattern-matching indicators (currently, patterns/rules are manually created).", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 364, |
|
"text": "(Chang & Lin, 2011)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 562, |
|
"text": "(Huang et al., 2008)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Table 1. Various solution types for solving elementary school math word problems", |
|
"sec_num": null |
|
}, |
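
A minimal sketch of such a classifier, assuming scikit-learn's LinearSVC in place of the LIBSVM package of Chang & Lin (2011) and using only feature set (1), word unigrams; the toy training pairs below are invented placeholders rather than the 75 labeled questions:

```python
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

# Toy training data: (problem text, solution type). Real training would
# use the labeled corpus and all four feature sets described above.
texts = ["How many pens did the stationer buy in total?",
         "How much money is left after paying?",
         "How far can the boat sail in 2.5 hours?"]
labels = ["Sum", "Subtraction", "Multiply"]

# Word unigrams -> linear-kernel SVM, mirroring feature set (1).
stc = make_pipeline(CountVectorizer(ngram_range=(1, 1)), LinearSVC())
stc.fit(texts, labels)
print(stc.predict(["How many red pens did the stationer buy?"]))
```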
|
{ |
|
"text": "A two-stage approach is adopted to transform the SR tree of an input sentence to its corresponding logic forms. In the first stage, the syntactic/semantic relations between the words are deterministically transformed into their domain-independent logic forms. Afterwards, crucial generic math facts and the possible math operations are non-deterministically generated (as domain-dependent logic forms) in the second stage. Basically, logic forms are expressed with the first-order logic (FOL) formalism (Russell & Norvig, 2009) In the first stage, FOL predicates are generated by traversing the input SR tree which mainly depicts the syntactic/semantic relations between its words (with associated word-senses). For example, the SR tree of the sentence \"100 \u9846\u7cd6\u88dd\u6210 5 \u76d2(If 100 candies are packed into 5 boxes)\" is shown as follows: {\u88dd\u6210(t1); theme={\u7cd6(t2); quantity=100(t3); unit=\u9846(t4)}; result={\u7cd6(t5); quantity=5(t6); unit=\u76d2(t7)} } Where \"theme\" and \"result\" are semantic roles, and information within brace are their associated attributes. Also, the symbols within parentheses are the identities of the terminals in the SR tree. Note that the terminal t5 is created via zero anaphora resolution in the language analysis phase. The above FOL predicates are also called logic-form-1 (LF1) facts. The predicate names of LF1 facts are just the domain-independent syntactic/semantic roles of the constituents in a sub-tree. Therefore, the LF1 facts are also domain-independent.", |
|
"cite_spans": [ |
|
{ |
|
"start": 503, |
|
"end": 527, |
|
"text": "(Russell & Norvig, 2009)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logic Form Transformation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The domain-dependent logic-form-2 (LF2) facts are generated in the second stage. The LF2 facts are derived from some crucial generic math facts associated with quantities and relations between quantities. The FOL function \"quan(quan_id, unit_id, object_id) = number\" is used to describe the facts about quantities. The first argument is a unique identity to represent this quantity-fact. The other arguments and the function value describe the meaning of this fact. For example, \"qaun(q1,\u9846,\u7cd6) = 100\" means \"100 \u9846\u7cd6(100 candies)\" and \"qaun(q2, \u76d2 , \u7cd6 ) = 5\" means \"5 \u76d2 \u7cd6 (five boxes of candies)\". The FOL predicate \"qmap(map_id, quan_id 1 , quan_id 2 )\" (denotes the mapping from quan_id 1 to quan_id 2 ) is used to describe a relation between two quantity-facts, where the first argument is a unique identity to represent this relation. For example, \"qmap(m1, q1, q2)\" indicates that there is a relation between \"100 \u9846\u7cd6\" and \"5 \u76d2\u7cd6\". Now, LF2 facts are transformed by rules with a predefined set of lexico-semantic patterns as conditions. When more cases are exploited, a nondeterministic approach would be required.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logic Form Transformation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In additional to domain-dependent facts like \"quan(\u2026)\" and \"qmap(\u2026)\", some auxiliary domain-independent facts associated with quan_id and map_id are also created in this stage to help the IE find the solution. The auxiliary facts of the quan_id are created by 4 steps: First, locate the nonterminal (said n q ) which quan_id is coming from. Second, traverse upward from n q to find the nearest nonterminal (said n v ) which directly connects to a verb terminal. Third, duplicate all LF1 facts whose first arguments are n v , except the one whose second argument is n q . Finally, replace the first arguments of the duplicated facts with quan_id. In the above Designing a Tag-Based Statistical Math Word Problem Solver 13 with Reasoning and Explanation example, for the quantity-fact q1, n q is n1 and n v is v1 in the first and second steps. \"verb(v1, \u88dd\u6210)\" and \"result(v1, n2)\" will be copied at the third step. Finally, \"verb(q1, \u88dd\u6210)\" and \"result(q1, n2)\" are created. Likewise, \"verb(q2, \u88dd\u6210)\" and \"theme(q2, n1)\" are created for q2. The auxiliary facts of \"qmap(map_id, quan_id 1 , quan_id 2 )\" are created by copying all facts of the forms \"verb(quan_id 1 , *)\" and \"verb(quan_id 2 , *)\" (where \"*\" is a wildcard), and then replace all the first arguments of the copied facts with map_id. So, \"verb(m1, \u88dd\u6210)\" is created for m1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logic Form Transformation", |
|
"sec_num": "3.3" |
|
}, |
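
The four-step procedure maps directly to code. A minimal sketch, under the simplifying assumptions that LF1 facts are (role, arg1, arg2) triples and that the SR tree is given as a parent map:

```python
# LF1 facts as (role, arg1, arg2) triples; parent[] encodes the SR tree.
lf1 = [("verb", "v1", "裝成"), ("theme", "v1", "n1"), ("result", "v1", "n2")]
parent = {"n1": "v1", "n2": "v1"}
has_verb = {"v1"}  # nonterminals directly connected to a verb terminal

def auxiliary_facts(quan_id, n_q):
    # Step 2: climb from n_q to the nearest nonterminal with a verb terminal.
    n_v = n_q
    while n_v not in has_verb:
        n_v = parent[n_v]
    # Steps 3-4: copy every LF1 fact headed by n_v except the one pointing
    # back at n_q, re-heading the copies with quan_id.
    return [(role, quan_id, arg2)
            for role, arg1, arg2 in lf1
            if arg1 == n_v and arg2 != n_q]

print(auxiliary_facts("q1", "n1"))  # [('verb','q1','裝成'), ('result','q1','n2')]
```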
|
{ |
|
"text": "Sometimes, the third argument of a quantity-fact (i.e., object_id) is a pseudo nonterminal identity created in the second stage. For example, the LF1 facts of the phrase \"2361 \u679d\u7d05\u7b46 (2361 red pens)\" are \"quantity(n1, 2361)\", \"unit(n1, \u679d)\", \"color(n1, \u7d05)\" and \"head(n1, \u7b46)\", where n1 is the nonterminal identity of the phrase. A pseudo nonterminal identity, said n1 p , is created to carry the terminals \"\u7d05(red)\" and \"\u7b46(pen)\" so that the quantity-fact \"2361 \u679d\u7d05\u7b46(2361 red pens)\" can be expressed as \"quan(q1, \u679d, n1 p ) = 2361\". The subscript \"p\" in n1 p indicates that n1 p is a pseudo nonterminal derived from the n1. To express that fact that n1 p carries the terminals \"\u7d05(red)\" and \"\u7b46(pen)\", two auxiliary facts \"color(n1 p , \u7d05)\" and \"head(n1 p , \u7b46)\" are also generated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logic Form Transformation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The questions in an MWP are transformed into FOL-like utility functions provided by the IE. One utility function is issued for each question to find the answer. For example, the question \"\u6587\u5177\u5e97\u5171\u9032\u8ca8\u5e7e\u679d\u7b46(How many pens did the stationer buy)\" is converted into \"ASK Sum(quan(?q,\u679d,\u7b46), verb(?q,\u9032\u8ca8)&agent(?q,\u6587\u5177\u5e97))\". This conversion is completed by two steps. First, select an IE utility (e.g., \"Sum(\u2026)\") to be called. Since the solution type of the question is \"Sum\", the IE utility \"Sum(function, condition) = value\" is selected. Second, instantiate the arguments of the selected IE utility. In this case, the first argument function is set to \"quan(?q, \u679d, \u7b46)\" because an unknown quantity fact is detected in the phrase \"\u5e7e\u679d\u7b46 (how many pens)\". Let the FOL variable \"?q\" play the role of quan_id in the steps of finding the auxiliary facts. The auxiliary facts \"verb(?q, \u9032\u8ca8)\" and \"agent(?q, \u6587\u5177\u5e97)\" are obtained to compose the second argument condition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logic Form Transformation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To sum up, the LFC transforms the semantic representation obtained by language analysis into domain dependent FOL expressions on which inference can be performed. In contrast, most researches of semantic parsing (Jurcicek et al., 2009; Das et al., 2014; Berant et al., 2013; Allen, 2014) seek to directly map the input text into the corresponding logic form. Therefore, across sentences deep analysis of the input text (e.g., anaphora and co-reference resolution) cannot be handled. The proposed two-stage approach (i.e., language analysis and then logic form transformation) thus provides the freedom to enhance the system capability for handling complicated problems which require deep semantic analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 235, |
|
"text": "(Jurcicek et al., 2009;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 236, |
|
"end": 253, |
|
"text": "Das et al., 2014;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 274, |
|
"text": "Berant et al., 2013;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 287, |
|
"text": "Allen, 2014)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logic Form Transformation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In our design, an IE is used to find the solution for an MWP. It is responsible for providing utilities to select desired facts and then obtaining the answer by taking math operations on those selected facts. In addition, it is also responsible for using inference rules to derive new facts from the facts directly provided from the description of the MWP. Facts and inference rules are represented in first-order logic (FOL) (Russell & Norvig, 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 426, |
|
"end": 450, |
|
"text": "(Russell & Norvig, 2009)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "In some simple cases, the desired answer can be calculated from the facts directly derived from the MWP. For those cases, the IE only needs to provide a utility function to calculate the answer. In the example of Figure 2 , quantities 300, 600, 186 and 234 are mentioned in the MWP. The LFC transforms the question into \"ASK Sum(quan(?q,\u6735,\u767e\u5408), verb(?q,\u8ce3\u51fa)&agent(?q,\u82b1\u5e97)\" to ask the IE to find the answer, where \"Sum(\u2026)\" is a utility function provided by the IE. The first argument of \"Sum(\u2026)\" is an FOL function to indicate which facts should be selected. In this case, the unification procedure of the IE will successfully unify the first argument \"quan(?q, \u6735, \u767e\u5408)\" with three facts \"quan(q2, \u6735, \u767e \u5408)\", \"quan(q3, \u6735, \u767e\u5408)\" and \"quan(q4, \u6735, \u767e\u5408)\". When unifying \"quan(?q, \u6735, \u767e\u5408)\" with \"quan(q2, \u6735, \u767e\u5408)\", the FOL variable \"?q\" will be bound/substituted with q2. The second argument of \"Sum(\u2026)\" (i.e., \"verb(?q,\u8ce3\u51fa)&agent(?q,\u82b1\u5e97)\") is the condition to be satisfied. Since \"quan(q2, \u6735, \u767e\u5408)\" is rejected by the given condition, \"Sum(\u2026)\" will sum the values of the remaining facts (i.e., q3 and q4) to obtain the desired answer \"420\". Table 2 lists the utilities provided by the IE. The first one, as we have just described, returns the sum of the values of FOL function instances which can be unified with the function argument and satisfy the condition argument. The Addition utility simply returns the value of \"value 1 +value 2 \", where value i is either a constant number, or an FOL function value, or a value returned by a utility. Likewise, Subtraction and Multiplication utilities return Designing a with Reasoning and Explanation \"value 1 \u2212value 2 \" and \"value 1 \u00d7value 2 \" respectively. Difference returns the absolute value of Subtraction. CommonDiv returns the value of \"value 1 \u00f7value 2 \". FloorDiv returns the largest integer value not greater than \"value 1 \u00f7value 2 \" and CeilDiv returns the smallest integer value not less than \"value 1 \u00f7value 2 \". Surplus returns the remainder after division of value 1 by value 2. Figure 3 , the MWP provides the facts that \"\u7238\u7238(Papa)\" bought something but it does not provide any facts associated to the money that \"\u7238\u7238(Papa)\" must pay. As a result, we are not able to obtain the answer from the question logic form \"Sum(quan(?q,\u5143,#), verb(?q,\u4ed8)&agent(?q,\u7238\u7238))\". However, it is common sense that people must pay some money to buy something. The following inference rule implements this common-sense implication.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 221, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 1124, |
|
"end": 1131, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 2022, |
|
"end": 2030, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "quan(?q,?u,?o)&verb(?q,\u8cb7)&agent(?q,?a)&price (?o,?p) \u2192quan($q,\u5143,#)=quan(?q,?u,?o)\u00d7?p&verb($q,\u4ed8)&agent($q,?a)", |
|
"cite_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 52, |
|
"text": "(?o,?p)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "In the above implication inference rule, \"quan(?q,?u,?o)&\u2026&price(?o,?p)\" is the premise of the rule and \"quan($q,\u5143,#)=\u2026&agent($q,?a)\" is the consequence of the rule. Please note that \"$q\" indicates a unique ID generated by the IE.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
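
A minimal forward-chaining sketch of this buy-implies-pay rule; the dictionary-based fact representation and the fresh-ID counter standing in for "$q" are illustrative assumptions:

```python
import itertools

_fresh = itertools.count(3)  # q1, q2 already used; "$q" -> q3, q4, ...

def apply_buy_pay_rule(quan_facts, prices):
    """For every quan fact whose verb is 買(buy) and whose object has a
    price, derive quan($q,元,#) = quantity * price with verb 付(pay)."""
    new_facts = []
    for f in quan_facts:
        if f["verb"] == "買" and f["obj"] in prices:
            q = f"q{next(_fresh)}"  # unique ID generated by the IE
            new_facts.append({"id": q, "unit": "元", "obj": "#",
                              "value": f["value"] * prices[f["obj"]],
                              "verb": "付", "agent": f["agent"]})
    return new_facts

facts = [{"id": "q1", "unit": "本", "obj": "n1_p", "value": 3,
          "verb": "買", "agent": "爸爸"},
         {"id": "q2", "unit": "枝", "obj": "n2_p", "value": 2,
          "verb": "買", "agent": "爸爸"}]
derived = apply_buy_pay_rule(facts, {"n1_p": 329, "n2_p": 465})
print(sum(f["value"] for f in derived))  # 987 + 930 = 1917
```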
|
{ |
|
"text": "\u7238\u7238\u8cb7\u4e86 3 \u672c 329 \u5143\u7684\u6545\u4e8b\u66f8\u548c 2 \u679d 465 \u5143\u7684\u92fc\u7b46(Papa bought three $329 books and two $465 pens)\uff0c\u7238\u7238\u5171\u8981\u4ed8\u5e7e\u5143(How much money did Papa pay)? quan(q1,\u672c,n1 p )=3&verb(q1,\u8cb7)&agent(q1,\u7238\u7238)&head(n1 p ,\u6545\u4e8b\u66f8)&price(n1 p ,329) quan(q2,\u679d,n2 p )=2&verb(q2,\u8cb7)&agent(q2,\u7238\u7238)&head(n2 p ,\u92fc\u7b46)&price(n2 p ,465) ASK Sum(quan(?q,\u5143,#),verb(?q,\u4ed8)&agent(?q,\u7238\u7238)) After unifying this inference rule with the facts in Figure 3 , we can get two possible bindings (for q1 and q2, respectively). The following shows the binding of q1.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 377, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "quan(q1,\u672c,n1)&verb(q1,\u8cb7)&agent(q1,\u7238\u7238)&price(n1,329) \u2192quan(q3,\u5143,#)=quan(q1,\u672c,n1)\u00d7329&verb(q3,\u4ed8)&agent(q3,\u7238\u7238)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "Since \"quan(q1,\u672c,n1)\u00d7329 = 3\u00d7329 = 987\", the consequence of the above inference will generate three new facts \"quan(q3, \u5143, #) = 987\", \"verb(q3, \u4ed8)\" and \"agent(q3, \u7238\u7238)\". The semantics of the consequence is \"\u7238\u7238\u4ed8 987 \u5143(Papa pays 987 dollars)\". Likewise, the consequence of another binding of this inference rule will also generate three new facts \"quan(q4, \u5143, #) = 930\", \"verb(q4, \u4ed8)\" and \"agent(q4, \u7238\u7238)\". By taking these new facts into account, the utility call \"Sum(quan(?q,\u5143,#), verb(?q,\u4ed8)&agent(?q,\u7238\u7238))\" can thus return the correct answer \"1917\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
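
Returning to the utilities of Table 2, their arithmetic behavior can be sketched as plain functions, assuming the operand values have already been selected by unification and condition checking; the names follow the table:

```python
import math

# Arithmetic utilities of Table 2, applied after fact selection/unification.
def addition(v1, v2):        return v1 + v2
def subtraction(v1, v2):     return v1 - v2
def multiplication(v1, v2):  return v1 * v2
def difference(v1, v2):      return abs(v1 - v2)   # |Subtraction|
def common_div(v1, v2):      return v1 / v2
def floor_div(v1, v2):       return math.floor(v1 / v2)
def ceil_div(v1, v2):        return math.ceil(v1 / v2)
def surplus(v1, v2):         return v1 % v2        # remainder of v1 / v2

def sum_utility(values):
    """Sum the values of all unifiable fact instances (in the Figure 2
    example: q3 + q4 = 186 + 234 = 420 once q2 is rejected)."""
    return sum(values)

print(sum_utility([186, 234]))  # 420
```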
|
{ |
|
"text": "Furthermore, the unification process in a conventional IE is based on string-matching. The expression \"qaun(?q, \u679d, \u7b46)\" can be unified with a fact \"quan(q1, \u679d, \u7b46)\". However, it cannot be unified with the fact \"quan(q2, \u6735, \u82b1)\". String-matching guarantees that the IE will not operate on undesired quantities. But, it sometimes prevents the IE from operating on desired quantities. For instance, in Figure 4 , two quantity-facts \"quan(q1,\u679d,n1 p ) = 2361\" and \"quan(q2,\u679d,n2 p ) = 1587\" are converted from \"2361 \u679d\u7d05\u7b46(2361 red pens)\" and \"1587 \u679d\u85cd \u7b46(1587 blue pens)\", respectively. The first argument of \"Sum(\u2026)\" is \"quan(?q, \u679d, \u7b46)\" because \"\u5e7e\u679d\u7b46(how many pens)\" is concerned in the question. The conventional unification is not able to unify \"quan(?q, \u679d, \u7b46)\" to either \"quan(q1, \u679d, n1 p )\" or \"quan(q2, \u679d, n2 p )\" due to different strings of the third arguments. However, from the semantic point of view, \"quan(?q, \u679d, \u7b46)\" should be unified with both \"quan(q1, \u679d, n1 p )\" and \"quan(q2, \u679d, n2 p )\", because n1 p and n2 p represent \"\u7d05\u7b46(red pens)\" and \"\u85cd\u7b46(blue pens)\" respectively (and either one is a kind of \"\u7b46(pen)\").", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 404, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "\u6587\u5177\u5e97\u9032\u8ca8 2361 \u679d\u7d05\u7b46\u548c 1587 \u679d\u85cd\u7b46(A stationer bought 2361 red pens and 1587 blue pens), \u6587\u5177\u5e97\u5171\u9032\u8ca8\u5e7e\u679d\u7b46(How many pens did the stationer buy)? quan(q1,\u679d,n1 p )=2361&verb(q1,\u9032\u8ca8)&agent(q1,\u6587\u5177\u5e97)&head(n1 p ,\u7b46)&color(n1 p ,\u7d05) quan(q2,\u679d,n2 p )=1587&verb(q2,\u9032\u8ca8)&agent(q2,\u6587\u5177\u5e97)&head(n2 p ,\u7b46)&color(n2 p ,\u85cd) ASK Sum(quan(?q,\u679d,\u7b46),verb(?q,\u9032\u8ca8)&agent(?q,\u6587\u5177\u5e97))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Operation", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "Therefore, a semantic matching method is proposed to be incorporated into the unification procedure. The idea is to match the semantic constituent sets of the two arguments Designing a with Reasoning and Explanation involved in unification. For example, while matching the third arguments of two functions during unifying the request 6 \"quan(?q, \u679d, \u7b46)\" with the fact \"quan(q1, \u679d, n1 p )\", IE will construct and compare two semantic constituent sets, one is for \"\u7b46\" and the other is for \"n1 p \". Let SCS denote \"semantic constituent set\" and SCS(x) denote the semantic constituent set of x. In our approach, \"SCS(\u7b46) = {\u7b46}\" 7 and \"SCS(n1 p ) = {\u7b46, color(\u7d05)}\" 8 . Since \"SCS(\u7b46)\" is covered by the \"SCS(n1 p )\", \"quan(?q, \u679d, \u7b46)\" can be unified with \"quan(q1, \u679d, n1 p )\". Likewise, \"quan(?q, \u679d, \u7b46)\" can be unified with \"quan(q2, \u679d, n2 p )\" because \"SCS(n2 p ) = {\u7b46, color(\u85cd)}\" covers \"SCS(\u7b46)\". As the result, the utility call \"Sum(quan(?q,\u679d,\u7b46), verb(?q,\u9032\u8ca8)&agent(?q,\u6587\u5177\u5e97))\" will obtain the correct answer \"3948\". On the other hand, if the question is \"\u6587\u5177\u5e97\u5171\u9032\u8ca8\u5e7e\u679d\u7d05\u7b46(How many red pens did the stationer buy)?\", the request will become \"quan(?q, \u679d, n3 p )\", where n3 p is a pseudo nonterminal consisting of the terminals \"\u7d05(red)\" and \"\u7b46(pen)\" under the noun phrase \"\u5e7e\u679d\u7d05\u7b46(how many red pens)\". Since \"SCS(n3 p ) = {\u7b46, color(\u7d05)}\", \"quan(?q, \u679d, n3 p )\" can be unified only with \"quan(q1, \u679d, n1 p )\". It cannot be unified with \"quan(q2, \u679d, n2 p )\" because SCS(n3 p ) cannot be covered by SCS(n2 p ). Therefore, the quantity of \"\u85cd\u7b46(blue pens)\" will not be taken into account for the question \"\u6587\u5177\u5e97\u5171\u9032\u8ca8\u5e7e\u679d\u7d05\u7b46(How many red pens did the stationer buy)?\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 4. An example for requiring semantic-matching", |
|
"sec_num": null |
|
}, |
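
{

"text": "To make the covering test concrete, the following minimal Python sketch illustrates the SCS construction of footnote 8 and the subset-based matching described above. It is our illustration rather than the system's actual implementation; in particular, encoding facts as (role, first-argument, second-argument) triples is an assumption made only for this example.

def build_scs(node, facts):
    # facts: (role, arg1, arg2) triples, e.g. ('head', 'n1p', '筆')
    scs = set()
    for role, arg1, arg2 in facts:
        if arg1 != node:
            continue
        child = build_scs(arg2, facts) or {arg2}  # a terminal's SCS is itself
        if role == 'head':
            scs |= child
        else:
            scs |= {role + '(' + s + ')' for s in child}
    return scs

def unifiable(request_arg, fact_arg, facts):
    req = build_scs(request_arg, facts) or {request_arg}
    fct = build_scs(fact_arg, facts) or {fact_arg}
    return req <= fct  # unify only if the request's SCS is covered

facts = [('head', 'n1p', '筆'), ('color', 'n1p', '紅'),
         ('head', 'n2p', '筆'), ('color', 'n2p', '藍')]
print(unifiable('筆', 'n1p', facts))   # True: {筆} is covered by {筆, color(紅)}
print(unifiable('n1p', 'n2p', facts))  # False: color(紅) is not in SCS(n2p)
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Figure 4. An example for requiring semantic-matching",

"sec_num": null

},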
|
{ |
|
"text": "Since we might adopt the verb \"\u8cb7(buy)\" in the body text \"\u7238\u7238\u8cb7\u4e86 3 \u672c 329 \u5143\u7684\u6545\u4e8b\u66f8 (Papa bought three $329 books)\", but adopt the verb \"\u4ed8(pay)\" in the question text \"\u7238\u7238\u5171\u8981 \u4ed8\u5e7e\u5143(How much money did Papa pay)\uff1f\" (as illustrated in the previous section), we need the knowledge that \"buy\" implies \"pay\" to perform logic binding (Moldovan & Rus, 2001 ). Verb entailment is thus required to identify whether there is an entailment relation between these two verbs (Hashimoto et al., 2009) . Verb entailment detection is an important function for the IE (de Salvo Braz et al., 2006) , as it can indicate the event progress and the status changing. In the math problem \"Bill had no money. Mom gave Bill two dollars, and Dad gave Bill three dollars. How much money Bill had then?\", the entailment between \"give (\u7d66)\" and \"have (\u6709)\" can update the status of Bill from \"no money\", then \"two dollars\", and to the final answer \"five dollars\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 313, |
|
"end": 334, |
|
"text": "(Moldovan & Rus, 2001", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 471, |
|
"text": "(Hashimoto et al., 2009)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 564, |
|
"text": "IE (de Salvo Braz et al., 2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Verb Entailment (Jurafsky & Martin, 2000)", |
|
"sec_num": "3.4.2" |
|
}, |
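
{

"text": "As a concrete illustration of how a detected entailment can drive such status updates, the following minimal Python sketch applies the effect 'give entails that the recipient has more' to the Bill example above. The effect table and function names are illustrative assumptions, not the system's actual rules.

ENTAILS_HAVE = {'give': +1}  # illustrative: 'give' entails the recipient 'has' more

def update(state, verb, recipient, amount):
    # Apply the quantity effect implied by the verb, if any.
    sign = ENTAILS_HAVE.get(verb)
    if sign is not None:
        state[recipient] = state.get(recipient, 0) + sign * amount
    return state

state = {'Bill': 0}               # 'Bill had no money.'
update(state, 'give', 'Bill', 2)  # 'Mom gave Bill two dollars.'
update(state, 'give', 'Bill', 3)  # 'Dad gave Bill three dollars.'
print(state['Bill'])              # 5: 'How much money did Bill have then?'
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Verb Entailment (Jurafsky & Martin, 2000)",

"sec_num": "3.4.2"

},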
|
{ |
|
"text": "We define the verb entailment problem as follows: given an ordered verb pair \"(v1, v2)\" as input, we want to detect whether the entailment relation 'v1 \u2192 v2' holds for this pair. E-HowNet (Chen et al., 2009; Huang et al., 2014) is adopted as the knowledge base for solving this problem. For the previous example verb \"give (\u7d66)\", we can find its conflation of events, which has been described as the phenomenon involved in predicates where the verb expresses a co-event or accompanying event, rather than the main event (Talmy, 1972; Haugen, 2009; Mateu, 2012) , from E-HowNet as shown in Figure 5 . The conflations of events are defined by predicates and their arguments , as shown in Figure 5 . Verb entailment is vital for solving the elementary school math problem. Consider the following math problem as a simple example:", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 207, |
|
"text": "(Chen et al., 2009;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 227, |
|
"text": "Huang et al., 2014)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 532, |
|
"text": "(Talmy, 1972;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 546, |
|
"text": "Haugen, 2009;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 559, |
|
"text": "Mateu, 2012)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 588, |
|
"end": 596, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 685, |
|
"end": 693, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Verb Entailment (Jurafsky & Martin, 2000)", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "\u8001\u5e2b\u539f\u6709 9 \u679d\u925b\u7b46,\u9001\u7d66\u5c0f\u670b\u53cb 5 \u679d\u5f8c,\u8001\u5e2b\u9084\u6709\u5e7e\u679d\u7b46\uff1f(The teacher has 9 pencils. After giving his students 5 pencils, how many pencils he has?) The verbs are \"\u6709(have)\" and \"\u9001\u7d66(give as a gift)\" in this problem. If we want to derive the concept of \"\u6709(have)\" from \"\u9001\u7d66(give as a gift)\", we can follow the direction of their definitions in E-HowNet: \"\u9001\u7d66(give as a gift)\" is a hyponym of \"\u7d66(give)\", and one of its implication from the conflation of events is \"\u5f97\u5230(obtain)\", which is a hyponym of \"\u6709 (have)\". However, for the four verbs in this derivation, implications are defined only in the verb \"\u7d66(give)\". As we can see, given all those definitions of words in E-HowNet, we need to find a valid path (which may involve word sense disambiguation) to determine whether there is an entailment between two verbs. Therefore, we need a model to automatically build the relations of these verbs by finding paths from E-HowNet or other resources, and then rank or validate these paths to find the verb entailment. The conflation of events also indicates that when the entailed verb pair is detected, we may further map semantic roles of these two verbs to Designing a with Reasoning and Explanation proceed the inference and find the solution (Wang & Zhang, 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1208, |
|
"end": 1228, |
|
"text": "(Wang & Zhang, 2009)", |
|
"ref_id": "BIBREF51" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Verb Entailment (Jurafsky & Martin, 2000)", |
|
"sec_num": "3.4.2" |
|
}, |
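
{

"text": "The path-finding step above can be sketched as a breadth-first search over hypernym ('is-a') edges and implication (conflation-of-events) edges. The tiny edge lists below stand in for E-HowNet lookups and are illustrative assumptions; a real model would additionally rank or validate candidate paths and disambiguate word senses, as noted above.

from collections import deque

HYPERNYM = {'送給': ['給'], '得到': ['有']}  # give-as-a-gift is-a give; obtain is-a have
IMPLIES = {'給': ['得到']}                  # give implies (the recipient) obtains

def entails(v1, v2, max_depth=6):
    # Return True if a path of hypernym/implication edges links v1 to v2.
    frontier, seen = deque([(v1, 0)]), {v1}
    while frontier:
        v, d = frontier.popleft()
        if v == v2:
            return True
        if d < max_depth:
            for nxt in HYPERNYM.get(v, []) + IMPLIES.get(v, []):
                if nxt not in seen:
                    seen.add(nxt)
                    frontier.append((nxt, d + 1))
    return False

print(entails('送給', '有'))  # True: 送給 -> 給 -> 得到 -> 有
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Verb Entailment (Jurafsky & Martin, 2000)",

"sec_num": "3.4.2"

},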
|
{ |
|
"text": "Since the accuracy rate of the Top-1 SR tree cannot be 100%, and the decisions made in the following phases (i.e., STC, LFC and IE) are also uncertain, we need a statistical framework to handle those non-deterministic phenomena. Under this framework, the problem of getting the desired answer for a given WMP can be formulated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Statistical Framework", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "\uf0b7 \uf028 \uf029", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Statistical Framework", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "arg max P , body text (and question text) to the final obtained answer. If the annotated answer match some of the obtained answers (within the search-beam), simply pick up the matched path with the maximal likelihood value. We then re-estimate the parameter-set (of the current iteration) from those picked up paths. If the annotated answer cannot match any of the obtained answers (within the search-beam), we simply drop that case, and then repeat the above re-estimation procedure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Statistical Framework", |
|
"sec_num": "4." |
|
}, |
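
{

"text": "The re-estimation loop just described can be sketched as a hard-EM style procedure. In the following minimal Python sketch, beam_search() and estimate_params(), as well as the candidate-path attributes, are hypothetical placeholders for the system's actual modules.

def reestimate(problems, params, iterations=10):
    for _ in range(iterations):
        kept = []
        for prob in problems:
            # Beam search yields candidate reasoning paths with answers.
            candidates = beam_search(prob.body, prob.question, params)
            matched = [c for c in candidates if c.answer == prob.gold_answer]
            if matched:
                # Pick the matched path with the maximal likelihood value.
                kept.append(max(matched, key=lambda c: c.likelihood))
            # Otherwise the case is simply dropped for this iteration.
        params = estimate_params(kept)  # re-estimate from the picked paths
    return params
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Statistical Framework",

"sec_num": "4."

},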
|
{ |
|
"text": "Currently, we have completed all the associated modules (including Word Segmenter, Syntactic Parser, Semantic Composer, STC, LFC, IE, and Explanation Generation), and have manually annotated 75 samples (from our elementary school math corpus) as the seed corpus (with syntactic tree, semantic tree, logic form, and reasoning chain annotated). Besides, we have cleaned the original elementary school math corpus and encoded it into the appropriate XML format. There are total 23,493 problems from six different grades; and the average number of words of the body text is 18.2 per problem. Table 3 shows the statistics of the converted corpus. We have completed a prototype system which is able to solve 11 different solution types (including Multiplication, Summation, Subtraction, Floor-Division, Algebra, Comparison, Surplus, Difference, Ceil-Division, Common-Division and Addition) , and have tested it on the seed corpus. The success of our pilot run has demonstrated the feasibility of the proposed approach. We plan to use the next few months to perform weakly supervised learning, as mentioned above, and fine tune the system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 730, |
|
"end": 883, |
|
"text": "(including Multiplication, Summation, Subtraction, Floor-Division, Algebra, Comparison, Surplus, Difference, Ceil-Division, Common-Division and Addition)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 588, |
|
"end": 595, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Current Status and Future Work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, all those MWP solvers proposed before year 2014 adopted the rule-based approach (Mukherjee & Garain, 2008 (Bobrow, 1964; Slagle, 1965) used format matching to map the input English sentence into the corresponding logic statement (all start with predicate \"EQUAL\"). Another system, WORDPRO, was developed by Fletcher (1985) to understand and solve simple one-step addition and subtraction arithmetic word problems designed for third-grade children. It did not accept the surface representation of text as input. Instead it begins with a set of propositions (manually created) that represent the text's meaning. Afterwards, the problem was solved with a set of rules (also called schemas), which matched the given proposition and then took the corresponding actions. Besides, it adopted key word match to obtain the answer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 135, |
|
"text": "(Mukherjee & Garain, 2008", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 150, |
|
"text": "(Bobrow, 1964;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 151, |
|
"end": 164, |
|
"text": "Slagle, 1965)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 352, |
|
"text": "Fletcher (1985)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "Solving the problem with schemata was then adopted in almost every later system (Mukherjee & Garain, 2008) . In 1986, ARITHPRO was designed with an inheritance network in which word classes inherit attributes from those classes above them on a verb hierarchy (Dellarosa, 1986) . The late development of ROBUST (Bakman, 2007) demonstrated how it could solve free format word problems with multi-step arithmetic through splitting one single sentence into two formula propositions. In this way, transpositions of problem sentences or additional irrelevant data to the problem text do not affect the problem solution. However, it only handles state change scenario. In 2010, Ma et al. (Ma et al., 2010 ) proposed a MSWPAS system to simulate people's arithmetic multi-step addition and subtraction word problems behavior. It uses frame-based calculus and means-end analysis (AI planning) to solve the problem with pre-specified rules. In 2012, Liguda and Pfeiffer (Liguda & Pfeiffer, 2012) proposed a model based on augmented semantic networks to represent the mathematical structure behind word problems. It read and solved mathematical text problems from German primary school books. With more attributes associated with the semantic network, it claimed that the system was able to solve multi-step word problems and complex equation systems and was more robust to irrelevant information. Also, it was declared that it was able to solve all classes of problems that could be solved by the schema-based systems, and could solve around 20 other classes of word problems from a school book which were in most cases not solvable by other systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 106, |
|
"text": "(Mukherjee & Garain, 2008)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 276, |
|
"text": "(Dellarosa, 1986)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 324, |
|
"text": "(Bakman, 2007)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 681, |
|
"end": 697, |
|
"text": "(Ma et al., 2010", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 959, |
|
"end": 984, |
|
"text": "(Liguda & Pfeiffer, 2012)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "Recently, Hosseini et al. (2014) proposed a Container-Entity based approach, which solved the math word problem with a state transition sequence. Each state consists of a set of containers, and each container specifies a set of entities identified by a few heuristic rules. How the quantity of each entity type changes depends on the associated verb category. Each time a verb is encountered, it will be classified (via a SVM, which is the only statistical module adopted) into one of the seven categories which pre-specify how to change the states of associated entities. Therefore, logic inference is not adopted. Furthermore, the anaphora and co-reference are left un-resolved, and it only handles addition and subtraction. Kushman et al. (2014) proposed the first statistical approach, which used a few heuristic rules to extract the algebra equation templates (consists of variable slots and number slots) from a set of problems annotated with equations. For a given problem, all possible variable/number slots are identified first. Afterwards, they are aligned with those templates. The best combination of the template and alignment (scored with a statistical model) is then picked up. Finally, the answer is obtained from those equations instantiated from the selected template. However, without really understanding the problem (i.e., no semantic analysis is performed), the performance that this approach can reach is limited; also, it is sensitive to those irrelevant statements (Hosseini et al., 2014) . Furthermore, it can only solve algebra related problems. Last, it cannot explain how the answer is obtained.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 32, |
|
"text": "Hosseini et al. (2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 748, |
|
"text": "Kushman et al. (2014)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1490, |
|
"end": 1513, |
|
"text": "(Hosseini et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "The most recent statistical approach was proposed by Roy et al. (2015) , which used 4 cascade statistical classifiers to solve the elementary school math word problems: quantity identifier (used to find out the related quantities), quantity pair classifier (used to find out the operands), operation classifier (used to pick an arithmetic operation), and order classifier (used to order operands for subtraction and division cases). It not only shares all the drawbacks associated with Kushman et al. (2014) , but also limits itself for allowing only one basic arithmetic operation (i.e., among addition, subtraction, multiplication, division) with merely 2 or 3 operand candidates.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 70, |
|
"text": "Roy et al. (2015)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 486, |
|
"end": 507, |
|
"text": "Kushman et al. (2014)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "Our proposed approach differs from those previous approaches by combining the statistical framework with logic inference. Besides, the tag-based approach adopted for selecting the appropriate information also distinguishes our approach from that of others.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "A tag-based statistical framework is proposed in this paper to perform understanding and reasoning for solving MWP. It first analyzes the body and question texts into their corresponding semantic trees (with anaphora/ellipse resolved and semantic role labeled), and then converted them into their associated tag-based logic forms. Afterwards, the inference (based on the question logic form) is performed on the logic facts derived from the body text. The combination of the statistical frame and logic inference distinguishes the proposed approach from other approaches. Comparing to those rule-based approaches, the proposed statistical approach alleviates the ambiguity resolution problem; also, our tag-based approach provides the flexibility of handling various kinds of related questions with the same body logic form. On the other hand, comparing to those purely statistical approaches, the proposed approach is more robust to the irrelevant information and could more accurately provide the answer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "The contributions of our work mainly lie in: (1) proposing a tag-based logic representation which makes the system less sensitive to the irrelevant information and could provide answer more precisely; (2) proposing a statistical framework for performing reasoning from the given text. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "The subscript \"p\" in \"n1 p \" indicates that \"n1 p \" is a pseudo nonterminal derived from the nonterminal \"n1\", which has four terminals \"2361\", \"\u679d\", \"\u7d05\" and \"\u7b46\". More details about pseudo nonterminal will be given at Section 3.3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "However, the \"Algebra\" solution type in this case is useless to LFC because the body text has already mentioned how to solve it, and the LFC actually does not need STC to tell it how to solve the problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "An FOL predicate/function in an IE utility or in the premise of an inference rule is called a request. A request usually consists of FOL variables.7 The SCS of a terminal consists of the terminal string only (e.g., \"SCS(\u7b46) = {\u7b46}\"). 8 SCS(n1 p ) is constructed by two steps. First, enumerate all facts whose first arguments are n1 p . Second, for each enumerated fact, denote the predicate name as Child-Role and the SCS of the second argument as Child-SCS. If Child-Role is \"head\", put the elements of Child-SCS into SCS(n1 p ). Otherwise, for each string s in Child-SCS, put the string \"Child-Role(s)\" into SCS(n1 p ). In the first step, the facts \"head(n1 p , \u7b46)\" and \"color(n1 p , \u7d05)\" are picked out. In the second step, the strings \"\u7b46\" and \"color(\u7d05)\" are put into SCS(n1 p ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Prof. Wen-Lian Hsu for suggesting this research topic and making the original elementary school math corpus available to us, and Prof. Keh-Jiann Chen for providing the resources and supporting this project. Besides, our thanks should be extended to Dr. Yu-Ming Hsieh and Dr. Ming-Hong Bai for implementing the syntactic parser and the semantic composer, respectively. Also, we would like to thank Prof. Chin-Hui Lee for suggesting the solution type. Last, our thanks should also go to Ms. Su-Chu Lin for manually annotating the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgment", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ans Body Qus \uf03d(1) Where \uf0b7Ans is the obtained answer, Ans denotes a specific possible answer, Body denotes the given body text of the problem, and Qus denotes the question text of the problem.The probability factor in the above equation can be further derived as follows via introducing some related intermediate/latent random variables: ST : Solution Type. In the above equation, we will further assume that P(Ans|IR,LF B ,LF Q )\u2248P(Rm), where Rm is the remaining logic factors in LF Q after the IE has bound it with LF B (with referring to the knowledge-base adopted). Last, Viterbi decoding (Seshadri & Sundberg, 1994 ) could be used to search the most likely answer with the above statistical model.To obtain the associated parameters of the model, we will first get the initial parameter-set from a small seed corpus annotated with various intermediate/latent variables involved in the model. Afterwards, we perform weakly supervised learning (Artzi & Zettlemoyer, 2013 ) on a partially annotated training-set (in which only the answer is annotated with each question). That is, we iteratively conduct beam-search (with the parameter-set obtained from the last iteration) on this partially annotated training-set starting from the given", |
|
"cite_spans": [ |
|
{ |
|
"start": 592, |
|
"end": 618, |
|
"text": "(Seshadri & Sundberg, 1994", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 946, |
|
"end": 972, |
|
"text": "(Artzi & Zettlemoyer, 2013", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ans", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Learning a Lexicon for Broad-Coverage Semantic Parsing", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Allen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "the Proceedings of the ACL 2014 Workshop on Semantic Parsing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allen, J. F. (2014). Learning a Lexicon for Broad-Coverage Semantic Parsing. In the Proceedings of the ACL 2014 Workshop on Semantic Parsing, 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Weakly supervised learning of semantic parsers for mapping instructions to actions", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "49--62", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Artzi, Y., & Zettlemoyer, L. (2013). Weakly supervised learning of semantic parsers for mapping instructions to actions. Transactions of the Association for Computational Linguistics, 1(2013), 49-62.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Robust Understanding of Word Problems With Extraneous Information", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bakman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:math/0701393" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bakman, Y. (2007). Robust Understanding of Word Problems With Extraneous Information. Retrieved from arXiv:math/0701393.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "PROGRAMMING IN NATURAL LANGUAGE", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ballard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Biermann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1979, |
|
"venue": "NLC\" AS A PROTOTYPE. ACM-Webinar", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/800177.810072" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ballard, B. & Biermann, A. (1979). PROGRAMMING IN NATURAL LANGUAGE : \"NLC\" AS A PROTOTYPE. ACM-Webinar, 1979, DOI: 10.1145/800177.810072.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Semantic Parsing on Freebase from Question-Answer Pairs", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Chou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Frostig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Conference on Empirical Methods in Natural Language Processing (EMNLP)2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1533--1544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berant, J., Chou, A., Frostig, R., & Liang, P. (2013). Semantic Parsing on Freebase from Question-Answer Pairs. Conference on Empirical Methods in Natural Language Processing (EMNLP)2013, 1533-1544.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Toward Natural Language Computation", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Biermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Ballard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "American Journal of Computational Linguistic", |
|
"volume": "6", |
|
"issue": "2", |
|
"pages": "71--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biermann, A. W., & Ballard, B. W. (1980). Toward Natural Language Computation. American Journal of Computational Linguistic, 6(2), 71-86.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "INTERACTIVE NATURAL LANGUAGE PROBLEM SOLVING:A PRAGMATIC APPROACH", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Biermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Rodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ballard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Betancourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Bilbro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Deas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Fineman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Fink", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Gregory", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Heidlage", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1982, |
|
"venue": "Proc. of the first conference on applied natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--191", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biermann, A., Rodman, R., Ballard, B., Betancourt, T., Bilbro, G., Deas, H., Fineman, L., Fink, P., Gilbert, K., Gregory, D., & Heidlage, F. (1982). INTERACTIVE NATURAL LANGUAGE PROBLEM SOLVING:A PRAGMATIC APPROACH. In Proc. of the first conference on applied natural language processing, 180-191.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Natural language input for a computer problem solving system", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Bobrow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1964, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bobrow, D. G. (1964). Natural language input for a computer problem solving system. Ph.D. Dissertation, Massachusetts Institute of Technology.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "LIBSVM: A library for support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "C.-C", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-J", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "ACM Transactions on Intelligent Systems and Technology", |
|
"volume": "2", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1961189.1961199" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chang, C.-C., & Lin, C.-J. (2011). LIBSVM: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology, 2(3). Doi:10.1145/1961189.1961199.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "CARPS, a program which solves calculus word problems", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1968, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charniak, E. (1968). CARPS, a program which solves calculus word problems. Report MAC-TR-51, Project MAC, MIT.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Computer solution of calculus word problems", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1969, |
|
"venue": "IJCAI'69 Proc. of International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "303--316", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charniak, E. (1969). Computer solution of calculus word problems. In IJCAI'69 Proc. of International Joint Conference on Artificial Intelligence, 303-316.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Extended-HowNet-A Representational Framework for Concepts", |
|
"authors": [ |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S.-L", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y.-Y", |
|
"middle": [], |
|
"last": "Shih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "OntoLex 2005 -Ontologies and Lexical Resources IJCNLP-05 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen, K.-J., Huang, S.-L., Shih, Y.-Y., & Chen, Y.-J. (2005). Extended-HowNet-A Representational Framework for Concepts. OntoLex 2005 -Ontologies and Lexical Resources IJCNLP-05 Workshop, Jeju Island, South Korea.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Unknown Word Extraction for Chinese Documents", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of Coling", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "169--175", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen, K.J., & Ma, W.Y. (2002). Unknown Word Extraction for Chinese Documents. In Proceedings of Coling 2002, 169-175.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Frame-Semantic Parsing", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"F T" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computational Linguistics", |
|
"volume": "40", |
|
"issue": "1", |
|
"pages": "9--56", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Das, D., Chen, D., Martins, A. F. T., Schneider, N., & Smith, N. A. (2014). Frame-Semantic Parsing. Computational Linguistics, 40(1), 9-56.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A computer simulation of children's arithmetic word-problem solving", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Dellarosa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "Behavior Research Methods, Instruments, & Computers", |
|
"volume": "18", |
|
"issue": "2", |
|
"pages": "147--154", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dellarosa, D. (1986). A computer simulation of children's arithmetic word-problem solving. Behavior Research Methods, Instruments, & Computers, 18(2), 147-154.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "COMPUTER SIMULATION --Understanding and solving arithmetic word problems: A computer simulation", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Fletcher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1985, |
|
"venue": "Behavior Research Methods, Instruments, & Computers", |
|
"volume": "17", |
|
"issue": "5", |
|
"pages": "565--571", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fletcher, C. R. (1985). COMPUTER SIMULATION --Understanding and solving arithmetic word problems: A computer simulation. Behavior Research Methods, Instruments, & Computers, 17(5,) 565-571.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Experiments with a natural language problem solving system", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Gelb", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1971, |
|
"venue": "Proc. of IJCAI-71", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "455--462", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gelb, J. P. (1971). Experiments with a natural language problem solving system. In Proc. of IJCAI-71, 455-462.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Large-scale verb entailment acquisition from the web", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Torisawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kuroda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "De Saeger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Murata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Kazama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hashimoto, C., Torisawa, K., Kuroda, K., De Saeger, S., Murata, M., & Kazama, J. J. (2009). Large-scale verb entailment acquisition from the web. In Proceedings of the 2009", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Conference on Empirical Methods in Natural Language Processing", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "1172--1181", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference on Empirical Methods in Natural Language Processing, 3, 1172-1181.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Hyponymous objects and Late Insertion", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Haugen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Lingua", |
|
"volume": "119", |
|
"issue": "", |
|
"pages": "242--262", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haugen, J. D. (2009). Hyponymous objects and Late Insertion. Lingua, 119, 242-262.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Learning to Solve Arithmetic Word Problems with Verb Categorization", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Hosseini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Kushman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--533", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hosseini, M. J., Hajishirzi, H., Etzioni, O., & Kushman, N. (2014). Learning to Solve Arithmetic Word Problems with Verb Categorization. EMNLP(2014), 523-533.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Ambiguity Resolution for Vt-N Structures in Chinese", |
|
"authors": [ |
|
{ |
|
"first": "Y.-M", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "928--937", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hsieh, Y.-M., Chang, J. S., & Chen, K.-J. (2014). Ambiguity Resolution for Vt-N Structures in Chinese. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, 928-937.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Improving Chinese Parsing with Special-Case Probability Re-estimation", |
|
"authors": [ |
|
{ |
|
"first": "Y.-M", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S.-C", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of 2013 International Conference on Asian Language Processing (IALP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hsieh, Y.-M., Lin, S.-C., Chang, J. S., & Chen, K.-J. (2013). Improving Chinese Parsing with Special-Case Probability Re-estimation. In Proceedings of 2013 International Conference on Asian Language Processing (IALP), 177-180.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Improve Parsing Performance by Self-Learning", |
|
"authors": [ |
|
{ |
|
"first": "Y.-M", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D.-C", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "International Journal of Computational Linguistics and Chinese Language Processing", |
|
"volume": "12", |
|
"issue": "2", |
|
"pages": "195--216", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hsieh, Y.-M., Yang, D.-C., & Chen, K.-J. (2007). Improve Parsing Performance by Self-Learning. International Journal of Computational Linguistics and Chinese Language Processing, 12(2), 195-216.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Resolving the Representational Problems of Polarity and Interaction between Process and State Verbs", |
|
"authors": [ |
|
{ |
|
"first": "S.-L", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y.-M", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S.-C", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Journal of Computational Linguistics and Chinese Language Processing (IJCLCLP)", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "33--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang, S.-L., Hsieh, Y.-M., Lin, S.-C., & Chen, K.-J. (2014). Resolving the Representational Problems of Polarity and Interaction between Process and State Verbs. International Journal of Computational Linguistics and Chinese Language Processing (IJCLCLP), 19(2), 33-52.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Semantic Roles and Semantic Role Labeling", |
|
"authors": [ |
|
{ |
|
"first": "S.-L", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S.-C", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W.-Y", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "CKIP technical report", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang, S.-L., Lin, S.-C., Ma, W.-Y., & Chen, K.-J. (2015). Semantic Roles and Semantic Role Labeling. (CKIP technical report no. 2015-01). Institute of Information Science, Academia Sinica.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Explanation Generation for a Math Word Problem Solver", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Journal of Computational Linguistics and Chinese Language Processing (IJCLCLP)", |
|
"volume": "20", |
|
"issue": "2", |
|
"pages": "27--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang, C. T., Lin, Y. C., & Su, K. Y. (2015). Explanation Generation for a Math Word Problem Solver. International Journal of Computational Linguistics and Chinese Language Processing (IJCLCLP), 20(2), 27-44.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Designing a Tag-Based Statistical Math Word Problem Solver 25 with Reasoning and Explanation", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Designing a Tag-Based Statistical Math Word Problem Solver 25 with Reasoning and Explanation", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Question classification using head words and their hypernyms", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Thint", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceeding of EMNLP '08 Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "927--936", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang, Z., Thint, M., & Qin, Z.(2008). Question classification using head words and their hypernyms. In Proceeding of EMNLP '08 Proceedings of the Conference on Empirical Methods in Natural Language Processing, 927-936.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Speech and Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jurafsky, D., & Martin, J. H. (2000). Speech and Language Processing. New Jersey: Prentice Hall.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Transformation-based Learning for Semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Jurc\u0131cek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Mairesse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ga\u0161ic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Keizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Gasic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of INTERSPEECH 2009", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2719--2722", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jurc\u0131cek, F., Mairesse, F., Ga\u0161ic, M., Keizer, S., Thomson, B., Yu, K., Young, S., & Gasic, M. (2009). Transformation-based Learning for Semantic parsing. In Proceedings of INTERSPEECH 2009, 2719-2722.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning to Automatically Solve Algebra Word Problems", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Kushman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "271--281", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kushman, N., Artzi, Y., Zettlemoyer, L., & Barzilay, R. (2014). Learning to Automatically Solve Algebra Word Problems. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, 271-281.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Modeling math word problems with augmented semantic networks", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Liguda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Pfeiffer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "247--252", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liguda, C., & Pfeiffer, T. (2012). Modeling math word problems with augmented semantic networks. NLDB 2012, 247-252.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "A survey of State-of-the-Art Methods on Question Classification", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Loni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Literature Survey", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Loni, B. (2011). A survey of State-of-the-Art Methods on Question Classification. Literature Survey, Published on TU Delft Repository, 2011 Jun.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Introduction to CKIP Chinese Word Segmentation System for the First International Chinese Word Segmentation Bakeoff", |
|
"authors": [ |
|
{ |
|
"first": "W.-Y", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of ACL, Second SIGHAN Workshop on Chinese Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "168--171", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ma, W.-Y., & Chen, K.-J. (2003). Introduction to CKIP Chinese Word Segmentation System for the First International Chinese Word Segmentation Bakeoff. In Proceedings of ACL, Second SIGHAN Workshop on Chinese Language Processing, 168-171.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Frame-Based Calculus of solving Arithmetic MultiStep Addition and Subtraction word problems", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"Z" |
|
], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Second International Workshop on Education Technology and Computer Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "476--479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ma, Y. H., Zhou, Y., Cui, G. Z., Ren, Y., & Huang, R. H. (2010). Frame-Based Calculus of solving Arithmetic MultiStep Addition and Subtraction word problems. In 2010 Second International Workshop on Education Technology and Computer Science, 476-479.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Conflation and incorporation processes in resultative constructions", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Mateu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Telicity, Change, and State: A Cross-Categorial View of Event Structure", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "252--278", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mateu, J. (2012). Conflation and incorporation processes in resultative constructions. In Violeta Demonte & Louise McNally (eds.), Telicity, Change, and State: A Cross-Categorial View of Event Structure, Oxford: Oxford University Press, 252-278.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Logic Form Transformation of WordNet and Its Applicability to Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moldovan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Rus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ACL '01 Proceedings of the 39th Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "402--409", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moldovan, D., & Rus, V. (2001). Logic Form Transformation of WordNet and Its Applicability to Question Answering. In ACL '01 Proceedings of the 39th Annual Meeting on Association for Computational Linguistics, 402-409.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "A review of methods for automatic understanding of natural language mathematical problems", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Garain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Artif Intell Rev", |
|
"volume": "29", |
|
"issue": "2", |
|
"pages": "93--122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mukherjee, A., & Garain, U. (2008). A review of methods for automatic understanding of natural language mathematical problems. Artif Intell Rev, 29(2), 93-122.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Reasoning about Quantities in Natural Language", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Roy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"J H" |
|
], |
|
"last": "Vieira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "TACL", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "1--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roy, S. I., Vieira, T. J. H., & Roth, D. I.(2015). Reasoning about Quantities in Natural Language. TACL, 3, 1-13.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Artificial Intelligence : A Modern Approach", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Russell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Norvig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Russell, S. J. & Norvig, P. (2009). Artificial Intelligence : A Modern Approach(3rd Edition), Prentice Hall.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "An inference model for semantic entailment in natural language", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "De Salvo Braz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Girju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Punyakanok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sammons", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Machine Learning Challenges", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "de Salvo Braz, R., Girju, R., Punyakanok, V., Roth, D., & Sammons, M. (2006). An inference model for semantic entailment in natural language. In Machine Learning Challenges.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Evaluating Predictive Uncertainty, Visual Object Classification, and Recognising Textual Entailment", |
|
"authors": [], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "261--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Evaluating Predictive Uncertainty, Visual Object Classification, and Recognising Textual Entailment., Springer Berlin Heidelberg, 2006, 261-286.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "List Viterbi Decoding Algorithms with Applications", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Seshadri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-E", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Sundberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "IEEE Transactions on Communications", |
|
"volume": "42", |
|
"issue": "234", |
|
"pages": "313--323", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seshadri, N., Sundberg, C.-E.W. (1994). List Viterbi Decoding Algorithms with Applications. IEEE Transactions on Communications, 42(234), 313-323.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Experiments with a deductive question-answering program", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Slagle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1965, |
|
"venue": "J-CACM", |
|
"volume": "8", |
|
"issue": "12", |
|
"pages": "792--798", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slagle, J. R. (1965). Experiments with a deductive question-answering program. J-CACM, 8(12), 792-798.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "The DARPA Machine Reading Program -Encouraging Linguistic and Reasoning Research with a Series of Reading Tasks", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Adams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Herr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Keesing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Oblinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Simpson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Schrag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wright", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Strassel, S., Adams, D., Goldberg, H., Herr, J., Keesing, R., Oblinger, D., Simpson, H., Schrag, R., & Wright, J. (2010). The DARPA Machine Reading Program -Encouraging Linguistic and Reasoning Research with a Series of Reading Tasks. LREC 2010.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Semantic Structures in English and Atsugewi", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Talmy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1972, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Talmy, L. (1972). Semantic Structures in English and Atsugewi. PhD thesis, Berkeley: University of California at Berkeley.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Reliable and Cost-Effective Pos-Tagging", |
|
"authors": [ |
|
{ |
|
"first": "Y.-F", |
|
"middle": [], |
|
"last": "Tsai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "International Journal of Computational Linguistics & Chinese Language Processing", |
|
"volume": "9", |
|
"issue": "1", |
|
"pages": "83--96", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsai, Y.-F., & Chen, K.-J. (2004). Reliable and Cost-Effective Pos-Tagging. International Journal of Computational Linguistics & Chinese Language Processing, 9(1), 83-96.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Design of Chinese Morphological Analyzer", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Tseng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of SIGHAN 2002", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--55", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tseng, H. H., & Chen, K.-J. (2002). Design of Chinese Morphological Analyzer. In Proceedings of SIGHAN 2002, 49-55.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Recognizing textual relatedness with predicate-argument structures", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "784--792", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang, R., & Zhang, Y. (2009). Recognizing textual relatedness with predicate-argument structures. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, 2, 784-792.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "The block diagram of the proposed Math Word Problem Solver.", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": ": {human|\u4eba:name={\"\u5c0f\u8c6a\"}} \u6709(2): {own|\u6709} \uff16\uff12\u5f35(3): quantifier={\u5f35.null|\u7121 \u7fa9", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "\u82b1\u5e97\u9032\u8ca8 300 \u6735\u73ab\u7470\u548c 600 \u6735\u767e\u5408(A flower store bought 300 roses and 600 lilies ), \u4e0a\u5348\u8ce3\u51fa 186 \u6735\u767e\u5408(It sold 186 lilies in the morning) \uff0c\u4e0b\u5348\u8ce3\u51fa 234 \u6735(It sold 234 lilies in the afternoon)\uff0c\u554f\u82b1\u5e97\u5171\u8ce3\u51fa\u5e7e\u6735\u767e\u5408(How many lilies did the flower store sell)? quan(q1,\u6735,\u73ab\u7470)=300&verb(q1,\u9032\u8ca8)&agent(q1,\u82b1\u5e97)&\u2026 quan(q2,\u6735,\u767e\u5408)=600&verb(q2,\u9032\u8ca8)&agent(q2,\u82b1\u5e97)&\u2026 quan(q3,\u6735,\u767e\u5408)=186&verb(q3,\u8ce3\u51fa)&agent(q3,\u82b1\u5e97)&\u2026 quan(q4,\u6735,\u767e\u5408)=234&verb(q4,\u8ce3\u51fa)&agent(q4,\u82b1\u5e97)&\u2026 ASK Sum(quan(?q,\u6735,\u767e\u5408), verb(?q,\u8ce3\u51fa)&agent(?q,\u82b1\u5e97))", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "A simple problem and its essential corresponding logic forms.", |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "An example for deriving new facts.", |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": ")=theme({lose|\u5931\u53bb}); lose\u2192theme({give| \u7d66})=possession({lose|\u5931\u53bb}); obtain\u2192theme({give| \u7d66})=possession({obtain|\u5f97\u5230}); obtain\u2192target({give| \u7d66})=theme({obtain|\u5f97\u5230}); receive\u2192target({give| \u7d66})=agent({receive|\u6536\u53d7}); receive\u2192theme({give| \u7d66})=possession({receive|\u6536\u53d7}) The conflation events of the verb \"give (\u7d66)\".", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"text": "\u5c0f\u8c6a \u6709 62 \u5f35 \u8cbc\u7d19 \uff0c \u54e5\u54e5 \u518d \u7d66 \u4ed6 56 \u5f35 \uff0c \u5c0f\u8c6a \u73fe\u5728 \u5171 \u6709 \u5e7e\u5f35 \u8cbc\u7d19 \uff1f (Xiaohao had 64 stickers, and his brother gave him 56 more. How many stickers does Xiahao", |
|
"content": "<table><tr><td colspan=\"3\">Designing a Tag-Based Statistical Math Word Problem Solver</td><td>9</td></tr><tr><td/><td>with Reasoning and Explanation</td><td/></tr><tr><td>have now?)</td><td/><td/></tr><tr><td>\u5c0f\u8c6a\u6709 62 \u5f35\u8cbc\u7d19\uff0c</td><td>\u54e5\u54e5\u518d\u7d66\u4ed6 56 \u5f35\uff0c</td><td colspan=\"2\">\u5c0f\u8c6a\u73fe\u5728\u5171\u6709\u5e7e\u5f35\u8cbc\u7d19\uff1f</td></tr><tr><td>{\u6709(2):</td><td/><td/></tr><tr><td>theme={[x1]\u5c0f\u8c6a(1)},</td><td/><td/></tr><tr><td>range={\u8cbc\u7d19(4):</td><td/><td/></tr><tr><td>quantifier={\uff16\uff12\u5f35(3)}</td><td/><td/></tr><tr><td>}</td><td/><td/></tr><tr><td>}</td><td/><td/></tr><tr><td>\u5c0f\u8c6a</td><td/><td/></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"html": null, |
|
"text": "The above SR tree is transformed into the following FOL predicates separated by the logic AND operator &.All the first arguments of the above FOL predicates (i.e., v1, n1 and n2) are the identities to the nonterminals in the SR tree. To ease reading, the terminal identities in logic forms are replaced with their corresponding terminal strings in the rest of this paper.", |
|
"content": "<table><tr><td>verb(v1,t1)&theme(v1,n1)&result(v1,n2)&</td></tr><tr><td>head(n1,t2)&quantity(n1,t3)&unit(n1,t4)&</td></tr><tr><td>head(n2,t5)&quantity(n2,t6)&unit(n2,t7)</td></tr><tr><td>After replacement,</td></tr><tr><td>the above logic forms become more readable as follows:</td></tr><tr><td>verb(v1,\u88dd\u6210)&theme(v1,n1)&result(v1,n2)&head(n1,\u7cd6)&quantity(n1,100)&</td></tr><tr><td>unit(n1,\u9846)&head(n2,\u7cd6)&quantity(n2,5)&unit(n2,\u76d2)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td>Sum(function, condition)=value</td><td>CommonDiv(value 1 , value 2 )=value</td></tr><tr><td>Addition(value 1 , value 2 )=value</td><td>FloorDiv(value 1 , value 2 )=value</td></tr><tr><td>Subtraction(value 1 , value 2 )=value</td><td>CeilDiv(value 1 , value 2 )=value</td></tr><tr><td>Difference(value 1 , value 2 )=value</td><td>Surplus(value 1 , value 2 )=value</td></tr><tr><td>Multiplication(value 1 , value 2 )=value</td><td/></tr><tr><td colspan=\"2\">Solving MWPs may require deriving new facts according to common sense or domain</td></tr><tr><td>knowledge. In</td><td/></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td>Corpus Training Set</td><td>Num. of problems 20,093</td><td>Corpus</td><td>Avg. Chinese Chars.</td><td>Avg. Chinese Words</td></tr><tr><td>Develop Set</td><td>1,700</td><td>Body</td><td>27</td><td>18.2</td></tr><tr><td>Test Set</td><td>1,700</td><td>Question</td><td>9.4</td><td>6.8</td></tr><tr><td>Total</td><td>23,493</td><td/><td/><td/></tr><tr><td colspan=\"2\">MWP corpus statistics</td><td colspan=\"3\">Average length per problem</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |