|
{ |
|
"paper_id": "O08-6002", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T08:02:22.728040Z" |
|
}, |
|
"title": "Corpus Cleanup of Mistaken Agreement Using Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Liang-Chih", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Yuan-Ze University", |
|
"location": { |
|
"addrLine": "Chung-Li", |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "lcyu@saturn.yzu.edu.tw" |
|
}, |
|
{ |
|
"first": "Chung-Hsien", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Cheng Kung University", |
|
"location": { |
|
"settlement": "Tainan", |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "chwu@csie.ncku.edu.tw" |
|
}, |
|
{ |
|
"first": "Jui-Feng", |
|
"middle": [], |
|
"last": "Yeh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Chiayi University", |
|
"location": { |
|
"settlement": "Chiayi", |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "hovy@isi.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Word sense annotated corpora are useful resources for many text mining applications. Such corpora are only useful if their annotations are consistent. Most large-scale annotation efforts take special measures to reconcile inter-annotator disagreement. To date, however, nobody has investigated how to automatically determine exemplars in which the annotators agree but are wrong. In this paper, we use OntoNotes, a large-scale corpus of semantic annotations, including word senses, predicate-argument structure, ontology linking, and coreference. To determine the mistaken agreements in word sense annotation, we employ word sense disambiguation (WSD) to select a set of suspicious candidates for human evaluation. Experiments are conducted from three aspects (precision, cost-effectiveness ratio, and entropy) to examine the performance of WSD. The experimental results show that WSD is most effective in identifying erroneous annotations for highly-ambiguous words, while a baseline is better for other cases. The two methods can be combined to improve the cleanup process. This procedure allows us to find approximately 2% of the remaining erroneous agreements in the OntoNotes corpus. A similar procedure can be easily defined to check other annotated corpora.", |
|
"pdf_parse": { |
|
"paper_id": "O08-6002", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Word sense annotated corpora are useful resources for many text mining applications. Such corpora are only useful if their annotations are consistent. Most large-scale annotation efforts take special measures to reconcile inter-annotator disagreement. To date, however, nobody has investigated how to automatically determine exemplars in which the annotators agree but are wrong. In this paper, we use OntoNotes, a large-scale corpus of semantic annotations, including word senses, predicate-argument structure, ontology linking, and coreference. To determine the mistaken agreements in word sense annotation, we employ word sense disambiguation (WSD) to select a set of suspicious candidates for human evaluation. Experiments are conducted from three aspects (precision, cost-effectiveness ratio, and entropy) to examine the performance of WSD. The experimental results show that WSD is most effective in identifying erroneous annotations for highly-ambiguous words, while a baseline is better for other cases. The two methods can be combined to improve the cleanup process. This procedure allows us to find approximately 2% of the remaining erroneous agreements in the OntoNotes corpus. A similar procedure can be easily defined to check other annotated corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Word sense annotated corpora are useful resources for many text mining applications, such as thesaurus construction (Tseng, 2002; Yeh, 2004; , paraphrase extraction (Zhao et al., 2008; Bhaget & Ravichandran, 2008) , opinion mining (Ku & Chen, 2007; Kim & Hovy, 2007) , and medical information extraction (Wu et al., 2005; Yu et al., 2008) . Various machine learning algorithms can then be trained on these corpora to improve the applications' effectiveness. Lately, many such corpora have been developed in different languages, including SemCor (Miller et al., 1993) , LDC-DSO (Ng & Lee, 1996) , Hinoki (Kasahara et al., 2004) , and the sense annotated corpora with the help of Web users (Chklovski & Mihalcea, 2002) . The SENSEVAL 1 (Kilgarriff & Palmer, 2000; Kilgarriff, 2001; Mihalcea & Edmonds, 2004) and SemEval-2007 2 evaluations have also created large amounts of sense tagged data for word sense disambiguation (WSD) competitions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 129, |
|
"text": "(Tseng, 2002;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 140, |
|
"text": "Yeh, 2004;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 184, |
|
"text": "(Zhao et al., 2008;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 185, |
|
"end": 213, |
|
"text": "Bhaget & Ravichandran, 2008)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 248, |
|
"text": "(Ku & Chen, 2007;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 266, |
|
"text": "Kim & Hovy, 2007)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 304, |
|
"end": 321, |
|
"text": "(Wu et al., 2005;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 338, |
|
"text": "Yu et al., 2008)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 566, |
|
"text": "(Miller et al., 1993)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 577, |
|
"end": 593, |
|
"text": "(Ng & Lee, 1996)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 626, |
|
"text": "(Kasahara et al., 2004)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 688, |
|
"end": 716, |
|
"text": "(Chklovski & Mihalcea, 2002)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 734, |
|
"end": 761, |
|
"text": "(Kilgarriff & Palmer, 2000;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 762, |
|
"end": 779, |
|
"text": "Kilgarriff, 2001;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 780, |
|
"end": 805, |
|
"text": "Mihalcea & Edmonds, 2004)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 810, |
|
"end": 824, |
|
"text": "SemEval-2007 2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The OntoNotes (Pradhan et al., 2007a; Hovy et al., 2006) project has created a multilingual corpus of large-scale semantic annotations, including word senses, predicate-argument structure, ontology linking, and coreference 3 . In word sense creation, sense creators generate sense definitions by grouping fine-grained sense distinctions obtained from WordNet and dictionaries into more coarse-grained senses. There are two reasons for using this grouping instead of using WordNet senses directly. First, people have trouble distinguishing many of the WordNet-level distinctions in real text and make inconsistent choices; thus, the use of coarse-grained senses can improve inter-annotator agreement (ITA) (Palmer et al., 2004; . Second, improved ITA enables machines to more accurately learn to perform sense tagging automatically. Sense grouping in OntoNotes has been calibrated to ensure that ITA averages at least 90%. Table 1 shows the OntoNotes sense tags and definitions for the word arm (noun sense). The OntoNotes sense tags have been used for many applications, including the SemEval-2007 evaluation (Pradhan et al., 2007b) , sense merging (Snow et al., 2007) , sense pool verification (Yu et al., 2007) , and class imbalance problems (Zhu & Hovy, 2007 In creating Onto Notes, each word sense annotation involves two annotators and an adjudicator. First, all sentences containing the target word along with its sense distinctions are presented independently to two annotators for sense annotation. If the two annotators agree on the same sense for the target word in a given sentence, then their selection is stored in the corpus. Otherwise, this sentence is double-checked by the adjudicator for the final decision. The major problem of the above annotation scheme is that only the instances where the two annotators disagree are double-checked, while those showing agreement are stored directly without any adjudication. Therefore, if the annotators happen to agree but are both wrong, the corpus becomes polluted by the erroneous annotations. Table 2 shows an actual occurrence of an erroneous instance (sentence) for the target word management. In this example sentence, the actual sense of the target word is management.01, but both of our annotators made a decision of management.02. (Note that there is no difficulty in making this decision; the joint error might have occurred due to annotator fatigue, habituation after a long sequence of management.02 decisions, etc.) Management wants to start downsizing. John was promoted to Management. I spoke to their management, and they're ready to make a deal.", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 37, |
|
"text": "(Pradhan et al., 2007a;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 38, |
|
"end": 56, |
|
"text": "Hovy et al., 2006)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 726, |
|
"text": "(Palmer et al., 2004;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1109, |
|
"end": 1132, |
|
"text": "(Pradhan et al., 2007b)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1149, |
|
"end": 1168, |
|
"text": "(Snow et al., 2007)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1195, |
|
"end": 1212, |
|
"text": "(Yu et al., 2007)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1244, |
|
"end": 1261, |
|
"text": "(Zhu & Hovy, 2007", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 922, |
|
"end": 929, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 2055, |
|
"end": 2062, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
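
{

"text": "This routing can be made concrete with a minimal Python sketch (the function and variable names are ours, purely illustrative, and not part of the OntoNotes tooling); it shows why an agreed-upon error bypasses adjudication entirely:\n\ndef route_annotation(sense_a, sense_b, adjudicate):\n    # If the two annotators agree, their choice is stored directly;\n    # a joint mistake therefore enters the corpus unchecked.\n    if sense_a == sense_b:\n        return sense_a\n    # Only disagreements reach the adjudicator.\n    return adjudicate(sense_a, sense_b)\n\n# Example: both annotators mistakenly choose 'management.02'.\nstored = route_annotation('management.02', 'management.02', lambda a, b: 'management.01')\nprint(stored)  # management.02 -- the erroneous agreement is stored",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1."

},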
|
{ |
|
"text": "Although most annotations in OntoNotes are correct, there is still a small (but unknown) fraction of erroneous annotations in the corpus. Therefore, a cleanup procedure is necessary to produce a high-quality corpus. It is, however, impractical for human experts to evaluate the whole corpus for cleanup. Given that we are focusing on word senses, this study proposes the use of WSD to facilitate the corpus cleanup process. WSD has shown promising accuracy in recent SENSEVAL and SemEval-2007 evaluations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The rest of this work is organized as follows. Section 2 describes the corpus cleanup procedure. Section 3 presents the features for WSD. Section 4 summarizes the experimental results. Conclusions are drawn in Section 5. Figure 1 shows the cleanup procedure (dashed lines) for the OntoNotes corpus. As mentioned earlier, each word, along with its sentence instances, is annotated by two annotators. The annotated corpus, thus, can be divided into two parts according to the annotation results. The first part includes the annotation with disagreement among the two annotators, which is double-checked by the adjudicator. The final decisions made by the adjudicator are stored into the corpus. Since this part is double-checked by the adjudicator, it will not be evaluated by the cleanup procedure. The second part of the corpus is the focus of the cleanup procedure. The WSD system evaluates each instance in the second part. If the output of the WSD system disagrees with the two annotators, the instance is considered to be a suspicious candidate, otherwise it is considered to be clean and is stored into the corpus. The set of suspicious candidates is collected and subsequently evaluated by the adjudicator to identify erroneous annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 229, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
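
{

"text": "As a minimal sketch of this selection rule (assuming a trained WSD classifier exposed as a predict function; the instance fields are our own naming, not the OntoNotes format):\n\ndef split_for_cleanup(instances, wsd_predict):\n    # Screen only instances on which the two annotators agreed;\n    # disagreements were already adjudicated.\n    suspicious, clean = [], []\n    for inst in instances:\n        predicted = wsd_predict(inst['sentence'], inst['target'])\n        if predicted != inst['agreed_sense']:\n            suspicious.append(inst)  # forwarded to the adjudicator\n        else:\n            clean.append(inst)       # stored into the corpus directly\n    return suspicious, clean",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1."

},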
|
{ |
|
"text": "This study takes a supervised learning approach to build a WSD system from the OntoNotes corpus. The feature set used herein is similar to several state-of-the-art WSD systems (Lee & Ng, 2002; Ando, 2006; Tratz et al., 2007; Cai et al., 2007; Agirre & Lopez de Lacalle, 2007; Specia et al., 2007) , which is further integrated into a Na\u00efve Bayes classifier (Lee & Ng, 2002; Mihalcea, 2007) . In addition, a new feature, predicate-argument structure, provided by the OntoNotes corpus is integrated as well. The feature set includes:", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 192, |
|
"text": "(Lee & Ng, 2002;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 204, |
|
"text": "Ando, 2006;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 224, |
|
"text": "Tratz et al., 2007;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 242, |
|
"text": "Cai et al., 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 275, |
|
"text": "Agirre & Lopez de Lacalle, 2007;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 296, |
|
"text": "Specia et al., 2007)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 373, |
|
"text": "(Lee & Ng, 2002;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 389, |
|
"text": "Mihalcea, 2007)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Sense Disambiguation", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "Part-of-Speech (POS) tags: This feature includes the POS tags in the positions (P -3 , P -2 , P -1 , P 0 , P 1 , P 2 , P 3 ), relative to the POS tag of the target word. For instance, the POS sequence of the constituent \"\u2026mediator in an attempt to break the\u2026\" is \"NN NN IN DT TO VB DT\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Sense Disambiguation", |
|
"sec_num": "3." |
|
}, |
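
{

"text": "A small sketch of how such a POS window can be extracted (operating on a plain list of (word, tag) pairs; the 'PAD' convention at sentence edges is our assumption):\n\ndef pos_window(tagged, i, k=3):\n    # tagged: list of (word, POS) pairs; i: index of the target word.\n    # Returns the POS tags at positions -k..k, padded at the edges.\n    tags = []\n    for j in range(i - k, i + k + 1):\n        tags.append(tagged[j][1] if 0 <= j < len(tagged) else 'PAD')\n    return tags\n\ntagged = [('mediator', 'NN'), ('in', 'IN'), ('an', 'DT'), ('attempt', 'NN'), ('to', 'TO'), ('break', 'VB'), ('the', 'DT')]\nprint(pos_window(tagged, 3))  # ['NN', 'IN', 'DT', 'NN', 'TO', 'VB', 'DT']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Sense Disambiguation",

"sec_num": "3."

},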
|
{

"text": "Local Collocations: This feature includes single words and multi-word n-grams. The single words include (W-3, W-2, W-1, W0, W1, W2, W3), relative to the target word W0. Similarly, the multi-word n-grams include (W-2,-1, W-1,1, W1,2, W-3,-2,-1, W-2,-1,1, W-1,1,2, W1,2,3). For instance, the multi-word n-grams of the above example constituent include {in_an, an_to, to_break, mediator_in_an, in_an_to, an_to_break, to_break_the}.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Sense Disambiguation",

"sec_num": "3."

},
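
{

"text": "The collocation positions can be generated mechanically; this sketch (our own helper, again padding with 'PAD' at sentence edges) reproduces the n-gram features for the example constituent:\n\ndef collocations(words, i):\n    # words: token list; i: index of the target word W0.\n    def w(offset):\n        j = i + offset\n        return words[j] if 0 <= j < len(words) else 'PAD'\n    singles = [w(o) for o in (-3, -2, -1, 0, 1, 2, 3)]\n    patterns = [(-2, -1), (-1, 1), (1, 2), (-3, -2, -1), (-2, -1, 1), (-1, 1, 2), (1, 2, 3)]\n    ngrams = ['_'.join(w(o) for o in p) for p in patterns]\n    return singles, ngrams\n\nwords = ['mediator', 'in', 'an', 'attempt', 'to', 'break', 'the']\nprint(collocations(words, 3)[1])\n# ['in_an', 'an_to', 'to_break', 'mediator_in_an', 'in_an_to', 'an_to_break', 'to_break_the']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Sense Disambiguation",

"sec_num": "3."

},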
|
{ |
|
"text": "This feature can be considered as a global feature, consisting of 5 words prior to and after the target word, without regard to position.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-Words:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Predicate-Argument Structure: The predicate-argument structure captures the semantic relations between the predicates and their arguments within a sentence. Consider the following example sentence. The argument label Arg0 is usually assigned to the agent, causer, and experiencer, while Arg1 is usually assigned to the patient. The ArgM-TMP represents a temporal modifier (Babko-Malaya, 2006; Palmer et al., 2005) . The predicate-argument structure of the above sentence is illustrated in Figure 2 . The semantic relations can be either direct or indirect. A direct relation is used to model a verb-noun (VN), whereas an indirect relation is used to model a noun-noun (NN) relation. Additionally, an NN-relation can be built from the combination of two VN-relations with the same predicate. Table 3 presents some examples. For instance, NN1 can be built by combining VN1 and VN2. Therefore, the two features, VN1 and NN3, can be used to disambiguate the noun arm 4 . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 372, |
|
"end": 392, |
|
"text": "(Babko-Malaya, 2006;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 413, |
|
"text": "Palmer et al., 2005)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 489, |
|
"end": 497, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 791, |
|
"end": 798, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bag-of-Words:", |
|
"sec_num": null |
|
}, |
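
{

"text": "The composition of NN-relations from VN-relations can be sketched as follows (a simplification under our own relation encoding; the deadlock example is hypothetical, not taken from the corpus):\n\nfrom itertools import combinations\n\ndef nn_relations(vn_relations):\n    # vn_relations: (predicate, argument_label, noun) triples.\n    # Two VN-relations sharing a predicate induce an indirect\n    # noun-noun relation between their argument nouns.\n    by_pred = {}\n    for pred, label, noun in vn_relations:\n        by_pred.setdefault(pred, []).append(noun)\n    nn = []\n    for pred, nouns in by_pred.items():\n        for n1, n2 in combinations(nouns, 2):\n            nn.append((n1, n2, pred))\n    return nn\n\nvn = [('break', 'Arg0', 'mediator'), ('break', 'Arg1', 'deadlock')]\nprint(nn_relations(vn))  # [('mediator', 'deadlock', 'break')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Sense Disambiguation",

"sec_num": "3."

},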
|
{ |
|
"text": "The experiment data used herein consisted of the 35 nouns from the SemEval-2007 English Lexical Sample Task (Pradhan et al., 2007b) . All sentences containing the 35 nouns were selected from the OntoNotes corpus, resulting in a set of 16,329 sentences. This data set was Word Sense Disambiguation randomly split into training and test sets using different proportions (1:9 to 9:1, 10% increments). The WSD systems (described in Section 3) were then built from the different portions of the training set, called WSD_1 to WSD_9, respectively, and applied to their corresponding test sets. In each test set, the instances with disagreement among the annotators were excluded, since they have already been double-checked by the adjudicator. A baseline system was also implemented using the principle of most frequent sense (MFS), where each word sense distribution was retrieved from the OntoNotes corpus. Table 4 shows the accuracy of the baseline and WSD systems. The output of WSD may agree or disagree with the annotators. The instances with disagreement were selected from each WSD system as suspicious candidates. This experiment randomly selected at most 20 suspicious instances for each noun then unified these instances to form a suspicious set of 687 instances. An adjudicator who is a linguistic expert then evaluated the suspicious set, and agreed in 42 instances with the WSD systems, indicating about 6% (42/687) truly erroneous annotations. This corresponds to 2.6% (42/16329) erroneous annotations in the corpus as a whole, which we verified by an independent random spot check.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 131, |
|
"text": "(Pradhan et al., 2007b)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 902, |
|
"end": 909, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment Setup", |
|
"sec_num": "4.1" |
|
}, |
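
{

"text": "The MFS baseline amounts to a few lines; in this sketch the toy sense counts and sense names are our own stand-ins for the OntoNotes sense distribution (the numbers echo the skewed distribution of rate discussed in Section 4.4):\n\nfrom collections import Counter\n\ndef train_mfs(training_instances):\n    # training_instances: (lemma, sense) pairs from the corpus.\n    counts = {}\n    for lemma, sense in training_instances:\n        counts.setdefault(lemma, Counter())[sense] += 1\n    return {lemma: c.most_common(1)[0][0] for lemma, c in counts.items()}\n\ntrain = [('rate', 'rate.01')] * 92 + [('rate', 'rate.02')] * 8\nmfs = train_mfs(train)\nprint(mfs['rate'])  # rate.01 -- always predicts the predominant sense",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiment Setup",

"sec_num": "4.1"

},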
|
{ |
|
"text": "In the following sections, we examine the performance of WSD from three aspects: precision, cost-effectiveness ratio, and entropy. In addition, we summarize a general cleanup procedure for other sense-annotated corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The cleanup precision for a single WSD system can be defined as the number of erroneous instances identified by the WSD system, divided by the number of suspicious candidates selected by the WSD system. An erroneous instance refers to an instance where the annotators agree with each other but disagree with the adjudicator. Table 5 lists the cleanup precision of the baseline and WSD systems. The experimental results show that WSD_7 (trained on 70% training data) identified 17 erroneous instances out of 120 selected suspicious candidates, thus yielding the highest precision of 0.142. Another observation is that the upper bound of WSD_7 was 0.35 (42/120) under the assumption that it identified all erroneous instances. This low precision discourages the use of WSD to automatically correct erroneous annotations. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 332, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Cleanup Precision Analysis", |
|
"sec_num": "4.2" |
|
}, |
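
{

"text": "The reported figures follow directly from this definition; a short check using the counts quoted above:\n\nidentified, selected, total_errors = 17, 120, 42\nprecision = identified / selected      # 0.1417, reported as 0.142\nupper_bound = total_errors / selected  # 0.35, if all 42 were caught\nprint(round(precision, 3), round(upper_bound, 2))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Cleanup Precision Analysis",

"sec_num": "4.2"

},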
|
{ |
|
"text": "The cleanup procedure used herein is a semi-automatic process; that is, WSD is applied in the first stage to select suspicious candidates for human evaluation in the later stage. Obviously, we would like to minimize the number of candidates the adjudicator has to examine. Thus, we use the metric cost-effectiveness (CE) ratio, which is defined as effectiveness divided by cost, to measure the performance of WSD. The cost rate is defined as the number of suspicious instances selected by a single WSD system, divided by the total number of suspicious instances in the suspicious set. The effectiveness rate is defined as the number of erroneous instances identified by a single WSD system, divided by the total number of erroneous instances in the suspicious set. On the other hand, the missing rate can be defined as 1-effectiveness rate. In this experiment, the baseline value of the cost-effectiveness ratio is 1, which means that the human expert needs to evaluate all 687 instances in the suspicious set to identify the 42 erroneous instances. Figure 3 illustrates the CE ratio of the WSD systems. The most cost-effective WSD system was WSD_7. The CE ratios of the baseline and WSD_7 are listed in Table 6 . The experimental results indicate that 17.5% of all suspicious instances were required to be evaluated to identify about 40% of the erroneous annotations when using WSD_7. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1050, |
|
"end": 1058, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 1204, |
|
"end": 1211, |
|
"text": "Table 6", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Cleanup Cost-Effectiveness Analysis", |
|
"sec_num": "4.3" |
|
}, |
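
{

"text": "The CE ratio of WSD_7 can be reproduced from the counts quoted above (687 suspicious instances in total, of which 42 are erroneous; WSD_7 selects 120 and identifies 17):\n\nselected, identified = 120, 17\ntotal_suspicious, total_errors = 687, 42\n\ncost_rate = selected / total_suspicious         # 0.175 (17.5%)\neffectiveness_rate = identified / total_errors  # 0.405 (about 40%)\nmissing_rate = 1 - effectiveness_rate           # 0.595\nce_ratio = effectiveness_rate / cost_rate       # about 2.31\nprint(round(cost_rate, 3), round(effectiveness_rate, 3), round(ce_ratio, 2))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Cleanup Cost-Effectiveness Analysis",

"sec_num": "4.3"

},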
|
{ |
|
"text": "So far, the experimental results show that the best WSD system can help human experts identify about 40% erroneous annotations, but it still missed the other 60%. To improve performance, we conducted experiments to analyze the effect of word entropy with respect to WSD performance on identifying erroneous annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entropy Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "For the SemEval 35 nouns used in this experiment, some words are very ambiguous and some words are not. This property of ambiguity may affect the performance of WSD systems in identifying erroneous annotation. To this end, this experiment used entropy to measure the ambiguity of words (Melamed, 1997) . The entropy of a word can be computed by the word sense distribution, defined as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 301, |
|
"text": "(Melamed, 1997)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entropy Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "2 ( ) ( )log ( ), i i i ws W H W Pw s Pw s \u2208 = \u2212 \u2211", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Entropy Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "where ( ) H W denotes the entropy of a word W, and P( i ws ) denotes the probability of a word sense. A high entropy value indicates a high ambiguity level. For instance, the noun defense has 7 senses (see Table 8 ) in the OntoNotes corpus, occurring with the distribution {. 14, .18, .19, .08, .04, .28 , .09}, thus yielding a relative high entropy value (2.599). Conversely, the entropy of the noun rate is low (0.388), since it has only two senses with very skewed distribution {.92, .08}.", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 303, |
|
"text": "14, .18, .19, .08, .04, .28", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 206, |
|
"end": 213, |
|
"text": "Table 8", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Entropy Analysis", |
|
"sec_num": "4.4" |
|
}, |
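
{

"text": "Equation (1) and both examples can be checked directly; the small discrepancies from the reported values come from rounding in the printed sense distributions:\n\nfrom math import log2\n\ndef entropy(dist):\n    # dist: the sense probabilities P(ws_i) of one word, as in Eq. (1).\n    return -sum(p * log2(p) for p in dist if p > 0)\n\ndefense = [.14, .18, .19, .08, .04, .28, .09]\nrate = [.92, .08]\nprint(round(entropy(defense), 3))  # ~2.602 (reported: 2.599)\nprint(round(entropy(rate), 3))     # ~0.402 (reported: 0.388)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Entropy Analysis",

"sec_num": "4.4"

},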
|
{ |
|
"text": "Consider the two groups of the SemEval nouns: the nouns for which at least one (Group 1) or none (Group 2) of their erroneous instances can be identified by the machine. The use of the criteria \"at least one\" and \"none\" is to distinguish whether or not the machine can identify the erroneous instances in these two groups of nouns. The average entropy of these two groups of nouns was computed, as shown in Table 7 . An independent t-test was then used to determine whether or not the difference of the average entropy among these two groups was statistically significant. The experimental results show that WSD_7 was more effective on identifying erroneous annotations occurring in highly-ambiguous words (p<0.05), while the baseline system has no such tendency (p=0.368). Table 8 shows the detailed analysis of WSD performance on different words. As indicated, WSD_7 identified the erroneous instances (7/7) occurring in the two top-ranked highly-ambiguous nouns, i.e., defense and position, but missed all those (0/12) occurring in the two most unambiguous words, i.e., move and rate. The major reason is that the sense distribution of unambiguous words is often skewed, thus, WSD systems built from such imbalanced data tend to suffer from the over-fitting problem; that is, they tend to over-fit the predominant sense class and ignore small sense classes (Zhu & Hovy, 2007) . Fortunately, the over-fitting problem can be greatly reduced when the entropy of words exceeds a certain threshold (e.g., the dashed line in Table 8 ), since the word sense has become more evenly distributed. authority, bill, capital, carrier, development, drug, effect, exchange, future, network, people, point, policy, president, share, source, space ", |
|
"cite_spans": [ |
|
{ |
|
"start": 1360, |
|
"end": 1378, |
|
"text": "(Zhu & Hovy, 2007)", |
|
"ref_id": "BIBREF36" |
|
}
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 414, |
|
"text": "Table 7", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 781, |
|
"text": "Table 8", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 1522, |
|
"end": 1529, |
|
"text": "Table 8", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Entropy Analysis", |
|
"sec_num": "4.4" |
|
}, |
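
{

"text": "The significance test above is a standard two-sample t-test on per-word entropies; a sketch with scipy (the entropy lists are placeholders, not the paper's actual per-word values):\n\nfrom scipy import stats\n\ngroup1 = [2.60, 2.33, 1.95, 1.71]  # placeholder entropies: errors identified\ngroup2 = [0.39, 0.52, 0.88, 1.02]  # placeholder entropies: none identified\n\nt, p = stats.ttest_ind(group1, group2)\nprint(p < 0.05)  # True here: a significant difference in ambiguity",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Entropy Analysis",

"sec_num": "4.4"

},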
|
{ |
|
"text": "Another observation from Table 8 is that WSD_7 identified more erroneous instances when the word entropy exceeded the cut-point, since the over-fitting problem was reduced. Conversely, MFS identified more instances when the word entropy was below the cut-point. This finding encourages the use of a combination of WSD_7 and MFS for corpus cleanup; that is, different strategies can be used with different entropy intervals. For this experimental data, MFS and WSD_7 can be applied below and above the cut-point, respectively, to select the suspicious instances for human evaluation. Therefore, the final suspicious set can be generated by combining the suspicious instances suggested by MFS and WSD_7. As illustrated in Figure 4 , when the entropy of words increased, the accumulated effectiveness rates of both WSD_7 and MFS increased accordingly, since more erroneous instances were identified. Additionally, the difference of the accumulated effect rate of MFS and WSD_7 increased gradually from the beginning until the cut-point, since MFS identified more erroneous instances than WSD_7 did in this stage. When the entropy exceeded the cut-point, WSD_7 was more effective and, thus, its effectiveness rate kept increasing, while that of MFS increased slowly, thus, their difference was decreased with the rise of the entropy. For the combination of MFS and WSD_7, its effectiveness rate before the cut-point was the same as that of MFS, since MFS was used in this stage to select the suspicious set. When WSD was used after the cut-point, the effectiveness rate of the combination system increased continuously, and finally reached 0.5 (21/42). Based on the above experimental results, the most cost-effective method for corpus cleanup is to use the combination method and begin with the most ambiguous words, since the WSD system in the combination method is more effective in identifying erroneous instances occurring in highly-ambiguous words and these words are also more important for many applications. Figure 5 shows the curve of the CE ratios of the combination method by starting with the most ambiguous word. The results indicate that the CE ratio of the combination method decreased gradually after more words with lower entropy were involved in the cleanup procedure. Additionally, the CE ratio of the combination method was improved by using MFS after the cut-point and finally reached 2.50, indicating that 50% (21/42) erroneous instances can be identified by double-checking 20% (137/687) of the suspicious set. This CE ratio was better than 2.31 and 1.48, reached by WSD_7 and MFS, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 32, |
|
"text": "Table 8", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 720, |
|
"end": 729, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 2014, |
|
"end": 2022, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Combination of WSD and MFS", |
|
"sec_num": "4.5" |
|
}, |
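
{

"text": "The combination strategy reduces to routing each word by its entropy; a minimal sketch (cut_point and the two flag dictionaries are assumptions of this illustration):\n\ndef combined_suspicious(words, entropy, mfs_flags, wsd7_flags, cut_point):\n    # entropy: word -> H(W); *_flags: word -> suspicious instances\n    # proposed by that system. Trust MFS below the cut-point and\n    # WSD_7 above it, where WSD_7 is less prone to over-fitting.\n    suspicious = []\n    for word in words:\n        source = mfs_flags if entropy[word] < cut_point else wsd7_flags\n        suspicious.extend(source.get(word, []))\n    return suspicious",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Combination of WSD and MFS",

"sec_num": "4.5"

},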
|
{ |
|
"text": "The proposed cleanup procedure can be applied to other sense annotated corpora by the following steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination of WSD and MFS", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Build the baseline (MFS) and WSD systems from the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination of WSD and MFS", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Create a suspicious set from the WSD systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination of WSD and MFS", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Calculate the entropy for each word in terms of it sense distribution in the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination of WSD and MFS", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Choose a cut-point value. Select a small portion of words with entropy within a certain interval (e.g., 1.0 ~ 1.5 in Table 8 ) for human evaluation to decide an appropriate cut-point value. The cut-point value should not be too low or too high, since WSD systems may suffer from the over-fitting problem if the value is too low, and the performance would be dominated by the baseline system if the value is too high.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 124, |
|
"text": "Table 8", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Combination of WSD and MFS", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Combine the baseline and best single WSD system through the cut-point.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Cleanup of Mistaken Agreement Using 417 Word Sense Disambiguation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Start the cleanup procedure in the descending order of word entropy until the CE ratio is below a predefined threshold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Cleanup of Mistaken Agreement Using 417 Word Sense Disambiguation", |
|
"sec_num": null |
|
}, |
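
{

"text": "Putting the steps together, the general cleanup loop looks roughly as follows (all names are illustrative; the stopping rule uses the running yield of double-checking as a simple stand-in for the CE-ratio threshold of the last step):\n\ndef cleanup(corpus, wsd, mfs, entropy, cut_point, min_yield, adjudicate):\n    # corpus: word -> instances with annotator agreement. Words are\n    # processed in descending entropy order (most ambiguous first).\n    checked = found = 0\n    for word in sorted(corpus, key=entropy.get, reverse=True):\n        system = wsd if entropy[word] >= cut_point else mfs\n        for inst in corpus[word]:\n            if system(inst) == inst['agreed_sense']:\n                continue  # considered clean, stored directly\n            checked += 1\n            found += bool(adjudicate(inst))  # human double-check\n        if checked and found / checked < min_yield:\n            break  # double-checking has stopped paying off\n    return checked, found",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Combination of WSD and MFS",

"sec_num": "4.5"

},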
|
{ |
|
"text": "This study has presented a cleanup procedure to identify incorrect sense annotation in a corpus. The cleanup procedure incorporates WSD systems to select a set of suspicious instances for human evaluation. The experiments are conducted from three aspects: precision, cost-effectiveness ratio, and entropy, to examine the performance of WSD. The experimental results show that the WSD systems are more effective on highly-ambiguous words. Additionally, the most cost-effective cleanup strategy is to use the combination method and begin with the most ambiguous words. The incorrect sense annotations found in this study can be used for SemEval-2007 to improve the accuracy of WSD evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "The absence of related work on (semi-) automatically determining cases of erroneous agreement among annotators in a corpus is rather surprising. Variants of the method described here, replacing WSD for whatever procedure is appropriate for the phenomenon annotated in the corpus (sentiment recognition for a sentiment corpus, etc.), are easy to implement and may produce useful results for corpora in current use. Future work will focus on devising an algorithm to perform the cleanup procedure iteratively on the whole corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "Our WSD system does not include the sense identifier (except for the target word) for word-level training and testing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "UBC-ALM: Combining k-NN with SVD for WSD", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Lopez De Lacalle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the 4th International Workshop on Semantic Evaluations (SemEval-2007) at ACL-07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "342--345", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agirre, E., & Lopez de Lacalle, O. (2007). UBC-ALM: Combining k-NN with SVD for WSD. In Proc. of the 4th International Workshop on Semantic Evaluations (SemEval-2007) at ACL-07, 342-345.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Applying Alternating Structure Optimization to Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Ando", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "77--84", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ando, R.K. (2006). Applying Alternating Structure Optimization to Word Sense Disambiguation. In Proc. of CoNLL, 77-84.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "PropBank Annotation Guidelines", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Babko-Malaya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Babko-Malaya, O. (2006). PropBank Annotation Guidelines.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Large Scale Acquisition of Paraphrases for Learning Surface Patterns", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Bhagat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ravichandran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of the 46th Annual Meeting of the Association of Computational Linguistics (ACL-08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "574--682", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhagat, R., & Ravichandran, D. (2008). Large Scale Acquisition of Paraphrases for Learning Surface Patterns. In Proc. of the 46th Annual Meeting of the Association of Computational Linguistics (ACL-08), 574-682.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Improving Word Sense Disambiguation Using Topic Features", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "The", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1015--1023", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cai, J.F., Lee, W.S., & The, Y.W. (2007). Improving Word Sense Disambiguation Using Topic Features. In Proc. of EMNLP-CoNLL, 1015-1023.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Building a Sense Tagged Corpus with Open Mind Word Expert", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Chklovski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of the Workshop on Word Sense Disambiguation: Recent Successes and Future Directions at ACL-02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chklovski, T., & Mihalcea, R. (2002). Building a Sense Tagged Corpus with Open Mind Word Expert. In Proc. of the Workshop on Word Sense Disambiguation: Recent Successes and Future Directions at ACL-02, 116-122.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "WordNet: An Electronic Lexical Database", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fellbaum, C. (1998). WordNet: An Electronic Lexical Database. Cambridge, MA: MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "OntoNotes: The 90% Solution", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of HLT/NAACL-06", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hovy, E.H., Marcus, M., Palmer, M., Ramshaw, L., & Weischedel, R. (2006). OntoNotes: The 90% Solution. In Proc. of HLT/NAACL-06, 57-60.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Construction of a apanese Semantic Lexicon: Lexeed", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kasahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Sato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Bond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Tanaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Fujita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kanasugi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "IPSG SIG: 2004-NLC-159", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "75--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahara, K., Sato, H., Bond, F., Tanaka, T., Fujita, S., Kanasugi, T., et al. (2004). Construction of a apanese Semantic Lexicon: Lexeed. In IPSG SIG: 2004-NLC-159, Tokyo, 75-82.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "English Lexical Sample Task Description", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kilgarriff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of the SENSEVAL-2 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kilgarriff, A. (2001). English Lexical Sample Task Description. In Proc. of the SENSEVAL-2 Workshop, 17-20.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "SENSEVAL: Evaluating Word Sense Disambiguation Programs", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kilgarriff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Computer and the Humanities", |
|
"volume": "34", |
|
"issue": "1-2", |
|
"pages": "1--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kilgarriff, A., & Palmer, M. editors. (2000). SENSEVAL: Evaluating Word Sense Disambiguation Programs, Computer and the Humanities, 34(1-2),1-13.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "CRYSTAL: Analyzing Predictive Opinions on the Web", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim, S.M., & Hovy, E.H. (2007). CRYSTAL: Analyzing Predictive Opinions on the Web. In Proc. of EMNLP-CoNLL, Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Mining Opinions from the Web: Beyond Relevance Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Ku", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Journal of American Society for Information Science and Technology", |
|
"volume": "58", |
|
"issue": "12", |
|
"pages": "1838--1850", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ku, L.W., & Chen, H.H. (2007). Mining Opinions from the Web: Beyond Relevance Retrieval. Journal of American Society for Information Science and Technology, 58(12), 1838-1850.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "An Empirical Evaluation of Knowledge Sources and Learning Algorithms for Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee, Y.K., & Ng, H.T. (2002). An Empirical Evaluation of Knowledge Sources and Learning Algorithms for Word Sense Disambiguation. In Proc. of EMNLP, 41-48.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Measuring Semantic Entropy", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Melamed", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proc. of ACL-SIGLEX Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melamed, I.D. (1997). Measuring Semantic Entropy. In Proc. of ACL-SIGLEX Workshop, 41-46.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Using Wikipedia for AutomaticWord Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of NAACL/HLT-07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "196--203", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihalcea, R. (2007). Using Wikipedia for AutomaticWord Sense Disambiguation. In Proc. of NAACL/HLT-07, 196-203.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Proc. of SENSEVAL-3", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Edmonds", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihalcea, R., & Edmonds, P. editors. (2004). In Proc. of SENSEVAL-3.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A Semantic Concordance", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Leacock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Tengi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Bunker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Proc. of the 3rd DARPA Workshop on Human Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "303--308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miller, G., Leacock, C., Tengi, R., & Bunker, R. (1993). A Semantic Concordance. In Proc. of the 3rd DARPA Workshop on Human Language Technology, 303-308.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Integrating Multiple Knowledge Sources to Disambiguate Word Sense: An Exemplar-based Approach", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proc. of the 34th Meeting of the Association for Computational Linguistics (ACL-96)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ng, H.T., & Lee, H.B. (1996). Integrating Multiple Knowledge Sources to Disambiguate Word Sense: An Exemplar-based Approach. In Proc. of the 34th Meeting of the Association for Computational Linguistics (ACL-96), 40-47.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Different Sense Granularities for Different Applications", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Babko-Malaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the 2nd International Workshop on Scalable Natural Language Understanding at HLT/NAACL-04", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Palmer, M., Babko-Malaya, O., & Dang, H.T. (2004). Different Sense Granularities for Different Applications. In Proc. of the 2nd International Workshop on Scalable Natural Language Understanding at HLT/NAACL-04.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Making Fine-grained and Coarse-grained Sense Distinctions, Both Manually and Automatically", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Journal of Natural Language Engineering", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "137--163", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Palmer, M., Dang, H.T., & Fellbaum, C. (2006). Making Fine-grained and Coarse-grained Sense Distinctions, Both Manually and Automatically. Journal of Natural Language Engineering, 13, 137-163.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The Proposition Bank: An Annotated Corpus of Semantic Roles", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Kingsbury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Computational Linguistics", |
|
"volume": "31", |
|
"issue": "1", |
|
"pages": "71--106", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Palmer, M., Gildea, D., & Kingsbury, P. (2005). The Proposition Bank: An Annotated Corpus of Semantic Roles. Computational Linguistics, 31(1), 71-106.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "OntoNotes: A Unified Relational Semantic Representation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the First IEEE International Conference on Semantic Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "517--524", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pradhan, S., Hovy, E.H., Marcus, M., Palmer, M., Ramshaw, L., & Weischedel, R. (2007a). OntoNotes: A Unified Relational Semantic Representation. In Proc. of the First IEEE International Conference on Semantic Computing (ICSC-07), 517-524.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "SemEval-2007 Task 17: English Lexical Sample, SRL and All Words", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Dligach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the 4th International Workshop on Corpus Cleanup of Mistaken Agreement Using", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pradhan, S., Loper, E., Dligach, D., & Palmer, M. (2007b). SemEval-2007 Task 17: English Lexical Sample, SRL and All Words. In Proc. of the 4th International Workshop on Corpus Cleanup of Mistaken Agreement Using 419", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Word Sense Disambiguation Semantic Evaluations (SemEval-2007) at ACL-07", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "87--92", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Word Sense Disambiguation Semantic Evaluations (SemEval-2007) at ACL-07, 87-92.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Automatic Thesaurus Generation for Chinese Documents", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Tseng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Journal of the American Society for Information Science and Technology", |
|
"volume": "53", |
|
"issue": "13", |
|
"pages": "1130--1138", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tseng, Y.S. (2002). Automatic Thesaurus Generation for Chinese Documents. Journal of the American Society for Information Science and Technology, 53(13), 1130-1138.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Learning to Merge Word Senses", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Snow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Prakash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1005--1014", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Snow, R., Prakash, S., Jurafsky, D., & Ng, A.Y. (2007). Learning to Merge Word Senses. In Proc. of EMNLP-CoNLL, 1005-1014.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Learning Expressive Models for Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Stevenson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "&", |
|
"middle": [], |
|
"last": "Das Gracas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Nunes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the 45th Annual Meeting of the Association of Computational Linguistics (ACL-07)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Specia, L., Stevenson, M., &. das Gracas V. Nunes, M. (2007). Learning Expressive Models for Word Sense Disambiguation. In Proc. of the 45th Annual Meeting of the Association of Computational Linguistics (ACL-07), 41-48.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "PNNL: A Supervised Maximum Entropy Approach to Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Tratz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Sanfilippo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Gregory", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Chappell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Posse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Whitney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the 4th International Workshop on Semantic Evaluations (SemEval-2007) at ACL-07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "264--267", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tratz, S., Sanfilippo, A., Gregory, M., Chappell, A., Posse, C., & Whitney, P. (2007). PNNL: A Supervised Maximum Entropy Approach to Word Sense Disambiguation. In Proc. of the 4th International Workshop on Semantic Evaluations (SemEval-2007) at ACL-07, 264-267.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Using Semantic Dependencies to Mine Depressive Symptoms from Consultation Records", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Jang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "IEEE Intelligent Systems", |
|
"volume": "20", |
|
"issue": "6", |
|
"pages": "50--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wu, C.H., Yu, L.C., & Jang, F.L. (2005). Using Semantic Dependencies to Mine Depressive Symptoms from Consultation Records. IEEE Intelligent Systems, 20(6), 50-58.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Automated Alignment and Extraction of Bilingual Domain Ontology for Cross-Language Domain-Specific Applications", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Yeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the 20th COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1140--1146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yeh, J.F., Wu, C.H., Chen, M.J., & Yu, L.C. (2004). Automated Alignment and Extraction of Bilingual Domain Ontology for Cross-Language Domain-Specific Applications. In Proc. of the 20th COLING, 1140-1146.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Ontology-based Speech Act Identification in a Bilingual Dialog System Using Partial Pattern Trees", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Yeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Journal of the American Society for Information Science and Technology", |
|
"volume": "59", |
|
"issue": "5", |
|
"pages": "684--694", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yeh, J.F., Wu, C.H., & Chen, M.J. (2008). Ontology-based Speech Act Identification in a Bilingual Dialog System Using Partial Pattern Trees. Journal of the American Society for Information Science and Technology, 59(5), 684-694.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "OntoNotes: Sense Pool Verification Using Google N-gram and Statistical Tests", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Philpot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the OntoLex Workshop at the 6th International Semantic Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu, L.C., Wu, C.H., Philpot, A., & Hovy, E.H. (2007). OntoNotes: Sense Pool Verification Using Google N-gram and Statistical Tests. In Proc. of the OntoLex Workshop at the 6th International Semantic Web Conference (ISWC 2007).", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "HAL-based Evolutionary Inference for Pattern Induction from Psychiatry Web Resources", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Yeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Jang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "IEEE Trans. Evolutionary Computation", |
|
"volume": "12", |
|
"issue": "2", |
|
"pages": "160--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu, L.C., Wu, C.H., Yeh, J.F., & Jang, F.L. (2008). HAL-based Evolutionary Inference for Pattern Induction from Psychiatry Web Resources. IEEE Trans. Evolutionary Computation, 12(2), 160-170.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Pivot Approach for Extracting Paraphrase Patterns from Bilingual Corpora", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of the 46th Annual Meeting of the Association of Computational Linguistics (ACL-08)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "780--788", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhao, S., Wang, H., Liu, T., & Li, S. (2008). Pivot Approach for Extracting Paraphrase Patterns from Bilingual Corpora. In Proc. of the 46th Annual Meeting of the Association of Computational Linguistics (ACL-08), 780-788.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Active Learning for Word Sense Disambiguation with Methods for Addressing the Class Imbalance Problem", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "783--790", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhu, J., & Hovy, E.H. (2007). Active Learning for Word Sense Disambiguation with Methods for Addressing the Class Imbalance Problem. In Proc. of EMNLP-CoNLL, 783-790.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Corpus cleanup procedure.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Arg0 The New York arm of the London-based firm] auctioned off [ Arg1 the estate of John T. Dorrance Jr., the Campbell's Soup Co. heir,] [ ArgM-TMP last week].", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Example of predicate-argument structure.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"text": "CE ratio of WSD systems with different training portions.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"text": "Effectiveness rate against word entropy.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"text": "CE ratio against word entropy.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>).</td></tr></table>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Example sentence:</td></tr><tr><td>The 45-year-old Mr. Kuehn, who has a background in crisis management, succeeds Alan D.</td></tr><tr><td>Rubendall, 45.</td></tr><tr><td>management.01: Overseeing or directing. Refers to the act of managing something.</td></tr><tr><td>He was given overall management of the program.</td></tr><tr><td>I'm a specialist in risk management.</td></tr><tr><td>The economy crashed because of poor management.</td></tr><tr><td>management.02: The people in charge. The ones actually doing the managing.</td></tr></table>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"2\">Relation Type</td><td/><td>Example</td></tr><tr><td>VN relation</td><td/><td/><td>VN1: (auction.01, Arg0, arm.03)</td></tr><tr><td>V</td><td>ARG1</td><td>N</td><td>VN2: (auction.01, Arg1, estate.01) VN3: (auction.01, ArgM-TMP, <DATE>)</td></tr><tr><td>NN relation:</td><td/><td/><td/></tr><tr><td/><td>V</td><td/><td>NN1: (arm.03, Arg0-Arg1, estate.01)</td></tr><tr><td>A RG 0</td><td colspan=\"2\">A R G 1</td><td>NN2: (estate.01, Arg1-ArgM-TMP, <DATE>)</td></tr><tr><td/><td/><td/><td>NN3: (arm.03, Arg0-ArgM-TMP, <DATE>)</td></tr><tr><td>N</td><td/><td>N</td><td/></tr></table>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Baseline</td><td/><td/><td/><td/><td>WSD</td><td/><td/><td/></tr><tr><td/><td>(MFS)</td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td><td>0.5</td><td>0.6</td><td>0.7</td><td>0.8</td><td>0.9</td></tr><tr><td>Accuracy</td><td>0.696</td><td colspan=\"9\">0.751 0.798 0.809 0.819 0.822 0.824 0.831 0.836 0.832</td></tr></table>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Baseline</td><td/><td/><td/><td/><td>WSD</td><td/><td/><td/><td/></tr><tr><td/><td>(MFS)</td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td><td>0.5</td><td>0.6</td><td>0.7</td><td>0.8</td><td>0.9</td></tr><tr><td>Prec</td><td>0.090 (17/188)</td><td>0.113 (20/177)</td><td>0.112 (16/143)</td><td>0.113 (17/150)</td><td>0.124 (16/129)</td><td>0.123 (15/122)</td><td>0.127 (16/126)</td><td>0.142 (17/120)</td><td>0.130 (14/108)</td><td>0.125 (14/112)</td></tr></table>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Cost</td><td>Effectiveness</td><td>CE Ratio</td></tr><tr><td>Baseline (MFS)</td><td>0.274 (188/687)</td><td>0.405 (17/42)</td><td>1.48</td></tr><tr><td>WSD_7</td><td>0.175 (120/687)</td><td>0.405 (17/42)</td><td>2.31</td></tr></table>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF6": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Group 1</td><td>Group 2</td><td>Difference</td><td>p-value</td></tr><tr><td>Baseline (MFS)</td><td>1.226</td><td>1.040</td><td>0.186</td><td>0.368</td></tr><tr><td>WSD_7</td><td>1.401</td><td>0.932</td><td>0.469*</td><td>0.013</td></tr><tr><td>*p<0.05</td><td/><td/><td/><td/></tr></table>", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Noun</td><td>#sense</td><td>Major Sense</td><td>Entropy</td><td>#err. instances</td><td>WSD_7</td><td>MFS</td><td>WSD_7+ MFS</td></tr><tr><td>defense</td><td>7</td><td>0.28</td><td>2.599</td><td>5</td><td>5</td><td>4</td><td>5</td></tr><tr><td>position</td><td>7</td><td>0.30</td><td>2.264</td><td>2</td><td>2</td><td>2</td><td>2</td></tr><tr><td>base</td><td>6</td><td>0.35</td><td>2.023</td><td>1</td><td>1</td><td>0</td><td>1</td></tr><tr><td>system</td><td>6</td><td>0.54</td><td>1.525</td><td>2</td><td>1</td><td>0</td><td>1</td></tr><tr><td>chance</td><td>4</td><td>0.49</td><td>1.361</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>order</td><td>8</td><td>0.72</td><td>1.348</td><td>4</td><td>1</td><td>0</td><td>1</td></tr><tr><td>part</td><td>5</td><td>0.70</td><td>1.288</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>power</td><td>3</td><td>0.51</td><td>1.233</td><td>3</td><td>1</td><td>3</td><td>3</td></tr><tr><td>area</td><td>3</td><td>0.72</td><td>1.008</td><td>2</td><td>1</td><td>2</td><td>2</td></tr><tr><td>management</td><td>2</td><td>0.62</td><td>0.959</td><td>2</td><td>1</td><td>0</td><td>0</td></tr><tr><td>condition</td><td>3</td><td>0.71</td><td>0.906</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>job</td><td>3</td><td>0.78</td><td>0.888</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>state</td><td>4</td><td>0.83</td><td>0.822</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>hour</td><td>4</td><td>0.85</td><td>0.652</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>value</td><td>3</td><td>0.90</td><td>0.571</td><td>2</td><td>1</td><td>1</td><td>1</td></tr><tr><td>plant</td><td>3</td><td>0.88</td><td>0.556</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>move</td><td>4</td><td>0.93</td><td>0.447</td><td>6</td><td>0</td><td>0</td><td>0</td></tr><tr><td>rate</td><td>2</td><td>0.92</td><td>0.388</td><td>6</td><td>0</td><td>1</td><td>1</td></tr><tr><td>Total</td><td>-</td><td>-</td><td>-</td><td>42</td><td>17</td><td>17</td><td>21</td></tr><tr><td colspan=\"3\">Nouns without erroneous instances:</td><td/><td/><td/><td/><td/></tr></table>", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |