|
{ |
|
"paper_id": "I11-1021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:31:15.919353Z" |
|
}, |
|
"title": "Enhancing Active Learning for Semantic Role Labeling via Compressed Dependency Trees", |
|
"authors": [ |
|
{ |
|
"first": "Chenhua", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Saarland University", |
|
"location": { |
|
"settlement": "Saarbr\u00fccken", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Saarland University", |
|
"location": { |
|
"settlement": "Saarbr\u00fccken", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "apalmer@coli.uni-sb.de" |
|
}, |
|
{ |
|
"first": "Caroline", |
|
"middle": [], |
|
"last": "Sporleder", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Saarland University", |
|
"location": { |
|
"settlement": "Saarbr\u00fccken", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "csporled@coli.uni-sb.de" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper explores new approaches to active learning (AL) for semantic role labeling (SRL), focusing in particular on combining typical informativity-based sampling strategies with a novel measure of representativeness based on compressed dependency trees (CDTs). In essence, the compressed representation encodes the target predicate and the key dependents of the verb complex in the sentence. We first present our method for producing CDTs from the output of an existing dependency parser. The compressed trees are used as features for training a supervised SRL system. Second, we present a study of AL for SRL. We investigate a number of different sample selection strategies, and the best results are achieved by incorporating CDTs for example selection based on both informativity and representativeness. We show that our approach can reduce by up to 50% the amount of training data needed to attain a given level of performance.", |
|
"pdf_parse": { |
|
"paper_id": "I11-1021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper explores new approaches to active learning (AL) for semantic role labeling (SRL), focusing in particular on combining typical informativity-based sampling strategies with a novel measure of representativeness based on compressed dependency trees (CDTs). In essence, the compressed representation encodes the target predicate and the key dependents of the verb complex in the sentence. We first present our method for producing CDTs from the output of an existing dependency parser. The compressed trees are used as features for training a supervised SRL system. Second, we present a study of AL for SRL. We investigate a number of different sample selection strategies, and the best results are achieved by incorporating CDTs for example selection based on both informativity and representativeness. We show that our approach can reduce by up to 50% the amount of training data needed to attain a given level of performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The focus of this paper is active learning for semantic role labeling, a little-studied intersection of two rather substantial bodies of work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One aim of active learning (AL) is to reduce the number of labeled training instances required to reach a given performance level using supervised machine learning techniques. This is accomplished by allowing the learner to guide the selection of examples to be annotated and added to the training set; at each iteration the learner queries for the example (or set of examples) that will be most informative to its present state. AL is an attractive idea for natural language processing (NLP) because of its potential to dramatically reduce the need for expensive expert annotation, and it has been successfully applied in various areas of natural language processing (Tang et al., 2002; Settles and Craven, 2008) , including named entity recognition (Shen et al., 2004 ),text classification (Yang et al., 2009) , image retrieval (Zhou, 2006) , partof-speech tagging (Ringger et al., 2007) , morpheme glossing (Baldridge and Palmer, 2009) , and syntactic parsing (Hwa, 2004; Osborne and Baldridge, 2004) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 668, |
|
"end": 687, |
|
"text": "(Tang et al., 2002;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 688, |
|
"end": 713, |
|
"text": "Settles and Craven, 2008)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 751, |
|
"end": 769, |
|
"text": "(Shen et al., 2004", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 792, |
|
"end": 811, |
|
"text": "(Yang et al., 2009)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 830, |
|
"end": 842, |
|
"text": "(Zhou, 2006)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 867, |
|
"end": 889, |
|
"text": "(Ringger et al., 2007)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 910, |
|
"end": 938, |
|
"text": "(Baldridge and Palmer, 2009)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 963, |
|
"end": 974, |
|
"text": "(Hwa, 2004;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 975, |
|
"end": 1003, |
|
"text": "Osborne and Baldridge, 2004)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The problems of scarce annotated data and the expense of annotating new data are at least as relevant for semantic role labeling (SRL) as for the above-mentioned areas of NLP. Existing work on automatic SRL usually explores supervised machine learning approaches to mark the semantic roles of predicates automatically by training classifiers using large annotated corpora. 1 Although such approaches can achieve reasonably good performance, annotating a large corpus is still expensive and time consuming. Moreover, the performance of trained classifiers may degrade remarkably when they are applied to out-of-domain data (Johansson and Nugues, 2008a) . There is very little work on AL for SRL (e.g. Roth and Small (2006) ), although much interesting work has been done with semi-supervised and unsupervised approaches to the problem (Grenager and Manning, 2006; F\u00fcrstenau and Lapata, 2009; Lang and Lapata, 2010; Titov and Klementiev, 2011, among others) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 373, |
|
"end": 374, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 622, |
|
"end": 651, |
|
"text": "(Johansson and Nugues, 2008a)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 700, |
|
"end": 721, |
|
"text": "Roth and Small (2006)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 834, |
|
"end": 862, |
|
"text": "(Grenager and Manning, 2006;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 863, |
|
"end": 890, |
|
"text": "F\u00fcrstenau and Lapata, 2009;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 891, |
|
"end": 913, |
|
"text": "Lang and Lapata, 2010;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 955, |
|
"text": "Titov and Klementiev, 2011, among others)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper we explore the use of compressed dependency trees (CDTs) as features for supervised semantic role labeling and, most importantly, as a way of measuring how representative an individual instance is of the input data. We then incorporate representativeness as part of the metric used for sample selection in active learning. The compressed dependency trees encode the target predicate and the key dependents of the verb complex in a sentence. As illustrated in Section 3, the structural relationships defined by the compressed dependency trees well encapsulate key features used in automatic SRL.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For a more complete picture of the potential for AL with respect to SRL, we investigate a set of strategies designed to select the most informative training examples. We further develop a more effective approach to select training examples concerning both their informativity and representativeness. We use the compressed dependency trees to measure the similarity of two sentences, and select the training examples with a higher priority which are more informative and representative among the unlabeled sentences in the pool. The experimental results show that our approaches can reduce up to 50% of training examples compared to traditional supervised learning solutions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We begin with a brief description of the semantic role labeling task and our supervised learning model. Section 3 presents our method for compressing dependency tree representations, followed by the active learning model, including definitions of all sampling strategies investigated in this work (Section 4). Experiments and results are presented and discussed in Section 5 and Section 6. We end with related work (Section 7) and brief conclusions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Parsing the semantic argument structure of a sentence involves identification and disambiguation of target predicates as well as identification and labeling of their arguments. Because our focus is on the active learning more so than on the semantic role labeling itself, we address only the argument labeling stage of the process, assuming that predicates and argument spans alike have already been identified and correctly labeled.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Role Labeling", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Broadly speaking, there are two different styles of semantic parsing and semantic role labeling (SRL): those based on FrameNet-style analysis (Ruppenhofer et al., 2006) and those using PropBank-style analysis (Palmer et al., 2005) . This work takes the PropBank approach, which considers only verbal predicates and is strongly tied to syntactic structure. In (1), for example, the two arguments of the predicate idolize are labeled as Arg0 and Arg1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 168, |
|
"text": "(Ruppenhofer et al., 2006)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 230, |
|
"text": "(Palmer et al., 2005)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Role Labeling", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Role Labeling", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "[John] Arg0 idolizes [his sister] Arg1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Role Labeling", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this text, we refer to each argument to be labeled, together with its target predicate, as an instance; the sentence in (1) contains two instances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Role Labeling", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The aim of the current work is not to surpass stateof-the-art performance on semantic role labeling. Therefore, although state-of-the-art semantic role labelers are freely available, we chose to implement our own labeler in order to have more control over the underlying machinery. This allows straightforward access to the predicted probability of outputs, which is crucial for the informativitybased selection strategies in Section 4. In addition, compressed dependency trees (Section 3) serve as features for our labeler as well as guiding sample selection in the active learning experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Learning Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In our study, we applied an L1-regularized 2 logistic regression model (Lee et al., 2006) for labeling instances, using the liblinear package (Lin et al., 2007) to build one classifier per label. There are 6 core and 13 non-core argument labels in PropBank annotations. Thus our SRL system is a suite of binary classifiers, and we then use the one-versus-all method (Duda et al., 2001) to assign labels to each instance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 89, |
|
"text": "(Lee et al., 2006)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 160, |
|
"text": "(Lin et al., 2007)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 385, |
|
"text": "(Duda et al., 2001)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Learning Model", |
|
"sec_num": "2.1" |
|
}, |
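{

"text": "As a minimal sketch of this setup (not the authors' exact implementation), the one-versus-all suite can be assembled with scikit-learn's liblinear-backed logistic regression; the feature matrix X and the per-label binary target vectors are hypothetical placeholders:\n\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\ndef train_classifiers(X, labels):\n    # One L1-regularized binary classifier per PropBank argument label.\n    return {lab: LogisticRegression(penalty=\"l1\", solver=\"liblinear\").fit(X, y)\n            for lab, y in labels.items()}\n\ndef predict_label(classifiers, x):\n    # One-versus-all: assign the label whose classifier is most confident.\n    probs = {lab: clf.predict_proba(np.asarray(x).reshape(1, -1))[0, 1]\n             for lab, clf in classifiers.items()}\n    return max(probs, key=probs.get)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Supervised Learning Model",

"sec_num": "2.1"

},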
|
{ |
|
"text": "We used the version of PropBank provided for the CoNLL-2008 SRL shared task (Surdeanu et al., 2008) . A test set of 500 randomly selected sentences was constructed at the outset of the project; this was used only for evaluation of both supervised and active learning models. In all AL experiments, we simulate the oracle by hiding and then uncovering gold-standard labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 99, |
|
"text": "(Surdeanu et al., 2008)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Features", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The CoNLL-2008 data set includes both goldstandard dependency parses and automatic dependency parses from the Malt parser (Nivre and Hall, 2005) . We use a combination of features taken directly from the gold-standard parses, 3 features derived from the Malt parses, and features from the output of the Stanford dependency parser (de Marneffe et al., 2006) . To apply the logistic regression model, the features are represented in a binary fashion. The features are described in Table 1, in three groups separated by double lines. The derived features, including a heuristicallyidentified verb complex and altered dependency labels, are described in more detail in Section 3. We use cross-validation on the training data to select for each individual classifier the subset of features most relevant for that label. In feature selection, features are ranked based on their Fisher score calculated using the training data set (as in Duda et al. (2001) ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 144, |
|
"text": "(Nivre and Hall, 2005)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 356, |
|
"text": "Marneffe et al., 2006)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 931, |
|
"end": 949, |
|
"text": "Duda et al. (2001)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Features", |
|
"sec_num": "2.2" |
|
}, |
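{

"text": "As an illustration of the Fisher-score ranking (a sketch assuming binary features; the exact formulation in Duda et al. (2001) may differ in detail):\n\nimport numpy as np\n\ndef fisher_scores(X, y, eps=1e-12):\n    # X: (n_samples, n_features) binary feature matrix; y: 0/1 labels for one classifier.\n    pos, neg = X[y == 1], X[y == 0]\n    num = (pos.mean(axis=0) - neg.mean(axis=0)) ** 2\n    den = pos.var(axis=0) + neg.var(axis=0) + eps\n    return num / den\n\n# Rank features and keep the top k for this classifier (k chosen by cross-validation):\n# ranked = np.argsort(-fisher_scores(X, y))[:k]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data and Features",

"sec_num": "2.2"

},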
|
{ |
|
"text": "Given a sentence, the task of dependency parsing is to identify the head word and its corresponding dependents and to classify their functional relationships according to a set of dependency relations (e.g., subject, modifier). Thus, a dependency tree of a sentence encodes the dependency relation between the head words and their dependents. It has been reported that SRL can benefit from phrase-structure and dependency-based syntactic parsing (Hacioglu, 2004 Pradhan et al., 2005) . At the same time, much of the structural and relational information represented in a dependency tree is not relevant for the SRL task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 446, |
|
"end": 461, |
|
"text": "(Hacioglu, 2004", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 483, |
|
"text": "Pradhan et al., 2005)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency Tree Compression", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use a compressed dependency tree (CDT) to encode just the relationships between a target predicate and the key dependents of the verb complex. The new tree is always rooted in the target predicate, which often means resetting the root from an auxiliary or other finite main verb. We generate the CDT from the output of an existing dependency parser through the process described in a simplified form below, using the example sentence in Fig. 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 440, |
|
"end": 446, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dependency Tree Compression", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1. Fix target predicate (e.g. set) as root of CDT. 2. Identify the verb chain to which the target predicate belongs; this group of tokens will now be treated as the verb complex. The verb chain is produced by collecting elements connected by relevant dependency relations (VC, IM, CONJ), stopping when a ROOT node, a subordinate clause (SUB), or a verbal OBJ node is encountered. 3. Collect direct dependents of each word in the new verb complex; set these as dependents of the target predicate in the CDT, transferring the dependency relation to the target predicate. (e.g. date is a dependent of have). 4. Negation, modal verbs, and other main verbs in the verb complex also become dependents of the root predicate in the CDT. In some cases of 'new' dependency relations introduced by the tree compression process, we use output from the Stanford parser to complement the dependency relations found in the gold-standard data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency Tree Compression", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "5. Heuristically determine voice of clause and alter some CDT dependency labels(e.g. SBJ PASSIVE becomes OBJ*); these are the asterisk-marked relations in Table. 1.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 161, |
|
"text": "Table.", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dependency Tree Compression", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For example, in (2):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency Tree Compression", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(2) At the same time, the government did not want to appear to favor GM by allowing a minority stake that might preclude a full bid by Ford.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency Tree Compression", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "the verb complex is {did, n't, want, appear, fa-vor}. The subject phrase the government, originally a dependent of did, becomes a dependent of the new three-verb predicate {want, appear, fa-vor}; the negation word n't is a dependent of the target predicate want.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency Tree Compression", |
|
"sec_num": "3" |
|
}, |
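{

"text": "A schematic sketch of steps 1-4 above (hypothetical token objects with .head, .deprel, .children and .is_verb stand in for the real parser output; the Stanford-parser fallback and the voice heuristics of step 5 are omitted):\n\nCHAIN_RELS = {\"VC\", \"IM\", \"CONJ\"}  # relations that extend the verb chain\n\ndef verb_chain(predicate):\n    # Step 2: walk up and down the tree along VC/IM/CONJ links; the ROOT,\n    # subordinate clauses (SUB) and verbal OBJ nodes are never crossed.\n    chain, frontier = {predicate}, [predicate]\n    while frontier:\n        tok = frontier.pop()\n        for nb, rel in [(tok.head, tok.deprel)] + [(c, c.deprel) for c in tok.children]:\n            if nb is not None and nb not in chain and rel in CHAIN_RELS:\n                chain.add(nb)\n                frontier.append(nb)\n    return chain\n\ndef build_cdt(predicate):\n    chain = verb_chain(predicate)\n    # Steps 3-4: dependents of any word in the complex become direct dependents\n    # of the target predicate (the CDT root), keeping their dependency relation.\n    return [(predicate, dep.deprel, dep)\n            for verb in chain for dep in verb.children if dep not in chain]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dependency Tree Compression",

"sec_num": "3"

},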
|
{ |
|
"text": "This section provides some background on the active learning process, as well as detailing the various sampling strategies we investigate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this study we apply a standard active learning model (Settles, 2010; Lewis and Gale, 1994) to the task of semantic role labeling. Algorithm 1 illustrates this model as we use it. 4", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 71, |
|
"text": "(Settles, 2010;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 93, |
|
"text": "Lewis and Gale, 1994)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The basic model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Algorithm 1 Active learning for SRL.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The basic model", |
|
"sec_num": "4.1" |
|
}, |
|
{

"text": "1: Randomly select initial seed of labeled instances;\n2: Add initial seed to the training data;\n3: Apply logistic regression model to train system of classifiers, one for each label;\n4: while number of instances in training data is less than X do\n5: Randomly select pool of Y unlabeled sentences;\n6: Select a sentence or sentences from the unlabeled pool according to a given selection strategy;\n7: Ask oracle to label the selected unlabeled sentence;\n8: Add instances from selected sentence to training data;\n9: Re-train system using the updated training data;\n10: Use system to label test data, record accuracy;\n11: end while",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The basic model",

"sec_num": "4.1"

},

{

"text": "Much recent work in AL has to do with Step 6 of Algorithm 1, designing and refining selection strategies. The main selection criterion used to date has been informativity, measuring how much a training example can help to reduce the uncertainty of a statistical model. A less-frequently considered criterion, especially in AL for NLP, is representativeness, or how well a training example represents the overall input patterns of the unlabeled data.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The basic model",

"sec_num": "4.1"

},
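{

"text": "A compact sketch of Algorithm 1 as a pool-based loop; the callables train, select, oracle_label and evaluate are placeholders for the components described in the text, and the budget here counts labeled sentences rather than instances:\n\nimport random\n\ndef active_learning(corpus, test_data, train, select, oracle_label, evaluate,\n                    seed_size=50, pool_size=500, budget=500):\n    labeled = [oracle_label(s) for s in random.sample(corpus, seed_size)]  # steps 1-2\n    model = train(labeled)                                                 # step 3\n    curve = []\n    while len(labeled) < budget:                                           # step 4\n        pool = random.sample(corpus, pool_size)                            # step 5\n        sentence = select(model, pool)                                     # step 6\n        labeled.append(oracle_label(sentence))                             # steps 7-8\n        model = train(labeled)                                             # step 9\n        curve.append(evaluate(model, test_data))                           # step 10\n    return model, curve",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The basic model",

"sec_num": "4.1"

},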
|
{ |
|
"text": "While some results from AL are robust across different datasets and even different tasks, it is clear that there is no single approach to AL that is suitable for all situations (Tomanek and Olsson, 2009) . Because there is very little previous work on AL for the task of semantic role labeling, we do not assume previous solutions but rather investigate a number of different strategies.", |
|
"cite_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 203, |
|
"text": "(Tomanek and Olsson, 2009)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The basic model", |
|
"sec_num": "4.1" |
|
}, |
|
{

"text": "Informativity is exploited in our approaches in terms of uncertainty, which is measured by how confidently the system labels instances and, by extension, sentences. The lower the confidence on labeling a particular sentence, the more uncertainty is assigned to the sentence. At each iteration, then, we select from the unlabeled pool the single sentence with the greatest uncertainty. We compare four different scoring functions for measuring the system's certainty (CER) regarding an unlabeled sentence. These are presented below as INF1-INF4.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativity",

"sec_num": "4.2"

},
|
{ |
|
"text": "Let s represent an unlabeled sentence with instances i = 1 to n. Given a set of binary classifiers, one each for labels y = 1 to m, let p i,y be the probability of i being labeled as y. Finally, P is a pool of unlabeled sentences. At each iteration, we select the single s \u2208 P with the lowest value for CER.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativity", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "RAND: Random selection. Random selection (randomly select an unlabeled sentence s \u2208 P ) serves as a strong baseline in active learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativity", |
|
"sec_num": "4.2" |
|
}, |
|
{

"text": "INF1: Average uncertainty. After labeling each instance in a sentence with the most-likely predicted label, we calculate uncertainty for the sentence as the average of the classifiers' confidence in assigning the predicted labels. Let $Top(i) = p_{i,y_k}$, where $\\forall h \\neq k: p_{i,y_k} > p_{i,y_h}$; then $CER(s) = (\\sum_{j=1}^{n} Top(i_j))/n$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativity",

"sec_num": "4.2"

},

{

"text": "INF2: Average uncertainty variance. Our second informativity-based strategy evaluates the uncertainty of the labeling for an instance using the variance of the confidence for each instance. A smaller variance implies that it is more difficult for the system to differentiate between possible label assignments for the instance. We then calculate sentence uncertainty as the average variance over all instances. Let $AVG(i) = (\\sum_{k=1}^{m} p_{i,y_k})/m$ and $VAR(i) = \\sum_{k=1}^{m} (p_{i,y_k} - AVG(i))^2/(m-1)$; then $CER(s) = (\\sum_{j=1}^{n} VAR(i_j))/n$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativity",

"sec_num": "4.2"

},

{

"text": "INF3: Average top-2 Margin. The intuition behind this approach is that the top 2 most confident labels are likely to be more informative than other labels. Therefore, we only select the two most likely labels to calculate uncertainty. 5 Let $Margin(i) = p_{i,y_{k_1}} - p_{i,y_{k_2}}$, where $p_{i,y_{k_1}} > p_{i,y_{k_2}}$ and $\\forall h \\neq k_1, k_2: p_{i,y_{k_2}} > p_{i,y_h}$; then $CER(s) = (\\sum_{j=1}^{n} Margin(i_j))/n$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativity",

"sec_num": "4.2"

},

{

"text": "INF4: Most top-2 Margin Instances. Finally, we further extend the approach of INF3 by selecting the sentence which has the greatest number of instances with a small margin between the top 2 labels (which means that the sentence is more uncertain than other sentences). Let $Q$ be the set of instances whose top-2 margin is below a small threshold (i.e., $Margin(i) \\leq 0.1$). $CER(s)$ is defined as the inverse of the number of instances of $s$ that are in $Q$ (i.e. 1/# qualifying instances). Ties are resolved by random selection.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativity",

"sec_num": "4.2"

},
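{

"text": "A sketch of the four certainty scores for a single sentence; probs is assumed to be an (n_instances, m_labels) array of the per-label probabilities produced by the binary classifiers:\n\nimport numpy as np\n\ndef cer_inf1(probs):  # average confidence in the top label\n    return probs.max(axis=1).mean()\n\ndef cer_inf2(probs):  # average per-instance variance (ddof=1 gives the m-1 divisor)\n    return probs.var(axis=1, ddof=1).mean()\n\ndef cer_inf3(probs):  # average margin between the two most likely labels\n    top2 = np.sort(probs, axis=1)[:, -2:]\n    return (top2[:, 1] - top2[:, 0]).mean()\n\ndef cer_inf4(probs, threshold=0.1):  # inverse count of small-margin instances\n    top2 = np.sort(probs, axis=1)[:, -2:]\n    n_small = int(((top2[:, 1] - top2[:, 0]) <= threshold).sum())\n    return 1.0 / n_small if n_small else float(\"inf\")\n\n# At each iteration, the sentence with the lowest CER value is selected.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativity",

"sec_num": "4.2"

},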
|
{ |
|
"text": "A disadvantage of selecting examples based only on informativity is the tendency of the learner to query outliers (Settles, 2010) . It has therefore been proposed (Dredze and Crammer, 2008; Settles and Craven, 2008) to temper such selection strategies with a notion of relevance or representativeness. Ours is the first work to use such a combined strategy for SRL. We measure the representativeness of unlabeled sentences based on sentence similarity, taking two different approaches: cosine similarity, and a measure based on CDTs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 129, |
|
"text": "(Settles, 2010)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 189, |
|
"text": "(Dredze and Crammer, 2008;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 215, |
|
"text": "Settles and Craven, 2008)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representativeness", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "COS: Cosine Similarity. Given two sentences s and s , let i 1 , i 2 , . . . , i m , and i 1 , i 2 ,. . . ,i n be their instances, respectively. The similarity of the two sentences, denoted as similarity(s, s ), is defined as m j=1 n k=1 sim(i j , i k ), where sim(i j , i k ) is the similarity between the instances i j and i k , defined as the cosine of the two feature vectors. 6 For purposes of comparison, we use the same formulation of COS as Settles and Craven (2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 448, |
|
"end": 473, |
|
"text": "Settles and Craven (2008)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representativeness", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Given a pool P of unlabeled sentences, for every unlabeled sentence s \u2208 P , the representativeness of the sentence, denoted as rep(s), is measured as the sum of the similarity between the sentence and all the other sentences in the pool, that is, rep(s) = sim(s, s ), where s \u2208 P \u2227 s = s.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representativeness", |
|
"sec_num": "4.3" |
|
}, |
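{

"text": "A sketch of the COS computation; vecs maps each pool sentence to the list of feature vectors of its instances (a hypothetical layout):\n\nimport numpy as np\n\ndef cos(u, v, eps=1e-12):\n    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v) + eps))\n\ndef similarity(s_vecs, t_vecs):\n    # Sentence similarity: sum of pairwise instance cosines.\n    return sum(cos(u, v) for u in s_vecs for v in t_vecs)\n\ndef rep_cos(vecs, s):\n    # rep(s): sum of similarity(s, s') over all other pool sentences s'.\n    return sum(similarity(vecs[s], vecs[t]) for t in vecs if t != s)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Representativeness",

"sec_num": "4.3"

},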
|
{ |
|
"text": "COS evaluates the similarity of two sentences based on the cosine of their instances. This may not be accurate enough because the instances include more information than the relationships between the target predicate and the key dependents of the verb complex in the sentence. Therefore, we exploit the compressed dependency trees as a metric to evaluate the similarity between two sentences, as illustrated below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representativeness", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "CDT: Compressed Dependency Trees. For target predicate p, let (p, r i , a i ) be the edges of the CDT rooted in p, where a i is an argument and r i is the dependency relationship between p and a i . We call two edges similar if all of p, r, and a meet their respective similarity criteria. Two predicates are considered to be similar if they have the same value for the PREDICATE PROPERTIES feature as defined in Table 1 (e.g. both are transitive verbs). Two relations are considered to be similar if they have the same dependency relation label (e.g. SBJ, TMP, MOD, etc.). Finally, two arguments are considered to be similar if they share the same coarsegrained part-of-speech tag.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 413, |
|
"end": 420, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Representativeness", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Given a pool P of unlabeled sentences, for every unlabeled sentence s \u2208 P , the representativeness of the sentence, denoted as rep(s), is defined as n similar , representing the number of edges in the pool that are similar to the edges of the CDT for s. Intuitively, the larger the number of similar CDT edges in the unlabeled pool, the more representative the sentence is overall of the input data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representativeness", |
|
"sec_num": "4.3" |
|
}, |
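{

"text": "A sketch of the CDT-based count; each edge is reduced to a (predicate-properties, relation-label, coarse-POS) triple so that the similarity criteria above become simple equality tests (this triple encoding is an assumption):\n\nfrom collections import Counter\n\ndef rep_cdt(pool_edges, s_edges):\n    # pool_edges: edge triples from the CDTs of all sentences in the pool;\n    # s_edges: edge triples from the CDT of the candidate sentence s.\n    counts = Counter(pool_edges)\n    return sum(counts[e] for e in set(s_edges))  # n_similar for s",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Representativeness",

"sec_num": "4.3"

},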
|
{ |
|
"text": "The final step in our model is to define a selection strategy that incorporates both selection criteria. We define the priority of selecting a sentence as priority(s) = \u03b1 \u00d7 rep(s) \u2212 (1 \u2212 \u03b1) \u00d7 CER(s). Given a pool P , we select the single s \u2208 P with the highest value for priority(s). This approach is very similar to the information density (ID) approach of Settles and Craven (2008) ; the key difference is in the balance between the two criteria.", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 383, |
|
"text": "Settles and Craven (2008)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Informativity and Representativeness", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Ours is a linear combination; ID instead multiplies informativity by a weighted measure of rep- ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Informativity and Representativeness", |
|
"sec_num": "4.4" |
|
}, |
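{

"text": "The two combination schemes side by side, as a sketch (rep and CER are assumed to be normalized to comparable ranges; beta = 1 mirrors the basic ID formulation):\n\ndef priority(rep, cer, alpha=0.3):\n    # Our linear combination: favor high representativeness and low certainty.\n    return alpha * rep - (1 - alpha) * cer\n\ndef information_density(informativity, rep, beta=1.0):\n    # ID (Settles and Craven, 2008): multiplicative weighting.\n    return informativity * rep ** beta",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Combining Informativity and Representativeness",

"sec_num": "4.4"

},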
|
{ |
|
"text": "To evaluate our approach to AL for SRL, we investigate three different questions. First, which informativity strategy is most appropriate for the task? Second, which representativeness measure works best? And third, how shall we weight the trade-off between the two selection criteria?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "All of our active learning experiments share some characteristics. First, we randomly select a seed of 50 instances from the labeled training data. The seed set, as well as the test data, are kept consistent across all experimental conditions. In each iteration of the training-selection cycle (see Algorithm 1), a new unlabeled pool (n=500) is selected, and from that pool a single example is labeled by the oracle and added to the training set. We stop once 500 examples have been labeled.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To evaluate the effectiveness of each strategy, we tested the classifier in each interaction, and measured the accuracy of the predicted labels. The accuracy measure is defined as the number of correct labelings divided by the total number of labelings in the test data. Results are presented as the average over 20 runs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To investigate the influence of representativeness, we run the same experiment with all cross-combinations of {INF1,INF2,INF3,INF4} and {COS,CDT}. For weighting the two criteria, we use both information density (ID) as defined in Settles and Craven (2008) and our priority metric (Section 4.4) with \u03b1 set at 0.3, 0.5, and 0.7.", |
|
"cite_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 255, |
|
"text": "Settles and Craven (2008)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this section, we analyze and discuss the experimental results. The gains achieved by AL can be measured in a number of different ways; first, we plot number of labeled training examples against system accuracy (Figure 2 and Figure 3) . The figures presented here stop at 500 training examples, with averaged accuracies in the range of 80%. For comparison, the fully-supervised system when trained on 20000 instances performed at 89.71%. Second, we calculate the percent reduction in error of each strategy compared to the random selection baseline (Table 2) , following Melville and Mooney (2004) . Because most gains from AL happen early in the learning curve, we consider performance at two different points. Fig. 2a shows the expected result that the four informativity-based strategies outperform the random selection baseline. INF3 performs best early in the learning curve, but is overtaken by INF2 at the end of our curve. To reach the accuracy achieved by the four informativity strategies at the halfway point (250 training instances), RAND needs 100-150 additional instances. Fig. 2b shows the result of combining the informativity (INF3) and representativeness (both COS and CDT). As illustrated in Section 6.1, INF3 outperforms the other informativity-based strategies. However, we see that Fig. 2b shows combining CDT with INF3 achieves a better performance than using INF3 only (\u03b1 = 0.3); representativeness improves performance, outperforming RAND by approximately 250 training instances. For INF3, COS is a less effective measure of representativeness. This may be because the feature vectors for the training instances share too much information, including stop words and a large number of 0-valued features, to make them easily differentiated. As a result, the most representative sentence selected using COS may not reflect the real simi- larity of the sentences. In CDT, we choose only the structural relation between the predicate and its arguments to measure the similarity between sentences. As a result, the sentences selected using CDT are more representative than that of using COS, as confirmed by the result in Fig. 2b . We also applied the solution of combining informativity and representativeness (4.3) to other informativity-based strategies. However, the advantage the combined solution for other strategies is less obvious than for INF3. For example, Fig. 2c shows the result of combining INF2 (\u03b1 = 0.3) with both COS and CDT. The result shows that the combined solution with CDT performs slightly better than using INF2 only when the number of training instances is less than 200. However, when the number of instances is larger than 350, the solution of using INF2 only achieves a higher accuracy than the combined solution. This may be due to a conflict between the two selection criteria. In any event, there is clearly a trade-off between informativity and representativeness, and results are influenced by the details of the manner of combining the two.", |
|
"cite_spans": [ |
|
{ |
|
"start": 573, |
|
"end": 599, |
|
"text": "Melville and Mooney (2004)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 236, |
|
"text": "(Figure 2 and Figure 3)", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 551, |
|
"end": 560, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 714, |
|
"end": 721, |
|
"text": "Fig. 2a", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1089, |
|
"end": 1096, |
|
"text": "Fig. 2b", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1306, |
|
"end": 1313, |
|
"text": "Fig. 2b", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 2142, |
|
"end": 2149, |
|
"text": "Fig. 2b", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 2388, |
|
"end": 2395, |
|
"text": "Fig. 2c", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The results of other INF/REP combinations are presented in Table 2 , in terms of their reduction in error compared to random selection.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 66, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Informativity plus Representativeness", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Finally, we set \u03b1 with different values (i.e., 0.3, 0.5 and 0.7) to investigate how the trade-off between informativity and representativeness may affect the SRL performance. We also compare our solution to the information density solution proposed by et al. (Settles and Craven, 2008 ) (denoted as ID) multiplies the informativity and representativeness instead of summing them. Here we display only the results of INF2 and INF4 combining with CDT in Fig. 3 . Other combinations share a similar pattern with these results and their error reduction percentage can be found in Table. 2. Fig. 3a and Fig. 3b compare the two representativity measures for INF3, as the best overall result was achieved by INF3 in combination with CDT. We see that parameter tuning seems to be more influential for the CDT measure than for the COS measure. Fig. 3c shows how parameter tuning affects INF2; \u03b1 = 0.3 has a higher accuracy than that of 0.5 and 0.7. We can observe that when \u03b1 = 0.3, our solution (INF2) has a better performance than that of ID. However, regarding the combination of INF4 and CDT, ID performs better (no graph; see 2. Note that the INF4 selects the sentences which has greatest number of instances with a small margin. Then representativeness of the sentences within the margin was calculated. In other word, the combination was done step by step not in parallel as the other combination. Therefore, the combination of INF4 and CDT accounts for informativity prior to representativeness; this may be why ID is more successful.", |
|
"cite_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 284, |
|
"text": "(Settles and Craven, 2008", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 452, |
|
"end": 458, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 576, |
|
"end": 582, |
|
"text": "Table.", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 593, |
|
"text": "Fig. 3a", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 598, |
|
"end": 605, |
|
"text": "Fig. 3b", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 842, |
|
"text": "Fig. 3c", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Weighting the two criteria", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "In general, the balance and trade-offs between the two criteria deserve further investigation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weighting the two criteria", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Much research efforts have been devoted to statistical machine learning methodologies for SRL (Bjkelund et al., 2009; Gildea and Jurafsky, 2002; Shi et al., 2009; Johansson and Nugues, 2008a; Lang and Lapata, 2010; Pradhan et al., 2008; F\u00fcrstenau and Lapata, 2009; Titov and Klementiev, 2011, among others) . For example, Johansson et al. (Johansson and Nugues, 2008a) applied logistic regression with L2 norm to dependency-based SRL. Similarly, we also use logistic regression to train the classifier with a probabilistic explanation. However, we use L1 normed logistic regression due to its desirable property that can result in few nonzero feature weights. This allows us to select the most important features from an otherwise very large feature set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 117, |
|
"text": "(Bjkelund et al., 2009;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 144, |
|
"text": "Gildea and Jurafsky, 2002;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 162, |
|
"text": "Shi et al., 2009;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 191, |
|
"text": "Johansson and Nugues, 2008a;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 214, |
|
"text": "Lang and Lapata, 2010;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 236, |
|
"text": "Pradhan et al., 2008;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 264, |
|
"text": "F\u00fcrstenau and Lapata, 2009;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 306, |
|
"text": "Titov and Klementiev, 2011, among others)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 339, |
|
"end": 368, |
|
"text": "(Johansson and Nugues, 2008a)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Roth et al. (Roth and Small, 2006) proposed a margin based active learning framework for structured output and experiment on SRL task. They defined structured output by constraining the relations among class labels, e.g., one predicate only has one of the labels. The classification problem is defined via constraints among output labels. The most uncertain instances are selected to satisfy predefined constraints. Rather than a structured relation between output labels, our work exploits the structure of the sentences themselves via compressed dependency trees.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 34, |
|
"text": "(Roth and Small, 2006)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In the area of sentence similarity measurement, most current work focuses on semantic similarity (Haghighi et al., 2005; Tang et al., 2002; Shen and Lapata, 2007) . We define similarity between sentences in terms of the nodes and edges in the dependency tree instead of semantic/lexical similarity of the sentences. We are interested in the structure of a sentence and how it is constructed due to the need of SRL tasks. Wang and Neumann (2007) use a similar sort of compressed dependency tree comprised of keywords and collapsed dependency relations to calculate the semantic similarity of sentences for the textual entailment task. Under their approach, dependency relations themselves are collapsed; we keep the specific dependency relations and collapse the trees, aiming for structural rather than semantic similarity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 120, |
|
"text": "(Haghighi et al., 2005;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 139, |
|
"text": "Tang et al., 2002;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 162, |
|
"text": "Shen and Lapata, 2007)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 444, |
|
"text": "Wang and Neumann (2007)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In addition, Filippova et al. (Filippova and Strube, 2008) proposed to compress a sentence using dependency trees and take the importance of words as weight. They found compressed dependency tree can better ensure the grammaticality of the sentences to preserve the same lexical meaning as much as possible. In our work, we are more interested in the explicit dependency relation of predicate-argument pairs. Our goal is to apply compressed dependency tree to extract explicit relation between predicate and argument as precise as possible for SRL purpose. Therefore, we construct the compressed tree by identifying predicate-argument units and then re-linking them if there exist dependency relation among them. Consequently, most of the nodes in our compressed tree are predicates and arguments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 58, |
|
"text": "(Filippova and Strube, 2008)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "This paper investigates the use of active learning for semantic role labeling. To improve the learning accuracy and reduce the size of training set, compressed dependency trees are exploited as features. Strategies to select informative unlabeled sentences are proposed. Moreover, the compressed dependency trees are also utilized as a criterion to measure the representativeness of unlabeled sentences. A solution to select unlabeled sentences combining both informativeness and representativeness is developed. The experimental results show that our solution can save up to 50% on a small training data set compared to the supervised learning solution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Possibilities for future work include exploring the use of constraints on label outputs, implementation of entropy-based informativity metrics, and perhaps combining COS andCDT for measuring representativeness. Another potentially promising direction is to employ multi-kernel based methods as a structure-oriented similarity measurement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "For recent work on SRL, see, among others:(Das et al., 2010;Haji\u010d et al., 2009;Surdeanu et al., 2008;Carreras and M\u00e0rquez, 2005;Baker et al., 2007).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that logistic regression is used together with a regularized term to avoid the overfitting problem by penalizing the complexity of the trained model. Generally, the regularized term is defined as a function of the learned parameters over the weights. The L1 regularization, also called lasso penalty, is used to penalize both large and small weights.3 In ongoing work, we replace gold-standard parses with more realistic automatic parses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Recall that each sentence contains one or more instances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that in the binary classification case, INF3 is equivalent toINF1. 6 Features are extracted from CDTs rather than full sentences, reducing to some extent the appearance of noisy information (e.g. stop words). Whether this can be further reduced by a modified implementation of COS is a question for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Our thanks to the anonymous reviewers for their valuable commentary and suggestions, and to Ivan Titov for invaluable, insightful discussions and feedback. This research has been funded by the German Research Foundation (DFG) under the MMCI Cluster of Excellence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "SemEval-2007 task 19: Frame semantic structure extraction", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Baker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ellsworth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of SemEval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Baker, M. Ellsworth, K. Erk. 2007. SemEval-2007 task 19: Frame semantic structure extraction. In Proc. of SemEval-2007.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "How well does active learning actually work? time-based evaluation of cost-reduction strategies for language documentation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Baldridge, A. Palmer. 2009. How well does ac- tive learning actually work? time-based evaluation of cost-reduction strategies for language documen- tation. In Proc. of EMNLP 2009.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Multilingual Semantic Role Labeling", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Bjkelund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Hafdell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nugues", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Bjkelund, L. Hafdell, P. Nugues, 2009. Multilingual Semantic Role Labeling, 43-48. 2009.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Introduction to the CoNLL-2005 shared task: Semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of CoNLL-2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Carreras, L. M\u00e0rquez. 2005. Introduction to the CoNLL-2005 shared task: Semantic role labeling. In Proc. of CoNLL-2005.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Probabilistic frame-semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Das, N. Schneider, D. Chen, N. A. Smith. 2010. Probabilistic frame-semantic parsing. In Proc. of NAACL-HLT 2010.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Generating typed dependency parses from phrase structure parses", |
|
"authors": [ |
|
{ |
|
"first": "M.-C", |
|
"middle": [], |
|
"last": "De Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Maccartney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M.-C. de Marneffe, B. MacCartney, C. D. Manning. 2006. Generating typed dependency parses from phrase structure parses. In Proc. of LREC 2006.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Active learning with confidence", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Crammer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Dredze, K. Crammer. 2008. Active learning with confidence. In Proc. of ACL 2008.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Pattern classification", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Duda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Hart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Stork", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Duda, P. Hart, D. Stork. 2001. Pattern classifica- tion, volume 2. Wiley.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Dependency tree based sentence compression", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Filippova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of INLG", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Filippova, M. Strube. 2008. Dependency tree based sentence compression. In Proc. of INLG 2008.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Semi-supervised semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "F\u00fcrstenau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. F\u00fcrstenau, M. Lapata. 2009. Semi-supervised se- mantic role labeling. In Proc. of EACL 2009.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automatic labeling of semantic roles", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Gildea, D. Jurafsky. 2002. Automatic labeling of semantic roles. Computational Linguistics, 28.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Unsupervised discovery of a statistical verb lexicon", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Grenager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grenager, C. Manning. 2006. Unsupervised discov- ery of a statistical verb lexicon. In Proc. of EMNLP 2006.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Semantic role labeling using dependency trees", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Hacioglu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Hacioglu. 2004. Semantic role labeling using de- pendency trees. In Proc. of COLING 2004.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Robust textual inference via graph matching", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. D. Haghighi, A. Y. Ng, C. D. Manning. 2005. Ro- bust textual inference via graph matching. In Proc. of HLT 2005.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "The CoNLL 2009 shared task: Syntactic and semantic dependencies in multiple languages", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ciaramita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Mart\u00ed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "\u0160t\u011bp\u00e1nek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Stra\u0148\u00e1k", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Haji\u010d, M. Ciaramita, R. Johansson, D. Kawahara, M. A. Mart\u00ed, L. M\u00e0rquez, A. Meyers, J. Nivre, S. Pad\u00f3, J.\u0160t\u011bp\u00e1nek, P. Stra\u0148\u00e1k, M. Surdeanu, N. Xue, Y. Zhang. 2009. The CoNLL 2009 shared task: Syntactic and semantic dependencies in multi- ple languages. In Proceedings of CoNLL 2009.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Sample selection for statistical parsing", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Computational Linguistics", |
|
"volume": "30", |
|
"issue": "3", |
|
"pages": "253--276", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Hwa. 2004. Sample selection for statistical parsing. Computational Linguistics, 30(3):253-276.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Dependency-based semantic role labeling of propbank", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nugues", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Johansson, P. Nugues. 2008a. Dependency-based semantic role labeling of propbank. In Proc. of EMNLP 2008.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The effect of syntactic representation on semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nugues", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Johansson, P. Nugues. 2008b. The effect of syntac- tic representation on semantic role labeling. In Proc. of COLING 2008.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Unsupervised induction of semantic roles", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Lang, M. Lapata. 2010. Unsupervised induction of semantic roles. In Proc. of HLT 2010.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Efficient L1 regularized logistic regression", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Abbeel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of the Ntl. Conf. on AI", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Lee, H. Lee, P. Abbeel, A. Ng. 2006. Efficient L1 regularized logistic regression. In Proc. of the Ntl. Conf. on AI, volume 21.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A sequential algorithm for training text classifiers", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Gale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proc. of SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. D. Lewis, W. A. Gale. 1994. A sequential algorithm for training text classifiers. In Proc. of SIGIR 1994.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Trust region newton methods for large-scale logistic regression", |
|
"authors": [ |
|
{ |
|
"first": "C.-J", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Keerthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C.-J. Lin, R. C. Weng, S. S. Keerthi. 2007. Trust region newton methods for large-scale logistic regression. In Proc. of ICML 2007.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Diverse ensembles for active learning", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Melville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Melville, R. J. Mooney. 2004. Diverse ensembles for active learning. In Proc. of ICML 2004.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Maltparser: A languageindependent system for data-driven dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of the TLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Nivre, J. Hall. 2005. Maltparser: A language- independent system for data-driven dependency parsing. In Proc. of the TLT 2005.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Ensemble-based active learning for parse selection", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Osborne, J. Baldridge. 2004. Ensemble-based ac- tive learning for parse selection. In Proc. of HLT- NAACL 2004.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The Proposition Bank: An Annotated Corpus of Semantic Roles", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Kingsbury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Computational Linguistics", |
|
"volume": "31", |
|
"issue": "1", |
|
"pages": "71--105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Palmer, D. Gildea, P. Kingsbury. 2005. The Propo- sition Bank: An Annotated Corpus of Semantic Roles. Computational Linguistics, 31(1):71-105.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Semantic role labeling using different syntactic views", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Hacioglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Pradhan, W. Ward, K. Hacioglu, J. H. Martin, D. Ju- rafsky. 2005. Semantic role labeling using different syntactic views. In Proc. of ACL 2005.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Towards robust semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Computational Linguistics", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "289--310", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. S. Pradhan, W. Ward, J. H. Martin. 2008. Towards robust semantic role labeling. Computational Lin- guistics, 34:289-310.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Active learning for part-of-speech tagging: Accelerating corpus annotation", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Ringger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Mcclanahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Haertel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Busby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Carmen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Carroll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Lonsdale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the Linguistic Annotation Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Ringger, P. McClanahan, R. Haertel, G. Busby, M. Carmen, J. Carroll, D. Lonsdale. 2007. Ac- tive learning for part-of-speech tagging: Accelerat- ing corpus annotation. In Proc. of the Linguistic An- notation Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Active learning with perceptron for structured output", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Small", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "ICML Workshop on Learning in Structured Output Spaces", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Roth, K. Small. 2006. Active learning with percep- tron for structured output. In ICML Workshop on Learning in Structured Output Spaces.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "FrameNet II: Extended Theory and Practice", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ruppenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ellsworth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"R L" |
|
], |
|
"last": "Petruck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Scheffczyk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Ruppenhofer, M. Ellsworth, M. R. L. Petruck, C. R. Johnson, J. Scheffczyk. 2006. FrameNet II: Ex- tended Theory and Practice.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "An analysis of active learning strategies for sequence labeling tasks", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Settles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Craven", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Settles, M. Craven. 2008. An analysis of active learning strategies for sequence labeling tasks. In Proc. of EMNLP 2008.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Active learning literature survey", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Settles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Settles. 2010. Active learning literature survey. Technical Report Computer Sciences Technical Re- port 1648, University of Wisconsin-Madison, 2010.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Using semantic roles to improve question answering", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of EMNLP-2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shen, M. Lapata. 2007. Using semantic roles to improve question answering. In Proc. of EMNLP- 2007.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Multi-criteria-based active learning for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-L", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Shen, J. Zhang, J. Su, G. Zhou, C.-L. Tan. 2004. Multi-criteria-based active learning for named entity recognition. In Proc. of ACL 2004.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Semantic role labeling based on dependency tree with multifeatures", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of IJCBS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Shi, G. Zhou, P. Qian, X. Li. 2009. Semantic role labeling based on dependency tree with multi- features. In Proc. of IJCBS 2009.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "The CoNLL 2008 shared task on joint parsing of syntactic and semantic dependencies", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Surdeanu, R. Johansson, A. Meyers, L. M\u00e0rquez, J. Nivre. 2008. The CoNLL 2008 shared task on joint parsing of syntactic and semantic dependen- cies. In Proc. of CoNLL 2008.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Active learning for statistical natural language parsing", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Tang, X. Luo, S. Roukos. 2002. Active learning for statistical natural language parsing. In Proc. of ACL 2002.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "A bayesian model for unsupervised semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Klementiev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I. Titov, A. Klementiev. 2011. A bayesian model for unsupervised semantic parsing. In Proc. of ACL 2011.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "A Web Survey on the Use of Active learning to support annotation of text data", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Tomanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Olsson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of AL-NLP workshop, NAACL HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Tomanek, F. Olsson. 2009. A Web Survey on the Use of Active learning to support annotation of text data. In Proc. of AL-NLP workshop, NAACL HLT 2009.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Recognizing textual entailment using sentence similarity based on dependency tree skeletons", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of RTE", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Wang, G. Neumann. 2007. Recognizing textual en- tailment using sentence similarity based on depen- dency tree skeletons. In Proc. of RTE 2007.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Effective multi-label active learning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-T", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of KDD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Yang, J.-T. Sun, T. Wang, Z. Chen. 2009. Effective multi-label active learning for text classification. In Proc. of KDD 2009.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Learning with unlabeled data and its application to image retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Z.-H", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of PRICAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Z.-H. Zhou. 2006. Learning with unlabeled data and its application to image retrieval. In Proc. of PRICAI 2006.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Producing compressed dependency tree 2008b;", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Combining informativity and representativeness.resentativeness.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "Trade-off between informativity and representativeness.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td>FEATURE TYPE</td><td>EXPLANATION/EXAMPLE</td></tr><tr><td>Part of Speech</td><td>JJR, JJS, LS, CD, etc.</td></tr><tr><td>Head word</td><td>Head words of predicate and argument</td></tr><tr><td>isNEG</td><td>Instance includes NOT or NEVER</td></tr><tr><td>Argument position</td><td>Before or after predicate</td></tr><tr><td>Argument chunk position</td><td>Beginning or end of corresponding chunk</td></tr><tr><td>Lemma of argument</td><td>Lemma of argument whose dependency role is PRD or DIR</td></tr><tr><td>Lemma context</td><td>Two words before and after argument</td></tr><tr><td>Cue words</td><td>DIR ('up', 'toward', 'forward', 'along') REC ('self' as suffix) PRD ('as', 'as if') CAU ('because', 'why', 'as a result of')</td></tr><tr><td>Voice of predicate</td><td>Active or passive</td></tr><tr><td>Dependency relation of predicate and argument</td><td>LOC, TMP, etc. 1) Sbj*, obj* are defined as: Sbj* \u2190 Obj Passive Sbj* \u2190 LGS passive Sbj* \u2190 Active vt sbj</td></tr><tr><td>Predicate Properties</td><td>Obj* \u2190 Sbj Passive Obj* \u2190 Sbj VI (intransitive verb) Obj* \u2190 Obj Active VT = 1; transitive VI = 2; intransitive TO IM=3; begins with 'to' V Adj = 4; verb followed by adjective words (e.g. 'sounds good', 'looks pretty') PV = 5; phrasal verb (e.g. 'pick up')</td></tr><tr><td>Verb Complex</td><td>e.g. \"has not been set\" in figure 1</td></tr><tr><td>Acomp</td><td>adjectival complement</td></tr><tr><td>Advmod</td><td>adverbial modifier</td></tr><tr><td>Infmod</td><td>infinitival modifier</td></tr><tr><td>Rcmod</td><td>relative clause modifier</td></tr><tr><td>Rel</td><td>relative (word introducing an rcmod)</td></tr><tr><td>Xsbj</td><td>controlling subject</td></tr><tr><td>Iobj</td><td>indirect object</td></tr><tr><td>Advcl</td><td>adverbial clause modifier</td></tr><tr><td>Prep to,Prep in, Prep for, Prep with</td><td/></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Three feature groups: CoNLL basic, CoNLL derived, and from additional parser" |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td/><td/><td>NMOD</td><td>NMOD</td><td>SBJ ROOT</td><td>ADV</td><td>VC</td><td>P</td><td>VC</td></tr><tr><td>Index</td><td>1</td><td>2</td><td>3</td><td>4</td><td/><td>5</td><td>6</td><td>7</td><td>8</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"2\">Transfer</td><td/></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">SBJ</td><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td>ADV</td><td>P</td></tr><tr><td/><td/><td/><td colspan=\"2\">date</td><td colspan=\"2\">not</td><td/><td colspan=\"2\">set .</td></tr><tr><td/><td/><td/><td>3</td><td/><td/><td>5</td><td/><td colspan=\"2\">7 8</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"4\">Alternation if applicable</td></tr><tr><td colspan=\"5\">Compressed Dependency Tree</td><td colspan=\"2\">OBJ*</td><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td>ADV</td><td>P</td></tr><tr><td/><td/><td/><td colspan=\"2\">date</td><td colspan=\"2\">not</td><td/><td colspan=\"2\">set .</td></tr><tr><td/><td/><td/><td>3</td><td/><td/><td>5</td><td/><td>7</td><td>8</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "; Johansson and Nugues,A record date has not been set ." |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td>NOREP</td><td>COS</td><td>CDT</td><td>COS-ID</td><td>CDT-ID</td></tr><tr><td colspan=\"5\">INF1 6.56 / 5.18 3.68 / -0.86 2.60 / -0.74 7.43 / 6.45 6.44 / 6.60</td></tr><tr><td colspan=\"2\">INF2 5.12 / 8.31 5.51 / 5.37</td><td colspan=\"3\">7.74 / 8.19 7.21 / 5.67 3.49 / 2.24</td></tr><tr><td colspan=\"2\">INF3 5.07 / 5.54 6.13 / 5.52</td><td colspan=\"3\">8.15 / 9.54 5.94 / 5.72 5.65 / 7.18</td></tr><tr><td colspan=\"5\">INF4 7.37 / 5.79 1.41 / 2.01 -0.01 / -5.08 2.29 / 2.85 3.31 / 3.29</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Percentage error reduction overRAND(200 / 500 examples)" |
|
} |
|
} |
|
} |
|
} |