|
{ |
|
"paper_id": "2019", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:29:33.265705Z" |
|
}, |
|
"title": "A Language Invariant Neural Method for TimeML Event Detection", |
|
"authors": [ |
|
{ |
|
"first": "Suhan", |
|
"middle": [], |
|
"last": "Prabhu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "International Institute of Information Technology Hyderabad", |
|
"location": { |
|
"region": "Telangana", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "suhan.prabhuk@research.iiit.ac.in" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "International Institute of Information Technology Hyderabad", |
|
"location": { |
|
"region": "Telangana", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "pranav.goel@research.iiit.ac.in" |
|
}, |
|
{ |
|
"first": "Alok", |
|
"middle": [], |
|
"last": "Debnath", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "International Institute of Information Technology Hyderabad", |
|
"location": { |
|
"region": "Telangana", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "alok.debnath@research.iiit.ac.in" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Shrivastava", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "International Institute of Information Technology Hyderabad", |
|
"location": { |
|
"region": "Telangana", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "m.shrivastava@iiit.ac.in" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Detection of TimeML events in text have traditionally been done on corpora such as TimeBanks. However, deep learning methods have not been applied to these corpora, because these datasets seldom contain more than 10,000 event mentions. Traditional architectures revolve around highly feature engineered, language specific statistical models. In this paper, we present a Language Invariant Neural Event Detection (ALINED) architecture. ALINED uses an aggregation of both sub-word level features as well as lexical and structural information. This is achieved by combining convolution over character embeddings, with recurrent layers over contextual word embeddings. We find that our model extracts relevant features for event span identification without relying on language specific features. We compare the performance of our language invariant model to the current state-of-the-art in English, Spanish, Italian and French. We outperform the F1-score of the state of the art in English by 1.65 points. We achieve F1scores of 84.96, 80.87 and 74.81 on Spanish, Italian and French respectively which is comparable to the current states of the art for these languages. We also introduce the automatic annotation of events in Hindi, a low resource language, with an F1-Score of 77.13.", |
|
"pdf_parse": { |
|
"paper_id": "2019", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Detection of TimeML events in text have traditionally been done on corpora such as TimeBanks. However, deep learning methods have not been applied to these corpora, because these datasets seldom contain more than 10,000 event mentions. Traditional architectures revolve around highly feature engineered, language specific statistical models. In this paper, we present a Language Invariant Neural Event Detection (ALINED) architecture. ALINED uses an aggregation of both sub-word level features as well as lexical and structural information. This is achieved by combining convolution over character embeddings, with recurrent layers over contextual word embeddings. We find that our model extracts relevant features for event span identification without relying on language specific features. We compare the performance of our language invariant model to the current state-of-the-art in English, Spanish, Italian and French. We outperform the F1-score of the state of the art in English by 1.65 points. We achieve F1scores of 84.96, 80.87 and 74.81 on Spanish, Italian and French respectively which is comparable to the current states of the art for these languages. We also introduce the automatic annotation of events in Hindi, a low resource language, with an F1-Score of 77.13.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic extraction of events has gained sizable attention in subfields of NLP and information retrieval such as automatic summarization, question answering and knowledge graph embeddings (Chieu and Lee, 2004; Glava\u0161 and\u0160najder, 2014) , as events are a representation of temporal information and sequences in text. Various developments in guidelines and datasets for event detection have been met with equally fast paced evolution of automatic event annotation and detection methodologies in the last few years (Doddington et al., 2004; Pustejovsky et al., 2010; O'Gorman et al., 2016) . On a larger scale, event extraction has extended to many languages beyond English, including French (Bittar et al., 2011) , Spanish (Saur\u0131, 2010) , Italian (Caselli et al., 2011a) and very recently, Hindi (Goud et al., 2019b) . Event detection architectures have their origins in statistical models such as K-means and hierarchical clustering methods (Arnulphy et al., 2015) , which have more recently given way to neural models. Deep neural architectures on event annotation vary based on the approach taken to identifying and handling the data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 189, |
|
"end": 210, |
|
"text": "(Chieu and Lee, 2004;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 235, |
|
"text": "Glava\u0161 and\u0160najder, 2014)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 512, |
|
"end": 537, |
|
"text": "(Doddington et al., 2004;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 563, |
|
"text": "Pustejovsky et al., 2010;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 564, |
|
"end": 586, |
|
"text": "O'Gorman et al., 2016)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 710, |
|
"text": "(Bittar et al., 2011)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 734, |
|
"text": "(Saur\u0131, 2010)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 745, |
|
"end": 768, |
|
"text": "(Caselli et al., 2011a)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 794, |
|
"end": 814, |
|
"text": "(Goud et al., 2019b)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 940, |
|
"end": 963, |
|
"text": "(Arnulphy et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, event detection as a problem shifts when we move away from the annotation paradigm of datasets such as ACE (Doddington et al., 2004) and TAC KBP (Mitamura et al., 2015) to TimeML datasets such as TimeBank (Pustejovsky et al., 2006) , which are used in this paper. There has been limited use of deep learning methods on TimeBanks due to fewer event mentions and a need for data augmentation and bootstrapping. However, in this paper, we show that using subword level information, a language invariant deep learning model can provide similar event detection accuracies as heavily feature engineered language specific statistical methods without using any augmented data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 141, |
|
"text": "(Doddington et al., 2004)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 177, |
|
"text": "(Mitamura et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 240, |
|
"text": "(Pustejovsky et al., 2006)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper has two main contributions. First, we introduce our model, the Architecture for Language Invariant Neural Event Detection (ALINED), which is a deep learning model for event extraction from TimeML event annotated datasets from five languages. We show that for four of these languages, using no augmented data, we achieve comparable F1 score on these datasets to heavily feature engineered language specific statistical models, with less than 12,000 event mentions in each. Secondly, to the best of our knowledge, we present the first ever baseline for neural event detection in Hindi using this model. Our architecture uses both word and character embeddings and captures information from them distinctly, before combining them into a coherent representation of both. This is then used to determine the label for each input word. The proposed architecture is language invariant as well, such that no part of the system undergoes a change when training on different languages. In presenting this architecture, we highlight the importance of using subword level information in order to incorporate morphological as well as syntactic features in event extraction. This can also be extended to other semantically oriented sequence labeling tasks", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Neural approaches to sequence tagging are common due to extensive developments in named entity recognition. Huang et al. (2015) introduced and cultivated the use of bidirectional LSTMs to incorporate features that could be used for sequence tagging using a CRF. Ma and Hovy (2016) 's architecture and the NeuroNER program (Dernoncourt et al., 2017) provided a basic architecture and influenced multiple developments to most sequence labeling tasks, including event detection and extraction (Araki, 2018) . The task of event extraction in any language involves the identification of the event nugget (Ahn, 2006) . Prominent work has been done to analyze the lexical and semantic features of event representation (Li et al., 2013) , which served as a basis for neural event nugget detection (Liang et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 127, |
|
"text": "Huang et al. (2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 280, |
|
"text": "Ma and Hovy (2016)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 348, |
|
"text": "(Dernoncourt et al., 2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 503, |
|
"text": "(Araki, 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 599, |
|
"end": 610, |
|
"text": "(Ahn, 2006)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 711, |
|
"end": 728, |
|
"text": "(Li et al., 2013)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 789, |
|
"end": 809, |
|
"text": "(Liang et al., 2017)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The task of neural event detection has been attempted using a combination of networks, but mostly revolving around the use of convolutional neural architectures. Work in this approach focused on various aspects such as max-pooling to retrieve the structure of event nugget information (Nguyen and Grishman, 2015), modeling the skipgram architecture to learn lexical feature representations (Chen et al., 2015) as well as using dynamic CNNs in order to extract lexical and syntactic features in parallel . Recurrent neural architectures have also been employed for this task, which predict the location of the trigger based on combining the for-ward and backward features of sentences in which events occur Ghaeini et al., 2016) . Note that in both cases architectures focused on dealing with structural, lexical and contextual features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 409, |
|
"text": "(Chen et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 706, |
|
"end": 727, |
|
"text": "Ghaeini et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In the domain of multi-lingual and cross lingual event detection, Feng et al. (2018) uses a combination of both LSTMs and CNNs for creating a language independent architecture for capturing events, while Goud et al. (2019a) used stacked RNNs for sequence labeling and a language discriminator to learn language features. The latter architecture implements the use of the character embeddings, but does not identify the relevant features independent of the word embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 84, |
|
"text": "Feng et al. (2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 223, |
|
"text": "Goud et al. (2019a)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section, we describe the ALINED model for the event detection. Primarily, we focus on how to capture event representation at both a character and a word level. In this model, we had to focus on the following major considerations:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1. Syntactic and lexical information captured by previous event detection tasks should be accounted for.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "2. Furthermore, sub-word information is essential as morphological features are also useful in identifying event semantics if the language is morphologically rich, or has a free word order structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Fundamentally, our architecture generates character embeddings through convolution and aggregates this information using bidirectional LSTMs (Hochreiter and Schmidhuber, 1997) . The same is done over pretrained word embeddings in parallel, creating distinct intermediate representations. These representations are combined using a highway architecture for a final representation, which is used for the sequence tagging task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 175, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In order to generate character embeddings from the input sentence, we first use a CharCNN (Kim et al., 2016) . Let C be the dictionary of all the characters in the language and V be all the words in the language. We first define the character embeddings matrix E \u2208 R d\u00d7|C | , where d is the dimensionality of the character embeddings, with the constraint that d < ", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 108, |
|
"text": "(Kim et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "|C |. Let word w i \u2208 V", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "i = [c w i 1 , c w i 2 , . . . , c w i n ]. The character representation of w i is therefore given by E w i \u2208 R d\u00d7n .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We define a filter W \u2208 R d\u00d7b where b is the width of the filter. We apply a narrow convolution between E w i and W , to obtain the embedding of w i as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "e w i i = f (W \u2022 E w i [ * , i : i + b \u2212 1]) + b (1) where E w i [i : i + b \u2212 1]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "accounts for all the characters of given window size of the word. The obtained embedding e w i \u2208 R n\u2212b+1 . The function f is a non-linear function such as a hyperbolic tangent or a sigmoid. It is applied over the Frobenius inner product of the filter and the embedding value as A \u2022 B = Tr(AB T ) for any two matrices A and B.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We use max-pooling over the output embedding (instead of mean-pooling as it better incorporates the nature of natural language sequences (Xiang et al., 2016) ) as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 157, |
|
"text": "(Xiang et al., 2016)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "w c i = max i e w i i", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For a total of h filters, each of varying widths, we get different representations of w i . Therefore", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "w c i = [w c 1 , w c 2 , . . . , w c h ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "is the representation of the ith word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The aggregated word representations based on character information now capture the features that represent the event semantics at a sub-word level accurately. However, the contextual information has not been accounted for yet. This is done by using a bidirectional LSTM, as mentioned above.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "h c i = bi-LSTM(w c i , h c i\u22121 , h c i+1 ) \u2208 R k\u00d7l (3) The bi-LSTM hidden state vector h c = [h c 1 , h c 2 , ..., h c k ], each h c i of dimension R l", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "is now propagated to the rest of the network. h c can be seen as a lexically context-aware character representation of the words of the input sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Contextual Character Embedding", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To capture structural information well, we use contextual word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Contextual Word Embeddings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Let w = [w 1 , w 2 , ..., w k ] be the words in a sentence. Let their corresponding pre-trained word embeddings be e w = [e w 1 , e w 2 , ..., e w k ]. We aggregate the meaning of the sentence by passing the word embeddings through a bidirectional LSTM layer, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Contextual Word Embeddings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h w i = bi-LSTM(e w i , h w i\u22121 , h w i+1 ) \u2208 R k\u00d7l (4) Now each hidden state of h w = [h w 1 , h w 2 , ..., h w k ], i.e., each h w i of dimension R l", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Contextual Word Embeddings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": ", is used in the rest of the network. Since the pre-trained word embeddings are already contextual in nature, we do not process it further. Note that h w can be seen as the semantically context-aware representation of the words of the input sentence. This also includes the structure of event representation in that sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Contextual Word Embeddings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Given the representations of the hidden states from characters and words, we combine the two using a concatenation function followed by a highway network. The concatenation is represented as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h i = f (h w i , h c i )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The function f (\u2022) is the concatenation function, which can be represented as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "f (h w i , h c i ) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 h w i h c i (6) W \u2022 h w i (1 \u2212 W ) \u2022 h c i (7) W w \u2022 h w i W c \u2022 h c i (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Equation 6 is a direct concatenation of the hidden states h c and h w . A direct concatenation automatically implies that the information gathered from the representations are given equal weight. However, this is not true for all languages, as languages with fewer inflections require less information from the character representations and more from the word representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Equations 7 and 8 attempt to account for this by using a shared weight concatenation and a weighted concatenation respectively. In equation 7, W \u2208 R k\u00d7k is a weight matrix, where the values are scaled down to 1, in order to capture the relative importance of each h c i and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "h w i \u2200h c i \u2208 h c , h w i \u2208 h w .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "This shared weighting is a modification of the concept of leaky integration (Bengio et al., 2013) . On the other hand, equation 8 uses two independent weight matrices, W c , W w \u2208 R k\u00d7k , which does not constrain the network to use on other the other hidden representation. However, the gradients are still clipped at a low value (\u2248 1) to avoid explosion.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 97, |
|
"text": "(Bengio et al., 2013)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We then use the highway network (Srivastava et al., 2015) on the combined hidden state vector h. This network adaptively \"carries\" some dimensions of h to the output for predicting the correct label sequence. Therefore, the hidden states undergo the following transformation (Wen et al., 2016) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 275, |
|
"end": 293, |
|
"text": "(Wen et al., 2016)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "h i = \u03c1(h i ) g(W H \u2022h i +b H )+(1\u2212\u03c1(h)) h i (9) The function \u03c1(h w ) = \u03c3(W \u03c1 \u2022 h i + b \u03c1 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": ", which is a simple activation function. g is any non-linear function, such as sigmoid or hyperbolic tangent. Following the highway network's output, we pass the hidden embeddings to a dropout layer, which effectively reduces the number of hidden units by a fraction d, so h drop \u2208 R k/d\u00d7l , and a linear layer, which maps the h drop to a smaller embedding space. We label this space h \u2208 R k/d\u00d7f (f being the dimensions of the feature space) for brevity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Character and Word Representations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In the sequence tagging layer, we use the combined embeddings to identify the most likely sequence of tags for the input sentence. With the aggregated combined hidden state h, we have the information required to assign tags to the words of the input sentence. For this, we use conditional random fields (CRF). The traditional formulation of a CRF can be written, given a set of observations sequences X = x 1 , x 2 , ..., x k and sequence of labels Y = y 1 , y 2 , ..., y k as,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(Y |X; W, b) = k i=1 exp (y i\u22121 , y i , X) y \u2208L i=1 k exp (y i\u22121 , y i , X)", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where L is the set of possible labels in the tagset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Since the observation sequence in our formulation is essentially the output vector h, we can simplify the above equation by performing softmax to score the likelihood of a label being assigned. Therefore, the probability distribution is computed as,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (y i = t|h i ) = exp (h T i w j + b j ) k exp (h T i w m + b m )", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "with j, m \u2208 L as tag labels. We also compute the transition probability T of the label y i being assigned to h i given the labels of h i\u22121 . Therefore, the probability of the sequence of labels over the hidden states can be computed as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Seq(Y, h) = k i=1 P (y i = t|h i )+ k i=1 T (y i = t|y i\u22121 = t ); t, t \u2208 L", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Therefore the probability of that sequence Y computed above is calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(Y |h) = exp (Seq(Y, h) y \u2208L exp (Seq(y , h))", |
|
"eq_num": "(13)" |
|
} |
|
], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "4 Experimental Setup", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "In this section, we go over the various experiments, implementation details such as number of epochs, training time, datasets and the like. These are covered in detail for the replicability of our results, which are highlighted in section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Tagging Layer", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "To train and evaluate our model, we use the following datasets for each of the languages we work with multiple corpora, as our experiments span multiple languages. 3. For Italian, we use Ita-TimeBank's ILC corpus (Caselli et al., 2011a) the Italian corpus annotated using ISO-TimeML rules for events and temporal information. The corpus consists of 68,000 tokens and 10,591 event mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 236, |
|
"text": "(Caselli et al., 2011a)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "4. For French, we use the French TimeBank as it is the ISO-TimeML annotated reference corpus for event annotation tasks (Bittar et al., 2011) . The corpus consists of 16,208 tokens and 2,100 event mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 141, |
|
"text": "(Bittar et al., 2011)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "5. For Hindi, we use the gold-standard corpus of Goud et al. (2019b) , which consists of 810 event annotated news articles based on modified TimeML rules. The dataset has 242,201 tokens and 20,190 event mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 68, |
|
"text": "Goud et al. (2019b)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The datasets are annotated in the IOB format. At a word level, B represents the first token of an event, I represents all the other tokens of an event and O represents the tokens which are not a part of any event in the sentence. We train the model for 50 epochs, but the loss tends to stabilize at 25 to 35 epochs. We use a 40 dimensional character embedding, which we create ourselves, as mentioned in section 3.1. The CNN uses 40 filters with a window size of 3. For our contextual word embeddings, we use fastText embeddings for English (Bojanowski et al., 2017) which are pretrained on common-Crawl and the Wikipedia corpus. FastText embeddings are also used for Hindi, French, Spanish and Italian word representations (Grave et al., 2018) . The bi-LSTM trains on a fixed 300 hidden dimensions for all the bi-LSTMs in the architecture.", |
|
"cite_spans": [ |
|
{ |
|
"start": 541, |
|
"end": 566, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 724, |
|
"end": 744, |
|
"text": "(Grave et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Implementation and Training Details", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For the linear and dropout layers, the dropout is fixed to 0.3. The initial learning rate parameter is 0.015, which increases with a momentum of 0.9. On approaching the end of an epoch, the learning rate decays at a rate of 0.05. We train on a negative log-likelihood loss function", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Implementation and Training Details", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this section, we analyze the results of the ALINED model, and compare them to the current state of the art systems for the various languages we train on. We also provide a rigorous error analysis of our system and methodology.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Since no single system has compared work in event detection across the five languages that we have chosen for the experiments here, we draw comparisons to the various systems that trained on the individual or group of languages that have been used. Table 1 ahows the direct comparison of results. 1. For English, we compare our system to the SemEval-2013 Task 1 Task B (UzZaman et al., 2013), detection of event extents. We compare our models' scores with those of the best performing models of SemEval-2013.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 297, |
|
"text": "Table 1 ahows the direct comparison of results.", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "2. SemEval-2013 Task 1 Task B (UzZaman et al., 2013) performs the task of detecting event extents in Spanish texts. We compare our model performance to FSS-TimeEX and TipSemB-F, the best performing models in that task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "3. Caselli et al. (2011b) establishes the current state of the art for data driven models in temporal and event extent information in Italian.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 25, |
|
"text": "Caselli et al. (2011b)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Language Model Precision Recall F1-Score English ATT-1 (Jung and Stent, 2013) 81.44 80.67 81.05 ATT-2 (Jung and Stent, 2013) 81.02 80.81 80.91 ATT-3 (Jung and Stent, 2013) 81.95 75.57 78.63 KUL (Kolomiyets and Moens, 2013) 80 (Caselli et al., 2011b) 90.00 77.00 83.00 TIPSemIT FPC5 (Caselli et al., 2011b) 89.00 81.00 85.00 TIPSemIT FPC5Sem (Caselli et al., 2011b) 91 The system is a modification of the TipSem system. We compares our models to their reported scores. However, the corpus used in Caselli et al. (2011b) is the Ita-TimeBank which has been augmented with further annotations and resources, while our system uses just the Ita-TimeBank for event extraction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 77, |
|
"text": "(Jung and Stent, 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 124, |
|
"text": "(Jung and Stent, 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 171, |
|
"text": "(Jung and Stent, 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 222, |
|
"text": "(Kolomiyets and Moens, 2013)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 249, |
|
"text": "(Caselli et al., 2011b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 305, |
|
"text": "(Caselli et al., 2011b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 364, |
|
"text": "(Caselli et al., 2011b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 518, |
|
"text": "Caselli et al. (2011b)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "4. For French, we did not find systems that did event extraction from the French TimeBank corpus. The existing literature either creates and evaluates on a modified corpus (Bittar, 2009) or provides annotations trained on the TimeML annotated data and tested on Fr-TempEval2) (Arnulphy et al., 2015) . Therefore, we compare our performance to those, while also understanding that the comparison is not a strict metric. We hope to establish the scores here as baseline for further improvement over models in event detection in French.", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 186, |
|
"text": "(Bittar, 2009)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 299, |
|
"text": "(Arnulphy et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "5. To the best of our knowledge, there is no baseline system available for event detection in Hindi, therefore, we provide our model as the first performance metric in that direction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In most comparisons, our models perform equally well or better than the current systems for each of the above languages. we do not annotate or augment any of our data sources for using this model, so the reference corpora are being trained and tested upon, which are mentioned in section 4.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The calculation of the metrics of comparison, precision, recall and accuracy are calculated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "precision = tp/(tp + f p) recall = tp/(tp + f n) f \u2212 measure = 2 * (P * R)/(P + R)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "where tp is a true positive, where the part of the extent identified in the system output is the same as the expected output, f p is a false positive, where the token identified as part of the extent by the system is not a part of the expect output, and f n is a false negative, where a token not identified as a part of the extent by the system output, is a part of the expected output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We note a lower precision score in case of English and Spanish, as the number of false positives are slightly higher. We attribute this difference to the fact that due to the combination of sub-word level features, the model seems to sometimes \"spill over\" the boundary of single word or nominal. However, higher recall implies that there are fewer false negatives, meaning the model more accurately identifies those words which are in the event span. More labeled data would be very useful in learning the span boundaries, especially for nominal events, as the network would have more samples to learn the variations in the methods of event representation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For English, surprisingly, we see that an increase in the F1-scores. We attribute this to a combination of factors, including well defined verbal affixes which are attributed to events, and effective weighted combination of character and word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For Italian, we train and test solely on the Ita-TimeBank, whereas the current state of the art system trained on an augmented Ita-TimeBank (Caselli et al., 2011b) , which was enriched with more labeled data. Similarly, in French, we use the established French TimeBank, while experiments in French so far have been on self-annotated (Arnulphy et al., 2015) or TimeML corpora (Bittar, 2009) . Since these repositories of augmented data were not available to us at the time of writing this paper, the values reflect the same. However, it is to be noted that our system does provide an accuracy that is close to the currently reported stateof-the-art even in the absence of language specific features, explaining the fact that sub-word information is necessary for event detection in Italian and French as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 163, |
|
"text": "(Caselli et al., 2011b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 357, |
|
"text": "(Arnulphy et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 390, |
|
"text": "(Bittar, 2009)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For Hindi, our architecture provides a good baseline. However, the training data consists of far too many words that are out of vocabulary, which is a major issue in working with word embeddings. While the concatenation of sub-word information mitigates this, a system focused on a better representation of out of vocabulary words would significantly help the network. However, this required a larger labeled corpus as well, which makes this a challenge as Hindi is a low-resource language in terms of corpora for event detection and extraction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we show the development of ALINED, a language invariant neural sequence tagging architecture for event detection in five different languages, namely, English, Spanish, Italian, French and Hindi. We develop insight into the use of sub-word level information and combining it effectively. with the lexical and syntactic infor-mation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For our training and testing, we use only established corpora, which have not been augmented or changed in any way. We perform almost at par or better then the current state of the art in all the languages we train in. We establish a new best F-score for event extraction in English. We also establish the baseline for training and testing on the French TimeBank and for event extraction as a task in Hindi.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our model has been thoroughly error-analyzed, which we have explained based on the comparison of system output and expected tags. Given the nature of our results, we aim to establish the importance of sub-word level information in event detection. Further work in this task could be done by providing augmented reference corpora, so that problems based on lack of labeled data do not limit further research in this topic. This could also be tackled by effectively introducing transfer learning to neural event detection, where the model learns the representation of events irrespective of language, while accounting for sub-word, lexical and structural information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The stages of event extraction", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Workshop on Annotating and Reasoning about Time and Events", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Ahn. 2006. The stages of event extraction. In Proceedings of the Workshop on Annotating and Reasoning about Time and Events, pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Extraction of Event Structures from Text", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Araki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun Araki. 2018. Extraction of Event Structures from Text. Ph.D. thesis, Ph. D. thesis, Carnegie Mellon University.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Supervised machine learning techniques to detect timeml events in french and english", |
|
"authors": [ |
|
{ |
|
"first": "B\u00e9atrice", |
|
"middle": [], |
|
"last": "Arnulphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Claveau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Tannier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Vilnat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Applications of Natural Language to Information Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B\u00e9atrice Arnulphy, Vincent Claveau, Xavier Tannier, and Anne Vilnat. 2015. Supervised machine learn- ing techniques to detect timeml events in french and english. In International Conference on Applica- tions of Natural Language to Information Systems, pages 19-32. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Advances in optimizing recurrent networks", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Boulanger-Lewandowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Pascanu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "2013 IEEE International Conference on Acoustics, Speech and Signal Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8624--8628", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshua Bengio, Nicolas Boulanger-Lewandowski, and Razvan Pascanu. 2013. Advances in optimizing re- current networks. In 2013 IEEE International Con- ference on Acoustics, Speech and Signal Processing, pages 8624-8628. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Annotation of events and temporal expressions in french texts", |
|
"authors": [ |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Bittar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Third Linguistic Annotation Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andr\u00e9 Bittar. 2009. Annotation of events and temporal expressions in french texts. In Proceedings of the Third Linguistic Annotation Workshop, pages 48- 51. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "French timebank: an isotimeml annotated reference corpus", |
|
"authors": [ |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Bittar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Amsili", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Denis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurence", |
|
"middle": [], |
|
"last": "Danlos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies: short papers", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "130--134", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andr\u00e9 Bittar, Pascal Amsili, Pascal Denis, and Lau- rence Danlos. 2011. French timebank: an iso- timeml annotated reference corpus. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Tech- nologies: short papers-Volume 2, pages 130-134. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Annotating events, temporal expressions and relations in italian: the it-timeml experience for the ita-timebank", |
|
"authors": [ |
|
{ |
|
"first": "Tommaso", |
|
"middle": [], |
|
"last": "Caselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valentina", |
|
"middle": [ |
|
"Bartalesi" |
|
], |
|
"last": "Lenzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachele", |
|
"middle": [], |
|
"last": "Sprugnoli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 5th Linguistic Annotation Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "143--151", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tommaso Caselli, Valentina Bartalesi Lenzi, Rachele Sprugnoli, Emanuele Pianta, and Irina Prodanof. 2011a. Annotating events, temporal expressions and relations in italian: the it-timeml experience for the ita-timebank. In Proceedings of the 5th Linguistic Annotation Workshop, pages 143-151. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Data-driven approach using semantics for recognizing and classifying timeml events in italian", |
|
"authors": [ |
|
{ |
|
"first": "Tommaso", |
|
"middle": [], |
|
"last": "Caselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hector", |
|
"middle": [], |
|
"last": "Llorens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Borja", |
|
"middle": [], |
|
"last": "Navarro-Colorado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Estela", |
|
"middle": [], |
|
"last": "Saquete", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "533--538", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tommaso Caselli, Hector Llorens, Borja Navarro- Colorado, and Estela Saquete. 2011b. Data-driven approach using semantics for recognizing and classi- fying timeml events in italian. In Proceedings of the International Conference Recent Advances in Natu- ral Language Processing 2011, pages 533-538.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Event extraction via dynamic multi-pooling convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Yubo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daojian", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "167--176", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yubo Chen, Liheng Xu, Kang Liu, Daojian Zeng, and Jun Zhao. 2015. Event extraction via dy- namic multi-pooling convolutional neural networks. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), vol- ume 1, pages 167-176.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Query based event extraction along a timeline", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Leong Chieu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoong Keok", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 27th annual international ACM SIGIR conference on Research and development in information retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "425--432", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Leong Chieu and Yoong Keok Lee. 2004. Query based event extraction along a timeline. In Proceed- ings of the 27th annual international ACM SIGIR conference on Research and development in infor- mation retrieval, pages 425-432. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Neuroner: an easy-to-use program for named-entity recognition based on neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [ |
|
"Young" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franck Dernoncourt, Ji Young Lee, and Peter Szolovits. 2017. Neuroner: an easy-to-use program for named-entity recognition based on neural net- works. EMNLP 2017, page 97.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The automatic content extraction (ace) program-tasks, data, and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "George R Doddington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Przybocki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lance", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Lrec", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George R Doddington, Alexis Mitchell, Mark A Przy- bocki, Lance A Ramshaw, Stephanie M Strassel, and Ralph M Weischedel. 2004. The automatic content extraction (ace) program-tasks, data, and evaluation. In Lrec, volume 2, page 1.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A language-independent neural network for event detection", |
|
"authors": [ |
|
{ |
|
"first": "Xiaocheng", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Science China Information Sciences", |
|
"volume": "61", |
|
"issue": "9", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaocheng Feng, Bing Qin, and Ting Liu. 2018. A language-independent neural network for event detection. Science China Information Sciences, 61(9):092106.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Event nugget detection with forward-backward recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Reza", |
|
"middle": [], |
|
"last": "Ghaeini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoli", |
|
"middle": [], |
|
"last": "Fern", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasad", |
|
"middle": [], |
|
"last": "Tadepalli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "369--373", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reza Ghaeini, Xiaoli Fern, Liang Huang, and Prasad Tadepalli. 2016. Event nugget detection with forward-backward recurrent neural networks. In Proceedings of the 54th Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), volume 2, pages 369-373.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Event graphs for information retrieval and multi-document summarization. Expert systems with applications", |
|
"authors": [ |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan\u0161najder", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "41", |
|
"issue": "", |
|
"pages": "6904--6916", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Goran Glava\u0161 and Jan\u0160najder. 2014. Event graphs for information retrieval and multi-document sum- marization. Expert systems with applications, 41(15):6904-6916.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Leveraging multilingual resources for open-domain event detection", |
|
"authors": [ |
|
{ |
|
"first": "Jaipal", |
|
"middle": [], |
|
"last": "Goud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Allen", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Antony", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Shrivastava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings 15th Joint ACL-ISO Workshop on Interoperable Semantic Annotation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "76--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jaipal Goud, Pranav Goel, Allen J. Antony, and Man- ish Shrivastava. 2019a. Leveraging multilingual re- sources for open-domain event detection. In Pro- ceedings 15th Joint ACL-ISO Workshop on Interop- erable Semantic Annotation, pages 76-82.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A semantico-syntactic approach to event-mention detection and extraction in hindi", |
|
"authors": [ |
|
{ |
|
"first": "Jaipal", |
|
"middle": [], |
|
"last": "Goud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alok", |
|
"middle": [], |
|
"last": "Debnath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suhan", |
|
"middle": [], |
|
"last": "Prabhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Shrivastava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings 15th Joint ACL-ISO Workshop on Interoperable Semantic Annotation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jaipal Goud, Pranav Goel, Alok Debnath, Suhan Prabhu, and Manish Shrivastava. 2019b. A semantico-syntactic approach to event-mention de- tection and extraction in hindi. In Proceedings 15th Joint ACL-ISO Workshop on Interoperable Semantic Annotation, pages 63-75.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Learning word vectors for 157 languages", |
|
"authors": [ |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prakhar", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edouard Grave, Piotr Bojanowski, Prakhar Gupta, Ar- mand Joulin, and Tomas Mikolov. 2018. Learning word vectors for 157 languages. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC-2018).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bidirectional lstm-crf models for sequence tagging", |
|
"authors": [ |
|
{ |
|
"first": "Zhiheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.01991" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidirec- tional lstm-crf models for sequence tagging. arXiv preprint arXiv:1508.01991.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Att1: Temporal annotation using big windows and rich syntactic and semantic features", |
|
"authors": [ |
|
{ |
|
"first": "Hyuckchul", |
|
"middle": [], |
|
"last": "Jung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Stent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation (Se-mEval 2013)", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "20--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hyuckchul Jung and Amanda Stent. 2013. Att1: Tem- poral annotation using big windows and rich syn- tactic and semantic features. In Second Joint Con- ference on Lexical and Computational Semantics (* SEM), Volume 2: Proceedings of the Seventh In- ternational Workshop on Semantic Evaluation (Se- mEval 2013), volume 2, pages 20-24.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Character-aware neural language models", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Sontag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander M", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Thirtieth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim, Yacine Jernite, David Sontag, and Alexan- der M Rush. 2016. Character-aware neural language models. In Thirtieth AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Kul: data-driven approach to temporal parsing of newswire articles", |
|
"authors": [ |
|
{ |
|
"first": "Oleksandr", |
|
"middle": [], |
|
"last": "Kolomiyets", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Francine", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "83--87", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oleksandr Kolomiyets and Marie-Francine Moens. 2013. Kul: data-driven approach to temporal pars- ing of newswire articles. In Second Joint Confer- ence on Lexical and Computational Semantics (* SEM), Volume 2: Proceedings of the Seventh In- ternational Workshop on Semantic Evaluation (Se- mEval 2013), volume 2, pages 83-87.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Joint event extraction via structured prediction with global features", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "73--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global fea- tures. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), volume 1, pages 73-82.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Combining word-level and character-level representations for relation classification of informal text", |
|
"authors": [ |
|
{ |
|
"first": "Dongyun", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiran", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinge", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2nd Workshop on Representation Learning for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dongyun Liang, Weiran Xu, and Yinge Zhao. 2017. Combining word-level and character-level represen- tations for relation classification of informal text. In Proceedings of the 2nd Workshop on Representation Learning for NLP, pages 43-47.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "End-to-end sequence labeling via bi-directional lstm-cnns-crf", |
|
"authors": [ |
|
{ |
|
"first": "Xuezhe", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1064--1074", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuezhe Ma and Eduard Hovy. 2016. End-to-end se- quence labeling via bi-directional lstm-cnns-crf. In Proceedings of the 54th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1064-1074.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Overview of tac kbp 2015 event nugget track", |
|
"authors": [ |
|
{ |
|
"first": "Teruko", |
|
"middle": [], |
|
"last": "Mitamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengzhong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Teruko Mitamura, Zhengzhong Liu, and Eduard H Hovy. 2015. Overview of tac kbp 2015 event nugget track. In TAC.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Joint event extraction via recurrent neural networks", |
|
"authors": [ |
|
{

"first": "Thien Huu",

"middle": [],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Kyunghyun",

"middle": [],

"last": "Cho",

"suffix": ""

},

{

"first": "Ralph",

"middle": [],

"last": "Grishman",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "300--309", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen, Kyunghyun Cho, and Ralph Gr- ishman. 2016. Joint event extraction via recurrent neural networks. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 300-309.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Event detection and domain adaptation with convolutional neural networks", |
|
"authors": [ |
|
{

"first": "Thien Huu",

"middle": [],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Ralph",

"middle": [],

"last": "Grishman",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "365--371", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen and Ralph Grishman. 2015. Event detection and domain adaptation with convolutional neural networks. In Proceedings of the 53rd Annual Meeting of the Association for Computational Lin- guistics and the 7th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), volume 2, pages 365-371.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Modeling skip-grams for event detection with convolutional neural networks", |
|
"authors": [ |
|
{

"first": "Thien Huu",

"middle": [],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Ralph",

"middle": [],

"last": "Grishman",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "886--891", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen and Ralph Grishman. 2016. Mod- eling skip-grams for event detection with convolu- tional neural networks. In Proceedings of the 2016 Conference on Empirical Methods in Natural Lan- guage Processing, pages 886-891.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Modes timebank 1.0. Linguistic Data Consortium (LDC)", |
|
"authors": [ |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Guerrero Nieto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roser", |
|
"middle": [], |
|
"last": "Saur\u00ed", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marta Guerrero Nieto and Roser Saur\u00ed. 2012. Modes timebank 1.0. Linguistic Data Consortium (LDC), Philadelphia, PA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Richer event description: Integrating event coreference with temporal, causal and bridging annotation", |
|
"authors": [ |
|
{

"first": "Tim",

"middle": [],

"last": "O'Gorman",

"suffix": ""

},

{

"first": "Kristin",

"middle": [],

"last": "Wright-Bettner",

"suffix": ""

},

{

"first": "Martha",

"middle": [],

"last": "Palmer",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2nd Workshop on Computing News Storylines (CNS 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "47--56", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tim O'Gorman, Kristin Wright-Bettner, and Martha Palmer. 2016. Richer event description: Integrating event coreference with temporal, causal and bridg- ing annotation. In Proceedings of the 2nd Workshop on Computing News Storylines (CNS 2016), pages 47-56.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Iso-timeml: An international standard for semantic annotation", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Pustejovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiyong", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harry", |
|
"middle": [], |
|
"last": "Bunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Romary", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "LREC", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "394--397", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Pustejovsky, Kiyong Lee, Harry Bunt, and Lau- rent Romary. 2010. Iso-timeml: An international standard for semantic annotation. In LREC, vol- ume 10, pages 394-397.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Timebank 1.2 documentation. Event London", |
|
"authors": [ |
|
{

"first": "James",

"middle": [],

"last": "Pustejovsky",

"suffix": ""

},

{

"first": "Jessica",

"middle": [],

"last": "Littman",

"suffix": ""

},

{

"first": "Roser",

"middle": [],

"last": "Saur\u00ed",

"suffix": ""

},

{

"first": "Marc",

"middle": [],

"last": "Verhagen",

"suffix": ""

}
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Pustejovsky, Jessica Littman, Roser Saur\u00ed, and Marc Verhagen. 2006. Timebank 1.2 documenta- tion. Event London, no. April, pages 6-11.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Annotating temporal relations in catalan and spanish timeml annotation guidelines", |
|
"authors": [ |
|
{ |
|
"first": "Roser", |
|
"middle": [], |
|
"last": "Saur\u0131", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roser Saur\u0131. 2010. Annotating temporal relations in catalan and spanish timeml annotation guidelines. Technical report, Technical Report BM 2010-04, Barcelona Media.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Tempeval-3: Evaluating events, time expressions, and temporal relations", |
|
"authors": [ |
|
{ |
|
"first": "Naushad", |
|
"middle": [], |
|
"last": "Uzzaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hector", |
|
"middle": [], |
|
"last": "Llorens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Allen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Verhagen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Pustejovsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1206.5333" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naushad UzZaman, Hector Llorens, James Allen, Leon Derczynski, Marc Verhagen, and James Pustejovsky. 2012. Tempeval-3: Evaluating events, time ex- pressions, and temporal relations. arXiv preprint arXiv:1206.5333.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Semeval-2013 task 1: Tempeval-3: Evaluating time expressions, events, and temporal relations", |
|
"authors": [ |
|
{ |
|
"first": "Naushad", |
|
"middle": [], |
|
"last": "Uzzaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hector", |
|
"middle": [], |
|
"last": "Llorens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Allen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Verhagen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Pustejovsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naushad UzZaman, Hector Llorens, Leon Derczyn- ski, James Allen, Marc Verhagen, and James Puste- jovsky. 2013. Semeval-2013 task 1: Tempeval-3: Evaluating time expressions, events, and temporal relations. In Second Joint Conference on Lexical and Computational Semantics (* SEM), Volume 2: Proceedings of the Seventh International Workshop on Semantic Evaluation (SemEval 2013), volume 2, pages 1-9.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Learning text representation using recurrent convolutional neural network with highway layers", |
|
"authors": [ |
|
{

"first": "Y",

"middle": [],

"last": "Wen",

"suffix": ""

},

{

"first": "R",

"middle": [],

"last": "Luo",

"suffix": ""

},

{

"first": "J",

"middle": [],

"last": "Wang",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Neu-IR: The SIGIR 2016 Workshop on Neural Information Retrieval", |
|
"volume": "2016", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y Wen, R Luo, J Wang, et al. 2016. Learning text rep- resentation using recurrent convolutional neural net- work with highway layers. In Neu-IR: The SIGIR 2016 Workshop on Neural Information Retrieval, volume 2016. Association for Computing Machin- ery (ACM).", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Incorporating label dependency for answer quality tagging in community question answering via cnn-lstm-crf", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoqiang", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingcai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhihui", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Buzhou", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaolong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1231--1241", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Xiang, Xiaoqiang Zhou, Qingcai Chen, Zhihui Zheng, Buzhou Tang, Xiaolong Wang, and Yang Qin. 2016. Incorporating label dependency for an- swer quality tagging in community question answer- ing via cnn-lstm-crf. In Proceedings of COLING 2016, the 26th International Conference on Compu- tational Linguistics: Technical Papers, pages 1231- 1241.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Fss-timex for tempeval-3: Extracting temporal information from text", |
|
"authors": [ |
|
{ |
|
"first": "Vanni", |
|
"middle": [], |
|
"last": "Zavarella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hristo", |
|
"middle": [], |
|
"last": "Tanev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "58--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vanni Zavarella and Hristo Tanev. 2013. Fss-timex for tempeval-3: Extracting temporal information from text. In Second Joint Conference on Lexical and Computational Semantics (* SEM), Volume 2: Pro- ceedings of the Seventh International Workshop on Semantic Evaluation (SemEval 2013), volume 2, pages 58-63.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "The proposed ALINED model be made up of n characters, such that c w", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |