|
{ |
|
"paper_id": "2019", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:29:16.234423Z" |
|
}, |
|
"title": "An LSTM-Based Deep Learning Approach for Detecting Self-Deprecating Sarcasm in Textual Data", |
|
"authors": [ |
|
{ |
|
"first": "Ashraf", |
|
"middle": [], |
|
"last": "Kamal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Jamia Millia Islamia, (A Central University", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "ashrafkamal.mca@gmail.com" |
|
}, |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Abulaish", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Asian University", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "abulaish@sau.ac.in" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Self-deprecating sarcasm is a special category of sarcasm, which is nowadays popular and useful for many real-life applications, such as brand endorsement, product campaign, digital marketing, and advertisement. The selfdeprecating style of campaign and marketing strategy is mainly adopted to excel brand endorsement and product sales value. In this paper, we propose an LSTM-based deep learning approach for detecting self-deprecating sarcasm in textual data. To the best of our knowledge, there is no prior work related to self-deprecating sarcasm detection using deep learning techniques. Starting with a filtering step to identify self-referential tweets, the proposed approach adopts a deep learning model using LSTM for detecting self-deprecating sarcasm. The proposed approach is evaluated over three Twitter datasets and performs significantly better in terms of precision, recall, and f-score. 1 https://bit.ly/2WsUkUk (last accessed on 15-Nov-19) 2 https://literarydevices.net/sarcasm/ (last accessed on 15-Nov-19) 3 https://bit.ly/2vwjtid (last accessed on 15-Nov-19", |
|
"pdf_parse": { |
|
"paper_id": "2019", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Self-deprecating sarcasm is a special category of sarcasm, which is nowadays popular and useful for many real-life applications, such as brand endorsement, product campaign, digital marketing, and advertisement. The selfdeprecating style of campaign and marketing strategy is mainly adopted to excel brand endorsement and product sales value. In this paper, we propose an LSTM-based deep learning approach for detecting self-deprecating sarcasm in textual data. To the best of our knowledge, there is no prior work related to self-deprecating sarcasm detection using deep learning techniques. Starting with a filtering step to identify self-referential tweets, the proposed approach adopts a deep learning model using LSTM for detecting self-deprecating sarcasm. The proposed approach is evaluated over three Twitter datasets and performs significantly better in terms of precision, recall, and f-score. 1 https://bit.ly/2WsUkUk (last accessed on 15-Nov-19) 2 https://literarydevices.net/sarcasm/ (last accessed on 15-Nov-19) 3 https://bit.ly/2vwjtid (last accessed on 15-Nov-19", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Over a decade, the popularity of the microblogging platform, Twitter, has significantly increased for analyzing its content for varied realworld applications. The information extracted from Twitter can shed light on numerous applications, such as text categorization, sentiment analysis, election campaign and result prediction, opensource intelligence, and event detection. However, the contents available on Twitter in the form of tweets are short and limited to maximum 280 characters. Moreover, tweets are informal and mainly consist of misspelled words, slangs, bashes, acronyms, shortened words, nonliteral unstructured phrases, and emoticons. Due to existence of such volumunious informal texts in the form of tweets text information processing is a challenging task. Moreover, analysis of the tweets has become more challenging due to presence of figurative language, especially sarcasm. The main role of a sarcastic tweet is to reverse the actual polarity and alter the literal semantics. However, the computational detection of sarcasm benefits many applications, especially opinion mining and sentiment analysis systems (Bouazizi and Ohtsuki, 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1131, |
|
"end": 1159, |
|
"text": "(Bouazizi and Ohtsuki, 2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The online Macmillan dictionary defines sarcasm 1 as \"the activity of saying or writing the opposite of what you mean, or of speaking in a way intended to make someone else feel stupid or show them that you are angry\". Sarcasm is the most seen figurative language category over online social media platforms. The presence of sarcasm in tweets is dramatically rising and computational detection of sarcasm is a challenging and interesting task. It is widely covered by researchers in recent years, but the study on different categories 2 of sarcasm, such as self-deprecating sarcasm, is very limited. Self-deprecating sarcasm 3 is a special category of sarcasm in which users mainly apply sarcasm over themselves using disparage, ridicule, and contemptuous remarks in a sarcastic style using humor. It is defined as a \"sarcasm that plays off of an exaggerated sense of worthlessness and inferiority\". For example, the phrase love going to the office on Sunday in the text \"Really, I always love going to the office on Sunday\" represents a self-deprecating sarcasm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Nowadays, self-deprecating sarcasm has become a new style of product marketing and campaign strategy. It is mainly used for product endorsement purposes. This new marketing and campaign strategy is mainly used to excel the busi-ness growth, but without losing the brand value (Kamal and Abulaish, 2019) . The main aim of this strategy is to draw the attention of the customer towards the brand. As per the American marketing association 4 , \"self-deprecating advertising means consumers can see a different side to brands, making them more relatable and downto-earth\". Interestingly, after an in-depth analysis of tweets, we found that there are many tweets in which users refer themselves. We consider such tweets as self-referential or self-deprecating. For example, \"Really, I just love it\" is a self-referential tweet. Our analysis further reveals that some of the self-referential tweets are self-deprecating using sarcasm, i.e., in these tweets users undervalue, criticize, insult, and disparage themselves using sarcastic phrases. We consider all such selfreferential tweets as self-deprecating sarcasm.", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 302, |
|
"text": "(Kamal and Abulaish, 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a deep learning approach using Long Short-Term Memory (LSTM) to detect self-deprecating sarcasm in textual data like tweets. Initially, after preprocessing, we first identify self-referential tweets from the dataset based on a set of patterns, and rest of tweets are filtered out. The main motivation behind the filtration of the non-self-referential tweets is to increases the overall efficiency of the selfdeprecating sarcasm detection process. In brief, the main role of the self-referential tweets identification module can be summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Identification of explicit self-referential tweets: After an in-depth analysis across all the datasets, we identify a set of patterns followed by the self-referential tweets. Table 1 presents a set of regular expression based patterns and it is categorized as specific patterns and generic patterns. The specific patterns are based on tags and tokens present in the tweet which indicate self-referential nature of the tweet. On the other hand, generic patterns are based on the presence of first person singular/plural personal pronoun. These patterns are found as strong indicator of self-referential tweets. We consider such self-referential tweets as explicit, otherwise implicit.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 185, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Identification of clusters from explicit selfreferential tweets: We identify explicit self-referential tweets clusters based on overlapping contents (i.e., tri-grams) and using Jaccard similarity between the explicit selfreferential tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Pattern-mining from clusters: Once the explicit self-referential tweets clusters are identified, we fetch the most frequent substring (i.e, tri-gram) as a referential pattern from each cluster.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Identification of implicit self-referential tweets: If an implicit tweet matches with the referential pattern of any cluster, then it is considered as a self-referential tweet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Merge with explicit tweets: Finally, all identified implicit self-referential tweets are merged with explicit tweets to generate a list of the self-referential tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Once the list of self-referential tweets is generated, it is passed to the model learning and classification module for self-deprecating sarcasm detection. To this end, each self-referential tweet is converted into an input vector, it is fed to pretrained GloVe word embedding, and model learning and classification task is accomplished using LSTM for detecting self-deprecating sarcasm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This remainder of this paper is organized as follows. Section 2 presents a brief review of the state-of-the-art techniques and approaches for computational sarcasm detection. It also highlights the uniqueness of our proposed approach over the existing state-of-the-art techniques. Section 3 presents the functional details of the proposed approach, including model learning and classification using LSTM. Section 4 presents the experimental and evaluation results. Finally, section 5 concludes the paper and discusses future research directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Automatic sarcasm detection is considered as a classification task (Zhang et al., 2016) , and the main task is to classify any piece of texts as sarcasm or non-sarcasm. applied semi-supervised approach to detect sarcasm in Amazon product reviews. applied the same approach to detect sarcasm in tweets and product reviews. Gonz\u00e1lez-Ib\u00e1nez et al. (2011) considered lexical and pragmatics features to detect sarcasm on Twitter datasets. Riloff et al. (2013) identified sarcastic contrastbased patterns and considered words with positive sentiment and negative phrases in a tweet containing sarcasm. Liebrecht et al. (2013) discussed the role of hyperrbole in sarcasm detection. Pt\u00e1cek et al. (2014) detected sarcasm in English and Czech tweets. Bharti et al. 2015proposed rule-based algorithms based on some patterns for sarcasm detection. They also highlighted the importance of hyperbole in sarcastic texts. Bamman and Smith (2015) extracted extra-linguistic information based on the context of the instances for sarcasm detection. Rajadesingan et al. (2015) applied three machine learning classifiers -Support Vector Machine (SVM), logistic regression, and decision tree for sarcasm detection, considering the behavioral modeling-based approach. Ghosh et al. (2015) proposed SemEval-2015 (task-11) and considered sarcasm, irony, and metaphor for sentiment analysis in Twitter data. Joshi et al. (2015) discussed the role of incongruity for sarcasm detection. Bouazizi and Ohtsuki (2016) considered a pattern-based approach. Mishra et al. (2016) considered lexicaland contextual-based features. Joshi et al. 2016proposed word-embedding related features using Word2Vec 5 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 87, |
|
"text": "(Zhang et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 454, |
|
"text": "Riloff et al. (2013)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 596, |
|
"end": 619, |
|
"text": "Liebrecht et al. (2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 695, |
|
"text": "Pt\u00e1cek et al. (2014)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 907, |
|
"end": 930, |
|
"text": "Bamman and Smith (2015)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1031, |
|
"end": 1057, |
|
"text": "Rajadesingan et al. (2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1246, |
|
"end": 1265, |
|
"text": "Ghosh et al. (2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1382, |
|
"end": 1401, |
|
"text": "Joshi et al. (2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1524, |
|
"end": 1544, |
|
"text": "Mishra et al. (2016)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Recently, deep learning models have been used as a popular technique for sarcasm detection problem. Zhang et al. (2016) applied a bi-directional gated recurrent neural network for sarcasm detection. They considered syntactic and semantic information and extracted contextual features. Amir et al. (2016) applied content-and user embedding-based Convolutional Neural Network (CNN) model. Ghosh and Veale (2016) considered CNN, LSTM, and Deep Neural Network (DNN) for sarcasm detection. Poria et al. (2016) considered features, such as sentiment, emotion, and personality and applied SVM and CNN classifiers. Tay et al. (2018) considered attention-based neural model for sarcasm detection. Hazarika et al. (2018) proposed a contextual sarcasm detector using CNN-based textual model in which context and content related information are used for sarcasm detection. Recently, Dubey et al. (2019a) converted sarcastic texts into non-sarcastic interpretation using encoder-decoder, attention, and pointer generator architectures. Dubey et al. 5 https://code.google.com/archive/p/ word2vec/ (last accessed on 15-Nov-19) (2019b) detected sarcasm in numerical portion of tweets using CNN and attention network.", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 303, |
|
"text": "Amir et al. (2016)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 485, |
|
"end": 504, |
|
"text": "Poria et al. (2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 624, |
|
"text": "Tay et al. (2018)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 688, |
|
"end": 710, |
|
"text": "Hazarika et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 871, |
|
"end": 891, |
|
"text": "Dubey et al. (2019a)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1036, |
|
"end": 1037, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Though sarcasm detection is widely covered by the researchers, studies related to the varied categories of sarcasm are still not explored. Recently, Abulaish and Kamal (2018) noticed the use of self-deprecating sarcasm in Twitter, mainly for the purpose of brand endorsement and sales campaign. They considered self-deprecating sarcasm as a special category of sarcasm in which users express sarcasm over themselves. They also proposed a rule-based and machine learning-based approach for detecting self-deprecating sarcasm detection in Twitter. The proposed work in this paper is new LSTM-based deep learning approach for self-deprecating sarcasm detection in textual data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 174, |
|
"text": "Abulaish and Kamal (2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section, we discuss the proposed LSTMbased deep learning approach for self-deprecating sarcasm detection. Figure 1 presents the workflow of the proposed approach. It can be seen from this figure that besides data crawling and data pre-processing, the main functionalities of the proposed approach are self-referential tweets detection, and self-deprecating sarcasm detection using deep learning technique. Further details about all functional modules are presented in the following sub-sections.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 122, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The data crawling module aims to retrieve English tweets using Twitter's REST API and it is implemented in Python 2.7. We have considered tweet ids provided as a part of two benchmark datasets - Pt\u00e1cek et al. (2014) and SemEval-2015 6 to curate tweets using our data crawling module. In addition, we have also created our own Twitter dataset containing tweets crawled for the period 1st April 2019 to 19th May 2019.", |
|
"cite_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 215, |
|
"text": "Pt\u00e1cek et al. (2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Crawling", |
|
"sec_num": "3.1" |
|
}, |
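
{

"text": "A minimal sketch of the tweet-hydration step is given below, assuming the tweepy library as a wrapper over Twitter's REST API; the credential strings and the hydrate() helper are illustrative placeholders rather than the original implementation.\n\nimport tweepy\n\nauth = tweepy.OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')\nauth.set_access_token('ACCESS_TOKEN', 'ACCESS_SECRET')\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\ndef hydrate(tweet_ids):\n    # statuses_lookup accepts at most 100 ids per call; deleted or\n    # protected tweets are silently dropped (cf. Section 4.2)\n    tweets = []\n    for i in range(0, len(tweet_ids), 100):\n        batch = tweet_ids[i:i + 100]\n        tweets.extend(status.text for status in api.statuses_lookup(batch))\n    return tweets",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Crawling",

"sec_num": "3.1"

},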
|
{ |
|
"text": "The data pre-processing module aims to apply various pre-processing tasks on the curated tweets to produce fine-grained data for self-deprecating sarcasm detection. The pre-processing consists of data cleaning (removal of dots, retweets, numbers, hashtags, emoticons, @mention, URL's, am- ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Pre-Processing", |
|
"sec_num": "3.2" |
|
}, |
|
|
{ |
|
"text": "Self-referential tweets identification Figure 1 : Work-flow of the proposed approach persands, double quotes, and extra white spaces) and lower-case conversion. Thereafter, spacy 7 is used to tokenize the tweets and generate POS tags for each token.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 47, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Pre-Processing", |
|
"sec_num": "3.2" |
|
}, |
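
{

"text": "A compact sketch of the pre-processing module is given below; the cleaning steps mirror the list above, but the regular expressions and the en_core_web_sm model are illustrative assumptions rather than the authors' exact rules.\n\nimport re\nimport spacy\n\nnlp = spacy.load('en_core_web_sm')\n\ndef preprocess(tweet):\n    text = re.sub(r'\\bRT\\b', ' ', tweet)        # retweet markers\n    text = re.sub(r'https?://\\S+', ' ', text)   # URLs\n    text = re.sub(r'[@#]\\w+', ' ', text)        # @mentions and hashtags\n    text = re.sub(r'[0-9]+', ' ', text)         # numbers\n    text = re.sub(r'[.&\\\"]+', ' ', text)        # dots, ampersands, double quotes\n    text = re.sub(r'\\s+', ' ', text).strip().lower()\n    doc = nlp(text)                             # spaCy tokenization and POS tagging\n    return [(token.text, token.pos_) for token in doc]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Pre-Processing",

"sec_num": "3.2"

},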
|
{ |
|
"text": "After an in-depth analysis of the datasets it is observed that all tweets are not self-referential or self-deprecating in nature. To this end, this module presents a filtration mechanism to generate a corpus of self-referential tweets. The non-self-referential tweets are filtered from further consideration because they rarely contain a self-deprecating sarcasm. Motivated by Zhao et al. (2015) , identification of self-referential tweets is performed using the following sequence of steps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 377, |
|
"end": 395, |
|
"text": "Zhao et al. (2015)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(i) Identification of Explicit Self-Referential Tweets:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this step, we identify the self-referential tweets that have explicit pattern in the text and these tweets are considered for further processing to mine implicit patterns (signals) of self-referential behavior in tweets. The explicit self-referential tweets have certain patterns, which can be defined using the regular expressions given in Table 1 . The tweets from the pre-processed corpus are matched using these regular expressions to identify the explicit self-referential tweets. The pattern for explicit nature of self-referential tweets are of two types -specific and generic.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 344, |
|
"end": 351, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The specific patterns are based on either sequential order of tokens and tags, or sequential order of tokens. If any of the specific pattern 7 https://spacy.io/ (last accessed on 15-Nov-19) from table 1 founds in the pre-processed tweets, then it is added to the explicit set, otherwise it is checked further from generic patterns. The generic patterns are based on the first person singular/plural personal pronoun, such as 'i', 'we', and their objective and possessive cases, such as 'my', 'me', 'mine', 'myself ', 'are', 'our', 'us', and 'ourselves'. The first person singular/plural personal pronoun and its grammatical variants are strong indicator for a tweet to be referred as selfreferential.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Patterns Category U H (i | my) Specific (we | i) [love] (it | when) Specific when (my | our) Specific (am | are) [still] Specific (i | my | me | mine | myself ) Generic (we | are | us | our | ourselves) Generic", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "If any of the token from the pre-processed tweet matches with any generic patterns, then such tweet is considered as explicit self-referential tweet, and added to the explicit set of selfreferential tweets, E s . Otherwise, the tweet is added to the set of implicit tweets, I t . Further, the identified explicit tweets are modeled as a undirected weighted graph and given to a clustering algorithm for further processing, which is defined in the next step.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
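
{

"text": "Step (i) can be sketched in Python by encoding the Table 1 patterns as regular expressions, as shown below; the exact handling of the U and H tags is not specified in the text, so these compiled patterns are simplifying assumptions.\n\nimport re\n\nSPECIFIC_PATTERNS = [re.compile(p) for p in (\n    r'\\b(we|i) (love )?(it|when)\\b',\n    r'\\bwhen (my|our)\\b',\n    r'\\b(am|are)( still)?\\b',\n)]\nGENERIC_PATTERN = re.compile(\n    r'\\b(i|my|me|mine|myself|we|are|us|our|ourselves)\\b')\n\ndef split_explicit_implicit(tweets):\n    # tweets matching any specific or generic pattern form the explicit set E_s;\n    # the remaining tweets form the implicit set I_t\n    E_s, I_t = [], []\n    for t in tweets:\n        if any(p.search(t) for p in SPECIFIC_PATTERNS) or GENERIC_PATTERN.search(t):\n            E_s.append(t)\n        else:\n            I_t.append(t)\n    return E_s, I_t",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Self-Referential Tweets Identification",

"sec_num": "3.3"

},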
|
{ |
|
"text": "(ii) Identification of Clusters from Explicit Self-Referential Tweets: This step clusters the tweets in E s to identify the near-duplicate (similar) explicit self-referential tweets. To this end, first E s tweets are modeled as an undirected graph, where each node of the graph represents a tweet and edge represents the similarity between the underlying pair of nodes. The similarity between two tweets (nodes), say t i and t j , is calculated using Jaccard coefficient to observe the overlapping set of tri-grams between the tweets, as defined in equation 1, where T i and T j represents the set of tri-grams for tweets t i and t j , respectively. We choose tri-grams in our experiment because self-deprecating phrases in a tweet generally contain at least three words. We create an edge between a pair of near-duplicate tweets if the Jaccard similarity based on set of tri-grams is greater than a threshold 0.6 as defined in (Zhao et al., 2015) . Thereafter, depth first search algorithm is applied on the constructed graph to extract clusters (connected components), where each cluster represents the set of identical explicit self-referential tweets. The extraction process only extract clusters having atleast three tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 928, |
|
"end": 947, |
|
"text": "(Zhao et al., 2015)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "J(t i , t j ) = |T i \u2229 T j | |T i \u222a T j |", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
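
{

"text": "A self-contained sketch of step (ii) follows: tri-gram extraction, the Jaccard similarity of equation 1, and cluster extraction as connected components via depth-first search; the thresholds follow the values stated above, and helper names are illustrative.\n\ndef trigrams(tweet):\n    toks = tweet.split()\n    return {tuple(toks[k:k + 3]) for k in range(len(toks) - 2)}\n\ndef jaccard(ti, tj):\n    # equation 1: |Ti intersect Tj| / |Ti union Tj|\n    union = ti | tj\n    return len(ti & tj) / len(union) if union else 0.0\n\ndef extract_clusters(explicit_tweets, threshold=0.6, min_size=3):\n    grams = [trigrams(t) for t in explicit_tweets]\n    n = len(explicit_tweets)\n    adj = {i: [] for i in range(n)}\n    for i in range(n):\n        for j in range(i + 1, n):\n            if jaccard(grams[i], grams[j]) > threshold:  # near-duplicate edge\n                adj[i].append(j)\n                adj[j].append(i)\n    seen, clusters = set(), []\n    for start in range(n):  # depth-first search over connected components\n        if start in seen:\n            continue\n        stack, comp = [start], []\n        while stack:\n            v = stack.pop()\n            if v not in seen:\n                seen.add(v)\n                comp.append(v)\n                stack.extend(adj[v])\n        if len(comp) >= min_size:  # keep clusters with at least three tweets\n            clusters.append([explicit_tweets[v] for v in comp])\n    return clusters",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Self-Referential Tweets Identification",

"sec_num": "3.3"

},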
|
{ |
|
"text": "(iii) Pattern-Mining from Clusters: Following the cluster identification process in the previous step, this step mines frequent patterns from the extracted clusters. To this end, the occurrence probability of every pattern of each cluster is computed and patterns having probability greater than 0.8 are regarded as patterns. For example, if a cluster has 5 tweets and a tri-gram \"great way start\" occurs in four out of 5 tweets, then it can be regarded as a frequent pattern (tri-gram). This procedure is repeated for every pattern in each cluster to extract the list of frequent patterns. Thereafter, the duplicate frequent patterns identified from two or more clusters are filtered to generate unique set of frequent patterns P .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
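
{

"text": "A sketch of step (iii), reusing the trigrams() helper from the previous sketch: a tri-gram is kept as a frequent pattern when it occurs in at least 80% of a cluster's tweets (matching the four-out-of-five example), and duplicates across clusters are merged into the unique set P.\n\ndef mine_patterns(clusters, min_prob=0.8):\n    patterns = set()  # the set union removes duplicates across clusters\n    for cluster in clusters:\n        counts = {}\n        for tweet in cluster:\n            for g in trigrams(tweet):\n                counts[g] = counts.get(g, 0) + 1\n        for g, c in counts.items():\n            if c / len(cluster) >= min_prob:  # occurrence probability\n                patterns.add(g)\n    return patterns",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Self-Referential Tweets Identification",

"sec_num": "3.3"

},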
|
{ |
|
"text": "(iv) Identification of Implicit Self-Referential Tweets:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The first step of this whole procedure held tweets which have no explicit pattern as self-referential tweets, called implicit tweets. This step will improve the recall of the self-referential tweets identification process. This step matches the identified patterns from previous step in implicit tweets to extract implicit self-referential tweets. To this end, first an implicit tweet is tokenized in to tri-grams and thereafter these set of tri-grams are matched with the set of frequent patterns P using Jaccard similarity. Finally, a tweet that has Jaccard similarity greater than a threshold 0.6 is considered as a implicit self-referential tweets. This procedure is repeated for every tweets of I t to generate a set of implicit self-referential tweets, I s . For example, Finally, in this step, the identified implicit selfreferential tweets are added to the set of explicit self-referential tweets to generate a final set of self-referential tweets i.e. S = E s \u222a I s . In the remaining paper, this curated corpus of selfreferential tweets is used for experimental evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Referential Tweets Identification", |
|
"sec_num": "3.3" |
|
}, |
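
{

"text": "The final two steps can be sketched as follows, reusing trigrams() and jaccard() from the earlier sketches: an implicit tweet whose tri-gram set has Jaccard similarity above 0.6 with the frequent-pattern set P is promoted to self-referential, and the result is merged with the explicit set.\n\ndef identify_implicit(implicit_tweets, patterns, threshold=0.6):\n    selected = []\n    for tweet in implicit_tweets:\n        if jaccard(trigrams(tweet), patterns) > threshold:\n            selected.append(tweet)\n    return selected\n\n# final corpus: S = E_s union I_s\n# S = set(E_s) | set(identify_implicit(I_t, P))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Self-Referential Tweets Identification",

"sec_num": "3.3"

},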
|
{ |
|
"text": "In recent years, deep learning has become an emerging trend in the field of text mining and natural language processing. The semantic modeling of textual data using deep learning approaches has drawn significant attention among the research community. Various neural network-based models including CNN, LSTM, and DNN are used for diverse text modeling applications such as document classification, machine translation, speech recognition, and so on. Detection of self-deprecating sarcasm is one such application that is largely unexplored. To this end, we modeled the selfdeprecating sarcasm detection as a deep-learning problem. On analysis, it is found that the long sequence of words or phrases plays an important role to construct a self-deprecating sarcastic patterns, such as love being ignored, office on sunday, and happy to be late in a tweet. Therefore, to model the long-sequences based self-deprecating sarcastic pattern, LSTM seems a perfect fit. Using model learning through LSTM, a self-referential tweet is classified as a Self-Deprecating Sarcasm (SDS) or Non Self-Deprecating Sarcasm (NSDS).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Learning and Classification", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "A detailed discussion about the model learning and classification is presented in following subsection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Learning and Classification", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "In this layer, a self-referential tweet, containing n words, is given as an input. In this manner, each self-referential tweet is converted to a self-referential input vector where every word is replaced with its index value of the dictionary, i.e., S R 1\u00d7n . Further, each self-referential input vector is padded and converted in the matrix form. The padding is used to make every input of same length. Thereafter, padded input vector is passed to the next layer (i.e., embedding layer).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Layer:", |
|
"sec_num": null |
|
}, |
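
{

"text": "A minimal sketch of the input layer using the Keras preprocessing utilities; MAX_LEN (the common padded length) and the variable self_referential_tweets are illustrative assumptions.\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\nMAX_LEN = 30  # assumed maximum tweet length L\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(self_referential_tweets)   # build the word-index dictionary\nsequences = tokenizer.texts_to_sequences(self_referential_tweets)\nX = pad_sequences(sequences, maxlen=MAX_LEN, padding='post')  # padded index vectors",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Input Layer:",

"sec_num": null

},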
|
{ |
|
"text": "In the padded vector from the input layer, all the words are replaced with their corresponding representation vector or embeddings. In this paper, we have used pre-trained GloVe 200dimensional embeddings trained on a Twitter corpus of 27 billion tokens. As a result of this procedure, the self-referential input tweet matrix is converted to S R L\u00d7D , where L is the maximum tweets length and D represents embedding dimension.Thereafter, the embedding layer output is passed to the LSTM layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedding Layer:", |
|
"sec_num": null |
|
}, |
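
{

"text": "The embedding matrix can be assembled from the released GloVe Twitter vectors (glove.twitter.27B.200d.txt) as sketched below; leaving out-of-vocabulary rows as zero vectors is an assumption not spelled out in the paper.\n\nimport numpy as np\n\nD = 200  # embedding dimension\nembeddings = {}\nwith open('glove.twitter.27B.200d.txt', encoding='utf-8') as f:\n    for line in f:\n        parts = line.split()\n        embeddings[parts[0]] = np.asarray(parts[1:], dtype='float32')\n\nvocab_size = len(tokenizer.word_index) + 1\nembedding_matrix = np.zeros((vocab_size, D))\nfor word, idx in tokenizer.word_index.items():\n    vec = embeddings.get(word)\n    if vec is not None:\n        embedding_matrix[idx] = vec  # unseen words stay as zero vectors",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Embedding Layer:",

"sec_num": null

},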
|
{ |
|
"text": "Hochreiter and Schmidhuber (1997) proposed LSTM architecture, which is a type of RNN. It is easier to train an LSTM model in comparison to an RNN model. Moreover, it also overcomes the vanishing gradient problem while back propagation through time. In LSTM, the long term temporal dependencies can be easily captured between two time steps using the memory cell. Figure 2 presents the architecture of LSTM, where each memory cell consists of input gate i t , forget gate f t , and output gate o t . These digital gates are responsible for memory update mechanism, and it acts as a function for the current input x t and previous hidden state h t\u22121 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 369, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "LSTM:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "An LSTM model is trained using equations 2, 3, 4, 5, 6, and 7. Equations 2 and 3 present input and forget gates, whereas equations 5, 6, and 7 present output gate, new cell state, and hidden state, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LSTM:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "i t = \u03c3(W i [h t\u22121 , x t ] + b i )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "LSTM:", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "h t c t Figure 2 : The architecture of LSTM", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 16, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model learning and classification", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f t = \u03c3(W f [h t\u22121 , x t ] + b f )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Model learning and classification", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "q t = tanh(W q [h t\u22121 , x t ] + b q ) (4) o t = \u03c3(W o [h t\u22121 , x t ] + b 0 ) (5) c t = f t c t\u22121 + i t q t (6) h t = o t tanh(c t )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Model learning and classification", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In equation 4, the non-linear activation function tanh is used to squash the value between -1 and 1, and it plays a role for cell state to forget the memory. On the other hand, non-linear activation function, sigmoid (\u03c3) generates an output in the interval [0, 1] . LSTM works as the gating function for the three gates, which are discussed in the previous paragraph. Since it has a value in interval [0, 1], the information across the gates are either passed completely or not.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 260, |
|
"text": "[0,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 263, |
|
"text": "1]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model learning and classification", |
|
"sec_num": null |
|
}, |
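
{

"text": "For concreteness, equations 2-7 can be transcribed into NumPy as a single LSTM time step; the weight matrices act on the concatenation [h t\u22121, x t] and are assumed to be given.\n\nimport numpy as np\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\ndef lstm_step(x_t, h_prev, c_prev, W_i, W_f, W_q, W_o, b_i, b_f, b_q, b_o):\n    z = np.concatenate([h_prev, x_t])\n    i_t = sigmoid(W_i @ z + b_i)    # input gate, equation 2\n    f_t = sigmoid(W_f @ z + b_f)    # forget gate, equation 3\n    q_t = np.tanh(W_q @ z + b_q)    # candidate state, equation 4\n    o_t = sigmoid(W_o @ z + b_o)    # output gate, equation 5\n    c_t = f_t * c_prev + i_t * q_t  # new cell state, equation 6\n    h_t = o_t * np.tanh(c_t)        # hidden state, equation 7\n    return h_t, c_t",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model learning and classification",

"sec_num": null

},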
|
{ |
|
"text": "The output from the LSTM layer is passed to the fully connected dense layer followed by a sigmoid activation function. We have used binary crossentropy as the loss function, used 40 epochs for training the model, batch-size of 256, verbose is 2, and adam as an optimizer. The dataset is divided into training and testing parts for experimental evaluations wherein 80% of the data is used for training and remaining 20% is used for testing procedure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "FC and Output Layers:", |
|
"sec_num": null |
|
}, |
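
{

"text": "Putting the layers together, a sketch of the full model with the stated hyper-parameters (binary cross-entropy, adam, 40 epochs, batch size 256, verbose 2, 80/20 split) is given below; the number of LSTM units and the freezing of the pre-trained embeddings are assumptions not reported in the paper, and X and y denote the padded tweets and their SDS/NSDS labels.\n\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, LSTM, Dense\nfrom sklearn.model_selection import train_test_split\n\nmodel = Sequential()\nmodel.add(Embedding(vocab_size, D, weights=[embedding_matrix],\n                    input_length=MAX_LEN, trainable=False))\nmodel.add(LSTM(128))  # assumed number of LSTM units\nmodel.add(Dense(1, activation='sigmoid'))  # FC + sigmoid: SDS vs. NSDS\nmodel.compile(loss='binary_crossentropy', optimizer='adam',\n              metrics=['accuracy'])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nmodel.fit(X_train, y_train, epochs=40, batch_size=256, verbose=2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "FC and Output Layers:",

"sec_num": null

},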
|
{ |
|
"text": "In this section, we discuss the experimental evaluation of the proposed approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments Setup and Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We have implemented the experimental setup for data crawling, data pre-processing, and selfreferential tweets identification tasks in Python 2.7, model training and classification tasks in Python 3.5, and used Keras neural network API for LSTM model. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The proposed approach is evaluated over three Twitter datasets including two benchmark datasets by Pt\u00e1cek et al. (2014) and SemEval-2015. The authors released only tweet-ids for these benchmark datasets due to privacy concerns. Therefore, a crawler is developed in Python 2.7 to curate tweets corresponding to provided tweet-ids using Twitter REST API. However, few tweets were deleted or protected and, as a result, we were unable to crawl all the tweets. A brief statistics about these two datasets is given in the first two rows of table 4. Apart from the two benchmark datasets, we curated a Twitter dataset from 1st April to 19th May 2019 using \"#sarcasm\" hashtag. We refer this dataset as Twitter-280 and its statistical summary is given in the third row of table 4. Similarly, we crawled non-sarcastic tweets using two #not, #hate hastags. Table 5 presents the statistics of identified self-referential tweets after the selfreferential tweets identification module. Table 6 presents the final statistics of the balanced and unbalanced datasets generated from table 5. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 119, |
|
"text": "Pt\u00e1cek et al. (2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 847, |
|
"end": 854, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 973, |
|
"end": 980, |
|
"text": "Table 6", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "This section discusses the standard data mining metrics -precision, recall, and f-score, which are used to evaluate the proposed approach. Formally, these metrics in terms of True Positives (TP), False Positives (FP), and False Negatives (FN) are define in equations 8, 9, and 10, where TP is defined as the number of correctly classified as SDS tweets, FP is defined as number of NSDS tweets misclassified as SDS tweets, and FN is defined as number of SDS tweets misclassified as NSDS tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Precision (\u03c0) = TP FP + TP (8) Recall (\u03c1) = TP FN + TP (9) F-score (F1) = 2 \u00d7 \u03c0 \u00d7 \u03c1 \u03c0 + \u03c1", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.3" |
|
}, |
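
{

"text": "Equations 8-10 translate directly into code; the helper functions below are a straightforward transcription from the TP, FP, and FN counts.\n\ndef precision(tp, fp):\n    return tp / (fp + tp)   # equation 8\n\ndef recall(tp, fn):\n    return tp / (fn + tp)   # equation 9\n\ndef f_score(pi, rho):\n    return 2 * pi * rho / (pi + rho)  # equation 10",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Metrics",

"sec_num": "4.3"

},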
|
{ |
|
"text": "This section presents the experimental evaluation results over the three datasets discussed in subsection 4.2. All the experimental evaluations are performed using an LSTM model trained on 40 epoch. Table 7 presents the performance evaluation results of our proposed approach using the LSTM model on balanced and unbalanced datasets in terms of three evaluation metrics. On analysis, it can be observed from this table that in terms of all the three evaluation metrics, the proposed approach performs comparatively better on balanced datasets and shows slightly lower performance on unbalanced datasets. Another interesting observation from this table is that, in terms of all the three evaluation metrics, proposed approach performs best on Pt\u00e1cek et al. (2014) dataset. Further, table 7 shows that the proposed approach performs comparatively better on the balanced version of our created dataset. Table 7 : Performance evaluation of our proposed approach using LSTM on balanced and unbalanced datasets presented in table 6", |
|
"cite_spans": [ |
|
{ |
|
"start": 742, |
|
"end": 762, |
|
"text": "Pt\u00e1cek et al. (2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 206, |
|
"text": "Table 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 900, |
|
"end": 907, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "To be the best of authors knowledge there is no prior work on self-deprecating sarcasm detection using deep learning approach. However, a rule and machine learning-based approach was presented by the authors in Abulaish and Kamal (2018) and proposed approach is compared with that one. In Abulaish and Kamal (2018) , authors considered Pt\u00e1cek et al. (2014) dataset to detect selfdeprecating sarcasm in tweets. We implemented Abulaish and Kamal (2018) to evaluated its efficacy over the three datasets. Figures 3 and 4 present the comparative performance evaluation of the proposed approach with Abulaish and Kamal (2018) in terms of precision, recall, and f-score over balanced and unbalanced version of all the three datasets, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 236, |
|
"text": "Abulaish and Kamal (2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 314, |
|
"text": "Abulaish and Kamal (2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 356, |
|
"text": "Pt\u00e1cek et al. (2014)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 450, |
|
"text": "Abulaish and Kamal (2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 620, |
|
"text": "Abulaish and Kamal (2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 502, |
|
"end": 517, |
|
"text": "Figures 3 and 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparative Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "It can be observed from figures 3 and 4 that the proposed LSTM-based deep learning approach outperforms Abulaish and Kamal (2018) in terms of precision, recall, and f-score on both balanced and unbalanced datasets. However, Abulaish and Kamal (2018) reported slightly better performance in terms of precision and f-score results on Pt\u00e1cek et al. (2014) dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 129, |
|
"text": "Abulaish and Kamal (2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 249, |
|
"text": "Abulaish and Kamal (2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 352, |
|
"text": "Pt\u00e1cek et al. (2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparative Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "In this paper, we have proposed a new approach using LSTM-based deep learning for detecting self-deprecating sarcasm in textual data. The self-deprecating sarcasm is a special category of sarcasm in which users apply sarcasm on themselves. One of the major applications of this work is to promote self-deprecating marketing strategies. The proposed approach is evaluated over three Twitter datasets, including two benchmark datasets, and the experimental results are promising. It also performs significantly better than one of the state-of-the-art methods, which used rulebased and machine learning techniques for selfdeprecating sarcasm detection. Exploring new patterns and consideration of multimedia contents for self-deprecating sarcasm detection seems one of the promising directions of future research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://bit.ly/2EEuQGQ (last accessed on 15-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://bit.ly/34OnGgB (last accessed on 15-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This publication is an outcome of the R&D work undertaken project under the Visvesvaraya PhD Scheme of Ministry of Electronics & Information Technology, Government of India, being implemented by Digital India Corporation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Selfdeprecating sarcasm detection: An amalgamation of rule-based and machine learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Abulaish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashraf", |
|
"middle": [], |
|
"last": "Kamal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the International Conference on Web Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "574--579", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muhammad Abulaish and Ashraf Kamal. 2018. Self- deprecating sarcasm detection: An amalgamation of rule-based and machine learning approach. In Pro- ceedings of the International Conference on Web In- telligence (IEEE/WIC/ACM), Santiago, Chile, pages 574-579. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Modelling context with user embeddings for sarcasm detection in social media", |
|
"authors": [ |
|
{ |
|
"first": "Silvio", |
|
"middle": [], |
|
"last": "Amir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Lyu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paula", |
|
"middle": [], |
|
"last": "Carvalho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M\u00e1rio", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Silva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 20th Special Interest Group on Natural Language Learning Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "167--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silvio Amir, Byron C. Wallace, Hao Lyu, Paula Carvalho, and M\u00e1rio J. Silva. 2016. Modelling context with user embeddings for sarcasm detec- tion in social media. In Proceedings of the 20th Special Interest Group on Natural Language Learning Conference on Computational Natural Language Learning((SIGNLL-CoNLL), Berlin, Ger- many, pages 167-177. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Contextualized sarcasm detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Bamman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 9th International Association for the Advancement of Artificial Intelligence Conference on Web and Social Media (ICWSM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "574--577", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Bamman and Noah A. Smith. 2015. Contex- tualized sarcasm detection on twitter. In Proceed- ings of the 9th International Association for the Ad- vancement of Artificial Intelligence Conference on Web and Social Media (ICWSM), Oxford, UK, pages 574-577. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A pattern-based approach for sarcasm detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Mondher", |
|
"middle": [], |
|
"last": "Bouazizi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoaki", |
|
"middle": [], |
|
"last": "Ohtsuki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "IEEE Access", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "5477--5488", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mondher Bouazizi and Tomoaki Ohtsuki. 2016. A pattern-based approach for sarcasm detection on twitter. IEEE Access, 4:5477-5488.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Semi-supervised recognition of sarcastic sentences in twitter and amazon", |
|
"authors": [ |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Davidov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Tsur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 14th Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--116", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dmitry Davidov, Oren Tsur, and Ari Rappoport. 2010. Semi-supervised recognition of sarcastic sentences in twitter and amazon. In Proceedings of the 14th Conference on Computational Natural Language Learning (CoNLL), Uppsala, Sweden, pages 107- 116. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Deep models for converting sarcastic utterances into their non sarcastic interpretation", |
|
"authors": [ |
|
{ |
|
"first": "Abhijeet", |
|
"middle": [], |
|
"last": "Dubey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the ACM India Joint International Conference on Data Science and Management of Data (CoDS-COMAD)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "289--292", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijeet Dubey, Aditya Joshi, and Pushpak Bhat- tacharyya. 2019a. Deep models for converting sar- castic utterances into their non sarcastic interpreta- tion. In Proceedings of the ACM India Joint In- ternational Conference on Data Science and Man- agement of Data (CoDS-COMAD), Kolkata India, pages 289-292. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "when numbers matter!\": Detecting sarcasm in numerical portions of text", |
|
"authors": [ |
|
{ |
|
"first": "Abhijeet", |
|
"middle": [], |
|
"last": "Dubey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lakshya", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arpan", |
|
"middle": [], |
|
"last": "Somani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 10th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Medi Analysis (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "72--80", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijeet Dubey, Lakshya Kumar, Arpan Somani, Aditya Joshi, and Pushpak Bhattacharyya. 2019b. \"when numbers matter!\": Detecting sarcasm in nu- merical portions of text. In Proceedings of the 10th Workshop on Computational Approaches to Subjec- tivity, Sentiment and Social Medi Analysis (NAACL), Minneapolis, USA, pages 72-80. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Semeval-2015 task 11: Sentiment analysis of figurative language in twitter", |
|
"authors": [ |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guofu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Veale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Shutova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Barnden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Reyes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 9th International Workshop on Semantic Evaluation (SemEval)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "470--478", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aniruddha Ghosh, Guofu Li, Tony Veale, Paolo Rosso, Ekaterina Shutova, John Barnden, and Antonio Reyes. 2015. Semeval-2015 task 11: Sentiment analysis of figurative language in twitter. In Pro- ceedings of the 9th International Workshop on Se- mantic Evaluation (SemEval), Denver, Colorado, pages 470-478. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Fracking sarcasm using neural network", |
|
"authors": [ |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Veale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 15th North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aniruddha Ghosh and Tony Veale. 2016. Fracking sarcasm using neural network. In Proceedings of the 15th North American Chapter of the Associa- tion for Computational Linguistics: Human Lan- guage Technologies (NAACL-HLT), San Diego, Cal- ifornia, USA, pages 161-169. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Identifying sarcasm in twitter: A closer look", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Gonz\u00e1lez-Ib\u00e1nez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Smaranda", |
|
"middle": [], |
|
"last": "Muresan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nina", |
|
"middle": [], |
|
"last": "Wacholder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "581--586", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Gonz\u00e1lez-Ib\u00e1nez, Smaranda Muresan, and Nina Wacholder. 2011. Identifying sarcasm in twit- ter: A closer look. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics (ACL), Portland, Oregon, pages 581- 586. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Cascade: Contextual sarcasm detection in online discussion forums", |
|
"authors": [ |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sruthi", |
|
"middle": [], |
|
"last": "Gorantla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Zimmermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1837--1848", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Devamanyu Hazarika, Soujanya Poria, Sruthi Gorantla, Erik Cambria, Roger Zimmermann, and Rada Mi- halcea. 2018. Cascade: Contextual sarcasm detec- tion in online discussion forums. In Proceedings of the 27th International Conference on Computa- tional Linguistics (COLING), Santa Fe, New Mex- ico, USA, pages 1837-1848. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Harnessing context incongruity for sarcasm detection", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinita", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd nnual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (ACL-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "757--762", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Vinita Sharma, and Pushpak Bhat- tacharyya. 2015. Harnessing context incongruity for sarcasm detection. In Proceedings of the 53rd nnual Meeting of the Association for Computational Lin- guistics and the 7th International Joint Conference on Natural Language Processing (ACL-IJCNLP), Beijing, China, page 757-762. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Are word embedding-based features useful for sarcasm detection?", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vaibhav", |
|
"middle": [], |
|
"last": "Tripathi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Carman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1006--1011", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Vaibhav Tripathi, Kevin Patel, Pushpak Bhattacharyya, and Mark Carman. 2016. Are word embedding-based features useful for sarcasm detec- tion? In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing (EMNLP), Austin, Texas, pages 1006-1011. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Selfdeprecating humor detection: A machine learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Ashraf", |
|
"middle": [], |
|
"last": "Kamal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Abulaish", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 16th International Conference of the Pacific Association for Computational Linguistics (PACLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashraf Kamal and Muhammad Abulaish. 2019. Self- deprecating humor detection: A machine learning approach. In Proceedings of the 16th International Conference of the Pacific Association for Compu- tational Linguistics (PACLING), Hanoi, Vietnam, pages 1-13. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The perfect solution for detecting sarcasm in tweets #not", |
|
"authors": [ |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Liebrecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Kunneman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Bosch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 4th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis (WASSA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--37", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christine Liebrecht, Florian Kunneman, and Antal V. D. Bosch. 2013. The perfect solution for de- tecting sarcasm in tweets #not. In Proceedings of the 4th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis (WASSA), Atlanta, Georgia, pages 29-37. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Predicting readers' sarcasm understandability by modeling gaze behavior", |
|
"authors": [ |
|
{ |
|
"first": "Abhijit", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diptesh", |
|
"middle": [], |
|
"last": "Kanojia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 13th Association for the Advancement of Artificial Intelligence Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijit Mishra, Diptesh Kanojia, and Pushpak Bhat- tacharyya. 2016. Predicting readers' sarcasm under- standability by modeling gaze behavior. In Proceed- ings of the 13th Association for the Advancement of Artificial Intelligence Conference on Artificial Intel- ligence, Phoenix, Arizona, USA. AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A deeper look into sarcastic tweets using deep convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prateek", |
|
"middle": [], |
|
"last": "Vij", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 25th International Conference on Computational Linguistics (COLING) Osaka", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1601--1612", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soujanya Poria, Erik Cambria, Devamanyu Hazarika, and Prateek Vij. 2016. A deeper look into sarcas- tic tweets using deep convolutional neural networks. In Proceedings of the 25th International Confer- ence on Computational Linguistics (COLING) Os- aka, Japan, pages 1601-1612.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Sarcasm detection on czech and english twitter", |
|
"authors": [ |
|
{ |
|
"first": "Tom\u00e1s", |
|
"middle": [], |
|
"last": "Pt\u00e1cek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Habernal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 25th International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "213--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom\u00e1s Pt\u00e1cek, Ivan Habernal, and Jun Hong. 2014. Sarcasm detection on czech and english twitter. In Proceedings of the 25th International Conference on Computational Linguistics (COLING), Dublin, Ire- land, pages 213-223.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Sarcasm detection on twitter: A behavioral modeling approach", |
|
"authors": [ |
|
{ |
|
"first": "Ashwin", |
|
"middle": [], |
|
"last": "Rajadesingan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reza", |
|
"middle": [], |
|
"last": "Zafarani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 8th Association for Computing Machinery International Conference on Web Search and Data Mining (WSDM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "97--106", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashwin Rajadesingan, Reza Zafarani, and Huan Liu. 2015. Sarcasm detection on twitter: A behav- ioral modeling approach. In Proceedings of the 8th Association for Computing Machinery Interna- tional Conference on Web Search and Data Mining (WSDM), Shanghai, China, pages 97-106. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Sarcasm as contrast between a positive sentiment and negative situation", |
|
"authors": [ |
|
{ |
|
"first": "Ellen", |
|
"middle": [], |
|
"last": "Riloff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashequl", |
|
"middle": [], |
|
"last": "Qadir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Surve", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lalindra", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruihong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "704--714", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ellen Riloff, Ashequl Qadir, Prafulla Surve, Lalin- dra D. Silva, Nathan Gilbert, and Ruihong Huang. 2013. Sarcasm as contrast between a positive sen- timent and negative situation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), Seattle, Washing- ton, USA, pages 704-714. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Reasoning with sarcasm by reading inbetween", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Tay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anh", |
|
"middle": [ |
|
"Tuan" |
|
], |
|
"last": "Luu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siu", |
|
"middle": [ |
|
"Cheung" |
|
], |
|
"last": "Hui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1010--1020", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Tay, Anh Tuan Luu, Siu Cheung Hui, and Jian Su. 2018. Reasoning with sarcasm by reading in- between. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguistics (ACL), Melbourne, Australia, pages 1010-1020. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Icwsm-a great catchy name: Semi-supervised recognition of sarcastic sentences in online product reviews", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Tsur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Davidov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 4th International Association for the Advancement of Artificial Intelligence Conference on Weblogs and Social Media (ICWSM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "162--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Tsur, Dmitry Davidov, and Ari Rappoport. 2010. Icwsm-a great catchy name: Semi-supervised recog- nition of sarcastic sentences in online product re- views. In Proceedings of the 4th International Asso- ciation for the Advancement of Artificial Intelligence Conference on Weblogs and Social Media (ICWSM), Washington, DC, USA, pages 162-169. AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Tweet sarcasm detection using deep neural network", |
|
"authors": [ |
|
{ |
|
"first": "Meishan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guohong", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 26th International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2449--2460", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meishan Zhang, Yue Zhang, and Guohong Fu. 2016. Tweet sarcasm detection using deep neural network. In Proceedings of the 26th International Confer- ence on Computational Linguistics (COLING), Os- aka, Japan, pages 2449-2460.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Enquiring minds: Early detection of rumors in social media from enquiry posts", |
|
"authors": [ |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Resnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiaozhu", |
|
"middle": [], |
|
"last": "Mei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 24th International Conference on World Wide Web (WWW)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1395--1405", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhe Zhao, Paul Resnick, and Qiaozhu Mei. 2015. En- quiring minds: Early detection of rumors in social media from enquiry posts. In Proceedings of the 24th International Conference on World Wide Web (WWW), Florence, Italy, pages 1395-1405.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Regular expressions to identify explicit selfreferential tweets", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
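The table above only names the filter; its regular expressions were not preserved in this parse. Below is a minimal Python sketch of what an explicit self-referential tweet filter of this kind could look like; the first-person pronoun patterns are illustrative assumptions, not the authors' actual rules.

```python
import re

# Hypothetical pattern list: the paper's actual regular expressions were lost
# in extraction, so these first-person patterns are illustrative assumptions.
SELF_REF_PATTERNS = [
    r"\bi\b",        # standalone "I"
    r"\bi'?m\b",     # "im" / "i'm"
    r"\bme\b",
    r"\bmy\b",
    r"\bmyself\b",
    r"\bmine\b",
]
SELF_REF_RE = re.compile("|".join(SELF_REF_PATTERNS), re.IGNORECASE)

def is_explicit_self_referential(tweet: str) -> bool:
    """Return True if the tweet contains an explicit first-person reference."""
    return SELF_REF_RE.search(tweet) is not None

print(is_explicit_self_referential("I'm the worst coder ever #sarcasm"))  # True
print(is_explicit_self_referential("Great weather here today"))           # False
```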
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Implicit self-referential tweets identified from I", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">presents the hyper-</td></tr><tr><td colspan=\"2\">parameters values of LSTM model used in the pro-</td></tr><tr><td>posed approach.</td><td/></tr><tr><td>Hyper-parameters</td><td>Value</td></tr><tr><td>Embedding dimension</td><td>200</td></tr><tr><td>Padding sequences</td><td>20</td></tr><tr><td>Spatial dropout (after embedding layer)</td><td>0.4</td></tr><tr><td>Number of neurons</td><td>256</td></tr><tr><td>Dropout (after LSTM layer)</td><td>0.4</td></tr></table>", |
|
"text": "", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"content": "<table><tr><td>: Hyper-parameters values for LSTM model</td></tr><tr><td>used in our proposed approach</td></tr></table>", |
|
"text": "", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
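To make the tabulated hyper-parameters concrete, here is a minimal Keras sketch of an LSTM classifier wired with those values. It is not the authors' released code: the vocabulary size, optimizer, loss, and sigmoid output head are assumptions, since the table fixes only the embedding dimension, padding length, dropout rates, and neuron count.

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SpatialDropout1D, LSTM, Dropout, Dense
from tensorflow.keras.preprocessing.sequence import pad_sequences

VOCAB_SIZE = 20000  # assumption: the vocabulary size is not reported in the table
MAX_LEN = 20        # padding sequence length, from the table

model = Sequential([
    Embedding(input_dim=VOCAB_SIZE, output_dim=200),  # embedding dimension 200
    SpatialDropout1D(0.4),           # spatial dropout after the embedding layer
    LSTM(256),                       # 256 neurons in the LSTM layer
    Dropout(0.4),                    # dropout after the LSTM layer
    Dense(1, activation="sigmoid"),  # assumed binary head: self-deprecating sarcasm or not
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# Tokenized tweets (lists of integer word ids) are padded/truncated to MAX_LEN:
X = pad_sequences([[4, 17, 256, 9]], maxlen=MAX_LEN)
print(model.predict(X).shape)  # (1, 1): one probability per tweet
```

SpatialDropout1D drops whole embedding channels rather than individual activations, a common regularization choice for short, noisy tweet text.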
|
"TABREF7": { |
|
"num": null, |
|
"content": "<table><tr><td>Datasets</td><td colspan=\"2\">#Sarcasm #Non-sarcasm</td><td>Total (#tweets)</td></tr><tr><td>Pt\u00e1cek et al. (2014)</td><td>29580</td><td>37767</td><td>67347</td></tr><tr><td>SemEval-2015</td><td>761</td><td>1609</td><td>2370</td></tr><tr><td>Twitter-280</td><td>6971</td><td>7017</td><td>13988</td></tr><tr><td>Total (#tweets)</td><td>37312</td><td>46393</td><td>83705</td></tr></table>", |
|
"text": "Statistics of the crawled datasets", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Statistics of identified self-referential tweets by the self-referential tweets identification module", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF10": { |
|
"num": null, |
|
"content": "<table><tr><td>Datasets</td><td>Evaluation results \u03c0 \u03c1 F1</td></tr><tr><td>Pt\u00e1cek et al. (2014)</td><td>Balanced Unbalanced 0.92 0.89 0.90 0.93 0.94 0.93</td></tr><tr><td>SemEval-2015</td><td>Balanced Unbalanced 0.93 0.75 0.83 0.86 0.84 0.85</td></tr><tr><td>Twitter-280</td><td>Balanced Unbalanced 0.89 0.86 0.88 0.90 0.92 0.93</td></tr></table>", |
|
"text": "Statistics of the balanced and unbalanced datasets generated from table 5", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |