|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T08:06:20.592395Z" |
|
}, |
|
"title": "AraNet: A Deep Learning Toolkit for Arabic Social Media", |
|
"authors": [ |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Natural Language Processing Lab", |
|
"institution": "University of British Columbia", |
|
"location": {} |
|
}, |
|
"email": "muhammad.mageeed@ubc.ca" |
|
}, |
|
{ |
|
"first": "Chiyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Natural Language Processing Lab", |
|
"institution": "University of British Columbia", |
|
"location": {} |
|
}, |
|
"email": "chiyuzh@mail.ubc.ca" |
|
}, |
|
{ |
|
"first": "Azadeh", |
|
"middle": [], |
|
"last": "Hashemi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Natural Language Processing Lab", |
|
"institution": "University of British Columbia", |
|
"location": {} |
|
}, |
|
"email": "azadeh.hashemi@ubc.ca" |
|
}, |
|
{

"first": "El Moatez Billah",

"middle": [],

"last": "Nagoudi",

"suffix": "",

"affiliation": {

"laboratory": "Natural Language Processing Lab",

"institution": "University of British Columbia",

"location": {}

},

"email": ""

}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We describe AraNet, a collection of deep learning Arabic social media processing tools. Namely, we exploit an extensive host of both publicly available and novel social media datasets to train bidirectional encoders from transformers (BERT) focused at social meaning extraction. AraNet models predict age, dialect, gender, emotion, irony, and sentiment. AraNet either delivers state-of-the-art performance on a number of these tasks and performs competitively on others. AraNet is exclusively based on a deep learning framework, giving it the advantage of being feature-engineering free. To the best of our knowledge, AraNet is the first to performs predictions across such a wide range of tasks for Arabic NLP. As such, AraNet has the potential to meet critical needs. We publicly release AraNet to accelerate research, and to facilitate model-based comparisons across the different tasks.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We describe AraNet, a collection of deep learning Arabic social media processing tools. Namely, we exploit an extensive host of both publicly available and novel social media datasets to train bidirectional encoders from transformers (BERT) focused at social meaning extraction. AraNet models predict age, dialect, gender, emotion, irony, and sentiment. AraNet either delivers state-of-the-art performance on a number of these tasks and performs competitively on others. AraNet is exclusively based on a deep learning framework, giving it the advantage of being feature-engineering free. To the best of our knowledge, AraNet is the first to performs predictions across such a wide range of tasks for Arabic NLP. As such, AraNet has the potential to meet critical needs. We publicly release AraNet to accelerate research, and to facilitate model-based comparisons across the different tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The proliferation of social media has made it possible to study large online communities at scale. This offers opportunities to make important discoveries, facilitate decision making, guide policies, improve health and well-being, aid disaster response, attend to population needs in pandemics such as the current COVID-19, etc. The wide host of languages, languages varieties, and dialects used on social media and the nuanced differences between users of various backgrounds (e.g., different age groups, gender identities) make it especially difficult to derive sufficiently valuable insights based on single prediction tasks. For these reasons, it is highly desirable to develop natural language processing (NLP) tools that can help piece together more complete pictures of events impacting individuals of different identities across different geographic regions. In this work, we propose AraNet , a suit of tools that has the promise to play such a role of Arabic social media processing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "For Arabic, a collection of languages and varieties spoken by a wide population of \u223c 400 million native speakers covering a vast geographical region (shown in Figure 1 ), no such suite of tools currently exists. Many works have focused on sentiment analysis, e.g., (Abdul-Mageed et al., 2014a; Nabil et al., 2015; ElSahar and El-Beltagy, 2015; Al Sallab et al., 2015; Al-Moslmi et al., 2018; Al-Smadi et al., 2019; Al-Ayyoub et al., 2019; Farha and Magdy, 2019) and dialect identification (Elfardy and Diab, 2013; Zaidan and Callison-Burch, 2011; Zaidan and Callison-Burch, 2014; Cotterell and Callison-Burch, 2014; Zhang and Abdul-Mageed, 2019b; Bouamor et al., 2019a) . However, there is rarity of tools for other tasks such as gender and age detection. This motivates our toolkit, which we hope can meet the current critical need for studying Arabic communities online. This is especially valuable given the waves of protests, uprisings, and revolutions that have swept the region during the last decade.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 293, |
|
"text": "(Abdul-Mageed et al., 2014a;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 313, |
|
"text": "Nabil et al., 2015;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 343, |
|
"text": "ElSahar and El-Beltagy, 2015;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 367, |
|
"text": "Al Sallab et al., 2015;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 391, |
|
"text": "Al-Moslmi et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 414, |
|
"text": "Al-Smadi et al., 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 438, |
|
"text": "Al-Ayyoub et al., 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 461, |
|
"text": "Farha and Magdy, 2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 513, |
|
"text": "(Elfardy and Diab, 2013;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 546, |
|
"text": "Zaidan and Callison-Burch, 2011;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 579, |
|
"text": "Zaidan and Callison-Burch, 2014;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 615, |
|
"text": "Cotterell and Callison-Burch, 2014;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 646, |
|
"text": "Zhang and Abdul-Mageed, 2019b;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 647, |
|
"end": 669, |
|
"text": "Bouamor et al., 2019a)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 167, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Although we create new models for tasks such as sentiment analysis and gender detection as part of AraNet, our primary focus is to provide strong baselines across the various tasks. We believe this will facilitate comparisons across models. This is particularly useful due to absence of standardization across datasets for many of the tasks, and given the somewhat ephemeral nature of parts of some types of these data. In particular, many tasks are developed based on social media posts such as tweets that are distributed under restrictive conditions. For example, Twitter terms require release of data only in the form of tweet ids, making it challenging to acquire 100% of these tweets especially once the data are several months old. These reasons make modelbased comparisons appealing, as a way to measure research progress in absence of easy benchmarking. Our general approach is to adopt sensible baselines across the various AraNet tasks, but we do not necessarily explicitly compare to all previous research. This is the case since most existing works either exploit smaller data (and so it will not be a fair comparison), use methods pre-dating BERT (and so will likely be outperformed by our models). In addition, we note that although it would have been possible to acquire better results by feature engineering (especially on smaller datasets), our main goal is to keep our models free of laborious feature engineering. In some tasks, we even acquire better results that what is reported here by adopting more involved methods. But, again, we do our best here to keep all models relatively comparable (and as simple as possible) in terms of the methods employed to acquire them. Our hope is that, by adopting model-based comparisons, we can help accelerate progress on Arabic social media processing. For these reasons, we also package models from our recent works on dialect (Zhang and Abdul-Mageed, 2019b ) and irony (Zhang and Abdul-Mageed, 2019a) as part of AraNet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1890, |
|
"end": 1920, |
|
"text": "(Zhang and Abdul-Mageed, 2019b", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 1933, |
|
"end": 1964, |
|
"text": "(Zhang and Abdul-Mageed, 2019a)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows: In Section 2. we describe our methods. In Section 3., we describe or refer to published literature for the data we exploit for each task. Also in Section 3., we provide results from our models. Section 4. is about AraNet design and use, and Section 5. is about ethical considerations. We overview related works in Section 6., and conclude in Section 7..", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Transformer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised BERT", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "Across all our tasks, we use Bidirectional Encoder Representations from Transformers (BERT). BERT is based on the Transformer architecture of (Vaswani et al., 2017) , which we briefly introduce here. The Transformer depends solely on self-attention, thus allowing for parallelizing the network (unlike RNNs). It is an encoder-decoder architecture where the encoder takes a sequence of symbol representations x (i) . . . x (n) , maps them into a sequence of continuous representations z (i) . . . x (n) that are then used by the decoder to generate an output sequence y (i) . . . y (n) , one symbol at a time. This is performed using self-attention, where different positions of a single sequence are related to one another. The Transformer employs an attention mechanism based on a function that operates on queries, keys, and values. The attention function maps a query and a set of key-value pairs to an output, where the output is a weighted sum of the values. For each value, a weight is computed as a compatibility function of the query with the corresponding key. This particular version of attention is a scaled dot product of queries and keys (each of d k ) that is scaled by a factor of 1", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 164, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF40" |
|
}
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised BERT", |
|
"sec_num": "2.1." |
|
}, |
|
|
{ |
|
"text": "on which a softmax is applied to acquire the weights on the values. The scaled dot product attention is computed as as a set of queries, keys, and values in three matrices Q, K, and V, respectively, follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised BERT", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Attention (Q, K, V) = sof tmax QK T \u221a d k V", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Supervised BERT", |
|
"sec_num": "2.1." |
|
}, |
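As a concrete companion to Equation (1), the following is a minimal NumPy sketch of single-head scaled dot-product attention. The function name and the shapes are illustrative only; they are not taken from the AraNet or Transformer codebases.

```python
import numpy as np

def scaled_dot_product_attention(Q, K, V):
    """Compute softmax(Q K^T / sqrt(d_k)) V for a single attention head."""
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)                   # query-key compatibility
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)    # row-wise softmax
    return weights @ V                                # weighted sum of the values

# Example: 4 query positions, 6 key/value positions, d_k = d_v = 8
Q, K, V = np.random.randn(4, 8), np.random.randn(6, 8), np.random.randn(6, 8)
print(scaled_dot_product_attention(Q, K, V).shape)    # (4, 8)
```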
|
{ |
|
"text": "Encoder of the Transformer in (Vaswani et al., 2017) has 6 attention layers, each of which has h attention heads (multihead attention) to allow the model to jointly attend to information from different representation subspaces across different positions. Each of the 6 layers also has a simple, fully-connected feed-forward network (FFN) that is applied to each position separately and identically that different parameters across the different layers. Decoder of the Transformer is similar to the encoder but has a third sub-layer that performs multi-head attention over the encoder stack. Since the Transformer discards with both recurrence and convolution, it resorts to the so-called positional encoding (based on sin and cosine functions) at the bottoms of the encoder and decoder stacks as a way to capture order of the sequence. We now introduce BERT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 52, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised BERT", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "BERT. BERT involves two self-supervised learning tasks, (1) masked language models (Masked LM) and (2) next sentence prediction. Since BERT uses bidirectional conditioning, a given percentage of random input tokens are masked and the model attempts to predict these masked tokens. This is the Masked LM task, where masked tokens are simply replaced by a string [MASK] . (Devlin et al., 2018 ) mask 15% of the tokens (the authors use Word-Pieces) and feed the final hidden vectors of these masked tokens to an output softmax over the vocabulary. The next sentence prediction task is just binary classification. For a given sentence S, two sentences A and B are generated where A (positive class) is an actual sentence from the corpus and B is a randomly chosen sentence (negative class). Once trained on an unlabeled dataset, BERT can then be fine-tuned with supervised data for a downstream task (e.g., text classification, question answering).", |
|
"cite_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 367, |
|
"text": "[MASK]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 390, |
|
"text": "(Devlin et al., 2018", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised BERT", |
|
"sec_num": "2.1." |
|
}, |
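A minimal sketch of the Masked LM corruption step described above, assuming integer WordPiece ids and a placeholder [MASK] id; real BERT additionally keeps or randomly replaces a fraction of the selected tokens rather than always inserting [MASK].

```python
import random

MASK_ID = 103        # assumed id for [MASK]; illustrative only
IGNORE_INDEX = -100  # label value for positions the loss should skip

def mask_tokens(token_ids, mask_prob=0.15):
    """Corrupt ~15% of tokens and return (inputs, labels) for the Masked LM task."""
    inputs, labels = [], []
    for tok in token_ids:
        if random.random() < mask_prob:
            inputs.append(MASK_ID)        # the model sees [MASK] here
            labels.append(tok)            # ...and must recover the original token
        else:
            inputs.append(tok)
            labels.append(IGNORE_INDEX)   # unmasked positions are ignored by the loss
    return inputs, labels
```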
|
{ |
|
"text": "All our models are trained in a fully supervised fashion, with dialect id being the only task where we leverage semisupervised learning. We briefly outline our semi-supervised methods next.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised BERT", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "Only for the dialect id task, we investigate augmenting our human-labeled training data with automaticallypredicted data from self-training. Self-training is a wrapper method for semi-supervised learning (Triguero et al., 2015; Pavlinek and Podgorelec, 2017) where a classifier is initially trained on a (usually small) set of labeled samples D l , then is used to classify an unlabeled sample set D u . Most confident predictions acquired by the original supervised model are added to the labeled set, and the model is iteratively re-trained. We perform self-training using different confidence thresholds and choose different percentages from predicted data to add to our dialect training set. We only report best settings here, and the reader is referred to our winning system on the MADAR shared task for more details on these different settings (Zhang and Abdul-Mageed, 2019b ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 227, |
|
"text": "(Triguero et al., 2015;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 258, |
|
"text": "Pavlinek and Podgorelec, 2017)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 850, |
|
"end": 880, |
|
"text": "(Zhang and Abdul-Mageed, 2019b", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-Training", |
|
"sec_num": "2.2." |
|
}, |
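The loop below is a schematic rendering of this self-training procedure; `train_model` and `predict_proba` stand in for BERT fine-tuning and scoring, and the threshold and iteration count are placeholders rather than the settings used for the MADAR system.

```python
def self_train(labeled, unlabeled, train_model, predict_proba,
               threshold=0.9, iterations=3):
    """Iteratively grow the labeled set with confident predictions on unlabeled data."""
    for _ in range(iterations):
        model = train_model(labeled)                 # fit on the current labeled set D_l
        still_unlabeled = []
        for text in unlabeled:                       # score the unlabeled set D_u
            probs = predict_proba(model, text)       # dict: label -> probability
            label, confidence = max(probs.items(), key=lambda kv: kv[1])
            if confidence >= threshold:
                labeled.append((text, label))        # keep only confident predictions
            else:
                still_unlabeled.append(text)
        unlabeled = still_unlabeled
    return train_model(labeled)                      # re-train on the augmented set
```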
|
{ |
|
"text": "For all our tasks, we use the BERT-Base Multilingual Cased model released by the authors 1 . The model is trained on 104 languages (including Arabic) with 12 layer, 768 hidden units each, 12 attention heads, and has 110M parameters in entire model. The model has 119,547 shared WordPieces vocabulary, and was pre-trained on the entire Wikipedia for each language. For fine-tuning, we use a maximum sequence size of 50 tokens and a batch size of 32. We set the learning rate to 2e \u2212 5 and train for 15 epochs 2 and choose the best model based on performance on a development set. We use the same hyper-parameters in all of our BERT models. We fine-tune BERT on each respective labeled dataset for each task. For BERT input, we apply WordPiece tokenization, setting the maximal sequence length to 50 words/WordPieces. For all tasks, we use a TensorFlow implementation. An exception is the sentiment analysis task, where we used a PyTorch implementation with the same hyper-parameters but with a learning rate 2e \u2212 6. 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation & Models Parameters", |
|
"sec_num": "2.3." |
|
}, |
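For reference, the fine-tuning settings reported in this subsection can be collected into a single configuration dictionary; this is merely a restatement of the numbers above, not a configuration object from the AraNet code.

```python
FINETUNE_CONFIG = {
    "pretrained_model": "bert-base-multilingual-cased",  # BERT-Base Multilingual Cased
    "max_seq_length": 50,       # WordPieces per input
    "batch_size": 32,
    "learning_rate": 2e-5,      # 2e-6 for the PyTorch sentiment model
    "epochs": 15,               # 10 for dialect id
    "model_selection": "best accuracy on the development set",
}
```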
|
{ |
|
"text": "Pre-processing. Most of our training data in all tasks come from Twitter. Exceptions are in some of the datasets we use for sentiment analysis, which we point out in Section 3.5.. Our pre-processing thus incorporates methods to clean tweets, other datasets (e.g., from the news domain) being much less noisy. For pre-processing, we remove all usernames, URLs, and diacritics in the data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation & Models Parameters", |
|
"sec_num": "2.3." |
|
}, |
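A minimal sketch of the cleaning steps just listed (usernames, URLs, diacritics); the regular expressions, including the diacritic ranges, are our own approximation rather than the exact patterns used for AraNet.

```python
import re

# Approximate Arabic diacritic (tashkeel) ranges; not necessarily the exact set used by the authors.
DIACRITICS = re.compile(r"[\u0610-\u061A\u064B-\u065F\u0670\u06D6-\u06ED]")

def clean_tweet(text):
    text = re.sub(r"@\w+", "", text)            # drop usernames
    text = re.sub(r"https?://\S+", "", text)    # drop URLs
    text = DIACRITICS.sub("", text)             # drop diacritics
    return " ".join(text.split())               # collapse whitespace
```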
|
{ |
|
"text": "Arab-Tweet. For modeling age and gender, we use Arap-Tweet (Zaghouani and Charfi, 2018) 4 , which we will refer to as Arab-Tweet. Arab-tweet comprises 11 Arabic regions from 17 different countries. 5 For each region, data from 100 Twitter users were crawled. Users needed to have posted at least 2,000 tweets and were selected based on an initial list of seed words characteristic of each region. The seed list included words such as /barsha/ 'many' for Tunisian Arabic and /wayed/ 'many' for Gulf Arabic. (Zaghouani and Charfi, 2018) employed human annotators to verify that users do belong to each respective region. Annotators also assigned gender labels from the set male, female and age group labels from the set under-25, 25-to34, above-35 at the user-level, which in turn is the tag for tweet level. Tweets with less than 3 words and re-tweets were removed. Refer to (Zaghouani and Charfi, 2018) for details about how annotation was carried out. We provide a description of the data in Table 1. Table 1 also provides class breakdown across our splits.We note that (Zaghouani and Charfi, 2018) do not report classification models exploiting the data. Although age and gender are user-level tasks, note that we train tweet-level age and gender models. However, tweet-level models can easily be ported to userlevel by simply taking the majority class based on softmaxthresholding as we show in (Zhang and Abdul-Mageed, 2019b ). 6 We shuffle the Arab-tweet dataset and split it into 80% training (TRAIN), 10% development (DEV), and 10% test (TEST). The distribution of classes in our splits is in Table 1. For pre-processing, we reduce 2 or more consecutive repetitions of the same character into only 2 and remove diacritics. With this dataset, we train a small unidirectional GRU (small-GRU) with a single 500-units hidden layer and dropout= 0.5 as a baseline. Small-GRU is trained with the TRAIN set, batch size = 8, and up to 30 words of each sequence. Each word in the input sequence is represented as a trainable 300-dimension vector. We use the top 100K words from TRAIN which are weighted by mutual information as our vocabulary in the embedding layer. We evaluate the model on the blind TEST set. Table 2 shows that small-GRU obtains 36.29% acc. on age classification, and 53.37% acc. on gender detection. Table 2 also shows performance of the fine-tuned BERT model. BERT significantly outperforms our baseline on the two tasks. It improves 15.13% acc. (for age) and 11.93% acc. (for gender) over the small-GRU.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 87, |
|
"text": "(Zaghouani and Charfi, 2018)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 88, |
|
"end": 89, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 199, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 534, |
|
"text": "(Zaghouani and Charfi, 2018)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 874, |
|
"end": 902, |
|
"text": "(Zaghouani and Charfi, 2018)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 1398, |
|
"end": 1428, |
|
"text": "(Zhang and Abdul-Mageed, 2019b", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 1432, |
|
"end": 1433, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 993, |
|
"end": 1009, |
|
"text": "Table 1. Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 2209, |
|
"end": 2216, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 2318, |
|
"end": 2325, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Age and Gender", |
|
"sec_num": "3.1." |
|
}, |
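A sketch of the small-GRU baseline described above (trainable 300-dimensional embeddings, a single 500-unit unidirectional GRU layer, dropout 0.5, sequences of up to 30 words); this is our reconstruction from the description, not the authors' training code.

```python
import torch
import torch.nn as nn

class SmallGRU(nn.Module):
    def __init__(self, vocab_size=100_000, emb_dim=300, hidden=500, n_classes=3):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)       # trainable 300-d vectors
        self.gru = nn.GRU(emb_dim, hidden, batch_first=True)
        self.drop = nn.Dropout(0.5)
        self.out = nn.Linear(hidden, n_classes)

    def forward(self, token_ids):                          # (batch, <=30 tokens)
        _, h = self.gru(self.emb(token_ids))               # h: (1, batch, hidden)
        return self.out(self.drop(h[-1]))                  # class logits

model = SmallGRU(n_classes=3)                              # 3 age groups; use 2 for gender
logits = model(torch.randint(0, 100_000, (8, 30)))         # batch size 8, 30 tokens
```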
|
{ |
|
"text": "UBC Twitter Gender Dataset. We also develop an in-house Twitter dataset for gender. We manually labeled 1,989 users from each of the 21 Arab countries. The data had 1,246 \"male\", 528 \"female\", and 215 unknown users. We remove the \"unknown\" category and balance the dataset to have 528 from each of the two \"male\" and \"female\" categories. We ended with 69,509 tweets for \"male\" and 67,511 tweets for \"female\". We split the users into 80% TRAIN (110,750 tweets for 845 users), 10% DEV (14,158 tweets for 106 users), and 10% TEST (12,112 tweets for 105 users). We then model this dataset with BERT and evaluate on DEV and TEST. Table 3 shows that fine-tuned model obtains 62.42% acc. on DEV and 60.54% acc. on TEST. These results are 2.89% and 4.76% less than performance on Arab-Tweet, perhaps reflecting more diversity in UBC-Gender data which also makes it more challenging. Another potential reason for this accuracy drop could be that, for this tweet-level task, some tweets from the same user occur across our TRAIN/DEV/TEST splits. This was unavoidable since Arab-Tweet is distributed without user ids, thus not making it possible for us to prevent user-level data leakage into the two tweet-level classification tasks of age and gender we report here. We alleviate this issue for gender by annotating and developing on UBC-Gender where we control for user-level data distribution across the splits as explained earlier.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 625, |
|
"end": 632, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Age and Gender", |
|
"sec_num": "3.1." |
|
}, |
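The user-level control described above can be expressed as a split over users rather than tweets, so that no user's tweets leak across TRAIN/DEV/TEST; the helper below is a simple illustration with an assumed user-to-tweets mapping.

```python
import random

def split_by_user(tweets_by_user, dev_frac=0.1, test_frac=0.1, seed=42):
    """Assign each user (and all of that user's tweets) to exactly one split."""
    users = sorted(tweets_by_user)
    random.Random(seed).shuffle(users)
    n_dev, n_test = int(len(users) * dev_frac), int(len(users) * test_frac)
    dev_users = set(users[:n_dev])
    test_users = set(users[n_dev:n_dev + n_test])
    splits = {"train": [], "dev": [], "test": []}
    for user, tweets in tweets_by_user.items():
        key = "dev" if user in dev_users else "test" if user in test_users else "train"
        splits[key].extend(tweets)
    return splits
```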
|
{ |
|
"text": "We also combine the Arab-tweet gender dataset with our UBC-Gender dataset for gender on training, development, and test, respectively, to obtain new TRAIN, DEV, and TEST. We fine-tune BERT on the combined TRAIN and evaluate on combined DEV and TEST. As Table 3 shows, the model obtains 65.32% acc. on combined DEV, and 65.32% acc. on combined TEST. This is the model we package in AraNet.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 260, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Age and Gender", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "The dialect identification model in AraNet is based on our winning system in the MADAR shared task 2 (Bouamor et al., 2019b) as described in (Zhang and Abdul-Mageed, 2019b and test; and the organizers masked test set labels. We lost some tweets from TRAIN when we crawled using tweet ids, ultimately acquiring 2,036 (TRAIN-A), 281 (DEV) and 466 (TEST). We also make use of the task 1 corpus (95,000 sentences (Bouamor et al., 2018) ). More specifically, we concatenate the task 1 data to the training data of task 2, to create TRAIN-B. Again, note that TEST labels were only released to participants after the official task evaluation. Table 4 shows statistics of the data. More information about the data is in (Bouamor et al., 2018) . We use TRAIN-A to perform supervised modeling with BERT and TRAIN-B for self training, under various conditions. We refer the reader to (Zhang and Abdul-Mageed, 2019b) for more information about our different experimental settings on dialect id. We acquire our best results with self-training, with a classification accuracy of 49.39% and F 1 score at 35.44. This is the winning system model in the MADAR shared task and we showed in (Zhang and Abdul-Mageed, 2019b) that our tweet-level predictions can be ported to user-level prediction. On user-level detection, our models perform superbly, with 77.40% acc. and 71.70% F 1 score on unseen MADAR TEST. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 124, |
|
"text": "(Bouamor et al., 2019b)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 171, |
|
"text": "(Zhang and Abdul-Mageed, 2019b", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 431, |
|
"text": "(Bouamor et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 734, |
|
"text": "(Bouamor et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dialect", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "We make use of two datasets, LAMA-DINA and LAMA-DIST (Alhuzali et al., 2018) . The LAMA-DINA dataset is a Twitter dataset with a combination of gold labels from (Abdul-Mageed et al., 2016) and distant supervision labels. The tweets are labeled with the Plutchik 8 primary emotions from the set: {anger, anticipation, disgust, fear, joy, sadness, surprise, trust}. The distant supervision approach depends on use of seed phrases with the Arabic first person pronoun (Eng. \"I\") + a seed word expressing an emotion, e.g., (Eng. \"happy\"). The manually labeled part of the data comprises tweets carrying the seed phrases verified by human annotators 9, 064 tweets for inclusion of the respective emotion. LAMA-DIST (182, 605 tweets) 7 is only labeled using distant supervision. For more information about the dataset, readers are referred to (Alhuzali et al., 2018) . The data distribution over the emotion classes is in Table 5 . We combine LAMA+DINA and LAMA-DIST training set and refer to this new training set as LAMA-D2 (189, 903 tweets). We fine-tune BERT on the LAMA-D2 and evaluate the model with same DEV and TEST sets from LAMA+DINA. On DEV set, the fine-tuned BERT model obtains 61.43% acc. and 58.83 F 1 . On TEST set, we acquire 62.38% acc. and 60.32% F 1 . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 76, |
|
"text": "(Alhuzali et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 188, |
|
"text": "(Abdul-Mageed et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 837, |
|
"end": 860, |
|
"text": "(Alhuzali et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 916, |
|
"end": 923, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Emotion", |
|
"sec_num": "3.3." |
|
}, |
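Distant supervision of this kind can be illustrated with a tiny keyword matcher; the transliterated seed phrases below are hypothetical placeholders and are not the seeds used to build LAMA-DIST.

```python
# Hypothetical transliterated seeds of the form "I + emotion word"; for illustration only.
SEEDS = {
    "joy": ["ana farhan", "ana mabsout"],
    "anger": ["ana ghadban"],
}

def distant_label(tweet):
    """Return the emotion whose seed phrase occurs in the tweet, or None if no seed matches."""
    for emotion, phrases in SEEDS.items():
        if any(phrase in tweet for phrase in phrases):
            return emotion
    return None
```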
|
{ |
|
"text": "We use the dataset for irony identification on Arabic tweets released by IDAT@FIRE2019 shared task (Ghanem et al., 2019) . The shared task dataset contains 5, 030 tweets related to different political issues and events in the Middle East taking place between 2011 and 2018. Tweets are collected using pre-defined keywords (i.e., targeted political figures or events) and the positive class involves ironic hashtags such as #sokhria, #tahakoum, and #maskhara (Arabic variants for \"irony\"). Duplicates, retweets, and non-intelligible tweets are removed by organizers. Tweets involve both MSA as well as dialects at various degrees of granularity such as Egyptian, Gulf, and Levantine. IDAT@FIRE2019 (Ghanem et al., 2019) is set up as a binary classification task where tweets are assigned labels from the set {ironic, non-ironic}. A total of 4, 024 tweets were released by organizers as training data. In addition, a total of 1, 006 tweets were used by organizers as TEST data. TEST labels were not release; and teams were expected to submit the predictions produced by their systems on the TEST split. For our models, we split the 4, 024 released training data into 90% TRAIN (n = 3, 621 tweets; 'ironic'= 1, 882 and 'non-ironic'= 1, 739) and 10% DEV (n = 403 tweets; 'ironic'= 209 and 'non-ironic'= 194).", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 120, |
|
"text": "(Ghanem et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 697, |
|
"end": 718, |
|
"text": "(Ghanem et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Irony", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "We use the same small-GRU architecture of Section 3.1 as our baselines. We fine-tune BERT on our TRAIN, and evaluate on DEV. The small-GRU obtain 73.70% acc. and 73.47% F 1 score. BERT model significantly outperforms the small-GRU, acquiring 81.64% acc. and 81.62% F 1 score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Irony", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "Acc F 1 small-GRU 73.70 73.47 BERT 81.64 81.62 Table 6 : Model performance on irony detection.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 54, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Irony", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "We collect 15 datasets related to sentiment analysis of Arabic, including MSA and dialects (Abdul-Mageed and Diab, 2012; Abdulla et al., 2013; Abdul-Mageed et al., 2014b; Nabil et al., 2015; Kiritchenko et al., 2016; Aly and Atiya, 2013; Salameh et al., 2015; Rosenthal et al., 2017; Alomari et al., 2017; Mohammad et al., 2018; Baly et al., 2019) . Table 8 shows all the corpora we use. The datasets involve different types of sentiment analysis tasks such as binary classification (i.e., negative or positive), 3-way classification (i.e., negative, neutral, or positive), and subjective language detection. To combine these datasets for binary sentiment classification, we normalize different types of labels to binary tags in the set {'positive , 'negative } using the following rules:", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 120, |
|
"text": "Diab, 2012;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 142, |
|
"text": "Abdulla et al., 2013;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 170, |
|
"text": "Abdul-Mageed et al., 2014b;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 190, |
|
"text": "Nabil et al., 2015;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 216, |
|
"text": "Kiritchenko et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 237, |
|
"text": "Aly and Atiya, 2013;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 259, |
|
"text": "Salameh et al., 2015;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 283, |
|
"text": "Rosenthal et al., 2017;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 305, |
|
"text": "Alomari et al., 2017;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 328, |
|
"text": "Mohammad et al., 2018;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 347, |
|
"text": "Baly et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 350, |
|
"end": 357, |
|
"text": "Table 8", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "3.5." |
|
}, |
|
{ |
|
"text": "\u2022 Map {Positive, Pos, or High-Pos} to 'positive'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "3.5." |
|
}, |
|
{ |
|
"text": "\u2022 Map {Negative, Neg, or High-Neg} to 'negative'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "3.5." |
|
}, |
|
{ |
|
"text": "\u2022 Exclude samples whose label is not 'positive' or 'negative' such as 'obj', 'mixed', 'neut', or 'neutral'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "3.5." |
|
}, |
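The mapping rules above amount to a small normalization function; the sketch below restates them, with lowercasing of raw labels added as an assumption.

```python
POSITIVE = {"positive", "pos", "high-pos"}
NEGATIVE = {"negative", "neg", "high-neg"}

def normalize_label(raw_label):
    """Map a corpus-specific label to 'positive'/'negative'; return None to drop the sample."""
    label = raw_label.strip().lower()   # lowercasing is our assumption
    if label in POSITIVE:
        return "positive"
    if label in NEGATIVE:
        return "negative"
    return None                         # 'obj', 'mixed', 'neut', 'neutral', ... are excluded
```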
|
{ |
|
"text": "After label normalization, we obtain 126, 766 samples. We split this resulting dataset into 80% training (TRAIN), 10% development (DEV), and 10% test (TEST). The distribution of classes in our splits is presented in ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "3.5." |
|
}, |
|
{ |
|
"text": "AraNet consists of identifier tools including age, gender, dialect, emotion, irony and sentiment. Each tool comes with an embedded model. The tool comes with modules for performing normalization and tokenization. AraNet can be used either as (1) a Python library or (2) a command-line and interactive tool, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AraNet Design and Use", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "AraNet as a Python Library: Importing AraNet module as a Python library provides identifier functions. Prediction is based on a text input or a path to a file, and returns the identified class label. The library also returns the probability distribution over all available class labels if needed. This probability is the outcome of the softmax function applied to the last layer (with logits) in each model. Figure 2 shows two examples of using the tool as Python library.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 408, |
|
"end": 416, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "AraNet Design and Use", |
|
"sec_num": "4." |
|
}, |
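Since Figure 2 is not reproduced here, the snippet below only illustrates the interaction pattern described in this paragraph (text or file input, a predicted label, and an optional probability distribution). The module path, class name, and keyword argument are hypothetical; consult the GitHub repository for AraNet's actual API.

```python
# Hypothetical interface, for illustration only; see https://github.com/UBC-NLP/aranet
# for the real function and class names.
from aranet import sentiment  # assumed module layout

predictor = sentiment.Sentiment()      # assumed class; loads the packaged BERT model
label = predictor.predict("...")       # pass a tweet (or a file path) and get a class label
label, probs = predictor.predict("...", return_probs=True)  # assumed flag exposing the softmax distribution
```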
|
{ |
|
"text": "AraNet as a Command-Line and Interactive Tool: AraNet provides scripts supporting both command-line and interactive mode. Command-line mode accepts a text or file path. Interaction mode is good for quick interactive line-by-line experiments and also pipeline re-directions. AraNet is available through pip or from source on GitHub 12 with detailed documentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AraNet Design and Use", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "AraNet is trained on data collected from publicly available sources. The distribution of classes across the different tasks are reasonably balanced as listed in the respective sections in the current paper. Meanwhile, we note that we have not used AraNet in real-world situations, nor tested any bias its decisions could involve. As a result, we advise against using AraNet in decision making without prior research as to what its deployment could involve and how best it can be tested. We also do not approve any use of the AraNet or its decisions in any form for manipulative, unfair, malicious, dangerous, or otherwise unlawful (including by international standards) causes by individuals or organizations. Our conviction is that machine-learning-based software can be very powerful and useful, if not at times necessary, but must be tested and deployed only carefully and ethically. AraNet is no exception.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethical Considerations", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "As we pointed out earlier, there are several works on some of the tasks but less on others. By far, Arabic sentiment analysis has been the most popular task. Works focused on both MSA (Abdul-Mageed et al., 2011; Abdul-Mageed et al., 2014a) and dialects (Nabil et al., 2015; ElSahar and El-Beltagy, 2015; Al Sallab et al., 2015; Al-Moslmi et al., 2018; Al-Smadi et al., 2019; Al-Ayyoub et al., 2019; Farha and Magdy, 2019) . A number of studies have been published on dialect detection, including (Zaidan and Callison-Burch, 2011; Zaidan and Callison-Burch, 2014; Elfardy and Diab, 2013; Cotterell and Callison-Burch, 2014) . Some works took as their target the tasks of age detection (Zaghouani and Charfi, 2018; Rangel et al., 2019) , gender detection (Zaghouani and Charfi, 2018; Rangel et al., 2019) , irony identification (Karoui et al., 2017; Ghanem et al., 2019) , and emotion analysis (Abdul-Mageed et al., 2016; Alhuzali et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 211, |
|
"text": "(Abdul-Mageed et al., 2011;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 239, |
|
"text": "Abdul-Mageed et al., 2014a)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 273, |
|
"text": "(Nabil et al., 2015;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 303, |
|
"text": "ElSahar and El-Beltagy, 2015;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 304, |
|
"end": 327, |
|
"text": "Al Sallab et al., 2015;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 351, |
|
"text": "Al-Moslmi et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 374, |
|
"text": "Al-Smadi et al., 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 398, |
|
"text": "Al-Ayyoub et al., 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 421, |
|
"text": "Farha and Magdy, 2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 529, |
|
"text": "(Zaidan and Callison-Burch, 2011;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 530, |
|
"end": 562, |
|
"text": "Zaidan and Callison-Burch, 2014;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 586, |
|
"text": "Elfardy and Diab, 2013;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 622, |
|
"text": "Cotterell and Callison-Burch, 2014)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 712, |
|
"text": "(Zaghouani and Charfi, 2018;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 733, |
|
"text": "Rangel et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 753, |
|
"end": 781, |
|
"text": "(Zaghouani and Charfi, 2018;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 782, |
|
"end": 802, |
|
"text": "Rangel et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 826, |
|
"end": 847, |
|
"text": "(Karoui et al., 2017;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 848, |
|
"end": 868, |
|
"text": "Ghanem et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 892, |
|
"end": 919, |
|
"text": "(Abdul-Mageed et al., 2016;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 920, |
|
"end": 942, |
|
"text": "Alhuzali et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "A number of resources and tools exist for Arabic natural language processing, including Penn Arabic treebank (Maamouri et al., 2004) , Buckwalter Morphological Analyzer (Buckwalter, 2002) , segmenters (Abdelali et al., 2016) , POS taggers (Abumalloh et al., 2016; Diab et al., 2004) , morpho-syntactic analyzers (Abdul-Mageed et al., 2013; Pasha et al., 2014) , subjectivity and sentiment analysis (Abdul-Mageed, 2019; Farha and Magdy, 2019), offensive and hateful language (Elmadany et al., 2020) , and dangerous speech (Alshehri et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 132, |
|
"text": "(Maamouri et al., 2004)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 187, |
|
"text": "(Buckwalter, 2002)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 224, |
|
"text": "(Abdelali et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 263, |
|
"text": "(Abumalloh et al., 2016;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 282, |
|
"text": "Diab et al., 2004)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 339, |
|
"text": "(Abdul-Mageed et al., 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 359, |
|
"text": "Pasha et al., 2014)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 497, |
|
"text": "(Elmadany et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 544, |
|
"text": "(Alshehri et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "We presented AraNet, a deep learning toolkit for a host of Arabic social media processing. AraNet predicts age, dialect, gender, emotion, irony, and sentiment from social media posts. It delivers either state-of-the-art or competitive performance on these tasks. It also has the advantage of using a unified, simple framework based on the recently-developed BERT model. AraNet has the potential to alleviate issues related to comparing across different Arabic social media NLP tasks, by providing one way to test new models against AraNet predictions (i.e., model-based comparisons). Our toolkit can be used to make important discoveries about the Arab world, a vast geographical region of strategic importance. It can enhance also enhance our understating of Arabic online communities, and the Arabic digital culture in general.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "Abdelali, A., Darwish, K., Durrani, N., and Mubarak, H. (2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 62, |
|
"text": "Darwish, K., Durrani, N., and Mubarak, H. (2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bibliographic References", |
|
"sec_num": "8." |
|
}, |
|
{ |
|
"text": "Farasa: A fast and furious segmenter for arabic. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pages 11-16. Abdul-Mageed, M. and Diab, M. T. (2012) . Awatif: A multigenre corpus for modern standard arabic subjectivity and sentiment analysis. In LREC, volume 515, pages 3907-3914. Citeseer. Abdul-Mageed, M., Korayem, M., and YoussefAgha, A. (2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 235, |
|
"text": "Diab, M. T. (2012)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 435, |
|
"text": "Korayem, M., and YoussefAgha, A. (2011)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bibliographic References", |
|
"sec_num": "8." |
|
}, |
|
{ |
|
"text": "\"yes we can?\": Subjectivity annotation and tagging for the health domain. In Proceedings of RANLP2011. Abdul-Mageed, M., Diab, M., and K\u00fcbler, S. (2013) . Asma:", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 152, |
|
"text": "Diab, M., and K\u00fcbler, S. (2013)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bibliographic References", |
|
"sec_num": "8." |
|
}, |
|
{ |
|
"text": "A system for automatic segmentation and morpho-syntactic disambiguation of modern standard arabic. In Proceedings of the International Conference Recent Advances in Natural Language Processing RANLP 2013, pages 1-8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bibliographic References", |
|
"sec_num": "8." |
|
}, |
|
{ |
|
"text": "https://github.com/google-research/bert/ blob/master/multilingual.md.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For dialect id, we trained only for 10 epochs. This was based on monitoring loss on a development set.3 We find this learning rate to work better when we use Py-Torch.4 The resource is an Arabic profiling dataset, and hence the sequence \"Arap\" with an \"p\".5 Counts are based on the distribution we received from the authors.6 Arab-Tweet is also distribute only with tweet-level labels (i.e., without user ids), thus making it not possible to model age and gender at the user level exploiting the data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "These statistics are based on minor cleaning of the data to remove short tweets < 3 words and residuals of the seeds used for collecting the data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/UBC-NLP/aranet", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Samar: Subjectivity and sentiment analysis for arabic social media", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computer Speech & Language", |
|
"volume": "28", |
|
"issue": "1", |
|
"pages": "20--37", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdul-Mageed, M., Diab, M., and K\u00fcbler, S. (2014a). Samar: Subjectivity and sentiment analysis for arabic social media. Computer Speech & Language, 28(1):20-37.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Samar: Subjectivity and sentiment analysis for arabic social media", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computer Speech & Language", |
|
"volume": "28", |
|
"issue": "1", |
|
"pages": "20--37", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdul-Mageed, M., Diab, M., and K\u00fcbler, S. (2014b). Samar: Subjectivity and sentiment analysis for arabic social media. Computer Speech & Language, 28(1):20-37.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Dina: A multidialect dataset for arabic emotion analysis", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Alhuzli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duaa'abu", |
|
"middle": [], |
|
"last": "Elhija", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "The 2nd Workshop on Arabic Corpora and Processing Tools", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdul-Mageed, M., AlHuzli, H., and Duaa'Abu Elhija, M. D. (2016). Dina: A multidialect dataset for arabic emotion analy- sis. In The 2nd Workshop on Arabic Corpora and Processing Tools, page 29.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Modeling arabic subjectivity and sentiment in lexical space", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Information Processing & Management", |
|
"volume": "56", |
|
"issue": "2", |
|
"pages": "291--307", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdul-Mageed, M. (2019). Modeling arabic subjectivity and sen- timent in lexical space. Information Processing & Manage- ment, 56(2):291-307.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Arabic sentiment analysis: Corpus-based and lexiconbased", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Abdulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Mahyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Shehab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Al-Ayyoub", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of The IEEE conference on Applied Electrical Engineering and Computing Technologies (AEECT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdulla, N., Mahyoub, N., Shehab, M., and Al-Ayyoub, M. (2013). Arabic sentiment analysis: Corpus-based and lexicon- based. In Proceedings of The IEEE conference on Applied Electrical Engineering and Computing Technologies (AEECT).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Arabic part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Abumalloh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Al-Sarhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Ibrahim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Abu-Ulbeh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Journal of Soft Computing and Decision Support Systems", |
|
"volume": "3", |
|
"issue": "2", |
|
"pages": "45--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abumalloh, R. A., Al-Sarhan, H. M., Ibrahim, O., and Abu-Ulbeh, W. (2016). Arabic part-of-speech tagging. Journal of Soft Computing and Decision Support Systems, 3(2):45-52.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A comprehensive survey of arabic sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Al-Ayyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Khamaiseh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Jararweh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Al-Kabi", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Information Processing & Management", |
|
"volume": "56", |
|
"issue": "2", |
|
"pages": "320--342", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Al-Ayyoub, M., Khamaiseh, A. A., Jararweh, Y., and Al-Kabi, M. N. (2019). A comprehensive survey of arabic sentiment analysis. Information Processing & Management, 56(2):320- 342.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Arabic senti-lexicon: Constructing publicly available language resources for arabic sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Al-Moslmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Albared", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Al-Shabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Omar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdullah", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Information Science", |
|
"volume": "44", |
|
"issue": "3", |
|
"pages": "345--362", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Al-Moslmi, T., Albared, M., Al-Shabi, A., Omar, N., and Ab- dullah, S. (2018). Arabic senti-lexicon: Constructing pub- licly available language resources for arabic sentiment analysis. Journal of Information Science, 44(3):345-362.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Deep learning models for sentiment analysis in arabic", |
|
"authors": [ |
|
{ |
|
"first": "Al", |
|
"middle": [], |
|
"last": "Sallab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Badaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Baly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "El Hajj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Shaban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the second workshop on Arabic natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Al Sallab, A., Hajj, H., Badaro, G., Baly, R., El Hajj, W., and Sha- ban, K. B. (2015). Deep learning models for sentiment analy- sis in arabic. In Proceedings of the second workshop on Arabic natural language processing, pages 9-17.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Using long short-term memory deep neural networks for aspect-based sentiment analysis of arabic reviews", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Al-Smadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Talafha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Al-Ayyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Jararweh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Journal of Machine Learning and Cybernetics", |
|
"volume": "10", |
|
"issue": "8", |
|
"pages": "2163--2175", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Al-Smadi, M., Talafha, B., Al-Ayyoub, M., and Jararweh, Y. (2019). Using long short-term memory deep neural networks for aspect-based sentiment analysis of arabic reviews. In- ternational Journal of Machine Learning and Cybernetics, 10(8):2163-2175.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Enabling deep learning of emotion with first-person seed expressions", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Alhuzali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ungar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Second Workshop on Computational Modeling of People's Opinions, Personality, and Emotions in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alhuzali, H., Abdul-Mageed, M., and Ungar, L. (2018). Enabling deep learning of emotion with first-person seed expressions. In Proceedings of the Second Workshop on Computational Mod- eling of People's Opinions, Personality, and Emotions in Social Media, pages 25-35.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Arabic tweets sentimental analysis using machine learning", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Alomari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Elsherif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Shaalan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "In International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "602--610", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alomari, K. M., ElSherif, H. M., and Shaalan, K. (2017). Ara- bic tweets sentimental analysis using machine learning. In In- ternational Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems, pages 602-610. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Understanding and detecting dangerous speech in social media", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Alshehri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"M B" |
|
], |
|
"last": "Nagoudi", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "M", |

"middle": [], |

"last": "Abdul-Mageed", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "The 4th Workshop on Open-Source Arabic Corpora and Processing Tools (OSACT4), LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alshehri, A., Nagoudi, E. M. B., and Abdul-Mageed, M. (2020). Understanding and detecting dangerous speech in social me- dia. In The 4th Workshop on Open-Source Arabic Corpora and Processing Tools (OSACT4), LREC.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Labr: A large scale arabic book reviews dataset", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Aly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Atiya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "494--498", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aly, M. and Atiya, A. (2013). Labr: A large scale arabic book reviews dataset. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), volume 2, pages 494-498.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Arsentd-lev: A multi-topic corpus for target-based sentiment analysis in arabic levantine tweets", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Baly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Khaddaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "El-Hajj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Shaban", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.01830" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baly, R., Khaddaj, A., Hajj, H., El-Hajj, W., and Shaban, K. B. (2019). Arsentd-lev: A multi-topic corpus for target-based sentiment analysis in arabic levantine tweets. arXiv preprint arXiv:1906.01830.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The madar arabic dialect corpus and lexicon", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Bouamor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Salameh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Zaghouani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Rambow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Abdulrahim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Obeid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Khalifa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Eryani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Erdmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bouamor, H., Habash, N., Salameh, M., Zaghouani, W., Rambow, O., Abdulrahim, D., Obeid, O., Khalifa, S., Eryani, F., Erd- mann, A., et al. (2018). The madar arabic dialect corpus and lexicon. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC-2018).", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "The madar shared task on arabic fine-grained dialect identification", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Bouamor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hassan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop (WANLP19)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bouamor, H., Hassan, S., and Habash, N. (2019a). The madar shared task on arabic fine-grained dialect identification. In Pro- ceedings of the Fourth Arabic Natural Language Processing Workshop (WANLP19), Florence, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The MADAR Shared Task on Arabic Fine-Grained Dialect Identification", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Bouamor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hassan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop (WANLP19)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bouamor, H., Hassan, S., and Habash, N. (2019b). The MADAR Shared Task on Arabic Fine-Grained Dialect Identification. In Proceedings of the Fourth Arabic Natural Language Process- ing Workshop (WANLP19), Florence, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Buckwalter arabic morphological analyzer version 1.0. Linguistic Data Consortium", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Buckwalter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Buckwalter, T. (2002). Buckwalter arabic morphological ana- lyzer version 1.0. Linguistic Data Consortium, University of Pennsylvania.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A multi-dialect, multi-genre corpus of informal written arabic", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "241--245", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cotterell, R. and Callison-Burch, C. (2014). A multi-dialect, multi-genre corpus of informal written arabic. In LREC, pages 241-245.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M.-W", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. (2018). Bert: Pre-training of deep bidirectional transformers for lan- guage understanding. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Automatic tagging of arabic text: From raw text to base phrase chunks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Hacioglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of HLT-NAACL 2004: Short papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diab, M., Hacioglu, K., and Jurafsky, D. (2004). Automatic tag- ging of arabic text: From raw text to base phrase chunks. In Proceedings of HLT-NAACL 2004: Short papers, pages 149- 152. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Sentence level dialect identification in arabic", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Elfardy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Diab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL (2)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "456--461", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elfardy, H. and Diab, M. T. (2013). Sentence level dialect identi- fication in arabic. In ACL (2), pages 456-461.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Leveraging affective bidirectional transformers for offensive language detection", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Elmadany", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hashemi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The 4th Workshop on Open-Source Arabic Corpora and Processing Tools (OSACT4), LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elmadany, A., Zhang, C., Abdul-Mageed, M., and Hashemi, A. (2020). Leveraging affective bidirectional transformers for offensive language detection. In The 4th Workshop on Open-Source Arabic Corpora and Processing Tools (OSACT4), LREC.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Building large arabic multi-domain resources for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Elsahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "El-Beltagy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Intelligent Text Processing and Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "ElSahar, H. and El-Beltagy, S. R. (2015). Building large ara- bic multi-domain resources for sentiment analysis. In Interna- tional Conference on Intelligent Text Processing and Computa- tional Linguistics, pages 23-34. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Mazajak: An online arabic sentiment analyser", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Farha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Magdy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--198", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Farha, I. A. and Magdy, W. (2019). Mazajak: An online arabic sentiment analyser. In Proceedings of the Fourth Arabic Natu- ral Language Processing Workshop, pages 192-198.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Idat@fire2019: Overview of the track on irony detection in arabic tweets", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ghanem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Karoui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Benamara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Moriceau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "CEUR Workshop Proceedings. In: CEUR-WS.org", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ghanem, B., Karoui, J., Benamara, F., Moriceau, V., and Rosso, P. (2019). Idat@fire2019: Overview of the track on irony de- tection in arabic tweets. In Mehta P., Rosso P., Majumder P., Mitra M. (Eds.) Working Notes of the Forum for Information Retrieval Evaluation (FIRE 2019). CEUR Workshop Proceed- ings. In: CEUR-WS.org, Kolkata, India, December 12-15.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Soukhria: Towards an irony detection system for arabic in social media", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Karoui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Zitoune", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Moriceau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Procedia Computer Science", |
|
"volume": "117", |
|
"issue": "", |
|
"pages": "161--168", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karoui, J., Zitoune, F. B., and Moriceau, V. (2017). Soukhria: Towards an irony detection system for arabic in social media. Procedia Computer Science, 117:161-168.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Determining sentiment intensity of english and arabic phrases", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the 10th international workshop on semantic evaluation (SEMEVAL-2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Semeval-2016 task 7: Determining sentiment intensity of en- glish and arabic phrases. In Proceedings of the 10th inter- national workshop on semantic evaluation (SEMEVAL-2016), pages 42-51.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "The penn arabic treebank: Building a large-scale annotated arabic corpus", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Maamouri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Bies", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Buckwalter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Mekki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "NEMLAR conference on Arabic language resources and tools", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "466--467", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maamouri, M., Bies, A., Buckwalter, T., and Mekki, W. (2004). The penn arabic treebank: Building a large-scale annotated ara- bic corpus. In NEMLAR conference on Arabic language re- sources and tools, volume 27, pages 466-467. Cairo.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Semeval-2018 task 1: Affect in tweets", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Salameh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of The 12th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad, S., Bravo-Marquez, F., Salameh, M., and Kir- itchenko, S. (2018). Semeval-2018 task 1: Affect in tweets. In Proceedings of The 12th International Workshop on Seman- tic Evaluation, pages 1-17.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Astd: Arabic sentiment tweets dataset", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nabil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Aly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Atiya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2515--2519", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nabil, M., Aly, M., and Atiya, A. (2015). Astd: Arabic senti- ment tweets dataset. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2515-2519.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Madamira: A fast, comprehensive tool for morphological analysis and disambiguation of arabic", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Pasha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Al-Badrashiny", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "El Kholy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Eskander", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Pooleery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Rambow", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "R", |

"middle": [], |

"last": "Roth", |

"suffix": "" |

} |
|
], |
|
"year": 2014, |
|
"venue": "In LREC", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "1094--1101", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pasha, A., Al-Badrashiny, M., Diab, M. T., El Kholy, A., Eskan- der, R., Habash, N., Pooleery, M., Rambow, O., and Roth, R. (2014). Madamira: A fast, comprehensive tool for morpho- logical analysis and disambiguation of arabic. In LREC, vol- ume 14, pages 1094-1101.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Text classification method based on self-training and lda topic models", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Pavlinek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Podgorelec", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Expert Systems with Applications", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "83--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pavlinek, M. and Podgorelec, V. (2017). Text classification method based on self-training and lda topic models. Expert Systems with Applications, 80:83-93.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Overview of the track on author profiling and deception detection in arabic", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Rangel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Charfi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Zaghouani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ghanem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "S\u00e1nchez-Junquera", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rangel, F., Rosso, P., Charfi, A., Zaghouani, W., Ghanem, B., and S\u00e1nchez-Junquera, J. (2019). Overview of the track on author profiling and deception detection in arabic. In Mehta P., Rosso P., Majumder P., Mitra M. (Eds.) Working Notes of the Forum for Information Retrieval Evaluation (FIRE 2019).", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "CEUR-WS.org, Kolkata, India", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "CEUR Workshop Proceedings. In: CEUR-WS.org, Kolkata, In- dia, December 12-15.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Semeval-2017 task 4: Sentiment analysis in twitter", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th international workshop on semantic evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "502--518", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rosenthal, S., Farra, N., and Nakov, P. (2017). Semeval-2017 task 4: Sentiment analysis in twitter. In Proceedings of the 11th international workshop on semantic evaluation (SemEval- 2017), pages 502-518.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Sentiment after translation: A case-study on arabic social media posts", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Salameh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 conference of the North American chapter of the association for computational linguistics: Human language technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "767--777", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Salameh, M., Mohammad, S., and Kiritchenko, S. (2015). Sen- timent after translation: A case-study on arabic social media posts. In Proceedings of the 2015 conference of the North American chapter of the association for computational linguis- tics: Human language technologies, pages 767-777.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Self-labeled techniques for semi-supervised learning: taxonomy, software and empirical study", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Triguero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Garc\u00eda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Herrera", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Knowledge and Information Systems", |
|
"volume": "42", |
|
"issue": "2", |
|
"pages": "245--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Triguero, I., Garc\u00eda, S., and Herrera, F. (2015). Self-labeled techniques for semi-supervised learning: taxonomy, software and empirical study. Knowledge and Information Systems, 42(2):245-284, Feb.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6000--6010", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, \u0141., and Polosukhin, I. (2017). Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Arap-tweet: A large multidialect twitter corpus for gender, age and language variety identification", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Zaghouani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Charfi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zaghouani, W. and Charfi, A. (2018). Arap-tweet: A large multi- dialect twitter corpus for gender, age and language variety iden- tification. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC-2018).", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "The arabic online commentary dataset: an annotated dataset of informal arabic with high dialectal content", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Zaidan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies: short papers", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "37--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zaidan, O. F. and Callison-Burch, C. (2011). The arabic online commentary dataset: an annotated dataset of informal arabic with high dialectal content. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies: short papers-Volume 2, pages 37-41. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Arabic dialect identification", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Zaidan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computational Linguistics", |
|
"volume": "40", |
|
"issue": "1", |
|
"pages": "171--202", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zaidan, O. F. and Callison-Burch, C. (2014). Arabic dialect iden- tification. Computational Linguistics, 40(1):171-202.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Multi-task bidirectional transformer representations for irony detection", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The 11th meeting of the Forum for Information Retrieval Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhang, C. and Abdul-Mageed, M. (2019a). Multi-task bidirec- tional transformer representations for irony detection. In The 11th meeting of the Forum for Information Retrieval Evalua- tion 2019.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "No army, no navy: Bert semi-supervised learning of arabic dialects", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "279--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhang, C. and Abdul-Mageed, M. (2019b). No army, no navy: Bert semi-supervised learning of arabic dialects. In Proceed- ings of the Fourth Arabic Natural Language Processing Work- shop, pages 279-284.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "A map of Arab countries. Our different datasets cover varying regions of the Arab world as we describe in each section." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "AraNet usage and output as a Python library." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "AraNet usage examples as command-line mode, pipeline, and interactive mode." |
|
}, |
|
"TABREF0": { |
|
"text": "). The corpus is divided into training, development,", |
|
"content": "<table><tr><td>Data split</td><td colspan=\"2\">Under 25 Female Male</td><td colspan=\"2\">25 until 34 Female Male</td><td colspan=\"2\">35 and up Female Male</td><td># of tweets</td></tr><tr><td>TRAIN</td><td colspan=\"7\">215,950 213,249 207,184 248,769 174,511 226,132 1,285,795</td></tr><tr><td>DEV</td><td>27,076</td><td>26,551</td><td>25,750</td><td>31,111</td><td>21,942</td><td>28,294</td><td>160,724</td></tr><tr><td>TEST</td><td>26,878</td><td>26,422</td><td>25,905</td><td>31,211</td><td>21,991</td><td>28,318</td><td>160,725</td></tr><tr><td>ALL</td><td colspan=\"7\">269,904 266,222 258,839 311,091 218,444 282,744 1,607,244</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"text": "Distribution of age and gender classes in our Arab-Tweet data splits", |
|
"content": "<table><tr><td/><td>Age</td><td>Gender</td></tr><tr><td/><td colspan=\"2\">DEV TEST DEV TEST</td></tr><tr><td colspan=\"3\">small-GRU 36.13 36.29 53.39 53.37</td></tr><tr><td>BERT</td><td colspan=\"2\">50.95 51.42 65.31 65.30</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"text": "Model performance in accuracy of Arab-Tweet age and gender classification tasks.", |
|
"content": "<table><tr><td/><td>DEV TEST</td></tr><tr><td colspan=\"2\">UBC TW Gender 62.42 60.54</td></tr><tr><td>Gender comb</td><td>65.32 65.32</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "Model performance in accuracy. UBC TW Gender refers to the model trained on UBC Twitter Gender dataset. Gender Comb denotes the model trained on the Arab-Tweet and UBC-Gender combined TRAIN data split. Each model is evaluated on the corresponding DEV and TEST sets.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"text": "Distribution of classes within the MADAR twitter corpus.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"text": "Emotion class distribution in LAMA+DINA and LAMA-DIST datasets.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"text": "We finetune pre-trained BERT on the TRAIN set using PyTorch implementation with 2e \u2212 6 learning rate and 15 epochs, as explained in Section 2.. Our best model on the DEV set obtains 80.24% acc. and 80.24% F 1 . We evaluate this best model on TEST set and obtain 77.31% acc. and 76.67% F 1 .", |
|
"content": "<table><tr><td/><td>TRAIN</td><td colspan=\"2\">DEV TEST</td></tr><tr><td># pos</td><td>61,555</td><td>7,030</td><td>7,312</td></tr><tr><td># neg</td><td>39,044</td><td>7,314</td><td>4,511</td></tr><tr><td colspan=\"4\">Total 100,599 14,344 11,823</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF9": { |
|
"text": "Distribution of sentiment classes in our data splits.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF11": { |
|
"text": "Sentiment analysis datasets. SA: Sentiment analysis. SSA: Subjectivity and sentiment analysis.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |