bibtex_url | proceedings | bibtext | abstract | authors | title | id | arxiv_id | GitHub | paper_page | n_linked_authors | upvotes | num_comments | n_authors | Models | Datasets | Spaces | paper_page_exists_pre_conf | type
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
https://aclanthology.org/2023.arabicnlp-1.60.bib | https://aclanthology.org/2023.arabicnlp-1.60/ | @inproceedings{shukla-etal-2023-raphael,
title = "Raphael at {A}r{AIE}val Shared Task: Understanding Persuasive Language and Tone, an {LLM} Approach",
author = "Shukla, Utsav and
Vyas, Manan and
Tiwari, Shailendra",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.60",
doi = "10.18653/v1/2023.arabicnlp-1.60",
pages = "589--593",
abstract = "The widespread dissemination of propaganda and disinformation on both social media and mainstream media platforms has become an urgent concern, attracting the interest of various stakeholders such as government bodies and social media companies. The challenge intensifies when dealing with understudied languages like Arabic. In this paper, we outline our approach for detecting persuasion techniques in Arabic tweets and news article paragraphs. We submitted our system to ArAIEval 2023 Shared Task 1, covering both subtasks. Our main contributions include utilizing GPT-3 to discern tone and potential persuasion techniques in text, exploring various base language models, and employing a multi-task learning approach for the specified subtasks.",
}
| The widespread dissemination of propaganda and disinformation on both social media and mainstream media platforms has become an urgent concern, attracting the interest of various stakeholders such as government bodies and social media companies. The challenge intensifies when dealing with understudied languages like Arabic. In this paper, we outline our approach for detecting persuasion techniques in Arabic tweets and news article paragraphs. We submitted our system to ArAIEval 2023 Shared Task 1, covering both subtasks. Our main contributions include utilizing GPT-3 to discern tone and potential persuasion techniques in text, exploring various base language models, and employing a multi-task learning approach for the specified subtasks. | [
"Shukla, Utsav",
"Vyas, Manan",
"Tiwari, Shailendra"
] | Raphael at ArAIEval Shared Task: Understanding Persuasive Language and Tone, an LLM Approach | arabicnlp-1.60 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.61.bib | https://aclanthology.org/2023.arabicnlp-1.61/ | @inproceedings{ojo-etal-2023-legend,
title = "Legend at {A}r{AIE}val Shared Task: Persuasion Technique Detection using a Language-Agnostic Text Representation Model",
author = "Ojo, Olumide and
Adebanji, Olaronke and
Calvo, Hiram and
Dieke, Damian and
Ojo, Olumuyiwa and
Akinsanya, Seye and
Abiola, Tolulope and
Feldman, Anna",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.61",
doi = "10.18653/v1/2023.arabicnlp-1.61",
pages = "594--599",
abstract = "In this paper, we share our best performing submission to the Arabic AI Tasks Evaluation Challenge (ArAIEval) at ArabicNLP 2023. Our focus was on Task 1, which involves identifying persuasion techniques in excerpts from tweets and news articles. The persuasion technique in Arabic texts was detected using a training loop with XLM-RoBERTa, a language-agnostic text representation model. This approach proved to be potent, leveraging fine-tuning of a multilingual language model. In our evaluation of the test set, we achieved a micro F1 score of 0.64 for subtask A of the competition.",
}
| In this paper, we share our best performing submission to the Arabic AI Tasks Evaluation Challenge (ArAIEval) at ArabicNLP 2023. Our focus was on Task 1, which involves identifying persuasion techniques in excerpts from tweets and news articles. The persuasion technique in Arabic texts was detected using a training loop with XLM-RoBERTa, a language-agnostic text representation model. This approach proved to be potent, leveraging fine-tuning of a multilingual language model. In our evaluation of the test set, we achieved a micro F1 score of 0.64 for subtask A of the competition. | [
"Ojo, Olumide",
"Adebanji, Olaronke",
"Calvo, Hiram",
"Dieke, Damian",
"Ojo, Olumuyiwa",
"Akinsanya, Seye",
"Abiola, Tolulope",
"Feldman, Anna"
] | Legend at ArAIEval Shared Task: Persuasion Technique Detection using a Language-Agnostic Text Representation Model | arabicnlp-1.61 | 2310.09661 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.arabicnlp-1.62.bib | https://aclanthology.org/2023.arabicnlp-1.62/ | @inproceedings{abdul-mageed-etal-2023-nadi,
title = "{NADI} 2023: The Fourth Nuanced {A}rabic Dialect Identification Shared Task",
author = "Abdul-Mageed, Muhammad and
Elmadany, AbdelRahim and
Zhang, Chiyu and
Nagoudi, El Moatez Billah and
Bouamor, Houda and
Habash, Nizar",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.62",
doi = "10.18653/v1/2023.arabicnlp-1.62",
pages = "600--613",
abstract = "We describe the findings of the fourth Nuanced Arabic Dialect Identification Shared Task (NADI 2023). The objective of NADI is to help advance state-of-the-art Arabic NLP by creating opportunities for teams of researchers to collaboratively compete under standardized conditions. It does so with a focus on Arabic dialects, offering novel datasets and defining subtasks that allow for meaningful comparisons between different approaches. NADI 2023 targeted both dialect identification (Subtask1) and dialect-to-MSA machine translation (Subtask 2 and Subtask 3). A total of 58 unique teams registered for the shared task, of whom 18 teams have participated (with 76 valid submissions during test phase). Among these, 16 teams participated in Subtask 1, 5 participated in Subtask 2, and 3 participated in Subtask 3. The winning teams achieved 87.27 F1 on Subtask 1, 14.76 Bleu in Subtask 2, and 21.10 Bleu in Subtask 3, respectively. Results show that all three subtasks remain challenging, thereby motivating future work in this area. We describe the methods employed by the participating teams and briefly offer an outlook for NADI.",
}
| We describe the findings of the fourth Nuanced Arabic Dialect Identification Shared Task (NADI 2023). The objective of NADI is to help advance state-of-the-art Arabic NLP by creating opportunities for teams of researchers to collaboratively compete under standardized conditions. It does so with a focus on Arabic dialects, offering novel datasets and defining subtasks that allow for meaningful comparisons between different approaches. NADI 2023 targeted both dialect identification (Subtask 1) and dialect-to-MSA machine translation (Subtask 2 and Subtask 3). A total of 58 unique teams registered for the shared task, of whom 18 teams participated (with 76 valid submissions during the test phase). Among these, 16 teams participated in Subtask 1, 5 participated in Subtask 2, and 3 participated in Subtask 3. The winning teams achieved 87.27 F1 on Subtask 1, 14.76 Bleu in Subtask 2, and 21.10 Bleu in Subtask 3, respectively. Results show that all three subtasks remain challenging, thereby motivating future work in this area. We describe the methods employed by the participating teams and briefly offer an outlook for NADI. | [
"Abdul-Mageed, Muhammad",
"Elmadany, AbdelRahim",
"Zhang, Chiyu",
"Nagoudi, El Moatez Billah",
"Bouamor, Houda",
"Habash, Nizar"
] | NADI 2023: The Fourth Nuanced Arabic Dialect Identification Shared Task | arabicnlp-1.62 | 2310.16117 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.arabicnlp-1.63.bib | https://aclanthology.org/2023.arabicnlp-1.63/ | @inproceedings{veeramani-etal-2023-dialectnlu,
title = "{D}ialect{NLU} at {NADI} 2023 Shared Task: Transformer Based Multitask Approach Jointly Integrating Dialect and Machine Translation Tasks in {A}rabic",
author = "Veeramani, Hariram and
Thapa, Surendrabikram and
Naseem, Usman",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.63",
doi = "10.18653/v1/2023.arabicnlp-1.63",
pages = "614--619",
abstract = "With approximately 400 million speakers worldwide, Arabic ranks as the fifth most-spoken language globally, necessitating advancements in natural language processing. This paper addresses this need by presenting a system description of the approaches employed for the subtasks outlined in the Nuanced Arabic Dialect Identification (NADI) task at EMNLP 2023. For the first subtask, involving closed country-level dialect identification classification, we employ an ensemble of two Arabic language models. Similarly, for the second subtask, focused on closed dialect to Modern Standard Arabic (MSA) machine translation, our approach combines sequence-to-sequence models, all trained on an Arabic-specific dataset. Our team ranks 10th and 3rd on subtask 1 and subtask 2 respectively.",
}
| With approximately 400 million speakers worldwide, Arabic ranks as the fifth most-spoken language globally, necessitating advancements in natural language processing. This paper addresses this need by presenting a system description of the approaches employed for the subtasks outlined in the Nuanced Arabic Dialect Identification (NADI) task at EMNLP 2023. For the first subtask, involving closed country-level dialect identification classification, we employ an ensemble of two Arabic language models. Similarly, for the second subtask, focused on closed dialect to Modern Standard Arabic (MSA) machine translation, our approach combines sequence-to-sequence models, all trained on an Arabic-specific dataset. Our team ranks 10th and 3rd on subtask 1 and subtask 2 respectively. | [
"Veeramani, Hariram",
"Thapa, Surendrabikram",
"Naseem, Usman"
] | DialectNLU at NADI 2023 Shared Task: Transformer Based Multitask Approach Jointly Integrating Dialect and Machine Translation Tasks in Arabic | arabicnlp-1.63 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.64.bib | https://aclanthology.org/2023.arabicnlp-1.64/ | @inproceedings{nwesri-etal-2023-uot,
title = "{U}o{T} at {NADI} 2023 shared task: Automatic {A}rabic Dialect Identification is Made Possible",
author = "Nwesri, Abduslam F A and
Shinbir, Nabila A S and
Ebrahem, Hassan",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.64",
doi = "10.18653/v1/2023.arabicnlp-1.64",
pages = "620--624",
abstract = "In this paper we present our approach towards Arabic Dialect identification which was part of the The Fourth Nuanced Arabic Dialect Identification Shared Task (NADI 2023). We tested several techniques to identify Arabic dialects. We obtained the best result by fine-tuning the pre-trained MARBERTv2 model with a modified training dataset. The training set was expanded by sorting tweets based on dialects, concatenating every two adjacent tweets, and adding them to the original dataset as new tweets. We achieved 82.87 on F1 score and we were at the seventh position among 16 participants.",
}
| In this paper we present our approach towards Arabic dialect identification, which was part of The Fourth Nuanced Arabic Dialect Identification Shared Task (NADI 2023). We tested several techniques to identify Arabic dialects. We obtained the best result by fine-tuning the pre-trained MARBERTv2 model with a modified training dataset. The training set was expanded by sorting tweets based on dialects, concatenating every two adjacent tweets, and adding them to the original dataset as new tweets. We achieved an F1 score of 82.87 and placed seventh among 16 participants. | [
"Nwesri, Abduslam F A",
"Shinbir, Nabila A S",
"Ebrahem, Hassan"
] | UoT at NADI 2023 shared task: Automatic Arabic Dialect Identification is Made Possible | arabicnlp-1.64 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.65.bib | https://aclanthology.org/2023.arabicnlp-1.65/ | @inproceedings{almarwani-aloufi-2023-sana,
title = "{SANA} at {NADI} 2023 shared task: Ensemble of Layer-Wise {BERT}-based models for Dialectal {A}rabic Identification",
author = "Almarwani, Nada and
Aloufi, Samah",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.65",
doi = "10.18653/v1/2023.arabicnlp-1.65",
pages = "625--630",
abstract = "Our system, submitted to the Nuanced Arabic Dialect Identification (NADI-23), tackles the first sub-task: Closed Country-level dialect identification. In this work, we propose a model that is based on an ensemble of layer-wise fine-tuned BERT-based models. The proposed model ranked fourth out of sixteen submissions, with an F1-macro score of 85.43.",
}
| Our system, submitted to the Nuanced Arabic Dialect Identification (NADI-23), tackles the first sub-task: Closed Country-level dialect identification. In this work, we propose a model that is based on an ensemble of layer-wise fine-tuned BERT-based models. The proposed model ranked fourth out of sixteen submissions, with an F1-macro score of 85.43. | [
"Almarwani, Nada",
"Aloufi, Samah"
] | SANA at NADI 2023 shared task: Ensemble of Layer-Wise BERT-based models for Dialectal Arabic Identification | arabicnlp-1.65 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.66.bib | https://aclanthology.org/2023.arabicnlp-1.66/ | @inproceedings{adel-elmadany-2023-isl,
title = "{ISL}-{AAST} at {NADI} 2023 shared task: Enhancing {A}rabic Dialect Identification in the Era of Globalization and Technological Progress",
author = "Adel, Shorouk and
Elmadany, Noureldin",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.66",
doi = "10.18653/v1/2023.arabicnlp-1.66",
pages = "631--636",
abstract = "Arabic dialects have extensive global usage owing to their significance and the vast number of Arabic speakers. However, technological progress and globalization are leading to significant transformations within Arabic dialects. They are acquiring new characteristics involving novel vocabulary and integrating of linguistic elements from diverse dialects. Consequently, sentiment analysis of these dialects is becoming more challenging. This study categorizes dialects among 18 countries, as introduced by the Nuanced Arabic Dialect Identification (NADI) shared task competition. Our approach incorporates the utilization of the MARABERT and MARABERT v2 models with a range of methodologies, including a feature extraction process. Our findings reveal that the most effective model is achieved by applying averaging and concatenation to the hidden layers of MARABERT v2, followed by feeding the resulting output into convolutional layers. Furthermore, employing the ensemble method on various methods enhances the model{'}s performance. Our system secures the 6th position among the top performers in the First subtask, achieving an F1 score of 83.73{\%}.",
}
| Arabic dialects have extensive global usage owing to their significance and the vast number of Arabic speakers. However, technological progress and globalization are leading to significant transformations within Arabic dialects. They are acquiring new characteristics involving novel vocabulary and the integration of linguistic elements from diverse dialects. Consequently, sentiment analysis of these dialects is becoming more challenging. This study categorizes dialects among 18 countries, as introduced by the Nuanced Arabic Dialect Identification (NADI) shared task competition. Our approach incorporates the utilization of the MARABERT and MARABERT v2 models with a range of methodologies, including a feature extraction process. Our findings reveal that the most effective model is achieved by applying averaging and concatenation to the hidden layers of MARABERT v2, followed by feeding the resulting output into convolutional layers. Furthermore, employing an ensemble of these methods enhances the model{'}s performance. Our system secures the 6th position among the top performers in the first subtask, achieving an F1 score of 83.73{\%}. | [
"Adel, Shorouk",
"Elmadany, Noureldin"
] | ISL-AAST at NADI 2023 shared task: Enhancing Arabic Dialect Identification in the Era of Globalization and Technological Progress | arabicnlp-1.66 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.67.bib | https://aclanthology.org/2023.arabicnlp-1.67/ | @inproceedings{azizov-etal-2023-frank-nadi,
title = "Frank at {NADI} 2023 Shared Task: Trio-Based Ensemble Approach for {A}rabic Dialect Identification",
author = "Azizov, Dilshod and
Li, Jiyong and
Liang, Shangsong",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.67",
doi = "10.18653/v1/2023.arabicnlp-1.67",
pages = "637--641",
abstract = "We present our system designed for Subtask 1 in the shared task NADI on Arabic Dialect Identification, which is part of ArabicNLP 2023. In our approach, we utilized models such as: MARBERT, MARBERTv2 (A) and MARBERTv2 (B). Subsequently, we created a majority voting ensemble of these models. We used MARBERTv2 with different hyperparameters, which significantly improved the overall performance of the ensemble model. In terms of performance, our systems achieved a competitive an F1 score of \textbf{84.76}. Overall, our system secured the $5^\text{th}$ position out of 16 participating teams.",
}
| We present our system designed for Subtask 1 in the shared task NADI on Arabic Dialect Identification, which is part of ArabicNLP 2023. In our approach, we utilized models such as MARBERT, MARBERTv2 (A) and MARBERTv2 (B). Subsequently, we created a majority voting ensemble of these models. We used MARBERTv2 with different hyperparameters, which significantly improved the overall performance of the ensemble model. In terms of performance, our system achieved a competitive F1 score of \textbf{84.76}. Overall, our system secured the $5^\text{th}$ position out of 16 participating teams. | [
"Azizov, Dilshod",
"Li, Jiyong",
"Liang, Shangsong"
] | Frank at NADI 2023 Shared Task: Trio-Based Ensemble Approach for Arabic Dialect Identification | arabicnlp-1.67 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.68.bib | https://aclanthology.org/2023.arabicnlp-1.68/ | @inproceedings{elkaref-etal-2023-nlpeople-nadi,
title = "{NLP}eople at {NADI} 2023 Shared Task: {A}rabic Dialect Identification with Augmented Context and Multi-Stage Tuning",
author = "Elkaref, Mohab and
Moses, Movina and
Tanaka, Shinnosuke and
Barry, James and
Mel, Geeth",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.68",
doi = "10.18653/v1/2023.arabicnlp-1.68",
pages = "642--646",
abstract = "This paper presents the approach of the NLPeople team to the Nuanced Arabic Dialect Identification (NADI) 2023 shared task. Subtask 1 involves identifying the dialect of a source text at the country level. Our approach to Subtask 1 makes use of language-specific language models, a clustering and retrieval method to provide additional context to a target sentence, a fine-tuning strategy which makes use of the provided data from the 2020 and 2021 shared tasks, and finally, ensembling over the predictions of multiple models. Our submission achieves a macro-averaged F1 score of 87.27, ranking 1st among the other participants in the task.",
}
| This paper presents the approach of the NLPeople team to the Nuanced Arabic Dialect Identification (NADI) 2023 shared task. Subtask 1 involves identifying the dialect of a source text at the country level. Our approach to Subtask 1 makes use of language-specific language models, a clustering and retrieval method to provide additional context to a target sentence, a fine-tuning strategy which makes use of the provided data from the 2020 and 2021 shared tasks, and finally, ensembling over the predictions of multiple models. Our submission achieves a macro-averaged F1 score of 87.27, ranking 1st among the other participants in the task. | [
"Elkaref, Mohab",
"Moses, Movina",
"Tanaka, Shinnosuke",
"Barry, James",
"Mel, Geeth"
] | NLPeople at NADI 2023 Shared Task: Arabic Dialect Identification with Augmented Context and Multi-Stage Tuning | arabicnlp-1.68 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.69.bib | https://aclanthology.org/2023.arabicnlp-1.69/ | @inproceedings{lichouri-etal-2023-usthb-nadi,
title = "{USTHB} at {NADI} 2023 shared task: Exploring Preprocessing and Feature Engineering Strategies for {A}rabic Dialect Identification",
author = "Lichouri, Mohamed and
Lounnas, Khaled and
Zitouni, Aicha and
Latrache, Houda and
Djeradi, Rachida",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.69",
doi = "10.18653/v1/2023.arabicnlp-1.69",
pages = "647--651",
abstract = "In this paper, we conduct an in-depth analysis of several key factors influencing the performance of Arabic Dialect Identification NADI{'}2023, with a specific focus on the first subtask involving country-level dialect identification. Our investigation encompasses the effects of surface preprocessing, morphological preprocessing, FastText vector model, and the weighted concatenation of TF-IDF features. For classification purposes, we employ the Linear Support Vector Classification (LSVC) model. During the evaluation phase, our system demonstrates noteworthy results, achieving an F$_1$ score of 62.51{\%}. This achievement closely aligns with the average F$_1$ scores attained by other systems submitted for the first subtask, which stands at 72.91{\%}.",
}
| In this paper, we conduct an in-depth analysis of several key factors influencing the performance of Arabic Dialect Identification NADI{'}2023, with a specific focus on the first subtask involving country-level dialect identification. Our investigation encompasses the effects of surface preprocessing, morphological preprocessing, FastText vector model, and the weighted concatenation of TF-IDF features. For classification purposes, we employ the Linear Support Vector Classification (LSVC) model. During the evaluation phase, our system demonstrates noteworthy results, achieving an F$_1$ score of 62.51{\%}. This achievement closely aligns with the average F$_1$ scores attained by other systems submitted for the first subtask, which stands at 72.91{\%}. | [
"Lichouri, Mohamed",
"Lounnas, Khaled",
"Zitouni, Aicha",
"Latrache, Houda",
"Djeradi, Rachida"
] | USTHB at NADI 2023 shared task: Exploring Preprocessing and Feature Engineering Strategies for Arabic Dialect Identification | arabicnlp-1.69 | 2312.10536 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.arabicnlp-1.70.bib | https://aclanthology.org/2023.arabicnlp-1.70/ | @inproceedings{abdel-salam-2023-rematchka-nadi,
title = "rematchka at {NADI} 2023 shared task: Parameter Efficient tuning for Dialect Identification and Dialect Machine Translation",
author = "Abdel-Salam, Reem",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.70",
doi = "10.18653/v1/2023.arabicnlp-1.70",
pages = "652--657",
abstract = "Dialect identification systems play a significant role in various fields and applications as in speech and language technologies, facilitating language education, supporting sociolinguistic research, preserving linguistic diversity, enhancing text-to-speech systems. In this paper, we provide our findings and results in NADI 2023 shared task for country-level dialect identification and machine translation (MT) from dialect to MSA. The proposed models achieved an F1-score of 86.18 at the dialect identification task, securing second place in first subtask. Whereas for the machine translation task, the submitted model achieved a BLEU score of 11.37 securing fourth and third place in second and third subtask. The proposed model utilizes parameter efficient training methods which achieves better performance when compared to conventional fine-tuning during the experimentation phase.",
}
| Dialect identification systems play a significant role in various fields and applications, such as speech and language technologies, facilitating language education, supporting sociolinguistic research, preserving linguistic diversity, and enhancing text-to-speech systems. In this paper, we provide our findings and results in the NADI 2023 shared task for country-level dialect identification and machine translation (MT) from dialect to MSA. The proposed models achieved an F1-score of 86.18 on the dialect identification task, securing second place in the first subtask. For the machine translation task, the submitted model achieved a BLEU score of 11.37, securing fourth and third place in the second and third subtasks. The proposed model utilizes parameter-efficient training methods, which achieved better performance than conventional fine-tuning during the experimentation phase. | [
"Abdel-Salam, Reem"
] | rematchka at NADI 2023 shared task: Parameter Efficient tuning for Dialect Identification and Dialect Machine Translation | arabicnlp-1.70 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.71.bib | https://aclanthology.org/2023.arabicnlp-1.71/ | @inproceedings{khered-etal-2023-unimanc,
title = "{U}ni{M}anc at {NADI} 2023 Shared Task: A Comparison of Various T5-based Models for Translating {A}rabic Dialectical Text to {M}odern {S}tandard {A}rabic",
author = "Khered, Abdullah and
Abdelhalim, Ingy and
Abdelhalim, Nadine and
Soliman, Ahmed and
Batista-Navarro, Riza",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.71",
doi = "10.18653/v1/2023.arabicnlp-1.71",
pages = "658--664",
abstract = "This paper presents the methods we developed for the Nuanced Arabic Dialect Identification (NADI) 2023 shared task, specifically targeting the two subtasks focussed on sentence-level machine translation (MT) of text written in any of four Arabic dialects (Egyptian, Emirati, Jordanian and Palestinian) to Modern Standard Arabic (MSA). Our team, UniManc, employed models based on T5: multilingual T5 (mT5), multi-task fine-tuned mT5 (mT0) and AraT5. These models were trained based on two configurations: joint model training for all regional dialects (J-R) and independent model training for every regional dialect (I-R). Based on the results of the official NADI 2023 evaluation, our I-R AraT5 model obtained an overall BLEU score of 14.76, ranking first in the Closed Dialect-to-MSA MT subtask. Moreover, in the Open Dialect-to-MSA MT subtask, our J-R AraT5 model also ranked first, obtaining an overall BLEU score of 21.10.",
}
| This paper presents the methods we developed for the Nuanced Arabic Dialect Identification (NADI) 2023 shared task, specifically targeting the two subtasks focussed on sentence-level machine translation (MT) of text written in any of four Arabic dialects (Egyptian, Emirati, Jordanian and Palestinian) to Modern Standard Arabic (MSA). Our team, UniManc, employed models based on T5: multilingual T5 (mT5), multi-task fine-tuned mT5 (mT0) and AraT5. These models were trained based on two configurations: joint model training for all regional dialects (J-R) and independent model training for every regional dialect (I-R). Based on the results of the official NADI 2023 evaluation, our I-R AraT5 model obtained an overall BLEU score of 14.76, ranking first in the Closed Dialect-to-MSA MT subtask. Moreover, in the Open Dialect-to-MSA MT subtask, our J-R AraT5 model also ranked first, obtaining an overall BLEU score of 21.10. | [
"Khered, Abdullah",
"Abdelhalim, Ingy",
"Abdelhalim, Nadine",
"Soliman, Ahmed",
"Batista-Navarro, Riza"
] | UniManc at NADI 2023 Shared Task: A Comparison of Various T5-based Models for Translating Arabic Dialectical Text to Modern Standard Arabic | arabicnlp-1.71 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.72.bib | https://aclanthology.org/2023.arabicnlp-1.72/ | @inproceedings{hatekar-abdo-2023-iunadi,
title = "{IUNADI} at {NADI} 2023 shared task: Country-level {A}rabic Dialect Classification in Tweets for the Shared Task {NADI} 2023",
author = "Hatekar, Yash and
Abdo, Muhammad",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.72",
doi = "10.18653/v1/2023.arabicnlp-1.72",
pages = "665--669",
abstract = "In this paper, we describe our participation in the NADI2023 shared task for the classification of Arabic dialects in tweets. For training, evaluation, and testing purposes, a primary dataset comprising tweets from 18 Arab countries is provided, along with three older datasets. The main objective is to develop a model capable of classifying tweets from these 18 countries. We outline our approach, which leverages various machine learning models. Our experiments demonstrate that large language models, particularly Arabertv2-Large, Arabertv2-Base, and CAMeLBERT-Mix DID MADAR, consistently outperform traditional methods such as SVM, XGBOOST, Multinomial Naive Bayes, AdaBoost, and Random Forests.",
}
| In this paper, we describe our participation in the NADI 2023 shared task for the classification of Arabic dialects in tweets. For training, evaluation, and testing purposes, a primary dataset comprising tweets from 18 Arab countries is provided, along with three older datasets. The main objective is to develop a model capable of classifying tweets from these 18 countries. We outline our approach, which leverages various machine learning models. Our experiments demonstrate that large language models, particularly Arabertv2-Large, Arabertv2-Base, and CAMeLBERT-Mix DID MADAR, consistently outperform traditional methods such as SVM, XGBOOST, Multinomial Naive Bayes, AdaBoost, and Random Forests. | [
"Hatekar, Yash",
"Abdo, Muhammad"
] | IUNADI at NADI 2023 shared task: Country-level Arabic Dialect Classification in Tweets for the Shared Task NADI 2023 | arabicnlp-1.72 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.73.bib | https://aclanthology.org/2023.arabicnlp-1.73/ | @inproceedings{scherrer-etal-2023-helsinki,
title = "The {H}elsinki-{NLP} Submissions at {NADI} 2023 Shared Task: Walking the Baseline",
author = "Scherrer, Yves and
Mileti{\'c}, Aleksandra and
Kuparinen, Olli",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.73",
doi = "10.18653/v1/2023.arabicnlp-1.73",
pages = "670--677",
abstract = "The Helsinki-NLP team participated in the NADI 2023 shared tasks on Arabic dialect translation with seven submissions. We used statistical (SMT) and neural machine translation (NMT) methods and explored character- and subword-based data preprocessing. Our submissions placed second in both tracks. In the open track, our winning submission is a character-level SMT system with additional Modern Standard Arabic language models. In the closed track, our best BLEU scores were obtained with the leave-as-is baseline, a simple copy of the input, and narrowly followed by SMT systems. In both tracks, fine-tuning existing multilingual models such as AraT5 or ByT5 did not yield superior performance compared to SMT.",
}
| The Helsinki-NLP team participated in the NADI 2023 shared tasks on Arabic dialect translation with seven submissions. We used statistical (SMT) and neural machine translation (NMT) methods and explored character- and subword-based data preprocessing. Our submissions placed second in both tracks. In the open track, our winning submission is a character-level SMT system with additional Modern Standard Arabic language models. In the closed track, our best BLEU scores were obtained with the leave-as-is baseline, a simple copy of the input, and narrowly followed by SMT systems. In both tracks, fine-tuning existing multilingual models such as AraT5 or ByT5 did not yield superior performance compared to SMT. | [
"Scherrer, Yves",
"Mileti{\\'c}, Aleks",
"ra",
"Kuparinen, Olli"
] | The Helsinki-NLP Submissions at NADI 2023 Shared Task: Walking the Baseline | arabicnlp-1.73 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.74.bib | https://aclanthology.org/2023.arabicnlp-1.74/ | @inproceedings{deshpande-etal-2023-mavericks,
title = "Mavericks at {NADI} 2023 Shared Task: Unravelling Regional Nuances through Dialect Identification using Transformer-based Approach",
author = "Deshpande, Vedant and
Patwardhan, Yash and
Deshpande, Kshitij and
Mangalvedhekar, Sudeep and
Murumkar, Ravindra",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.74",
doi = "10.18653/v1/2023.arabicnlp-1.74",
pages = "678--682",
abstract = "In this paper, we present our approach for the {``}Nuanced Arabic Dialect Identification (NADI) Shared Task 2023{''}. We highlight our methodology for subtask 1 which deals with country-level dialect identification. Recognizing dialects plays an instrumental role in enhancing the performance of various downstream NLP tasks such as speech recognition and translation. The task uses the Twitter dataset (TWT-2023) that encompasses 18 dialects for the multi-class classification problem. Numerous transformer-based models, pre-trained on Arabic language, are employed for identifying country-level dialects. We fine-tune these state-of-the-art models on the provided dataset. Ensembling method is leveraged to yield improved performance of the system. We achieved an F1-score of 76.65 (11th rank on leaderboard) on the test dataset.",
}
| In this paper, we present our approach for the {``}Nuanced Arabic Dialect Identification (NADI) Shared Task 2023{''}. We highlight our methodology for subtask 1, which deals with country-level dialect identification. Recognizing dialects plays an instrumental role in enhancing the performance of various downstream NLP tasks such as speech recognition and translation. The task uses the Twitter dataset (TWT-2023) that encompasses 18 dialects for the multi-class classification problem. Numerous transformer-based models, pre-trained on the Arabic language, are employed for identifying country-level dialects. We fine-tune these state-of-the-art models on the provided dataset. An ensembling method is leveraged to yield improved performance of the system. We achieved an F1-score of 76.65 (11th rank on the leaderboard) on the test dataset. | [
"Deshp",
"e, Vedant",
"Patwardhan, Yash",
"Deshp",
"e, Kshitij",
"Mangalvedhekar, Sudeep",
"Murumkar, Ravindra"
] | Mavericks at NADI 2023 Shared Task: Unravelling Regional Nuances through Dialect Identification using Transformer-based Approach | arabicnlp-1.74 | 2311.18739 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.arabicnlp-1.75.bib | https://aclanthology.org/2023.arabicnlp-1.75/ | @inproceedings{derouich-etal-2023-anlp,
title = "{ANLP}-{RG} at {NADI} 2023 shared task: Machine Translation of {A}rabic Dialects: A Comparative Study of Transformer Models",
author = "Derouich, Wiem and
Kchaou, Sameh and
Boujelbane, Rahma",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.75",
doi = "10.18653/v1/2023.arabicnlp-1.75",
pages = "683--689",
abstract = "In this paper, we present our findings within the context of the NADI-2023 Shared Task (Subtask 2). Our task involves developing a translation model from the Palestinian, Jordanian, Emirati, and Egyptian dialects to Modern Standard Arabic (MSA) using the MADAR parallel corpus, even though it lacks a parallel subset for the Emirati dialect. To address this challenge, we conducted a comparative analysis, evaluating the fine-tuning results of various transformer models using the MADAR corpus as a learning resource. Additionally, we assessed the effectiveness of existing translation tools in achieving our translation objectives. The best model achieved a BLEU score of 11.14{\%} on the dev set and 10.02 on the test set.",
}
| In this paper, we present our findings within the context of the NADI-2023 Shared Task (Subtask 2). Our task involves developing a translation model from the Palestinian, Jordanian, Emirati, and Egyptian dialects to Modern Standard Arabic (MSA) using the MADAR parallel corpus, even though it lacks a parallel subset for the Emirati dialect. To address this challenge, we conducted a comparative analysis, evaluating the fine-tuning results of various transformer models using the MADAR corpus as a learning resource. Additionally, we assessed the effectiveness of existing translation tools in achieving our translation objectives. The best model achieved a BLEU score of 11.14{\%} on the dev set and 10.02 on the test set. | [
"Derouich, Wiem",
"Kchaou, Sameh",
"Boujelbane, Rahma"
] | ANLP-RG at NADI 2023 shared task: Machine Translation of Arabic Dialects: A Comparative Study of Transformer Models | arabicnlp-1.75 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.76.bib | https://aclanthology.org/2023.arabicnlp-1.76/ | @inproceedings{malhas-etal-2023-quran,
title = "Qur{'}an {QA} 2023 Shared Task: Overview of Passage Retrieval and Reading Comprehension Tasks over the Holy Qur{'}an",
author = "Malhas, Rana and
Mansour, Watheq and
Elsayed, Tamer",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.76",
doi = "10.18653/v1/2023.arabicnlp-1.76",
pages = "690--701",
abstract = "Motivated by the need for intelligent question answering (QA) systems on the Holy Qur{'}an and the success of the first Qur{'}an Question Answering shared task (Qur{'}an QA 2022 at OSACT 2022), we have organized the second version at ArabicNLP 2023. The Qur{'}an QA 2023 is composed of two sub-tasks: the passage retrieval (PR) task and the machine reading comprehension (MRC) task. The main aim of the shared task is to encourage state-of-the-art research on Arabic PR and MRC on the Holy Qur{'}an. Our shared task has attracted 9 teams to submit 22 runs for the PR task, and 6 teams to submit 17 runs for the MRC task. In this paper, we present an overview of the task and provide an outline of the approaches employed by the participating teams in both sub-tasks.",
}
| Motivated by the need for intelligent question answering (QA) systems on the Holy Qur{'}an and the success of the first Qur{'}an Question Answering shared task (Qur{'}an QA 2022 at OSACT 2022), we have organized the second version at ArabicNLP 2023. The Qur{'}an QA 2023 is composed of two sub-tasks: the passage retrieval (PR) task and the machine reading comprehension (MRC) task. The main aim of the shared task is to encourage state-of-the-art research on Arabic PR and MRC on the Holy Qur{'}an. Our shared task has attracted 9 teams to submit 22 runs for the PR task, and 6 teams to submit 17 runs for the MRC task. In this paper, we present an overview of the task and provide an outline of the approaches employed by the participating teams in both sub-tasks. | [
"Malhas, Rana",
"Mansour, Watheq",
"Elsayed, Tamer"
] | Qur'an QA 2023 Shared Task: Overview of Passage Retrieval and Reading Comprehension Tasks over the Holy Qur'an | arabicnlp-1.76 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.77.bib | https://aclanthology.org/2023.arabicnlp-1.77/ | @inproceedings{alawwad-etal-2023-ahjl,
title = "{AHJL} at Qur{'}an {QA} 2023 Shared Task: Enhancing Passage Retrieval using Sentence Transformer and Translation",
author = "Alawwad, Hessa and
Alawwad, Lujain and
Alharbi, Jamilah and
Alharbi, Abdullah",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.77",
doi = "10.18653/v1/2023.arabicnlp-1.77",
pages = "702--707",
abstract = "The Holy Qur{'}an is central to Islam, influencing around two billion Muslims globally, and is known for its linguistic richness and complexity. This article discusses our involvement in the PR task (Task A) of the Qur{'}an QA 2023 Shared Task. We used two models: one employing the Sentence Transformer and the other using OpenAI{'}s embeddings for document retrieval. Both models, equipped with a translation feature, help interpret and understand Arabic language queries by translating them, executing the search, and then reverting the results to Arabic. Our results show that incorporating translation functionalities improves the performance in Arabic Question-Answering systems. The model with translation enhancement performed notably better in all metrics compared to the non-translation model.",
}
| The Holy Qur{'}an is central to Islam, influencing around two billion Muslims globally, and is known for its linguistic richness and complexity. This article discusses our involvement in the PR task (Task A) of the Qur{'}an QA 2023 Shared Task. We used two models: one employing the Sentence Transformer and the other using OpenAI{'}s embeddings for document retrieval. Both models, equipped with a translation feature, help interpret and understand Arabic language queries by translating them, executing the search, and then reverting the results to Arabic. Our results show that incorporating translation functionalities improves the performance in Arabic Question-Answering systems. The model with translation enhancement performed notably better in all metrics compared to the non-translation model. | [
"Alawwad, Hessa",
"Alawwad, Lujain",
"Alharbi, Jamilah",
"Alharbi, Abdullah"
] | AHJL at Qur'an QA 2023 Shared Task: Enhancing Passage Retrieval using Sentence Transformer and Translation | arabicnlp-1.77 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.78.bib | https://aclanthology.org/2023.arabicnlp-1.78/ | @inproceedings{veeramani-etal-2023-lowrescontextqa,
title = "{L}ow{R}es{C}ontext{QA} at Qur{'}an {QA} 2023 Shared Task: Temporal and Sequential Representation Augmented Question Answering Span Detection in {A}rabic",
author = "Veeramani, Hariram and
Thapa, Surendrabikram and
Naseem, Usman",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.78",
doi = "10.18653/v1/2023.arabicnlp-1.78",
pages = "708--713",
abstract = "The Qur{'}an holds immense theological and historical significance, and developing a technology-driven solution for answering questions from this sacred text is of paramount importance. This paper presents our approach to task B of Qur{'}an QA 2023, part of EMNLP 2023, addressing this challenge by proposing a robust method for extracting answers from Qur{'}anic passages. Leveraging the Qur{'}anic Reading Comprehension Dataset (QRCD) v1.2, we employ innovative techniques and advanced models to improve the precision and contextuality of answers derived from Qur{'}anic passages. Our methodology encompasses the utilization of start and end logits, Long Short-Term Memory (LSTM) networks, and fusion mechanisms, contributing to the ongoing dialogue at the intersection of technology and spirituality.",
}
| The Qur{'}an holds immense theological and historical significance, and developing a technology-driven solution for answering questions from this sacred text is of paramount importance. This paper presents our approach to task B of Qur{'}an QA 2023, part of EMNLP 2023, addressing this challenge by proposing a robust method for extracting answers from Qur{'}anic passages. Leveraging the Qur{'}anic Reading Comprehension Dataset (QRCD) v1.2, we employ innovative techniques and advanced models to improve the precision and contextuality of answers derived from Qur{'}anic passages. Our methodology encompasses the utilization of start and end logits, Long Short-Term Memory (LSTM) networks, and fusion mechanisms, contributing to the ongoing dialogue at the intersection of technology and spirituality. | [
"Veeramani, Hariram",
"Thapa, Surendrabikram",
"Naseem, Usman"
] | LowResContextQA at Qur'an QA 2023 Shared Task: Temporal and Sequential Representation Augmented Question Answering Span Detection in Arabic | arabicnlp-1.78 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.79.bib | https://aclanthology.org/2023.arabicnlp-1.79/ | @inproceedings{mahmoudi-etal-2023-gym,
title = "{GYM} at Qur{'}an {QA} 2023 Shared Task: Multi-Task Transfer Learning for {Q}uranic Passage Retrieval and Question Answering with Large Language Models",
author = "Mahmoudi, Ghazaleh and
Morshedzadeh, Yeganeh and
Eetemadi, Sauleh",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.79",
doi = "10.18653/v1/2023.arabicnlp-1.79",
pages = "714--719",
abstract = "This work addresses the challenges of question answering for vintage texts like the Quran. It introduces two tasks: passage retrieval and reading comprehension. For passage retrieval, it employs unsupervised fine-tuning sentence encoders and supervised multi-task learning. In reading comprehension, it fine-tunes an Electra-based model, demonstrating significant improvements over baseline models. Our best AraElectra model achieves 46.1{\%} partial Average Precision (pAP) on the unseen test set, outperforming the baseline by 23{\%}.",
}
| This work addresses the challenges of question answering for vintage texts like the Quran. It introduces two tasks: passage retrieval and reading comprehension. For passage retrieval, it employs unsupervised fine-tuning of sentence encoders and supervised multi-task learning. In reading comprehension, it fine-tunes an Electra-based model, demonstrating significant improvements over baseline models. Our best AraElectra model achieves 46.1{\%} partial Average Precision (pAP) on the unseen test set, outperforming the baseline by 23{\%}. | [
"Mahmoudi, Ghazaleh",
"Morshedzadeh, Yeganeh",
"Eetemadi, Sauleh"
] | GYM at Qur'an QA 2023 Shared Task: Multi-Task Transfer Learning for Quranic Passage Retrieval and Question Answering with Large Language Models | arabicnlp-1.79 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.80.bib | https://aclanthology.org/2023.arabicnlp-1.80/ | @inproceedings{alnefaie-etal-2023-lkau23,
title = "{LKAU}23 at Qur{'}an {QA} 2023: Using Transformer Models for Retrieving Passages and Finding Answers to Questions from the Qur{'}an",
author = "Alnefaie, Sarah and
Alsaleh, Abdullah and
Atwell, Eric and
Alsalka, Mohammad and
Altahhan, Abdulrahman",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.80",
doi = "10.18653/v1/2023.arabicnlp-1.80",
pages = "720--727",
abstract = "The Qur{'}an QA 2023 shared task has two sub tasks: Passage Retrieval (PR) task and Machine Reading Comprehension (MRC) task. Our participation in the PR task was to further train several Arabic pre-trained models using a Sentence-Transformers architecture and to ensemble the best performing models. The results of the test set did not reflect the results of the development set. CL-AraBERT achieved the best results, with a 0.124 MAP. We also participate in the MRC task by further fine-tuning the base and large variants of AraBERT using Classical Arabic and Modern Standard Arabic datasets. Base AraBERT achieved the best result with the development set with a partial average precision (pAP) of 0.49, while it achieved 0.5 with the test set. In addition, we applied the ensemble approach of best performing models and post-processing steps to the final results. Our experiments with the development set showed that our proposed model achieved a 0.537 pAP. On the test set, our system obtained a pAP score of 0.49.",
}
| The Qur{'}an QA 2023 shared task has two subtasks: the Passage Retrieval (PR) task and the Machine Reading Comprehension (MRC) task. Our participation in the PR task was to further train several Arabic pre-trained models using a Sentence-Transformers architecture and to ensemble the best performing models. The results of the test set did not reflect the results of the development set. CL-AraBERT achieved the best results, with a 0.124 MAP. We also participated in the MRC task by further fine-tuning the base and large variants of AraBERT using Classical Arabic and Modern Standard Arabic datasets. Base AraBERT achieved the best result with the development set with a partial average precision (pAP) of 0.49, while it achieved 0.5 with the test set. In addition, we applied the ensemble approach of the best performing models and post-processing steps to the final results. Our experiments with the development set showed that our proposed model achieved a 0.537 pAP. On the test set, our system obtained a pAP score of 0.49. | [
"Alnefaie, Sarah",
"Alsaleh, Abdullah",
"Atwell, Eric",
"Alsalka, Mohammad",
"Altahhan, Abdulrahman"
] | LKAU23 at Qur'an QA 2023: Using Transformer Models for Retrieving Passages and Finding Answers to Questions from the Qur'an | arabicnlp-1.80 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.81.bib | https://aclanthology.org/2023.arabicnlp-1.81/ | @inproceedings{elkomy-sarhan-2023-tce,
title = "{TCE} at Qur{'}an {QA} 2023 Shared Task: Low Resource Enhanced Transformer-based Ensemble Approach for Qur{'}anic {QA}",
author = "Elkomy, Mohammed and
Sarhan, Amany",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.81",
doi = "10.18653/v1/2023.arabicnlp-1.81",
pages = "728--742",
abstract = "In this paper, we present our approach to tackle Qur{'}an QA 2023 shared tasks A and B. To address the challenge of low-resourced training data, we rely on transfer learning together with a voting ensemble to improve prediction stability across multiple runs. Additionally, we employ different architectures and learning mechanisms for a range of Arabic pre-trained transformer-based models for both tasks. To identify unanswerable questions, we propose using a thresholding mechanism. Our top-performing systems greatly surpass the baseline performance on the hidden split, achieving a MAP score of 25.05{\%} for task A and a partial Average Precision (pAP) of 57.11{\%} for task B.",
}
| In this paper, we present our approach to tackle Qur{'}an QA 2023 shared tasks A and B. To address the challenge of low-resourced training data, we rely on transfer learning together with a voting ensemble to improve prediction stability across multiple runs. Additionally, we employ different architectures and learning mechanisms for a range of Arabic pre-trained transformer-based models for both tasks. To identify unanswerable questions, we propose using a thresholding mechanism. Our top-performing systems greatly surpass the baseline performance on the hidden split, achieving a MAP score of 25.05{\%} for task A and a partial Average Precision (pAP) of 57.11{\%} for task B. | [
"Elkomy, Mohammed",
"Sarhan, Amany"
] | TCE at Qur'an QA 2023 Shared Task: Low Resource Enhanced Transformer-based Ensemble Approach for Qur'anic QA | arabicnlp-1.81 | 2401.13060 | [
"https://github.com/mohammed-elkomy/quran-qa"
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.arabicnlp-1.82.bib | https://aclanthology.org/2023.arabicnlp-1.82/ | @inproceedings{zekiye-amroush-2023-al,
title = "Al-Jawaab at Qur{'}an {QA} 2023 Shared Task: Exploring Embeddings and {GPT} Models for Passage Retrieval and Reading Comprehension",
author = "Zekiye, Abdulrezzak and
Amroush, Fadi",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.82",
doi = "10.18653/v1/2023.arabicnlp-1.82",
pages = "743--747",
abstract = "This paper introduces a comprehensive system designed to address two natural language processing tasks: Passage Retrieval (Task A) and Reading Comprehension (Task B), applied to datasets related to the Holy Qur{'}an. Task A was treated as a measurement of a textual similarity problem where the system leverages OpenAI{'}s {``}text-embedding-ada-002{''} embedding model to transform textual content into numerical representations, with cosine similarity serving as the proximity metric. Task B focuses on the extraction of answers from Qur{'}anic passages, employing the Generative Pre-trained Transformer-4 (GPT-4) language model. In Task A, the system is evaluated using the Mean Average Precision (MAP) metric, achieving MAP scores of 0.109438 and 0.06426543057 on the development and test datasets with an optimal similarity threshold set at 0.85. Task B evaluation employs partial Average Precision (pAP), where our system surpasses a baseline whole-passage retriever with pAP scores of 0.470 and 0.5393130538 on the development and test datasets, respectively.",
}
| This paper introduces a comprehensive system designed to address two natural language processing tasks: Passage Retrieval (Task A) and Reading Comprehension (Task B), applied to datasets related to the Holy Qur{'}an. Task A was treated as a textual similarity problem, where the system leverages OpenAI{'}s {``}text-embedding-ada-002{''} embedding model to transform textual content into numerical representations, with cosine similarity serving as the proximity metric. Task B focuses on the extraction of answers from Qur{'}anic passages, employing the Generative Pre-trained Transformer-4 (GPT-4) language model. In Task A, the system is evaluated using the Mean Average Precision (MAP) metric, achieving MAP scores of 0.109438 and 0.06426543057 on the development and test datasets, with an optimal similarity threshold set at 0.85. Task B evaluation employs partial Average Precision (pAP), where our system surpasses a baseline whole-passage retriever with pAP scores of 0.470 and 0.5393130538 on the development and test datasets, respectively. | [
"Zekiye, Abdulrezzak",
"Amroush, Fadi"
] | Al-Jawaab at Qur'an QA 2023 Shared Task: Exploring Embeddings and GPT Models for Passage Retrieval and Reading Comprehension | arabicnlp-1.82 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.83.bib | https://aclanthology.org/2023.arabicnlp-1.83/ | @inproceedings{jarrar-etal-2023-wojoodner,
title = "{W}ojood{NER} 2023: The First {A}rabic Named Entity Recognition Shared Task",
author = "Jarrar, Mustafa and
Abdul-Mageed, Muhammad and
Khalilia, Mohammed and
Talafha, Bashar and
Elmadany, AbdelRahim and
Hamad, Nagham and
Omar, Alaa{'}",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.83",
doi = "10.18653/v1/2023.arabicnlp-1.83",
pages = "748--758",
abstract = "We present WojoodNER-2023, the first Arabic Named Entity Recognition (NER) Shared Task. The primary focus of WojoodNER 2023 is on Arabic NER, offering a novel NER datasets (i.e., Wojood) and the definition of subtasks designed to facilitate meaningful comparisons between different NER approaches. WojoodNER-2023 encompassed two Subtasks: FlatNER and NestedNER. A total of 45 unique teams registered for this shared task, with 11 of them actively participating in the test phase. Specifically, 11 teams participated in FlatNER, while 8 teams tackled NestedNER. The winning team achieved F1 score of 91.96 and 93.73 in FlatNER and NestedNER respectively.",
}
| We present WojoodNER-2023, the first Arabic Named Entity Recognition (NER) Shared Task. The primary focus of WojoodNER 2023 is on Arabic NER, offering a novel NER dataset (i.e., Wojood) and the definition of subtasks designed to facilitate meaningful comparisons between different NER approaches. WojoodNER-2023 encompassed two Subtasks: FlatNER and NestedNER. A total of 45 unique teams registered for this shared task, with 11 of them actively participating in the test phase. Specifically, 11 teams participated in FlatNER, while 8 teams tackled NestedNER. The winning team achieved F1 scores of 91.96 and 93.73 in FlatNER and NestedNER, respectively. | [
"Jarrar, Mustafa",
"Abdul-Mageed, Muhammad",
"Khalilia, Mohammed",
"Talafha, Bashar",
"Elmadany, AbdelRahim",
"Hamad, Nagham",
"Omar, Alaa{'}"
] | WojoodNER 2023: The First Arabic Named Entity Recognition Shared Task | arabicnlp-1.83 | 2310.16153 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.arabicnlp-1.84.bib | https://aclanthology.org/2023.arabicnlp-1.84/ | @inproceedings{laouirine-etal-2023-elyadata,
title = "{ELYADATA} at {W}ojood{NER} Shared Task: Data and Model-centric Approaches for {A}rabic Flat and Nested {NER}",
author = "Laouirine, Imen and
Elleuch, Haroun and
Bougares, Fethi",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.84",
doi = "10.18653/v1/2023.arabicnlp-1.84",
pages = "759--764",
abstract = "This paper describes our submissions to the WojoodNER shared task organized during the first ArabicNLP conference. We participated in the two proposed sub-tasks of flat and nested Named Entity Recognition (NER). Our systems were ranked first over eight and third over eleven in the Nested NER and Flat NER, respectively. All our primary submissions are based on DiffusionNER models (Shen et al., 2023), where the NER task is formulated as a boundary-denoising diffusion process. Experiments on nested WojoodNER achieves the best results with a micro F1-score of 93.73{\%}. For the flat sub-task, our primary system was the third-best system, with a micro F1-score of 91.92{\%}.",
}
| This paper describes our submissions to the WojoodNER shared task organized during the first ArabicNLP conference. We participated in the two proposed sub-tasks of flat and nested Named Entity Recognition (NER). Our systems were ranked first out of eight and third out of eleven in the Nested NER and Flat NER, respectively. All our primary submissions are based on DiffusionNER models (Shen et al., 2023), where the NER task is formulated as a boundary-denoising diffusion process. Experiments on nested WojoodNER achieve the best results, with a micro F1-score of 93.73{\%}. For the flat sub-task, our primary system was the third-best system, with a micro F1-score of 91.92{\%}. | [
"Laouirine, Imen",
"Elleuch, Haroun",
"Bougares, Fethi"
] | ELYADATA at WojoodNER Shared Task: Data and Model-centric Approaches for Arabic Flat and Nested NER | arabicnlp-1.84 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.85.bib | https://aclanthology.org/2023.arabicnlp-1.85/ | @inproceedings{li-etal-2023-lotus,
title = "Lotus at {W}ojood{NER} Shared Task: Multilingual Transformers: Unveiling Flat and Nested Entity Recognition",
author = "Li, Jiyong and
Azizov, Dilshod and
AlQuabeh, Hilal and
Liang, Shangsong",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.85",
doi = "10.18653/v1/2023.arabicnlp-1.85",
pages = "765--770",
abstract = "We introduce our systems developed for two subtasks in the shared task {``}Wojood{''} on Arabic NER detection, part of ArabicNLP 2023. For Subtask 1, we employ the XLM-R model to predict Flat NER labels for given tokens using a single classifier capable of categorizing all labels. For Subtask 2, we use the XLM-R encoder by building 21 individual classifiers. Each classifier corresponds to a specific label and is designed to determine the presence of its respective label. In terms of performance, our systems achieved competitive \textit{micro-F1} scores of \textbf{0.83} for Subtask 1 and \textbf{0.76} for Subtask 2, according to the leaderboard scores.",
}
| We introduce our systems developed for two subtasks in the shared task {``}Wojood{''} on Arabic NER detection, part of ArabicNLP 2023. For Subtask 1, we employ the XLM-R model to predict Flat NER labels for given tokens using a single classifier capable of categorizing all labels. For Subtask 2, we use the XLM-R encoder by building 21 individual classifiers. Each classifier corresponds to a specific label and is designed to determine the presence of its respective label. In terms of performance, our systems achieved competitive \textit{micro-F1} scores of \textbf{0.83} for Subtask 1 and \textbf{0.76} for Subtask 2, according to the leaderboard scores. | [
"Li, Jiyong",
"Azizov, Dilshod",
"AlQuabeh, Hilal",
"Liang, Shangsong"
] | Lotus at WojoodNER Shared Task: Multilingual Transformers: Unveiling Flat and Nested Entity Recognition | arabicnlp-1.85 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.86.bib | https://aclanthology.org/2023.arabicnlp-1.86/ | @inproceedings{elkordi-etal-2023-alexu,
title = "{A}lex{U}-{AIC} at {W}ojood{NER} shared task: Sequence Labeling vs {MRC} and {SWA} for {A}rabic Named Entity Recognition",
author = "Elkordi, Shereen and
Adly, Noha and
Torki, Marwan",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.86",
doi = "10.18653/v1/2023.arabicnlp-1.86",
pages = "771--776",
abstract = "Named entity recognition (NER) is one of many challenging tasks in Arabic Natural Language Processing. It is also the base of many critical downstream tasks to help understand the source of major trends and public opinion. In this paper, we will describe our submission in the NER Shared Task of ArabicNLP 2023. We used a simple machine reading comprehension-based technique in the Flat NER Subtask ranking eighth on the leaderboard, while we fine-tuned a language model for the Nested NER Subtask ranking third on the leaderboard.",
}
| Named entity recognition (NER) is one of many challenging tasks in Arabic Natural Language Processing. It is also the base of many critical downstream tasks to help understand the source of major trends and public opinion. In this paper, we describe our submission to the NER Shared Task of ArabicNLP 2023. We used a simple machine reading comprehension-based technique in the Flat NER Subtask, ranking eighth on the leaderboard, while we fine-tuned a language model for the Nested NER Subtask, ranking third on the leaderboard. | [
"Elkordi, Shereen",
"Adly, Noha",
"Torki, Marwan"
] | AlexU-AIC at WojoodNER shared task: Sequence Labeling vs MRC and SWA for Arabic Named Entity Recognition | arabicnlp-1.86 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.87.bib | https://aclanthology.org/2023.arabicnlp-1.87/ | @inproceedings{mahdaouy-etal-2023-um6p,
title = "{UM}6{P} {\&} {UL} at {W}ojood{NER} shared task: Improving Multi-Task Learning for Flat and Nested {A}rabic Named Entity Recognition",
author = "El Mahdaouy, Abdelkader and
Lamsiyah, Salima and
Alami, Hamza and
Schommer, Christoph and
Berrada, Ismail",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.87",
doi = "10.18653/v1/2023.arabicnlp-1.87",
pages = "777--782",
abstract = "In this paper, we present our submitted system for the WojoodNER Shared Task, addressing both flat and nested Arabic Named Entity Recognition (NER). Our system is based on a BERT-based multi-task learning model that leverages the existing Arabic Pretrained Language Models (PLMs) to encode the input sentences. To enhance the performance of our model, we have employed a multi-task loss variance penalty and combined several training objectives, including the Cross-Entropy loss, the Dice loss, the Tversky loss, and the Focal loss. Besides, we have studied the performance of three existing Arabic PLMs for sentence encoding. On the official test set, our system has obtained a micro-F1 score of 0.9113 and 0.9303 for Flat (Sub-Task 1) and Nested (Sub-Task 2) NER, respectively. It has been ranked in the 6th and the 2nd positions among all participating systems in Sub-Task 1 and Sub-Task 2, respectively.",
}
| In this paper, we present our submitted system for the WojoodNER Shared Task, addressing both flat and nested Arabic Named Entity Recognition (NER). Our system is based on a BERT-based multi-task learning model that leverages the existing Arabic Pretrained Language Models (PLMs) to encode the input sentences. To enhance the performance of our model, we have employed a multi-task loss variance penalty and combined several training objectives, including the Cross-Entropy loss, the Dice loss, the Tversky loss, and the Focal loss. Besides, we have studied the performance of three existing Arabic PLMs for sentence encoding. On the official test set, our system has obtained a micro-F1 score of 0.9113 and 0.9303 for Flat (Sub-Task 1) and Nested (Sub-Task 2) NER, respectively. It has been ranked in the 6th and the 2nd positions among all participating systems in Sub-Task 1 and Sub-Task 2, respectively. | [
"El Mahdaouy, Abdelkader",
"Lamsiyah, Salima",
"Alami, Hamza",
"Schommer, Christoph",
"Berrada, Ismail"
] | UM6P & UL at WojoodNER shared task: Improving Multi-Task Learning for Flat and Nested Arabic Named Entity Recognition | arabicnlp-1.87 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.88.bib | https://aclanthology.org/2023.arabicnlp-1.88/ | @inproceedings{ehsan-etal-2023-alphabrains-wojoodner,
title = "{A}lpha{B}rains at {W}ojood{NER} shared task: {A}rabic Named Entity Recognition by Using Character-based Context-Sensitive Word Representations",
author = "Ehsan, Toqeer and
Ali, Amjad and
Al-Fuqaha, Ala",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.88",
doi = "10.18653/v1/2023.arabicnlp-1.88",
pages = "783--788",
abstract = "This paper presents Arabic named entity recognition models by employing the single-task and the multi-task learning paradigms. The models have been developed using character-based contextualized Embeddings from Language Model (ELMo) in the input layers of the bidirectional long-short term memory networks. The ELMo embeddings are quite capable of learning the morphology and contextual information of the tokens in word sequences. The single-task learning models outperformed the multi-task learning models and achieved micro F1-scores of 0.8751 and 0.8884 for the flat and nested annotations, respectively.",
}
| This paper presents Arabic named entity recognition models by employing the single-task and the multi-task learning paradigms. The models have been developed using character-based contextualized Embeddings from Language Model (ELMo) in the input layers of the bidirectional long short-term memory networks. The ELMo embeddings are quite capable of learning the morphology and contextual information of the tokens in word sequences. The single-task learning models outperformed the multi-task learning models and achieved micro F1-scores of 0.8751 and 0.8884 for the flat and nested annotations, respectively. | [
"Ehsan, Toqeer",
"Ali, Amjad",
"Al-Fuqaha, Ala"
] | AlphaBrains at WojoodNER shared task: Arabic Named Entity Recognition by Using Character-based Context-Sensitive Word Representations | arabicnlp-1.88 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.89.bib | https://aclanthology.org/2023.arabicnlp-1.89/ | @inproceedings{elkhbir-etal-2023-lipn,
title = "{LIPN} at {W}ojood{NER} shared task: A Span-Based Approach for Flat and Nested {A}rabic Named Entity Recognition",
author = "El Elkhbir, Niama and
Zaratiana, Urchade and
Tomeh, Nadi and
Charnois, Thierry",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.89",
doi = "10.18653/v1/2023.arabicnlp-1.89",
pages = "789--796",
abstract = "The Wojood Named Entity Recognition (NER) shared task introduces a comprehensive Arabic NER dataset encompassing both flat and nested entity tasks, addressing the challenge of limited Arabic resources. In this paper, we present our team \textbf{LIPN} approach to addressing the two subtasks of WojoodNER SharedTask. We frame NER as a span classification problem. We employ a pretrained language model for token representations and neural network classifiers. We use global decoding for flat NER and a greedy strategy for nested NER. Our model secured the first position in flat NER and the fourth position in nested NER during the competition, with an F-score of 91.96 and 92.45 respectively. Our code is publicly available (\url{https://github.com/niamaelkhbir/LIPN-at-WojoodSharedTask}).",
}
| The Wojood Named Entity Recognition (NER) shared task introduces a comprehensive Arabic NER dataset encompassing both flat and nested entity tasks, addressing the challenge of limited Arabic resources. In this paper, we present our team \textbf{LIPN} approach to addressing the two subtasks of WojoodNER SharedTask. We frame NER as a span classification problem. We employ a pretrained language model for token representations and neural network classifiers. We use global decoding for flat NER and a greedy strategy for nested NER. Our model secured the first position in flat NER and the fourth position in nested NER during the competition, with an F-score of 91.96 and 92.45 respectively. Our code is publicly available (\url{https://github.com/niamaelkhbir/LIPN-at-WojoodSharedTask}). | [
"El Elkhbir, Niama",
"Zaratiana, Urchade",
"Tomeh, Nadi",
"Charnois, Thierry"
] | LIPN at WojoodNER shared task: A Span-Based Approach for Flat and Nested Arabic Named Entity Recognition | arabicnlp-1.89 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.90.bib | https://aclanthology.org/2023.arabicnlp-1.90/ | @inproceedings{hussein-etal-2023-alex,
title = "{A}lex-{U} 2023 {NLP} at {W}ojood{NER} shared task: {A}ra{BINDER} (Bi-encoder for {A}rabic Named Entity Recognition)",
author = "Hussein, Mariam and
Khaled, Sarah and
Torki, Marwan and
El-Makky, Nagwa",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.90",
doi = "10.18653/v1/2023.arabicnlp-1.90",
pages = "797--802",
abstract = "Named Entity Recognition (NER) is a crucial task in natural language processing that facilitates the extraction of vital information from text. However, NER for Arabic presents a significant challenge due to the language{'}s unique characteristics. In this paper, we introduce AraBINDER, our submission to the Wojood NER Shared Task 2023 (ArabicNLP 2023). The shared task comprises two sub-tasks: sub-task 1 focuses on Flat NER, while sub-task 2 centers on Nested NER. We have participated in both sub-tasks. The Bi-Encoder has proven its efficiency for NER in English. We employ AraBINDER (Arabic Bi-Encoder for Named Entity Recognition), which uses the power of two transformer encoders and employs contrastive learning to map candidate text spans and entity types into the same vector representation space. This approach frames NER as a representation learning problem that maximizes the similarity between the vector representations of an entity mention and its type. Our experiments reveal that AraBINDER achieves a micro F-1 score of 0.918 for Flat NER and 0.9 for Nested NER on the Wojood dataset.",
}
| Named Entity Recognition (NER) is a crucial task in natural language processing that facilitates the extraction of vital information from text. However, NER for Arabic presents a significant challenge due to the language{'}s unique characteristics. In this paper, we introduce AraBINDER, our submission to the Wojood NER Shared Task 2023 (ArabicNLP 2023). The shared task comprises two sub-tasks: sub-task 1 focuses on Flat NER, while sub-task 2 centers on Nested NER. We have participated in both sub-tasks. The Bi-Encoder has proven its efficiency for NER in English. We employ AraBINDER (Arabic Bi-Encoder for Named Entity Recognition), which uses the power of two transformer encoders and employs contrastive learning to map candidate text spans and entity types into the same vector representation space. This approach frames NER as a representation learning problem that maximizes the similarity between the vector representations of an entity mention and its type. Our experiments reveal that AraBINDER achieves a micro F-1 score of 0.918 for Flat NER and 0.9 for Nested NER on the Wojood dataset. | [
"Hussein, Mariam",
"Khaled, Sarah",
"Torki, Marwan",
"El-Makky, Nagwa"
] | Alex-U 2023 NLP at WojoodNER shared task: AraBINDER (Bi-encoder for Arabic Named Entity Recognition) | arabicnlp-1.90 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.arabicnlp-1.91.bib | https://aclanthology.org/2023.arabicnlp-1.91/ | @inproceedings{elkaref-elkaref-2023-el,
title = "El-Kawaref at {W}ojood{NER} shared task: {S}taged{NER} for {A}rabic Named Entity Recognition",
author = "Elkaref, Nehal and
Elkaref, Mohab",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.arabicnlp-1.91",
doi = "10.18653/v1/2023.arabicnlp-1.91",
pages = "803--808",
abstract = "Named Entity Recognition (NER) is the task of identifying word-units that correspond to mentions as location, organization, person, or currency. In this shared task we tackle flat-entity classification for Arabic, where for each word-unit a single entity should be identified. To resolve the classification problem we propose StagedNER a novel technique to fine-tuning NER downstream tasks that divides the learning process of a transformer-model into two phases, where a model is tasked to learn sequence tags and then entity tags rather than learn both together simultaneously for an input sequence. We create an ensemble of two base models using this method that yield a score of on the development set and an F1 performance of 90.03{\%} on the validation set and 91.95{\%} on the test set.",
}
| Named Entity Recognition (NER) is the task of identifying word-units that correspond to mentions such as location, organization, person, or currency. In this shared task we tackle flat-entity classification for Arabic, where for each word-unit a single entity should be identified. To resolve the classification problem we propose StagedNER, a novel technique for fine-tuning NER downstream tasks that divides the learning process of a transformer model into two phases, where a model is tasked to learn sequence tags and then entity tags rather than learn both together simultaneously for an input sequence. We create an ensemble of two base models using this method that yields a score of on the development set and an F1 performance of 90.03{\%} on the validation set and 91.95{\%} on the test set. | [
"Elkaref, Nehal",
"Elkaref, Mohab"
] | El-Kawaref at WojoodNER shared task: StagedNER for Arabic Named Entity Recognition | arabicnlp-1.91 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.1.bib | https://aclanthology.org/2023.argmining-1.1/ | @inproceedings{ruiz-dolz-lawrence-2023-detecting,
title = "Detecting Argumentative Fallacies in the Wild: Problems and Limitations of Large Language Models",
author = "Ruiz-Dolz, Ramon and
Lawrence, John",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.1",
doi = "10.18653/v1/2023.argmining-1.1",
pages = "1--10",
abstract = "Previous work on the automatic identification of fallacies in natural language text has typically approached the problem in constrained experimental setups that make it difficult to understand the applicability and usefulness of the proposals in the real world. In this paper, we present the first analysis of the limitations that these data-driven approaches could show in real situations. For that purpose, we first create a validation corpus consisting of natural language argumentation schemes. Second, we provide new empirical results to the emerging task of identifying fallacies in natural language text. Third, we analyse the errors observed outside of the testing data domains considering the new validation corpus. Finally, we point out some important limitations observed in our analysis that should be taken into account in future research in this topic. Specifically, if we want to deploy these systems in the Wild.",
}
| Previous work on the automatic identification of fallacies in natural language text has typically approached the problem in constrained experimental setups that make it difficult to understand the applicability and usefulness of the proposals in the real world. In this paper, we present the first analysis of the limitations that these data-driven approaches could show in real situations. For that purpose, we first create a validation corpus consisting of natural language argumentation schemes. Second, we provide new empirical results for the emerging task of identifying fallacies in natural language text. Third, we analyse the errors observed outside of the testing data domains considering the new validation corpus. Finally, we point out some important limitations observed in our analysis that should be taken into account in future research on this topic, specifically if we want to deploy these systems in the Wild. | [
"Ruiz-Dolz, Ramon",
"Lawrence, John"
] | Detecting Argumentative Fallacies in the Wild: Problems and Limitations of Large Language Models | argmining-1.1 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.2.bib | https://aclanthology.org/2023.argmining-1.2/ | @inproceedings{stodden-etal-2023-using,
title = "Using Masked Language Model Probabilities of Connectives for Stance Detection in {E}nglish Discourse",
author = "Stodden, Regina and
Kallmeyer, Laura and
Kawaletz, Lea and
Dorgeloh, Heidrun",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.2",
doi = "10.18653/v1/2023.argmining-1.2",
pages = "11--18",
abstract = "This paper introduces an approach which operationalizes the role of discourse connectives for detecting argument stance. Specifically, the study investigates the utility of masked language model probabilities of discourse connectives inserted between a claim and a premise that supports or attacks it. The research focuses on a range of connectives known to signal support or attack, such as because, but, so, or although. By employing a LightGBM classifier, the study reveals promising results in stance detection in English discourse. While the proposed system does not aim to outperform state-of-the-art architectures, the classification accuracy is surprisingly high, highlighting the potential of these features to enhance argument mining tasks, including stance detection.",
}
| This paper introduces an approach which operationalizes the role of discourse connectives for detecting argument stance. Specifically, the study investigates the utility of masked language model probabilities of discourse connectives inserted between a claim and a premise that supports or attacks it. The research focuses on a range of connectives known to signal support or attack, such as because, but, so, or although. By employing a LightGBM classifier, the study reveals promising results in stance detection in English discourse. While the proposed system does not aim to outperform state-of-the-art architectures, the classification accuracy is surprisingly high, highlighting the potential of these features to enhance argument mining tasks, including stance detection. | [
"Stodden, Regina",
"Kallmeyer, Laura",
"Kawaletz, Lea",
"Dorgeloh, Heidrun"
] | Using Masked Language Model Probabilities of Connectives for Stance Detection in English Discourse | argmining-1.2 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.3.bib | https://aclanthology.org/2023.argmining-1.3/ | @inproceedings{guerraoui-etal-2023-teach,
title = "Teach Me How to Argue: A Survey on {NLP} Feedback Systems in Argumentation",
author = "Guerraoui, Camelia and
Reisert, Paul and
Inoue, Naoya and
Mim, Farjana Sultana and
Singh, Keshav and
Choi, Jungmin and
Robbani, Irfan and
Naito, Shoichi and
Wang, Wenzhi and
Inui, Kentaro",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.3",
doi = "10.18653/v1/2023.argmining-1.3",
pages = "19--34",
abstract = "The use of argumentation in education has shown improvement in students{'} critical thinking skills, and computational models for argumentation have been developed to further assist this process. Although these models are useful for evaluating the quality of an argument, they often cannot explain why a particular argument score was predicted, i.e., why the argument is good or bad, which makes it difficult to provide constructive feedback to users, e.g., students, so that they can strengthen their critical thinking skills. In this survey, we explore current NLP feedback systems by categorizing each into four important dimensions of feedback (Richness, Visualization, Interactivity and Personalization). We discuss limitations for each dimension and provide suggestions to enhance the power of feedback and explanations to ultimately improve user critical thinking skills.",
}
| The use of argumentation in education has shown improvement in students{'} critical thinking skills, and computational models for argumentation have been developed to further assist this process. Although these models are useful for evaluating the quality of an argument, they often cannot explain why a particular argument score was predicted, i.e., why the argument is good or bad, which makes it difficult to provide constructive feedback to users, e.g., students, so that they can strengthen their critical thinking skills. In this survey, we explore current NLP feedback systems by categorizing each into four important dimensions of feedback (Richness, Visualization, Interactivity and Personalization). We discuss limitations for each dimension and provide suggestions to enhance the power of feedback and explanations to ultimately improve user critical thinking skills. | [
"Guerraoui, Camelia",
"Reisert, Paul",
"Inoue, Naoya",
"Mim, Farjana Sultana",
"Singh, Keshav",
"Choi, Jungmin",
"Robbani, Irfan",
"Naito, Shoichi",
"Wang, Wenzhi",
"Inui, Kentaro"
] | Teach Me How to Argue: A Survey on NLP Feedback Systems in Argumentation | argmining-1.3 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.4.bib | https://aclanthology.org/2023.argmining-1.4/ | @inproceedings{guilluy-etal-2023-constituency,
title = "Constituency Tree Representation for Argument Unit Recognition",
author = "Guilluy, Samuel and
Mehats, Florian and
Chouli, Billal",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.4",
doi = "10.18653/v1/2023.argmining-1.4",
pages = "35--44",
abstract = "The conventional method of extracting arguments from sentences solely relies on word proximity, disregarding the syntactic structure of the sentence. This approach often leads to inaccuracies, especially when identifying argumentative span boundaries. In this research, we investigate the benefits of utilizing a constituency tree representation of sentences to predict Argument Discourse Units (ADUs) at the token level. We first evaluate the effectiveness of utilizing the constituency tree representation for capturing the structural attributes of arguments within sentences. We demonstrate empirically that the constituency structure surpasses simple linear dependencies among neighboring words in terms of effectiveness. Our approach involves leveraging graph neural networks in conjunction with the constituency tree, adapting it specifically for argument unit recognition. Through extensive evaluation, our model outperforms existing approaches in recognizing argument units at the token level. Furthermore, we employ explainability methods to assess the suitability of our model architecture, providing insights into its performance.",
}
| The conventional method of extracting arguments from sentences solely relies on word proximity, disregarding the syntactic structure of the sentence. This approach often leads to inaccuracies, especially when identifying argumentative span boundaries. In this research, we investigate the benefits of utilizing a constituency tree representation of sentences to predict Argument Discourse Units (ADUs) at the token level. We first evaluate the effectiveness of utilizing the constituency tree representation for capturing the structural attributes of arguments within sentences. We demonstrate empirically that the constituency structure surpasses simple linear dependencies among neighboring words in terms of effectiveness. Our approach involves leveraging graph neural networks in conjunction with the constituency tree, adapting it specifically for argument unit recognition. Through extensive evaluation, our model outperforms existing approaches in recognizing argument units at the token level. Furthermore, we employ explainability methods to assess the suitability of our model architecture, providing insights into its performance. | [
"Guilluy, Samuel",
"Mehats, Florian",
"Chouli, Billal"
] | Constituency Tree Representation for Argument Unit Recognition | argmining-1.4 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.5.bib | https://aclanthology.org/2023.argmining-1.5/ | @inproceedings{reimer-etal-2023-stance,
title = "Stance-Aware Re-Ranking for Non-factual Comparative Queries",
author = {Reimer, Jan Heinrich and
Bondarenko, Alexander and
Fr{\"o}be, Maik and
Hagen, Matthias},
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.5",
doi = "10.18653/v1/2023.argmining-1.5",
pages = "45--51",
abstract = "We propose a re-ranking approach to improve the retrieval effectiveness for non-factual comparative queries like {`}Which city is better, London or Paris?{'} based on whether the results express a stance towards the comparison objects (London vs. Paris) or not. Applied to the 26 runs submitted to the Touch{\'e} 2022 task on comparative argument retrieval, our stance-aware re-ranking significantly improves the retrieval effectiveness for all runs when perfect oracle-style stance labels are available. With our most effective practical stance detector based on GPT-3.5 (Fâ of 0.49 on four stance classes), our re-ranking still improves the effectiveness for all runs but only six improvements are significant. Artificially {``}deteriorating{''} the oracle-style labels, we further find that an Fâ of 0.90 for stance detection is necessary to significantly improve the retrieval effectiveness for the best run via stance-aware re-ranking.",
}
| We propose a re-ranking approach to improve the retrieval effectiveness for non-factual comparative queries like {`}Which city is better, London or Paris?{'} based on whether the results express a stance towards the comparison objects (London vs. Paris) or not. Applied to the 26 runs submitted to the Touch{\'e} 2022 task on comparative argument retrieval, our stance-aware re-ranking significantly improves the retrieval effectiveness for all runs when perfect oracle-style stance labels are available. With our most effective practical stance detector based on GPT-3.5 (F₁ of 0.49 on four stance classes), our re-ranking still improves the effectiveness for all runs but only six improvements are significant. Artificially {``}deteriorating{''} the oracle-style labels, we further find that an F₁ of 0.90 for stance detection is necessary to significantly improve the retrieval effectiveness for the best run via stance-aware re-ranking. | [
"Reimer, Jan Heinrich",
"Bondarenko, Alex",
"er",
"Fr{\\\"o}be, Maik",
"Hagen, Matthias"
] | Stance-Aware Re-Ranking for Non-factual Comparative Queries | argmining-1.5 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.6.bib | https://aclanthology.org/2023.argmining-1.6/ | @inproceedings{ali-etal-2023-legal,
title = "Legal Argument Extraction from Court Judgements using Integer Linear Programming",
author = "Ali, Basit and
Pawar, Sachin and
Palshikar, Girish and
Sinha Banerjee, Anindita and
Singh, Dhirendra",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.6",
doi = "10.18653/v1/2023.argmining-1.6",
pages = "52--63",
abstract = "Legal arguments are one of the key aspects of legal knowledge which are expressed in various ways in the unstructured text of court judgements. A large database of past legal arguments can be created by extracting arguments from court judgements, categorizing them, and storing them in a structured format. Such a database would be useful for suggesting suitable arguments for any new case. In this paper, we focus on extracting arguments from Indian Supreme Court judgements using minimal supervision. We first identify a set of certain sentence-level argument markers which are useful for argument extraction such as whether a sentence contains a claim or not, whether a sentence is argumentative in nature, whether two sentences are part of the same argument, etc. We then model the legal argument extraction problem as a text segmentation problem where we combine multiple weak evidences in the form of argument markers using Integer Linear Programming (ILP), finally arriving at a global document-level solution giving the most optimal legal arguments. We demonstrate the effectiveness of our technique by comparing it against several competent baselines.",
}
| Legal arguments are one of the key aspects of legal knowledge which are expressed in various ways in the unstructured text of court judgements. A large database of past legal arguments can be created by extracting arguments from court judgements, categorizing them, and storing them in a structured format. Such a database would be useful for suggesting suitable arguments for any new case. In this paper, we focus on extracting arguments from Indian Supreme Court judgements using minimal supervision. We first identify a set of certain sentence-level argument markers which are useful for argument extraction, such as whether a sentence contains a claim or not, whether a sentence is argumentative in nature, whether two sentences are part of the same argument, etc. We then model the legal argument extraction problem as a text segmentation problem where we combine multiple weak pieces of evidence in the form of argument markers using Integer Linear Programming (ILP), finally arriving at a global document-level solution giving the most optimal legal arguments. We demonstrate the effectiveness of our technique by comparing it against several competent baselines. | [
"Ali, Basit",
"Pawar, Sachin",
"Palshikar, Girish",
"Sinha Banerjee, Anindita",
"Singh, Dhirendra"
] | Legal Argument Extraction from Court Judgements using Integer Linear Programming | argmining-1.6 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.7.bib | https://aclanthology.org/2023.argmining-1.7/ | @inproceedings{kashefi-etal-2023-argument,
title = "Argument Detection in Student Essays under Resource Constraints",
author = "Kashefi, Omid and
Chan, Sophia and
Somasundaran, Swapna",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.7",
doi = "10.18653/v1/2023.argmining-1.7",
pages = "64--75",
abstract = "Learning to make effective arguments is vital for the development of critical-thinking in students and, hence, for their academic and career success. Detecting argument components is crucial for developing systems that assess students{'} ability to develop arguments. Traditionally, supervised learning has been used for this task, but this requires a large corpus of reliable training examples which are often impractical to obtain for student writing. Large language models have also been shown to be effective few-shot learners, making them suitable for low-resource argument detection. However, concerns such as latency, service reliability, and data privacy might hinder their practical applicability. To address these challenges, we present a low-resource classification approach that combines the intrinsic entailment relationship among the argument elements with a parameter-efficient prompt-tuning strategy. Experimental results demonstrate the effectiveness of our method in reducing the data and computation requirements of training an argument detection model without compromising the prediction accuracy. This suggests the practical applicability of our model across a variety of real-world settings, facilitating broader access to argument classification for researchers spanning various domains and problem scenarios.",
}
| Learning to make effective arguments is vital for the development of critical-thinking in students and, hence, for their academic and career success. Detecting argument components is crucial for developing systems that assess students{'} ability to develop arguments. Traditionally, supervised learning has been used for this task, but this requires a large corpus of reliable training examples which are often impractical to obtain for student writing. Large language models have also been shown to be effective few-shot learners, making them suitable for low-resource argument detection. However, concerns such as latency, service reliability, and data privacy might hinder their practical applicability. To address these challenges, we present a low-resource classification approach that combines the intrinsic entailment relationship among the argument elements with a parameter-efficient prompt-tuning strategy. Experimental results demonstrate the effectiveness of our method in reducing the data and computation requirements of training an argument detection model without compromising the prediction accuracy. This suggests the practical applicability of our model across a variety of real-world settings, facilitating broader access to argument classification for researchers spanning various domains and problem scenarios. | [
"Kashefi, Omid",
"Chan, Sophia",
"Somasundaran, Swapna"
] | Argument Detection in Student Essays under Resource Constraints | argmining-1.7 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.8.bib | https://aclanthology.org/2023.argmining-1.8/ | @inproceedings{schaefer-etal-2023-towards,
title = "Towards Fine-Grained Argumentation Strategy Analysis in Persuasive Essays",
author = "Schaefer, Robin and
Knaebel, Ren{\'e} and
Stede, Manfred",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.8",
doi = "10.18653/v1/2023.argmining-1.8",
pages = "76--88",
abstract = "We define an argumentation strategy as the set of rhetorical and stylistic means that authors employ to produce an effective, and often persuasive, text. First computational accounts of such strategies have been relatively coarse-grained, while in our work we aim to move to a more detailed analysis. We extend the annotations of the Argument Annotated Essays corpus (Stab and Gurevych, 2017) with specific types of claims and premises, propose a model for their automatic identification and show first results, and then we discuss usage patterns that emerge with respect to the essay structure, the {``}flows{''} of argument component types, the claim-premise constellations, the role of the essay prompt type, and that of the individual author.",
}
| We define an argumentation strategy as the set of rhetorical and stylistic means that authors employ to produce an effective, and often persuasive, text. First computational accounts of such strategies have been relatively coarse-grained, while in our work we aim to move to a more detailed analysis. We extend the annotations of the Argument Annotated Essays corpus (Stab and Gurevych, 2017) with specific types of claims and premises, propose a model for their automatic identification and show first results, and then we discuss usage patterns that emerge with respect to the essay structure, the {``}flows{''} of argument component types, the claim-premise constellations, the role of the essay prompt type, and that of the individual author. | [
"Schaefer, Robin",
"Knaebel, Ren{\\'e}",
"Stede, Manfred"
] | Towards Fine-Grained Argumentation Strategy Analysis in Persuasive Essays | argmining-1.8 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.9.bib | https://aclanthology.org/2023.argmining-1.9/ | @inproceedings{segura-tinoco-cantador-2023-dimensionality,
title = "Dimensionality Reduction for Machine Learning-based Argument Mining",
author = "Segura-Tinoco, Andr{\'e}s and
Cantador, Iv{\'a}n",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.9",
doi = "10.18653/v1/2023.argmining-1.9",
pages = "89--99",
abstract = "Recent approaches to argument mining have focused on training machine learning algorithms from annotated text corpora, utilizing as input high-dimensional linguistic feature vectors. Differently to previous work, in this paper, we preliminarily investigate the potential benefits of reducing the dimensionality of the input data. Through an empirical study, testing SVD, PCA and LDA techniques on a new argumentative corpus in Spanish for an underexplored domain (e-participation), and using a novel, rich argument model, we show positive results in terms of both computation efficiency and argumentative information extraction effectiveness, for the three major argument mining tasks: argumentative fragment detection, argument component classification, and argumentative relation recognition. On a space with dimension around 3-4{\%} of the number of input features, the argument mining methods are able to reach 95-97{\%} of the performance achieved by using the entire corpus, and even surpass it in some cases.",
}
| Recent approaches to argument mining have focused on training machine learning algorithms from annotated text corpora, utilizing as input high-dimensional linguistic feature vectors. Differently from previous work, in this paper, we preliminarily investigate the potential benefits of reducing the dimensionality of the input data. Through an empirical study, testing SVD, PCA and LDA techniques on a new argumentative corpus in Spanish for an underexplored domain (e-participation), and using a novel, rich argument model, we show positive results in terms of both computation efficiency and argumentative information extraction effectiveness, for the three major argument mining tasks: argumentative fragment detection, argument component classification, and argumentative relation recognition. On a space with dimension around 3-4{\%} of the number of input features, the argument mining methods are able to reach 95-97{\%} of the performance achieved by using the entire corpus, and even surpass it in some cases. | [
"Segura-Tinoco, Andr{\\'e}s",
"Cantador, Iv{\\'a}n"
] | Dimensionality Reduction for Machine Learning-based Argument Mining | argmining-1.9 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.10.bib | https://aclanthology.org/2023.argmining-1.10/ | @inproceedings{kikteva-etal-2023-impact,
title = "On the Impact of Reconstruction and Context for Argument Prediction in Natural Debate",
author = "Kikteva, Zlata and
Trautsch, Alexander and
Katzer, Patrick and
Oest, Mirko and
Herbold, Steffen and
Hautli-Janisz, Annette",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.10",
doi = "10.18653/v1/2023.argmining-1.10",
pages = "100--106",
abstract = "Debate naturalness ranges on a scale from small, highly structured, and topically focused settings to larger, more spontaneous and less constrained environments. The more unconstrained a debate, the more spontaneous speakers act: they build on contextual knowledge and use anaphora or ellipses to construct their arguments. They also use rhetorical devices such as questions and imperatives to support or attack claims. In this paper, we study how the reconstruction of the actual debate contributions, i.e., utterances which contain pronouns, ellipses and fuzzy language, into full-fledged propositions which are interpretable without context impacts the prediction of argument relations and investigate the effect of incorporating contextual information for the task. We work with highly complex spontaneous debates with more than 10 speakers on a wide variety of topics. We find that in contrast to our initial hypothesis, reconstruction does not improve predictions and context only improves them when used in combination with propositions.",
}
| Debate naturalness ranges on a scale from small, highly structured, and topically focused settings to larger, more spontaneous and less constrained environments. The more unconstrained a debate, the more spontaneous speakers act: they build on contextual knowledge and use anaphora or ellipses to construct their arguments. They also use rhetorical devices such as questions and imperatives to support or attack claims. In this paper, we study how the reconstruction of the actual debate contributions, i.e., utterances which contain pronouns, ellipses and fuzzy language, into full-fledged propositions which are interpretable without context impacts the prediction of argument relations and investigate the effect of incorporating contextual information for the task. We work with highly complex spontaneous debates with more than 10 speakers on a wide variety of topics. We find that in contrast to our initial hypothesis, reconstruction does not improve predictions and context only improves them when used in combination with propositions. | [
"Kikteva, Zlata",
"Trautsch, Alex",
"er",
"Katzer, Patrick",
"Oest, Mirko",
"Herbold, Steffen",
"Hautli-Janisz, Annette"
] | On the Impact of Reconstruction and Context for Argument Prediction in Natural Debate | argmining-1.10 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.11.bib | https://aclanthology.org/2023.argmining-1.11/ | @inproceedings{heinisch-etal-2023-unsupervised,
title = "Unsupervised argument reframing with a counterfactual-based approach",
author = "Heinisch, Philipp and
Mindlin, Dimitry and
Cimiano, Philipp",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.11",
doi = "10.18653/v1/2023.argmining-1.11",
pages = "107--119",
abstract = "Framing is an important mechanism in argumentation, as participants in a debate tend to emphasize those aspects or dimensions of the issue under debate that support their standpoint. The task of reframing an argument, that is changing the underlying framing, has received increasing attention recently. We propose a novel unsupervised approach to argument reframing that takes inspiration from counterfactual explanation generation approaches in the field of eXplainable AI (XAI). We formalize the task as a mask-and-replace approach in which an LLM is tasked to replace masked tokens associated with a set of frames to be eliminated by other tokens related to a set of target frames to be added. Our method relies on two key mechanisms: framed decoding and reranking based on a number of metrics similar to those used in XAI to search for a suitable counterfactual. We evaluate our approach on three topics using the dataset by Ruckdeschel and Wiedemann (2022). We show that our two key mechanisms outperform an unguided LLM as a baseline by increasing the ratio of successfully reframed arguments by almost an order of magnitude.",
}
| Framing is an important mechanism in argumentation, as participants in a debate tend to emphasize those aspects or dimensions of the issue under debate that support their standpoint. The task of reframing an argument, that is changing the underlying framing, has received increasing attention recently. We propose a novel unsupervised approach to argument reframing that takes inspiration from counterfactual explanation generation approaches in the field of eXplainable AI (XAI). We formalize the task as a mask-and-replace approach in which an LLM is tasked to replace masked tokens associated with a set of frames to be eliminated by other tokens related to a set of target frames to be added. Our method relies on two key mechanisms: framed decoding and reranking based on a number of metrics similar to those used in XAI to search for a suitable counterfactual. We evaluate our approach on three topics using the dataset by Ruckdeschel and Wiedemann (2022). We show that our two key mechanisms outperform an unguided LLM as a baseline by increasing the ratio of successfully reframed arguments by almost an order of magnitude. | [
"Heinisch, Philipp",
"Mindlin, Dimitry",
"Cimiano, Philipp"
] | Unsupervised argument reframing with a counterfactual-based approach | argmining-1.11 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.12.bib | https://aclanthology.org/2023.argmining-1.12/ | @inproceedings{liu-etal-2023-overview,
title = "Overview of {I}mage{A}rg-2023: The First Shared Task in Multimodal Argument Mining",
author = "Liu, Zhexiong and
Elaraby, Mohamed and
Zhong, Yang and
Litman, Diane",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.12",
doi = "10.18653/v1/2023.argmining-1.12",
pages = "120--132",
abstract = "This paper presents an overview of the ImageArg shared task, the first multimodal Argument Mining shared task co-located with the 10th Workshop on Argument Mining at EMNLP 2023. The shared task comprises two classification subtasks - (1) Subtask-A: Argument Stance Classification; (2) Subtask-B: Image Persuasiveness Classification. The former determines the stance of a tweet containing an image and a piece of text toward a controversial topic (e.g., gun control and abortion). The latter determines whether the image makes the tweet text more persuasive. The shared task received 31 submissions for Subtask-A and 21 submissions for Subtask-B from 9 different teams across 6 countries. The top submission in Subtask-A achieved an F1-score of 0.8647 while the best submission in Subtask-B achieved an F1-score of 0.5561.",
}
| This paper presents an overview of the ImageArg shared task, the first multimodal Argument Mining shared task co-located with the 10th Workshop on Argument Mining at EMNLP 2023. The shared task comprises two classification subtasks - (1) Subtask-A: Argument Stance Classification; (2) Subtask-B: Image Persuasiveness Classification. The former determines the stance of a tweet containing an image and a piece of text toward a controversial topic (e.g., gun control and abortion). The latter determines whether the image makes the tweet text more persuasive. The shared task received 31 submissions for Subtask-A and 21 submissions for Subtask-B from 9 different teams across 6 countries. The top submission in Subtask-A achieved an F1-score of 0.8647 while the best submission in Subtask-B achieved an F1-score of 0.5561. | [
"Liu, Zhexiong",
"Elaraby, Mohamed",
"Zhong, Yang",
"Litman, Diane"
] | Overview of ImageArg-2023: The First Shared Task in Multimodal Argument Mining | argmining-1.12 | 2310.12172 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.argmining-1.13.bib | https://aclanthology.org/2023.argmining-1.13/ | @inproceedings{nobakhtian-etal-2023-iust,
title = "{IUST} at {I}mage{A}rg: The First Shared Task in Multimodal Argument Mining",
author = "Nobakhtian, Melika and
Zamaninejad, Ghazal and
Moosavi Monazzah, Erfan and
Eetemadi, Sauleh",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.13",
doi = "10.18653/v1/2023.argmining-1.13",
pages = "133--138",
abstract = "ImageArg is a shared task at the 10th ArgMining Workshop at EMNLP 2023. It leverages the ImageArg dataset to advance multimodal persuasiveness techniques. This challenge comprises two distinct subtasks: 1) Argumentative Stance (AS) Classification: Assessing whether a given tweet adopts an argumentative stance. 2) Image Persuasiveness (IP) Classification: Determining if the tweet image enhances the persuasive quality of the tweet. We conducted various experiments on both subtasks and ranked sixth out of the nine participating teams.",
}
| ImageArg is a shared task at the 10th ArgMining Workshop at EMNLP 2023. It leverages the ImageArg dataset to advance multimodal persuasiveness techniques. This challenge comprises two distinct subtasks: 1) Argumentative Stance (AS) Classification: Assessing whether a given tweet adopts an argumentative stance. 2) Image Persuasiveness (IP) Classification: Determining if the tweet image enhances the persuasive quality of the tweet. We conducted various experiments on both subtasks and ranked sixth out of the nine participating teams. | [
"Nobakhtian, Melika",
"Zamaninejad, Ghazal",
"Moosavi Monazzah, Erfan",
"Eetemadi, Sauleh"
] | IUST at ImageArg: The First Shared Task in Multimodal Argument Mining | argmining-1.13 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.14.bib | https://aclanthology.org/2023.argmining-1.14/ | @inproceedings{zong-etal-2023-tilfa,
title = "{TILFA}: A Unified Framework for Text, Image, and Layout Fusion in Argument Mining",
author = "Zong, Qing and
Wang, Zhaowei and
Xu, Baixuan and
Zheng, Tianshi and
Shi, Haochen and
Wang, Weiqi and
Song, Yangqiu and
Wong, Ginny and
See, Simon",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.14",
doi = "10.18653/v1/2023.argmining-1.14",
pages = "139--147",
abstract = "A main goal of Argument Mining (AM) is to analyze an author{'}s stance. Unlike previous AM datasets focusing only on text, the shared task at the 10th Workshop on Argument Mining introduces a dataset including both texts and images. Importantly, these images contain both visual elements and optical characters. Our new framework, TILFA (A Unified Framework for Text, Image, and Layout Fusion in Argument Mining), is designed to handle this mixed data. It excels at not only understanding text but also detecting optical characters and recognizing layout details in images. Our model significantly outperforms existing baselines, earning our team, KnowComp, the 1st place in the leaderboard of Argumentative Stance Classification subtask in this shared task.",
}
| A main goal of Argument Mining (AM) is to analyze an author{'}s stance. Unlike previous AM datasets focusing only on text, the shared task at the 10th Workshop on Argument Mining introduces a dataset including both texts and images. Importantly, these images contain both visual elements and optical characters. Our new framework, TILFA (A Unified Framework for Text, Image, and Layout Fusion in Argument Mining), is designed to handle this mixed data. It excels at not only understanding text but also detecting optical characters and recognizing layout details in images. Our model significantly outperforms existing baselines, earning our team, KnowComp, the 1st place in the leaderboard of Argumentative Stance Classification subtask in this shared task. | [
"Zong, Qing",
"Wang, Zhaowei",
"Xu, Baixuan",
"Zheng, Tianshi",
"Shi, Haochen",
"Wang, Weiqi",
"Song, Yangqiu",
"Wong, Ginny",
"See, Simon"
] | TILFA: A Unified Framework for Text, Image, and Layout Fusion in Argument Mining | argmining-1.14 | 2310.05210 | [
"https://github.com/hkust-knowcomp/tilfa"
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.argmining-1.15.bib | https://aclanthology.org/2023.argmining-1.15/ | @inproceedings{soltani-romberg-2023-general,
title = "A General Framework for Multimodal Argument Persuasiveness Classification of Tweets",
author = "Soltani, Mohammad and
Romberg, Julia",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.15",
doi = "10.18653/v1/2023.argmining-1.15",
pages = "148--156",
abstract = "An important property of argumentation concerns the degree of its persuasiveness, which can be influenced by various modalities. On social media platforms, individuals usually have the option of supporting their textual statements with images. The goals of the ImageArg shared task, held with ArgMining 2023, were therefore (A) to classify tweet stances considering both modalities and (B) to predict the influence of an image on the persuasiveness of a tweet text. In this paper, we present our proposed methodology that shows strong performance on both tasks, placing 3rd team on the leaderboard in each case with F1 scores of 0.8273 (A) and 0.5281 (B). The framework relies on pre-trained models to extract text and image features, which are then fed into a task-specific classification model. Our experiments highlighted that the multimodal vision and language model CLIP holds a specific importance in the extraction of features, in particular for task (A).",
}
| An important property of argumentation concerns the degree of its persuasiveness, which can be influenced by various modalities. On social media platforms, individuals usually have the option of supporting their textual statements with images. The goals of the ImageArg shared task, held with ArgMining 2023, were therefore (A) to classify tweet stances considering both modalities and (B) to predict the influence of an image on the persuasiveness of a tweet text. In this paper, we present our proposed methodology that shows strong performance on both tasks, placing 3rd team on the leaderboard in each case with F1 scores of 0.8273 (A) and 0.5281 (B). The framework relies on pre-trained models to extract text and image features, which are then fed into a task-specific classification model. Our experiments highlighted that the multimodal vision and language model CLIP holds a specific importance in the extraction of features, in particular for task (A). | [
"Soltani, Mohammad",
"Romberg, Julia"
] | A General Framework for Multimodal Argument Persuasiveness Classification of Tweets | argmining-1.15 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.16.bib | https://aclanthology.org/2023.argmining-1.16/ | @inproceedings{torky-etal-2023-webis,
title = "{W}ebis @ {I}mage{A}rg 2023: Embedding-based Stance and Persuasiveness Classification",
author = "Torky, Islam and
Ruth, Simon and
Sharma, Shashi and
Salama, Mohamed and
Chaitanya, Krishna and
Gollub, Tim and
Kiesel, Johannes and
Stein, Benno",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.16",
doi = "10.18653/v1/2023.argmining-1.16",
pages = "157--161",
abstract = "This paper reports on the submissions of Webis to the two subtasks of ImageArg 2023. For the subtask of argumentative stance classification, we reached an F1 score of 0.84 using a BERT model for sequence classification. For the subtask of image persuasiveness classification, we reached an F1 score of 0.56 using CLIP embeddings and a neural network model, achieving the best performance for this subtask in the competition. Our analysis reveals that seemingly clear sentences (e.g., {``}I support gun control{''}) are still problematic for our otherwise competitive stance classifier and that ignoring the tweet text for image persuasiveness prediction leads to a model that is similarly effective to our top-performing model.",
}
| This paper reports on the submissions of Webis to the two subtasks of ImageArg 2023. For the subtask of argumentative stance classification, we reached an F1 score of 0.84 using a BERT model for sequence classification. For the subtask of image persuasiveness classification, we reached an F1 score of 0.56 using CLIP embeddings and a neural network model, achieving the best performance for this subtask in the competition. Our analysis reveals that seemingly clear sentences (e.g., {``}I support gun control{''}) are still problematic for our otherwise competitive stance classifier and that ignoring the tweet text for image persuasiveness prediction leads to a model that is similarly effective to our top-performing model. | [
"Torky, Islam",
"Ruth, Simon",
"Sharma, Shashi",
"Salama, Mohamed",
"Chaitanya, Krishna",
"Gollub, Tim",
"Kiesel, Johannes",
"Stein, Benno"
] | Webis @ ImageArg 2023: Embedding-based Stance and Persuasiveness Classification | argmining-1.16 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.17.bib | https://aclanthology.org/2023.argmining-1.17/ | @inproceedings{shokri-levitan-2023-gc,
title = "{GC}-Hunter at {I}mage{A}rg Shared Task: Multi-Modal Stance and Persuasiveness Learning",
author = "Shokri, Mohammad and
Levitan, Sarah Ita",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.17",
doi = "10.18653/v1/2023.argmining-1.17",
pages = "162--166",
abstract = "With the rising prominence of social media, users frequently supplement their written content with images. This trend has brought about new challenges in automatic processing of social media messages. In order to fully understand the meaning of a post, it is necessary to capture the relationship between the image and the text. In this work we address the two main objectives of the ImageArg shared task. Firstly, we aim to determine the stance of a multi-modal tweet toward a particular issue. We propose a strong baseline, fine-tuning transformer based models on concatenation of tweet text and image text. The second goal is to predict the impact of an image on the persuasiveness of the text in a multi-modal tweet. To capture the persuasiveness of an image, we train vision and language models on the data and explore other sets of features merged with the model, to enhance prediction power. Ultimately, both of these goals contribute toward the broader aim of understanding multi-modal messages on social media and how images and texts relate to each other.",
}
| With the rising prominence of social media, users frequently supplement their written content with images. This trend has brought about new challenges in automatic processing of social media messages. In order to fully understand the meaning of a post, it is necessary to capture the relationship between the image and the text. In this work we address the two main objectives of the ImageArg shared task. Firstly, we aim to determine the stance of a multi-modal tweet toward a particular issue. We propose a strong baseline, fine-tuning transformer based models on concatenation of tweet text and image text. The second goal is to predict the impact of an image on the persuasiveness of the text in a multi-modal tweet. To capture the persuasiveness of an image, we train vision and language models on the data and explore other sets of features merged with the model, to enhance prediction power. Ultimately, both of these goals contribute toward the broader aim of understanding multi-modal messages on social media and how images and texts relate to each other. | [
"Shokri, Mohammad",
"Levitan, Sarah Ita"
] | GC-Hunter at ImageArg Shared Task: Multi-Modal Stance and Persuasiveness Learning | argmining-1.17 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.18.bib | https://aclanthology.org/2023.argmining-1.18/ | @inproceedings{sharma-etal-2023-argumentative,
title = "Argumentative Stance Prediction: An Exploratory Study on Multimodality and Few-Shot Learning",
author = "Sharma, Arushi and
Gupta, Abhibha and
Bilalpur, Maneesh",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.18",
doi = "10.18653/v1/2023.argmining-1.18",
pages = "167--174",
abstract = "To advance argumentative stance prediction as a multimodal problem, the First Shared Task in Multimodal Argument Mining hosted stance prediction in crucial social topics of gun control and abortion. Our exploratory study attempts to evaluate the necessity of images for stance prediction in tweets and compare out-of-the-box text-based large-language models (LLM) in few-shot settings against fine-tuned unimodal and multimodal models. Our work suggests an ensemble of fine-tuned text-based language models (0.817 F1-score) outperforms both the multimodal (0.677 F1-score) and text-based few-shot prediction using a recent state-of-the-art LLM (0.550 F1-score). In addition to the differences in performance, our findings suggest that the multimodal models tend to perform better when image content is summarized as natural language over their native pixel structure and, using in-context examples improves few-shot learning of LLMs performance.",
}
| To advance argumentative stance prediction as a multimodal problem, the First Shared Task in Multimodal Argument Mining hosted stance prediction in crucial social topics of gun control and abortion. Our exploratory study attempts to evaluate the necessity of images for stance prediction in tweets and compare out-of-the-box text-based large-language models (LLM) in few-shot settings against fine-tuned unimodal and multimodal models. Our work suggests an ensemble of fine-tuned text-based language models (0.817 F1-score) outperforms both the multimodal (0.677 F1-score) and text-based few-shot prediction using a recent state-of-the-art LLM (0.550 F1-score). In addition to the differences in performance, our findings suggest that the multimodal models tend to perform better when image content is summarized as natural language over their native pixel structure and, using in-context examples improves few-shot learning of LLMs performance. | [
"Sharma, Arushi",
"Gupta, Abhibha",
"Bilalpur, Maneesh"
] | Argumentative Stance Prediction: An Exploratory Study on Multimodality and Few-Shot Learning | argmining-1.18 | 2310.07093 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.argmining-1.19.bib | https://aclanthology.org/2023.argmining-1.19/ | @inproceedings{zhang-etal-2023-split,
title = "{SPLIT}: Stance and Persuasion Prediction with Multi-modal on Image and Textual Information",
author = "Zhang, Jing and
Yu, Shaojun and
Li, Xuan and
Geng, Jia and
Zheng, Zhiyuan and
Ho, Joyce",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.19",
doi = "10.18653/v1/2023.argmining-1.19",
pages = "175--180",
abstract = "Persuasiveness is a prominent personality trait that measures the extent to which a speaker can impact the beliefs, attitudes, intentions, motivations, and actions of their audience. The ImageArg task is a featured challenge at the 10th ArgMining Workshop during EMNLP 2023, focusing on harnessing the potential of the ImageArg dataset to advance techniques in multimodal persuasion. In this study, we investigate the utilization of dual-modality datasets and evaluate three distinct multi-modality models. By enhancing multi-modality datasets, we demonstrate both the advantages and constraints of cutting-edge models.",
}
| Persuasiveness is a prominent personality trait that measures the extent to which a speaker can impact the beliefs, attitudes, intentions, motivations, and actions of their audience. The ImageArg task is a featured challenge at the 10th ArgMining Workshop during EMNLP 2023, focusing on harnessing the potential of the ImageArg dataset to advance techniques in multimodal persuasion. In this study, we investigate the utilization of dual-modality datasets and evaluate three distinct multi-modality models. By enhancing multi-modality datasets, we demonstrate both the advantages and constraints of cutting-edge models. | [
"Zhang, Jing",
"Yu, Shaojun",
"Li, Xuan",
"Geng, Jia",
"Zheng, Zhiyuan",
"Ho, Joyce"
] | SPLIT: Stance and Persuasion Prediction with Multi-modal on Image and Textual Information | argmining-1.19 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.20.bib | https://aclanthology.org/2023.argmining-1.20/ | @inproceedings{rajaraman-etal-2023-semantists,
title = "Semantists at {I}mage{A}rg-2023: Exploring Cross-modal Contrastive and Ensemble Models for Multimodal Stance and Persuasiveness Classification",
author = "Rajaraman, Kanagasabai and
Veeramani, Hariram and
Rajamanickam, Saravanan and
Westerski, Adam Maciej and
Kim, Jung-Jae",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.20",
doi = "10.18653/v1/2023.argmining-1.20",
pages = "181--186",
abstract = "In this paper, we describe our system for ImageArg-2023 Shared Task that aims to identify an image{'}s stance towards a tweet and determine its persuasiveness score concerning a specific topic. In particular, the Shared Task proposes two subtasks viz. subtask (A) Multimodal Argument Stance (AS) Classification, and subtask (B) Multimodal Image Persuasiveness (IP) Classification, using a dataset composed of tweets (images and text) from controversial topics, namely gun control and abortion. For subtask A, we employ multiple transformer models using a text based approach to classify the argumentative stance of the tweet. For sub task B we adopted text based as well as multimodal learning methods to classify image persuasiveness of the tweet. Surprisingly, the text-based approach of the tweet overall performed better than the multimodal approaches considered. In summary, our best system achieved a F1 score of 0.85 for sub task (A) and 0.50 for subtask (B), and ranked 2nd in subtask (A) and 4th in subtask (B), among all teams submissions.",
}
| In this paper, we describe our system for ImageArg-2023 Shared Task that aims to identify an image{'}s stance towards a tweet and determine its persuasiveness score concerning a specific topic. In particular, the Shared Task proposes two subtasks viz. subtask (A) Multimodal Argument Stance (AS) Classification, and subtask (B) Multimodal Image Persuasiveness (IP) Classification, using a dataset composed of tweets (images and text) from controversial topics, namely gun control and abortion. For subtask A, we employ multiple transformer models using a text based approach to classify the argumentative stance of the tweet. For sub task B we adopted text based as well as multimodal learning methods to classify image persuasiveness of the tweet. Surprisingly, the text-based approach of the tweet overall performed better than the multimodal approaches considered. In summary, our best system achieved a F1 score of 0.85 for sub task (A) and 0.50 for subtask (B), and ranked 2nd in subtask (A) and 4th in subtask (B), among all teams submissions. | [
"Rajaraman, Kanagasabai",
"Veeramani, Hariram",
"Rajamanickam, Saravanan",
"Westerski, Adam Maciej",
"Kim, Jung-Jae"
] | Semantists at ImageArg-2023: Exploring Cross-modal Contrastive and Ensemble Models for Multimodal Stance and Persuasiveness Classification | argmining-1.20 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.21.bib | https://aclanthology.org/2023.argmining-1.21/ | @inproceedings{dycke-etal-2023-overview,
title = "Overview of {P}rag{T}ag-2023: Low-Resource Multi-Domain Pragmatic Tagging of Peer Reviews",
author = "Dycke, Nils and
Kuznetsov, Ilia and
Gurevych, Iryna",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.21",
doi = "10.18653/v1/2023.argmining-1.21",
pages = "187--196",
abstract = "Peer review is the key quality control mechanism in science. The core component of peer review are the review reports {--} argumentative texts where the reviewers evaluate the work and make suggestions to the authors. Reviewing is a demanding expert task prone to bias. An active line of research in NLP aims to support peer review via automatic analysis of review reports. This research meets two key challenges. First, NLP to date has focused on peer reviews from machine learning conferences. Yet, NLP models are prone to domain shift and might underperform when applied to reviews from a new research community. Second, while some venues make their reviewing processes public, peer reviewing data is generally hard to obtain and expensive to label. Approaches to low-data NLP processing for peer review remain under-investigated. Enabled by the recent release of open multi-domain corpora of peer reviews, the PragTag-2023 Shared Task explored the ways to increase domain robustness and address data scarcity in pragmatic tagging {--} a sentence tagging task where review statements are classified by their argumentative function. This paper describes the shared task, outlines the participating systems, and summarizes the results.",
}
| Peer review is the key quality control mechanism in science. The core component of peer review are the review reports {--} argumentative texts where the reviewers evaluate the work and make suggestions to the authors. Reviewing is a demanding expert task prone to bias. An active line of research in NLP aims to support peer review via automatic analysis of review reports. This research meets two key challenges. First, NLP to date has focused on peer reviews from machine learning conferences. Yet, NLP models are prone to domain shift and might underperform when applied to reviews from a new research community. Second, while some venues make their reviewing processes public, peer reviewing data is generally hard to obtain and expensive to label. Approaches to low-data NLP processing for peer review remain under-investigated. Enabled by the recent release of open multi-domain corpora of peer reviews, the PragTag-2023 Shared Task explored the ways to increase domain robustness and address data scarcity in pragmatic tagging {--} a sentence tagging task where review statements are classified by their argumentative function. This paper describes the shared task, outlines the participating systems, and summarizes the results. | [
"Dycke, Nils",
"Kuznetsov, Ilia",
"Gurevych, Iryna"
] | Overview of PragTag-2023: Low-Resource Multi-Domain Pragmatic Tagging of Peer Reviews | argmining-1.21 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.22.bib | https://aclanthology.org/2023.argmining-1.22/ | @inproceedings{ding-etal-2023-catalpa,
title = "{CATALPA}{\_}{E}du{NLP} at {P}rag{T}ag-2023",
author = "Ding, Yuning and
Bexte, Marie and
Horbach, Andrea",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.22",
doi = "10.18653/v1/2023.argmining-1.22",
pages = "197--201",
abstract = "This paper describes our contribution to the PragTag-2023 Shared Task. We describe and compare different approaches based on sentence classification, sentence similarity, and sequence tagging. We find that a BERT-based sentence labeling approach integrating positional information outperforms both sequence tagging and SBERT-based sentence classification. We further provide analyses highlighting the potential of combining different approaches.",
}
| This paper describes our contribution to the PragTag-2023 Shared Task. We describe and compare different approaches based on sentence classification, sentence similarity, and sequence tagging. We find that a BERT-based sentence labeling approach integrating positional information outperforms both sequence tagging and SBERT-based sentence classification. We further provide analyses highlighting the potential of combining different approaches. | [
"Ding, Yuning",
"Bexte, Marie",
"Horbach, Andrea"
] | CATALPA_EduNLP at PragTag-2023 | argmining-1.22 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.23.bib | https://aclanthology.org/2023.argmining-1.23/ | @inproceedings{luo-etal-2023-deepblueai,
title = "{D}eep{B}lue{AI} at {P}rag{T}ag-2023:Ensemble-based Text Classification Approaches under Limited Data Resources",
author = "Luo, Zhipeng and
Wang, Jiahui and
Guo, Yihao",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.23",
doi = "10.18653/v1/2023.argmining-1.23",
pages = "202--206",
abstract = "Due to the scarcity of review data and the high annotation cost, in this paper, we primarily delve into the fine-tuning of pretrained models using limited data. To enhance the robustness of the model, we employ adversarial training techniques. By introducing subtle perturbations, we compel the model to better cope with adversarial attacks, thereby increasing the stability of the model in input data. We utilize pooling techniques to aid the model in extracting critical information, reducing computational complexity, and improving the model{'}s generalization capability. Experimental results demonstrate the effectiveness of our proposed approach on a review paper dataset with limited data volume.",
}
| Due to the scarcity of review data and the high annotation cost, in this paper, we primarily delve into the fine-tuning of pretrained models using limited data. To enhance the robustness of the model, we employ adversarial training techniques. By introducing subtle perturbations, we compel the model to better cope with adversarial attacks, thereby increasing the stability of the model in input data. We utilize pooling techniques to aid the model in extracting critical information, reducing computational complexity, and improving the model{'}s generalization capability. Experimental results demonstrate the effectiveness of our proposed approach on a review paper dataset with limited data volume. | [
"Luo, Zhipeng",
"Wang, Jiahui",
"Guo, Yihao"
] | DeepBlueAI at PragTag-2023:Ensemble-based Text Classification Approaches under Limited Data Resources | argmining-1.23 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.24.bib | https://aclanthology.org/2023.argmining-1.24/ | @inproceedings{lee-etal-2023-milab,
title = "{MILAB} at {P}rag{T}ag-2023: Enhancing Cross-Domain Generalization through Data Augmentation with Reduced Uncertainty",
author = "Lee, Yoonsang and
Lee, Dongryeol and
Jung, Kyomin",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.24",
doi = "10.18653/v1/2023.argmining-1.24",
pages = "207--211",
abstract = "This paper describes our submission to the PragTag task, which aims to categorize each sentence from peer reviews into one of the six distinct pragmatic tags. The task consists of three conditions: full, low, and zero, each distinguished by the number of training data and further categorized into five distinct domains. The main challenge of this task is the domain shift, which is exacerbated by non-uniform distribution and the limited availability of data across the six pragmatic tags and their respective domains. To address this issue, we predominantly employ two data augmentation techniques designed to mitigate data imbalance and scarcity: pseudo-labeling and synonym generation. We experimentally demonstrate the effectiveness of our approaches, achieving the first rank under the zero condition and the third in the full and low conditions.",
}
| This paper describes our submission to the PragTag task, which aims to categorize each sentence from peer reviews into one of the six distinct pragmatic tags. The task consists of three conditions: full, low, and zero, each distinguished by the number of training data and further categorized into five distinct domains. The main challenge of this task is the domain shift, which is exacerbated by non-uniform distribution and the limited availability of data across the six pragmatic tags and their respective domains. To address this issue, we predominantly employ two data augmentation techniques designed to mitigate data imbalance and scarcity: pseudo-labeling and synonym generation. We experimentally demonstrate the effectiveness of our approaches, achieving the first rank under the zero condition and the third in the full and low conditions. | [
"Lee, Yoonsang",
"Lee, Dongryeol",
"Jung, Kyomin"
] | MILAB at PragTag-2023: Enhancing Cross-Domain Generalization through Data Augmentation with Reduced Uncertainty | argmining-1.24 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.25.bib | https://aclanthology.org/2023.argmining-1.25/ | @inproceedings{gollapalli-etal-2023-nus,
title = "{NUS}-{IDS} at {P}rag{T}ag-2023: Improving Pragmatic Tagging of Peer Reviews through Unlabeled Data",
author = "Gollapalli, Sujatha Das and
Huang, Yixin and
Ng, See-Kiong",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.25",
doi = "10.18653/v1/2023.argmining-1.25",
pages = "212--217",
abstract = "We describe our models for the Pragmatic Tagging of Peer Reviews Shared Task at the 10th Workshop on Argument Mining at EMNLP-2023. We trained multiple sentence classification models for the above competition task by employing various state-of-the-art transformer models that can be fine-tuned either in the traditional way or through instruction-based fine-tuning. Multiple model predictions on unlabeled data are combined to tentatively label unlabeled instances and augment the dataset to further improve performance on the prediction task. In particular, on the F1000RD corpus, we perform on-par with models trained on 100{\%} of the training data while using only 10{\%} of the data. Overall, on the competition datasets, we rank among the top-2 performers for the different data conditions.",
}
| We describe our models for the Pragmatic Tagging of Peer Reviews Shared Task at the 10th Workshop on Argument Mining at EMNLP-2023. We trained multiple sentence classification models for the above competition task by employing various state-of-the-art transformer models that can be fine-tuned either in the traditional way or through instruction-based fine-tuning. Multiple model predictions on unlabeled data are combined to tentatively label unlabeled instances and augment the dataset to further improve performance on the prediction task. In particular, on the F1000RD corpus, we perform on-par with models trained on 100{\%} of the training data while using only 10{\%} of the data. Overall, on the competition datasets, we rank among the top-2 performers for the different data conditions. | [
"Gollapalli, Sujatha Das",
"Huang, Yixin",
"Ng, See-Kiong"
] | NUS-IDS at PragTag-2023: Improving Pragmatic Tagging of Peer Reviews through Unlabeled Data | argmining-1.25 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.argmining-1.26.bib | https://aclanthology.org/2023.argmining-1.26/ | @inproceedings{suri-etal-2023-suryakiran,
title = "{S}urya{K}iran at {P}rag{T}ag 2023 - Benchmarking Domain Adaptation using Masked Language Modeling in Natural Language Processing For Specialized Data",
author = "Suri, Kunal and
Mishra, Prakhar and
Nanda, Albert",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.26",
doi = "10.18653/v1/2023.argmining-1.26",
pages = "218--222",
abstract = "Most transformer models are trained on English language corpus that contain text from forums like Wikipedia and Reddit. While these models are being used in many specialized domains such as scientific peer review, legal, and healthcare, their performance is subpar because they do not contain the information present in data relevant to such specialized domains. To help these models perform as well as possible on specialized domains, one of the approaches is to collect labeled data of that particular domain and fine-tune the transformer model of choice on such data. While a good approach, it suffers from the challenge of collecting a lot of labeled data which requires significant manual effort. Another way is to use unlabeled domain-specific data to pre-train these transformer model and then fine-tune this model on labeled data. We evaluate how transformer models perform when fine-tuned on labeled data after initial pre-training with unlabeled data. We compare their performance with a transformer model fine-tuned on labeled data without initial pre-training with unlabeled data. We perform this comparison on a dataset of Scientific Peer Reviews provided by organizers of PragTag-2023 Shared Task and observe that a transformer model fine-tuned on labeled data after initial pre-training on unlabeled data using Masked Language Modelling outperforms a transformer model fine-tuned only on labeled data without initial pre-training with unlabeled data using Masked Language Modelling.",
}
| Most transformer models are trained on English language corpus that contain text from forums like Wikipedia and Reddit. While these models are being used in many specialized domains such as scientific peer review, legal, and healthcare, their performance is subpar because they do not contain the information present in data relevant to such specialized domains. To help these models perform as well as possible on specialized domains, one of the approaches is to collect labeled data of that particular domain and fine-tune the transformer model of choice on such data. While a good approach, it suffers from the challenge of collecting a lot of labeled data which requires significant manual effort. Another way is to use unlabeled domain-specific data to pre-train these transformer model and then fine-tune this model on labeled data. We evaluate how transformer models perform when fine-tuned on labeled data after initial pre-training with unlabeled data. We compare their performance with a transformer model fine-tuned on labeled data without initial pre-training with unlabeled data. We perform this comparison on a dataset of Scientific Peer Reviews provided by organizers of PragTag-2023 Shared Task and observe that a transformer model fine-tuned on labeled data after initial pre-training on unlabeled data using Masked Language Modelling outperforms a transformer model fine-tuned only on labeled data without initial pre-training with unlabeled data using Masked Language Modelling. | [
"Suri, Kunal",
"Mishra, Prakhar",
"N",
"a, Albert"
] | SuryaKiran at PragTag 2023 - Benchmarking Domain Adaptation using Masked Language Modeling in Natural Language Processing For Specialized Data | argmining-1.26 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.1.bib | https://aclanthology.org/2023.banglalp-1.1/ | @inproceedings{raihan-etal-2023-offensive,
title = "Offensive Language Identification in Transliterated and Code-Mixed {B}angla",
author = "Raihan, Md Nishat and
Tanmoy, Umma and
Islam, Anika Binte and
North, Kai and
Ranasinghe, Tharindu and
Anastasopoulos, Antonios and
Zampieri, Marcos",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.1",
doi = "10.18653/v1/2023.banglalp-1.1",
pages = "1--6",
abstract = "Identifying offensive content in social media is vital to create safe online communities. Several recent studies have addressed this problem by creating datasets for various languages. In this paper, we explore offensive language identification in texts with transliterations and code-mixing, linguistic phenomena common in multilingual societies, and a known challenge for NLP systems. We introduce TB-OLID, a transliterated Bangla offensive language dataset containing 5,000 manually annotated comments. We train and fine-tune machine learning models on TB-OLID, and we evaluate their results on this dataset. Our results show that English pre-trained transformer-based models, such as fBERT and HateBERT achieve the best performance on this dataset.",
}
| Identifying offensive content in social media is vital to create safe online communities. Several recent studies have addressed this problem by creating datasets for various languages. In this paper, we explore offensive language identification in texts with transliterations and code-mixing, linguistic phenomena common in multilingual societies, and a known challenge for NLP systems. We introduce TB-OLID, a transliterated Bangla offensive language dataset containing 5,000 manually annotated comments. We train and fine-tune machine learning models on TB-OLID, and we evaluate their results on this dataset. Our results show that English pre-trained transformer-based models, such as fBERT and HateBERT achieve the best performance on this dataset. | [
"Raihan, Md Nishat",
"Tanmoy, Umma",
"Islam, Anika Binte",
"North, Kai",
"Ranasinghe, Tharindu",
"Anastasopoulos, Antonios",
"Zampieri, Marcos"
] | Offensive Language Identification in Transliterated and Code-Mixed Bangla | banglalp-1.1 | 2311.15023 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.banglalp-1.2.bib | https://aclanthology.org/2023.banglalp-1.2/ | @inproceedings{rahman-etal-2023-bspell,
title = "{BS}pell: A {CNN}-Blended {BERT} Based {B}angla Spell Checker",
author = "Rahman, Chowdhury and
Rahman, MD.Hasibur and
Zakir, Samiha and
Rafsan, Mohammad and
Ali, Mohammed Eunus",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.2",
doi = "10.18653/v1/2023.banglalp-1.2",
pages = "7--17",
abstract = "Bangla typing is mostly performed using English keyboard and can be highly erroneous due to the presence of compound and similarly pronounced letters. Spelling correction of a misspelled word requires understanding of word typing pattern as well as the context of the word usage. A specialized BERT model named BSpell has been proposed in this paper targeted towards word for word correction in sentence level. BSpell contains an end-to-end trainable CNN sub-model named SemanticNet along with specialized auxiliary loss. This allows BSpell to specialize in highly inflected Bangla vocabulary in the presence of spelling errors. Furthermore, a hybrid pretraining scheme has been proposed for BSpell that combines word level and character level masking. Comparison on two Bangla and one Hindi spelling correction dataset shows the superiority of our proposed approach.",
}
| Bangla typing is mostly performed using English keyboard and can be highly erroneous due to the presence of compound and similarly pronounced letters. Spelling correction of a misspelled word requires understanding of word typing pattern as well as the context of the word usage. A specialized BERT model named BSpell has been proposed in this paper targeted towards word for word correction in sentence level. BSpell contains an end-to-end trainable CNN sub-model named SemanticNet along with specialized auxiliary loss. This allows BSpell to specialize in highly inflected Bangla vocabulary in the presence of spelling errors. Furthermore, a hybrid pretraining scheme has been proposed for BSpell that combines word level and character level masking. Comparison on two Bangla and one Hindi spelling correction dataset shows the superiority of our proposed approach. | [
"Rahman, Chowdhury",
"Rahman, MD.Hasibur",
"Zakir, Samiha",
"Rafsan, Mohammad",
"Ali, Mohammed Eunus"
] | BSpell: A CNN-Blended BERT Based Bangla Spell Checker | banglalp-1.2 | 2208.09709 | [
"https://github.com/hasiburshanto/bangla-spell-checker"
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.banglalp-1.3.bib | https://aclanthology.org/2023.banglalp-1.3/ | @inproceedings{bijoy-etal-2023-advancing,
title = "Advancing {B}angla Punctuation Restoration by a Monolingual Transformer-Based Method and a Large-Scale Corpus",
author = "Bijoy, Mehedi Hasan and
Faria, Mir Fatema Afroz and
E Sobhani, Mahbub and
Ferdoush, Tanzid and
Shatabda, Swakkhar",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.3",
doi = "10.18653/v1/2023.banglalp-1.3",
pages = "18--25",
abstract = "Punctuation restoration is the endeavor of reinstating and rectifying missing or improper punctuation marks within a text, thereby eradicating ambiguity in written discourse. The Bangla punctuation restoration task has received little attention and exploration, despitethe rising popularity of textual communication in the language. The primary hindrances in the advancement of the task revolve aroundthe utilization of transformer-based methods and an openly accessible extensive corpus, challenges that we discovered remainedunresolved in earlier efforts. In this study, we propose a baseline by introducing a mono-lingual transformer-based method named Jatikarok, where the effectiveness of transfer learning has been meticulously scrutinized, and a large-scale corpus containing 1.48M source-target pairs to resolve the previous issues. The Jatikarok attains accuracy rates of 95.2{\%}, 85.13{\%}, and 91.36{\%} on the BanglaPRCorpus, Prothom-Alo Balanced, and BanglaOPUS corpora, thereby establishing itself as the state-of-the-art method through its superior performance compared to BanglaT5 and T5-Small. Jatikarok and BanglaPRCorpus are publicly available at: https://github.com/mehedihasanbijoy/Jatikarok-and-BanglaPRCorpus",
}
| Punctuation restoration is the endeavor of reinstating and rectifying missing or improper punctuation marks within a text, thereby eradicating ambiguity in written discourse. The Bangla punctuation restoration task has received little attention and exploration, despitethe rising popularity of textual communication in the language. The primary hindrances in the advancement of the task revolve aroundthe utilization of transformer-based methods and an openly accessible extensive corpus, challenges that we discovered remainedunresolved in earlier efforts. In this study, we propose a baseline by introducing a mono-lingual transformer-based method named Jatikarok, where the effectiveness of transfer learning has been meticulously scrutinized, and a large-scale corpus containing 1.48M source-target pairs to resolve the previous issues. The Jatikarok attains accuracy rates of 95.2{\%}, 85.13{\%}, and 91.36{\%} on the BanglaPRCorpus, Prothom-Alo Balanced, and BanglaOPUS corpora, thereby establishing itself as the state-of-the-art method through its superior performance compared to BanglaT5 and T5-Small. Jatikarok and BanglaPRCorpus are publicly available at: https://github.com/mehedihasanbijoy/Jatikarok-and-BanglaPRCorpus | [
"Bijoy, Mehedi Hasan",
"Faria, Mir Fatema Afroz",
"E Sobhani, Mahbub",
"Ferdoush, Tanzid",
"Shatabda, Swakkhar"
] | Advancing Bangla Punctuation Restoration by a Monolingual Transformer-Based Method and a Large-Scale Corpus | banglalp-1.3 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.4.bib | https://aclanthology.org/2023.banglalp-1.4/ | @inproceedings{guo-etal-2023-pipeline,
title = "Pipeline Enabling Zero-shot Classification for {B}angla Handwritten Grapheme",
author = "Guo, Linsheng and
Sifat, Md Habibur and
Ahmed, Tashin",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.4",
doi = "10.18653/v1/2023.banglalp-1.4",
pages = "26--33",
abstract = "This research investigates Zero-Shot Learning (ZSL), and proposes CycleGAN-based image synthesis and accurate label mapping to build a strong association between labels and graphemes. The objective is to enhance model accuracy in detecting unseen classes by employing advanced font image categorization and a CycleGAN-based generator. The resulting representations of abstract character structures demonstrate a significant improvement in recognition, accommodating both seen and unseen classes. This investigation addresses the complex issue of Optical Character Recognition (OCR) in the specific context of the Bangla language. Bangla script is renowned for its intricate nature, consisting of a total of 49 letters, which include 11 vowels, 38 consonants, and 18 diacritics. The combination of letters in this complex arrangement provides the opportunity to create almost 13,000 unique variations of graphemes, which exceeds the number of graphemic units found in the English language. Our investigation presents a new strategy for ZSL in the context of Bangla OCR. This approach combines generative models with careful labeling techniques to enhance the progress of Bangla OCR, specifically focusing on grapheme categorization. Our goal is to make a substantial impact on the digitalization of educational resources in the Indian subcontinent.",
}
| This research investigates Zero-Shot Learning (ZSL), and proposes CycleGAN-based image synthesis and accurate label mapping to build a strong association between labels and graphemes. The objective is to enhance model accuracy in detecting unseen classes by employing advanced font image categorization and a CycleGAN-based generator. The resulting representations of abstract character structures demonstrate a significant improvement in recognition, accommodating both seen and unseen classes. This investigation addresses the complex issue of Optical Character Recognition (OCR) in the specific context of the Bangla language. Bangla script is renowned for its intricate nature, consisting of a total of 49 letters, which include 11 vowels, 38 consonants, and 18 diacritics. The combination of letters in this complex arrangement provides the opportunity to create almost 13,000 unique variations of graphemes, which exceeds the number of graphemic units found in the English language. Our investigation presents a new strategy for ZSL in the context of Bangla OCR. This approach combines generative models with careful labeling techniques to enhance the progress of Bangla OCR, specifically focusing on grapheme categorization. Our goal is to make a substantial impact on the digitalization of educational resources in the Indian subcontinent. | [
"Guo, Linsheng",
"Sifat, Md Habibur",
"Ahmed, Tashin"
] | Pipeline Enabling Zero-shot Classification for Bangla Handwritten Grapheme | banglalp-1.4 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.5.bib | https://aclanthology.org/2023.banglalp-1.5/ | @inproceedings{mukherjee-etal-2023-low,
title = "Low-Resource Text Style Transfer for {B}angla: Data {\&} Models",
author = "Mukherjee, Sourabrata and
Bansal, Akanksha and
Majumdar, Pritha and
Ojha, Atul Kr. and
Du{\v{s}}ek, Ond{\v{r}}ej",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.5",
doi = "10.18653/v1/2023.banglalp-1.5",
pages = "34--47",
abstract = "Text style transfer (TST) involves modifying the linguistic style of a given text while retaining its core content. This paper addresses the challenging task of text style transfer in the Bangla language, which is low-resourced in this area. We present a novel Bangla dataset that facilitates text sentiment transfer, a subtask of TST, enabling the transformation of positive sentiment sentences to negative and vice versa. To establish a high-quality base for further research, we refined and corrected an existing English dataset of 1,000 sentences for sentiment transfer based on Yelp reviews, and we introduce a new human-translated Bangla dataset that parallels its English counterpart. Furthermore, we offer multiple benchmark models that serve as a validation of the dataset and baseline for further research.",
}
| Text style transfer (TST) involves modifying the linguistic style of a given text while retaining its core content. This paper addresses the challenging task of text style transfer in the Bangla language, which is low-resourced in this area. We present a novel Bangla dataset that facilitates text sentiment transfer, a subtask of TST, enabling the transformation of positive sentiment sentences to negative and vice versa. To establish a high-quality base for further research, we refined and corrected an existing English dataset of 1,000 sentences for sentiment transfer based on Yelp reviews, and we introduce a new human-translated Bangla dataset that parallels its English counterpart. Furthermore, we offer multiple benchmark models that serve as a validation of the dataset and baseline for further research. | [
"Mukherjee, Sourabrata",
"Bansal, Akanksha",
"Majumdar, Pritha",
"Ojha, Atul Kr.",
"Du{\\v{s}}ek, Ond{\\v{r}}ej"
] | Low-Resource Text Style Transfer for Bangla: Data & Models | banglalp-1.5 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.6.bib | https://aclanthology.org/2023.banglalp-1.6/ | @inproceedings{sakib-etal-2023-intent,
title = "Intent Detection and Slot Filling for Home Assistants: Dataset and Analysis for {B}angla and {S}ylheti",
author = "Sakib, Fardin Ahsan and
Karim, A H M Rezaul and
Khan, Saadat Hasan and
Rahman, Md Mushfiqur",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.6",
doi = "10.18653/v1/2023.banglalp-1.6",
pages = "48--55",
abstract = "As voice assistants cement their place in our technologically advanced society, there remains a need to cater to the diverse linguistic landscape, including colloquial forms of low-resource languages. Our study introduces the first-ever comprehensive dataset for intent detection and slot filling in formal Bangla, colloquial Bangla, and Sylheti languages, totaling 984 samples across 10 unique intents. Our analysis reveals the robustness of large language models for tackling downstream tasks with inadequate data. The GPT-3.5 model achieves an impressive F1 score of 0.94 in intent detection and 0.51 in slot filling for colloquial Bangla.",
}
| As voice assistants cement their place in our technologically advanced society, there remains a need to cater to the diverse linguistic landscape, including colloquial forms of low-resource languages. Our study introduces the first-ever comprehensive dataset for intent detection and slot filling in formal Bangla, colloquial Bangla, and Sylheti languages, totaling 984 samples across 10 unique intents. Our analysis reveals the robustness of large language models for tackling downstream tasks with inadequate data. The GPT-3.5 model achieves an impressive F1 score of 0.94 in intent detection and 0.51 in slot filling for colloquial Bangla. | [
"Sakib, Fardin Ahsan",
"Karim, A H M Rezaul",
"Khan, Saadat Hasan",
"Rahman, Md Mushfiqur"
] | Intent Detection and Slot Filling for Home Assistants: Dataset and Analysis for Bangla and Sylheti | banglalp-1.6 | 2310.10935 | [
"https://github.com/mushfiqur11/bangla-sylheti-snips"
] | https://huggingface.co/papers/2310.10935 | 0 | 0 | 0 | 4 | [] | [] | [] | 1 | Poster |
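The abstract reports GPT-3.5 reaching an F1 of 0.94 on intent detection for colloquial Bangla. As a rough illustration of such a prompting setup (not the authors' exact protocol), here is a hedged sketch; the intent inventory and prompt wording below are assumptions.

```python
# Hedged sketch of prompt-based intent detection with GPT-3.5.
# The intent list and prompt template are illustrative assumptions.
from openai import OpenAI

client = OpenAI()  # expects OPENAI_API_KEY in the environment

INTENTS = ["turn_on_light", "turn_off_light", "set_alarm", "play_music"]  # hypothetical subset

def detect_intent(utterance: str) -> str:
    prompt = (
        "You are an intent classifier for a Bangla/Sylheti home assistant.\n"
        f"Choose exactly one intent from {INTENTS} for the utterance below.\n"
        f"Utterance: {utterance}\nIntent:"
    )
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
    )
    return response.choices[0].message.content.strip()
```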
https://aclanthology.org/2023.banglalp-1.7.bib | https://aclanthology.org/2023.banglalp-1.7/ | @inproceedings{kabir-etal-2023-bemolexbert,
title = "{BE}mo{L}ex{BERT}: A Hybrid Model for Multilabel Textual Emotion Classification in {B}angla by Combining Transformers with Lexicon Features",
author = "Kabir, Ahasan and
Roy, Animesh and
Taheri, Zaima",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.7",
doi = "10.18653/v1/2023.banglalp-1.7",
pages = "56--61",
abstract = "Multilevel textual emotion classification involves the extraction of emotions from text data, a task that has seen significant progress in high resource languages. However, resource-constrained languages like Bangla have received comparatively less attention in the field of emotion classification. Furthermore, the availability of a comprehensive and accurate emotion lexiconspecifically designed for the Bangla language is limited. In this paper, we present a hybrid model that combines lexicon features with transformers for multilabel emotion classification in the Bangla language. We have developed a comprehensive Bangla emotion lexicon consisting of 5336 carefully curated lexicons across nine emotion categories. We experimented with pre-trained transformers including mBERT, XLM-R, BanglishBERT, and BanglaBERT on the EmoNaBa (Islam et al.,2022) dataset. By integrating lexicon features from our emotion lexicon, we evaluate the performance of these transformers in emotion detection tasks. The results demonstrate that incorporating lexicon features significantly improves the performance of transformers. Among the evaluated models, our hybrid approach achieves the highest performance using BanglaBERT(large) (Bhattacharjee et al., 2022) as the pre-trained transformer along with our emotion lexicon, achieving an impressive weighted F1 score of 82.73{\%}. The emotion lexicon is publicly available at https://github.com/Ahasannn/BEmoLex-Bangla{\_}Emotion{\_}Lexicon",
}
| Multilevel textual emotion classification involves the extraction of emotions from text data, a task that has seen significant progress in high resource languages. However, resource-constrained languages like Bangla have received comparatively less attention in the field of emotion classification. Furthermore, the availability of a comprehensive and accurate emotion lexicon specifically designed for the Bangla language is limited. In this paper, we present a hybrid model that combines lexicon features with transformers for multilabel emotion classification in the Bangla language. We have developed a comprehensive Bangla emotion lexicon consisting of 5336 carefully curated lexicons across nine emotion categories. We experimented with pre-trained transformers including mBERT, XLM-R, BanglishBERT, and BanglaBERT on the EmoNaBa (Islam et al., 2022) dataset. By integrating lexicon features from our emotion lexicon, we evaluate the performance of these transformers in emotion detection tasks. The results demonstrate that incorporating lexicon features significantly improves the performance of transformers. Among the evaluated models, our hybrid approach achieves the highest performance using BanglaBERT(large) (Bhattacharjee et al., 2022) as the pre-trained transformer along with our emotion lexicon, achieving an impressive weighted F1 score of 82.73{\%}. The emotion lexicon is publicly available at https://github.com/Ahasannn/BEmoLex-Bangla{\_}Emotion{\_}Lexicon | [
"Kabir, Ahasan",
"Roy, Animesh",
"Taheri, Zaima"
] | BEmoLexBERT: A Hybrid Model for Multilabel Textual Emotion Classification in Bangla by Combining Transformers with Lexicon Features | banglalp-1.7 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
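A minimal sketch of the hybrid idea described above, under stated assumptions: per-category lexicon-match counts are concatenated to a BanglaBERT sentence embedding before a linear multilabel head. The three-entry lexicon is a hypothetical stand-in for the paper's 5,336-entry, nine-category resource, and the fusion details are an assumption rather than the authors' exact architecture.

```python
# Sketch: fuse transformer [CLS] embedding with lexicon-count features.
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModel

# Hypothetical placeholder entries; the real lexicon has nine categories.
LEXICON = {"joy": {"খুশি"}, "anger": {"রাগ"}, "sadness": {"দুঃখ"}}

tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglabert")
encoder = AutoModel.from_pretrained("csebuetnlp/banglabert")

class HybridEmotionClassifier(nn.Module):
    def __init__(self, n_labels: int = 9):
        super().__init__()
        self.head = nn.Linear(encoder.config.hidden_size + len(LEXICON), n_labels)

    def forward(self, text: str) -> torch.Tensor:
        enc = tokenizer(text, return_tensors="pt", truncation=True)
        cls = encoder(**enc).last_hidden_state[:, 0]                 # [1, hidden]
        counts = torch.tensor([[float(sum(w in text for w in words))
                                for words in LEXICON.values()]])     # [1, |lexicon|]
        return self.head(torch.cat([cls, counts], dim=-1))          # multilabel logits
```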
https://aclanthology.org/2023.banglalp-1.8.bib | https://aclanthology.org/2023.banglalp-1.8/ | @inproceedings{thapa-etal-2023-assessing,
title = "Assessing Political Inclination of {B}angla Language Models",
author = "Thapa, Surendrabikram and
Maratha, Ashwarya and
Hasib, Khan Md and
Nasim, Mehwish and
Naseem, Usman",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.8",
doi = "10.18653/v1/2023.banglalp-1.8",
pages = "62--71",
abstract = "Natural language processing has advanced with AI-driven language models (LMs), that are applied widely from text generation to question answering. These models are pre-trained on a wide spectrum of data sources, enhancing accuracy and responsiveness. However, this process inadvertently entails the absorption of a diverse spectrum of viewpoints inherent within the training data. Exploring political leaning within LMs due to such viewpoints remains a less-explored domain. In the context of a low-resource language like Bangla, this area of research is nearly non-existent. To bridge this gap, we comprehensively analyze biases present in Bangla language models, specifically focusing on social and economic dimensions. Our findings reveal the inclinations of various LMs, which will provide insights into ethical considerations and limitations associated with deploying Bangla LMs.",
}
| Natural language processing has advanced with AI-driven language models (LMs) that are applied widely, from text generation to question answering. These models are pre-trained on a wide spectrum of data sources, enhancing accuracy and responsiveness. However, this process inadvertently entails the absorption of a diverse spectrum of viewpoints inherent within the training data. Exploring political leaning within LMs due to such viewpoints remains a less-explored domain. In the context of a low-resource language like Bangla, this area of research is nearly non-existent. To bridge this gap, we comprehensively analyze biases present in Bangla language models, specifically focusing on social and economic dimensions. Our findings reveal the inclinations of various LMs, which will provide insights into ethical considerations and limitations associated with deploying Bangla LMs. | [
"Thapa, Surendrabikram",
"Maratha, Ashwarya",
"Hasib, Khan Md",
"Nasim, Mehwish",
"Naseem, Usman"
] | Assessing Political Inclination of Bangla Language Models | banglalp-1.8 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.9.bib | https://aclanthology.org/2023.banglalp-1.9/ | @inproceedings{saha-etal-2023-vio,
title = "Vio-Lens: A Novel Dataset of Annotated Social Network Posts Leading to Different Forms of Communal Violence and its Evaluation",
author = "Saha, Sourav and
Junaed, Jahedul Alam and
Saleki, Maryam and
Sen Sharma, Arnab and
Rifat, Mohammad Rashidujjaman and
Rahouti, Mohamed and
Ahmed, Syed Ishtiaque and
Mohammed, Nabeel and
Amin, Mohammad Ruhul",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.9",
doi = "10.18653/v1/2023.banglalp-1.9",
pages = "72--84",
abstract = "This paper presents a computational approach for creating a dataset on communal violence in the context of Bangladesh and West Bengal of India and benchmark evaluation. In recent years, social media has been used as a weapon by factions of different religions and backgrounds to incite hatred, resulting in physical communal violence and causing death and destruction. To prevent such abusive use of online platforms, we propose a framework for classifying online posts using an adaptive question-based approach. We collected more than 168,000 YouTube comments from a set of manually selected videos known for inciting violence in Bangladesh and West Bengal. Using both unsupervised and later semi-supervised topic modeling methods on those unstructured data, we discovered the major word clusters to interpret the related topics of peace and violence. Topic words were later used to select 20,142 posts related to peace and violence of which we annotated a total of 6,046 posts. Finally, we applied different modeling techniques based on linguistic features, and sentence transformers to benchmark the labeled dataset with the best-performing model reaching {\textasciitilde}71{\%} macro F1 score.",
}
| This paper presents a computational approach for creating a dataset on communal violence in the context of Bangladesh and West Bengal, India, along with a benchmark evaluation. In recent years, social media has been used as a weapon by factions of different religions and backgrounds to incite hatred, resulting in physical communal violence and causing death and destruction. To prevent such abusive use of online platforms, we propose a framework for classifying online posts using an adaptive question-based approach. We collected more than 168,000 YouTube comments from a set of manually selected videos known for inciting violence in Bangladesh and West Bengal. Using both unsupervised and later semi-supervised topic modeling methods on those unstructured data, we discovered the major word clusters to interpret the related topics of peace and violence. Topic words were later used to select 20,142 posts related to peace and violence, of which we annotated a total of 6,046 posts. Finally, we applied different modeling techniques based on linguistic features and sentence transformers to benchmark the labeled dataset, with the best-performing model reaching a {\textasciitilde}71{\%} macro F1 score. | [
"Saha, Sourav",
"Junaed, Jahedul Alam",
"Saleki, Maryam",
"Sen Sharma, Arnab",
"Rifat, Mohammad Rashidujjaman",
"Rahouti, Mohamed",
"Ahmed, Syed Ishtiaque",
"Mohammed, Nabeel",
"Amin, Mohammad Ruhul"
] | Vio-Lens: A Novel Dataset of Annotated Social Network Posts Leading to Different Forms of Communal Violence and its Evaluation | banglalp-1.9 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.10.bib | https://aclanthology.org/2023.banglalp-1.10/ | @inproceedings{khan-etal-2023-banglachq,
title = "{B}angla{CHQ}-Summ: An Abstractive Summarization Dataset for Medical Queries in {B}angla Conversational Speech",
author = "Khan, Alvi and
Kamal, Fida and
Chowdhury, Mohammad Abrar and
Ahmed, Tasnim and
Laskar, Md Tahmid Rahman and
Ahmed, Sabbir",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.10",
doi = "10.18653/v1/2023.banglalp-1.10",
pages = "85--93",
abstract = "Online health consultation is steadily gaining popularity as a platform for patients to discuss their medical health inquiries, known as Consumer Health Questions (CHQs). The emergence of the COVID-19 pandemic has also led to a surge in the use of such platforms, creating a significant burden for the limited number of healthcare professionals attempting to respond to the influx of questions. Abstractive text summarization is a promising solution to this challenge, since shortening CHQs to only the information essential to answering them reduces the amount of time spent parsing unnecessary information. The summarization process can also serve as an intermediate step towards the eventual development of an automated medical question-answering system. This paper presents {`}BanglaCHQ-Summ{'}, the first CHQ summarization dataset for the Bangla language, consisting of 2,350 question-summary pairs. It is benchmarked on state-of-the-art Bangla and multilingual text generation models, with the best-performing model, BanglaT5, achieving a ROUGE-L score of 48.35{\%}. In addition, we address the limitations of existing automatic metrics for summarization by conducting a human evaluation. The dataset and all relevant code used in this work have been made publicly available.",
}
| Online health consultation is steadily gaining popularity as a platform for patients to discuss their medical health inquiries, known as Consumer Health Questions (CHQs). The emergence of the COVID-19 pandemic has also led to a surge in the use of such platforms, creating a significant burden for the limited number of healthcare professionals attempting to respond to the influx of questions. Abstractive text summarization is a promising solution to this challenge, since shortening CHQs to only the information essential to answering them reduces the amount of time spent parsing unnecessary information. The summarization process can also serve as an intermediate step towards the eventual development of an automated medical question-answering system. This paper presents {`}BanglaCHQ-Summ{'}, the first CHQ summarization dataset for the Bangla language, consisting of 2,350 question-summary pairs. It is benchmarked on state-of-the-art Bangla and multilingual text generation models, with the best-performing model, BanglaT5, achieving a ROUGE-L score of 48.35{\%}. In addition, we address the limitations of existing automatic metrics for summarization by conducting a human evaluation. The dataset and all relevant code used in this work have been made publicly available. | [
"Khan, Alvi",
"Kamal, Fida",
"Chowdhury, Mohammad Abrar",
"Ahmed, Tasnim",
"Laskar, Md Tahmid Rahman",
"Ahmed, Sabbir"
] | BanglaCHQ-Summ: An Abstractive Summarization Dataset for Medical Queries in Bangla Conversational Speech | banglalp-1.10 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.11.bib | https://aclanthology.org/2023.banglalp-1.11/ | @inproceedings{fahim-etal-2023-contextual,
title = "Contextual {B}angla Neural Stemmer: Finding Contextualized Root Word Representations for {B}angla Words",
author = "Fahim, Md and
Ali, Amin Ahsan and
Amin, M Ashraful and
Rahman, Akmmahbubur",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.11",
doi = "10.18653/v1/2023.banglalp-1.11",
pages = "94--103",
abstract = "Stemmers are commonly used in NLP to reduce words to their root form. However, this process may discard important information and yield incorrect root forms, affecting the accuracy of NLP tasks. To address these limitations, we propose a Contextual Bangla Neural Stemmer for Bangla language to enhance word representations. Our method involves splitting words into characters within the Neural Stemming Block, obtaining vector representations for both stem words and unknown vocabulary words. A loss function aligns these representations with Word2Vec representations, followed by contextual word representations from a Universal Transformer encoder. Mean Pooling generates sentence-level representations that are aligned with BanglaBERT{'}s representations using a MLP layer. The proposed model also tries to build good representations for out-of-vocabulary (OOV) words. Experiments with our model on five Bangla datasets shows around 5{\%} average improvement over the vanilla approach. Notably, our method avoids BERT retraining, focusing on root word detection and addressing OOV and sub-word issues. By incorporating our approach into a large corpus-based Language Model, we expect further improvements in aspects like explainability.",
}
| Stemmers are commonly used in NLP to reduce words to their root form. However, this process may discard important information and yield incorrect root forms, affecting the accuracy of NLP tasks. To address these limitations, we propose a Contextual Bangla Neural Stemmer for the Bangla language to enhance word representations. Our method involves splitting words into characters within the Neural Stemming Block, obtaining vector representations for both stem words and unknown vocabulary words. A loss function aligns these representations with Word2Vec representations, followed by contextual word representations from a Universal Transformer encoder. Mean Pooling generates sentence-level representations that are aligned with BanglaBERT{'}s representations using an MLP layer. The proposed model also tries to build good representations for out-of-vocabulary (OOV) words. Experiments with our model on five Bangla datasets show around 5{\%} average improvement over the vanilla approach. Notably, our method avoids BERT retraining, focusing on root word detection and addressing OOV and sub-word issues. By incorporating our approach into a large corpus-based Language Model, we expect further improvements in aspects like explainability. | [
"Fahim, Md",
"Ali, Amin Ahsan",
"Amin, M Ashraful",
"Rahman, Akmmahbubur"
] | Contextual Bangla Neural Stemmer: Finding Contextualized Root Word Representations for Bangla Words | banglalp-1.11 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.12.bib | https://aclanthology.org/2023.banglalp-1.12/ | @inproceedings{dehan-etal-2023-investigating,
title = "Investigating the Effectiveness of Graph-based Algorithm for {B}angla Text Classification",
author = "Dehan, Farhan and
Fahim, Md and
Ali, Amin Ahsan and
Amin, M Ashraful and
Rahman, Akmmahbubur",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.12",
doi = "10.18653/v1/2023.banglalp-1.12",
pages = "104--116",
abstract = "In this study, we examine and analyze the behavior of several graph-based models for Bangla text classification tasks. Graph-based algorithms create heterogeneous graphs from text data. Each node represents either a word or a document, and each edge indicates relationship between any two words or word and document. We applied the BERT model and different graph-based models including TextGCN, GAT, BertGAT, and BertGCN on five different datasets including SentNoB, Sarcasm detection, BanFakeNews, Hate speech detection, and Emotion detection datasets for Bangla text. BERT{'}s model bested the TextGCN and the GAT models by a large difference in terms of accuracy, Macro F1 score, and weighted F1 score. BertGCN and BertGAT are shown to outperform standalone graph models and BERT model. BertGAT excelled in the Emotion detection dataset and achieved a 1{\%}-2{\%} performance boost in Sarcasm detection, Hate speech detection, and BanFakeNews datasets from BERT{'}s performance. Whereas, BertGCN outperformed BertGAT by 1{\%} for SetNoB, and BanFakeNews datasets while beating BertGAT by 2{\%} for Sarcasm detection, Hate Speech, and Emotion detection datasets. We also examined different variations in graph structure and analyzed their effects.",
}
| In this study, we examine and analyze the behavior of several graph-based models for Bangla text classification tasks. Graph-based algorithms create heterogeneous graphs from text data. Each node represents either a word or a document, and each edge indicates a relationship between two words or between a word and a document. We applied the BERT model and different graph-based models, including TextGCN, GAT, BertGAT, and BertGCN, on five different datasets: SentNoB, Sarcasm detection, BanFakeNews, Hate speech detection, and Emotion detection datasets for Bangla text. The BERT model bested the TextGCN and GAT models by a large margin in terms of accuracy, Macro F1 score, and weighted F1 score. BertGCN and BertGAT are shown to outperform both the standalone graph models and the BERT model. BertGAT excelled on the Emotion detection dataset and achieved a 1{\%}-2{\%} performance boost over BERT on the Sarcasm detection, Hate speech detection, and BanFakeNews datasets. Meanwhile, BertGCN outperformed BertGAT by 1{\%} on the SentNoB and BanFakeNews datasets and by 2{\%} on the Sarcasm detection, Hate Speech, and Emotion detection datasets. We also examined different variations in graph structure and analyzed their effects. | [
"Dehan, Farhan",
"Fahim, Md",
"Ali, Amin Ahsan",
"Amin, M Ashraful",
"Rahman, Akmmahbubur"
] | Investigating the Effectiveness of Graph-based Algorithm for Bangla Text Classification | banglalp-1.12 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
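As a rough illustration of the heterogeneous text graph these models operate on, the sketch below builds document and word nodes with document-word edges weighted by TF-IDF. TextGCN additionally adds word-word PMI edges, omitted here for brevity; the two-document corpus is a placeholder.

```python
# Sketch: a TextGCN-style heterogeneous word-document graph.
import networkx as nx
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["bangla text one", "bangla text two"]  # placeholder corpus

vectorizer = TfidfVectorizer()
tfidf = vectorizer.fit_transform(docs)          # sparse doc-word matrix
words = vectorizer.get_feature_names_out()

graph = nx.Graph()
graph.add_nodes_from((f"doc_{i}" for i in range(len(docs))), kind="document")
graph.add_nodes_from(words, kind="word")

# One weighted edge per nonzero doc-word TF-IDF entry.
rows, cols = tfidf.nonzero()
for i, j in zip(rows, cols):
    graph.add_edge(f"doc_{i}", words[j], weight=float(tfidf[i, j]))
```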
https://aclanthology.org/2023.banglalp-1.13.bib | https://aclanthology.org/2023.banglalp-1.13/ | @inproceedings{monsur-etal-2023-synthnid,
title = "{S}ynth{NID}: Synthetic Data to Improve End-to-end {B}angla Document Key Information Extraction",
author = "Monsur, Syed Mostofa and
Kabir, Shariar and
Chowdhury, Sakib",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.13",
doi = "10.18653/v1/2023.banglalp-1.13",
pages = "117--123",
abstract = "End-to-end Document Key Information Extraction models require a lot of compute and labeled data to perform well on real datasets. This is particularly challenging for low-resource languages like Bangla where domain-specific multimodal document datasets are scarcely available. In this paper, we have introduced SynthNID, a system to generate domain-specific document image data for training OCR-less end-to-end Key Information Extraction systems. We show the generated data improves the performance of the extraction model on real datasets and the system is easily extendable to generate other types of scanned documents for a wide range of document understanding tasks. The code for generating synthetic data is available at https://github.com/dv66/synthnid",
}
| End-to-end Document Key Information Extraction models require a lot of compute and labeled data to perform well on real datasets. This is particularly challenging for low-resource languages like Bangla where domain-specific multimodal document datasets are scarcely available. In this paper, we have introduced SynthNID, a system to generate domain-specific document image data for training OCR-less end-to-end Key Information Extraction systems. We show the generated data improves the performance of the extraction model on real datasets and the system is easily extendable to generate other types of scanned documents for a wide range of document understanding tasks. The code for generating synthetic data is available at https://github.com/dv66/synthnid | [
"Monsur, Syed Mostofa",
"Kabir, Shariar",
"Chowdhury, Sakib"
] | SynthNID: Synthetic Data to Improve End-to-end Bangla Document Key Information Extraction | banglalp-1.13 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.14.bib | https://aclanthology.org/2023.banglalp-1.14/ | @inproceedings{oshin-etal-2023-bateclacor,
title = "{B}a{TEC}la{C}or: A Novel Dataset for {B}angla Text Error Classification and Correction",
author = "Oshin, Nabilah and
Hoque, Syed and
Fahim, Md and
Ali, Amin Ahsan and
Amin, M Ashraful and
Rahman, Akmmahbubur",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.14",
doi = "10.18653/v1/2023.banglalp-1.14",
pages = "124--135",
abstract = "In the context of the dynamic realm of Bangla communication, online users are often prone to bending the language or making errors due to various factors. We attempt to detect, categorize, and correct those errors by employing several machine learning and deep learning models. To contribute to the preservation and authenticity of the Bangla language, we introduce a meticulously categorized organic dataset encompassing 10,000 authentic Bangla comments from a commonly used social media platform. Through rigorous comparative analysis of distinct models, our study highlights BanglaBERT{'}s superiority in error-category classification and underscores the effectiveness of BanglaT5 for text correction. BanglaBERT achieves accuracy of 79.1{\%} and 74.1{\%} for binary and multiclass error-category classification while the BanglaBERT is fine-tuned and tested with our proposed dataset. Moreover, BanglaT5 achieves the best Rouge-L score (0.8459) when BanglaT5 is fine-tuned and tested with our corrected ground truths. Beyond algorithmic exploration, this endeavor represents a significant stride in enhancing the quality of digital discourse in the Bangla-speaking community, fostering linguistic precision and coherence in online interactions. The dataset and code is available at https://github.com/SyedT1/BaTEClaCor.",
}
| In the context of the dynamic realm of Bangla communication, online users are often prone to bending the language or making errors due to various factors. We attempt to detect, categorize, and correct those errors by employing several machine learning and deep learning models. To contribute to the preservation and authenticity of the Bangla language, we introduce a meticulously categorized organic dataset encompassing 10,000 authentic Bangla comments from a commonly used social media platform. Through rigorous comparative analysis of distinct models, our study highlights BanglaBERT{'}s superiority in error-category classification and underscores the effectiveness of BanglaT5 for text correction. BanglaBERT achieves accuracies of 79.1{\%} and 74.1{\%} for binary and multiclass error-category classification when fine-tuned and tested on our proposed dataset. Moreover, BanglaT5 achieves the best Rouge-L score (0.8459) when fine-tuned and tested on our corrected ground truths. Beyond algorithmic exploration, this endeavor represents a significant stride in enhancing the quality of digital discourse in the Bangla-speaking community, fostering linguistic precision and coherence in online interactions. The dataset and code are available at https://github.com/SyedT1/BaTEClaCor. | [
"Oshin, Nabilah",
"Hoque, Syed",
"Fahim, Md",
"Ali, Amin Ahsan",
"Amin, M Ashraful",
"Rahman, Akmmahbubur"
] | BaTEClaCor: A Novel Dataset for Bangla Text Error Classification and Correction | banglalp-1.14 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.15.bib | https://aclanthology.org/2023.banglalp-1.15/ | @inproceedings{li-etal-2023-crosslingual,
title = "Crosslingual Retrieval Augmented In-context Learning for {B}angla",
author = "Li, Xiaoqian and
Nie, Ercong and
Liang, Sheng",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.15",
doi = "10.18653/v1/2023.banglalp-1.15",
pages = "136--151",
abstract = "The promise of Large Language Models (LLMs) in Natural Language Processing has often been overshadowed by their limited performance in low-resource languages such as Bangla. To address this, our paper presents a pioneering approach that utilizes cross-lingual retrieval augmented in-context learning. By strategically sourcing semantically similar prompts from high-resource language, we enable multilingual pretrained language models (MPLMs), especially the generative model BLOOMZ, to successfully boost performance on Bangla tasks. Our extensive evaluation highlights that the cross-lingual retrieval augmented prompts bring steady improvements to MPLMs over the zero-shot performance.",
}
| The promise of Large Language Models (LLMs) in Natural Language Processing has often been overshadowed by their limited performance in low-resource languages such as Bangla. To address this, our paper presents a pioneering approach that utilizes cross-lingual retrieval augmented in-context learning. By strategically sourcing semantically similar prompts from a high-resource language, we enable multilingual pretrained language models (MPLMs), especially the generative model BLOOMZ, to successfully boost performance on Bangla tasks. Our extensive evaluation highlights that the cross-lingual retrieval augmented prompts bring steady improvements to MPLMs over zero-shot performance. | [
"Li, Xiaoqian",
"Nie, Ercong",
"Liang, Sheng"
] | Crosslingual Retrieval Augmented In-context Learning for Bangla | banglalp-1.15 | 2311.00587 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
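A minimal sketch of the retrieval step under stated assumptions: a multilingual sentence encoder scores a pool of high-resource (English) labeled examples against the Bangla query, and the nearest neighbors become in-context demonstrations. The example pool, prompt format, and encoder choice are all illustrative; the resulting prompt would then be passed to a generative MPLM such as BLOOMZ.

```python
# Sketch: retrieve semantically similar English demos for a Bangla query.
from sentence_transformers import SentenceTransformer, util

retriever = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")

english_pool = [("I loved this movie.", "positive"),
                ("The service was terrible.", "negative")]  # hypothetical labeled pool

def build_prompt(bangla_query: str, k: int = 1) -> str:
    pool_emb = retriever.encode([t for t, _ in english_pool], convert_to_tensor=True)
    query_emb = retriever.encode(bangla_query, convert_to_tensor=True)
    top = util.cos_sim(query_emb, pool_emb)[0].topk(k).indices.tolist()
    demos = "\n".join(f"Text: {english_pool[i][0]}\nLabel: {english_pool[i][1]}"
                      for i in top)
    return f"{demos}\nText: {bangla_query}\nLabel:"
```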
https://aclanthology.org/2023.banglalp-1.16.bib | https://aclanthology.org/2023.banglalp-1.16/ | @inproceedings{nandi-etal-2023-pseudo,
title = "Pseudo-Labeling for Domain-Agnostic {B}angla Automatic Speech Recognition",
author = "Nandi, Rabindra Nath and
Menon, Mehadi and
Muntasir, Tareq and
Sarker, Sagor and
Muhtaseem, Quazi Sarwar and
Islam, Md. Tariqul and
Chowdhury, Shammur and
Alam, Firoj",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.16",
doi = "10.18653/v1/2023.banglalp-1.16",
pages = "152--162",
abstract = "One of the major challenges for developing automatic speech recognition (ASR) for low-resource languages is the limited access to labeled data with domain-specific variations. In this study, we propose a pseudo-labeling approach to develop a large-scale domain-agnostic ASR dataset. With the proposed methodology, we developed a 20k+ hours labeled Bangla speech dataset covering diverse topics, speaking styles, dialects, noisy environments, and conversational scenarios. We then exploited the developed corpus to design a conformer-based ASR system. We benchmarked the trained ASR with publicly available datasets and compared it with other available models. To investigate the efficacy, we designed and developed a human-annotated domain-agnostic test set composed of news, telephony, and conversational data among others. Our results demonstrate the efficacy of the model trained on psuedo-label data for the designed test-set along with publicly-available Bangla datasets. The experimental resources will be publicly available.https://github.com/hishab-nlp/Pseudo-Labeling-for-Domain-Agnostic-Bangla-ASR",
}
| One of the major challenges for developing automatic speech recognition (ASR) for low-resource languages is the limited access to labeled data with domain-specific variations. In this study, we propose a pseudo-labeling approach to develop a large-scale domain-agnostic ASR dataset. With the proposed methodology, we developed a 20k+ hour labeled Bangla speech dataset covering diverse topics, speaking styles, dialects, noisy environments, and conversational scenarios. We then exploited the developed corpus to design a conformer-based ASR system. We benchmarked the trained ASR with publicly available datasets and compared it with other available models. To investigate the efficacy, we designed and developed a human-annotated domain-agnostic test set composed of news, telephony, and conversational data, among others. Our results demonstrate the efficacy of the model trained on pseudo-label data for the designed test set along with publicly available Bangla datasets. The experimental resources will be publicly available: https://github.com/hishab-nlp/Pseudo-Labeling-for-Domain-Agnostic-Bangla-ASR | [
"N",
"i, Rabindra Nath",
"Menon, Mehadi",
"Muntasir, Tareq",
"Sarker, Sagor",
"Muhtaseem, Quazi Sarwar",
"Islam, Md. Tariqul",
"Chowdhury, Shammur",
"Alam, Firoj"
] | Pseudo-Labeling for Domain-Agnostic Bangla Automatic Speech Recognition | banglalp-1.16 | 2311.03196 | [
"https://github.com/hishab-nlp/pseudo-labeling-for-domain-agnostic-bangla-asr"
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
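A hedged sketch of the core pseudo-labeling loop implied by the abstract: a seed recognizer transcribes unlabeled audio, and only confident hypotheses are kept as training labels. The DummySeedASR class, its transcribe interface, and the 0.9 threshold are illustrative stand-ins, not the authors' actual pipeline.

```python
# Sketch: confidence-filtered pseudo-labeling for ASR training data.
from dataclasses import dataclass

@dataclass
class Hypothesis:
    text: str
    confidence: float

class DummySeedASR:
    """Placeholder for a trained seed ASR model."""
    def transcribe(self, audio_path: str) -> Hypothesis:
        return Hypothesis(text="<transcript>", confidence=0.95)

def pseudo_label(audio_paths, model, threshold: float = 0.9):
    labeled = []
    for path in audio_paths:
        hyp = model.transcribe(path)
        if hyp.confidence >= threshold:  # keep only confident transcripts
            labeled.append((path, hyp.text))
    return labeled

corpus = pseudo_label(["clip_0001.wav"], DummySeedASR())
```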
https://aclanthology.org/2023.banglalp-1.17.bib | https://aclanthology.org/2023.banglalp-1.17/ | @inproceedings{saha-nanda-2023-banglanlp,
title = "{B}angla{NLP} at {BLP}-2023 Task 1: Benchmarking different Transformer Models for Violence Inciting Text Detection in {B}angla",
author = "Saha, Saumajit and
Nanda, Albert",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.17",
doi = "10.18653/v1/2023.banglalp-1.17",
pages = "163--167",
abstract = "This paper presents the system that we have developed while solving this shared task on violence inciting text detection in Bangla. We explain both the traditional and the recent approaches that we have used to make our models learn. Our proposed system helps to classify if the given text contains any threat. We studied the impact of data augmentation when there is a limited dataset available. Our quantitative results show that finetuning a multilingual-e5-base model performed the best in our task compared to other transformer-based architectures. We obtained a macro F1 of 68.11{\%} in the test set and our performance in this shared task is ranked at 23 in the leaderboard.",
}
| This paper presents the system that we have developed while solving this shared task on violence-inciting text detection in Bangla. We explain both the traditional and the recent approaches that we have used to make our models learn. Our proposed system helps to classify whether the given text contains any threat. We studied the impact of data augmentation when only a limited dataset is available. Our quantitative results show that fine-tuning a multilingual-e5-base model performed the best in our task compared to other transformer-based architectures. We obtained a macro F1 of 68.11{\%} on the test set, and our submission ranked 23rd on the leaderboard. | [
"Saha, Saumajit",
"N",
"a, Albert"
] | BanglaNLP at BLP-2023 Task 1: Benchmarking different Transformer Models for Violence Inciting Text Detection in Bangla | banglalp-1.17 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
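A minimal sketch of fine-tuning multilingual-e5-base as a three-way classifier (non-violence / passive / direct), matching the abstract's best-performing configuration; the dataset handling and hyperparameters are illustrative assumptions, not the team's exact recipe.

```python
# Sketch: fine-tuning multilingual-e5-base for 3-way violence classification.
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("intfloat/multilingual-e5-base")
model = AutoModelForSequenceClassification.from_pretrained(
    "intfloat/multilingual-e5-base", num_labels=3)

def encode(batch):
    # Applied with dataset.map(encode, batched=True) on a HF dataset.
    return tokenizer(batch["text"], truncation=True, max_length=256)

args = TrainingArguments(output_dir="e5-vitd", num_train_epochs=3,
                         per_device_train_batch_size=16, learning_rate=2e-5)

# train_dataset / eval_dataset are assumed to be tokenized datasets with
# "input_ids", "attention_mask", and "labels" columns:
# trainer = Trainer(model=model, args=args,
#                   train_dataset=train_dataset, eval_dataset=eval_dataset)
# trainer.train()
```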
https://aclanthology.org/2023.banglalp-1.18.bib | https://aclanthology.org/2023.banglalp-1.18/ | @inproceedings{alamgir-haque-2023-team,
title = "Team {C}entre{B}ack at {BLP}-2023 Task 1: Analyzing performance of different machine-learning based methods for detecting violence-inciting texts in {B}angla",
author = "Alamgir, Refaat Mohammad and
Haque, Amira",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.18",
doi = "10.18653/v1/2023.banglalp-1.18",
pages = "168--173",
abstract = "Like all other things in the world, rapid growth of social media comes with its own merits and demerits. While it is providing a platform for the world to easily communicate with each other, on the other hand the room it has opened for hate speech has led to a significant impact on the well-being of the users. These types of texts have the potential to result in violence as people with similar sentiments may be inspired to commit violent acts after coming across such comments. Hence, the need for a system to detect and filter such texts is increasing drastically with time. This paper summarizes our experimental results and findings for the shared task on The First Bangla Language Processing Workshop at EMNLP 2023 - Singapore. We participated in the shared task 1 : Violence Inciting Text Detection (VITD). The objective was to build a system that classifies the given comments as either non-violence, passive violence or direct violence. We tried out different techniques, such as fine-tuning language models, few-shot learning with SBERT and a 2 stage training where we performed binary violence/non-violence classification first, then did a fine-grained classification of direct/passive violence. We found that the best macro-F1 score of 69.39 was yielded by fine-tuning the BanglaBERT language model and we attained a position of 21 among 27 teams in the final leaderboard. After the competition ended, we found that with some preprocessing of the dataset, we can get the score up to 71.68.",
}
| Like all other things in the world, the rapid growth of social media comes with its own merits and demerits. While it provides a platform for the world to communicate easily, the room it has opened for hate speech has had a significant impact on the well-being of users. These types of texts have the potential to result in violence, as people with similar sentiments may be inspired to commit violent acts after coming across such comments. Hence, the need for a system to detect and filter such texts is increasing drastically with time. This paper summarizes our experimental results and findings for the shared task at the First Bangla Language Processing Workshop at EMNLP 2023, Singapore. We participated in shared task 1: Violence Inciting Text Detection (VITD). The objective was to build a system that classifies the given comments as non-violence, passive violence, or direct violence. We tried out different techniques, such as fine-tuning language models, few-shot learning with SBERT, and a two-stage training scheme in which we performed binary violence/non-violence classification first and then a fine-grained classification of direct/passive violence. We found that the best macro-F1 score of 69.39 was yielded by fine-tuning the BanglaBERT language model, and we attained position 21 among 27 teams on the final leaderboard. After the competition ended, we found that with some preprocessing of the dataset, we can raise the score to 71.68. | [
"Alamgir, Refaat Mohammad",
"Haque, Amira"
] | Team CentreBack at BLP-2023 Task 1: Analyzing performance of different machine-learning based methods for detecting violence-inciting texts in Bangla | banglalp-1.18 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.19.bib | https://aclanthology.org/2023.banglalp-1.19/ | @inproceedings{das-etal-2023-emptymind,
title = "{E}mpty{M}ind at {BLP}-2023 Task 1: A Transformer-based Hierarchical-{BERT} Model for {B}angla Violence-Inciting Text Detection",
author = "Das, Udoy and
Fatema, Karnis and
Mia, Md Ayon and
Yahan, Mahshar and
Mowla, Md Sajidul and
Ullah, Md Fayez and
Sarker, Arpita and
Murad, Hasan",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.19",
doi = "10.18653/v1/2023.banglalp-1.19",
pages = "174--178",
abstract = "The availability of the internet has made it easier for people to share information via social media. People with ill intent can use this widespread availability of the internet to share violent content easily. A significant portion of social media users prefer using their regional language which makes it quite difficult to detect violence-inciting text. The objective of our research work is to detect Bangla violence-inciting text from social media content. A shared task on Bangla violence-inciting text detection has been organized by the First Bangla Language Processing Workshop (BLP) co-located with EMNLP, where the organizer has provided a dataset named VITD with three categories: nonviolence, passive violence, and direct violence text. To accomplish this task, we have implemented three machine learning models (RF, SVM, XGBoost), two deep learning models (LSTM, BiLSTM), and two transformer-based models (BanglaBERT, Hierarchical-BERT). We have conducted a comparative study among different models by training and evaluating each model on the VITD dataset. We have found that Hierarchical-BERT has provided the best result with an F1 score of 0.73797 on the test set and ranked 9th position among all participants in the shared task 1 of the BLP Workshop co-located with EMNLP 2023.",
}
| The availability of the internet has made it easier for people to share information via social media. People with ill intent can use this widespread availability of the internet to share violent content easily. A significant portion of social media users prefer using their regional language, which makes it quite difficult to detect violence-inciting text. The objective of our research work is to detect Bangla violence-inciting text in social media content. A shared task on Bangla violence-inciting text detection has been organized by the First Bangla Language Processing Workshop (BLP) co-located with EMNLP, where the organizers provided a dataset named VITD with three categories: non-violence, passive violence, and direct violence text. To accomplish this task, we have implemented three machine learning models (RF, SVM, XGBoost), two deep learning models (LSTM, BiLSTM), and two transformer-based models (BanglaBERT, Hierarchical-BERT). We have conducted a comparative study among the different models by training and evaluating each model on the VITD dataset. We have found that Hierarchical-BERT provided the best result, with an F1 score of 0.73797 on the test set, and ranked 9th among all participants in shared task 1 of the BLP Workshop co-located with EMNLP 2023. | [
"Das, Udoy",
"Fatema, Karnis",
"Mia, Md Ayon",
"Yahan, Mahshar",
"Mowla, Md Sajidul",
"Ullah, Md Fayez",
"Sarker, Arpita",
"Murad, Hasan"
] | EmptyMind at BLP-2023 Task 1: A Transformer-based Hierarchical-BERT Model for Bangla Violence-Inciting Text Detection | banglalp-1.19 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.20.bib | https://aclanthology.org/2023.banglalp-1.20/ | @inproceedings{raihan-etal-2023-nlpbdpatriots,
title = "nlp{BD}patriots at {BLP}-2023 Task 1: Two-Step Classification for Violence Inciting Text Detection in {B}angla - Leveraging Back-Translation and Multilinguality",
author = "Raihan, Md Nishat and
Goswami, Dhiman and
Puspo, Sadiya Sayara Chowdhury and
Zampieri, Marcos",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.20",
doi = "10.18653/v1/2023.banglalp-1.20",
pages = "179--184",
abstract = "In this paper, we discuss the nlpBDpatriots entry to the shared task on Violence Inciting Text Detection (VITD) organized as part of the first workshop on Bangla Language Processing (BLP) co-located with EMNLP. The aim of this task is to identify and classify the violent threats, that provoke further unlawful violent acts. Our best-performing approach for the task is two-step classification using back translation and multilinguality which ranked $6^{th}$ out of 27 teams with a macro F1 score of 0.74.",
}
| In this paper, we discuss the nlpBDpatriots entry to the shared task on Violence Inciting Text Detection (VITD) organized as part of the first workshop on Bangla Language Processing (BLP) co-located with EMNLP. The aim of this task is to identify and classify violent threats that provoke further unlawful violent acts. Our best-performing approach for the task is a two-step classification using back-translation and multilinguality, which ranked $6^{th}$ out of 27 teams with a macro F1 score of 0.74. | [
"Raihan, Md Nishat",
"Goswami, Dhiman",
"Puspo, Sadiya Sayara Chowdhury",
"Zampieri, Marcos"
] | nlpBDpatriots at BLP-2023 Task 1: Two-Step Classification for Violence Inciting Text Detection in Bangla - Leveraging Back-Translation and Multilinguality | banglalp-1.20 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
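As a rough sketch of the back-translation step named in the abstract, the snippet below round-trips Bangla text through English to generate augmented paraphrases; the NLLB checkpoint is an assumed choice for illustration, not necessarily the translation system the team used.

```python
# Sketch: back-translation augmentation (Bangla -> English -> Bangla).
from transformers import pipeline

MODEL = "facebook/nllb-200-distilled-600M"  # assumed translation checkpoint
bn_to_en = pipeline("translation", model=MODEL, src_lang="ben_Beng", tgt_lang="eng_Latn")
en_to_bn = pipeline("translation", model=MODEL, src_lang="eng_Latn", tgt_lang="ben_Beng")

def back_translate(bangla_text: str) -> str:
    """Return a paraphrase of the input via an English pivot."""
    english = bn_to_en(bangla_text, max_length=256)[0]["translation_text"]
    return en_to_bn(english, max_length=256)[0]["translation_text"]
```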
https://aclanthology.org/2023.banglalp-1.21.bib | https://aclanthology.org/2023.banglalp-1.21/ | @inproceedings{ahmed-etal-2023-score,
title = "{S}core{\_}{I}s{A}ll{\_}{Y}ou{\_}{N}eed at {BLP}-2023 Task 1: A Hierarchical Classification Approach to Detect Violence Inciting Text using Transformers",
author = "Ahmed, Kawsar and
Osama, Md and
Islam, Md. Sirajul and
Islam, Md Taosiful and
Das, Avishek and
Hoque, Mohammed Moshiul",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.21",
doi = "10.18653/v1/2023.banglalp-1.21",
pages = "185--189",
abstract = "Violence-inciting text detection has become critical due to its significance in social media monitoring, online security, and the prevention of violent content. Developing an automatic text classification model for identifying violence in languages with limited resources, like Bangla, poses significant challenges due to the scarcity of resources and complex morphological structures. This work presents a transformer-based method that can classify Bangla texts into three violence classes: direct, passive, and non-violence. We leveraged transformer models, including BanglaBERT, XLM-R, and m-BERT, to develop a hierarchical classification model for the downstream task. In the first step, the BanglaBERT is employed to identify the presence of violence in the text. In the next step, the model classifies stem texts that incite violence as either direct or passive. The developed system scored 72.37 and ranked 14th among the participants.",
}
| Violence-inciting text detection has become critical due to its significance in social media monitoring, online security, and the prevention of violent content. Developing an automatic text classification model for identifying violence in languages with limited resources, like Bangla, poses significant challenges due to the scarcity of resources and complex morphological structures. This work presents a transformer-based method that can classify Bangla texts into three violence classes: direct, passive, and non-violence. We leveraged transformer models, including BanglaBERT, XLM-R, and m-BERT, to develop a hierarchical classification model for the downstream task. In the first step, the BanglaBERT is employed to identify the presence of violence in the text. In the next step, the model classifies stem texts that incite violence as either direct or passive. The developed system scored 72.37 and ranked 14th among the participants. | [
"Ahmed, Kawsar",
"Osama, Md",
"Islam, Md. Sirajul",
"Islam, Md Taosiful",
"Das, Avishek",
"Hoque, Mohammed Moshiul"
] | Score_IsAll_You_Need at BLP-2023 Task 1: A Hierarchical Classification Approach to Detect Violence Inciting Text using Transformers | banglalp-1.21 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
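The hierarchical scheme described in the record above is two binary classifiers chained at inference time. A sketch under the assumption of two fine-tuned checkpoints; the paths and label indices are hypothetical placeholders:

```python
# Two-step hierarchy: stage 1 separates violence from non-violence,
# stage 2 splits violent texts into direct vs. passive.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

def load(path):
    return (AutoTokenizer.from_pretrained(path),
            AutoModelForSequenceClassification.from_pretrained(path))

tok1, stage1 = load("ckpt/violence-vs-nonviolence")  # 0=non-violence, 1=violence
tok2, stage2 = load("ckpt/direct-vs-passive")        # 0=passive, 1=direct

@torch.no_grad()
def classify(text):
    enc = tok1(text, return_tensors="pt", truncation=True)
    if stage1(**enc).logits.argmax(-1).item() == 0:
        return "non-violence"
    enc = tok2(text, return_tensors="pt", truncation=True)
    return "direct" if stage2(**enc).logits.argmax(-1).item() == 1 else "passive"
```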
https://aclanthology.org/2023.banglalp-1.22.bib | https://aclanthology.org/2023.banglalp-1.22/ | @inproceedings{page-etal-2023-mavericks,
title = "Mavericks at {BLP}-2023 Task 1: Ensemble-based Approach Using Language Models for Violence Inciting Text Detection",
author = "Page, Saurabh and
Mangalvedhekar, Sudeep and
Deshpande, Kshitij and
Chavan, Tanmay and
Sonawane, Sheetal",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.22",
doi = "10.18653/v1/2023.banglalp-1.22",
pages = "190--195",
abstract = "This paper presents our work for the Violence Inciting Text Detection shared task in the First Workshop on Bangla Language Processing. Social media has accelerated the propagation of hate and violence-inciting speech in society. It is essential to develop efficient mechanisms to detect and curb the propagation of such texts. The problem of detecting violence-inciting texts is further exacerbated in low-resource settings due to sparse research and less data. The data provided in the shared task consists of texts in the Bangla language, where each example is classified into one of the three categories defined based on the types of violence-inciting texts. We try and evaluate several BERT-based models, and then use an ensemble of the models as our final submission. Our submission is ranked 10th in the final leaderboard of the shared task with a macro F1 score of 0.737.",
}
| This paper presents our work for the Violence Inciting Text Detection shared task in the First Workshop on Bangla Language Processing. Social media has accelerated the propagation of hate and violence-inciting speech in society. It is essential to develop efficient mechanisms to detect and curb the propagation of such texts. The problem of detecting violence-inciting texts is further exacerbated in low-resource settings due to sparse research and less data. The data provided in the shared task consists of texts in the Bangla language, where each example is classified into one of the three categories defined based on the types of violence-inciting texts. We try and evaluate several BERT-based models, and then use an ensemble of the models as our final submission. Our submission is ranked 10th in the final leaderboard of the shared task with a macro F1 score of 0.737. | [
"Page, Saurabh",
"Mangalvedhekar, Sudeep",
"Deshp",
"e, Kshitij",
"Chavan, Tanmay",
"Sonawane, Sheetal"
] | Mavericks at BLP-2023 Task 1: Ensemble-based Approach Using Language Models for Violence Inciting Text Detection | banglalp-1.22 | 2311.18778 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
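The ensemble in the record above amounts to hard (majority) voting over several fine-tuned models. A hedged sketch; the checkpoint names are placeholders, not the team's exact models:

```python
# Majority voting across fine-tuned BERT-style classifiers.
from collections import Counter
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

CHECKPOINTS = ["ckpt/banglabert", "ckpt/xlm-roberta", "ckpt/mbert"]  # hypothetical
members = [(AutoTokenizer.from_pretrained(c),
            AutoModelForSequenceClassification.from_pretrained(c))
           for c in CHECKPOINTS]

@torch.no_grad()
def ensemble_predict(text):
    votes = [int(model(**tok(text, return_tensors="pt", truncation=True))
                 .logits.argmax(-1)) for tok, model in members]
    return Counter(votes).most_common(1)[0][0]  # most frequent label id
```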
https://aclanthology.org/2023.banglalp-1.23.bib | https://aclanthology.org/2023.banglalp-1.23/ | @inproceedings{chatterjee-etal-2023-vaclm,
title = "{V}ac{LM} at {BLP}-2023 Task 1: Leveraging {BERT} models for Violence detection in {B}angla",
author = "Chatterjee, Shilpa and
Evenss, P J Leo and
Bhattacharyya, Pramit",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.23",
doi = "10.18653/v1/2023.banglalp-1.23",
pages = "196--200",
abstract = "This study introduces the system submitted to the BLP Shared Task 1: Violence Inciting Text Detection (VITD) by the VacLM team. In this work, we analyzed the impact of various transformer-based models for detecting violence in texts. BanglaBERT outperforms all the other competing models. We also observed that the transformer-based models are not adept at classifying Passive Violence and Direct Violence class but can better detect violence in texts, which was the task{'}s primary objective. On the shared task, we secured a rank of 12 with macro F1-score of 72.656{\%}.",
}
| This study introduces the system submitted to the BLP Shared Task 1: Violence Inciting Text Detection (VITD) by the VacLM team. In this work, we analyzed the impact of various transformer-based models for detecting violence in texts. BanglaBERT outperforms all the other competing models. We also observed that the transformer-based models are not adept at classifying Passive Violence and Direct Violence class but can better detect violence in texts, which was the task{'}s primary objective. On the shared task, we secured a rank of 12 with macro F1-score of 72.656{\%}. | [
"Chatterjee, Shilpa",
"Evenss, P J Leo",
"Bhattacharyya, Pramit"
] | VacLM at BLP-2023 Task 1: Leveraging BERT models for Violence detection in Bangla | banglalp-1.23 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
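The transformer systems in these records share the same fine-tuning skeleton. A compact sketch with the public csebuetnlp/banglabert checkpoint; the toy dataset and hyperparameters are illustrative assumptions, not any team's actual configuration:

```python
# Generic 3-class fine-tuning skeleton with the Hugging Face Trainer.
from datasets import Dataset
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          Trainer, TrainingArguments)

name = "csebuetnlp/banglabert"
tok = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name, num_labels=3)

train = Dataset.from_dict({"text": ["<Bangla text>"], "label": [0]})  # stand-in
train = train.map(lambda b: tok(b["text"], truncation=True, max_length=256),
                  batched=True)

args = TrainingArguments(output_dir="vitd-banglabert",
                         per_device_train_batch_size=16,
                         num_train_epochs=3, learning_rate=2e-5)
Trainer(model=model, args=args, train_dataset=train, tokenizer=tok).train()
```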
https://aclanthology.org/2023.banglalp-1.24.bib | https://aclanthology.org/2023.banglalp-1.24/ | @inproceedings{fahim-2023-aambela,
title = "Aambela at {BLP}-2023 Task 1: Focus on {UNK} tokens: Analyzing Violence Inciting {B}angla Text with Adding Dataset Specific New Word Tokens",
author = "Fahim, Md",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.24",
doi = "10.18653/v1/2023.banglalp-1.24",
pages = "201--207",
abstract = "The BLP-2023 Task 1 aims to develop a Natural Language Inference system tailored for detecting and analyzing threats from Bangla YouTube comments. Bangla language models like BanglaBERT have demonstrated remarkable performance in various Bangla natural language processing tasks across different domains. We utilized BanglaBERT for the violence detection task, employing three different classification heads. As BanglaBERT{'}s vocabulary lacks certain crucial words, our model incorporates some of them as new special tokens, based on their frequency in the dataset, and their embeddings are learned during training. The model achieved the 2nd position on the leaderboard, boasting an impressive macro-F1 Score of 76.04{\%} on the official test set. With the addition of new tokens, we achieved a 76.90{\%} macro-F1 score, surpassing the top score (76.044{\%}) on the test set.",
}
| The BLP-2023 Task 1 aims to develop a Natural Language Inference system tailored for detecting and analyzing threats from Bangla YouTube comments. Bangla language models like BanglaBERT have demonstrated remarkable performance in various Bangla natural language processing tasks across different domains. We utilized BanglaBERT for the violence detection task, employing three different classification heads. As BanglaBERT{'}s vocabulary lacks certain crucial words, our model incorporates some of them as new special tokens, based on their frequency in the dataset, and their embeddings are learned during training. The model achieved the 2nd position on the leaderboard, boasting an impressive macro-F1 Score of 76.04{\%} on the official test set. With the addition of new tokens, we achieved a 76.90{\%} macro-F1 score, surpassing the top score (76.044{\%}) on the test set. | [
"Fahim, Md"
] | Aambela at BLP-2023 Task 1: Focus on UNK tokens: Analyzing Violence Inciting Bangla Text with Adding Dataset Specific New Word Tokens | banglalp-1.24 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
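The "new word tokens" idea in the record above maps to two standard tokenizer/model calls: add frequent dataset words missing from the vocabulary as whole tokens, then resize the embedding matrix so their vectors are learned during fine-tuning. A minimal sketch; the word list is a hypothetical example:

```python
# Extending BanglaBERT's vocabulary with dataset-specific tokens.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

name = "csebuetnlp/banglabert"
tok = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name, num_labels=3)

frequent_oov_words = ["<frequent dataset word 1>", "<frequent dataset word 2>"]
num_added = tok.add_tokens(frequent_oov_words)  # extend the vocabulary
model.resize_token_embeddings(len(tok))         # new rows, randomly initialised
print(f"added {num_added} tokens; vocab size is now {len(tok)}")
```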
https://aclanthology.org/2023.banglalp-1.25.bib | https://aclanthology.org/2023.banglalp-1.25/ | @inproceedings{shibu-etal-2023-sust,
title = "{SUST}{\_}{B}lack Box at {BLP}-2023 Task 1: Detecting Communal Violence in Texts: An Exploration of {MLM} and Weighted Ensemble Techniques",
author = "Shibu, Hrithik and
Datta, Shrestha and
Rahman, Zhalok and
Sami, Shahrab and
Miah, Md. Sumon and
Fairooz, Raisa and
Mollah, Md",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.25",
doi = "10.18653/v1/2023.banglalp-1.25",
pages = "208--213",
abstract = "In this study, we address the shared task of classifying violence-inciting texts from YouTube comments related to violent incidents in the Bengal region. We seamlessly integrated domain adaptation techniques by meticulously fine-tuning pre-existing Masked Language Models on a diverse array of informal texts. We employed a multifaceted approach, leveraging Transfer Learning, Stacking, and Ensemble techniques to enhance our model{'}s performance. Our integrated system, amalgamating the refined BanglaBERT model through MLM and our Weighted Ensemble approach, showcased superior efficacy, achieving macro F1 scores of 71{\%} and 72{\%}, respectively, while the MLM approach secured the 18th position among participants. This underscores the robustness and precision of our proposed paradigm in the nuanced detection and categorization of violent narratives within digital realms.",
}
| In this study, we address the shared task of classifying violence-inciting texts from YouTube comments related to violent incidents in the Bengal region. We seamlessly integrated domain adaptation techniques by meticulously fine-tuning pre-existing Masked Language Models on a diverse array of informal texts. We employed a multifaceted approach, leveraging Transfer Learning, Stacking, and Ensemble techniques to enhance our model{'}s performance. Our integrated system, amalgamating the refined BanglaBERT model through MLM and our Weighted Ensemble approach, showcased superior efficacy, achieving macro F1 scores of 71{\%} and 72{\%}, respectively, while the MLM approach secured the 18th position among participants. This underscores the robustness and precision of our proposed paradigm in the nuanced detection and categorization of violent narratives within digital realms. | [
"Shibu, Hrithik",
"Datta, Shrestha",
"Rahman, Zhalok",
"Sami, Shahrab",
"Miah, Md. Sumon",
"Fairooz, Raisa",
"Mollah, Md"
] | SUST_Black Box at BLP-2023 Task 1: Detecting Communal Violence in Texts: An Exploration of MLM and Weighted Ensemble Techniques | banglalp-1.25 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
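The weighted ensemble mentioned above is soft voting: blend per-model class probabilities with weights, for example proportional to each model's dev-set F1. A sketch with illustrative numbers:

```python
# Weighted (soft-voting) ensemble over model probability outputs.
import numpy as np

def weighted_ensemble(prob_matrices, weights):
    """prob_matrices: list of (n_samples, n_classes) arrays, one per model."""
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()                             # normalise the weights
    stacked = np.stack(prob_matrices)           # (n_models, n_samples, n_classes)
    blended = np.tensordot(w, stacked, axes=1)  # weighted average over models
    return blended.argmax(axis=-1)

p_mlm = np.array([[0.2, 0.5, 0.3]])   # e.g. MLM-adapted model
p_base = np.array([[0.6, 0.3, 0.1]])  # e.g. plain fine-tuned model
print(weighted_ensemble([p_mlm, p_base], weights=[0.71, 0.72]))
```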
https://aclanthology.org/2023.banglalp-1.26.bib | https://aclanthology.org/2023.banglalp-1.26/ | @inproceedings{tariquzzaman-etal-2023-linguists,
title = "the{\_}linguists at {BLP}-2023 Task 1: A Novel Informal {B}angla {F}asttext Embedding for Violence Inciting Text Detection",
author = "Tariquzzaman, Md. and
Kader, Md Wasif and
Anam, Audwit and
Haque, Naimul and
Kabir, Mohsinul and
Mahmud, Hasan and
Hasan, Md Kamrul",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.26",
doi = "10.18653/v1/2023.banglalp-1.26",
pages = "214--219",
abstract = "This paper introduces a novel informal Bangla word embedding for designing a cost-efficient solution for the task {``}Violence Inciting Text Detection{''} which focuses on developing classification systems to categorize violence that can potentially incite further violent actions. We propose a semi-supervised learning approach by training an informal Bangla FastText embedding, which is further fine-tuned on lightweight models on task specific dataset and yielded competitive results to our initial method using BanglaBERT, which secured the 7th position with an f1-score of 73.98{\%}. We conduct extensive experiments to assess the efficiency of the proposed embedding and how well it generalizes in terms of violence classification, along with it{'}s coverage on the task{'}s dataset. Our proposed Bangla IFT embedding achieved a competitive macro average F1 score of 70.45{\%}. Additionally, we provide a detailed analysis of our findings, delving into potential causes of misclassification in the detection of violence-inciting text.",
}
| This paper introduces a novel informal Bangla word embedding for designing a cost-efficient solution for the task {``}Violence Inciting Text Detection{''} which focuses on developing classification systems to categorize violence that can potentially incite further violent actions. We propose a semi-supervised learning approach by training an informal Bangla FastText embedding, which is further fine-tuned on lightweight models on a task-specific dataset and yielded competitive results to our initial method using BanglaBERT, which secured the 7th position with an f1-score of 73.98{\%}. We conduct extensive experiments to assess the efficiency of the proposed embedding and how well it generalizes in terms of violence classification, along with its coverage on the task{'}s dataset. Our proposed Bangla IFT embedding achieved a competitive macro average F1 score of 70.45{\%}. Additionally, we provide a detailed analysis of our findings, delving into potential causes of misclassification in the detection of violence-inciting text. | [
"Tariquzzaman, Md.",
"Kader, Md Wasif",
"Anam, Audwit",
"Haque, Naimul",
"Kabir, Mohsinul",
"Mahmud, Hasan",
"Hasan, Md Kamrul"
] | the_linguists at BLP-2023 Task 1: A Novel Informal Bangla Fasttext Embedding for Violence Inciting Text Detection | banglalp-1.26 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
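The record above trains a custom FastText embedding on informal Bangla text. A minimal sketch with gensim; the corpus and hyperparameters are assumptions, not the authors' exact setup:

```python
# Training a FastText embedding; subword n-grams also cover OOV words.
from gensim.models import FastText

corpus = [["<tokenised>", "<informal>", "<bangla>", "<sentence>"]]  # stand-in
model = FastText(vector_size=300, window=5, min_count=1,
                 min_n=3, max_n=6, sg=1)
model.build_vocab(corpus_iterable=corpus)
model.train(corpus_iterable=corpus, total_examples=len(corpus), epochs=10)

print(model.wv["<informal>"].shape)     # (300,)
print(model.wv["<unseen-word>"].shape)  # OOV handled via subword n-grams
```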
https://aclanthology.org/2023.banglalp-1.27.bib | https://aclanthology.org/2023.banglalp-1.27/ | @inproceedings{mukherjee-etal-2023-ufal,
title = "{UFAL}-{ULD} at {BLP}-2023 Task 1: Violence Detection in {B}angla Text",
author = "Mukherjee, Sourabrata and
Ojha, Atul Kr. and
Du{\v{s}}ek, Ond{\v{r}}ej",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.27",
doi = "10.18653/v1/2023.banglalp-1.27",
pages = "220--224",
abstract = "In this paper, we present UFAL-ULD team{'}s system, desinged as a part of the BLP Shared Task 1: Violence Inciting Text Detection (VITD). This task aims to classify text, with a particular challenge of identifying incitement to violence into Direct, Indirect or Non-violence levels. We experimented with several pre-trained sequence classification models, including XLM-RoBERTa, BanglaBERT, Bangla BERT Base, and Multilingual BERT. Our best-performing model was based on the XLM-RoBERTa-base architecture, which outperformed the baseline models. Our system was ranked 20th among the 27 teams that participated in the task.",
}
| In this paper, we present UFAL-ULD team{'}s system, designed as a part of the BLP Shared Task 1: Violence Inciting Text Detection (VITD). This task aims to classify text, with a particular challenge of identifying incitement to violence into Direct, Indirect or Non-violence levels. We experimented with several pre-trained sequence classification models, including XLM-RoBERTa, BanglaBERT, Bangla BERT Base, and Multilingual BERT. Our best-performing model was based on the XLM-RoBERTa-base architecture, which outperformed the baseline models. Our system was ranked 20th among the 27 teams that participated in the task. | [
"Mukherjee, Sourabrata",
"Ojha, Atul Kr.",
"Du{\\v{s}}ek, Ond{\\v{r}}ej"
] | UFAL-ULD at BLP-2023 Task 1: Violence Detection in Bangla Text | banglalp-1.27 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.28.bib | https://aclanthology.org/2023.banglalp-1.28/ | @inproceedings{dey-etal-2023-semantics,
title = "Semantics Squad at {BLP}-2023 Task 1: Violence Inciting {B}angla Text Detection with Fine-Tuned Transformer-Based Models",
author = "Dey, Krishno and
Tarannum, Prerona and
Hasan, Md. Arid and
Palma, Francis",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.28",
doi = "10.18653/v1/2023.banglalp-1.28",
pages = "225--229",
abstract = "This study investigates the application of Transformer-based models for violence threat identification. We participated in the BLP-2023 Shared Task 1 and in our initial submission, BanglaBERT large achieved 5th position on the leader-board with a macro F1 score of 0.7441, approaching the highest baseline of 0.7879 established for this task. In contrast, the top-performing system on the leaderboard achieved an F1 score of 0.7604. Subsequent experiments involving m-BERT, XLM-RoBERTa base, XLM-RoBERTa large, BanglishBERT, BanglaBERT, and BanglaBERT large models revealed that BanglaBERT achieved an F1 score of 0.7441, which closely approximated the baseline. Remarkably, m-BERT and XLM-RoBERTa base also approximated the baseline with macro F1 scores of 0.6584 and 0.6968, respectively. A notable finding from our study is the under-performance by larger models for the shared task dataset, which requires further investigation. Our findings underscore the potential of transformer-based models in identifying violence threats, offering valuable insights to enhance safety measures on online platforms.",
}
| This study investigates the application of Transformer-based models for violence threat identification. We participated in the BLP-2023 Shared Task 1 and in our initial submission, BanglaBERT large achieved 5th position on the leader-board with a macro F1 score of 0.7441, approaching the highest baseline of 0.7879 established for this task. In contrast, the top-performing system on the leaderboard achieved an F1 score of 0.7604. Subsequent experiments involving m-BERT, XLM-RoBERTa base, XLM-RoBERTa large, BanglishBERT, BanglaBERT, and BanglaBERT large models revealed that BanglaBERT achieved an F1 score of 0.7441, which closely approximated the baseline. Remarkably, m-BERT and XLM-RoBERTa base also approximated the baseline with macro F1 scores of 0.6584 and 0.6968, respectively. A notable finding from our study is the under-performance by larger models for the shared task dataset, which requires further investigation. Our findings underscore the potential of transformer-based models in identifying violence threats, offering valuable insights to enhance safety measures on online platforms. | [
"Dey, Krishno",
"Tarannum, Prerona",
"Hasan, Md. Arid",
"Palma, Francis"
] | Semantics Squad at BLP-2023 Task 1: Violence Inciting Bangla Text Detection with Fine-Tuned Transformer-Based Models | banglalp-1.28 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.29.bib | https://aclanthology.org/2023.banglalp-1.29/ | @inproceedings{veeramani-etal-2023-lowresourcenlu,
title = "{L}ow{R}esource{NLU} at {BLP}-2023 Task 1 {\&} 2: Enhancing Sentiment Classification and Violence Incitement Detection in {B}angla Through Aggregated Language Models",
author = "Veeramani, Hariram and
Thapa, Surendrabikram and
Naseem, Usman",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.29",
doi = "10.18653/v1/2023.banglalp-1.29",
pages = "230--235",
abstract = "Violence incitement detection and sentiment analysis hold significant importance in the field of natural language processing. However, in the case of the Bangla language, there are unique challenges due to its low-resource nature. In this paper, we address these challenges by presenting an innovative approach that leverages aggregated BERT models for two tasks at the BLP workshop in EMNLP 2023, specifically tailored for Bangla. Task 1 focuses on violence-inciting text detection, while task 2 centers on sentiment analysis. Our approach combines fine-tuning with textual entailment (utilizing BanglaBERT), Masked Language Model (MLM) training (making use of BanglaBERT), and the use of standalone Multilingual BERT. This comprehensive framework significantly enhances the accuracy of sentiment classification and violence incitement detection in Bangla text. Our method achieved the 11th rank in task 1 with an F1-score of 73.47 and the 4th rank in task 2 with an F1-score of 71.73. This paper provides a detailed system description along with an analysis of the impact of each component of our framework.",
}
| Violence incitement detection and sentiment analysis hold significant importance in the field of natural language processing. However, in the case of the Bangla language, there are unique challenges due to its low-resource nature. In this paper, we address these challenges by presenting an innovative approach that leverages aggregated BERT models for two tasks at the BLP workshop in EMNLP 2023, specifically tailored for Bangla. Task 1 focuses on violence-inciting text detection, while task 2 centers on sentiment analysis. Our approach combines fine-tuning with textual entailment (utilizing BanglaBERT), Masked Language Model (MLM) training (making use of BanglaBERT), and the use of standalone Multilingual BERT. This comprehensive framework significantly enhances the accuracy of sentiment classification and violence incitement detection in Bangla text. Our method achieved the 11th rank in task 1 with an F1-score of 73.47 and the 4th rank in task 2 with an F1-score of 71.73. This paper provides a detailed system description along with an analysis of the impact of each component of our framework. | [
"Veeramani, Hariram",
"Thapa, Surendrabikram",
"Naseem, Usman"
] | LowResourceNLU at BLP-2023 Task 1 & 2: Enhancing Sentiment Classification and Violence Incitement Detection in Bangla Through Aggregated Language Models | banglalp-1.29 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.30.bib | https://aclanthology.org/2023.banglalp-1.30/ | @inproceedings{das-etal-2023-team,
title = "Team Error Point at {BLP}-2023 Task 1: A Comprehensive Approach for Violence Inciting Text Detection using Deep Learning and Traditional Machine Learning Algorithm",
author = "Das, Rajesh and
Maowa, Jannatul and
Ajmain, Moshfiqur and
Yeiad, Kabid and
Islam, Mirajul and
Khushbu, Sharun",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.30",
doi = "10.18653/v1/2023.banglalp-1.30",
pages = "236--240",
abstract = "In the modern digital landscape, social media platforms have the dual role of fostering unprecedented connectivity and harboring a dark underbelly in the form of widespread violence-inciting content. Pioneering research in Bengali social media aims to provide a groundbreaking solution to this issue. This study thoroughly investigates violence-inciting text classification using a diverse range of machine learning and deep learning models, offering insights into content moderation and strategies for enhancing online safety. Situated at the intersection of technology and social responsibility, the aim is to empower platforms and communities to combat online violence. By providing insights into model selection and methodology, this work makes a significant contribution to the ongoing dialogue about the challenges posed by the darker aspects of the digital era. Our system scored 31.913 and ranked 26 among the participants.",
}
| In the modern digital landscape, social media platforms have the dual role of fostering unprecedented connectivity and harboring a dark underbelly in the form of widespread violence-inciting content. Pioneering research in Bengali social media aims to provide a groundbreaking solution to this issue. This study thoroughly investigates violence-inciting text classification using a diverse range of machine learning and deep learning models, offering insights into content moderation and strategies for enhancing online safety. Situated at the intersection of technology and social responsibility, the aim is to empower platforms and communities to combat online violence. By providing insights into model selection and methodology, this work makes a significant contribution to the ongoing dialogue about the challenges posed by the darker aspects of the digital era. Our system scored 31.913 and ranked 26 among the participants. | [
"Das, Rajesh",
"Maowa, Jannatul",
"Ajmain, Moshfiqur",
"Yeiad, Kabid",
"Islam, Mirajul",
"Khushbu, Sharun"
] | Team Error Point at BLP-2023 Task 1: A Comprehensive Approach for Violence Inciting Text Detection using Deep Learning and Traditional Machine Learning Algorithm | banglalp-1.30 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
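The record above compares deep models against traditional machine learning. A classical baseline of that kind is TF-IDF features with a linear SVM; a sketch in scikit-learn, with toy data standing in for the shared-task files:

```python
# TF-IDF + linear SVM baseline for violence text classification.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC

texts = ["<violent bangla comment>", "<harmless bangla comment>"]
labels = [1, 0]  # illustrative: 1=violence, 0=non-violence

clf = Pipeline([
    ("tfidf", TfidfVectorizer(ngram_range=(1, 2), min_df=1)),
    ("svm", LinearSVC()),
])
clf.fit(texts, labels)
print(clf.predict(["<new comment>"]))
```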
https://aclanthology.org/2023.banglalp-1.31.bib | https://aclanthology.org/2023.banglalp-1.31/ | @inproceedings{hossain-etal-2023-nlp,
title = "{NLP}{\_}{CUET} at {BLP}-2023 Task 1: Fine-grained Categorization of Violence Inciting Text using Transformer-based Approach",
author = "Hossain, Jawad and
Ali Taher, Hasan Mesbaul and
Das, Avishek and
Hoque, Mohammed Moshiul",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.31",
doi = "10.18653/v1/2023.banglalp-1.31",
pages = "241--246",
abstract = "The amount of online textual content has increased significantly in recent years through social media posts, online chatting, web portals, and other digital platforms due to the significant increase in internet users and their unprompted access via digital devices. Unfortunately, the misappropriation of textual communication via the Internet has led to violence-inciting texts. Despite the availability of various forms of violence-inciting materials, text-based content is often used to carry out violent acts. Thus, developing a system to detect violence-inciting text has become vital. However, creating such a system in a low-resourced language like Bangla becomes challenging. Therefore, a shared task has been arranged to detect violence-inciting text in Bangla. This paper presents a hybrid approach (GAN+Bangla-ELECTRA) to classify violence-inciting text in Bangla into three classes: \textit{direct}, \textit{passive}, and \textit{non-violence}. We investigated a variety of deep learning (CNN, BiLSTM, BiLSTM+Attention), machine learning (LR, DT, MNB, SVM, RF, SGD), transformers (BERT, ELECTRA), and GAN-based models to detect violence inciting text in Bangla. Evaluation results demonstrate that the GAN+Bangla-ELECTRA model gained the highest macro $f_1$-score (74.59), which obtained us a rank of 3rd position at the BLP-2023 Task 1.",
}
| The amount of online textual content has increased significantly in recent years through social media posts, online chatting, web portals, and other digital platforms due to the significant increase in internet users and their unprompted access via digital devices. Unfortunately, the misappropriation of textual communication via the Internet has led to violence-inciting texts. Despite the availability of various forms of violence-inciting materials, text-based content is often used to carry out violent acts. Thus, developing a system to detect violence-inciting text has become vital. However, creating such a system in a low-resourced language like Bangla becomes challenging. Therefore, a shared task has been arranged to detect violence-inciting text in Bangla. This paper presents a hybrid approach (GAN+Bangla-ELECTRA) to classify violence-inciting text in Bangla into three classes: \textit{direct}, \textit{passive}, and \textit{non-violence}. We investigated a variety of deep learning (CNN, BiLSTM, BiLSTM+Attention), machine learning (LR, DT, MNB, SVM, RF, SGD), transformers (BERT, ELECTRA), and GAN-based models to detect violence inciting text in Bangla. Evaluation results demonstrate that the GAN+Bangla-ELECTRA model gained the highest macro $f_1$-score (74.59), which obtained us a rank of 3rd position at the BLP-2023 Task 1. | [
"Hossain, Jawad",
"Ali Taher, Hasan Mesbaul",
"Das, Avishek",
"Hoque, Mohammed Moshiul"
] | NLP_CUET at BLP-2023 Task 1: Fine-grained Categorization of Violence Inciting Text using Transformer-based Approach | banglalp-1.31 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.32.bib | https://aclanthology.org/2023.banglalp-1.32/ | @inproceedings{riyad-etal-2023-team,
title = "{T}eam{\_}{S}yrax at {BLP}-2023 Task 1: Data Augmentation and Ensemble Based Approach for Violence Inciting Text Detection in {B}angla",
author = "Riyad, Omar Faruqe and
Chakraborty, Trina and
Dey, Abhishek",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.32",
doi = "10.18653/v1/2023.banglalp-1.32",
pages = "247--254",
abstract = "This paper describes our participation in Task1 (VITD) of BLP Workshop 1 at EMNLP 2023,focused on the detection and categorizationof threats linked to violence, which could po-tentially encourage more violent actions. Ourapproach involves fine-tuning of pre-trainedtransformer models and employing techniqueslike self-training with external data, data aug-mentation through back-translation, and en-semble learning (bagging and majority voting).Notably, self-training improves performancewhen applied to data from external source butnot when applied to the test-set. Our anal-ysis highlights the effectiveness of ensemblemethods and data augmentation techniques inBangla Text Classification. Our system ini-tially scored 0.70450 and ranked 19th amongthe participants but post-competition experi-ments boosted our score to 0.72740.",
}
| This paper describes our participation in Task 1 (VITD) of the BLP Workshop at EMNLP 2023, focused on the detection and categorization of threats linked to violence, which could potentially encourage more violent actions. Our approach involves fine-tuning of pre-trained transformer models and employing techniques like self-training with external data, data augmentation through back-translation, and ensemble learning (bagging and majority voting). Notably, self-training improves performance when applied to data from an external source but not when applied to the test set. Our analysis highlights the effectiveness of ensemble methods and data augmentation techniques in Bangla Text Classification. Our system initially scored 0.70450 and ranked 19th among the participants, but post-competition experiments boosted our score to 0.72740. | [
"Riyad, Omar Faruqe",
"Chakraborty, Trina",
"Dey, Abhishek"
] | Team_Syrax at BLP-2023 Task 1: Data Augmentation and Ensemble Based Approach for Violence Inciting Text Detection in Bangla | banglalp-1.32 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
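The self-training described above is a pseudo-labeling loop: label unlabelled external text with an already fine-tuned model, keep only confident predictions, and retrain on the enlarged set. A sketch; the confidence threshold and checkpoint path are assumptions:

```python
# Pseudo-labeling unlabelled text with a fine-tuned classifier.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

ckpt = "ckpt/finetuned-banglabert"  # hypothetical fine-tuned checkpoint
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSequenceClassification.from_pretrained(ckpt)

@torch.no_grad()
def pseudo_label(texts, threshold=0.9):
    kept = []
    for text in texts:
        probs = model(**tok(text, return_tensors="pt",
                            truncation=True)).logits.softmax(-1)
        conf, label = probs.max(-1)
        if conf.item() >= threshold:  # only trust confident predictions
            kept.append((text, label.item()))
    return kept  # merge these into the training set and fine-tune again

extra = pseudo_label(["<unlabelled external Bangla comment>"])
```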
https://aclanthology.org/2023.banglalp-1.33.bib | https://aclanthology.org/2023.banglalp-1.33/ | @inproceedings{saha-etal-2023-blp,
title = "{BLP}-2023 Task 1: Violence Inciting Text Detection ({VITD})",
author = "Saha, Sourav and
Junaed, Jahedul Alam and
Saleki, Maryam and
Rahouti, Mohamed and
Mohammed, Nabeel and
Amin, Mohammad Ruhul",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.33",
doi = "10.18653/v1/2023.banglalp-1.33",
pages = "255--265",
abstract = "We present the comprehensive technical description of the outcome of the BLP shared task on Violence Inciting Text Detection (VITD).In recent years, social media has become a tool for groups of various religions and backgrounds to spread hatred, leading to physicalviolence with devastating consequences. To address this challenge, the VITD shared task was initiated, aiming to classify the level of violence incitement in various texts. The competition garnered significant interest with a total of 27 teams consisting of 88 participants successfully submitting their systems to the CodaLab leaderboard. During the post-workshop phase, we received 16 system papers on VITD from those participants. In this paper, we intend to discuss the VITD baseline performance, error analysis of the submitted models, and provide a comprehensive summary of the computational techniques applied by the participating teams",
}
| We present the comprehensive technical description of the outcome of the BLP shared task on Violence Inciting Text Detection (VITD). In recent years, social media has become a tool for groups of various religions and backgrounds to spread hatred, leading to physical violence with devastating consequences. To address this challenge, the VITD shared task was initiated, aiming to classify the level of violence incitement in various texts. The competition garnered significant interest with a total of 27 teams consisting of 88 participants successfully submitting their systems to the CodaLab leaderboard. During the post-workshop phase, we received 16 system papers on VITD from those participants. In this paper, we intend to discuss the VITD baseline performance, error analysis of the submitted models, and provide a comprehensive summary of the computational techniques applied by the participating teams. | [
"Saha, Sourav",
"Junaed, Jahedul Alam",
"Saleki, Maryam",
"Rahouti, Mohamed",
"Mohammed, Nabeel",
"Amin, Mohammad Ruhul"
] | BLP-2023 Task 1: Violence Inciting Text Detection (VITD) | banglalp-1.33 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
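Systems in the task overview above are ranked by macro F1, the unweighted mean of per-class F1 scores, so small classes count as much as large ones. A quick check with scikit-learn on toy predictions (values are illustrative):

```python
# Macro F1: average the per-class F1 scores with equal weight.
from sklearn.metrics import f1_score

y_true = [0, 0, 1, 1, 2, 2]  # toy labels: 0=direct, 1=passive, 2=non-violence
y_pred = [0, 1, 1, 1, 2, 0]
print(f1_score(y_true, y_pred, average="macro"))
```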
https://aclanthology.org/2023.banglalp-1.34.bib | https://aclanthology.org/2023.banglalp-1.34/ | @inproceedings{saha-nanda-2023-banglanlp-blp,
title = "{B}angla{NLP} at {BLP}-2023 Task 2: Benchmarking different Transformer Models for Sentiment Analysis of {B}angla Social Media Posts",
author = "Saha, Saumajit and
Nanda, Albert",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.34",
doi = "10.18653/v1/2023.banglalp-1.34",
pages = "266--272",
abstract = "Bangla is the 7th most widely spoken language globally, with a staggering 234 million native speakers primarily hailing from India and Bangladesh. This morphologically rich language boasts a rich literary tradition, encompassing diverse dialects and language-specific challenges. Despite its linguistic richness and history, Bangla remains categorized as a low-resource language within the natural language processing (NLP) and speech community. This paper presents our submission to Task 2 (Sentiment Analysis of Bangla Social Media Posts) of the BLP Workshop. We experimented with various Transformer-based architectures to solve this task. Our quantitative results show that transfer learning really helps in better learning of the models in this low-resource language scenario. This becomes evident when we further finetuned a model that had already been finetuned on Twitter data for sentiment analysis task and that finetuned model performed the best among all other models. We also performed a detailed error analysis where we found some instances where ground truth labels need to be looked at. We obtained a micro-F1 of 67.02{\%} on the test set and our performance in this shared task is ranked at 21 in the leaderboard.",
}
| Bangla is the 7th most widely spoken language globally, with a staggering 234 million native speakers primarily hailing from India and Bangladesh. This morphologically rich language boasts a rich literary tradition, encompassing diverse dialects and language-specific challenges. Despite its linguistic richness and history, Bangla remains categorized as a low-resource language within the natural language processing (NLP) and speech community. This paper presents our submission to Task 2 (Sentiment Analysis of Bangla Social Media Posts) of the BLP Workshop. We experimented with various Transformer-based architectures to solve this task. Our quantitative results show that transfer learning really helps in better learning of the models in this low-resource language scenario. This becomes evident when we further finetuned a model that had already been finetuned on Twitter data for sentiment analysis task and that finetuned model performed the best among all other models. We also performed a detailed error analysis where we found some instances where ground truth labels need to be looked at. We obtained a micro-F1 of 67.02{\%} on the test set and our performance in this shared task is ranked at 21 in the leaderboard. | [
"Saha, Saumajit",
"N",
"a, Albert"
] | BanglaNLP at BLP-2023 Task 2: Benchmarking different Transformer Models for Sentiment Analysis of Bangla Social Media Posts | banglalp-1.34 | 2310.09238 | [
"https://github.com/Saumajit/BanglaNLP/tree/main/Task_2"
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
https://aclanthology.org/2023.banglalp-1.35.bib | https://aclanthology.org/2023.banglalp-1.35/ | @inproceedings{liu-etal-2023-knowdee,
title = "Knowdee at {BLP}-2023 Task 2: Improving {B}angla Sentiment Analysis Using Ensembled Models with Pseudo-Labeling",
author = "Liu, Xiaoyi and
Teng, Mao and
Yang, Shuangtao and
Fu, Bo",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.35",
doi = "10.18653/v1/2023.banglalp-1.35",
pages = "273--278",
abstract = "This paper outlines our submission to the Sentiment Analysis Shared Task at the Bangla Language Processing (BLP) Workshop at EMNLP2023 (Hasan et al., 2023a). The objective of this task is to detect sentiment in each text by classifying it as Positive, Negative, or Neutral. This shared task is based on the MUltiplatform BAngla SEntiment (MUBASE) (Hasan et al., 2023b) and SentNob (Islam et al., 2021) dataset, which consists of public comments from various social media platforms. Our proposed method for this task is based on the pre-trained Bangla language model BanglaBERT (Bhattacharjee et al., 2022). We trained an ensemble of BanglaBERT on the original dataset and used it to generate pseudo-labels for data augmentation. This expanded dataset was then used to train our final models. During the evaluation phase, 30 teams submitted their systems, and our system achieved the second highest performance with F1 score of 0.7267. The source code of the proposed approach is available at https://github.com/KnowdeeAI/blp{\_}task2{\_}knowdee.git.",
}
| This paper outlines our submission to the Sentiment Analysis Shared Task at the Bangla Language Processing (BLP) Workshop at EMNLP2023 (Hasan et al., 2023a). The objective of this task is to detect sentiment in each text by classifying it as Positive, Negative, or Neutral. This shared task is based on the MUltiplatform BAngla SEntiment (MUBASE) (Hasan et al., 2023b) and SentNob (Islam et al., 2021) dataset, which consists of public comments from various social media platforms. Our proposed method for this task is based on the pre-trained Bangla language model BanglaBERT (Bhattacharjee et al., 2022). We trained an ensemble of BanglaBERT on the original dataset and used it to generate pseudo-labels for data augmentation. This expanded dataset was then used to train our final models. During the evaluation phase, 30 teams submitted their systems, and our system achieved the second highest performance with F1 score of 0.7267. The source code of the proposed approach is available at https://github.com/KnowdeeAI/blp{\_}task2{\_}knowdee.git. | [
"Liu, Xiaoyi",
"Teng, Mao",
"Yang, SHuangtao",
"Fu, Bo"
] | Knowdee at BLP-2023 Task 2: Improving Bangla Sentiment Analysis Using Ensembled Models with Pseudo-Labeling | banglalp-1.35 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.36.bib | https://aclanthology.org/2023.banglalp-1.36/ | @inproceedings{rahman-uzuner-2023-m1437,
title = "M1437 at {BLP}-2023 Task 2: Harnessing {B}angla Text for Sentiment Analysis: A Transformer-based Approach",
author = "Rahman, Majidur and
Uzuner, Ozlem",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.36",
doi = "10.18653/v1/2023.banglalp-1.36",
pages = "279--285",
abstract = "Analyzing public sentiment on social media is helpful in understanding the public{'}s emotions about any given topic. While numerous studies have been conducted in this field, there has been limited research on Bangla social media data. Team M1437 from George Mason University participated in the Sentiment Analysis shared task of the Bangla Language Processing (BLP) Workshop at EMNLP-2023. The team fine-tuned various BERT-based Transformer architectures to solve the task. This article shows that $BanglaBERT_{large}$, a language model pre-trained on Bangla text, outperformed other BERT-based models. This model achieved an F1 score of 73.15{\%} and top position in the development phase, was further tuned with external training data, and achieved an F1 score of 70.36{\%} in the evaluation phase, securing the fourteenth place on the leaderboard. The F1 score on the test set, when $BanglaBERT_{large}$ was trained without external training data, was 71.54{\%}.",
}
| Analyzing public sentiment on social media is helpful in understanding the public{'}s emotions about any given topic. While numerous studies have been conducted in this field, there has been limited research on Bangla social media data. Team M1437 from George Mason University participated in the Sentiment Analysis shared task of the Bangla Language Processing (BLP) Workshop at EMNLP-2023. The team fine-tuned various BERT-based Transformer architectures to solve the task. This article shows that $BanglaBERT_{large}$, a language model pre-trained on Bangla text, outperformed other BERT-based models. This model achieved an F1 score of 73.15{\%} and top position in the development phase, was further tuned with external training data, and achieved an F1 score of 70.36{\%} in the evaluation phase, securing the fourteenth place on the leaderboard. The F1 score on the test set, when $BanglaBERT_{large}$ was trained without external training data, was 71.54{\%}. | [
"Rahman, Majidur",
"Uzuner, Ozlem"
] | M1437 at BLP-2023 Task 2: Harnessing Bangla Text for Sentiment Analysis: A Transformer-based Approach | banglalp-1.36 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.37.bib | https://aclanthology.org/2023.banglalp-1.37/ | @inproceedings{goswami-etal-2023-nlpbdpatriots,
title = "nlp{BD}patriots at {BLP}-2023 Task 2: A Transfer Learning Approach towards {B}angla Sentiment Analysis",
author = "Goswami, Dhiman and
Raihan, Md Nishat and
Puspo, Sadiya Sayara Chowdhury and
Zampieri, Marcos",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.37",
doi = "10.18653/v1/2023.banglalp-1.37",
pages = "286--292",
abstract = "In this paper, we discuss the entry of nlpBDpatriots to some sophisticated approaches for classifying Bangla Sentiment Analysis. This is a shared task of the first workshop on Bangla Language Processing (BLP) organized under EMNLP. The main objective of this task is to identify the sentiment polarity of social media content. There are 30 groups of NLP enthusiasts who participate in this shared task and our best-performing approach for the task is transfer learning with data augmentation. Our group ranked $12^{th}$ position in this competition with this methodology securing a micro F1 score of 0.71.",
}
| In this paper, we discuss the entry of nlpBDpatriots to some sophisticated approaches for classifying Bangla Sentiment Analysis. This is a shared task of the first workshop on Bangla Language Processing (BLP) organized under EMNLP. The main objective of this task is to identify the sentiment polarity of social media content. There are 30 groups of NLP enthusiasts who participate in this shared task and our best-performing approach for the task is transfer learning with data augmentation. Our group ranked $12^{th}$ position in this competition with this methodology securing a micro F1 score of 0.71. | [
"Goswami, Dhiman",
"Raihan, Md Nishat",
"Puspo, Sadiya Sayara Chowdhury",
"Zampieri, Marcos"
] | nlpBDpatriots at BLP-2023 Task 2: A Transfer Learning Approach towards Bangla Sentiment Analysis | banglalp-1.37 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.38.bib | https://aclanthology.org/2023.banglalp-1.38/ | @inproceedings{khushbu-etal-2023-ushoshi2023,
title = "Ushoshi2023 at {BLP}-2023 Task 2: A Comparison of Traditional to Advanced Linguistic Models to Analyze Sentiment in {B}angla Texts",
author = "Khushbu, Sharun and
Nur, Nasheen and
Ahmed, Mohiuddin and
Nur, Nashtarin",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.38",
doi = "10.18653/v1/2023.banglalp-1.38",
pages = "293--299",
abstract = "This article describes our analytical approach designed for BLP Workshop-2023 Task-2: in Sentiment Analysis. During actual task submission, we used DistilBERT. However, we later applied rigorous hyperparameter tuning and pre-processing, improving the result to 68{\%} accuracy and a 68{\%} F1 micro score with vanilla LSTM. Traditional machine learning models were applied to compare the result where 75{\%} accuracy was achieved with traditional SVM. Our contributions are a) data augmentation using the oversampling method to remove data imbalance and b) attention masking for data encoding with masked language modeling to capture representations of language semantics effectively, by further demonstrating it with explainable AI. Originally, our system scored 0.26 micro-F1 in the competition and ranked 30th among the participants for a basic DistilBERT model, which we later improved to 0.68 and 0.65 with LSTM and XLM-RoBERTa-base models, respectively.",
}
| This article describes our analytical approach designed for BLP Workshop-2023 Task-2: in Sentiment Analysis. During actual task submission, we used DistilBERT. However, we later applied rigorous hyperparameter tuning and pre-processing, improving the result to 68{\%} accuracy and a 68{\%} F1 micro score with vanilla LSTM. Traditional machine learning models were applied to compare the result where 75{\%} accuracy was achieved with traditional SVM. Our contributions are a) data augmentation using the oversampling method to remove data imbalance and b) attention masking for data encoding with masked language modeling to capture representations of language semantics effectively, by further demonstrating it with explainable AI. Originally, our system scored 0.26 micro-F1 in the competition and ranked 30th among the participants for a basic DistilBERT model, which we later improved to 0.68 and 0.65 with LSTM and XLM-RoBERTa-base models, respectively. | [
"Khushbu, Sharun",
"Nur, Nasheen",
"Ahmed, Mohiuddin",
"Nur, Nashtarin"
] | Ushoshi2023 at BLP-2023 Task 2: A Comparison of Traditional to Advanced Linguistic Models to Analyze Sentiment in Bangla Texts | banglalp-1.38 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
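The record above removes class imbalance by oversampling. A sketch with imbalanced-learn's RandomOverSampler on toy data; in practice the rows would be TF-IDF or token-id features built from the task dataset:

```python
# Random oversampling to balance class frequencies.
from collections import Counter
from imblearn.over_sampling import RandomOverSampler

X = [[0.1], [0.2], [0.3], [0.9]]  # stand-in feature rows
y = [0, 0, 0, 1]                  # imbalanced labels
X_res, y_res = RandomOverSampler(random_state=42).fit_resample(X, y)
print(Counter(y_res))             # balanced: Counter({0: 3, 1: 3})
```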
https://aclanthology.org/2023.banglalp-1.39.bib | https://aclanthology.org/2023.banglalp-1.39/ | @inproceedings{fatema-etal-2023-emptymind,
title = "{E}mpty{M}ind at {BLP}-2023 Task 2: Sentiment Analysis of {B}angla Social Media Posts using Transformer-Based Models",
author = "Fatema, Karnis and
Das, Udoy and
Mia, Md Ayon and
Mowla, Md Sajidul and
Yahan, Mahshar and
Ullah, Md Fayez and
Sarker, Arpita and
Murad, Hasan",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.39",
doi = "10.18653/v1/2023.banglalp-1.39",
pages = "300--304",
abstract = "With the popularity of social media platforms, people are sharing their individual thoughts by posting, commenting, and messaging with their friends, which generates a significant amount of digital text data every day. Conducting sentiment analysis of social media content is a vibrant research domain within the realm of Natural Language Processing (NLP), and it has practical, real-world uses. Numerous prior studies have focused on sentiment analysis for languages that have abundant linguistic resources, such as English. However, limited prior research works have been done for automatic sentiment analysis in low-resource languages like Bangla. In this research work, we are going to finetune different transformer-based models for Bangla sentiment analysis. To train and evaluate the model, we have utilized a dataset provided in a shared task organized by the BLP Workshop co-located with EMNLP-2023. Moreover, we have conducted a comparative study among different machine learning models, deep learning models, and transformer-based models for Bangla sentiment analysis. Our findings show that the BanglaBERT (Large) model has achieved the best result with a micro F1-Score of 0.7109 and secured 7th position in the shared task 2 leaderboard of the BLP Workshop in EMNLP 2023.",
}
| With the popularity of social media platforms, people are sharing their individual thoughts by posting, commenting, and messaging with their friends, which generates a significant amount of digital text data every day. Conducting sentiment analysis of social media content is a vibrant research domain within the realm of Natural Language Processing (NLP), and it has practical, real-world uses. Numerous prior studies have focused on sentiment analysis for languages that have abundant linguistic resources, such as English. However, limited prior research works have been done for automatic sentiment analysis in low-resource languages like Bangla. In this research work, we are going to finetune different transformer-based models for Bangla sentiment analysis. To train and evaluate the model, we have utilized a dataset provided in a shared task organized by the BLP Workshop co-located with EMNLP-2023. Moreover, we have conducted a comparative study among different machine learning models, deep learning models, and transformer-based models for Bangla sentiment analysis. Our findings show that the BanglaBERT (Large) model has achieved the best result with a micro F1-Score of 0.7109 and secured 7th position in the shared task 2 leaderboard of the BLP Workshop in EMNLP 2023. | [
"Fatema, Karnis",
"Das, Udoy",
"Mia, Md Ayon",
"Mowla, Md Sajidul",
"Yahan, Mahshar",
"Ullah, Md Fayez",
"Sarker, Arpita",
"Murad, Hasan"
] | EmptyMind at BLP-2023 Task 2: Sentiment Analysis of Bangla Social Media Posts using Transformer-Based Models | banglalp-1.39 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
https://aclanthology.org/2023.banglalp-1.40.bib | https://aclanthology.org/2023.banglalp-1.40/ | @inproceedings{seth-etal-2023-rsm,
title = "{RSM}-{NLP} at {BLP}-2023 Task 2: {B}angla Sentiment Analysis using Weighted and Majority Voted Fine-Tuned Transformers",
author = "Seth, Pratinav and
Goel, Rashi and
Mathur, Komal and
Vemulapalli, Swetha",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.40",
doi = "10.18653/v1/2023.banglalp-1.40",
pages = "305--311",
abstract = "This paper describes our approach to submissions made at Shared Task 2 at BLP Workshop - Sentiment Analysis of Bangla Social Media Posts. Sentiment Analysis is an action research area in the digital age. With the rapid and constant growth of online social media sites and services and the increasing amount of textual data, the application of automatic Sentiment Analysis is on the rise. However, most of the research in this domain is based on the English language. Despite being the world{'}s sixth most widely spoken language, little work has been done in Bangla. This task aims to promote work on Bangla Sentiment Analysis while identifying the polarity of social media content by determining whether the sentiment expressed in the text is Positive, Negative, or Neutral. Our approach consists of experimenting and finetuning various multilingual and pre-trained BERT-based models on our downstream tasks and using a Majority Voting and Weighted ensemble model that outperforms individual baseline model scores. Our system scored 0.711 for the multiclass classification task and scored 10th place among the participants on the leaderboard for the shared task. Our code is available at https://github.com/ptnv-s/RSM-NLP-BLP-Task2 .",
}
| This paper describes our approach to submissions made at Shared Task 2 at BLP Workshop - Sentiment Analysis of Bangla Social Media Posts. Sentiment Analysis is an active research area in the digital age. With the rapid and constant growth of online social media sites and services and the increasing amount of textual data, the application of automatic Sentiment Analysis is on the rise. However, most of the research in this domain is based on the English language. Despite being the world{'}s sixth most widely spoken language, little work has been done in Bangla. This task aims to promote work on Bangla Sentiment Analysis while identifying the polarity of social media content by determining whether the sentiment expressed in the text is Positive, Negative, or Neutral. Our approach consists of experimenting and finetuning various multilingual and pre-trained BERT-based models on our downstream tasks and using a Majority Voting and Weighted ensemble model that outperforms individual baseline model scores. Our system scored 0.711 for the multiclass classification task and scored 10th place among the participants on the leaderboard for the shared task. Our code is available at https://github.com/ptnv-s/RSM-NLP-BLP-Task2. | [
"Seth, Pratinav",
"Goel, Rashi",
"Mathur, Komal",
"Vemulapalli, Swetha"
] | RSM-NLP at BLP-2023 Task 2: Bangla Sentiment Analysis using Weighted and Majority Voted Fine-Tuned Transformers | banglalp-1.40 | 2310.14261 | [
"https://github.com/ptnv-s/rsm-nlp-blp-task2"
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
|
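The RSM-NLP entry above combines several fine-tuned BERT-based classifiers through majority voting and a weighted ensemble. The paper's exact model list and weights are not given here, so the following sketch only illustrates the two combination rules on made-up probability outputs; the model weights and label order are assumptions, not the authors' configuration.

```python
# Minimal sketch of majority-vote and weighted-ensemble combination of
# classifier outputs. All inputs below are illustrative assumptions.
import numpy as np

LABELS = ["Negative", "Neutral", "Positive"]  # assumed label order

def majority_vote(per_model_preds):
    """per_model_preds: list of label-index arrays, one per model."""
    stacked = np.stack(per_model_preds)  # shape (n_models, n_examples)
    # For each example, pick the label index most models agree on.
    return np.apply_along_axis(
        lambda col: np.bincount(col, minlength=len(LABELS)).argmax(),
        axis=0, arr=stacked)

def weighted_ensemble(per_model_probs, weights):
    """per_model_probs: list of (n_examples, n_labels) probability arrays."""
    weights = np.asarray(weights, dtype=float)
    weights /= weights.sum()  # normalize so the blend stays a distribution
    blended = sum(w * p for w, p in zip(weights, per_model_probs))
    return blended.argmax(axis=1)

# Hypothetical softmax outputs from three fine-tuned models on four posts:
probs_a = np.array([[0.7, 0.2, 0.1], [0.1, 0.6, 0.3],
                    [0.2, 0.2, 0.6], [0.5, 0.3, 0.2]])
probs_b = np.array([[0.6, 0.3, 0.1], [0.3, 0.4, 0.3],
                    [0.1, 0.3, 0.6], [0.2, 0.5, 0.3]])
probs_c = np.array([[0.5, 0.2, 0.3], [0.2, 0.5, 0.3],
                    [0.3, 0.1, 0.6], [0.4, 0.4, 0.2]])

votes = majority_vote([p.argmax(axis=1) for p in (probs_a, probs_b, probs_c)])
blend = weighted_ensemble([probs_a, probs_b, probs_c], weights=[0.4, 0.35, 0.25])
print([LABELS[i] for i in votes], [LABELS[i] for i in blend])
```

Majority voting only needs hard labels, while the weighted blend can break ties using each model's confidence, which is one plausible reason to report both, as the abstract does.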
https://aclanthology.org/2023.banglalp-1.41.bib | https://aclanthology.org/2023.banglalp-1.41/ | @inproceedings{dey-etal-2023-semantics-squad,
title = "Semantics Squad at {BLP}-2023 Task 2: Sentiment Analysis of {B}angla Text with Fine Tuned Transformer Based Models",
author = "Dey, Krishno and
Hasan, Md. Arid and
Tarannum, Prerona and
Palma, Francis",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.41",
doi = "10.18653/v1/2023.banglalp-1.41",
pages = "312--316",
abstract = "Sentiment analysis (SA) is a crucial task in natural language processing, especially in contexts with a variety of linguistic features, like Bangla. We participated in BLP-2023 Shared Task 2 on SA of Bangla text. We investigated the performance of six transformer-based models for SA in Bangla on the shared task dataset. We fine-tuned these models and conducted a comprehensive performance evaluation. We ranked 20th on the leaderboard of the shared task with a blind submission that used BanglaBERT Small. BanglaBERT outperformed other models with 71.33{\%} accuracy, and the closest model was BanglaBERT Large, with an accuracy of 70.90{\%}. BanglaBERT consistently outperformed others, demonstrating the benefits of models developed using sizable datasets in Bangla.",
}
| Sentiment analysis (SA) is a crucial task in natural language processing, especially for languages with diverse linguistic features such as Bangla. We participated in BLP-2023 Shared Task 2 on SA of Bangla text. We investigated the performance of six transformer-based models for SA in Bangla on the shared task dataset. We fine-tuned these models and conducted a comprehensive performance evaluation. We ranked 20th on the leaderboard of the shared task with a blind submission that used BanglaBERT Small. BanglaBERT outperformed the other models with 71.33% accuracy, and the closest model was BanglaBERT Large, with an accuracy of 70.90%. BanglaBERT consistently outperformed the others, demonstrating the benefits of models developed using sizable Bangla datasets. | [
"Dey, Krishno",
"Hasan, Md. Arid",
"Tarannum, Prerona",
"Palma, Francis"
] | Semantics Squad at BLP-2023 Task 2: Sentiment Analysis of Bangla Text with Fine Tuned Transformer Based Models | banglalp-1.41 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
||
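The Semantics Squad entry above fine-tunes six transformer models for three-way Bangla sentiment classification, with BanglaBERT performing best. As a rough illustration of such a setup, the sketch below uses the Hugging Face Trainer; the checkpoint name, file and column names, and hyperparameters are assumptions for illustration, not the authors' exact recipe.

```python
# Rough fine-tuning sketch for a BanglaBERT-style sentiment classifier.
# Checkpoint, data files, column names, and hyperparameters are assumed.
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

CHECKPOINT = "csebuetnlp/banglabert"  # assumed; the paper compares six models
LABEL2ID = {"Negative": 0, "Neutral": 1, "Positive": 2}

tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
model = AutoModelForSequenceClassification.from_pretrained(
    CHECKPOINT, num_labels=len(LABEL2ID))

def preprocess(batch):
    # Tokenize the post text and map string labels to integer ids.
    enc = tokenizer(batch["text"], truncation=True, max_length=128)
    enc["labels"] = [LABEL2ID[label] for label in batch["label"]]
    return enc

# Placeholder file names standing in for the shared-task splits.
data = load_dataset("csv", data_files={"train": "blp_task2_train.csv",
                                       "dev": "blp_task2_dev.csv"})
data = data.map(preprocess, batched=True)

args = TrainingArguments(output_dir="banglabert-sa", num_train_epochs=3,
                         per_device_train_batch_size=16, learning_rate=2e-5)

trainer = Trainer(model=model, args=args, tokenizer=tokenizer,
                  train_dataset=data["train"], eval_dataset=data["dev"])
trainer.train()
print(trainer.evaluate())
```

Passing the tokenizer to the Trainer gives dynamic padding per batch by default, which keeps short social media posts cheap to process.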
https://aclanthology.org/2023.banglalp-1.42.bib | https://aclanthology.org/2023.banglalp-1.42/ | @inproceedings{fahim-2023-aambela-blp,
title = "Aambela at {BLP}-2023 Task 2: Enhancing {B}angla{BERT} Performance for {B}angla Sentiment Analysis Task with In Task Pretraining and Adversarial Weight Perturbation",
author = "Fahim, Md",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Sadeque, Farig and
Amin, Ruhul",
booktitle = "Proceedings of the First Workshop on Bangla Language Processing (BLP-2023)",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.banglalp-1.42",
doi = "10.18653/v1/2023.banglalp-1.42",
pages = "317--323",
abstract = "This paper introduces the top-performing approachof {``}Aambela{''} for the BLP-2023 Task2: {``}Sentiment Analysis of Bangla Social MediaPosts{''}. The objective of the task was tocreate systems capable of automatically detectingsentiment in Bangla text from diverse socialmedia posts. My approach comprised finetuninga Bangla Language Model with threedistinct classification heads. To enhance performance,we employed two robust text classificationtechniques. To arrive at a final prediction,we employed a mode-based ensemble approachof various predictions from different models,which ultimately resulted in the 1st place in thecompetition.",
}
| This paper introduces the top-performing approach of "Aambela" for BLP-2023 Task 2: "Sentiment Analysis of Bangla Social Media Posts". The objective of the task was to create systems capable of automatically detecting sentiment in Bangla text from diverse social media posts. My approach comprised fine-tuning a Bangla Language Model with three distinct classification heads. To enhance performance, we employed two robust text classification techniques. To arrive at a final prediction, we employed a mode-based ensemble over the predictions of different models, which ultimately resulted in 1st place in the competition. | [
"Fahim, Md"
] | Aambela at BLP-2023 Task 2: Enhancing BanglaBERT Performance for Bangla Sentiment Analysis Task with In Task Pretraining and Adversarial Weight Perturbation | banglalp-1.42 | [
""
] | -1 | -1 | -1 | -1 | [] | [] | [] | 0 | Poster |
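The Aambela entry above credits part of its first-place result to Adversarial Weight Perturbation (AWP): before the optimizer step, the model's weights are nudged along the gradient direction and a second backward pass is run, so training optimizes against a locally worst-case version of the model. The sketch below is a minimal generic AWP helper; the layer filter, step sizes, and training-loop wiring are assumptions rather than the paper's settings.

```python
# Minimal sketch of Adversarial Weight Perturbation (AWP); hyperparameters
# and the choice of perturbed layers are illustrative assumptions.
import torch

class AWP:
    def __init__(self, model, adv_lr=1e-3, adv_eps=1e-2, target="weight"):
        self.model, self.adv_lr, self.adv_eps = model, adv_lr, adv_eps
        self.target = target  # only perturb parameters whose name contains this
        self.backup = {}

    def attack(self):
        # Step each targeted weight along its gradient direction, with the
        # step bounded relative to the weight tensor's own norm.
        for name, p in self.model.named_parameters():
            if p.requires_grad and p.grad is not None and self.target in name:
                self.backup[name] = p.data.clone()
                g_norm = p.grad.norm().item()
                w_norm = p.data.norm().item()
                if g_norm > 0:
                    eps = self.adv_eps * w_norm
                    step = self.adv_lr * w_norm * p.grad / g_norm
                    p.data.add_(step.clamp(-eps, eps))

    def restore(self):
        # Undo the perturbation before the real optimizer step.
        for name, p in self.model.named_parameters():
            if name in self.backup:
                p.data = self.backup[name]
        self.backup = {}

# Hypothetical wiring inside one training step:
#   loss = model(**batch).loss; loss.backward()          # clean gradients
#   awp.attack()                                         # perturb weights
#   model(**batch).loss.backward()                       # worst-case gradients
#   awp.restore(); optimizer.step(); optimizer.zero_grad()
```

Restoring the weights before the optimizer step is the key detail: the perturbation only shapes the gradients, it is never kept in the model.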